Building an SRS 4.0 High-Availability Video Streaming Cluster
Plan
Device IP | Role | Notes |
---|---|---|
10.80.210.103 (master) / 10.80.210.104 (backup) | Points at the origins --> nginx + keepalived + VIP | VIP: 10.80.210.213 |
10.80.210.105 / 10.80.210.106 | Origin cluster | can be scaled out later |
10.80.210.107 / 10.80.210.108 | Edge cluster | can be scaled out later |
10.80.210.109 (master) / 10.80.210.110 (backup) | Points at the edges --> nginx + keepalived + VIP | VIP: 10.80.210.214 |
Reference: https://mp.weixin.qq.com/s/pd9YQS0WR3hSuHybkm1F7Q (best practice: how to scale SRS concurrency)
Reference (official GitHub wiki): https://github.com/ossrs/srs/wiki/v4_CN_Home
nginx + keepalived + VIP in front of the origins: installation and configuration
nginx installation and configuration; install on both the master and the backup node
Installed version: nginx-1.22.0-1.el7.ngx.x86_64.rpm
nginx configuration
vi /etc/nginx/nginx.conf
Add the following:
stream {
    # reverse proxy for raw TCP (RTMP) traffic
    upstream socket_proxy {
        hash $remote_addr consistent;
        # destination addresses and ports to forward to
        server 10.80.210.105:1935 weight=5 max_fails=3 fail_timeout=30s;
        server 10.80.210.106:1935 weight=5 max_fails=3 fail_timeout=30s;
    }
    # the forwarding service itself: connections to port 1935 on this host
    # are forwarded to the servers defined in the socket_proxy upstream
    server {
        listen 1935;
        proxy_connect_timeout 1s;
        proxy_timeout 3s;
        proxy_pass socket_proxy;
    }
}
Start the service and enable it at boot
systemctl start nginx
systemctl enable nginx
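To confirm the stream proxy took effect, a quick check sketch (the official nginx.org package is built with the stream module; ss is part of iproute):
nginx -t                               # validate the configuration syntax
nginx -V 2>&1 | grep -o with-stream    # confirm the stream module is compiled in
systemctl reload nginx
ss -lntp | grep ':1935'                # nginx should now be listening on the RTMP port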
keepalived installation and configuration
Install keepalived
yum install -y keepalived
Edit the configuration file
cp /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.bak
vi /etc/keepalived/keepalived.conf
Master node configuration:
global_defs {
    # router id: a unique identifier for this keepalived node
    router_id keep_host103
    #vrrp_skip_check_adv_addr
    #vrrp_strict
    #vrrp_garp_interval 0
    #vrrp_gna_interval 0
}
vrrp_script check_web {
    script "/etc/keepalived/check_web.sh"  # path of the health-check script
    interval 2                             # run the script every 2 seconds
    weight -20                             # if the script fails, lower this node's priority by 20 so the VIP fails over to the backup
}
vrrp_instance VI_1 {
    # MASTER on the primary node, BACKUP on the standby
    state MASTER
    # network interface this instance binds to
    interface ens192
    # must be the same on master and backup
    virtual_router_id 51
    # priority: the master's value must be higher than the backup's
    priority 100
    # VRRP advertisement interval, in seconds
    advert_int 1
    # authentication, to keep unauthorized nodes out of the VRRP group
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    # virtual IP(s); more than one VIP may be listed
    virtual_ipaddress {
        10.80.210.213
    }
    # run the health-check script defined above
    track_script {
        check_web
    }
}
Backup node configuration:
global_defs {
    # router id: a unique identifier for this keepalived node
    router_id keep_host104
    #vrrp_skip_check_adv_addr
    #vrrp_strict
    #vrrp_garp_interval 0
    #vrrp_gna_interval 0
}
vrrp_script check_web {
    script "/etc/keepalived/check_web.sh"  # path of the health-check script
    interval 2                             # run the script every 2 seconds
    weight -20                             # if the script fails, lower this node's priority by 20 so the VIP fails over to the backup
}
vrrp_instance VI_1 {
    # MASTER on the primary node, BACKUP on the standby
    state BACKUP
    # network interface this instance binds to
    interface ens192
    # must be the same on master and backup
    virtual_router_id 51
    # priority: the master's value must be higher than the backup's
    priority 88
    # VRRP advertisement interval, in seconds
    advert_int 1
    # authentication, to keep unauthorized nodes out of the VRRP group
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    # virtual IP(s); more than one VIP may be listed
    virtual_ipaddress {
        10.80.210.213
    }
    # run the health-check script defined above
    track_script {
        check_web
    }
}
Write the nginx monitoring script
If nginx stops, keepalived is stopped as well, so the VIP fails over to the backup host.
Script: vi /etc/keepalived/check_web.sh
#!/bin/bash
# stop keepalived when no nginx process is running, so the VIP moves to the backup
num=$(ps -C nginx --no-header | wc -l)
if [ "$num" -eq 0 ]; then
    systemctl stop keepalived
fi
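keepalived can only run the script if it is executable. After creating it, a quick failover test sketch (IPs and interface follow this setup):
chmod +x /etc/keepalived/check_web.sh
# simulate a failure on the master: check_web.sh will stop keepalived within ~2 seconds
systemctl stop nginx
# on the backup (10.80.210.104), the VIP should now be bound
ip a show ens192 | grep 10.80.210.213
# restore the master afterwards
systemctl start nginx && systemctl start keepalived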
Start keepalived and enable it at boot
systemctl start keepalived
systemctl enable keepalived
Check the keepalived processes and verify that the virtual IP is active:
[root@srs103 ~]# ps -ef|grep keepalived
root 29357 1 0 6月14 ? 00:00:04 /usr/sbin/keepalived -D
root 29358 29357 0 6月14 ? 00:00:03 /usr/sbin/keepalived -D
root 29359 29357 0 6月14 ? 00:00:13 /usr/sbin/keepalived -D
root 32421 32394 0 09:58 pts/0 00:00:00 grep --color=auto keepalived
[root@srs103 ~]#
[root@srs103 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens192: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 00:0c:29:99:bb:6d brd ff:ff:ff:ff:ff:ff
inet 10.80.210.103/23 brd 10.80.211.255 scope global ens192
valid_lft forever preferred_lft forever
inet 10.80.210.213/32 scope global ens192
valid_lft forever preferred_lft forever
inet6 fe80::169e:5d40:a142:b0a2/64 scope link
valid_lft forever preferred_lft forever
[root@srs104 ~]# ps -ef|grep keepalived
root 21189 1 0 6月14 ? 00:00:04 /usr/sbin/keepalived -D
root 21190 21189 0 6月14 ? 00:00:04 /usr/sbin/keepalived -D
root 21191 21189 0 6月14 ? 00:00:15 /usr/sbin/keepalived -D
root 24183 24157 0 09:58 pts/0 00:00:00 grep --color=auto keepalived
[root@srs104 ~]#
[root@srs104 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens192: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 00:0c:29:e6:70:0a brd ff:ff:ff:ff:ff:ff
inet 10.80.210.104/23 brd 10.80.211.255 scope global ens192
valid_lft forever preferred_lft forever
inet6 fe80::a2fd:cc51:b204:32df/64 scope link
valid_lft forever preferred_lft forever
SRS origin installation and configuration
Origin cluster reference (official wiki): https://github.com/ossrs/srs/wiki/v4_CN_OriginCluster
SRS installation reference:
Configuration of SRS origin 1 (10.80.210.105):
Note: the default startup configuration is srs.conf. I made a backup of it and then modified that file.
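A minimal sketch of backing up and editing the file, assuming SRS was installed under the common /usr/local/srs prefix (adjust the path to your own install):
cd /usr/local/srs                     # assumed install prefix
cp conf/srs.conf conf/srs.conf.bak    # keep a copy of the default config
vi conf/srs.conf
The resulting srs.conf: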
# main config for srs.
# @see full.conf for detail config.
# RTMP listen port
listen              1935;
# maximum number of connections
max_connections     1000;
srs_log_tank        file;
srs_log_file        ./objs/srs.log;
daemon              on;
http_api {
    enabled         on;
    listen          1985;
}
vhost __defaultVhost__ {
    hls {
        enabled         on;
        hls_fragment    10;
        hls_window      60;
        hls_path        ./objs/nginx/html;
        hls_m3u8_file   [app]/[stream].m3u8;
        hls_ts_file     [app]/[stream]-[seq].ts;
    }
    http_remux {
        enabled         on;
        mount           [vhost]/[app]/[stream].flv;
    }
    cluster {
        # cluster mode; for an origin-cluster node the value must be local
        mode            local;
        # enable the origin cluster
        origin_cluster  on;
        # HTTP API addresses of the other origins in the cluster
        coworkers       10.80.210.106:1985;
    }
}
Start the service:
systemctl restart srs
systemctl enable srs
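To confirm the origin is up, the SRS HTTP API on port 1985 can be queried; a sketch using the standard /api/v1 endpoints:
curl http://10.80.210.105:1985/api/v1/versions     # SRS version info
curl http://10.80.210.105:1985/api/v1/summaries    # overall server status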
Configuration of SRS origin 2 (10.80.210.106):
# main config for srs.
# @see full.conf for detail config.
# RTMP listen port
listen              1935;
# maximum number of connections
max_connections     1000;
srs_log_tank        file;
srs_log_file        ./objs/srs.log;
daemon              on;
http_api {
    enabled         on;
    listen          1985;
}
vhost __defaultVhost__ {
    hls {
        enabled         on;
        hls_fragment    10;
        hls_window      60;
        hls_path        ./objs/nginx/html;
        hls_m3u8_file   [app]/[stream].m3u8;
        hls_ts_file     [app]/[stream]-[seq].ts;
    }
    http_remux {
        enabled         on;
        mount           [vhost]/[app]/[stream].flv;
    }
    cluster {
        # cluster mode; for an origin-cluster node the value must be local
        mode            local;
        # enable the origin cluster
        origin_cluster  on;
        # HTTP API addresses of the other origins in the cluster
        coworkers       10.80.210.105:1985;
    }
}
Start the service:
systemctl restart srs
systemctl enable srs
The two configuration files differ only in the final coworkers line: each origin lists the HTTP API addresses of all origins except itself.
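For later scale-out, each origin simply lists all of its peers. A hypothetical example with a third origin 10.80.210.111 added (that IP is made up for illustration); this would be the cluster block on 10.80.210.105:
cluster {
    mode            local;
    origin_cluster  on;
    # all origins except this one, space-separated
    coworkers       10.80.210.106:1985 10.80.210.111:1985;
}
The new origin would also need to be added to the socket_proxy upstream in the nginx stream block and to the origin line of the edge configuration shown later.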
Test
Using ffmpeg
Publish a stream to the load-balancer VIP 10.80.210.213:
#!/bin/bash
# republish the test file in an endless loop
for ((;;)); do
    ffmpeg -re -i /root/test.flv -vcodec copy -acodec copy -f flv -y rtmp://10.80.210.213/live/livestream;
    sleep 1;
done
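While the loop is publishing, the SRS HTTP API can show which origin actually received the stream; a quick sketch:
curl http://10.80.210.105:1985/api/v1/streams    # active streams on origin 1
curl http://10.80.210.106:1985/api/v1/streams    # active streams on origin 2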
SRS edge installation and configuration
All edge nodes use the same configuration and are load-balanced by nginx, so the edge layer is relatively simple and can be scaled out as needed.
SRS edge cluster references:
- Edge Cluster: RTMP: how to deploy an RTMP delivery cluster, e.g. a CDN delivering RTMP.
- Edge Cluster: FLV: how to deploy an HTTP-FLV delivery cluster, e.g. a CDN delivering HTTP-FLV.
- Edge Cluster: HLS: how to deploy an HLS delivery cluster, e.g. a CDN delivering HLS.
SRS installation reference:
SRS edge configuration file:
# main config for srs.
# @see full.conf for detail config.
# RTMP listen port
listen              1935;
# maximum number of connections
max_connections     1000;
srs_log_tank        file;
srs_log_file        ./objs/srs.log;
daemon              on;
http_api {
    enabled         on;
    listen          1985;
}
http_server {
    enabled         on;
    listen          8080;
    dir             ./objs/nginx/html;
}
rtc_server {
    enabled         on;
    listen          8000; # UDP port
    # @see https://github.com/ossrs/srs/wiki/v4_CN_WebRTC#config-candidate
    candidate       $CANDIDATE;
}
vhost __defaultVhost__ {
    hls {
        enabled         on;
        hls_fragment    10;
        hls_window      60;
        hls_path        ./objs/nginx/html;
        hls_m3u8_file   [app]/[stream].m3u8;
        hls_ts_file     [app]/[stream]-[seq].ts;
    }
    http_remux {
        enabled         on;
        mount           [vhost]/[app]/[stream].flv;
        hstrs           on;
    }
    dvr {
        enabled             on;
        dvr_apply           all;
        # DVR plan
        dvr_plan            segment;
        # recording path
        # dvr_path ./objs/nginx/html/[app]/[stream].[timestamp].flv;
        dvr_path            ./objs/nginx/html/[app]/[stream]/[2006]/[01]/[02]/[15].[04].[05].[999].flv;
        # segment duration for segment-mode recording, in seconds
        dvr_duration        300;
        # start each recorded segment on a keyframe
        dvr_wait_keyframe   on;
        # timestamp jitter algorithm: full = full timestamp correction;
        # zero = only ensure timestamps start from 0; off = no correction
        time_jitter         full;
    }
    # HTTP hooks; omit if not needed...
    #http_hooks {
    #    enabled         on;
    #    on_connect      http://127.0.0.1:8085/api/v1/clients;
    #    on_close        http://127.0.0.1:8085/api/v1/clients;
    #    on_publish      http://127.0.0.1:8085/api/v1/streams;
    #    on_unpublish    http://127.0.0.1:8085/api/v1/streams;
    #    on_play         http://127.0.0.1:8085/api/v1/sessions;
    #    on_stop         http://127.0.0.1:8085/api/v1/sessions;
    #    on_dvr          http://127.0.0.1:8085/api/v1/dvrs;
    #    on_hls          http://127.0.0.1:8085/api/v1/hls;
    #    on_hls_notify   http://127.0.0.1:8085/api/v1/hls/[app]/[stream]/[ts_url][param];
    #}
    cluster {
        # cluster mode; for an edge node the value is remote
        mode        remote;
        # RTMP addresses of the origin servers (port 1935)
        origin      10.80.210.105:1935 10.80.210.106:1935;
    }
}
The most important part is the cluster block at the end: the mode (remote) and the origin addresses, which must use the RTMP port 1935, not the HTTP API port.
Start the service:
systemctl restart srs
systemctl enable srs
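With the origins receiving the ffmpeg loop above, playback from a single edge can be tested directly; a sketch, assuming the stream name livestream used earlier:
# RTMP pull from an edge; the edge fetches the stream from an origin on demand
ffplay rtmp://10.80.210.107/live/livestream
# HTTP-FLV from the edge's built-in http_server on port 8080
ffplay http://10.80.210.107:8080/live/livestream.flv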
nginx + keepalived + VIP in front of the edges
nginx installation and configuration; install on both the master and the backup node
Installed version: nginx-1.22.0-1.el7.ngx.x86_64.rpm
nginx configuration
vi /etc/nginx/conf.d/default.conf
Add the following:
upstream srs {
    server 10.80.210.107:8080;
    server 10.80.210.108:8080;
}
server {
    listen 80;
    server_name localhost;
    # proxy all requests to the SRS edge nodes
    location ~ /* {
        proxy_pass http://srs;
        add_header Cache-Control no-cache;
        add_header Access-Control-Allow-Origin *;
    }
    # deny access to sensitive files and directories
    location ~ ^/(\.user.ini|\.htaccess|\.git|\.svn|\.project|LICENSE|README.md)
    {
        return 404;
    }
    # directory used for SSL certificate validation (e.g. ACME challenges)
    location ~ \.well-known {
        allow all;
    }
    location ~ .*\.(gif|jpg|jpeg|png|bmp|swf)$
    {
        expires 30d;
        error_log /dev/null;
        access_log /dev/null;
    }
    location ~ .*\.(js|css)?$
    {
        expires 12h;
        error_log /dev/null;
        access_log /dev/null;
    }
}
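After saving the file, validate and reload nginx; with a stream being published, fetching a few seconds of FLV through the proxy should return HTTP 200 (a sketch; --max-time simply cuts the live stream off):
nginx -t && systemctl reload nginx
curl -v --max-time 3 -o /dev/null http://127.0.0.1/live/livestream.flv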
keepalived installation and configuration
Install keepalived
yum install -y keepalived
Edit the configuration file
cp /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.bak
vi /etc/keepalived/keepalived.conf
Master node configuration:
global_defs {
    # router id: a unique identifier for this keepalived node
    router_id keep_host109
}
vrrp_script check_web {
    script "/etc/keepalived/check_web.sh"  # path of the health-check script
    interval 2                             # run the script every 2 seconds
    weight -20                             # if the script fails, lower this node's priority by 20 so the VIP fails over to the backup
}
vrrp_instance VI_1 {
    # MASTER on the primary node, BACKUP on the standby
    state MASTER
    # network interface this instance binds to
    interface ens192
    # must be the same on master and backup; note this reuses VRID 51 from the
    # origin-side pair, so if both pairs share a network segment, a different
    # VRID (e.g. 52) would avoid VRRP conflicts
    virtual_router_id 51
    # priority: the master's value must be higher than the backup's
    priority 100
    # VRRP advertisement interval, in seconds
    advert_int 1
    # authentication, to keep unauthorized nodes out of the VRRP group
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    # virtual IP(s); more than one VIP may be listed
    virtual_ipaddress {
        10.80.210.214
    }
    # run the health-check script defined above
    track_script {
        check_web
    }
}
Backup node configuration:
global_defs {
    # router id: a unique identifier for this keepalived node
    router_id keep_host110
}
vrrp_script check_web {
    script "/etc/keepalived/check_web.sh"  # path of the health-check script
    interval 2                             # run the script every 2 seconds
    weight -20                             # if the script fails, lower this node's priority by 20 so the VIP fails over to the backup
}
vrrp_instance VI_1 {
    # MASTER on the primary node, BACKUP on the standby
    state BACKUP
    # network interface this instance binds to
    interface ens192
    # must be the same on master and backup (see the note in the master config)
    virtual_router_id 51
    # priority: the master's value must be higher than the backup's
    priority 88
    # VRRP advertisement interval, in seconds
    advert_int 1
    # authentication, to keep unauthorized nodes out of the VRRP group
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    # virtual IP(s); more than one VIP may be listed
    virtual_ipaddress {
        10.80.210.214
    }
    # run the health-check script defined above
    track_script {
        check_web
    }
}
Write the nginx monitoring script
If nginx stops, keepalived is stopped as well, so the VIP fails over to the backup host.
Script: vi /etc/keepalived/check_web.sh
#!/bin/bash
# stop keepalived when no nginx process is running, so the VIP moves to the backup
num=$(ps -C nginx --no-header | wc -l)
if [ "$num" -eq 0 ]; then
    systemctl stop keepalived
fi
Start keepalived and enable it at boot
systemctl start keepalived
systemctl enable keepalived
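As with the origin-side pair, remember to make the check script executable and verify the VIP on the master (10.80.210.109); a quick sketch:
chmod +x /etc/keepalived/check_web.sh
ps -ef | grep keepalived
ip a show ens192 | grep 10.80.210.214    # the VIP should be bound on the master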
Test the service
Open http://10.80.210.214 in a browser.
Use OBS (click to download) to publish a stream.
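An end-to-end sketch of the final flow: publish to the origin-side VIP and play back through the edge-side VIP (URLs follow the mounts configured above; in OBS the same RTMP URL is split into server and stream key):
# publish (OBS server: rtmp://10.80.210.213/live, stream key: livestream), or with ffmpeg:
ffmpeg -re -i /root/test.flv -c copy -f flv rtmp://10.80.210.213/live/livestream
# play HTTP-FLV through the edge VIP (nginx on port 80 proxies to the edges' port 8080)
ffplay http://10.80.210.214/live/livestream.flv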