
Deploying a Highly Available LAMP Cluster with Keepalived, Varnish, and Nginx

  Keepalived is a high-performance high-availability / hot-standby solution for servers. It protects against single points of failure and, combined with Nginx, provides high availability for the web front end.
  Keepalived is built on VRRP (Virtual Router Redundancy Protocol), which it uses to implement high availability (HA). VRRP is a router-redundancy protocol: it groups two or more routers into a single virtual device that exposes one or more virtual router IPs. Within the group, the router that actually holds the external IP, either the healthy original owner or the winner of an election, is the MASTER; it performs all network functions for the virtual IP, such as answering ARP requests, handling ICMP, and forwarding traffic. The other devices do not hold the virtual IP and stay in the BACKUP state; apart from receiving the MASTER's VRRP advertisements, they perform no external network functions. When the MASTER fails, a BACKUP takes over its duties. VRRP carries its data over multicast and sends it from a special virtual source MAC address rather than the NIC's own MAC. At runtime only the MASTER periodically sends VRRP advertisements, announcing that it is healthy and which virtual IP(s) it serves; the BACKUPs only listen. If no advertisement arrives within a set interval, each BACKUP declares itself MASTER, starts advertising, and a new MASTER election takes place.
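  To see this in action, the MASTER's periodic advertisements can be captured on the multicast group. A minimal sketch (VRRP is IP protocol 112; the interface name is taken from the DR configuration later in this article and may differ on your hosts):

[root@DR1 Desktop]# tcpdump -i eno16777736 -nn vrrp  # only the current MASTER should be sending advertisements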

Build RS1 (RS1 provides the mariadb service and static content)

[root@RS1 Desktop]# yum -y install mariadb-server httpd
[root@RS1 Desktop]# vim /etc/my.cnf
    [mysqld]
    ...
    skip-name-resolve=ON
    innodb-file-per-table=ON
    ...
[root@RS1 Desktop]# systemctl start mariadb
[root@RS1 Desktop]# mysql_secure_installation  # perform basic database hardening
    ...
[root@RS1 Desktop]# mysql -uroot -p
    Enter password:
    MariaDB [(none)]> create database wordpress;
    MariaDB [(none)]> grant all on wordpress.* to 'wpuser'@'10.10.0.%' identified by '123456';
    MariaDB [(none)]> flush privileges;
    MariaDB [(none)]> exit;
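At this point it is worth confirming the grant works from another host on the 10.10.0.0/24 network; a quick sketch, run from RS2 (assuming it can already reach RS1):

[root@RS2 Desktop]# mysql -h 10.10.0.21 -u wpuser -p123456 -e 'show databases;'  # the list should include wordpress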
[root@RS1 Desktop]# wget https://cn.wordpress.org/wordpress-4.9.4-zh_CN.tar.gz
[root@RS1 Desktop]# tar xf wordpress-4.9.4-zh_CN.tar.gz -C /var/www/html
[root@RS1 Desktop]# vim /var/www/html/index.html
    <h1>10.10.0.21 server</h1>
[root@RS1 Desktop]# vim /etc/httpd/conf.d/vhost.conf
    <VirtualHost *:80>
        ServerName www.test.org
        DirectoryIndex index.html index.php
        DocumentRoot /var/www/html
        ProxyRequests Off
        ProxyPassMatch ^/(.*\.php)$ fcgi://10.10.0.22:9000/var/www/html/$1
        ProxyPassMatch ^/(ping|status)$ fcgi://10.10.0.22:9000/$1
        <Directory />
            Options FollowSymLinks
            AllowOverride None
            Require all granted
        </Directory>
    </VirtualHost>
[root@RS1 Desktop]# systemctl start httpd
[root@RS1 Desktop]# cp /var/www/html/wordpress/wp-config-sample.php /var/www/html/wordpress/wp-config.php
[root@RS1 Desktop]# vim /var/www/html/wordpress/wp-config.php  # point wordpress at its database
    define('DB_NAME', 'wordpress');
    define('DB_USER', 'wpuser');
    define('DB_PASSWORD', '123456');
    define('DB_HOST', '10.10.0.21');
[root@RS1 Desktop]# scp -r /var/www/html/wordpress 10.10.0.22:/var/www/html/  # copy wordpress to the .22 host

Build RS2 (RS2 provides dynamic content)
 
[root@RS2 Desktop]# yum -y install httpd php-fpm php-mysql php-mbstring php-mcrypt
[root@RS2 Desktop]# vim /var/www/html/index.html
    <h1>10.10.0.22 server</h1>
[root@RS2 Desktop]# vim /etc/httpd/conf.d/vhost.conf
    <VirtualHost *:80>
        ServerName www.test.org
        DirectoryIndex index.html index.php
        DocumentRoot /var/www/html
        ProxyRequests Off
        ProxyPassMatch ^/(.*\.php)$ fcgi://127.0.0.1:9000/var/www/html/$1
        ProxyPassMatch ^/(ping|status)$ fcgi://127.0.0.1:9000/$1
        <Directory />
            Options FollowSymLinks
            AllowOverride None
            Require all granted
        </Directory>
    </VirtualHost>
[root@RS2 Desktop]# systemctl start httpd
[root@RS2 Desktop]# vim /etc/php-fpm.d/www.conf
    listen = 0.0.0.0:9000
    ; listen.allowed_clients = 127.0.0.1  # comment this line out so other hosts can connect remotely
    pm.status_path = /status
    ping.path = /ping
    ping.response = pong
[root@RS2 Desktop]# chown apache:apache /var/lib/php/session
[root@RS2 Desktop]# systemctl start php-fpm
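With php-fpm now listening on 0.0.0.0:9000, the ping and status endpoints configured above give a quick health check; a sketch:

[root@RS2 Desktop]# curl http://10.10.0.22/ping    # expect: pong
[root@RS2 Desktop]# curl http://10.10.0.22/status  # php-fpm pool status page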
Build DR1
[root@DR1 Desktop]# yum install -y nginx keepalived
[root@DR1 Desktop]# vim /etc/nginx/nginx.conf  # configure the nginx reverse proxy
    http {
        ...
        upstream websrvs {
            server 10.10.0.21:80;
            server 10.10.0.22:80;
            server 127.0.0.1:80 backup;
        }
   
        server {
            listen 80;
            include /etc/nginx/default.d/*.conf;
            location / {
                proxy_pass http://websrvs;
                proxy_set_header Host $http_host;
                proxy_set_header X-Forwarded-For $remote_addr;
            }
        }
        ...
    }
[root@DR1 Desktop]# vim /etc/nginx/conf.d/localhost.conf  # configure nginx's local fallback server
    server {
        listen 127.0.0.1:80;
        root /usr/share/nginx/html;
        index index.html;
    }
[root@DR1 Desktop]# vim /usr/share/nginx/html/index.html
    <h1>Balance Server DR1</h1>
[root@DR1 Desktop]# nginx -t  # check nginx syntax
[root@DR1 Desktop]# systemctl start nginx
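Before layering keepalived on top, it helps to confirm the proxy balances correctly. Note that a request to 127.0.0.1 matches the more specific listen in localhost.conf and returns the fallback page; to exercise the upstream group, curl DR1's own LAN address (written as a placeholder here, since the article does not state it):

[root@DR1 Desktop]# curl http://127.0.0.1/      # <h1>Balance Server DR1</h1> (local fallback)
[root@DR1 Desktop]# curl http://<DR1-LAN-IP>/   # should alternate between the two RS index pages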
[root@DR1 Desktop]# vim /etc/keepalived/keepalived.conf  # configure keepalived
    global_defs {
      notification_email {
            root@localhost
      }
      notification_email_from keepalived@localhost
      smtp_server 127.0.0.1
      smtp_connect_timeout 30
      router_id dr1
      vrrp_skip_check_adv_addr
      vrrp_mcast_group4 224.0.0.111
    }
   
    vrrp_script chk_ngx {    # check whether nginx is running on this node; if not, lower the priority
      # kill -0 PID: signal 0 sends nothing, but the kernel still runs its error checks, so it is a
      # common way to test whether a process exists (returns 0 if it does, 1 if it does not)
      script "killall -0 nginx 2> /dev/null && exit 0 || exit 1"
      weight -10
      interval 1
      fall 3
      rise 3
    }
   
    vrrp_instance VIP_1 {
        state MASTER
        interface eno16777736
        virtual_router_id 1
        priority 100
        advert_int 1
        authentication {
            auth_type PASS
            auth_pass 1111@#$%
        }
        track_script {
            chk_ngx
        }
        virtual_ipaddress {
            192.168.4.120/24 dev eno16777736 label eno16777736:0
        }
    }
[root@DR1 Desktop]# systemctl start keepalived.service
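Once keepalived is up, the VIP should be bound on the MASTER, and the effect of chk_ngx can be observed directly; a sketch of the checks (addresses and interface taken from the config above):

[root@DR1 Desktop]# ip addr show eno16777736 | grep 192.168.4.120  # the VIP appears on the MASTER
[root@DR1 Desktop]# killall -0 nginx; echo $?                      # 0 while nginx is running
[root@DR1 Desktop]# systemctl stop nginx  # after 3 failed checks the priority drops by 10 and the VIP should move to DR2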
Build DR2 by following the DR1 steps (a sketch of the lines that differ follows)
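The only lines in keepalived.conf that should differ from DR1 are the router_id in global_defs, the state, and the priority; a minimal sketch, assuming a BACKUP priority of 95 so that the MASTER's 100 - 10 = 90 drops below it when chk_ngx fails:

    vrrp_instance VIP_1 {
        state BACKUP
        priority 95
        ...
    }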
Client testing
[root@client Desktop]# for i in {1..20}; do curl http://192.168.4.120; done
<h1>10.10.0.21 server</h1>
<h1>10.10.0.22 server</h1>
<h1>10.10.0.21 server</h1>
<h1>10.10.0.22 server</h1>
<h1>10.10.0.21 server</h1>
<h1>10.10.0.22 server</h1>
<h1>10.10.0.21 server</h1>
<h1>10.10.0.22 server</h1>
<h1>10.10.0.21 server</h1>
<h1>10.10.0.22 server</h1>
<h1>10.10.0.21 server</h1>
<h1>10.10.0.22 server</h1>
<h1>10.10.0.21 server</h1>
<h1>10.10.0.22 server</h1>
<h1>10.10.0.21 server</h1>
<h1>10.10.0.22 server</h1>
<h1>10.10.0.21 server</h1>
<h1>10.10.0.22 server</h1>
<h1>10.10.0.21 server</h1>
<h1>10.10.0.22 server</h1>
[root@client Desktop]# ab -c 100 -n 10000 http://192.168.4.120/wordpress  # stress-test the dynamic page (note: without a trailing slash httpd answers with a 301 redirect, hence the 10000 non-2xx responses below)
This is ApacheBench, Version 2.3 <$Revision: 1430300 $>
Copyright 1996 Adam Twiss, Zeus Technology Ltd, http://www.zeustech.net/
Licensed to The Apache Software Foundation, http://www.apache.org/

Benchmarking 192.168.4.120 (be patient)
Completed 1000 requests
Completed 2000 requests
Completed 3000 requests
Completed 4000 requests
Completed 5000 requests
Completed 6000 requests
Completed 7000 requests
Completed 8000 requests
Completed 9000 requests
Completed 10000 requests
Finished 10000 requests


Server Software:        nginx/1.12.2
Server Hostname:        192.168.4.120
Server Port:            80

Document Path:          /wordpress
Document Length:        239 bytes

Concurrency Level:      100
Time taken for tests:  4.685 seconds
Complete requests:      10000
Failed requests:        0
Write errors:          0
Non-2xx responses:      10000
Total transferred:      4600000 bytes
HTML transferred:      2390000 bytes
Requests per second:    2134.44 [#/sec] (mean)
Time per request:      46.851 [ms] (mean)
Time per request:      0.469 [ms] (mean, across all concurrent requests)
Transfer rate:          958.83 [Kbytes/sec] received

Connection Times (ms)
              min  mean[+/-sd] median  max
Connect:        0    1  4.8      0      51
Processing:    10  45  7.4    46      67
Waiting:        1  44  7.8    46      67
Total:        12  47  6.5    46      89

Percentage of the requests served within a certain time (ms)
  50%    46
  66%    47
  75%    48
  80%    49
  90%    53
  95%    58
  98%    63
  99%    67
 100%    89 (longest request)

Configure the Varnish server
[root@Varnish Desktop]# yum install varnish
[root@Varnish Desktop]# vim /etc/varnish/varnish.params
    RELOAD_VCL=1
    VARNISH_VCL_CONF=/etc/varnish/default.vcl
    VARNISH_LISTEN_PORT=6081
    VARNISH_ADMIN_LISTEN_ADDRESS=10.10.0.23
    VARNISH_ADMIN_LISTEN_PORT=6082
    VARNISH_SECRET_FILE=/etc/varnish/secret
    VARNISH_STORAGE="file,/data/cache,1G"  # the /data directory must be created first
    VARNISH_USER=varnish
    VARNISH_GROUP=varnish
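The file storage backend needs the /data directory to exist and be writable by the varnish user, so presumably something like this first (not shown in the original steps):

[root@Varnish Desktop]# mkdir -p /data
[root@Varnish Desktop]# chown varnish:varnish /data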
[root@Varnish Desktop]# vim /etc/varnish/default.vcl  # edit the configuration file and add VCL rules
    vcl 4.0;
    import directors;
    probe web_healthchk {  # define the backend health check
        .url="/index.html";
        .interval=2s;
        .timeout=1s;
        .window=5;
        .threshold=3;
    }
    backend RS1 {  # define backend RS1
        .host="10.10.0.21";
        .port="80";
        .probe=web_healthchk;
    }
    backend RS2 {  # define backend RS2
        .host="10.10.0.22";
        .port="80";
        .probe=web_healthchk;
    }
    sub vcl_init {  # initialize the backend server group
        new WEBGROUP = directors.round_robin();
        WEBGROUP.add_backend(RS1);
        WEBGROUP.add_backend(RS2);
    }
    acl PURGERS {  # define the source IPs allowed to issue purge requests
        "127.0.0.1";
        "10.10.0.0"/24;
    }
    sub vcl_recv {
        if(req.http.Authorization || req.http.Cookie) {  # do not cache requests carrying auth or cookies
            return(pass);
        }
        if(req.method != "GET" && req.method != "HEAD") {  #除了get和head以外的请求方法不缓存
            return(pass);
        }
        if(req.url ~ "index.php") {  #动态资源不缓存
            return(pass);
        }
        if(req.method == "PURGE") {  #purge方法清理缓存
            if(client.ip ~ PURGERS) {
                return(purge);
            }
        }
        if(req.http.X-Forwarded-For) {  # append to the X-Forwarded-For header of requests sent to the backends
            set req.http.X-Forwarded-For = req.http.X-Forwarded-For + "," + client.ip;
        }else {
            set req.http.X-Forwarded-For = client.ip;
        }
        set req.backend_hint = WEBGROUP.backend();  # hand the request to the backend server group
        return(hash);
    }
    sub vcl_hash {
        hash_data(req.url);
    }
    sub vcl_backend_response {  # customize cache lifetimes
        if(bereq.url ~ "\.(jpg|jpeg|gif|png)$") {
            set beresp.ttl = 1d;
        }
        if(bereq.url ~ "\.(html|css|js)$") {
            set beresp.ttl = 12h;
        }
        if(beresp.http.Set-Cookie) {
            set beresp.grace = 600s;
            return(deliver);
        }
    }
    sub vcl_deliver {
        if(obj.hits > 0) {  # add an X-Cache response header indicating whether the cache was hit
            set resp.http.X-Cache = "Hit from "+server.ip;
        }else {
            set resp.http.X-Cache = "Miss";
        }
    }
[root@Varnish Desktop]# systemctl start varnish.service
[root@Varnish Desktop]# ss -tan
    State      Recv-Q Send-Q Local Address:Port              Peer Address:Port             
    LISTEN    0      5      192.168.122.1:53                      *:*                 
    LISTEN    0      128          *:22                      *:*                 
    LISTEN    0      128    127.0.0.1:631                      *:*                 
    LISTEN    0      100    127.0.0.1:25                      *:*                 
    LISTEN    0      128          *:6081                    *:*                 
    LISTEN    0      10    10.10.0.23:6082                    *:*                 
    LISTEN    0      128        :::22                      :::*                 
    LISTEN    0      128        ::1:631                    :::*                 
    LISTEN    0      100        ::1:25                      :::*                 
    LISTEN    0      128        :::6081                    :::*
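With Varnish listening on 6081, the X-Cache header set in vcl_deliver makes cache behaviour easy to verify, and the PURGERS ACL can be exercised from an allowed host; a sketch:

[root@Varnish Desktop]# curl -I http://10.10.0.23:6081/index.html        # first request: X-Cache: Miss
[root@Varnish Desktop]# curl -I http://10.10.0.23:6081/index.html        # repeated request: X-Cache: Hit from ...
[root@Varnish Desktop]# curl -X PURGE http://10.10.0.23:6081/index.html  # allowed by the PURGERS ACL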
Modify the proxy server group defined on DR1 and DR2 (apply the same change to DR2)
[root@DR1 Desktop]# vim /etc/nginx/nginx.conf
...
    upstream websrvs {
        #server 10.10.0.21:80;
        #server 10.10.0.22:80;
        server 10.10.0.23:6081;  # forward to the Varnish server
        server 127.0.0.1:80 backup;
    }
...
[root@DR1 Desktop]# systemctl reload nginx.service
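After the reload, static requests arriving at the VIP should be served through Varnish, which the X-Cache header confirms end to end; a sketch from the client:

[root@client Desktop]# curl -sI http://192.168.4.120/index.html | grep X-Cache  # Hit from the second request on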
  Define the virtual router. VI_1 is the identifier of the virtual-router instance; name it as you like:
    vrrp_instance VI_1 {
        state MASTER              ## this node is MASTER; the corresponding standby node is BACKUP
        interface eth1            ## interface the virtual IP is bound to, the same one that carries this
                                  ## host's own IP address (eth1 here; check with ifconfig on the machine)
        virtual_router_id 51      ## virtual router ID, which must be identical on both nodes; the last
                                  ## octet of the IP may be used. Nodes with the same VRID form one group,
                                  ## and the VRID also determines the multicast MAC address
        mcast_src_ip 192.168.1.51 ## this host's own IP address
        priority 100              ## node priority, 0-254; the MASTER must be higher than the BACKUP
        nopreempt                 ## set nopreempt on the higher-priority node to keep it from seizing the
                                  ## VIP again after it recovers from a failure
        advert_int 1              ## advertisement interval; must be identical on both nodes, default 1s
        authentication {          ## authentication settings; must be identical on both nodes
            auth_type PASS
            auth_pass 1111        ## in real production, set this according to your requirements
        }
Add the track_script block to the instance block:
        track_script {
            chk_nginx             ## the service that monitors Nginx
        }
The virtual IP pool must be identical on both nodes:
        virtual_ipaddress {
            192.168.1.50          ## the virtual IP; more than one may be defined
        }
    }
Appendix: keepalived.conf of 192.168.31.146 (the MASTER node)
    ! Configuration File for keepalived
    global_defs {
        router_id dreyer-zk-03
    }
    vrrp_script chk_nginx {
        script "/etc/keepalived/nginx_check.sh"
        interval 2
        weight -20
    }
    vrrp_instance VI_1 {
        state MASTER
        interface eth0
        virtual_router_id 146
        mcast_src_ip 192.168.31.146
        priority 100
        nopreempt
        advert_int 1
        authentication {
            auth_type PASS
            auth_pass 1111
        }
        track_script {
            chk_nginx
        }
        virtual_ipaddress {
            192.168.31.111
        }
    }
keepalived.conf of 192.168.31.154 (the BACKUP node):
    ! Configuration File for keepalived
    global_defs {
        router_id dreyer-zk-01
    }
    vrrp_script chk_nginx {
        script "/etc/keepalived/nginx_check.sh"
        interval 2
        weight -20
    }
    vrrp_instance VI_1 {
        state BACKUP
        interface eth0
        virtual_router_id 146
        mcast_src_ip 192.168.31.154
        priority 90
        advert_int 1
        authentication {
            auth_type PASS
            auth_pass 1111
        }
        track_script {
            chk_nginx
        }
        virtual_ipaddress {
            192.168.31.111
        }
    }
nginx_check.sh (the Nginx status-check script):
    #!/bin/bash
    A=`ps -C nginx --no-header | wc -l`
    if [ $A -eq 0 ];then
        /usr/local/nginx/sbin/nginx
        sleep 2
        if [ `ps -C nginx --no-header | wc -l` -eq 0 ];then
            killall keepalived
        fi
    fi
The gist of the script: check whether an nginx process exists; if not, start nginx, sleep 2 seconds, then check again. If nginx is still not running, kill all keepalived processes (only once keepalived is dead can a new MASTER election take place).
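For the vrrp_script to be able to run it, the check script must be executable; presumably:

# chmod +x /etc/keepalived/nginx_check.sh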
6. Start keepalived
# service keepalived start
The following output indicates a successful start:
Starting keepalived:                                       [  OK  ]
7. High-availability testing
7.1 The virtual IP
Run ip add on 192.168.31.146:
[root@MiWiFi-R1CM sbin]# ip add
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 00:0c:29:34:1c:e2 brd ff:ff:ff:ff:ff:ff
    inet 192.168.31.146/24 brd 192.168.31.255 scope global eth0
    inet 192.168.31.111/32 scope global eth0
    inet6 fe80::20c:29ff:fe34:1ce2/64 scope link
       valid_lft forever preferred_lft forever
Run ip add on 192.168.31.154:
[root@MiWiFi-R1CM conf]# ip add
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 00:0c:29:64:3d:2a brd ff:ff:ff:ff:ff:ff
    inet 192.168.31.154/24 brd 192.168.31.255 scope global eth0
    inet 192.168.31.111/32 scope global eth0
    inet6 fe80::20c:29ff:fe64:3d2a/64 scope link
       valid_lft forever preferred_lft forever
The output shows the virtual IP 192.168.31.111 on both 146 and 154; keepalived creates this IP (it is configured in keepalived.conf). Stop keepalived on one machine and run ip add again, and 192.168.31.111 disappears from it. Both machines can be reached through the virtual IP 192.168.31.111; whichever machine is MASTER is the one the virtual IP maps to.

  Because 146 is configured as the MASTER, the virtual IP 192.168.31.111 floats to the 146 machine. Now stop keepalived on 146 and look again:
# service keepalived stop
  At this point the virtual IP 192.168.31.111 has floated over to the 154 machine.
If keepalived on 146 is started again, 192.168.31.111 floats back to 146.
Conclusion: keepalived maps a single virtual IP onto two Nginx proxy servers. If the master server goes down, or its keepalived dies, or its Nginx dies (when Nginx dies the check script kills keepalived, as handled in the script above), the standby's keepalived detects the failure and takes over the original MASTER's network functions. This is how Nginx high availability is achieved (see the brief keepalived introduction at the start of this article).
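To watch the failover from a client, a loop along these lines can run while keepalived is stopped on the MASTER (a sketch; the VIP is the one defined in the appendix configs):

# while true; do curl -s -o /dev/null -w '%{http_code}\n' http://192.168.31.111/; sleep 1; done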