Phase 2 test:
Architecture:
| Server role | Deployed components | IP address |
|---|---|---|
| DR1 director, primary (ha01) | Keepalived + LVS-DR | 192.168.60.30 |
| DR2 director, backup (ha02) | Keepalived + LVS-DR | 192.168.60.40 |
| web1 node server (slave01) | Nginx + Tomcat + MySQL replica + MHA manager + MHA node | 192.168.60.91 |
| web2 node server (slave02) | Nginx + Tomcat + MySQL replica + MHA node | 192.168.60.92 |
| NFS storage server (master01) | MySQL primary + NFS + MHA node | 192.168.60.93 |
| VIP | Virtual IP | 192.168.60.100 |
I. Disable the firewall and set the hostnames
[root@localhost ~]# systemctl stop firewalld.service && setenforce 0
[root@ha01 ~]# hostnamectl set-hostname ha01
[root@ha02 ~]# hostnamectl set-hostname ha02
[root@slave1 ~]# hostnamectl set-hostname slave1
[root@slave2 ~]# hostnamectl set-hostname slave2
[root@master ~]# hostnamectl set-hostname master
[root@localhost ~]# su
II. Install ipvsadm on the directors ha01 and ha02
[root@ha01 net]# yum -y install ipvsadm
III. Deploy LVS-DR
1. Configure the virtual interface on ha01 and ha02
[root@ha01 net]# cd /etc/sysconfig/network-scripts/
[root@ha01 network-scripts]# vim ifcfg-ens33:0
DEVICE=ens33:0
ONBOOT=yes
IPADDR=192.168.60.100
NETMASK=255.255.255.255
[root@ha01 network-scripts]# ifup ifcfg-ens33:0
2. Adjust the director's kernel response parameters
[root@ha01 network-scripts]# vim /etc/sysctl.conf
net.ipv4.ip_forward=0                        # disable packet forwarding
net.ipv4.conf.all.send_redirects=0           # do not send ICMP redirect messages
net.ipv4.conf.default.send_redirects=0       # do not send ICMP redirects on the default interface
net.ipv4.conf.ens33.send_redirects=0         # do not send ICMP redirects on ens33
[root@ha01 network-scripts]# sysctl -p
net.ipv4.ip_forward = 0
net.ipv4.conf.all.send_redirects = 0
net.ipv4.conf.default.send_redirects = 0
net.ipv4.conf.ens33.send_redirects = 0
3. Add a host route for the VIP
[root@ha01 network-scripts]# route add -host 192.168.60.100 dev ens33:0
[root@ha01 network-scripts]# vim /etc/rc.local
/usr/sbin/route add -host 192.168.60.100 dev ens33:0
4. Configure the load-balancing policy
[root@ha01 network-scripts]# ipvsadm-save > /etc/sysconfig/ipvsadm       # save the policy
[root@ha01 network-scripts]# systemctl start ipvsadm.service             # start the ipvsadm service
[root@ha01 network-scripts]# ipvsadm -C                                  # clear existing rules
[root@ha01 network-scripts]# ipvsadm -A -t 192.168.60.100:80 -s rr
[root@ha01 network-scripts]# ipvsadm -a -t 192.168.60.100:80 -r 192.168.60.92:80 -g
[root@ha01 network-scripts]# ipvsadm -a -t 192.168.60.100:80 -r 192.168.60.93:80 -g
[root@ha01 network-scripts]# ipvsadm
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  ha01:http rr
  -> 192.168.60.92:http           Route   1      0          0
  -> 192.168.60.93:http           Route   1      0          0
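The rules created with ipvsadm -A / -a exist only in memory. As an optional extra step (not shown in the original), saving them again after the configuration is finished lets the ipvsadm service restore them from /etc/sysconfig/ipvsadm after a reboot:
ipvsadm-save -n > /etc/sysconfig/ipvsadm      # save the final rule set
systemctl enable ipvsadm.service              # restore it automatically at boot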
5. Configure the two slave web nodes
5.1 Configure the virtual IP address
[root@slave1 opt]# cd /etc/sysconfig/network-scripts/
[root@slave1 network-scripts]# cp ifcfg-ens33 ifcfg-lo:0
[root@slave1 network-scripts]# vim ifcfg-lo:0
DEVICE=lo:0
ONBOOT=yes
IPADDR=192.168.60.100
NETMASK=255.255.255.255
[root@slave1 network-scripts]# ifup ifcfg-lo:0
5.2 Add the route
[root@slave2 network-scripts]# route add -host 192.168.60.100 dev lo:0
[root@slave2 network-scripts]# vim /etc/rc.local
route add -host 192.168.60.100 dev lo:0
5.3 Configure the kernel ARP parameters so the nodes do not answer ARP for the VIP, avoiding MAC address conflicts
[root@slave1 network-scripts]# vim /etc/sysctl.conf
net.ipv4.conf.lo.arp_ignore = 1
net.ipv4.conf.lo.arp_announce = 2
net.ipv4.conf.all.arp_ignore = 1
net.ipv4.conf.all.arp_announce = 2
[root@slave1 network-scripts]# sysctl -p
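A quick way to confirm the parameters took effect (an added check, not part of the original steps) is to read them back with sysctl:
sysctl net.ipv4.conf.all.arp_ignore net.ipv4.conf.all.arp_announce
# expected output: arp_ignore = 1 and arp_announce = 2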
5.4 Deploy the NFS storage server on master
[root@master opt]# rpm -q rpcbind nfs-utils      # check whether NFS is installed
rpcbind-0.2.0-47.el7.x86_64
nfs-utils-1.3.0-0.61.el7.x86_64
[root@master opt]# systemctl start nfs
[root@master opt]# systemctl start rpcbind       # start the services
[root@master opt]# systemctl enable nfs          # enable them at boot
[root@master opt]# systemctl enable rpcbind
[root@master opt]# mkdir /opt/web1 /opt/web2
[root@master opt]# echo '<h1>This is node web1</h1>' > /opt/web1/index.html
[root@master opt]# echo '<h1>This is node web2</h1>' > /opt/web2/index.html    # create the web page content
[root@master opt]# vim /etc/exports
/opt/web1 192.168.60.0/24(ro)
/opt/web2 192.168.60.0/24(ro)
[root@master opt]# exportfs -rv
exporting 192.168.60.0/24:/opt/web2
exporting 192.168.60.0/24:/opt/web1
6. Create the symlink and start the Nginx service
[root@slave1 nginx]# ln -s /usr/local/nginx/sbin/nginx /usr/local/sbin/
[root@slave1 nginx]# nginx
[root@slave1 nginx]# netstat -antp | grep 80
tcp   0   0 0.0.0.0:80   0.0.0.0:*   LISTEN   25085/nginx: master
7. Mount the shared directories on the node servers (slave1 mounts /opt/web1, slave2 mounts /opt/web2)
[root@slave1 nginx]# cd /
[root@slave1 /]# showmount -e 192.168.60.91
Export list for 192.168.60.91:
/opt/web2 192.168.60.0/24
/opt/web1 192.168.60.0/24
slave1:
[root@slave1 /]# mount.nfs 192.168.60.91:/opt/web1 /usr/local/nginx/html/
[root@slave1 /]# cd /usr/local/nginx/html/
[root@slave1 html]# cat index.html
<h1>This is node web1</h1>
slave2:
[root@slave2 /]# mount.nfs 192.168.60.91:/opt/web2 /usr/local/nginx/html/
[root@slave2 /]# cd /usr/local/nginx/html/
[root@slave2 html]# cat index.html
<h1>This is node web2</h1>
8. Test LVS-DR mode
[root@master opt]# curl 192.168.60.100
<h1>This is node web2</h1>
[root@master opt]# curl 192.168.60.100
<h1>This is node web1</h1>
[root@master opt]# curl 192.168.60.100
<h1>This is node web2</h1>
[root@master opt]# curl 192.168.60.100
<h1>This is node web1</h1>
[root@master opt]# curl 192.168.60.100
<h1>This is node web2</h1>
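As an optional check (not in the original), the director can confirm how the rr scheduler spread those requests:
ipvsadm -Ln --stats
# both real servers under 192.168.60.100:80 should show a similar number of connections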
IV. Deploy Nginx + Tomcat dynamic/static separation (install on both slaves)
1. Install Tomcat
[root@slave1 html]# cd /opt/
[root@slave1 opt]# rz -E
rz waiting to receive.
[root@slave1 opt]# ls
apache-tomcat-9.0.16.tar.gz
# Write a script that installs Tomcat
[root@slave1 opt]# vim tomcat.sh
#!/bin/bash
# Install and deploy Tomcat
systemctl stop firewalld
setenforce 0

# Install the JDK
cd /opt
rpm -ivh jdk-8u201-linux-x64.rpm &> /dev/null
java -version

# Set the JDK environment variables (the quoted EOF writes the $JAVA_HOME references literally into java.sh)
cat > /etc/profile.d/java.sh <<'EOF'
export JAVA_HOME=/usr/java/jdk1.8.0_201-amd64
export CLASSPATH=.:$JAVA_HOME/lib/tools.jar:$JAVA_HOME/lib/dt.jar
export PATH=$JAVA_HOME/bin:$PATH
EOF
source /etc/profile.d/java.sh
if [ $? -eq 0 ];then
    echo -e "\033[34;1m JDK installation complete! \033[0m"
fi
java -version

# Install and start Tomcat
cd /opt
tar zxvf apache-tomcat-9.0.16.tar.gz &> /dev/null
mv apache-tomcat-9.0.16 /usr/local/tomcat
# Start Tomcat
/usr/local/tomcat/bin/startup.sh
if [ $? -eq 0 ];then
    echo -e "\033[34;1m Tomcat installation complete! \033[0m"
fi
[root@slave1 opt]# chmod +x tomcat.sh    # make the script executable
[root@slave2 opt]# ./tomcat.sh           # run the script (on both slaves)
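Because the script writes /etc/profile.d/java.sh, a quick sanity check (an added step, not in the original) is to confirm the JDK variables resolve in a fresh login shell:
bash -lc 'echo $JAVA_HOME; java -version'
# expected: /usr/java/jdk1.8.0_201-amd64 followed by the java 1.8.0_201 version banner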
2. Dynamic/static separation configuration
slave1:
[root@slave1 opt]# mkdir /usr/local/tomcat/webapps/test
[root@slave1 opt]# cd /usr/local/tomcat/webapps/
[root@slave1 webapps]# cd test/
[root@slave1 test]# vim /usr/local/tomcat/webapps/test/index.jsp
<%@ page language="java" import="java.util.*" pageEncoding="UTF-8" %>
<html>
<head>
<title>tomcat1</title>
</head>
<body>
<% out.println("This is tomcat1 server");%>
<div>Dynamic page 1</div><br/>
</body>
</html>
[root@slave1 test]# cd /usr/local/tomcat/conf/
slave2:
[root@slave2 opt]# mkdir /usr/local/tomcat/webapps/test
[root@slave2 opt]# cd /usr/local/tomcat/webapps/
[root@slave2 webapps]# cd test/
[root@slave2 test]# vim /usr/local/tomcat/webapps/test/index.jsp
<%@ page language="java" import="java.util.*" pageEncoding="UTF-8" %>
<html>
<head>
<title>tomcat2</title>
</head>
<body>
<% out.println("This is tomcat2 server");%>
<div>Dynamic page 2</div><br/>
</body>
</html>
[root@slave2 test]# cd /usr/local/tomcat/conf/
3. Configure the Tomcat virtual host (server.xml)
[root@slave2 conf]# cd /usr/local/tomcat/conf/
[root@slave2 conf]# cp server.xml{,.bak}
[root@slave2 conf]# vim server.xml        # edit around line 148
<Host name="localhost" appBase="webapps" unpackWARs="true" autoDeploy="true" xmlValidation="false" xmlNamespaceAware="false">
    <Context docBase="/usr/local/tomcat/webapps/test" path="" reloadable="true" />
4. Restart Tomcat and confirm port 8080 is listening
[root@slave1 opt]# cd /usr/local/tomcat/bin/
[root@slave1 bin]# ./shutdown.sh
Using CATALINA_BASE:   /usr/local/tomcat
Using CATALINA_HOME:   /usr/local/tomcat
Using CATALINA_TMPDIR: /usr/local/tomcat/temp
Using JRE_HOME:        /usr/java/jdk1.8.0_201-amd64
Using CLASSPATH:       /usr/local/tomcat/bin/bootstrap.jar:/usr/local/tomcat/bin/tomcat-juli.jar
[root@slave1 bin]# ./startup.sh
Using CATALINA_BASE:   /usr/local/tomcat
Using CATALINA_HOME:   /usr/local/tomcat
Using CATALINA_TMPDIR: /usr/local/tomcat/temp
Using JRE_HOME:        /usr/java/jdk1.8.0_201-amd64
Using CLASSPATH:       /usr/local/tomcat/bin/bootstrap.jar:/usr/local/tomcat/bin/tomcat-juli.jar
Tomcat started.
[root@slave1 bin]# netstat -antp | grep 8080
tcp6   0   0 :::8080      :::*        LISTEN      29638/java
tcp6   0   0 ::1:42910    ::1:8080    TIME_WAIT   -
5. Nginx server configuration (configure both slaves)
[root@slave1 bin]# cd /usr/local/nginx/conf/
[root@slave1 conf]# cp nginx.conf{,.bak}
[root@slave1 conf]# vim nginx.conf
    #gzip  on;
    # in the http block: define the Tomcat upstream pool
    upstream tomcat_server {
        server 192.168.60.92:8080 weight=1;
        server 192.168.60.93:8080 weight=1;
    }
    # in the existing server block:
        server_name  www.web1.com;
        charset utf-8;
        # dynamic requests (.jsp) are proxied to Tomcat
        location ~ .*\.jsp$ {
            proxy_pass http://tomcat_server;
            proxy_set_header HOST $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        }
        # static requests are served locally by Nginx
        location ~ .*\.(gif|jpg|jpeg|png|bmp|swf|css)$ {
            root /usr/local/nginx/html;
            expires 10d;
        }
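After editing, the configuration can be validated, reloaded, and the split spot-checked on the node itself (a hedged verification step, not shown in the original; the /index.jsp path assumes the test application created in step 2):
nginx -t                              # syntax check
nginx -s reload                       # reload the new configuration
curl http://localhost/index.jsp       # should return the Tomcat test page
curl http://localhost/index.html      # should return the static page from the NFS mount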
V. Configure Keepalived
1. Primary server ha01
[root@ha01 ~]# yum install ipvsadm keepalived -y
[root@ha01 ~]# cd /etc/keepalived/
[root@ha01 keepalived]# cp keepalived.conf{,.bak}
[root@ha01 keepalived]# vim keepalived.conf
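The original does not show the contents of keepalived.conf. Below is a minimal sketch of what the file might look like on the DR master, assuming the VIP 192.168.60.100 and the real servers 192.168.60.92 / 192.168.60.93 from the ipvsadm step; router_id, virtual_router_id, priority and auth_pass are illustrative values to adapt:
! Configuration File for keepalived
global_defs {
   router_id LVS_01                    # unique name for this director
}
vrrp_instance VI_1 {
    state MASTER                       # ha01 is the primary director
    interface ens33
    virtual_router_id 10
    priority 100                       # higher than the backup (ha02)
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 123456
    }
    virtual_ipaddress {
        192.168.60.100
    }
}
virtual_server 192.168.60.100 80 {
    delay_loop 6
    lb_algo rr                         # round-robin, matching the ipvsadm -s rr policy
    lb_kind DR                         # direct-routing mode
    persistence_timeout 0
    protocol TCP
    real_server 192.168.60.92 80 {
        weight 1
        TCP_CHECK {
            connect_port 80
            connect_timeout 3
        }
    }
    real_server 192.168.60.93 80 {
        weight 1
        TCP_CHECK {
            connect_port 80
            connect_timeout 3
        }
    }
}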
[root@ha01 keepalived]# systemctl start keepalived.service
[root@ha01 keepalived]# ip addr
2. Backup server ha02
[root@ha02 ~]# yum install ipvsadm keepalived -y
[root@ha02 ~]# cd /etc/keepalived/
[root@ha02 keepalived]# vim keepalived.conf
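Again the file contents are not shown in the original; on ha02 the configuration would normally mirror ha01 except for the lines below (illustrative values):
router_id LVS_02      # in global_defs: different from ha01
state BACKUP          # in vrrp_instance VI_1: BACKUP instead of MASTER
priority 90           # lower than the MASTER's priority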
[root@ha02 keepalived]# systemctl start keepalived.service
3. Adjust the kernel proc response parameters
[root@ha02 keepalived]# vim /etc/sysctl.conf
net.ipv4.conf.all.send_redirects = 0
net.ipv4.conf.default.send_redirects = 0
net.ipv4.conf.ens33.send_redirects = 0
[root@ha02 keepalived]# sysctl -p
VI. Deploy MySQL MHA high availability
1. Modify the MySQL main configuration file /etc/my.cnf on the Master, Slave1 and Slave2 nodes
Master:
[root@master ~]# vim /etc/my.cnf
server-id = 1
log_bin = master-bin
log-slave-updates = true
relay_log_recovery = 1

systemctl restart mysqld
Slave1:
vim /etc/my.cnf
server-id = 2                      # the server-id must be different on all three servers
log_bin = master-bin
relay-log = relay-log-bin
relay-log-index = slave-relay-bin.index
relay_log_recovery = 1

systemctl restart mysqld
Slave2:
vim /etc/my.cnf
server-id = 3                      # the server-id must be different on all three servers
relay-log = relay-log-bin
relay-log-index = slave-relay-bin.index
relay_log_recovery = 1

systemctl restart mysqld
2. Create two symlinks on the Master, Slave1 and Slave2 nodes
ln -s /usr/local/mysql/bin/mysql /usr/sbin/
ln -s /usr/local/mysql/bin/mysqlbinlog /usr/sbin/
3. Configure MySQL with one master and two slaves
(1) Grant MySQL privileges on all database nodes
mysql -uroot -p123456

# account used by the slaves for replication
CREATE USER 'myslave'@'192.168.60.%' IDENTIFIED WITH mysql_native_password BY '123456';
GRANT REPLICATION SLAVE ON *.* TO 'myslave'@'192.168.60.%';

# account used by the MHA manager
CREATE USER 'mha'@'192.168.60.%' IDENTIFIED WITH mysql_native_password BY 'manager';
GRANT ALL PRIVILEGES ON *.* TO 'mha'@'192.168.60.%' WITH GRANT OPTION;

# prevent the slaves from failing to reach the master when connecting by hostname
CREATE USER 'mha'@'master' IDENTIFIED WITH mysql_native_password BY 'manager';
GRANT ALL PRIVILEGES ON *.* TO 'mha'@'master';
CREATE USER 'mha'@'slave1' IDENTIFIED WITH mysql_native_password BY 'manager';
GRANT ALL PRIVILEGES ON *.* TO 'mha'@'slave1';
CREATE USER 'mha'@'slave2' IDENTIFIED WITH mysql_native_password BY 'manager';
GRANT ALL PRIVILEGES ON *.* TO 'mha'@'slave2';
flush privileges;
(2) Check the binary log file and position on the Master node
show master status;
+-------------------+----------+--------------+------------------+-------------------+
| File              | Position | Binlog_Do_DB | Binlog_Ignore_DB | Executed_Gtid_Set |
+-------------------+----------+--------------+------------------+-------------------+
| master-bin.000001 |     1747 |              |                  |                   |
+-------------------+----------+--------------+------------------+-------------------+
(3) Start replication on the Slave1 and Slave2 nodes (use the File and Position values reported by show master status in the previous step)
change master to
  master_host='192.168.60.91',
  master_user='myslave',
  master_password='123456',
  master_log_file='master-bin.000001',
  master_log_pos=2917;
start slave;
(4) Check the replication status on the Slave1 and Slave2 nodes
show slave status\G
# make sure both the IO and SQL threads show Yes, which means replication is working
Slave_IO_Running: Yes
Slave_SQL_Running: Yes
(5) The two slaves must be set to read-only mode:
set global read_only=1;
(6) Insert data to test replication
# insert a record on the Master and check whether it replicates
create database test_db;
use test_db;
create table test(id int);
insert into test(id) values (1);
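To confirm the row actually replicated (an added verification, not shown in the original), query the same table on slave1 and slave2:
mysql -uroot -p123456 -e 'select * from test_db.test;'
# both slaves should return the row with id = 1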
4. Install the MHA software
(1) Install the MHA dependencies on all servers; install the epel repository first
yum install epel-release --nogpgcheck -y
yum install -y perl-DBD-MySQL \
perl-Config-Tiny \
perl-Log-Dispatch \
perl-Parallel-ForkManager \
perl-ExtUtils-CBuilder \
perl-ExtUtils-MakeMaker \
perl-CPAN
(2) Install the MHA packages; the node component must be installed on all servers first
The MHA version to use depends on the operating system version; for CentOS 7.6, version 0.57 is chosen here. The node component must be installed on every server first, and the manager component is installed last on the MHA manager node, because manager depends on node.
cd /opt
tar zxvf mha4mysql-node-0.57.tar.gz
cd mha4mysql-node-0.57
perl Makefile.PL
make && make install
(3) Install the manager component on the MHA manager node
cd /opt
tar zxvf mha4mysql-manager-0.57.tar.gz
cd mha4mysql-manager-0.57
perl Makefile.PL
make && make install
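After both components are installed, the MHA command-line tools should be present under /usr/local/bin; listing them is a quick sanity check (an extra step, not in the original):
ls /usr/local/bin
# the node component provides save_binary_logs, apply_diff_relay_logs, filter_mysqlbinlog and purge_relay_logs;
# the manager component adds masterha_check_ssh, masterha_check_repl, masterha_manager,
# masterha_check_status, masterha_master_switch, masterha_stop and related scripts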
5. Configure passwordless SSH authentication on all servers
(1) On the manager node, configure passwordless authentication to all database nodes
ssh-keygen -t rsa              # press Enter at every prompt
ssh-copy-id 192.168.60.91
ssh-copy-id 192.168.60.92
ssh-copy-id 192.168.60.93
(2) On master, configure passwordless authentication to the database nodes slave1 and slave2
ssh-keygen -t rsa
ssh-copy-id 192.168.60.92
ssh-copy-id 192.168.60.93
(3) On slave1, configure passwordless authentication to the database nodes master and slave2
ssh-keygen -t rsa
ssh-copy-id 192.168.60.91
ssh-copy-id 192.168.60.93
(4) On slave2, configure passwordless authentication to the database nodes master and slave1
ssh-keygen -t rsa
ssh-copy-id 192.168.60.91
ssh-copy-id 192.168.60.92
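Passwordless login can be spot-checked before running the MHA tests (an added step, not in the original); from the manager node, each command should print the remote hostname without asking for a password:
ssh root@192.168.60.91 hostname
ssh root@192.168.60.92 hostname
ssh root@192.168.60.93 hostname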
6. Configure MHA on the manager node
(1) On the manager node, copy the sample scripts to the /usr/local/bin directory
cp -rp /opt/mha4mysql-manager-0.57/samples/scripts /usr/local/bin
(2) Copy the VIP-management script used during automatic failover to the /usr/local/bin directory; the master_ip_failover script is used here to manage the VIP during failover
cp /usr/local/bin/scripts/master_ip_failover /usr/local/bin
(3) Modify the contents as follows (delete the original contents, then paste this and adjust the VIP-related parameters):
vim /usr/local/bin/master_ip_failover
#!/usr/bin/env perl
use strict;
use warnings FATAL => 'all';
use Getopt::Long;
my (
    $command, $ssh_user, $orig_master_host, $orig_master_ip,
    $orig_master_port, $new_master_host, $new_master_ip, $new_master_port
);
############################# section added for VIP management #############################
my $vip = '192.168.60.100';                              # the VIP address
my $brdc = '192.168.60.255';                             # the VIP broadcast address
my $ifdev = 'ens33';                                     # the interface the VIP is bound to
my $key = '1';                                           # the alias number of the virtual interface
my $ssh_start_vip = "/sbin/ifconfig ens33:$key $vip";    # expands to: ifconfig ens33:1 192.168.60.100
my $ssh_stop_vip = "/sbin/ifconfig ens33:$key down";     # expands to: ifconfig ens33:1 down
my $exit_code = 0;                                       # default exit code
#my $ssh_start_vip = "/usr/sbin/ip addr add $vip/24 brd $brdc dev $ifdev label $ifdev:$key;/usr/sbin/arping -q -A -c 1 -I $ifdev $vip;iptables -F;";
#my $ssh_stop_vip = "/usr/sbin/ip addr del $vip/24 dev $ifdev label $ifdev:$key";
#############################################################################################
GetOptions(
    'command=s'          => \$command,
    'ssh_user=s'         => \$ssh_user,
    'orig_master_host=s' => \$orig_master_host,
    'orig_master_ip=s'   => \$orig_master_ip,
    'orig_master_port=i' => \$orig_master_port,
    'new_master_host=s'  => \$new_master_host,
    'new_master_ip=s'    => \$new_master_ip,
    'new_master_port=i'  => \$new_master_port,
);

exit &main();

sub main {
    print "\n\nIN SCRIPT TEST====$ssh_stop_vip==$ssh_start_vip===\n\n";
    if ( $command eq "stop" || $command eq "stopssh" ) {
        my $exit_code = 1;
        eval {
            print "Disabling the VIP on old master: $orig_master_host \n";
            &stop_vip();
            $exit_code = 0;
        };
        if ($@) {
            warn "Got Error: $@\n";
            exit $exit_code;
        }
        exit $exit_code;
    }
    elsif ( $command eq "start" ) {
        my $exit_code = 10;
        eval {
            print "Enabling the VIP - $vip on the new master - $new_master_host \n";
            &start_vip();
            $exit_code = 0;
        };
        if ($@) {
            warn $@;
            exit $exit_code;
        }
        exit $exit_code;
    }
    elsif ( $command eq "status" ) {
        print "Checking the Status of the script.. OK \n";
        exit 0;
    }
    else {
        &usage();
        exit 1;
    }
}

sub start_vip() {
    `ssh $ssh_user\@$new_master_host \" $ssh_start_vip \"`;
}
## A simple system call that disable the VIP on the old_master
sub stop_vip() {
    `ssh $ssh_user\@$orig_master_host \" $ssh_stop_vip \"`;
}

sub usage {
    print
    "Usage: master_ip_failover --command=start|stop|stopssh|status --orig_master_host=host --orig_master_ip=ip --orig_master_port=port --new_master_host=host --new_master_ip=ip --new_master_port=port\n";
}
(4) Create the MHA software directory and copy the configuration file; the app1.cnf file is used here to manage the MySQL node servers
mkdir /etc/masterha
cp /opt/mha4mysql-manager-0.57/samples/conf/app1.cnf /etc/masterha
vim /etc/masterha/app1.cnf        # delete the original contents, paste this and adjust the node server IP addresses
[server default]
manager_log=/var/log/masterha/app1/manager.log
manager_workdir=/var/log/masterha/app1
master_binlog_dir=/usr/local/mysql/data
master_ip_failover_script=/usr/local/bin/master_ip_failover
master_ip_online_change_script=/usr/local/bin/master_ip_online_change
password=manager
ping_interval=1
remote_workdir=/tmp
repl_password=123456
repl_user=myslave
secondary_check_script=/usr/local/bin/masterha_secondary_check -s 192.168.60.92 -s 192.168.60.93    # the slaves double-check the master's status
shutdown_script=""
ssh_user=root
user=mha

[server1]
hostname=192.168.60.91        # primary server
port=3306

[server2]
candidate_master=1
check_repl_delay=0
hostname=192.168.60.92        # standby primary server
port=3306

[server3]
hostname=192.168.60.93        # second slave server
port=3306
7. The first time MHA is configured, the virtual IP must be brought up manually on the Master node
/sbin/ifconfig ens33:1 192.168.60.100/24
8. On the manager node, test the passwordless SSH authentication; if everything is normal, the output ends with "successfully", as shown below.
masterha_check_ssh -conf=/etc/masterha/app1.cnf
9. On the manager node, test the MySQL replication status; "MySQL Replication Health is OK" at the end of the output means everything is normal, as shown below.
masterha_check_repl -conf=/etc/masterha/app1.cnf
10. Start MHA on the manager node
nohup masterha_manager --conf=/etc/masterha/app1.cnf --remove_dead_master_conf --ignore_last_failover < /dev/null > /var/log/masterha/app1/manager.log 2>&1 &
11. Check the MHA status; the current master should be the master node.
masterha_check_status --conf=/etc/masterha/app1.cnf
12. Check the MHA log; it also shows that the current master is 192.168.60.91, as shown below.
cat /var/log/masterha/app1/manager.log | grep "current master"
// To stop the manager service, use the following command.
masterha_stop --conf=/etc/masterha/app1.cnf
mysql> set global read_only=1;     # normally set on the slave databases to make them read-only
mysql> set global read_only=0;     # turn read-only mode off