說明:本篇內容是基於另一篇《LVS負載均衡集羣詳解》爲前提,所以一些上篇文檔已詳細介紹過的基本內容不再重複。
注意:配置前提
1、設置各個節點間的時間同步
2、基於hosts文件實現能夠互相用主機名訪問
3、使用uname -n執行結果要和主機名相同
4、確保ipvsadm、httpd等所有將被設置爲集羣資源的服務均已關閉開機自動啓動
一、環境拓撲介紹
與上篇《基於heartbeat v2和ldirectord實現director高可用》相同,能夠實現director節點之間的故障轉移,node節點的故障剔除,恢復自動加入。
二、安裝相關軟件包
依賴的軟件:
ipvsadm
cluster-glue
cluster-glue-libs
corosync
corosynclib
heartbeat
heartbeat-ldirectord
heartbeat-libs
libesmtp
pacemaker
pacemaker-cts
pacemaker-libs
perl-MailTools
resource-agents
libibverbs
libnes
librdmacm
libtool-ltdl
lm_sensors
openhpi-libs
openib
perl-Compress-Zlib
perl-HTML-Parser
perl-HTML-Tagset
perl-TimeDate
perl-libwww-perl
所有軟件包均爲rpm包,需要在director1和director2兩個節點安裝,這裏不再給出軟件包的具體安裝過程。
三、配置corosync整合pacemaker
[root@director1 ~]# chkconfig heartbeat off //關閉heartbeat的開機啓動
[root@director1 ~]# cd /etc/corosync/
[root@director1 corosync]# cp corosync.conf.example corosync.conf
[root@director1 corosync]# vim corosync.conf
------------------------文件內容------------------------------------
compatibility: whitetank //打開兼容corosync-0.8之前的版本
totem { //定義節點之間心跳信息如何傳遞
version: 2 //協議版本
secauth: off //是否開啓節點之間安全認證
threads: 0 //啓動的線程,與CPU個數相等,0爲自動管理
interface { //定義傳遞心跳的接口信息
ringnumber: 0 //如果有多個接口,ringnumber不能相同
bindnetaddr: 192.168.56.0 //指定接口所在的網絡或者接口的IP地址
mcastaddr: 226.94.1.1 //指定多播地址
mcastport: 5405 //多播的端口
}
}
logging { //定義日誌相關信息
fileline: off
to_stderr: no //是否把錯誤信息發送到標準輸出
to_logfile: yes //是否存儲到logfile中指定的日誌文件
to_syslog: yes //是否存儲到系統日誌文件也就是messages
logfile: /var/log/cluster/corosync.log //日誌文件存放路徑
debug: off //是否開啓調試
timestamp: on //日誌信息是否記錄時間戳
logger_subsys { //定義日誌子系統
subsys: AMF
debug: off
}
}
amf { //定義amf相關信息,如果要啓用需安裝openais和openais-lib
mode: disabled
}
service { //自定義的服務
ver: 0 //版本
name: pacemaker //整合pacemaker,當corosync啓動時也啓動pacemaker
}
--------------------------------內容結束-----------------------------------
[root@director1 corosync]# scp corosync.conf director2:/etc/corosync/ //把配置文件拷貝到director2節點
[root@director1 corosync]# mkdir /var/log/cluster //創建日誌存放目錄
[root@director1 corosync]# ssh director2 'mkdir /var/log/cluster' //在director2節點上創建日誌存放目錄
四、啓動corosync服務
[root@director1 corosync]# service corosync start //啓動director1上的corosync服務
[root@director1 corosync]# netstat -unlp | grep 5404 //查看是否有corosync進程監聽udp的5404端口
在director2節點上啓動corosync服務
[root@director1 corosync]# ssh director2 'service corosync start'
查看兩個節點是否正常
[root@director1 corosync]# crm status
Online: [ director1 director2 ] //看到這樣的行說明兩個節點已經在線
五、配置ldirectord配置文件
[root@director1 ~]# cp /usr/share/doc/heartbeat-ldirectord-2.1.4/ldirectord.cf /etc/ha.d/ //拷貝配置文件樣例
[root@director1 ~]# vim /etc/ha.d/ldirectord.cf
---------------------------文件內容-------------------------------
checktimeout=3
checkinterval=1
autoreload=yes
quiescent=yes
virtual=192.168.56.200:80
real=192.168.56.103:80 gate
real=192.168.56.104:80 gate
fallback=127.0.0.1:80 gate
service=http
request=".test.html"
receive="ok"
scheduler=wlc
protocol=tcp
checktype=negotiate
checkport=80
--------------------------------結束-------------------------------------
配置文件內容的具體說明已在上一篇文檔《基於heartbeat v2和ldirectord實現director高可用》中詳細說明了,這裏不再重複描述
拷貝ldirectord配置文件到director2節點
[root@director1 ~]# scp /etc/ha.d/ldirectord.cf director2:/etc/ha.d/
六、配置資源
1、關閉STONITH檢測,由於我這裏沒有STONITH設置所以關閉此項,而且因爲此環境中只有兩個節點所以應該關閉票數策略。
[root@director1 ~]# crm
crm(live)# configure
crm(live)configure# property stonith-enabled=false
crm(live)configure# property no-quorum-policy=ignore
crm(live)configure# commit
crm(live)configure# exit
2、配置資源
[root@director1 ~]# crm
crm(live)# configure
crm(live)configure# primitive VIP ocf:heartbeat:IPaddr params ip=192.168.56.200 nic=eth0:0 cidr_netmask=255.255.255.255 broadcast=192.168.56.200 //配置VIP資源
crm(live)configure# primitive LVS lsb:ldirectord //配置ldirectord資源
crm(live)configure# colocation VIP_with_LVS inf: VIP LVS //定義排列約束,使VIP和LVS兩個資源必須同時在一個節點上
crm(live)configure# location conn1 VIP 100: director2 //定義位置約束,使VIP資源更傾向於運行於director2節點
crm(live)configure# commit //提交配置
crm(live)configure# exit //退出
七、測試
1、在director2節點上驗證
[root@director2 ~]# ifconfig
eth0 Link encap:Ethernet HWaddr 08:00:27:DB:A2:9B
inet addr:192.168.56.102 Bcast:192.168.56.255 Mask:255.255.255.0
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
RX packets:41132 errors:0 dropped:0 overruns:0 frame:0
TX packets:29820 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:8197161 (7.8 MiB) TX bytes:3815265 (3.6 MiB)
eth0:0 Link encap:Ethernet HWaddr 08:00:27:DB:A2:9B
inet addr:192.168.56.200 Bcast:192.168.56.200 Mask:255.255.255.255
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
lo Link encap:Local Loopback
inet addr:127.0.0.1 Mask:255.0.0.0
UP LOOPBACK RUNNING MTU:16436 Metric:1
RX packets:4338 errors:0 dropped:0 overruns:0 frame:0
TX packets:4338 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:0
RX bytes:426216 (416.2 KiB) TX bytes:426216 (416.2 KiB)
[root@director2 ~]# ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 192.168.56.200:80 wlc
-> 192.168.56.103:80 Route 0 0 0
-> 192.168.56.104:80 Route 0 0 0
-> 127.0.0.1:80 Local 1 0 0
可以看到定義的兩個資源已經在director2節點上生效
2、模擬director2節點故障,測試能否把資源轉移到director1節點
[root@director2 ~]# crm node standby //使當前節點轉爲備用節點
在director1節點上驗證:
[root@director1 ~]# ifconfig
eth0 Link encap:Ethernet HWaddr 08:00:27:EF:F7:44
inet addr:192.168.56.101 Bcast:192.168.56.255 Mask:255.255.255.0
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
RX packets:19310 errors:0 dropped:0 overruns:0 frame:0
TX packets:24406 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:2381082 (2.2 MiB) TX bytes:2949759 (2.8 MiB)
eth0:0 Link encap:Ethernet HWaddr 08:00:27:EF:F7:44
inet addr:192.168.56.200 Bcast:192.168.56.200 Mask:255.255.255.255
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
lo Link encap:Local Loopback
inet addr:127.0.0.1 Mask:255.0.0.0
UP LOOPBACK RUNNING MTU:16436 Metric:1
RX packets:160 errors:0 dropped:0 overruns:0 frame:0
TX packets:160 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:0
RX bytes:13936 (13.6 KiB) TX bytes:13936 (13.6 KiB)
[root@director1 ~]# ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 192.168.56.200:80 wlc
-> 192.168.56.103:80 Route 0 0 0
-> 192.168.56.104:80 Route 0 0 0
-> 127.0.0.1:80 Local 1 0 0
可以看到資源已經成功轉移到director1節點
3、重新讓director2上線,驗證定義的位置約束是否生效
[root@director2 ~]# crm node online
[root@director2 ~]# ifconfig
eth0 Link encap:Ethernet HWaddr 08:00:27:DB:A2:9B
inet addr:192.168.56.102 Bcast:192.168.56.255 Mask:255.255.255.0
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
RX packets:47245 errors:0 dropped:0 overruns:0 frame:0
TX packets:33846 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:8876133 (8.4 MiB) TX bytes:4296712 (4.0 MiB)
eth0:0 Link encap:Ethernet HWaddr 08:00:27:DB:A2:9B
inet addr:192.168.56.200 Bcast:192.168.56.200 Mask:255.255.255.255
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
lo Link encap:Local Loopback
inet addr:127.0.0.1 Mask:255.0.0.0
UP LOOPBACK RUNNING MTU:16436 Metric:1
RX packets:4377 errors:0 dropped:0 overruns:0 frame:0
TX packets:4377 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:0
RX bytes:429648 (419.5 KiB) TX bytes:429648 (419.5 KiB)
[root@director2 ~]# ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 192.168.56.200:80 wlc
-> 192.168.56.103:80 Route 0 0 0
-> 192.168.56.104:80 Route 0 0 0
-> 127.0.0.1:80 Local 1 0 0
驗證成功。