實驗平臺 : CentOS release5.2 (Final) (注:下文所用 epel-release-6、CentOS_CentOS-6 等軟件包均爲 EL6 版本,實際平臺疑應爲 CentOS 6.x,待確認)
實驗目標 : 快速掌握和理解Piranha方案.
案例一:
ip:
10.10.42.23 lvs-realserver VIP
10.10.42.201 lvs-master
10.10.42.202 lvs-slave
10.10.42.203 realserver1
10.10.42.205 realserver2
10.10.42.201部署信息:
[root@sc1 lvs-mango]# vi /etc/hosts
10.10.42.201 lvs-master
10.10.42.202 lvs-slave
[root@sc1 lvs-mango]# wget
http://mirrors.sohu.com/fedora-epel/6Server/x86_64/epel-release-6-8.noarch.rpm
[root@sc1 lvs-mango]# wget
ftp://ftp5.gwdg.de/pub/opensuse/repositories/network:/ha-clustering:/Stable/CentOS_CentOS-6/x86_64/ldirectord-3.9.6-0rc1.1.1.x86_64.rpm
[root@sc1 lvs-mango]# rpm -ivh epel-release-6-8.noarch.rpm
warning: epel-release-6-8.noarch.rpm: Header V3 RSA/SHA256 Signature, key ID 0608b895: NOKEY
Preparing... ########################################### [100%]
1:epel-release ########################################### [100%]
[root@sc1 lvs-mango]# yum install perl perl-MailTools perl-Net-SSLeay perl-IO-Socket-INET6.noarch perl-Net-INET6Glue.noarch perl-Socket6.x86_64
[root@sc1 lvs-mango]# rpm -ivh ldirectord-3.9.6-0rc1.1.1.x86_64.rpm
warning: ldirectord-3.9.6-0rc1.1.1.x86_64.rpm: Header V3 RSA/SHA1 Signature, key ID 17280ddf: NOKEY
Preparing... ########################################### [100%]
1:ldirectord ########################################### [100%]
[root@sc1 lvs-mango]# vi /etc/yum.repos.d/epel.repo
[epel]
name=Extra Packages for Enterprise Linux 6 - $basearch
baseurl=http://download.fedoraproject.org/pub/epel/6/$basearch
#mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-6&arch=$basearch
failovermethod=priority
enabled=1
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-6
[epel-debuginfo]
name=Extra Packages for Enterprise Linux 6 - $basearch - Debug
baseurl=http://download.fedoraproject.org/pub/epel/6/$basearch/debug
#mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-debug-6&arch=$basearch
failovermethod=priority
enabled=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-6
gpgcheck=1
[epel-source]
name=Extra Packages for Enterprise Linux 6 - $basearch - Source
baseurl=http://download.fedoraproject.org/pub/epel/6/SRPMS
#mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-source-6&arch=$basearch
failovermethod=priority
enabled=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-6
gpgcheck=1
[root@sc1 lvs-mango]# yum install ipvsadm
[root@sc1 lvs-mango]# yum -y install heartbeat.x86_64 heartbeat-devel.x86_64 heartbeat-libs.x86_64
[root@slave1 ~]# vi /etc/sysconfig/ha/lvs.cf
serial_no = 110
primary = 10.10.42.201
service = lvs
backup_active = 1
backup = 10.10.42.202
heartbeat = 1
heartbeat_port = 539
keepalive = 8
deadtime = 9
network = direct
debug_level = NONE
monitor_links = 0
syncdaemon = 0
tcp_timeout = 5
tcpfin_timeout = 6
udp_timeout = 7
virtual nginx_ceshi {
active = 1
address = 10.10.42.23 eth0:2
vip_nmask = 255.255.255.0
port = 80
send = "GET / HTTP/1.0\r\n\r\n"
expect = "HTTP"
use_regex = 0
load_monitor = none
scheduler = rr
protocol = tcp
timeout = 6
reentry = 15
quiesce_server = 0
server nginx_ceshi_s01 {
address = 10.10.42.203
active = 1
port = 80
weight = 1
}
server nginx_ceshi_s02 {
address = 10.10.42.205
active = 1
port = 80
weight = 1
}
}
服務啓動:
[root@slave1 ~]# /etc/init.d/piranha-gui start
[root@slave1 ~]# /etc/init.d/pulse start
驗證啓動狀態:
[root@slave1 ~]# ifconfig
eth0:2 Link encap:Ethernet HWaddr 00:50:56:8A:69:4B
inet addr:10.10.42.23 Bcast:10.10.42.255 Mask:255.255.255.0
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
10.10.42.202部署信息:
[root@sc2 lvs-mango]# cat /etc/hosts
10.10.42.201 lvs-master
10.10.42.202 lvs-slave
[root@sc2 lvs-mango]# wget
http://mirrors.sohu.com/fedora-epel/6Server/x86_64/epel-release-6-8.noarch.rpm
[root@sc2 lvs-mango]# wget
ftp://ftp5.gwdg.de/pub/opensuse/repositories/network:/ha-clustering:/Stable/CentOS_CentOS-6/x86_64/ldirectord-3.9.6-0rc1.1.1.x86_64.rpm
[root@sc2 lvs-mango]# rpm -ivh epel-release-6-8.noarch.rpm
warning: epel-release-6-8.noarch.rpm: Header V3 RSA/SHA256 Signature, key ID 0608b895: NOKEY
Preparing... ########################################### [100%]
1:epel-release ########################################### [100%]
[root@sc2 lvs-mango]# yum install perl perl-MailTools perl-Net-SSLeay perl-IO-Socket-INET6.noarch perl-Net-INET6Glue.noarch perl-Socket6.x86_64
[root@sc2 lvs-mango]# rpm -ivh ldirectord-3.9.6-0rc1.1.1.x86_64.rpm
warning: ldirectord-3.9.6-0rc1.1.1.x86_64.rpm: Header V3 RSA/SHA1 Signature, key ID 17280ddf: NOKEY
Preparing... ########################################### [100%]
1:ldirectord ########################################### [100%]
[root@sc2 lvs-mango]# vi /etc/yum.repos.d/epel.repo
[epel]
name=Extra Packages for Enterprise Linux 6 - $basearch
baseurl=http://download.fedoraproject.org/pub/epel/6/$basearch
#mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-6&arch=$basearch
failovermethod=priority
enabled=1
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-6
[epel-debuginfo]
name=Extra Packages for Enterprise Linux 6 - $basearch - Debug
baseurl=http://download.fedoraproject.org/pub/epel/6/$basearch/debug
#mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-debug-6&arch=$basearch
failovermethod=priority
enabled=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-6
gpgcheck=1
[epel-source]
name=Extra Packages for Enterprise Linux 6 - $basearch - Source
baseurl=http://download.fedoraproject.org/pub/epel/6/SRPMS
#mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-source-6&arch=$basearch
failovermethod=priority
enabled=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-6
gpgcheck=1
[root@sc2 lvs-mango]# yum install ipvsadm
[root@sc2 lvs-mango]# yum -y install heartbeat.x86_64 heartbeat-devel.x86_64 heartbeat-libs.x86_64
[root@slave1 ~]# vi /etc/sysconfig/ha/lvs.cf
serial_no = 110
primary = 10.10.42.201
service = lvs
backup_active = 1
backup = 10.10.42.202
heartbeat = 1
heartbeat_port = 539
keepalive = 8
deadtime = 9
network = direct
debug_level = NONE
monitor_links = 0
syncdaemon = 0
tcp_timeout = 5
tcpfin_timeout = 6
udp_timeout = 7
virtual nginx_ceshi {
active = 1
address = 10.10.42.23 eth0:2
vip_nmask = 255.255.255.0
port = 80
send = "GET / HTTP/1.0\r\n\r\n"
expect = "HTTP"
use_regex = 0
load_monitor = none
scheduler = rr
protocol = tcp
timeout = 6
reentry = 15
quiesce_server = 0
server nginx_ceshi_s01 {
address = 10.10.42.203
active = 1
port = 80
weight = 1
}
server nginx_ceshi_s02 {
address = 10.10.42.205
active = 1
port = 80
weight = 1
}
}
服務啓動:
[root@slave1 ~]# /etc/init.d/piranha-gui start
[root@slave1 ~]# /etc/init.d/pulse start
驗證啓動狀態:
[root@slave1 ~]# ifconfig
測試集羣狀態:
[root@slave1 ~]# ipvsadm -l
修改DNS指向10.10.42.23後訪問域名,查看後端nginx日誌看是否有請求分發過來。
主從切換驗證:
停止主,通過日誌看後臺執行動作
[root@slave1 ~]# tail -100f /var/log/messages
修改DNS指向10.10.42.23後訪問域名,查看後端nginx日誌看是否有請求分發過來。
後端nginx配置文件:
[root@slave3 ~]# vi /usr/local/nginx/conf/nginx.conf
# Reverse proxy in front of the real servers: one upstream pool and one
# virtual host per test.com sub-domain.
worker_processes 1;
error_log logs/error.log debug;

events {
    worker_connections 60000;
}

http {
    include mime.types;
    default_type application/octet-stream;
    sendfile on;
    keepalive_timeout 65;
    gzip on;

    upstream www.test.com {
        server 10.10.130.41:80;
        server 10.10.130.42:80;
    }
    upstream ro.test.com {
        server 10.10.5.155:80;
        server 10.10.5.156:80;
    }
    upstream flight.test.com {
        server 10.10.130.45:80;
        server 10.10.130.46:80;
    }
    upstream hotel.test.com {
        server 10.10.130.43:80;
        server 10.10.130.44:80;
    }
    upstream supply.test.com {
        server 10.10.130.102:80;
        server 10.10.130.103:80;
    }

    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';
    access_log logs/access.log main;
    # NOTE: removed the invalid "access_log on;" line — access_log takes a
    # file path (or "off"); "on" would be treated as a log file named "on".

    server {
        listen 80;
        server_name www.test.com;
        access_log logs/www.test.com.log main;
        location / {
            proxy_set_header ServerName $server_name;
            proxy_pass http://www.test.com;
        }
    }
    server {
        listen 80;
        server_name ro.test.com;
        access_log logs/ro.test.com.log main;
        location / {
            proxy_set_header ServerName $server_name;
            proxy_pass http://ro.test.com;
        }
    }
    server {
        listen 80;
        server_name hotel.test.com;
        access_log logs/hotel.test.com.log main;
        location / {
            proxy_set_header ServerName $server_name;
            proxy_pass http://hotel.test.com;
        }
    }
    server {
        listen 80;
        server_name flight.test.com;
        access_log logs/flight.test.com.log main;
        location / {
            proxy_set_header ServerName $server_name;
            proxy_pass http://flight.test.com;
        }
    }
    server {
        listen 80;
        server_name supply.test.com;
        access_log logs/supply.test.com.log main;
        location / {
            proxy_set_header ServerName $server_name;
            proxy_pass http://supply.test.com;
        }
    }
}
[root@slave3 ~]# cat /usr/local/realserver.sh
#!/bin/bash
# RealServer control script for LVS direct-routing (DR) mode.
# Binds the VIP to the loopback alias lo:0 and tunes ARP sysctls so this
# real server never answers ARP for the VIP (the LVS director owns it).
# Usage: realserver.sh {start|stop|restart|status}
MpmWeb_VIP=10.10.42.23

start(){
  # /32 netmask keeps the VIP from being advertised on the LAN segment.
  ifconfig lo:0 "$MpmWeb_VIP" netmask 255.255.255.255 broadcast "$MpmWeb_VIP"
  /sbin/route add -host "$MpmWeb_VIP" dev lo:0
  # arp_ignore=1 / arp_announce=2: do not reply to or announce ARP for
  # addresses configured only on lo (required for LVS-DR real servers).
  echo "1" >/proc/sys/net/ipv4/conf/lo/arp_ignore
  echo "2" >/proc/sys/net/ipv4/conf/lo/arp_announce
  echo "1" >/proc/sys/net/ipv4/conf/all/arp_ignore
  echo "2" >/proc/sys/net/ipv4/conf/all/arp_announce
  sysctl -p >/dev/null 2>&1
  echo "RealServer Start OK [lvs_dr]"
}

stop(){
  # Restore default ARP behaviour, then tear down the VIP alias and route.
  echo "0" >/proc/sys/net/ipv4/conf/lo/arp_ignore
  echo "0" >/proc/sys/net/ipv4/conf/lo/arp_announce
  echo "0" >/proc/sys/net/ipv4/conf/all/arp_ignore
  echo "0" >/proc/sys/net/ipv4/conf/all/arp_announce
  /sbin/ifconfig lo:0 down
  /sbin/route del -host "$MpmWeb_VIP"
  sysctl -p >/dev/null 2>&1
  echo "RealServer Stopped [lvs_dr]"
}

restart(){
  stop
  start
}

case "$1" in
  start)
    start
    ;;
  stop)
    stop
    ;;
  restart)
    restart
    ;;
  status)
    /sbin/ifconfig
    ;;
  *)
    echo "Usage: $0 {start|stop|restart|status}"
    exit 1
    ;;
esac
案例二:
一.Piranha方案基本簡紹.
1.Piranha方案優點:
1.1.1配置簡潔高效:配置簡便一個lvs.cf配置文件即可搞定(類keepalived方案.)
1.1.2WEB配置界面:WEB配置對於那些不懂LVS配置的人員來說非常吸引力.
1.1.3完整的功能:
主備LVS (Load Balancer)的Heartbeat和HA (pulse,send_arp)
LoadBalancer和Real Server間進程服務的Heartbeat (nanny)
IPVS功能 (lvsd)
IPVS的管理 (ipvsadm)
2.Piranha方案原理結構描述:
Piranha方案是基於LVS基礎上設計的一套負載均衡高可用解決方案.
LVS運行在一對有相似配置的計算機上:
一個作爲活動LVS Router(Active LVS Router),
一個作爲備份LVS Router(Backup LVS Router)。
活動LVS Router服務有兩個角色:
* 均衡負載到真實服務器上。
* 檢查真實服務器提供的服務是否正常。
備份LVS Router用來監控活動的LVS Router,以備活動的LVS Router失敗時由備份LVS Router接管。
Pulse:Pulse進程運行在活動LVS Router和備份LVS Router上。
在備份LVS Router上,pulse發送一個心跳(heartbeat)到活動LVS Router的公網接口上以檢查活動LVS Router是否正常。
在活動LVS Router上,pulse啓動lvs進程並響應來自於備份LVS Router的心跳。
lvsd:lvs進程調用ipvsadm工具去配置和維護IPVS路由表,併爲每一個在真實服務器上的虛擬服務啓動一個nanny進程。
nanny:每一個nanny進程去檢查真實服務器上的虛擬服務狀態,並將故障情況通知lvs進程。假如一個故障被發現,lvs進程通知ipvsadm在IPVS路由表中將此節點刪除。
send_arp:如果備份LVS Router未收到來自於活動LVS Router的響應,
它將調用send_arp將虛擬IP地址再分配到備份LVS Router的公網接口上。
並在公網接口和局域網接口上分別發送一個命令去關掉活動LVS Router上的lvs進程。同時啓動自己的lvs進程來調度客戶端請求。
3.Piranha方案基本套件安裝:
ip:
10.10.42.23 lvs-realserver VIP
10.10.42.201 lvs-master
10.10.42.202 lvs-slave
10.10.42.203 realserver1
10.10.42.205 realserver2
10.10.42.201部署信息:
[root@sc1 lvs-mango]# vi /etc/hosts
10.10.42.201 lvs-master
10.10.42.202 lvs-slave
[root@sc1 lvs-mango]# wget
http://mirrors.sohu.com/fedora-epel/6Server/x86_64/epel-release-6-8.noarch.rpm
[root@sc1 lvs-mango]# wget
ftp://ftp5.gwdg.de/pub/opensuse/repositories/network:/ha-clustering:/Stable/CentOS_CentOS-6/x86_64/ldirectord-3.9.6-0rc1.1.1.x86_64.rpm
[root@sc1 lvs-mango]# rpm -ivh epel-release-6-8.noarch.rpm
warning: epel-release-6-8.noarch.rpm: Header V3 RSA/SHA256 Signature, key ID 0608b895: NOKEY
Preparing... ########################################### [100%]
1:epel-release ########################################### [100%]
[root@sc1 lvs-mango]# rpm -ivh ldirectord-3.9.6-0rc1.1.1.x86_64.rpm
warning: ldirectord-3.9.6-0rc1.1.1.x86_64.rpm: Header V3 RSA/SHA1 Signature, key ID 17280ddf: NOKEY
Preparing... ########################################### [100%]
1:ldirectord ########################################### [100%]
[root@sc1 lvs-mango]# vi /etc/yum.repos.d/epel.repo
[epel]
name=Extra Packages for Enterprise Linux 6 - $basearch
baseurl=http://download.fedoraproject.org/pub/epel/6/$basearch
#mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-6&arch=$basearch
failovermethod=priority
enabled=1
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-6
[epel-debuginfo]
name=Extra Packages for Enterprise Linux 6 - $basearch - Debug
baseurl=http://download.fedoraproject.org/pub/epel/6/$basearch/debug
#mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-debug-6&arch=$basearch
failovermethod=priority
enabled=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-6
gpgcheck=1
[epel-source]
name=Extra Packages for Enterprise Linux 6 - $basearch - Source
baseurl=http://download.fedoraproject.org/pub/epel/6/SRPMS
#mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-source-6&arch=$basearch
failovermethod=priority
enabled=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-6
gpgcheck=1
[root@sc1 lvs-mango]#yum makecache ; yum install perl perl-MailTools perl-Net-SSLeay perl-IO-Socket-INET6.noarch perl-Net-INET6Glue.noarch perl-Socket6.x86_64
[root@sc1 lvs-mango]# yum install ipvsadm
[root@sc1 lvs-mango]# yum -y install heartbeat.x86_64 heartbeat-devel.x86_64 heartbeat-libs.x86_64
4.配置文件介紹:
/etc/sysconfig/ha/lvs.cf //由http://ip:3636 web界面配置的配置文件寫入此文件.
[root@slave1 ~]# vi /etc/sysconfig/ha/lvs.cf
/etc/init.d/piranha-gui start //啓動piranha服務的WEB配置界面.
/etc/init.d/pulse //啓動piranha服務讀取的就是/etc/sysconfig/ha/lvs.cf.
二. Piranha配置
配置主LVS服務器.
# vi /etc/sysctl.conf找到下面行 //啓用數據轉發.
net.ipv4.ip_forward = 0將0改成1,net.ipv4.ip_forward = 1
執行如下命令來應用:sysctl -p
通過WEB界面配置Piranha服務.
# /etc/init.d/piranha-gui start //啓動Piranha服務.
#/usr/sbin/piranha-passwd //設置密碼,請設置你的piranha服務WEB配置登陸密碼.
http://10.10.42.201:3636/ 輸入用戶名: piranha 及剛纔設置的密碼登陸.
A)CONTROL / MONITORING
B) GLOBAL SETTINGS
Primary server public IP:主服務器用於與應用服務器(Real Server)連接的IP。
Primary server private IP:主服務器用於與備用服務器連接的心跳IP。
Use Network Type:所選用的LVS模式。
C)REDUNDANCY配置
Redundant server public IP:備用服務器用於與應用服務器(Real Server)連接的公網IP。
Redundant server private IP:備用服務器用於與主服務器連接的心跳IP。
Heartbeat interval:備用服務器對主服務器進行心跳檢測的輪詢時間。
Assume dead after:如果主服務器在指定時間內沒有恢復心跳,則宣告服務器失效並進行接管。
Heartbeat runs on port:使用心跳來檢測所使用的端口。
Monitor NIC links for failures:是否檢測網卡的連接狀態。
D)VIRTUAL SERVERS配置
Name:定義虛擬服務器的名稱。
Application port:指定此目標應用服務的端口。
Protocol:目標應用服務的網絡協議,TCP或UDP。
Virtual IP Address:定義目標應用所使用的虛擬IP。
Virtual IP Network Mask:定義目標應用所使用的虛擬IP的子網掩碼。
Firewall Mark:當目標應用要使用多個IP端口時,結合IPTABLE設定防火牆標記。
Device:虛擬IP所掛接的網卡設備名。
Re-entry Time:當發現一個Real Server故障後,LVS Route對這個Server的檢測間隔時間。
Server timeout:LVS Route對Real Server發送指令後,若超過此時間沒有響應則認爲服務器發生故障。
Quiesce server:一旦有Real Server加入或恢復,則所有負載隊列記錄均歸"0"並重新進行分配。
Load monitoring tool:在Real Server中通過ruptime或rup命令獲得系統負載,以結合相應用的 Scheduling算法進行調度計算。
Scheduling:此虛擬服務器使用的調度算法。
Persistence:同一客戶端長連接的保持時間。
Persistence Network Mask:長連接保持的子網掩碼(網段)範圍。
Load monitoring tool要求Real Server安裝有ruptime或rup,並要求LVS服務器可以使用root賬號在不需要密碼的情況下通過SSH連接到Real Server。
Scheduling中包括以下8種調度策略:
Round-Robin Scheduling:輪詢策略,IP分發時逐個地對Real Server進行輪詢。
Weighted Round-Robin Scheduling:加權輪詢策略,配合權值進行輪詢策略計算。
Least-Connection:最小連接優先策略,將新的IP請求分發到訪問隊列較短的Real Server。
Weighted Least-Connections:加權最小連接優先策略,配合權值進行最小連接優先策略計算。
Locality-Based Least-Connection Scheduling:以下簡稱LBLCS,根據目標IP地址找出最近使用的服務器,若該服務器可用並且沒有超載(系統壓力未達到一半),就將請求發送到該服務器,否則使用最小連接優先策略。此策略主要針對的是Cache網關服務器。
Locality-Based Least Connections with Replication Scheduling:與LBLCS類似,在LBLCS的基礎上加入複製調度策略,使得"熱門"網站使用時儘量Cache在同一臺網關服務器中,進一步避免了在多臺服務器中保存相同的Cache信息。此策略主要針對的也是Cache網關服務器。
Destination Hashing Scheduling:通過對目標地址的Hash計算來確定目標服務器。此策略主要針對的是Cache網關服務器。
Source Hashing Scheduling:通過對源地址的Hash計算來確定目標服務器。此策略主要針對的是Cache網關服務器。
E)添加real server
Name:設置此Real Server的名稱。
Address:設置此Real Server的IP地址。
Weight:設置此Real Server的權值,當各Real Server的性能不相同時可設定性能較高的服務器得到較高的權值。
E)MONITORING SCRIPTS配置
Sending Program:通過程序實現Real Server中對應用服務可用性的判斷(不能與Send同時使用)。
Send:直接通過VIRTUAL SERVER中指定的端口發送指令。
Expect:Sending Program或Send後的返回值,如果與此返回值匹配,則表明此應用服務在當前Real Server中運行正常。
Treat expect string as a regular expression:將Expect中的值作爲正則表達式與返回值進行比對。
注意:
此處的功能主要用於判斷Real Server中的目標服務運行是否正常,如果發現服務失效,則主動在此VIRTUAL SERVER中隔離該Real Server。
三、設置LVS相關服務自啓動[Virtual Server端]
/etc/init.d/piranha-gui start
/etc/init.d/pulse start
成功後的截圖如下:
加入開機啓動:
chkconfig --level 345 piranha-gui on
chkconfig --level 345 pulse on
四、RealServer系統配置[real server端]
#!/bin/bash
# RealServer service script, direct-routing mode.
# (Reformatted from a collapsed one-line paste; stray trailing "|" removed.)
# NOTE(review): the original paste set the VIP to 10.10.42.22, but every
# other occurrence in this document uses VIP 10.10.42.23 — using the
# documented VIP here; confirm against the actual deployment.
MpmWeb_VIP=10.10.42.23

start(){
  # /32 netmask keeps the VIP from being advertised on the LAN segment.
  ifconfig lo:0 "$MpmWeb_VIP" netmask 255.255.255.255 broadcast "$MpmWeb_VIP"
  /sbin/route add -host "$MpmWeb_VIP" dev lo:0
  # arp_ignore=1 / arp_announce=2: suppress ARP for the loopback-held VIP
  # (required for LVS-DR real servers).
  echo "1" >/proc/sys/net/ipv4/conf/lo/arp_ignore
  echo "2" >/proc/sys/net/ipv4/conf/lo/arp_announce
  echo "1" >/proc/sys/net/ipv4/conf/all/arp_ignore
  echo "2" >/proc/sys/net/ipv4/conf/all/arp_announce
  sysctl -p >/dev/null 2>&1
  echo "RealServer Start OK [lvs_dr]"
}

stop(){
  # Restore default ARP behaviour, then remove the VIP alias and host route.
  echo "0" >/proc/sys/net/ipv4/conf/lo/arp_ignore
  echo "0" >/proc/sys/net/ipv4/conf/lo/arp_announce
  echo "0" >/proc/sys/net/ipv4/conf/all/arp_ignore
  echo "0" >/proc/sys/net/ipv4/conf/all/arp_announce
  /sbin/ifconfig lo:0 down
  /sbin/route del -host "$MpmWeb_VIP"
  sysctl -p >/dev/null 2>&1
  echo "RealServer Stopped [lvs_dr]"
}

restart(){
  stop
  start
}

case "$1" in
  start)
    start
    ;;
  stop)
    stop
    ;;
  restart)
    restart
    ;;
  status)
    /sbin/ifconfig
    ;;
  *)
    echo "Usage: $0 {start|stop|restart|status}"
    exit 1
    ;;
esac
五、配置realserver前端代理
[root@slave3 ~]# vi /usr/local/nginx/conf/nginx.conf
(此處原文爲被壓成數行的亂碼摘錄;完整 nginx.conf 見上文「後端nginx配置文件」一節,其結構爲:)
http {
    upstream www.test.com { ... }
    upstream ro.test.com { ... }
    upstream flight.test.com { ... }
    upstream hotel.test.com { ... }
    upstream supply.test.com { ... }
    access_log logs/access.log main;
    server { ... }   # www / ro / hotel / flight / supply 共五個 server 區塊
}
[root@slave3 ~]# cat /usr/local/realserver.sh
#!/bin/bash
# RealServer service script for LVS direct-routing mode: attaches the VIP
# to lo:0 and adjusts the kernel ARP parameters accordingly.
vip=10.10.42.23

# Write arp_ignore ($1) and arp_announce ($2) for the lo and all interfaces.
tune_arp(){
  local dev
  for dev in lo all; do
    echo "$1" >"/proc/sys/net/ipv4/conf/$dev/arp_ignore"
    echo "$2" >"/proc/sys/net/ipv4/conf/$dev/arp_announce"
  done
}

start(){
  ifconfig lo:0 "$vip" netmask 255.255.255.255 broadcast "$vip"
  /sbin/route add -host "$vip" dev lo:0
  tune_arp 1 2
  sysctl -p >/dev/null 2>&1
  echo "RealServer Start OK [lvs_dr]"
}

stop(){
  tune_arp 0 0
  /sbin/ifconfig lo:0 down
  /sbin/route del -host "$vip"
  sysctl -p >/dev/null 2>&1
  echo "RealServer Stoped [lvs_dr]"
}

restart(){
  stop
  start
}

case "$1" in
  start)   start ;;
  stop)    stop ;;
  restart) restart ;;
  status)  /sbin/ifconfig ;;
  *)
    echo "Usage: $0 {start|stop|restart|status}"
    exit 1
    ;;
esac
六、測試
打開IE:http://supply.test.com/MpmWeb/不斷刷新,能訪問則說明成功。