**
saltstack
**
Saltstack是一個服務器集中管理中心平臺,可以幫助管理員輕鬆的對若干臺服務器進行統一操作。類似的工具還有Ansible,Puppet,func等等。相比於這些工具,salt的特點在於其用python實現,支持各種操作系統類型,採取了C/S架構,部署方便簡單, 且可擴展性很強。
**
環境準備
**
centos6.5
兩臺虛擬機
準備安裝包
[root@foundation15 rhel6]# ls
libyaml-0.1.3-4.el6.x86_64.rpm
python-babel-0.9.4-5.1.el6.noarch.rpm
python-backports-1.0-5.el6.x86_64.rpm
python-backports-ssl_match_hostname-3.4.0.2-2.el6.noarch.rpm
python-chardet-2.2.1-1.el6.noarch.rpm
python-cherrypy-3.2.2-4.el6.noarch.rpm
python-crypto-2.6.1-3.el6.x86_64.rpm
python-crypto-debuginfo-2.6.1-3.el6.x86_64.rpm
python-enum34-1.0-4.el6.noarch.rpm
python-futures-3.0.3-1.el6.noarch.rpm
python-impacket-0.9.14-1.el6.noarch.rpm
python-jinja2-2.8.1-1.el6.noarch.rpm
python-msgpack-0.4.6-1.el6.x86_64.rpm
python-ordereddict-1.1-2.el6.noarch.rpm
python-requests-2.6.0-3.el6.noarch.rpm
python-setproctitle-1.1.7-2.el6.x86_64.rpm
python-six-1.9.0-2.el6.noarch.rpm
python-tornado-4.2.1-1.el6.x86_64.rpm
python-urllib3-1.10.2-1.el6.noarch.rpm
python-zmq-14.5.0-2.el6.x86_64.rpm
PyYAML-3.11-1.el6.x86_64.rpm
repodata
salt-2016.11.3-1.el6.noarch.rpm
salt-api-2016.11.3-1.el6.noarch.rpm
salt-cloud-2016.11.3-1.el6.noarch.rpm
salt-master-2016.11.3-1.el6.noarch.rpm
salt-minion-2016.11.3-1.el6.noarch.rpm
salt-ssh-2016.11.3-1.el6.noarch.rpm
salt-syndic-2016.11.3-1.el6.noarch.rpm
zeromq-4.0.5-4.el6.x86_64.rpm
**
使用
**
主機安裝
yum install salt-master
server端
yum install salt-minion
**
一鍵安裝httpd
**
server端修改配置文件minion
打開master 指定主機地址
編寫文件來執行任務
vim master
# file_roots:
**********************
*********************
# Salt master config: root of the base state tree.
# NOTE(review): indentation was lost in the original paste; restored here
# to valid YAML as required by the master config format.
file_roots:
  base:
    - /srv/salt
mkdir /srv/salt
cd /srv/salt/
mkdir httpd
cd httpd/
vim install.sls
# httpd/install.sls — install Apache (httpd + php), push the master's copy
# of httpd.conf, and keep the service enabled and running. The service
# watches the managed file, so a config change triggers a reload (not a
# restart). Indentation restored: the paste had flattened it, which is
# invalid SLS/YAML.
apache-install:
  pkg.installed:
    - pkgs:
      - httpd
      - php
  file.managed:
    - name: /etc/httpd/conf/httpd.conf
    - source: salt://httpd/files/httpd.conf
    - mode: 644
    - user: root
  service.running:
    - name: httpd
    - enable: True
    - reload: True
    - watch:
      - file: apache-install
mkdir files
server端
scp /etc/httpd/conf/httpd.conf root@server1:/srv/salt/httpd/files/
啓動任務
salt server2 state.sls httpd.install
[root@server1 httpd]# salt server2 state.sls httpd.install
server2:
----------
ID: apache-install
Function: pkg.installed
Result: True
Comment: All specified packages are already installed
Started: 10:44:21.696917
Duration: 349.741 ms
Changes:
----------
ID: apache-install
Function: file.managed
Name: /etc/httpd/conf/httpd.conf
Result: True
Comment: File /etc/httpd/conf/httpd.conf is in the correct state
Started: 10:44:22.048334
Duration: 40.833 ms
Changes:
----------
ID: apache-install
Function: service.running
Name: httpd
Result: True
Comment: Service httpd has been enabled, and is running
Started: 10:44:22.089892
Duration: 147.796 ms
Changes:
----------
httpd:
True
Summary for server2
------------
Succeeded: 3 (changed=1)
Failed: 0
------------
Total states run: 3
Total run time: 538.370 ms
server端查看
[root@server2 ~]# netstat -antple|grep httpd
tcp 0 0 :::80 :::* LISTEN 0 16092 1705/httpd
[root@server2 ~]# ps ax |grep httpd
1705 ? Ss 0:00 /usr/sbin/httpd
1713 ? S 0:00 /usr/sbin/httpd
1714 ? S 0:00 /usr/sbin/httpd
1715 ? S 0:00 /usr/sbin/httpd
1716 ? S 0:00 /usr/sbin/httpd
1717 ? S 0:00 /usr/sbin/httpd
1718 ? S 0:00 /usr/sbin/httpd
1719 ? S 0:00 /usr/sbin/httpd
1720 ? S 0:00 /usr/sbin/httpd
1740 pts/0 S+ 0:00 grep httpd
修改配置文件中指定的files下httpd文件來控制端口
主機與server端md5碼產生改變時 發送主機文件至客戶端 並執行reload 而不是restart
修改前
[root@server2 conf]# md5sum httpd.conf
27a5c8d9e75351b08b8ca1171e8a0bbd httpd.conf
一致
[root@server1 files]# md5sum httpd.conf
27a5c8d9e75351b08b8ca1171e8a0bbd httpd.conf
修改後
[root@server1 files]# md5sum httpd.conf
b7ca7a0e786418ba7b5ad84efac70265 httpd.conf
執行任務
salt server2 state.sls httpd.install
diff:
---
+++
@@ -133,7 +133,7 @@
# prevent Apache from glomming onto all bound IP addresses (0.0.0.0)
#
#Listen 12.34.56.78:80
-Listen 80###############
+Listen 8080#############發生改變
#
# Dynamic Shared Object (DSO) Support
server端查看
[root@server2 conf]# netstat -antple|grep httpd
tcp 0 0 :::8080 :::* LISTEN 0 16606 1705/httpd
修改成功
**
源碼一鍵安裝nginx
**
[root@server1 salt]# cd nginx/
[root@server1 nginx]# cat install.sls
# nginx/install.sls — build nginx 1.10.1 from source: install the build
# dependencies, copy the tarball to the minion, then configure/make/install.
# `creates` makes cmd.run idempotent (skipped once /usr/local/nginx exists).
# Indentation restored; the paste had flattened it (invalid SLS/YAML).
nginx-install:
  pkg.installed:
    - pkgs:
      - gcc-c++
      - zlib-devel
      - openssl-devel
      - pcre-devel
  file.managed:
    - name: /mnt/nginx-1.10.1.tar.gz
    - source: salt://nginx/files/nginx-1.10.1.tar.gz
  cmd.run:
    # NOTE(review): `&> /dev/null` hides configure errors; drop it when debugging.
    - name: cd /mnt && tar zxf nginx-1.10.1.tar.gz && cd nginx-1.10.1 && ./configure --prefix=/usr/local/nginx --with-http_ssl_module --with-http_stub_status_module &> /dev/null && make && make install && ln -s /usr/local/nginx/sbin/nginx /usr/sbin/nginx
    - creates: /usr/local/nginx
安裝部分
用戶部分
[root@server1 salt]# cd user/
[root@server1 user]# ls
nginx.sls
[root@server1 user]# cat nginx.sls
# user/nginx.sls — dedicated system account for the nginx worker processes.
# No home directory is created; /usr/local/nginx is only recorded as home.
# Indentation restored (paste had flattened it).
nginx:
  user.present:
    - uid: 800
    - shell: /sbin/nologin
    - home: /usr/local/nginx
    - createhome: false
開啓服務(結合數個任務一起啓動)
[root@server1 nginx]# cat service.sls
# nginx/service.sls — combine the install and user states, deploy the nginx
# config and SysV init script, then keep the service enabled and running.
# A change to nginx.conf triggers a reload. Indentation restored, and the
# inline Chinese annotations (which were embedded inside YAML keys/values,
# making the file invalid) moved into comments.
include:
  - nginx.install
  - user.nginx

/usr/local/nginx/conf/nginx.conf:
  file.managed:
    - source: salt://nginx/files/nginx.conf
    - mode: 644

# SysV startup script
/etc/init.d/nginx:
  file.managed:
    - source: salt://nginx/files/nginx   # script's location on the master
    - mode: 755

nginx-service:
  service.running:
    - name: nginx
    - enable: true
    - reload: true
    - require:
      - file: /etc/init.d/nginx
    - watch:
      - file: /usr/local/nginx/conf/nginx.conf
腳本代碼
#!/bin/sh
#
# nginx Startup script for nginx
#
# chkconfig: - 85 15
# processname: nginx
# config: /usr/local/nginx/conf/nginx/nginx.conf
# pidfile: /usr/local/nginx/logs/nginx.pid
# description: nginx is an HTTP and reverse proxy server
#
### BEGIN INIT INFO
# Provides: nginx
# Required-Start: $local_fs $remote_fs $network
# Required-Stop: $local_fs $remote_fs $network
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: start and stop nginx
### END INIT INFO
# Source function library.
. /etc/rc.d/init.d/functions
# Resolve the real script path when invoked through a symlink.
if [ -L $0 ]; then
initscript=`/bin/readlink -f $0`
else
initscript=$0
fi
#sysconfig=`/bin/basename $initscript`
#if [ -f /etc/sysconfig/$sysconfig ]; then
# . /etc/sysconfig/$sysconfig
#fi
# Paths and tunables; each may be overridden from the environment
# (e.g. NGINX=/opt/nginx/sbin/nginx service nginx start).
nginx=${NGINX-/usr/local/nginx/sbin/nginx}
prog=`/bin/basename $nginx`
conffile=${CONFFILE-/usr/local/nginx/conf/nginx.conf}
lockfile=${LOCKFILE-/var/lock/subsys/nginx}
pidfile=${PIDFILE-/usr/local/nginx/logs/nginx.pid}
SLEEPMSEC=${SLEEPMSEC-200000}
UPGRADEWAITLOOPS=${UPGRADEWAITLOOPS-5}
RETVAL=0
# Start nginx via the functions-library `daemon` helper; touch the subsys
# lock file on success so the rc system knows the service is up.
start() {
echo -n $"Starting $prog: "
daemon --pidfile=${pidfile} ${nginx} -c ${conffile}
RETVAL=$?
echo
[ $RETVAL = 0 ] && touch ${lockfile}
return $RETVAL
}
# Stop nginx and remove the lock and pid files on success.
stop() {
echo -n $"Stopping $prog: "
killproc -p ${pidfile} ${prog}
RETVAL=$?
echo
[ $RETVAL = 0 ] && rm -f ${lockfile} ${pidfile}
}
# Graceful reload: HUP makes nginx re-read its configuration
# without dropping connections.
reload() {
echo -n $"Reloading $prog: "
killproc -p ${pidfile} ${prog} -HUP
RETVAL=$?
echo
}
# Zero-downtime binary upgrade: USR2 starts a new master alongside the
# old one; once both pid files exist, QUIT gracefully shuts the old
# master down. Polls up to UPGRADEWAITLOOPS times, SLEEPMSEC apart.
upgrade() {
oldbinpidfile=${pidfile}.oldbin
configtest -q || return
echo -n $"Starting new master $prog: "
killproc -p ${pidfile} ${prog} -USR2
echo
for i in `/usr/bin/seq $UPGRADEWAITLOOPS`; do
/bin/usleep $SLEEPMSEC
if [ -f ${oldbinpidfile} -a -f ${pidfile} ]; then
echo -n $"Graceful shutdown of old $prog: "
killproc -p ${oldbinpidfile} ${prog} -QUIT
RETVAL=$?
echo
return
fi
done
echo $"Upgrade failed!"
RETVAL=1
}
# Validate the configuration file; an optional -q is passed through to
# nginx to suppress its output.
configtest() {
if [ "$#" -ne 0 ] ; then
case "$1" in
-q)
FLAG=$1
;;
*)
;;
esac
shift
fi
${nginx} -t -c ${conffile} $FLAG
RETVAL=$?
return $RETVAL
}
# Report the running state via the functions-library `status` helper.
rh_status() {
status -p ${pidfile} ${nginx}
}
# See how we were called.
case "$1" in
start)
rh_status >/dev/null 2>&1 && exit 0
start
;;
stop)
stop
;;
status)
rh_status
RETVAL=$?
;;
restart)
configtest -q || exit $RETVAL
stop
start
;;
upgrade)
rh_status >/dev/null 2>&1 || exit 0
upgrade
;;
condrestart|try-restart)
if rh_status >/dev/null 2>&1; then
stop
start
fi
;;
force-reload|reload)
reload
;;
configtest)
configtest
;;
*)
echo $"Usage: $prog {start|stop|restart|condrestart|try-restart|force-reload|upgrade|reload|status|help|configtest}"
RETVAL=2
esac
exit $RETVAL
啓動測試
[root@server1 nginx]# salt server2 state.sls nginx.service
server2:
----------
ID: nginx-install
Function: pkg.installed
Result: True
Comment: All specified packages are already installed
Started: 12:10:03.592059
Duration: 400.953 ms
Changes:
----------
ID: nginx-install
Function: file.managed
Name: /mnt/nginx-1.10.1.tar.gz
Result: True
Comment: File /mnt/nginx-1.10.1.tar.gz is in the correct state
Started: 12:10:03.994657
Duration: 66.481 ms
Changes:
----------
ID: nginx-install
Function: cmd.run
Name: cd /mnt && tar zxf nginx-1.10.1.tar.gz && cd nginx-1.10.1 && ./configure --prefix=/usr/local/nginx --with-http_ssl_module --with-http_stub_status_module &> /dev/null && make && make install && ln -s /usr/local/nginx/sbin/nginx /usr/sbin/nginx
Result: True
Comment: /usr/local/nginx exists
Started: 12:10:04.061875
Duration: 0.361 ms
Changes:
----------
ID: nginx
Function: user.present
Result: True
Comment: New user nginx created
Started: 12:10:04.062773
Duration: 130.165 ms
Changes:
----------
fullname:
gid:
800
groups:
- nginx
home:
homephone:
name:
nginx
passwd:
x
roomnumber:
shell:
/sbin/nologin
uid:
800
workphone:
----------
ID: /usr/local/nginx/conf/nginx.conf
Function: file.managed
Result: True
Comment: File /usr/local/nginx/conf/nginx.conf is in the correct state
Started: 12:10:04.193104
Duration: 52.364 ms
Changes:
----------
ID: /etc/init.d/nginx
Function: file.managed
Result: True
Comment: File /etc/init.d/nginx updated
Started: 12:10:04.245611
Duration: 51.516 ms
Changes:
----------
diff:
New file
mode:
0755
----------
ID: nginx-service
Function: service.running
Name: nginx
Result: True
Comment: Service nginx has been enabled, and is running
Started: 12:10:04.298296
Duration: 122.8 ms
Changes:
----------
nginx:
True
Summary for server2
------------
Succeeded: 7 (changed=3)
Failed: 0
------------
Total states run: 7
Total run time: 824.640 ms
啓動成功
測試修改配置文件後的結果
修改最大兩個進程
#user nobody;
worker_processes 2;
再啓動一次測試結果
[root@server2 nginx]# ps ax|grep nginx
4635 ? Ss 0:00 nginx: master process /usr/local/nginx/sbin/nginx -c /usr/local/nginx/conf/nginx.conf
4702 ? S 0:00 nginx: worker process
4703 ? S 0:00 nginx: worker process
4707 pts/0 S+ 0:00 grep nginx
測試成功
**
一鍵部署 httpd nginx 基於haproxy的負載均衡配置
**
部署haproxy
# haproxy/install.sls — install haproxy from the repository, manage its
# config file, and reload the service whenever that file changes.
# Indentation restored (the paste had flattened it, invalid SLS/YAML).
haproxy-install:
  pkg.installed:
    - pkgs:
      - haproxy
  file.managed:
    - name: /etc/haproxy/haproxy.cfg
    - source: salt://haproxy/files/haproxy.cfg
  service.running:
    - name: haproxy
    - reload: True
    - watch:
      - file: haproxy-install
注意配置yum源
準備配置文件
[root@server1 haproxy]# cd files/
[root@server1 files]# ls
haproxy.cfg
vim haproxy.cfg
#---------------------------------------------------------------------
# main frontend which proxys to the backends
#---------------------------------------------------------------------
frontend main *:80
    default_backend app

#---------------------------------------------------------------------
# round-robin load balancing across the two web backends
#---------------------------------------------------------------------
backend app
    balance roundrobin
    server app1 172.25.15.8:80 check
    server app2 172.25.15.7:80 check
通過salt目錄下的top.sls將三個任務結合
[root@server1 salt]# ls
haproxy httpd nginx top.sls user
[root@server1 salt]# vim top.sls
# top.sls — assign one role per minion: haproxy on server1 (load balancer),
# httpd on server2, nginx on server3. Applied with `salt '*' state.highstate`.
# Indentation restored (paste had flattened it, invalid SLS/YAML).
base:
  'server1':
    - haproxy.install
  'server2':
    - httpd.install
  'server3':
    - nginx.install
運行任務
[root@server1 salt]# salt '*' state.highstate
server3:
----------
ID: nginx-install
Function: pkg.installed
Result: True
Comment: All specified packages are already installed
Started: 14:47:03.692262
Duration: 441.91 ms
Changes:
----------
ID: nginx-install
Function: file.managed
Name: /mnt/nginx-1.10.1.tar.gz
Result: True
Comment: File /mnt/nginx-1.10.1.tar.gz is in the correct state
Started: 14:47:04.137157
Duration: 107.952 ms
Changes:
----------
ID: nginx-install
Function: cmd.run
Name: cd /mnt && tar zxf nginx-1.10.1.tar.gz && cd nginx-1.10.1 && ./configure --prefix=/usr/local/nginx --with-http_ssl_module --with-http_stub_status_module &> /dev/null && make && make install && ln -s /usr/local/nginx/sbin/nginx /usr/sbin/nginx
Result: True
Comment: /usr/local/nginx exists
Started: 14:47:04.246020
Duration: 0.46 ms
Changes:
Summary for server3
------------
Succeeded: 3
Failed: 0
------------
Total states run: 3
Total run time: 550.322 ms
server2:
----------
ID: apache-install
Function: pkg.installed
Result: True
Comment: All specified packages are already installed
Started: 14:47:03.246948
Duration: 657.376 ms
Changes:
----------
ID: apache-install
Function: file.managed
Name: /etc/httpd/conf/httpd.conf
Result: True
Comment: File /etc/httpd/conf/httpd.conf updated
Started: 14:47:03.907012
Duration: 74.654 ms
Changes:
----------
diff:
---
+++
@@ -133,7 +133,7 @@
# prevent Apache from glomming onto all bound IP addresses (0.0.0.0)
#
#Listen 12.34.56.78:80
-Listen 8080
+Listen 80
#
# Dynamic Shared Object (DSO) Support
----------
ID: apache-install
Function: service.running
Name: httpd
Result: True
Comment: Service reloaded
Started: 14:47:04.015879
Duration: 80.605 ms
Changes:
----------
httpd:
True
Summary for server2
------------
Succeeded: 3 (changed=2)
Failed: 0
------------
Total states run: 3
Total run time: 812.635 ms
server1:
----------
ID: haproxy-install
Function: pkg.installed
Result: True
Comment: All specified packages are already installed
Started: 14:47:04.219933
Duration: 502.258 ms
Changes:
----------
ID: haproxy-install
Function: file.managed
Name: /etc/haproxy/haproxy.cfg
Result: True
Comment: File /etc/haproxy/haproxy.cfg is in the correct state
Started: 14:47:04.725139
Duration: 47.684 ms
Changes:
----------
ID: haproxy-install
Function: service.running
Name: haproxy
Result: True
Comment: The service haproxy is already running
Started: 14:47:04.773771
Duration: 35.477 ms
Changes:
Summary for server1
------------
Succeeded: 3
Failed: 0
------------
Total states run: 3
Total run time: 585.419 ms
在瀏覽器查看結果
**
修改grain roles方式來匹配主機
**
vim /etc/salt/minion
# Custom static grains for this minion can be specified here and used in SLS
# files just like all other grains. This example sets 4 custom grains, with
# the 'roles' grain having two values that can be matched against.
# Static custom grains for this minion, usable for grain-based targeting
# ('roles:apache' with match: grain). Indentation restored to valid YAML.
grains:
  roles:
    - apache
#    - memcache
#  deployment: datacenter4
#  cabinet: 13
#  cab_u: 14-15
#
/etc/init.d/salt-minion restart
server3同樣修改爲httpd即可
server1端
[root@server1 salt]# salt '*' grains.item roles
server2:
----------
roles:
- apache
server3:
----------
roles:
- httpd
server1:
----------
roles:
查看到結果後,通過結果來修改top.sls
# top.sls — target by grain instead of minion id: any minion whose
# `roles` grain contains 'apache' gets httpd, 'httpd' gets nginx;
# server1 is still matched by id. Indentation restored to valid SLS/YAML.
base:
  'server1':
    - haproxy.install
  'roles:apache':
    - match: grain
    - httpd.install
  'roles:httpd':
    - match: grain
    - nginx.install
啓動服務
[root@server1 salt]# salt '*' state.highstate
server1:
----------
ID: haproxy-install
Function: pkg.installed
Result: True
Comment: All specified packages are already installed
Started: 15:41:59.844101
Duration: 398.342 ms
Changes:
----------
ID: haproxy-install
Function: file.managed
Name: /etc/haproxy/haproxy.cfg
Result: True
Comment: File /etc/haproxy/haproxy.cfg is in the correct state
Started: 15:42:00.245248
Duration: 48.378 ms
Changes:
----------
ID: haproxy-install
Function: service.running
Name: haproxy
Result: True
Comment: The service haproxy is already running
Started: 15:42:00.294540
Duration: 39.048 ms
Changes:
Summary for server1
------------
Succeeded: 3
Failed: 0
------------
Total states run: 3
Total run time: 485.768 ms
server3:
----------
ID: nginx-install
Function: pkg.installed
Result: True
Comment: All specified packages are already installed
Started: 15:42:00.467240
Duration: 438.167 ms
Changes:
----------
ID: nginx-install
Function: file.managed
Name: /mnt/nginx-1.10.1.tar.gz
Result: True
Comment: File /mnt/nginx-1.10.1.tar.gz is in the correct state
Started: 15:42:00.907928
Duration: 83.286 ms
Changes:
----------
ID: nginx-install
Function: cmd.run
Name: cd /mnt && tar zxf nginx-1.10.1.tar.gz && cd nginx-1.10.1 && ./configure --prefix=/usr/local/nginx --with-http_ssl_module --with-http_stub_status_module &> /dev/null && make && make install && ln -s /usr/local/nginx/sbin/nginx /usr/sbin/nginx
Result: True
Comment: /usr/local/nginx exists
Started: 15:42:00.992076
Duration: 0.492 ms
Changes:
Summary for server3
------------
Succeeded: 3
Failed: 0
------------
Total states run: 3
Total run time: 521.945 ms
server2:
----------
ID: apache-install
Function: pkg.installed
Result: True
Comment: All specified packages are already installed
Started: 15:41:59.941709
Duration: 447.302 ms
Changes:
----------
ID: apache-install
Function: file.managed
Name: /etc/httpd/conf/httpd.conf
Result: True
Comment: File /etc/httpd/conf/httpd.conf is in the correct state
Started: 15:42:00.391541
Duration: 50.933 ms
Changes:
----------
ID: apache-install
Function: service.running
Name: httpd
Result: True
Comment: The service httpd is already running
Started: 15:42:00.443469
Duration: 30.044 ms
Changes:
Summary for server2
------------
Succeeded: 3
Failed: 0
------------
Total states run: 3
Total run time: 528.279 ms
**
推送方式匹配
**
mkdir _grains
cd _grains/
vim my_grains.py
#!/usr/bin/env python


def my_grains():
    """Custom grains module synced to minions via saltutil.sync_grains.

    Returns:
        dict: the static grains to merge into the minion's grain data.
    """
    return {
        'hello': 'world',
        'salt': 'stack',
    }
運行結果
[root@server1 _grains]# salt server2 saltutil.sync_grains
server2:
- grains.my_grains
查看
[root@server1 _grains]# salt '*' grains.item hello
server2:
----------
hello:
world
server3:
----------
hello:
server1:
----------
hello:
[root@server1 _grains]# salt '*' grains.item salt
server1:
----------
salt:
server3:
----------
salt:
server2:
----------
salt:
stack
**
通過pillar方式匹配
**
打開pillar
# Salt Pillars allow for the building of global data that can be made selectively
# available to different minions based on minion grain filtering. The Salt
# Pillar is laid out in the same fashion as the file server, with environments,
# a top file and sls files. However, pillar data does not need to be in the
# highstate format, and is generally just key/value pairs.
# Salt master config: root of the base pillar tree.
# Indentation restored (paste had flattened it; master config is YAML).
pillar_roots:
  base:
    - /srv/pillar
#
mkdir /srv/pillar
cd /srv/pillar/
mkdir web
cd web
vim install.sls
{# Select the webserver pillar value per minion, keyed on its fqdn grain. #}
{% if grains['fqdn'] == 'server2' %}
webserver: httpd
{% elif grains['fqdn'] == 'server3' %}
webserver: nginx
{% endif %}
刷新pillar
cd /srv/salt/pillar/
vim top.sls
# pillar top.sls — expose web/install.sls pillar data to every minion.
# Indentation restored (paste had flattened it, invalid SLS/YAML).
base:
  '*':
    - web.install
測試pillar刷新
[root@server1 pillar]# salt '*' saltutil.refresh_pillar
server3:
True
server2:
True
server1:
True
運行pillar任務
[root@server1 pillar]# salt '*' pillar.items
server2:
----------
webserver:
httpd
server3:
----------
webserver:
nginx
server1:
---------- (沒有定義爲空)
**
通過模塊修改配置文件/lib方式
**
修改httpd配置文件使其接受參數 ####listen
# Listen: Allows you to bind Apache to specific IP addresses and/or
# ports, in addition to the default. See also the <VirtualHost>
# directive.
#
# Change this to Listen on specific IP addresses as shown below to
# prevent Apache from glomming onto all bound IP addresses (0.0.0.0)
#
#Listen 12.34.56.78:80
Listen {{bind}}:{{port}} ###########修改點
通過lib.sls修改
在httpd目錄下創建 lib.sls
{% set port = 8080 %}
httpd配置文件第一行添加
{% from 'httpd/lib.sls' import port with context %}
啓動服務
[root@server1 httpd]# salt server2 state.sls httpd.install
server2:
----------
ID: apache-install
Function: pkg.installed
Result: True
Comment: All specified packages are already installed
Started: 17:13:20.598640
Duration: 409.492 ms
Changes:
----------
ID: apache-install
Function: file.managed
Name: /etc/httpd/conf/httpd.conf
Result: True
Comment: File /etc/httpd/conf/httpd.conf updated
Started: 17:13:21.010265
Duration: 116.424 ms
Changes:
----------
diff:
---
+++
@@ -134,7 +134,7 @@
# prevent Apache from glomming onto all bound IP addresses (0.0.0.0)
#
#Listen 12.34.56.78:80
-Listen
+Listen 172.25.15.7:8080
#
# Dynamic Shared Object (DSO) Support
----------
ID: apache-install
Function: service.running
Name: httpd
Result: True
Comment: Service reloaded
Started: 17:13:21.157426
Duration: 75.082 ms
Changes:
----------
httpd:
True
Summary for server2
------------
Succeeded: 3 (changed=2)
Failed: 0
------------
Total states run: 3
Total run time: 600.998 ms
第二種方式 vim install.sls
添加jinja模塊 指定ip端口即可
啓動服務
看到修改後的8080又變爲80
# httpd/install.sls (jinja variant) — same as the basic state, but renders
# httpd.conf as a Jinja template so `Listen {{bind}}:{{port}}` in the file
# receives concrete values from the context below.
# Indentation restored (paste had flattened it, invalid SLS/YAML).
apache-install:
  pkg.installed:
    - pkgs:
      - httpd
      - php
  file.managed:
    - name: /etc/httpd/conf/httpd.conf
    - source: salt://httpd/files/httpd.conf
    - mode: 644
    - user: root
    - template: jinja
    - context:
        bind: 172.25.15.7
        port: 8080
  service.running:
    - name: httpd
    - enable: True
    - reload: True
    - watch:
      - file: apache-install
啓動服務
[root@server1 httpd]# salt server2 state.sls httpd.install
server2:
----------
ID: apache-install
Function: pkg.installed
Result: True
Comment: All specified packages are already installed
Started: 17:17:56.983768
Duration: 417.926 ms
Changes:
----------
ID: apache-install
Function: file.managed
Name: /etc/httpd/conf/httpd.conf
Result: True
Comment: File /etc/httpd/conf/httpd.conf updated
Started: 17:17:57.403983
Duration: 87.204 ms
Changes:
----------
diff:
---
+++
@@ -1,4 +1,3 @@
-
#
# This is the main Apache server configuration file. It contains the
# configuration directives that give the server its instructions.
@@ -134,7 +133,7 @@
# prevent Apache from glomming onto all bound IP addresses (0.0.0.0)
#
#Listen 12.34.56.78:80
-Listen 172.25.15.7:8080
+Listen 172.25.15.7:80
#
# Dynamic Shared Object (DSO) Support
----------
ID: apache-install
Function: service.running
Name: httpd
Result: True
Comment: Service reloaded
Started: 17:17:57.522911
Duration: 74.63 ms
Changes:
----------
httpd:
True
Summary for server2
------------
Succeeded: 3 (changed=2)
Failed: 0
------------
Total states run: 3
Total run time: 579.760 ms
一鍵部署 httpd nginx hproxy keepalived的高可用配置
編寫keepalived源碼安裝
將已有的安裝包 配置文件 啓動腳本放在 files目錄下即可
[root@server1 salt]# cd keepalived/files/
[root@server1 files]# ls
keepalived keepalived-2.0.6.tar.gz keepalived.conf
開始編輯安裝部分
[root@server1 keepalived]# vim install.sls
#################################################################
# keepalived/install.sls — build keepalived 2.0.6 from source and wire up
# the standard config/sysconfig/sbin paths via symlinks.
# NOTE(review): in the original paste the pkg.installed declaration had no
# state ID of its own (only a divider line above it); it is given an
# explicit ID here. As the original note said, this dependency part could
# also live in its own sls and be pulled in with `include`.
kp-depends:
  pkg.installed:
    - pkgs:
      - gcc-c++
      - zlib-devel
      - openssl-devel
      - pcre-devel

kp-install:
  file.managed:
    - name: /mnt/keepalived-2.0.6.tar.gz
    - source: salt://keepalived/files/keepalived-2.0.6.tar.gz
  cmd.run:
    - name: cd /mnt && tar zxf keepalived-2.0.6.tar.gz && cd keepalived-2.0.6 && ./configure --prefix=/usr/local/keepalived --with-init=SYSV &> /dev/null && make && make install &> /dev/null
    - creates: /usr/local/keepalived

/etc/keepalived:
  file.directory:
    - mode: 755

/etc/sysconfig/keepalived:
  file.symlink:
    - target: /usr/local/keepalived/etc/sysconfig/keepalived

/sbin/keepalived:
  file.symlink:
    - target: /usr/local/keepalived/sbin/keepalived
編寫pillar中關於keepalived部分
在pillar中mkdir keepalived即可
####################################################
{# Per-host VRRP parameters, selected by the minion's fqdn grain; consumed
   by keepalived/service.sls when rendering keepalived.conf. #}
{% if grains['fqdn'] == 'server1' %}
state: MASTER
vrid: 66
priority: 100
{% elif grains['fqdn'] == 'server4' %}
state: BACKUP
vrid: 66
priority: 50
{% endif %}
這一部分主要用於對keepalived推送時配置文件中參數修改
編寫keepalived的啓動及配置文件部分
################################################################
# keepalived/service.sls — render keepalived.conf from pillar via Jinja,
# deploy the init script, and keep the service running with reload-on-change.
# Indentation restored, and the inline Chinese annotations moved to comments:
# in the paste they trailed the YAML values (e.g. after {{ pillar['state'] }}),
# which would have become part of the scalar value — invalid configuration.
include:
  - keepalived.install

/etc/keepalived/keepalived.conf:
  file.managed:
    - source: salt://keepalived/files/keepalived.conf
    - template: jinja   # renders {{ STATE }}, {{ VRID }}, {{ PRIORITY }}
    - context:
        # keep the upper/lower-case naming distinct from the pillar keys
        STATE: {{ pillar['state'] }}
        VRID: {{ pillar['vrid'] }}
        PRIORITY: {{ pillar['priority'] }}

kp-service:
  file.managed:
    - name: /etc/init.d/keepalived
    - source: salt://keepalived/files/keepalived
    - mode: 755
  service.running:
    - name: keepalived
    - reload: True
    - watch:
      - file: /etc/keepalived/keepalived.conf
編輯top.sls文件,做到一鍵部署
########################################################
# top.sls — one-shot deployment: keepalived + haproxy on both load
# balancers (server1/server4), web servers targeted by their roles grain.
# Indentation restored (paste had flattened it, invalid SLS/YAML).
base:
  'server1':
    - keepalived.service
    - haproxy.install
  'server4':
    - keepalived.service
    - haproxy.install
  'roles:apache':
    - match: grain
    - httpd.install
  'roles:httpd':
    - match: grain
    - nginx.install
修改files下的配置文件 使其可以接受參數來改變
[root@server1 keepalived]# cd files/
[root@server1 files]# vim keepalived.conf
#############################################################
! Configuration File for keepalived
! state / virtual_router_id / priority are Jinja placeholders filled in
! by Salt from pillar data when the file is deployed.

global_defs {
   notification_email {
      root@localhost
   }
   notification_email_from keepalived@localhost
   smtp_server 127.0.0.1
   smtp_connect_timeout 30
   router_id LVS_DEVEL
   vrrp_skip_check_adv_addr
#  vrrp_strict
   vrrp_garp_interval 0
   vrrp_gna_interval 0
}

vrrp_instance VI_1 {
    state {{ STATE }}
    interface eth0
    virtual_router_id {{ VRID }}
    priority {{ PRIORITY }}
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        172.25.15.100
    }
}
測試
#####################################################
#######################################################
[root@server1 salt]# salt '*' state.highstate
server2 3結果沒有貼出 上一個模塊已經測試出結果 這裏不再贅述
server4
----------
ID: kp-install
Function: file.managed
Name: /mnt/keepalived-2.0.6.tar.gz
Result: True
Comment: File /mnt/keepalived-2.0.6.tar.gz is in the correct state
Started: 11:10:27.235394
Duration: 304.01 ms
Changes:
----------
ID: kp-install
Function: cmd.run
Name: cd /mnt && tar zxf keepalived-2.0.6.tar.gz && cd keepalived-2.0.6 && ./configure --prefix=/usr/local/keepalived --with-init=SYSV &> /dev/null && make && make install &> /dev/null
Result: True
Comment: /usr/local/keepalived exists
Started: 11:10:27.540540
Duration: 3.811 ms
Changes:
----------
ID: /etc/keepalived
Function: file.directory
Result: True
Comment: Directory /etc/keepalived is in the correct state
Started: 11:10:27.544569
Duration: 1.025 ms
Changes:
----------
ID: /etc/sysconfig/keepalived
Function: file.symlink
Result: True
Comment: Symlink /etc/sysconfig/keepalived is present and owned by root:root
Started: 11:10:27.545784
Duration: 16.949 ms
Changes:
----------
ID: /sbin/keepalived
Function: file.symlink
Result: True
Comment: Symlink /sbin/keepalived is present and owned by root:root
Started: 11:10:27.562941
Duration: 5.863 ms
Changes:
----------
ID: /etc/keepalived/keepalived.conf
Function: file.managed
Result: True
Comment: File /etc/keepalived/keepalived.conf updated
Started: 11:10:27.569010
Duration: 121.545 ms
Changes:
----------
diff:
---
+++
@@ -9,7 +9,7 @@
smtp_connect_timeout 30
router_id LVS_DEVEL
vrrp_skip_check_adv_addr
- vrrp_strict
+# vrrp_strict
vrrp_garp_interval 0
vrrp_gna_interval 0
}
----------
ID: kp-service
Function: file.managed
Name: /etc/init.d/keepalived
Result: True
Comment: File /etc/init.d/keepalived is in the correct state
Started: 11:10:27.690769
Duration: 69.875 ms
Changes:
----------
ID: kp-service
Function: service.running
Name: keepalived
Result: True
Comment: Service reloaded
Started: 11:10:27.844983
Duration: 82.323 ms
Changes:
----------
keepalived:
True
----------
ID: haproxy-install
Function: pkg.installed
Result: True
Comment: All specified packages are already installed
Started: 11:10:28.781608
Duration: 587.1 ms
Changes:
----------
ID: haproxy-install
Function: file.managed
Name: /etc/haproxy/haproxy.cfg
Result: True
Comment: File /etc/haproxy/haproxy.cfg is in the correct state
Started: 11:10:29.368942
Duration: 35.71 ms
Changes:
----------
ID: haproxy-install
Function: service.running
Name: haproxy
Result: True
Comment: The service haproxy is already running
Started: 11:10:29.405251
Duration: 39.054 ms
Changes:
Summary for server4
-------------
Succeeded: 11 (changed=2)
Failed: 0
-------------
Total states run: 11
Total run time: 1.267 s
server1:
----------
ID: kp-install
Function: file.managed
Name: /mnt/keepalived-2.0.6.tar.gz
Result: True
Comment: File /mnt/keepalived-2.0.6.tar.gz is in the correct state
Started: 11:10:27.294920
Duration: 311.378 ms
Changes:
----------
ID: kp-install
Function: cmd.run
Name: cd /mnt && tar zxf keepalived-2.0.6.tar.gz && cd keepalived-2.0.6 && ./configure --prefix=/usr/local/keepalived --with-init=SYSV &> /dev/null && make && make install &> /dev/null
Result: True
Comment: /usr/local/keepalived exists
Started: 11:10:27.609328
Duration: 0.769 ms
Changes:
----------
ID: /etc/keepalived
Function: file.directory
Result: True
Comment: Directory /etc/keepalived is in the correct state
Started: 11:10:27.610336
Duration: 1.081 ms
Changes:
----------
ID: /etc/sysconfig/keepalived
Function: file.symlink
Result: True
Comment: Symlink /etc/sysconfig/keepalived is present and owned by root:root
Started: 11:10:27.611605
Duration: 14.098 ms
Changes:
----------
ID: /sbin/keepalived
Function: file.symlink
Result: True
Comment: Symlink /sbin/keepalived is present and owned by root:root
Started: 11:10:27.625926
Duration: 4.974 ms
Changes:
----------
ID: /etc/keepalived/keepalived.conf
Function: file.managed
Result: True
Comment: File /etc/keepalived/keepalived.conf updated
Started: 11:10:27.631108
Duration: 196.926 ms
Changes:
----------
diff:
---
+++
@@ -9,7 +9,7 @@
smtp_connect_timeout 30
router_id LVS_DEVEL
vrrp_skip_check_adv_addr
- vrrp_strict
+# vrrp_strict
vrrp_garp_interval 0
vrrp_gna_interval 0
}
----------
ID: kp-service
Function: file.managed
Name: /etc/init.d/keepalived
Result: True
Comment: File /etc/init.d/keepalived is in the correct state
Started: 11:10:27.828277
Duration: 67.606 ms
Changes:
----------
ID: kp-service
Function: service.running
Name: keepalived
Result: True
Comment: Service reloaded
Started: 11:10:27.959411
Duration: 59.854 ms
Changes:
----------
keepalived:
True
----------
ID: haproxy-install
Function: pkg.installed
Result: True
Comment: All specified packages are already installed
Started: 11:10:28.821571
Duration: 487.254 ms
Changes:
----------
ID: haproxy-install
Function: file.managed
Name: /etc/haproxy/haproxy.cfg
Result: True
Comment: File /etc/haproxy/haproxy.cfg is in the correct state
Started: 11:10:29.309049
Duration: 46.521 ms
Changes:
----------
ID: haproxy-install
Function: service.running
Name: haproxy
Result: True
Comment: The service haproxy is already running
Started: 11:10:29.356102
Duration: 34.616 ms
Changes:
Summary for server1
-------------
Succeeded: 11 (changed=2)
Failed: 0
-------------
Total states run: 11
Total run time: 1.225 s
在server1查看虛擬ip
配置成功
[root@server1 files]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 52:54:00:2d:98:9a brd ff:ff:ff:ff:ff:ff
inet 172.25.15.6/24 brd 172.25.15.255 scope global eth0
inet 172.25.15.100/32 scope global eth0
inet6 fe80::5054:ff:fe2d:989a/64 scope link
valid_lft forever preferred_lft forever
在瀏覽器中測試
**
測試高可用性能
**
關閉server1 keepalived
[root@server1 files]# /etc/init.d/keepalived stop
Stopping keepalived: [ OK ]
[root@server1 files]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 52:54:00:2d:98:9a brd ff:ff:ff:ff:ff:ff
inet 172.25.15.6/24 brd 172.25.15.255 scope global eth0
inet6 fe80::5054:ff:fe2d:989a/64 scope link
valid_lft forever preferred_lft forever
server4查看虛擬ip是否漂移
#############################################
[root@server4 init.d]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 52:54:00:b7:b9:49 brd ff:ff:ff:ff:ff:ff
inet 172.25.15.9/24 brd 172.25.15.255 scope global eth0
inet 172.25.15.100/32 scope global eth0
inet6 fe80::5054:ff:feb7:b949/64 scope link
valid_lft forever preferred_lft forever
成功
在瀏覽器再測試一次
成功
利用腳本控制keepalived達到haproxy的高可用
# Run the haproxy health-check script every 2 seconds; `weight 2` adjusts
# this node's VRRP priority based on the script result.
# Fix: the inline Chinese annotation that trailed `interval 2` in the
# original is not valid keepalived syntax and has been moved here.
vrrp_script chk_haproxy {
    script "/opt/chk_haproxy.sh"
    interval 2
    weight 2
}
控制腳本
#!/bin/bash
# Watchdog for haproxy, run periodically by keepalived's vrrp_script:
# if no haproxy process is found, try to start it; if it still is not
# running afterwards, stop keepalived so the virtual IP fails over to
# the surviving node.
#
running=$(ps aux | grep haproxy | grep -v grep | grep -v bash | wc -l)

if [ "${running}" = "0" ]; then
    /etc/init.d/haproxy start
    # re-check: did the restart actually bring haproxy back?
    running=$(ps aux | grep haproxy | grep -v grep | grep -v bash | wc -l)
    if [ "${running}" = "0" ]; then
        /etc/init.d/keepalived stop
    fi
fi
重啓服務即可
在haproxy出現問題後,自動關閉keepalived,虛擬ip自動漂移至存活的主機
利用數據庫保存minion的返回結果
**
在主機端配置主配置文件
vim /etc/salt/master
# Which returner(s) will be used for minion's result:
#return: mysql
#
master_job_cache: mysql
mysql.host: 'localhost'
mysql.user: 'salt'
mysql.pass: 'westos'
mysql.db: 'salt'
mysql.port: 3306
根據官方文檔導入數據庫salt
vim test.sql
#################################################################
-- Schema for SaltStack's MySQL job cache / returner (master_job_cache: mysql).
-- Creates the `salt` database and the three tables the mysql returner writes:
-- `jids` (job payloads), `salt_returns` (per-minion results), `salt_events`
-- (event bus records). Column layout follows the official Salt documentation.
CREATE DATABASE `salt`
DEFAULT CHARACTER SET utf8
DEFAULT COLLATE utf8_general_ci;
USE `salt`;
--
-- Table structure for table `jids`
-- One row per job id; `load` holds the serialized job payload.
--
DROP TABLE IF EXISTS `jids`;
CREATE TABLE `jids` (
`jid` varchar(255) NOT NULL,
`load` mediumtext NOT NULL,
UNIQUE KEY `jid` (`jid`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
--
-- Table structure for table `salt_returns`
-- One row per (jid, minion id); `return` is the function result,
-- `full_ret` the complete JSON return document.
--
DROP TABLE IF EXISTS `salt_returns`;
CREATE TABLE `salt_returns` (
`fun` varchar(50) NOT NULL,
`jid` varchar(255) NOT NULL,
`return` mediumtext NOT NULL,
`id` varchar(255) NOT NULL,
`success` varchar(10) NOT NULL,
`full_ret` mediumtext NOT NULL,
`alter_time` TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
KEY `id` (`id`),
KEY `jid` (`jid`),
KEY `fun` (`fun`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
--
-- Table structure for table `salt_events`
-- Records events from the Salt event bus, keyed by auto-increment id.
--
DROP TABLE IF EXISTS `salt_events`;
CREATE TABLE `salt_events` (
`id` BIGINT NOT NULL AUTO_INCREMENT,
`tag` varchar(255) NOT NULL,
`data` mediumtext NOT NULL,
`alter_time` TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
`master_id` varchar(255) NOT NULL,
PRIMARY KEY (`id`),
KEY `tag` (`tag`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
導入數據庫
mysql < test.sql
創建管理用戶
grant all on salt.* to salt@localhost identified by 'westos';
重啓服務即可
測試
[root@server1 files]# salt server3 cmd.run 'df -h'
server3:
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/VolGroup-lv_root 19G 1.1G 17G 7% /
tmpfs 499M 32K 499M 1% /dev/shm
/dev/sda1 485M 33M 427M 8% /boot
mysql> use salt
Reading table information for completion of table and column names
You can turn off this feature to get a quicker startup with -A
Database changed
mysql> select * from salt_returns\G;
*************************** 1. row ***************************
fun: cmd.run
jid: 20180818144519392316
return: "Filesystem Size Used Avail Use% Mounted on\n/dev/mapper/VolGroup-lv_root 19G 1.1G 17G 7% /\ntmpfs 499M 32K 499M 1% /dev/shm\n/dev/sda1 485M 33M 427M 8% /boot"
id: server3
success: 1
full_ret: {"fun_args": ["df -h"], "jid": "20180818144519392316", "return": "Filesystem Size Used Avail Use% Mounted on\n/dev/mapper/VolGroup-lv_root 19G 1.1G 17G 7% /\ntmpfs 499M 32K 499M 1% /dev/shm\n/dev/sda1 485M 33M 427M 8% /boot", "retcode": 0, "success": true, "cmd": "_return", "_stamp": "2018-08-18T06:45:19.866770", "fun": "cmd.run", "id": "server3"}
alter_time: 2018-08-18 14:45:19
1 row in set (0.00 sec)
ERROR:
No query specified
mysql>
自定義模塊
mkdir /srv/salt/_modules
cd /srv/salt/_modules/
ls
vim my_disk.py
chmod +x my_disk.py
****************************************************
#!/usr/bin/env python
def df():
    """Custom Salt execution module function: run ``df -h`` on the minion.

    ``__salt__`` is injected by the Salt loader at sync time and maps
    execution-module names to callables; this simply delegates to cmd.run.
    """
    run_cmd = __salt__['cmd.run']
    return run_cmd('df -h')
**************************************************
刷新
[root@server1 _modules]# salt '*' saltutil.sync_all
server2:
----------
beacons:
engines:
grains:
log_handlers:
modules:
- modules.my_disk ******************************
output:
proxymodules:
renderers:
returners:
sdb:
states:
utils:
運行
[root@server1 _modules]# salt '*' my_disk.df
server3:
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/VolGroup-lv_root 19G 1.1G 17G 7% /
tmpfs 499M 32K 499M 1% /dev/shm
/dev/sda1 485M 33M 427M 8% /boot
server4:
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/VolGroup-lv_root 19G 1.1G 17G 7% /
tmpfs 499M 16K 499M 1% /dev/shm
/dev/sda1 485M 33M 427M 8% /boot
server2:
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/VolGroup-lv_root 19G 1.1G 17G 7% /
tmpfs 499M 48K 499M 1% /dev/shm
/dev/sda1 485M 33M 427M 8% /boot
server1:
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/VolGroup-lv_root 19G 1.1G 17G 7% /
tmpfs 499M 96K 499M 1% /dev/shm
/dev/sda1 485M 33M 427M 8% /boot
可以在文件中自行添加模塊
配置syndic來優化集羣
配置syndic
yum install salt-syndic
修改master文件
# If this master will be running a salt syndic daemon, syndic_master tells
# this master where to receive commands from.
syndic_master: 172.25.15.9
修改主server的master文件 打開功能
# Set the order_masters setting to True if this master will command lower
# masters' syndic interfaces.
order_masters: True
兩臺主機重啓所有服務
測試
syndic主機上連接的爲server1、server2、server3主機
[root@server1 _modules]# salt-key
Accepted Keys:
server1
server2
server3
主master 只有syndic主機
[root@server4 rhel6]# salt-key
Accepted Keys:
server1
Denied Keys:
Unaccepted Keys:
Rejected Keys:
[root@server4 rhel6]#
測試
[root@server4 rhel6]# salt '*' test.ping
server2:
True
server3:
True
server1:
True
說明主master與syndic所鏈接的主機產生了聯繫
**
配置ssh鏈接
**
#web2:
# host: 192.168.42.2
server3:
host: 172.25.15.8
user: root
passwd: westos
port: 22
[root@server1 _modules]# salt-ssh '*' test.ping -i
server3:
True