2.28 在線部署多節點openstack-rocky
一:系統安裝
創建三臺虛擬機,勾選cpu虛擬化,硬件設置如上所示,基本思路與本地部署差不多,區別就是yum倉庫的區別,還要開啓yum的緩存軟件包的功能,對緩存的軟件包進行封裝,可以作爲本地安裝的工具包用
上移到install centos 7,然後按tab鍵,在quiet後面空格,輸入net.ifnames=0 biosdevname=0
代表把ens 變成eth模式
備註:每個節點安裝系統前都要這麼操作
系統安裝在300G磁盤上,最小化安裝
二:系統環境配置
2.1 修改主機名,網卡IP地址配置
當前設置,網卡211
- CentOS 7 1908 r版 online ct eth0(vm1):192.168.254.20 eth1(nat):192.168.247.20
[root@localhost ~]# cd /etc/sysconfig/network-scripts/
[root@localhost network-scripts]# vi ifcfg-eth0
TYPE=Ethernet
PROXY_METHOD=none
BROWSER_ONLY=no
BOOTPROTO=static
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
NAME=eth0
UUID=cc032fa7-0e90-4a38-8bc8-f54fa9919033
DEVICE=eth0
ONBOOT=yes
IPADDR=192.168.254.20
NETMASK=255.255.255.0
[root@localhost network-scripts]# vi ifcfg-eth1
TYPE=Ethernet
PROXY_METHOD=none
BROWSER_ONLY=no
BOOTPROTO=static
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
NAME=eth1
UUID=b46df96e-f65f-4831-8444-22f75d53e3d7
DEVICE=eth1
ONBOOT=yes
IPADDR=192.168.247.20
NETMASK=255.255.255.0
GATEWAY=192.168.247.2
DNS1=8.8.8.8
DNS2=114.114.114.114
[root@localhost network-scripts]# systemctl restart network
[root@localhost network-scripts]# ping www.baidu.com
PING www.wshifen.com (104.193.88.77) 56(84) bytes of data.
64 bytes from 104.193.88.77 (104.193.88.77): icmp_seq=1 ttl=128 time=164 ms
64 bytes from 104.193.88.77 (104.193.88.77): icmp_seq=2 ttl=128 time=163 ms
64 bytes from 104.193.88.77 (104.193.88.77): icmp_seq=3 ttl=128 time=163 ms
^C
--- www.wshifen.com ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 2003ms
rtt min/avg/max/mdev = 163.201/163.657/164.334/0.589 ms
[root@localhost network-scripts]#
[root@localhost network-scripts]# ip addr
1: lo:
inet 127.0.0.1/8 scope host lo
2: eth0:
inet 192.168.254.20/24 brd 192.168.254.255 scope global noprefixroute eth0
3: eth1:
inet 192.168.247.20/24 brd 192.168.247.255 scope global noprefixroute eth1
[root@localhost network-scripts]# hostnamectl set-hostname ct
[root@localhost network-scripts]# su
[root@ct network-scripts]# cd ~
[root@ct ~]#
- CentOS 7 1908 r版 online comp1 eth0(vm1):192.168.254.21
[root@localhost ~]# cd /etc/sysconfig/network-scripts/
[root@localhost network-scripts]# vi ifcfg-eth0
TYPE=Ethernet
PROXY_METHOD=none
BROWSER_ONLY=no
BOOTPROTO=static
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
NAME=eth0
UUID=81d7144e-ca96-475e-a28c-5e10dd9a79cb
DEVICE=eth0
ONBOOT=yes
IPADDR=192.168.254.21
NETMASK=255.255.255.0
[root@localhost network-scripts]# systemctl restart network
[root@localhost network-scripts]# ping 192.168.254.20
PING 192.168.254.20 (192.168.254.20) 56(84) bytes of data.
64 bytes from 192.168.254.20: icmp_seq=1 ttl=64 time=1.26 ms
64 bytes from 192.168.254.20: icmp_seq=2 ttl=64 time=0.725 ms
[root@localhost network-scripts]# hostnamectl set-hostname comp1
[root@localhost network-scripts]# su
[root@comp1 network-scripts]# cd ~
[root@comp1 ~]#
- CentOS 7 1908 r版 online comp2 eth0(vm1):192.168.254.22
[root@localhost ~]# cd /etc/sysconfig/network-scripts/
[root@localhost network-scripts]# vi ifcfg-eth0
TYPE=Ethernet
PROXY_METHOD=none
BROWSER_ONLY=no
BOOTPROTO=static
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
NAME=eth0
UUID=578cea9e-f6f0-4018-8776-098b15378e86
DEVICE=eth0
ONBOOT=yes
IPADDR=192.168.254.22
NETMASK=255.255.255.0
[root@localhost network-scripts]# ping 192.168.254.20
PING 192.168.254.20 (192.168.254.20) 56(84) bytes of data.
64 bytes from 192.168.254.20: icmp_seq=1 ttl=64 time=1.64 ms
64 bytes from 192.168.254.20: icmp_seq=2 ttl=64 time=1.31 ms
^C
--- 192.168.254.20 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1002ms
rtt min/avg/max/mdev = 1.311/1.480/1.649/0.169 ms
[root@localhost network-scripts]# ping 192.168.254.22
PING 192.168.254.22 (192.168.254.22) 56(84) bytes of data.
64 bytes from 192.168.254.22: icmp_seq=1 ttl=64 time=0.042 ms
64 bytes from 192.168.254.22: icmp_seq=2 ttl=64 time=0.023 ms
^C
--- 192.168.254.22 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1001ms
rtt min/avg/max/mdev = 0.023/0.032/0.042/0.011 ms
[root@localhost network-scripts]# ping 192.168.254.21
PING 192.168.254.21 (192.168.254.21) 56(84) bytes of data.
64 bytes from 192.168.254.21: icmp_seq=1 ttl=64 time=1.30 ms
64 bytes from 192.168.254.21: icmp_seq=2 ttl=64 time=0.728 ms
[root@localhost network-scripts]# hostnamectl set-hostname comp2
[root@localhost network-scripts]# su
[root@comp2 network-scripts]# cd ~
[root@comp2 ~]#
2.2 統一永久關閉防火牆,核心防護
[root@ct ~]# systemctl stop firewalld
[root@ct ~]# systemctl disable firewalld
Removed symlink /etc/systemd/system/multi-user.target.wants/firewalld.service.
Removed symlink /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.
[root@ct ~]# setenforce 0
[root@ct ~]# sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
2.3 統一永久關閉網絡管理
[root@comp2 ~]# systemctl stop NetworkManager
[root@comp2 ~]# systemctl disable NetworkManager
Removed symlink /etc/systemd/system/multi-user.target.wants/NetworkManager.service.
Removed symlink /etc/systemd/system/dbus-org.freedesktop.nm-dispatcher.service.
Removed symlink /etc/systemd/system/network-online.target.wants/NetworkManager-wait-online.service.
2.4 修改hosts文件
[root@ct ~]# vi /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.254.20 ct
192.168.254.21 comp1
192.168.254.22 comp2
[root@ct ~]# scp /etc/hosts [email protected]:/etc
[root@ct ~]# scp /etc/hosts [email protected]:/etc
2.5 進行免交互設置
節點之間相互設置
[root@ct ~]# ssh-keygen -t rsa
[root@ct ~]# ssh-copy-id comp1
[root@ct ~]# ssh-copy-id comp2
[root@comp1 ~]# ssh-keygen -t rsa
[root@comp1 ~]# ssh-copy-id ct
[root@comp1 ~]# ssh-copy-id comp2
[root@comp2 ~]# ssh-keygen -t rsa
[root@comp2 ~]# ssh-copy-id ct
[root@comp2 ~]# ssh-copy-id comp1
2.6 ntp時間同步
同步思路:
控制節點去同步阿里雲時間
計算節點去同步控制節點
- 控制節點(也是網絡節點)
[root@ct ~]# ntpdate ntp.aliyun.com
bash: ntpdate: command not found
[root@ct ~]# yum install -y ntpdate
[root@ct ~]# ntpdate ntp.aliyun.com
28 Feb 19:51:29 ntpdate[45686]: adjust time server 203.107.6.88 offset 0.007953 sec
[root@ct ~]# ntpdate ntp.aliyun.com >> /var/log/ntpdate.log
[root@ct ~]# crontab -e
*/10 * * * * /usr/sbin/ntpdate ntp.aliyun.com >> /var/log/ntpdate.log
//wq保存退出
no crontab for root - using an empty one
crontab: installing new crontab
[root@ct ~]# systemctl restart crond
[root@ct ~]# systemctl enable crond
接下來控制節點去做ntpd服務
[root@ct ~]# yum install ntp -y
[root@ct ~]# vi /etc/ntp.conf
8 restrict default nomodify
17 restrict 192.168.254.0 mask 255.255.255.0 nomodify notrap
21 server 0.centos.pool.ntp.org iburst //21-24刪除
22 server 1.centos.pool.ntp.org iburst
23 server 2.centos.pool.ntp.org iburst
24 server 3.centos.pool.ntp.org iburst
//在刪除的位置增加下面內容
fudge 127.127.1.0 stratum 10
server 127.127.1.0
[root@ct ~]# systemctl disable chronyd.service
Removed symlink /etc/systemd/system/multi-user.target.wants/chronyd.service.
[root@ct ~]# systemctl stop chronyd.service
[root@ct ~]# systemctl restart ntpd
[root@ct ~]# systemctl enable ntpd
Created symlink from /etc/systemd/system/multi-user.target.wants/ntpd.service to /usr/lib/systemd/system/ntpd.service.
- 計算節點去同步控制節點
[root@comp1 ~]# ntpdate ct
bash: ntpdate: command not found
沒有命令,因爲計算節點都是沒有網絡的,所以去掛載鏡像源,順便先把yum的緩存功能開啓(三個節點都開啓)
[root@ct ~]# vi /etc/yum.conf
cachedir=/var/cache/yum/$basearch/$releasever //此處爲緩存軟件包所在位置
keepcache=1 //改爲1,開啓緩存軟件包功能
sed -i 's/^keepcache=0/keepcache=1/' /etc/yum.conf
搭建yum本地倉庫
[root@comp1 ~]# mkdir /centosjxy
[root@comp1 ~]# vi /etc/fstab
/dev/sr0 /centosjxy iso9660 defaults 0 0
[root@comp1 ~]# mount -a
mount: /dev/sr0 is write-protected, mounting read-only
[root@comp1 ~]# df -Th
Filesystem Type Size Used Avail Use% Mounted on
/dev/sr0 iso9660 4.4G 4.4G 0 100% /centosjxy
[root@comp1 ~]# cd /etc/yum.repos.d/
[root@comp1 yum.repos.d]# ls
CentOS-Base.repo CentOS-Debuginfo.repo CentOS-Media.repo CentOS-Vault.repo
CentOS-CR.repo CentOS-fasttrack.repo CentOS-Sources.repo
[root@comp1 yum.repos.d]# mkdir bak
[root@comp1 yum.repos.d]# mv * bak/
mv: cannot move ‘bak’ to a subdirectory of itself, ‘bak/bak’
[root@comp1 yum.repos.d]# ls
bak
[root@comp1 yum.repos.d]# vi centosjxy.repo
[centosjxy]
name=centosjxy
baseurl=file:///centosjxy
enabled=1
gpgcheck=0
[root@comp1 yum.repos.d]# yum clean all
[root@comp1 yum.repos.d]# yum makecache
備註:因爲打算將rocky的緩存包做成工具包,用於以後可以本地部署,
所以,接下來的操作不可以再yum clean all,只需yum makecache即可
安裝ntpdate,與控制節點同步,comp2做同樣操作
[root@comp1 yum.repos.d]# cd ~
[root@comp1 ~]# yum install ntpdate -y
[root@comp1 ~]# ntpdate ct
28 Feb 20:17:48 ntpdate[48098]: step time server 192.168.254.20 offset -28809.873155 sec
[root@comp1 ~]# ntpdate ct >> /var/log/ntpdate.log
[root@comp1 ~]# crontab -e
*/10 * * * * /usr/sbin/ntpdate ct >> /var/log/ntpdate.log
[root@comp1 ~]# systemctl restart crond
[root@comp1 ~]# systemctl enable crond
[root@comp1 ~]# systemctl disable chronyd.service
Removed symlink /etc/systemd/system/multi-user.target.wants/chronyd.service.
[root@comp1 ~]# systemctl stop chronyd.service
接下來就可以拍一個快照了
之後的操作就是配置yum倉庫,進行openstack在線安裝
三:yum源配置(三個節點都做)
這裏若是將源指向本地,便是本地部署安裝
在線安裝需要每個節點添加nat網卡
給計算節點添加nat網卡
[root@comp1 ~]# cd /etc/sysconfig/network-scripts/
[root@comp1 network-scripts]# vi ifcfg-eth1
TYPE=Ethernet
PROXY_METHOD=none
BROWSER_ONLY=no
BOOTPROTO=static
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
NAME=eth1
UUID=26851f97-893f-4f88-b911-96d074f551fe
DEVICE=eth1
ONBOOT=yes
IPADDR=192.168.247.21 //comp2設爲192.168.247.22
NETMASK=255.255.255.0
GATEWAY=192.168.247.2
DNS1=8.8.8.8
DNS2=114.114.114.114
[root@comp1 network-scripts]# systemctl restart network
[root@comp1 network-scripts]# ping www.baidu.com
PING www.a.shifen.com (180.101.49.12) 56(84) bytes of data.
64 bytes from 180.101.49.12 (180.101.49.12): icmp_seq=1 ttl=128 time=5.11 ms
64 bytes from 180.101.49.12 (180.101.49.12): icmp_seq=2 ttl=128 time=4.93 ms
64 bytes from 180.101.49.12 (180.101.49.12): icmp_seq=3 ttl=128 time=5.38 ms
配置yum在線倉庫
[root@ct ~]# cd /etc/yum.repos.d/
You have new mail in /var/spool/mail/root
[root@ct yum.repos.d]# ls
CentOS-Base.repo CentOS-Debuginfo.repo CentOS-Media.repo CentOS-Vault.repo
CentOS-CR.repo CentOS-fasttrack.repo CentOS-Sources.repo
[root@ct yum.repos.d]# curl -o /etc/yum.repos.d/CentOS.repo http://mirrors.aliyun.com/repo/Centos-7.repo
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 2523 100 2523 0 0 8841 0 --:--:-- --:--:-- --:--:-- 8883
You have new mail in /var/spool/mail/root
[root@ct yum.repos.d]# ls
CentOS.repo //增加文件
[root@ct yum.repos.d]# vi openstack-rocky.repo
[rocky]
name=rocky
baseurl=https://mirrors.aliyun.com/centos/7/cloud/x86_64/openstack-rocky/
enabled=1
gpgcheck=0
[root@ct yum.repos.d]# scp openstack-rocky.repo [email protected]:/etc/yum.repos.d/
[root@ct yum.repos.d]# scp openstack-rocky.repo [email protected]:/etc/yum.repos.d/
[root@ct yum.repos.d]# scp CentOS.repo [email protected]:/etc/yum.repos.d/
[root@ct yum.repos.d]# scp CentOS.repo [email protected]:/etc/yum.repos.d/
[root@ct ~]# yum install -y centos-release-openstack-rocky
[root@ct yum.repos.d]# yum makecache
四:配置openstack-packstack
接下來回到控制節點,安裝openstack-packstack
[root@ct yum.repos.d]# cd ~
[root@ct ~]# cat /etc/redhat-release
CentOS Linux release 7.7.1908 (Core)
[root@ct ~]# yum -y install openstack-packstack
生成應答文件並修改
[root@ct yum.repos.d]# cd ~
[root@ct ~]# packstack --gen-answer-file=openstack.txt
[root@control ~]# sed -i -r 's/(.+_PW)=.+/\1=123123/' openstack.txt
[root@control ~]# sed -i -r 's/192.168.247.20/192.168.254.20/g' openstack.txt
[root@ct ~]# vi openstack.txt
19 CONFIG_MARIADB_INSTALL=y //mariadb數據庫默認安裝,是
22 CONFIG_GLANCE_INSTALL=y //glance鏡像組件,開啓
25 CONFIG_CINDER_INSTALL=y //cinder塊存儲,開啓
29 CONFIG_MANILA_INSTALL=n //manila組件是openstack的擴展系統,默認是N,不需要更改
32 CONFIG_NOVA_INSTALL=y //nova計算組件,開啓
35 CONFIG_NEUTRON_INSTALL=y //neutron網絡組件,開啓
38 CONFIG_HORIZON_INSTALL=y //horizon控制檯組件,開啓
41 CONFIG_SWIFT_INSTALL=n //swift對象存儲,默認是Y,但是在生產環境中一般不裝,選n
46 CONFIG_CEILOMETER_INSTALL=y //ceilometer計費服務,開啓
50 CONFIG_AODH_INSTALL=n //aodh組件,改爲n
53 CONFIG_PANKO_INSTALL=n //panko組件,n
60 CONFIG_HEAT_INSTALL=n //heat編排工具組件,默認是n,不改
94 CONFIG_CONTROLLER_HOST=192.168.254.20 //指定控制節點IP地址
97 CONFIG_COMPUTE_HOST=192.168.254.21,192.168.254.22 //指定計算節點
101 CONFIG_NETWORK_HOSTS=192.168.254.20 //指定網絡節點
557 CONFIG_CINDER_VOLUMES_SIZE=5G //系統在創建cinder組件時會創建一個20G卷,虛擬機空間有限,放小一點
778 CONFIG_NEUTRON_METADATA_PW=123123 //修改網絡metadata的密碼
782 CONFIG_LBAAS_INSTALL=y //lbaas負載均衡組件,必須要裝
790 CONFIG_NEUTRON_FWAAS=y //網絡防火牆組件,必須要裝
794 CONFIG_NEUTRON_VPNAAS=y //網絡VPN組件,必須要裝
817 CONFIG_NEUTRON_ML2_FLAT_NETWORKS=physnet1 //flat網絡這邊要設置物理網卡名字
862 CONFIG_NEUTRON_OVS_BRIDGE_MAPPINGS=physnet1:br-ex //ovs_bridge_mappings這邊要設置物理網卡的名字
873 CONFIG_NEUTRON_OVS_BRIDGE_IFACES=br-ex:eth1 //ovs_bridge_ifaces這邊br-ex:eth1是網絡節點的nat網卡
1185 CONFIG_PROVISION_DEMO=n //關閉在線下載一個demo測試的鏡像,這裏把它關掉
三個節點都增加網關
[root@ct ~]# vi /etc/sysconfig/network-scripts/ifcfg-eth0
GATEWAY=192.168.254.1
//增加
[root@ct ~]# systemctl restart network
完成上述配置之後,再統一將原本存在的(192.168.247.20)IP地址改爲控制節點的IP
[root@control ~]# sed -i -r 's/(.+_PW)=.+/\1=123123/' openstack.txt
[root@control ~]# sed -i -r 's/192.168.247.20/192.168.254.20/g' openstack.txt
[root@control ~]# grep -vE "^#|^$" openstack.txt > openstackbak.txt
用下載工具把openstackbak.txt下載後實驗的時候做比對用
開始部署
[root@ct ~]# packstack --answer-file=openstack.txt
動態查看日誌安裝情況
[root@ct ~]# tail -f /var/log/messages
成功
製作r版本地包源
安裝createrepo工具
[root@ct ~]# yum install createrepo -y
控制節點和計算節點1創建存放所有rpm包的文件夾
[root@ct ~]# mkdir /opt/openstackct
[root@ct ~]# find /var/cache/yum/x86_64/7/ -name '*.rpm' -exec cp {} /opt/openstackct \;
[root@comp1 base]# mkdir /opt/openstackcomp1
[root@comp1 base]# find /var/cache/yum/x86_64/7/ -name '*.rpm' -exec cp {} /opt/openstackcomp1 \;
計算節點的openstack1目錄移動到控制節點
[root@comp1 openstackcomp1]# cd /
You have new mail in /var/spool/mail/root
[root@comp1 /]# scp -r /opt/openstackcomp1 root@ct:/opt
控制節點將所有包整合到openstack目錄
[root@ct opt]# cp -f /opt/openstackcomp1/* /opt/openstackct
控制節點封裝rpm包並打包
[root@ct opt]# cd /opt/openstackct
[root@ct openstackct]# createrepo ./
[root@ct openstackct]# cd ..
[root@ct opt]# tar zcvf openstack-rocky-gsy.tar.gz openstackct
查看
[root@ct opt]# ls
openstack openstackcomp1 openstackct openstack-rocky-gsy.tar.gz
然後把他撈出虛擬機放到宿主機即可
注意:在部署完成openstack之後,若是重啓服務器後,發現計算節點沒有和控制節點同步,進行手動同步時,反饋找不到服務時,有可能是iptables開啓了
控制節點將所有包整合到openstack目錄
[root@ct opt]# cp -f /opt/openstackcomp1/* /opt/openstackct
控制節點封裝rpm包並打包
[root@ct opt]# cd /opt/openstackct
[root@ct openstackct]# createrepo ./
[root@ct openstackct]# cd ..
[root@ct opt]# tar zcvf openstack-rocky-gsy.tar.gz openstackct
查看
[root@ct opt]# ls
openstack openstackcomp1 openstackct openstack-rocky-gsy.tar.gz
然後把他撈出虛擬機放到宿主機即可