一、環境準備
1.關閉防火牆
systemctl stop firewalld
systemctl disable firewalld
2.關閉swap
swapoff -a
vi /etc/fstab
刪除/dev/mapper/centos-swap swap swap defaults 0 0
[root@k8s-master1 ~]# free -m
total used free shared buff/cache available
Mem: 1823 123 1468 8 231 1516
Swap: 0 0 0
--可以看到Swap爲0
3.配置主機名
hostnamectl set-hostname k8s-master1
hostnamectl set-hostname k8s-node1
hostnamectl set-hostname k8s-node2
4.配置名稱解析
vi /etc/hosts
192.168.142.115 k8s-master1
192.168.142.116 k8s-master2
192.168.142.118 k8s-node1
192.168.142.119 k8s-node2
5.關閉selinux
setenforce 0
vi /etc/selinux/config
SELINUX=disabled
6.配置時間同步
選擇一個節點作爲服務端,其他節點爲客戶端
master1爲時間服務器的服務端
其他的爲時間服務器的客戶端
1)配置k8s-master1
yum install chrony -y
vi /etc/chrony.conf
server 127.127.1.0 iburst
#server 1.centos.pool.ntp.org iburst
#server 2.centos.pool.ntp.org iburst
#server 3.centos.pool.ntp.org iburst
allow 192.168.142.0/24
local stratum 10
[root@k8s-master1 etc]# systemctl start chronyd
[root@k8s-master1 etc]# systemctl enable chronyd
[root@k8s-master1 etc]# ss -unl | grep 123
[root@k8s-master1 etc]# systemctl restart chronyd
[root@k8s-master1 etc]# ss -unl | grep 123
UNCONN 0 0 *:123
2)配置k8s-node1等
yum install chrony -y
vi /etc/chrony.conf
server 192.168.142.115 iburst
#server 1.centos.pool.ntp.org iburst
#server 2.centos.pool.ntp.org iburst
#server 3.centos.pool.ntp.org iburst
[root@k8s-node1 etc]# systemctl start chronyd
[root@k8s-node1 etc]# systemctl enable chronyd
[root@k8s-node1 etc]# systemctl restart chronyd
查看是否連接master
chronyc sources
二、安裝etcd
1.給etcd頒發證書
1)創建證書頒發機構
2)填寫表單–寫明etcd所在的節點的ip
3)向證書頒發機構申請證書
第一步:上傳TLS安裝包
傳到/root下
略
第二步:
# tar xvf /root/TLS.tar.gz
# cd /root/TLS
# ./cfssl.sh
# cd etcd
# vim server-csr.json
修改host中的IP地址,這裏的IP是etcd所在節點的IP地址
{
"CN": "etcd",
"hosts": [
"192.168.31.63",
"192.168.31.65",
"192.168.31.66"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "BeiJing",
"ST": "BeiJing"
}
]
}
# ./generate_etcd_cert.sh
# ls *pem
ca-key.pem ca.pem server-key.pem server.pem
ca.pem:證書-公鑰
ca-key.pem:私鑰
任何人申請證書,需要有ca.pem
2.安裝etcd
etcd在master1,node1,node2上安裝
解壓之後,會生成一個文件etcd.service和一個目錄etcd,
etcd.service是systemd服務管理腳本。
centos7:systemd服務管理腳本在哪個目錄?
/usr/lib/systemd/system
--將etcd加入systemctl管理
cp etcd.service /usr/lib/systemd/system/
--修改etcd配置文件
cp -r etcd /opt/
vi /opt/etcd/cfg/etcd.conf
--修改證書
cd /root/TLS/etcd/
\cp ca.pem server.pem server-key.pem /opt/etcd/ssl/
--將etcd管理腳本和程序目錄發送到node1 和node2
# scp /usr/lib/systemd/system/etcd.service root@k8s-node1:/usr/lib/systemd/system/
# scp /usr/lib/systemd/system/etcd.service root@k8s-node2:/usr/lib/systemd/system/
# scp -r /opt/etcd/ root@k8s-node1:/opt/
# scp -r /opt/etcd/ root@k8s-node2:/opt/
---在node1和node2上修改etcd的配置文件
# vi /opt/etcd/cfg/etcd.conf
--在三個節點依次啓動etcd服務
# systemctl start etcd
# systemctl enable etcd
--檢查是否啓動成功
# /opt/etcd/bin/etcdctl --ca-file=/opt/etcd/ssl/ca.pem --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.142.115:2379,https://192.168.142.118:2379,https://192.168.142.119:2379" cluster-health
3.安裝master服務
(1)爲api server簽發證書
# cd /root/TLS/k8s/
# ./generate_k8s_cert.sh
(2)
#tar xvf k8s-master.tar.gz
#mv kube-apiserver.service kube-controller-manager.service kube-scheduler.service /usr/lib/systemd/system/
#mv kubernetes /opt/
# cp /root/TLS/k8s/{ca*pem,server.pem,server-key.pem} /opt/kubernetes/ssl/ -rvf
修改apiserver的配置文件
# vi /opt/kubernetes/cfg/kube-apiserver.conf
KUBE_APISERVER_OPTS="--logtostderr=false \ --不將錯誤信息打印到標準窗口上
--v=2 \ --日誌級別,越大越詳細
--log-dir=/opt/kubernetes/logs \--日誌目錄
KUBE_APISERVER_OPTS="--logtostderr=false \
--v=2 \
--log-dir=/opt/kubernetes/logs \
--etcd-servers=https://192.168.142.115:2379,https://192.168.142.118:2379,https://192.168.142.119:2379 \
--bind-address=192.168.142.115 \ --監聽安全端口的IP地址(本機IP)。關聯的接口必須能被集羣的其他部分以及CLI/web客戶機訪問
--secure-port=6443 \ --安全監聽端口
--advertise-address=192.168.142.115 \ --向集羣通告的本機地址
--allow-privileged=true \ --是否允許 privileged 容器運行,允許特權模式的容器,是否允許超級管理員創建容器
--service-cluster-ip-range=10.0.0.0/24 \ --service 要使用的網段,使用 CIDR 格式,參考 kubernetes 中 service 的定義
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \ --除了默認啓用的插件,還應該額外啓動的admission插件
--authorization-mode=RBAC,Node \
--enable-bootstrap-token-auth=true \--基於bootstrap token方式進行自動頒發證書
--token-auth-file=/opt/kubernetes/cfg/token.csv \
--service-node-port-range=30000-32767 \
--kubelet-client-certificate=/opt/kubernetes/ssl/server.pem \
--kubelet-client-key=/opt/kubernetes/ssl/server-key.pem \
--訪問kubelet所使用的證書
--tls-cert-file=/opt/kubernetes/ssl/server.pem \
--tls-private-key-file=/opt/kubernetes/ssl/server-key.pem \
--client-ca-file=/opt/kubernetes/ssl/ca.pem \
--service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \
--訪問api server所需要的證書
--etcd-cafile=/opt/etcd/ssl/ca.pem \
--etcd-certfile=/opt/etcd/ssl/server.pem \
--etcd-keyfile=/opt/etcd/ssl/server-key.pem \
--訪問etcd所需要的證書
--audit-log-maxage=30 \
--audit-log-maxbackup=3 \
--audit-log-maxsize=100 \
--audit-log-path=/opt/kubernetes/logs/k8s-audit.log"
修改kube-controller-manager:
vi /opt/kubernetes/cfg/kube-controller-manager.conf
KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=false \
--v=2 \
--log-dir=/opt/kubernetes/logs \
--leader-elect=true \ --啓動leader選舉?有多個api server,會自動選擇一個
--master=127.0.0.1:8080 \指定了api server的地址
--address=127.0.0.1 \指定controller-manager的地址
--allocate-node-cidrs=true \ --集羣內的pod的CIDR範圍,需要 --allocate-node-cidrs設爲true
--cluster-cidr=10.244.0.0/16 \ --集羣內的pod的CIDR範圍,需要 --allocate-node-cidrs設爲true
--service-cluster-ip-range=10.0.0.0/24 \ --注意kube-apiserver.conf裏的值
--cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \
--cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem \
--root-ca-file=/opt/kubernetes/ssl/ca.pem \
--service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem \
--experimental-cluster-signing-duration=87600h0m0s" --證書有效期,這裏與生成證書時ca-config.json保持一致
修改kube-scheduler:
vi kube-scheduler.conf
KUBE_SCHEDULER_OPTS="--logtostderr=false \
--v=2 \
--log-dir=/opt/kubernetes/logs \
--leader-elect \ --選擇leader
--master=127.0.0.1:8080 \ --Kubernetes api server的地址
--address=127.0.0.1" --kube-scheduler 監聽地址
啓動master
# systemctl start kube-apiserver
# systemctl enable kube-apiserver
# systemctl start kube-scheduler
# systemctl enable kube-scheduler
# systemctl start kube-controller-manager
# systemctl enable kube-controller-manager
# cp /opt/kubernetes/bin/kubectl /bin/
驗證是否啓動成功
root 1617 4.3 18.9 549412 353504 ? Ssl 15:49 0:10 /opt/kubernetes/bin/kube-apiserver --logtostderr=false --v=2 --log-dir=/opt/kubernetes/logs --etcd-servers=https://192.168.142.115:2379,https://192.168.142.118:2379,https://192.168.142.119:2379 --bind-address=192.168.142.115 --secure-port=6443 --advertise-address=192.168.142.115 --allow-privileged=true --service-cluster-ip-range=10.0.0.0/24 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction --authorization-mode=RBAC,Node --enable-bootstrap-token-auth=true --token-auth-file=/opt/kubernetes/cfg/token.csv --service-node-port-range=30000-32767 --kubelet-client-certificate=/opt/kubernetes/ssl/server.pem --kubelet-client-key=/opt/kubernetes/ssl/server-key.pem --tls-cert-file=/opt/kubernetes/ssl/server.pem --tls-private-key-file=/opt/kubernetes/ssl/server-key.pem --client-ca-file=/opt/kubernetes/ssl/ca.pem --service-account-key-file=/opt/kubernetes/ssl/ca-key.pem --etcd-cafile=/opt/etcd/ssl/ca.pem --etcd-certfile=/opt/etcd/ssl/server.pem --etcd-keyfile=/opt/etcd/ssl/server-key.pem --audit-log-maxage=30 --audit-log-maxbackup=3 --audit-log-maxsize=100 --audit-log-path=/opt/kubernetes/logs/k8s-audit.log
root 1681 0.9 3.2 221368 60796 ? Ssl 15:49 0:02 /opt/kubernetes/bin/kube-controller-manager --logtostderr=false --v=2 --log-dir=/opt/kubernetes/logs --leader-elect=true --master=127.0.0.1:8080 --address=127.0.0.1 --allocate-node-cidrs=true --cluster-cidr=10.244.0.0/16 --service-cluster-ip-range=10.0.0.0/24 --cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem --cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem --root-ca-file=/opt/kubernetes/ssl/ca.pem --service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem --experimental-cluster-signing-duration=87600h0m0s
root 1707 4.1 1.4 146820 26556 ? Ssl 15:52 0:01 /opt/kubernetes/bin/kube-scheduler --logtostderr=false --v=2 --log-dir=/opt/kubernetes/logs --leader-elect --master=127.0.0.1:8080 --address=127.0.0.1
查看啓動日誌:
[root@k8s-master1 cfg]# tail -f /opt/kubernetes/logs/kube-apiserver.INFO
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
healthz check failed
I0410 15:49:20.149045 1617 storage_rbac.go:308] created rolebinding.rbac.authorization.k8s.io/system:controller:cloud-provider in kube-system
I0410 15:49:20.189575 1617 storage_rbac.go:308] created rolebinding.rbac.authorization.k8s.io/system:controller:token-cleaner in kube-system
W0410 15:49:20.260004 1617 lease.go:222] Resetting endpoints for master service "kubernetes" to [192.168.142.115]
I0410 15:49:20.260783 1617 controller.go:606] quota admission added evaluator for: endpoints
I0410 15:49:41.909126 1617 cacher.go:771] cacher (*rbac.ClusterRole): 1 objects queued in incoming channel.
I0410 15:49:42.398822 1617 controller.go:606] quota admission added evaluator for: serviceaccounts
[root@k8s-master1 cfg]# tail -f /opt/kubernetes/logs/kube-scheduler.INFO
I0410 15:52:50.521734 1707 defaults.go:91] TaintNodesByCondition is enabled, PodToleratesNodeTaints predicate is mandatory
I0410 15:52:50.521743 1707 server.go:162] Starting Kubernetes Scheduler version v1.16.0
I0410 15:52:50.521928 1707 factory.go:294] Creating scheduler from algorithm provider 'DefaultProvider'
I0410 15:52:50.521937 1707 factory.go:382] Creating scheduler with fit predicates 'map[CheckNodeUnschedulable:{} CheckVolumeBinding:{} GeneralPredicates:{} MatchInterPodAffinity:{} MaxAzureDiskVolumeCount:{} MaxCSIVolumeCountPred:{} MaxEBSVolumeCount:{} MaxGCEPDVolumeCount:{} NoDiskConflict:{} NoVolumeZoneConflict:{} PodToleratesNodeTaints:{}]' and priority functions 'map[BalancedResourceAllocation:{} ImageLocalityPriority:{} InterPodAffinityPriority:{} LeastRequestedPriority:{} NodeAffinityPriority:{} NodePreferAvoidPodsPriority:{} SelectorSpreadPriority:{} TaintTolerationPriority:{}]'
W0410 15:52:50.523427 1707 authorization.go:47] Authorization is disabled
W0410 15:52:50.523434 1707 authentication.go:79] Authentication is disabled
I0410 15:52:50.523446 1707 deprecated_insecure_serving.go:51] Serving healthz insecurely on 127.0.0.1:10251
I0410 15:52:50.524314 1707 secure_serving.go:123] Serving securely on [::]:10259
I0410 15:52:51.530570 1707 leaderelection.go:241] attempting to acquire leader lease kube-system/kube-scheduler...
I0410 15:53:09.113793 1707 leaderelection.go:251] successfully acquired lease kube-system/kube-scheduler
[root@k8s-master1 cfg]# tail -f /opt/kubernetes/logs/kube-controller-manager.INFO
Resource=csidrivers storage.k8s.io/v1beta1, Resource=csinodes], removed: []
I0410 15:49:43.298376 1681 shared_informer.go:197] Waiting for caches to sync for garbage collector
I0410 15:49:43.398504 1681 shared_informer.go:204] Caches are synced for garbage collector
I0410 15:49:43.398517 1681 garbagecollector.go:242] synced garbage collector
kubectl 管理工具
[root@k8s-master1 cfg]# kubectl get cs
NAME AGE
scheduler <unknown>
controller-manager <unknown>
etcd-2 <unknown>
etcd-1 <unknown>
etcd-0 <unknown>
配置tls 基於bootstrap自動頒發證書
# kubectl create clusterrolebinding kubelet-bootstrap \
--clusterrole=system:node-bootstrapper \
--user=kubelet-bootstrap
–對應的是: /opt/kubernetes/cfg/kube-apiserver.conf 中
--enable-bootstrap-token-auth=true
--token-auth-file=/opt/kubernetes/cfg/token.csv
–完成訪問kubelet自動頒發證書
===============
加密
(1)對稱加密:加密與解密用相同的密鑰
(2)非對稱加密:加密與解密使用的密鑰對,發送使用公鑰
(3)單向加密:只能加密,不能解密,如:md5
SSL:
(1)證書來源
從網絡第三方機構購買,通用且受瀏覽器/系統信任
自己給自己頒發證書-自簽證書
PKI(Public Key Infrastructure)