kubernetes v1.18.2 二進制雙棧 kubelet 部署

系統參數優化

# Kernel tuning for dual-stack k8s nodes: IPv4+IPv6 forwarding, bridge
# netfilter hooks for kube-proxy, larger socket buffers and inotify limits.
cat << EOF | tee 99-k8s.conf
#sysctls for k8s node config
net.ipv4.tcp_slow_start_after_idle=0
net.core.rmem_max=16777216
fs.inotify.max_user_watches=524288
kernel.softlockup_all_cpu_backtrace=1
kernel.softlockup_panic=1
fs.file-max=2097152
fs.inotify.max_user_instances=8192
fs.inotify.max_queued_events=16384
vm.max_map_count=262144
fs.may_detach_mounts=1
net.core.netdev_max_backlog=16384
net.ipv4.tcp_wmem=4096 12582912 16777216
net.core.wmem_max=16777216
net.core.somaxconn=32768
net.ipv4.ip_forward=1
net.ipv4.tcp_max_syn_backlog=8096
net.ipv6.conf.all.disable_ipv6 = 0
net.ipv6.conf.default.disable_ipv6 = 0
net.ipv6.conf.lo.disable_ipv6 = 0
net.ipv6.conf.all.forwarding=1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-arptables=1
net.bridge.bridge-nf-call-iptables=1
net.ipv4.tcp_rmem=4096 12582912 16777216
EOF
# All cluster nodes (3 masters + 2 workers).
hosts=(192.168.2.175 192.168.2.176 192.168.2.177 192.168.2.185 192.168.2.187)
# Distribute the file (plain file: -r not needed) and apply on every node.
for host in "${hosts[@]}"; do
  scp 99-k8s.conf "${host}:/etc/sysctl.d/"
  ssh "${host}" sysctl -p /etc/sysctl.d/99-k8s.conf
done
# Apply locally as well; install the file first — the original ran
# "sysctl -p /etc/sysctl.d/99-k8s.conf" without ever copying it there.
cp 99-k8s.conf /etc/sysctl.d/
sysctl -p /etc/sysctl.d/99-k8s.conf

node 節點依賴安裝

# Run on every worker/master node.
# CentOS 8: enable EPEL and the PowerTools repo (provides some build deps),
# then install the runtime dependencies for kubelet/kube-proxy/IPVS/NFS.
# NOTE(review): on CentOS 8.3+ the repo file is CentOS-Linux-PowerTools.repo
# and the repo id is lowercase "powertools" — verify before running the sed.
dnf install -y epel-release
sed -i "s/enabled=0/enabled=1/" /etc/yum.repos.d/CentOS-PowerTools.repo
dnf  -y update
 dnf install -y dnf-utils ipvsadm telnet wget net-tools \
                conntrack ipset jq iptables curl sysstat \
                libseccomp socat nfs-utils fuse lvm2 device-mapper-persistent-data fuse-devel
# Ubuntu: equivalent dependency set (libseccomp2/nfs-common are the
# Debian-world names for libseccomp/nfs-utils above).
apt update 
apt upgrade -y
apt install -y ipvsadm telnet wget net-tools conntrack ipset \
 jq iptables curl sysstat libltdl7 libseccomp2 socat nfs-common \
 fuse ceph-common software-properties-common

docker 部署

# Run on every node.
# CentOS: register the Aliyun mirror of the docker-ce yum repo.
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
# Docker daemon config: custom data/exec roots under /apps, IPv6 enabled,
# default bridge disabled (CNI manages pod networking), bounded json-file
# logs, raised nofile/nproc/core ulimits, dockerd protected from the OOM killer.
mkdir -p /etc/docker
cat << EOF | tee /etc/docker/daemon.json
{
    "max-concurrent-downloads": 20,
    "data-root": "/apps/docker/data",
    "exec-root": "/apps/docker/root",
    "registry-mirrors": ["https://docker.mirrors.ustc.edu.cn"],
    "ipv6": true,
    "log-driver": "json-file",
    "bridge": "none", 
    "oom-score-adjust": -1000,
    "debug": false,
    "log-opts": {
        "max-size": "100M",
        "max-file": "10"
    },
    "default-ulimits": {
        "nofile": {
            "Name": "nofile",
            "Hard": 65535,
            "Soft": 65535
        },
        "nproc": {
            "Name": "nproc",
            "Hard": 65535,
            "Soft": 65535
        },
       "core": {
            "Name": "core",
            "Hard": -1,
            "Soft": -1
      }

    }
}
EOF
 # Install docker; containerd.io is pinned to the el7 1.2.13 build —
 # presumably because CentOS 8 repos lacked a compatible containerd at the
 # time this was written; verify the version is still appropriate.
dnf install -y   http://mirrors.aliyun.com/docker-ce/linux/centos/7/x86_64/stable/Packages/containerd.io-1.2.13-3.1.el7.x86_64.rpm
dnf install -y docker-ce
# Reload systemd unit definitions.
 systemctl daemon-reload
# (Re)start docker so it picks up daemon.json.
 systemctl restart docker
# Start docker automatically on boot.
systemctl enable docker
# Ubuntu: remove any distro docker packages, add the Aliyun docker-ce apt
# repo, then install docker-ce + containerd.
sudo apt-get remove docker docker-engine docker.io containerd runc
sudo apt-get update
sudo apt-get install \
    apt-transport-https \
    ca-certificates \
    curl \
    gnupg-agent \
    software-properties-common
curl -fsSL http://mirrors.aliyun.com/docker-ce/linux/ubuntu/gpg | sudo apt-key add -
 sudo add-apt-repository \
   "deb [arch=amd64] http://mirrors.aliyun.com/docker-ce/linux/ubuntu \
   $(lsb_release -cs) \
   stable"
sudo apt-get update
sudo apt-get install docker-ce docker-ce-cli containerd.io
# Same daemon.json as the CentOS nodes: custom roots, IPv6, no default
# bridge (CNI owns networking), bounded logs, raised ulimits.
mkdir -p /etc/docker
cat << EOF | tee /etc/docker/daemon.json
{
    "max-concurrent-downloads": 20,
    "data-root": "/apps/docker/data",
    "exec-root": "/apps/docker/root",
    "registry-mirrors": ["https://docker.mirrors.ustc.edu.cn"],
    "ipv6": true,
    "log-driver": "json-file",
    "bridge": "none", 
    "oom-score-adjust": -1000,
    "debug": false,
    "log-opts": {
        "max-size": "100M",
        "max-file": "10"
    },
    "default-ulimits": {
        "nofile": {
            "Name": "nofile",
            "Hard": 65535,
            "Soft": 65535
        },
        "nproc": {
            "Name": "nproc",
            "Hard": 65535,
            "Soft": 65535
        },
       "core": {
            "Name": "core",
            "Hard": -1,
            "Soft": -1
      }

    }
}
EOF
# Reload systemd unit definitions.
 systemctl daemon-reload
# (Re)start docker so it picks up daemon.json.
 systemctl restart docker
# Start docker automatically on boot.
systemctl enable docker
# Identical configuration for both Ubuntu and CentOS nodes.
# Auto-load the kernel modules needed by kube-proxy (IPVS) and bridge
# netfilter at boot via systemd-modules-load.
cat << EOF | tee /etc/modules-load.d/k8s-basic-modules.conf
br_netfilter
nf_conntrack
nf_conntrack_ipv4
nf_conntrack_ipv6
EOF
# NOTE(review): on kernels >= 4.19 (and RHEL/CentOS 8's 4.18, which backported
# the change) the per-family nf_conntrack_ipv4/ipv6 modules were merged into
# nf_conntrack and no longer exist; nf_conntrack is listed so conntrack still
# loads there — the per-family entries will simply fail on such kernels.

cat << EOF | tee /etc/modules-load.d/k8s-ipvs-modules.conf
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
EOF
# Load the modules now instead of waiting for a reboot (missing per-family
# conntrack modules on newer kernels are expected and tolerated).
systemctl restart systemd-modules-load.service || true

cni 部署

# Download and unpack the CNI plugin binaries (v0.8.5, linux/amd64).
mkdir -p cni
cd cni
wget https://github.com/containernetworking/plugins/releases/download/v0.8.5/cni-plugins-linux-amd64-v0.8.5.tgz
tar -xvf cni-plugins-linux-amd64-v0.8.5.tgz
rm -f cni-plugins-linux-amd64-v0.8.5.tgz
# Create the remote target directory and upload the binaries to every node.
hosts=(192.168.2.175 192.168.2.176 192.168.2.177 192.168.2.185 192.168.2.187)
for host in "${hosts[@]}"; do
  ssh "${host}" mkdir -p /apps/cni/bin
  scp -r ./* "${host}:/apps/cni/bin/"
done

bootstrap-kubeconfig 配置

# Load the environment variables prepared during the etcd deployment
# (HOST_PATH, CLUSTER_NAME, KUBE_API_KUBELET, TOKEN_ID, BOOTSTRAP_TOKEN).
source  ./environment.sh
cd "${HOST_PATH}"
# kubectl does not create missing parent directories for --kubeconfig.
mkdir -p "${HOST_PATH}/kubeconfig"
# Build the bootstrap kubeconfig: cluster, token credentials, context.
kubectl config set-cluster "${CLUSTER_NAME}" \
  --certificate-authority="${HOST_PATH}/cfssl/pki/k8s/k8s-ca.pem" \
  --embed-certs=true \
  --server="${KUBE_API_KUBELET}" \
  --kubeconfig="${HOST_PATH}/kubeconfig/bootstrap.kubeconfig"
# Client credentials: the bootstrap token (system:bootstrap:<token-id>).
kubectl config set-credentials "system:bootstrap:${TOKEN_ID}" \
  --token="${BOOTSTRAP_TOKEN}" \
  --kubeconfig="${HOST_PATH}/kubeconfig/bootstrap.kubeconfig"
# Context binding cluster + bootstrap user.
kubectl config set-context default \
  --cluster="${CLUSTER_NAME}" \
  --user="system:bootstrap:${TOKEN_ID}" \
  --kubeconfig="${HOST_PATH}/kubeconfig/bootstrap.kubeconfig"
# Select the context as default.
kubectl config use-context default --kubeconfig="${HOST_PATH}/kubeconfig/bootstrap.kubeconfig"
# Create remote directories and distribute bootstrap.kubeconfig.
hosts=(192.168.2.175 192.168.2.176 192.168.2.177 192.168.2.185 192.168.2.187)
for host in "${hosts[@]}"; do
  # Quote the remote command: in the original the unquoted && made the second
  # mkdir run on the LOCAL machine instead of on the node. /apps/k8s/bin is
  # included here because the kubelet binary is copied there later.
  ssh "${host}" 'mkdir -p /apps/k8s/{log,kubelet-plugins,conf,bin} && mkdir -p /apps/work/kubernetes/manifests'
  scp kubeconfig/bootstrap.kubeconfig "${host}:/apps/k8s/conf/"
done

kubelet bootstrap 相關配置

# Render the bootstrap-token Secret manifest (TOKEN_ID/TOKEN_SECRET come from
# environment.sh, sourced earlier). -p makes the mkdir idempotent and the
# absolute path matches the tee target (the original's bare "mkdir yaml"
# failed on re-run and relied on the cwd being ${HOST_PATH}).
mkdir -p "${HOST_PATH}/yaml"
cat << EOF | tee ${HOST_PATH}/yaml/bootstrap-secret.yaml
apiVersion: v1
kind: Secret
metadata:
  # Name MUST be of form "bootstrap-token-<token id>"
  name: bootstrap-token-${TOKEN_ID}
  namespace: kube-system

# Type MUST be 'bootstrap.kubernetes.io/token'
type: bootstrap.kubernetes.io/token
stringData:
  # Human readable description. Optional.
  description: "The default bootstrap token generated by 'kubelet '."

  # Token ID and secret. Required.
  token-id: ${TOKEN_ID}
  token-secret: ${TOKEN_SECRET}

  # Allowed usages.
  usage-bootstrap-authentication: "true"
  usage-bootstrap-signing: "true"

  # Extra groups to authenticate the token as. Must start with "system:bootstrappers:"
  auth-extra-groups: system:bootstrappers:worker,system:bootstrappers:ingress
EOF
# Render the cluster RBAC bindings used by the control-plane components:
# kube-controller-manager / kube-scheduler ClusterRole bindings, the
# auth-delegator binding, kubelet-API access for the apiserver's
# "kubernetes" client cert, and the system:nodes group binding.
cat << EOF | tee ${HOST_PATH}/yaml/kube-api-rbac.yaml
---
# kube-controller-manager 綁定
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: controller-node-clusterrolebing
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:kube-controller-manager
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: system:kube-controller-manager
---
# 創建kube-scheduler 綁定
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: scheduler-node-clusterrolebing
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:kube-scheduler
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: system:kube-scheduler
---
# 創建kube-controller-manager 到auth-delegator 綁定
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: controller-manager:system:auth-delegator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: system:kube-controller-manager
---
#授予 kubernetes 證書訪問 kubelet API 的權限
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kube-system-cluster-admin
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: system:serviceaccount:kube-system:default
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubelet-node-clusterbinding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:node
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:nodes
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kube-apiserver:kubelet-apis
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:kubelet-api-admin
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: kubernetes
EOF
# Render the TLS-bootstrapping RBAC: allow system:bootstrappers to create
# CSRs, auto-approve first-time node client CSRs, and auto-approve renewals
# of the kubelet client and serving (port 10250) certificates.
cat << EOF | tee ${HOST_PATH}/yaml/kubelet-bootstrap-rbac.yaml
---
# 允許 system:bootstrappers 組用戶創建 CSR 請求
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubelet-bootstrap
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:node-bootstrapper
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:bootstrappers
---
# 自動批准 system:bootstrappers 組用戶 TLS bootstrapping 首次申請證書的 CSR 請求
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: node-client-auto-approve-csr
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:nodeclient
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:bootstrappers
---
# 自動批准 system:nodes 組用戶更新 kubelet 自身與 apiserver 通訊證書的 CSR 請求
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: node-client-auto-renew-crt
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:nodes
---
# 自動批准 system:nodes 組用戶更新 kubelet 10250 api 端口證書的 CSR 請求
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: node-server-auto-renew-crt
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:selfnodeserver
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:nodes
---
EOF
# Submit the rendered manifests to the cluster.
kubectl apply -f yaml/bootstrap-secret.yaml
kubectl apply -f yaml/kube-api-rbac.yaml
kubectl apply -f yaml/kubelet-bootstrap-rbac.yaml 

kubelet 二進制文件準備

# Distribute the kubelet binary to every node.
cd "${HOST_PATH}/kubernetes/server/bin"
hosts=(192.168.2.175 192.168.2.176 192.168.2.177 192.168.2.185 192.168.2.187)
for host in "${hosts[@]}"; do
  # Ensure the target directory exists first: the earlier remote mkdir only
  # created log/kubelet-plugins/conf, so without this the scp would create a
  # regular FILE named /apps/k8s/bin instead of placing the binary inside it.
  ssh "${host}" mkdir -p /apps/k8s/bin
  scp kubelet "${host}:/apps/k8s/bin/"
done

kubelet 配置文件

# kubelet environment file for node 192.168.2.175.
# Per-node values: --node-ip and --hostname-override must be changed on every
# other node. The original kept these reminders as inline "# ..." text after
# the continuation backslash, which would have been written into the quoted
# KUBELET_OPTS value and corrupted the EnvironmentFile — they live out here now.
cat << EOF | tee /apps/k8s/conf/kubelet
KUBELET_OPTS="--bootstrap-kubeconfig=/apps/k8s/conf/bootstrap.kubeconfig \\
              --network-plugin=cni \\
              --cni-conf-dir=/apps/cni/etc/net.d \\
              --cni-bin-dir=/apps/cni/bin \\
              --kubeconfig=/apps/k8s/conf/kubelet.kubeconfig \\
              --node-ip=fc00:bd4:efa8:1001:5054:ff:fe49:9888 \\
              --hostname-override=k8s-master-1 \\
              --cert-dir=/apps/k8s/ssl \\
              --runtime-cgroups=/systemd/system.slice \\
              --root-dir=/apps/work/kubernetes/kubelet \\
              --log-dir=/apps/k8s/log \\
              --alsologtostderr=true \\
              --config=/apps/k8s/conf/kubelet.yaml \\
              --logtostderr=false \\
              --pod-infra-container-image=docker.io/juestnow/pause-amd64:3.1 \\
              --image-pull-progress-deadline=30s \\
              --v=2 \\
              --volume-plugin-dir=/apps/k8s/kubelet-plugins/volume"
EOF
# KubeletConfiguration (kubelet.config.k8s.io/v1beta1) for node 192.168.2.175.
# The inline reminder comments inside the generated YAML mark the per-node
# values that must be edited on every other node: address, healthzBindAddress,
# clusterDomain and clusterDNS (dual-stack: one IPv6 and one IPv4 DNS IP).
# Note the IPv6DualStack feature gate and webhook authn/authz with
# anonymous access disabled and readOnlyPort off.
cat << EOF | tee /apps/k8s/conf/kubelet.yaml
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
staticPodPath: "/apps/work/kubernetes/manifests"
syncFrequency: 30s
fileCheckFrequency: 20s
httpCheckFrequency: 20s
address: fc00:bd4:efa8:1001:5054:ff:fe49:9888 # 其它節點記得修改 記得刪除這個註釋
port: 10250
readOnlyPort: 0
tlsCipherSuites:
- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305
- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
- TLS_RSA_WITH_AES_256_GCM_SHA384
- TLS_RSA_WITH_AES_128_GCM_SHA256
rotateCertificates: true
authentication:
  x509:
    clientCAFile: "/apps/k8s/ssl/k8s/k8s-ca.pem"
  webhook:
    enabled: true
    cacheTTL: 2m0s
  anonymous:
    enabled: false
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
registryPullQPS: 5
registryBurst: 10
eventRecordQPS: 15
eventBurst: 30
enableDebuggingHandlers: true
healthzPort: 10248
healthzBindAddress: fc00:bd4:efa8:1001:5054:ff:fe49:9888 # 其它節點記得修改 記得刪除這個註釋
oomScoreAdj: -999
clusterDomain: cluster.local  # 集羣域名根據自己修改 記得刪除這個註釋
clusterDNS:
- 8888:8000::2 # dns IP根據自己修改 記得刪除這個註釋
- 10.66.0.2  # dns IP根據自己修改 記得刪除這個註釋
streamingConnectionIdleTimeout: 4h0m0s
nodeStatusUpdateFrequency: 10s
nodeStatusReportFrequency: 5m0s
nodeLeaseDurationSeconds: 40
imageMinimumGCAge: 2m0s
imageGCHighThresholdPercent: 70
imageGCLowThresholdPercent: 50
volumeStatsAggPeriod: 1m0s
kubeletCgroups: "/systemd/system.slice"
cgroupsPerQOS: true
cgroupDriver: cgroupfs
cpuManagerPolicy: none
cpuManagerReconcilePeriod: 10s
topologyManagerPolicy: none
runtimeRequestTimeout: 2m0s
hairpinMode: hairpin-veth
maxPods: 100
podPidsLimit: -1
resolvConf: "/etc/resolv.conf"
cpuCFSQuota: true
cpuCFSQuotaPeriod: 100ms
maxOpenFiles: 1000000
contentType: application/vnd.kubernetes.protobuf
kubeAPIQPS: 15
kubeAPIBurst: 30
serializeImagePulls: false
evictionHard:
  imagefs.available: 10%
  memory.available: 500Mi
  nodefs.available: 10%
evictionSoft:
  imagefs.available: 15%
  memory.available: 500Mi
  nodefs.available: 15%
evictionSoftGracePeriod:
  imagefs.available: 2m
  memory.available: 2m
  nodefs.available: 2m
evictionPressureTransitionPeriod: 20s
podsPerCore: 0
evictionMinimumReclaim:
  imagefs.available: 500Mi
  memory.available: 0Mi
  nodefs.available: 500Mi
enableControllerAttachDetach: true
makeIPTablesUtilChains: true
iptablesMasqueradeBit: 14
iptablesDropBit: 15
featureGates:
  EndpointSlice: true
  ServiceTopology: true
  IPv6DualStack: true
failSwapOn: false
containerLogMaxSize: 10Mi
containerLogMaxFiles: 5
configMapAndSecretChangeDetectionStrategy: Watch
systemReserved:
  cpu: 1000m
  ephemeral-storage: 1Gi
  memory: 1024Mi
kubeReserved:
  cpu: 500m
  ephemeral-storage: 1Gi
  memory: 512Mi
systemReservedCgroup: "/systemd/system.slice"
kubeReservedCgroup: "/systemd/system.slice"
enforceNodeAllocatable:
- pods
- kube-reserved
- system-reserved
allowedUnsafeSysctls:
- kernel.msg*
- kernel.shm*
- kernel.sem
- fs.mqueue.*
- net.*
EOF
# Other nodes: copy this 192.168.2.175 config and adjust the per-node values.

配置k8s-ha-master

# Project: https://github.com/qist/k8s/tree/master/dockerfile/k8s-ha-master
# CP_HOSTS lists master IP:port pairs (IPv6 addresses bracketed). The proxy
# listens on 6443 by default, so the apiservers here use 5443 — the listen
# port must not collide with any master's own port or the proxy fails to start.
# Deploy this static pod manifest on every node.
cat << EOF | tee /apps/work/kubernetes/manifests/k8s-ha-master.yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    component: k8s-ha-master
    tier: control-plane
  name: k8s-ha-master
  namespace: kube-system
spec:
  containers:
  - args:
    - "CP_HOSTS=[fc00:bd4:efa8:1001:5054:ff:fe49:9888]:5443,[fc00:bd4:efa8:1001:5054:ff:fe47:357b]:5443,[fc00:bd4:efa8:1001:5054:ff:fec6:74fb]:5443"
    image: juestnow/k8s-ha-master:1.17.9
    imagePullPolicy: IfNotPresent
    name: k8s-ha-master
    env:
    - name: CP_HOSTS
      value: "[fc00:bd4:efa8:1001:5054:ff:fe49:9888]:5443,[fc00:bd4:efa8:1001:5054:ff:fe47:357b]:5443,[fc00:bd4:efa8:1001:5054:ff:fec6:74fb]:5443"
  hostNetwork: true
  priorityClassName: system-cluster-critical
status: {}
EOF

創建 kubelet systemd文件

# kubelet systemd unit. Ordering on docker.service is added so kubelet does
# not start before the container runtime, and RestartSec avoids a tight
# restart loop while the k8s-ha-master static pod is still coming up.
cat << EOF | tee kubelet.service
[Unit]
Description=Kubernetes Kubelet
After=docker.service
Wants=docker.service
[Service]
LimitNOFILE=65535
LimitNPROC=65535
LimitCORE=infinity
LimitMEMLOCK=infinity
EnvironmentFile=-/apps/k8s/conf/kubelet
ExecStart=/apps/k8s/bin/kubelet \$KUBELET_OPTS
Restart=on-failure
RestartSec=5
KillMode=process
[Install]
WantedBy=multi-user.target
EOF
# Upload the unit file to every node (plain file: -r not needed).
hosts=(192.168.2.175 192.168.2.176 192.168.2.177 192.168.2.185 192.168.2.187)
for host in "${hosts[@]}"; do
  scp kubelet.service "${host}:/usr/lib/systemd/system"
done

啓動 kubelet

# Reload systemd, enable on boot, start, and report status on every node.
# Per-host command sequence is identical to the original; the loop replaces
# four hand-copied blocks of five ssh lines each.
hosts=(192.168.2.175 192.168.2.176 192.168.2.177 192.168.2.185 192.168.2.187)
for host in "${hosts[@]}"; do
  ssh "${host}" systemctl daemon-reload
  ssh "${host}" systemctl enable kubelet.service
  ssh "${host}" systemctl start kubelet.service
  ssh "${host}" systemctl status kubelet.service
done
# 這裏會出現不斷重啓 請等待 k8s-ha-master  pod 正常啓動 kubelet  進程恢復正常
[root@k8s-master-1 ~]# ps -ef | grep nginx
root       67368   67351  0 May07 ?        00:00:00 /bin/sh /usr/bin/nginx-proxy CP_HOSTS=[fc00:bd4:efa8:1001:5054:ff:fe49:9888]:5443,[fc00:bd4:efa8:1001:5054:ff:fe47:357b]:5443,[fc00:bd4:efa8:1001:5054:ff:fec6:74fb]:5443
root       67388   67368  0 May07 ?        00:00:00 nginx: master process nginx -g daemon off;
100        67389   67388  0 May07 ?        00:00:09 nginx: worker process
100        67390   67388  0 May07 ?        00:00:00 nginx: worker process
100        67391   67388  0 May07 ?        00:00:00 nginx: worker process
100        67392   67388  0 May07 ?        00:00:00 nginx: worker process
[root@k8s-master-1 ~]# docker ps| grep nginx-prox
c81fc90b5fda        juestnow/k8s-ha-master        "/usr/bin/nginx-prox…"   17 hours ago        Up 17 hours                             k8s_k8s-ha-master_k8s-ha-master-k8s-master-1_kube-system_1d8831b1fdf32d5c2df16b1e20cbeac2_0

驗證kubelet 是否部署成功

root@Qist:/mnt/g/work/ipv6/1/yaml# kubectl get node
NAME           STATUS     ROLES    AGE   VERSION
k8s-master-1   NotReady   <none>   21m   v1.18.2
k8s-master-2   NotReady   <none>   21m   v1.18.2
k8s-master-3   NotReady   <none>   21m   v1.18.2
k8s-node-1     NotReady   <none>   21m   v1.18.2
k8s-node-2     NotReady   <none>   21m   v1.18.2
# 沒部署網絡插件 cni 就會出現這樣的狀態
root@Qist:/mnt/g/work/ipv6/1/yaml# kubectl get node -o wide
NAME           STATUS     ROLES    AGE   VERSION   INTERNAL-IP                            EXTERNAL-IP   OS-IMAGE                KERNEL-VERSION                CONTAINER-RUNTIME
k8s-master-1   NotReady   <none>   21m   v1.18.2   fc00:bd4:efa8:1001:5054:ff:fe49:9888   <none>        CentOS Linux 8 (Core)   4.18.0-147.8.1.el8_1.x86_64   docker://19.3.8
k8s-master-2   NotReady   <none>   21m   v1.18.2   fc00:bd4:efa8:1001:5054:ff:fe47:357b   <none>        CentOS Linux 8 (Core)   4.18.0-147.8.1.el8_1.x86_64   docker://19.3.8
k8s-master-3   NotReady   <none>   21m   v1.18.2   fc00:bd4:efa8:1001:5054:ff:fec6:74fb   <none>        CentOS Linux 8 (Core)   4.18.0-147.8.1.el8_1.x86_64   docker://19.3.8
k8s-node-1     NotReady   <none>   21m   v1.18.2   fc00:bd4:efa8:1001:5054:ff:fe7f:7551   <none>        CentOS Linux 8 (Core)   4.18.0-147.8.1.el8_1.x86_64   docker://19.3.8
k8s-node-2     NotReady   <none>   21m   v1.18.2   fc00:bd4:efa8:1001:5054:ff:fedc:9845   <none>        CentOS Linux 8 (Core)   4.18.0-147.8.1.el8_1.x86_64   docker://19.3.8
# 所有部署節點已經註冊到K8S 集羣
發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章