Kubernetes實錄 (5): Configuring an HA-mode Kubernetes 1.13.0 cluster with kubeadm

For the complete table of contents of this series, see: Kubernetes實錄-目錄

Related posts:

This post records the process of setting up a Kubernetes HA cluster with kubeadm, following mainly the official documentation.

Hostname             IP address (cluster / storage)   OS           Role            Software                                                   Notes
ejucsmaster-shqs-1   10.99.12.201 / 10.99.13.201      CentOS 7.5   proxy, master   haproxy+keepalived, docker-ce 18.06.1, kubernetes 1.13.0   VIP=10.99.12.200
ejucsmaster-shqs-2   10.99.12.202 / 10.99.13.202      CentOS 7.5   proxy, master   haproxy+keepalived, docker-ce 18.06.1, kubernetes 1.13.0   VIP=10.99.12.200
ejucsmaster-shqs-3   10.99.12.203 / 10.99.13.203      CentOS 7.5   proxy, master   haproxy+keepalived, docker-ce 18.06.1, kubernetes 1.13.0   VIP=10.99.12.200
ejucsnode-shqs-1     10.99.12.204 / 10.99.13.204      CentOS 7.5   worker          docker-ce 18.06.1, kubernetes 1.13.0
ejucsnode-shqs-2     10.99.12.205 / 10.99.13.205      CentOS 7.5   worker          docker-ce 18.06.1, kubernetes 1.13.0
ejucsnode-shqs-3     10.99.12.206 / 10.99.13.206      CentOS 7.5   worker          docker-ce 18.06.1, kubernetes 1.13.0
Note: GlusterFS is used for storage; the GlusterFS setup itself is not covered in this document.

Part 0: Update and initialize the operating system

1. [All nodes] Network configuration

Bond     Mode      IP addresses         Slave ports   Used for
bond0    mode=1    10.99.12.201-206     em1, em3      kubernetes
bond1    mode=1    10.99.13.201-206     em2, em4      GlusterFS (storage)
#bond0
vi /etc/sysconfig/network-scripts/ifcfg-bond0
NAME=bond0
DEVICE=bond0
TYPE=Bond
ONBOOT=yes
NETBOOT=yes
BOOTPROTO=static
NM_CONTROLLED=no
BONDING_OPTS="miimon=100 mode=1"
IPADDR=10.99.12.201
PREFIX=24
GATEWAY=10.99.12.254

# bond1
vi /etc/sysconfig/network-scripts/ifcfg-bond1
NAME=bond1
DEVICE=bond1
TYPE=Bond
ONBOOT=yes
NETBOOT=yes
BOOTPROTO=static
NM_CONTROLLED=no
BONDING_OPTS="miimon=100 mode=1"
IPADDR=10.99.13.201
PREFIX=24

# em1; em3 is configured the same way, just set NAME and DEVICE to em3
vi /etc/sysconfig/network-scripts/ifcfg-em1
NAME=em1
DEVICE=em1
TYPE=Ethernet
ONBOOT=yes
NETBOOT=yes
BOOTPROTO=none
NM_CONTROLLED=no
MASTER=bond0
SLAVE=yes

# em2; em4 is configured the same way, just set NAME and DEVICE to em4
vi /etc/sysconfig/network-scripts/ifcfg-em2
NAME=em2
DEVICE=em2
TYPE=Ethernet
ONBOOT=yes
NETBOOT=yes
BOOTPROTO=none
NM_CONTROLLED=no
MASTER=bond1
SLAVE=yes
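
After restarting the network, it is worth checking that both bonds actually came up in active-backup mode with their slaves attached; a quick sanity check of mine, not part of the original record:

systemctl restart network
cat /proc/net/bonding/bond0 | grep -E "Bonding Mode|Slave Interface|MII Status"
ip -br addr | grep -E "bond[01]"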

2. Hostname configuration

IP address     Hostname             Role     Notes
10.99.12.201 ejucsmaster-shqs-1 master
10.99.12.202 ejucsmaster-shqs-2 master
10.99.12.203 ejucsmaster-shqs-3 master
10.99.12.204 ejucsnode-shqs-1 worker
10.99.12.205 ejucsnode-shqs-2 worker
10.99.12.206 ejucsnode-shqs-3 worker
# Run the matching command on each host
hostnamectl set-hostname ejucsmaster-shqs-1
hostnamectl set-hostname ejucsmaster-shqs-2
hostnamectl set-hostname ejucsmaster-shqs-3
hostnamectl set-hostname ejucsnode-shqs-1
hostnamectl set-hostname ejucsnode-shqs-2
hostnamectl set-hostname ejucsnode-shqs-3

3. /etc/hosts

# vi /etc/hosts

127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
10.99.12.200 csapi.ejuops.com
10.99.12.201 ejucsmaster-shqs-1
10.99.12.202 ejucsmaster-shqs-2
10.99.12.203 ejucsmaster-shqs-3
10.99.12.204 ejucsnode-shqs-1
10.99.12.205 ejucsnode-shqs-2
10.99.12.206 ejucsnode-shqs-3

4. DNS resolver configuration

The DNS servers configured here are the company's internal ones.

cat <<EOF >  /etc/resolv.conf
nameserver 10.99.73.5
nameserver 10.99.73.6
EOF

5. NTP configuration (chronyd)

The NTP servers configured here are the company's internal ones.

# cat /etc/chrony.conf
... 
server 10.99.73.5 iburst
server 10.99.73.6 iburst


systemctl start chronyd.service
systemctl enable chronyd.service
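
To confirm the nodes are actually syncing against the internal NTP servers, chrony can be queried directly (a verification step, not in the original notes):

chronyc sources -v     # 10.99.73.5 / 10.99.73.6 should be listed, one of them marked with '*'
chronyc tracking       # shows the current offset, stratum and reference server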

6. Disable the firewall

yum install firewalld -y
systemctl stop firewalld.service
systemctl disable firewalld.service

7. Disable SELinux (the config change takes effect after a reboot; setenforce 0 disables it for the current session)

sed -i "s/^SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
setenforce 0

8. Disable swap

swapoff -a
sed -i 's/.*swap.*/#&/' /etc/fstab

free should now show swap usage as 0.
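
A quick way to double-check that steps 7 and 8 took effect (my own sanity check, not from the original record):

getenforce             # Permissive now, Disabled after the next reboot
swapon -s              # should list no swap devices
free -m | grep -i swap # the Swap line should be all zeros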

9. Kernel parameter tuning

cat /etc/sysctl.conf
...
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1

sysctl -p

10. ulimit settings

cat <<EOF > /etc/security/limits.d/90-nproc.conf
*          soft    nproc     50000
*          hard    nproc     60000
*          soft    nofile    1024000
*          hard    nofile    1024000
root       soft    nproc     unlimited
EOF

11. Install commonly used tools and development packages

yum -y install epel-release
yum -y install bridge-utils tcpdump screen lrzsz net-tools python-devel gcc wget curl zip unzip nc telnet vim bind-utils openssl-devel
yum -y install yum-plugin-priorities tcpdump screen net-tools gcc wget curl lrzsz zip unzip nc telnet bind-utils rsync vim traceroute sysstat perf iotop iftop strace dstat htop pciutils mtr tree git lsof nmap sudo ntpdate bzip2 gzip xz cmake autoconf automake pcre pcre-devel zlib zlib-devel libselinux-python python-simplejson nethogs nload iptraf multitail tmux atop saidar bmon libcurl-devel libattr-devel python-devel openssl-devel openldap-devel readline-devel gmp-devel libmcrypt-devel mhash-devel libxslt-devel libjpeg-devel freetype-devel libxml2-devel zlib-devel glib2-devel bzip2-devel ncurses-devel e2fsprogs-devel krb5-devel libidn-devel libffi-devel

12. Load kernel modules

yum install bridge-utils -y
modprobe br_netfilter
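
modprobe only loads the module until the next reboot; to keep the bridge sysctls above working after a restart, the module can be registered with systemd-modules-load. This is my addition, not part of the original record:

cat <<EOF > /etc/modules-load.d/k8s.conf
br_netfilter
EOF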

Part 1: Configure HAProxy + keepalived

In a Kubernetes HA setup, the multiple API servers need a load balancer in front of them (other approaches are possible too). Here HAProxy + keepalived provide a TCP load balancer, running on the three master nodes.

Hostname             IP address     OS           Components            VIP
ejucsmaster-shqs-1 10.99.12.201 CentOS 7.5 haproxy+keepalived 10.99.12.200
ejucsmaster-shqs-2 10.99.12.202 CentOS 7.5 haproxy+keepalived 10.99.12.200
ejucsmaster-shqs-3 10.99.12.203 CentOS 7.5 haproxy+keepalived 10.99.12.200
[All master nodes] Install keepalived and haproxy
yum install keepalived haproxy -y

[HAProxy configuration on all master nodes]
vi /etc/haproxy/haproxy.cfg
global
    daemon
    nbproc    4
    user      haproxy
    group     haproxy
    maxconn   50000
    pidfile   /var/run/haproxy.pid
    log       127.0.0.1   local0
    chroot    /var/lib/haproxy

defaults
    log       global
    log       127.0.0.1   local0
    maxconn   50000
    retries   3
    balance   roundrobin
    option    httplog
    option    dontlognull
    option    httpclose
    option    abortonclose
    timeout   http-request 10s
    timeout   connect 10s
    timeout   server 1m
    timeout   client 1m
    timeout   queue 1m
    timeout   check 5s

listen stats :1234
    stats     enable
    mode      http
    option    httplog
    log       global
    maxconn   10
    stats     refresh 30s
    stats     uri /
    stats     hide-version
    stats     realm HAproxy
    stats     auth admin:admin@haproxy
    stats     admin if TRUE

listen kube-api-lb
    bind      0.0.0.0:8443
    balance   roundrobin
    mode      tcp
    option    tcplog
    server    ejucsmaster-shqs-1 10.99.12.201:6443 weight 1 maxconn 10000 check inter 10s rise 2 fall 3
    server    ejucsmaster-shqs-2 10.99.12.202:6443 weight 1 maxconn 10000 check inter 10s rise 2 fall 3
    server    ejucsmaster-shqs-3 10.99.12.203:6443 weight 1 maxconn 10000 check inter 10s rise 2 fall 3

systemctl enable haproxy
systemctl start haproxy

[keepalived configuration on all master nodes]
vi /etc/keepalived/keepalived.conf
global_defs {
   router_id csapiserver
   #vrrp_skip_check_adv_addr
   #vrrp_strict
   #vrrp_garp_interval 0
   #vrrp_gna_interval 0
}

vrrp_script chk_haproxy {
    script "killall -0 haproxy"
    interval 2
    weight 2
}

vrrp_instance VI_1 {
    state MASTER              # BACKUP on the other two masters
    interface bond0
    virtual_router_id 51
    priority 101              # 100 and 99 on the other two masters
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        10.99.12.200
    }
    track_script {
        chk_haproxy
    }
}

systemctl enable keepalived.service
systemctl start keepalived.service
netstat -pltn 
...
tcp        0      0 0.0.0.0:8443            0.0.0.0:*               LISTEN      50670/haproxy 

#[master-1]
ip add
bond0: <BROADCAST,MULTICAST,MASTER,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000
        link/ether 14:18:77:64:11:a1 brd ff:ff:ff:ff:ff:ff
        inet 10.99.12.201/24 brd 10.99.12.255 scope global bond0
                              valid_lft forever preferred_lft forever
        inet 10.99.12.200/32 scope global bond0
                              valid_lft forever preferred_lft forever
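
A simple failover test, not part of the original record: stop haproxy on the node that currently holds the VIP. The chk_haproxy script then stops adding its weight to that node's priority, so a backup whose haproxy is still running wins the next VRRP election and takes over 10.99.12.200 within a few seconds.

# on master-1 (current VIP holder)
systemctl stop haproxy
# on master-2 or master-3, the VIP should appear shortly
ip addr show bond0 | grep 10.99.12.200
# restore the original state
systemctl start haproxy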

Part 2: Install docker-ce

1. [All nodes] Install dependencies

yum -y install yum-utils device-mapper-persistent-data lvm2 conntrack-tools bridge-utils ipvsadm

2. Add the Docker repository (using a domestic mirror) [all nodes]

yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum list docker-ce --showduplicates|sort -r
	docker-ce.x86_64 18.09.0.ce-1.el7 docker-ce-stable
	docker-ce.x86_64 18.06.1.ce-3.el7 docker-ce-stable     # use this version
	docker-ce.x86_64 18.06.0.ce-3.el7 docker-ce-stable
	docker-ce.x86_64 18.03.1.ce-1.el7.centos docker-ce-stable
	docker-ce.x86_64 18.03.0.ce-1.el7.centos docker-ce-stable
	docker-ce.x86_64 17.12.1.ce-1.el7.centos docker-ce-stable
	docker-ce.x86_64 17.12.0.ce-1.el7.centos docker-ce-stable
	docker-ce.x86_64 17.09.1.ce-1.el7.centos docker-ce-stable
	docker-ce.x86_64 17.09.0.ce-1.el7.centos docker-ce-stable
	docker-ce.x86_64 17.06.2.ce-1.el7.centos docker-ce-stable
	docker-ce.x86_64 17.06.1.ce-1.el7.centos docker-ce-stable
	docker-ce.x86_64 17.06.0.ce-1.el7.centos docker-ce-stable
	docker-ce.x86_64 17.03.3.ce-1.el7 docker-ce-stable         
	docker-ce.x86_64 17.03.2.ce-1.el7.centos docker-ce-stable
	docker-ce.x86_64 17.03.1.ce-1.el7.centos docker-ce-stable
	docker-ce.x86_64 17.03.0.ce-1.el7.centos docker-ce-stable

3. Install the chosen docker-ce version [18.06.1] [all nodes]

yum -y install docker-ce-18.06.1.ce

mkdir /etc/docker
cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ]
}
EOF

4. Install the yum versionlock plugin to pin the Docker version [all nodes]

yum -y install yum-plugin-versionlock
yum versionlock docker-ce

5. Enable Docker at boot [all nodes]

mkdir -p /etc/systemd/system/docker.service.d
systemctl daemon-reload
systemctl enable docker.service
systemctl start docker.service
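
Since kubeadm 1.13 warns when kubelet and Docker disagree on the cgroup driver, it is worth confirming that Docker actually picked up daemon.json; the expected values follow from the file written above:

docker info | grep -iE "cgroup driver|storage driver"
# Cgroup Driver: systemd
# Storage Driver: overlay2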

Part 3: [All nodes] Preparation before creating the cluster with kubeadm

1. Repository setup

cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
        http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

yum clean all && yum makecache

2. Install kubelet, kubeadm and kubectl on all nodes

# Installs the latest available version, 1.13.0 at the time of writing; a specific version can be requested instead
yum -y install kubelet kubeadm kubectl
systemctl enable kubelet.service
systemctl start kubelet.service
systemctl status kubelet.service   # an error state here is normal for now; kubelet keeps restarting until kubeadm supplies its configuration
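
Because the repository always serves the newest release, a rebuild at a later date could silently pick up a different version. If that matters, install and lock the versions explicitly; a variant of the commands above, assuming the usual kubelet-<version> package naming of this repository:

yum -y install kubelet-1.13.0 kubeadm-1.13.0 kubectl-1.13.0
yum versionlock kubelet kubeadm kubectl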

3. Pull the Kubernetes images on all nodes

kubeadm pulls its images from Google's official registry, which is blocked in mainland China, so the images are pulled from a mirror repository and re-tagged instead. Some of them are only needed on the master nodes, but for simplicity they are pulled on every node here.

# List the required images
kubeadm config images list
    k8s.gcr.io/kube-apiserver:v1.13.0
    k8s.gcr.io/kube-controller-manager:v1.13.0
    k8s.gcr.io/kube-scheduler:v1.13.0
    k8s.gcr.io/kube-proxy:v1.13.0
    k8s.gcr.io/pause:3.1
    k8s.gcr.io/etcd:3.2.24
    k8s.gcr.io/coredns:1.2.6
 
docker pull mirrorgooglecontainers/kube-apiserver:v1.13.0
docker pull mirrorgooglecontainers/kube-controller-manager:v1.13.0
docker pull mirrorgooglecontainers/kube-scheduler:v1.13.0
docker pull mirrorgooglecontainers/kube-proxy:v1.13.0
docker pull mirrorgooglecontainers/pause:3.1
docker pull mirrorgooglecontainers/etcd:3.2.24
docker pull coredns/coredns:1.2.6

docker tag docker.io/mirrorgooglecontainers/kube-proxy:v1.13.0 k8s.gcr.io/kube-proxy:v1.13.0
docker tag docker.io/mirrorgooglecontainers/kube-scheduler:v1.13.0 k8s.gcr.io/kube-scheduler:v1.13.0
docker tag docker.io/mirrorgooglecontainers/kube-apiserver:v1.13.0 k8s.gcr.io/kube-apiserver:v1.13.0
docker tag docker.io/mirrorgooglecontainers/kube-controller-manager:v1.13.0 k8s.gcr.io/kube-controller-manager:v1.13.0
docker tag docker.io/mirrorgooglecontainers/etcd:3.2.24  k8s.gcr.io/etcd:3.2.24
docker tag docker.io/mirrorgooglecontainers/pause:3.1  k8s.gcr.io/pause:3.1
docker tag docker.io/coredns/coredns:1.2.6  k8s.gcr.io/coredns:1.2.6

docker rmi docker.io/mirrorgooglecontainers/kube-proxy:v1.13.0
docker rmi docker.io/mirrorgooglecontainers/kube-scheduler:v1.13.0
docker rmi docker.io/mirrorgooglecontainers/kube-apiserver:v1.13.0
docker rmi docker.io/mirrorgooglecontainers/kube-controller-manager:v1.13.0
docker rmi docker.io/mirrorgooglecontainers/etcd:3.2.24
docker rmi docker.io/mirrorgooglecontainers/pause:3.1
docker rmi docker.io/coredns/coredns:1.2.6
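
The pull/tag/rmi sequence above can also be wrapped in a small loop so the version string only lives in one place; a sketch that mirrors the image list from `kubeadm config images list`:

K8S_VERSION=v1.13.0
for img in kube-apiserver:${K8S_VERSION} kube-controller-manager:${K8S_VERSION} \
           kube-scheduler:${K8S_VERSION} kube-proxy:${K8S_VERSION} pause:3.1 etcd:3.2.24; do
    docker pull mirrorgooglecontainers/${img}
    docker tag  mirrorgooglecontainers/${img} k8s.gcr.io/${img}
    docker rmi  mirrorgooglecontainers/${img}
done
# coredns comes from its own repository
docker pull coredns/coredns:1.2.6
docker tag  coredns/coredns:1.2.6 k8s.gcr.io/coredns:1.2.6
docker rmi  coredns/coredns:1.2.6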

# Note: alternatively, pull the images on just one node, save them to tar files, copy those to the other nodes and load them there. This is usually faster.
docker save -o kube-proxy_v1.13.0.tar k8s.gcr.io/kube-proxy:v1.13.0
docker save -o kube-apiserver_v1.13.0.tar k8s.gcr.io/kube-apiserver:v1.13.0
docker save -o kube-controller-manager_v1.13.0.tar k8s.gcr.io/kube-controller-manager:v1.13.0
docker save -o coredns_1.2.6.tar k8s.gcr.io/coredns:1.2.6
docker save -o etcd_3.2.24.tar k8s.gcr.io/etcd:3.2.24 
docker save -o pause_3.1.tar k8s.gcr.io/pause:3.1

# copy the saved images to every other node (adjust the host list as needed)
for node in ejucsmaster-shqs-2 ejucsmaster-shqs-3 ejucsnode-shqs-1 ejucsnode-shqs-2 ejucsnode-shqs-3; do scp *.tar $node:/root/; done

for var in $(ls);do docker load < $var;done

Part 4: Deploy Kubernetes across multiple master nodes (HA)

Run the following on the first master node, then join the other two masters afterwards.

touch kubeadm-config.yaml
vi kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
kubernetesVersion: v1.13.0
apiServer:
  certSANs:
  - "csapi.ejuops.com"
  - 10.99.12.200
  - 10.99.12.201
  - 10.99.12.202
  - 10.99.12.203
  - ejucsmaster-shqs-1
  - ejucsmaster-shqs-2
  - ejucsmaster-shqs-3
controlPlaneEndpoint: "csapi.ejuops.com:8443"
networking:
  # This CIDR is a Calico default. Substitute or remove for your CNI provider.
  podSubnet: "192.168.0.0/16"
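
Since the images were re-tagged by hand, it does not hurt to check that everything kubeadm expects for this config is actually present locally before running init (a verification step of mine, not from the original record):

kubeadm config images list --config kubeadm-config.yaml
docker images | grep k8s.gcr.io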

kubeadm init --config kubeadm-config.yaml
[init] Using Kubernetes version: v1.13.0
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Activating the kubelet service
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [ejucsmaster-shqs-1 localhost] and IPs [10.99.12.201 127.0.0.1 ::1]
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [ejucsmaster-shqs-1 localhost] and IPs [10.99.12.201 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [ejucsmaster-shqs-1 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local csapi.ejuops.com] and IPs [10.96.0.1 10.99.12.201]
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "admin.conf" kubeconfig file
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 25.029937 seconds
[uploadconfig] storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.13" in namespace kube-system with the configuration for the kubelets in the cluster
[patchnode] Uploading the CRI Socket information "/var/run/dockershim.sock" to the Node API object "ejucsmaster-shqs-1" as an annotation
[mark-control-plane] Marking the node ejucsmaster-shqs-1 as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node ejucsmaster-shqs-1 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: dagkeg.wurlnm9icfauj0qu
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstraptoken] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstraptoken] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstraptoken] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstraptoken] creating the "cluster-info" ConfigMap in the "kube-public" namespace
[addons] Applied essential addon: CoreDNS
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[addons] Applied essential addon: kube-proxy

Your Kubernetes master has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of machines by running the following on each node
as root:

  kubeadm join csapi.ejuops.com:8443 --token dagkeg.wurlnm9icfauj0qu --discovery-token-ca-cert-hash sha256:d5ad0684a249e2f482be7e74b0883768ff08c21732e0d7f14cc00bf77ac768eb
  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

2. Sync the certificates to the other master nodes

# on master1
USER=root
CONTROL_PLANE_IPS="10.99.12.202 10.99.12.203"
for host in ${CONTROL_PLANE_IPS}; do
    scp /etc/kubernetes/pki/ca.crt "${USER}"@$host:
    scp /etc/kubernetes/pki/ca.key "${USER}"@$host:
    scp /etc/kubernetes/pki/sa.key "${USER}"@$host:
    scp /etc/kubernetes/pki/sa.pub "${USER}"@$host:
    scp /etc/kubernetes/pki/front-proxy-ca.crt "${USER}"@$host:
    scp /etc/kubernetes/pki/front-proxy-ca.key "${USER}"@$host:
    scp /etc/kubernetes/pki/etcd/ca.crt "${USER}"@$host:etcd-ca.crt
    scp /etc/kubernetes/pki/etcd/ca.key "${USER}"@$host:etcd-ca.key
    scp /etc/kubernetes/admin.conf "${USER}"@$host:
done

# on master2,3
USER=root
mkdir -p /etc/kubernetes/pki/etcd
mv /${USER}/ca.crt /etc/kubernetes/pki/
mv /${USER}/ca.key /etc/kubernetes/pki/
mv /${USER}/sa.pub /etc/kubernetes/pki/
mv /${USER}/sa.key /etc/kubernetes/pki/
mv /${USER}/front-proxy-ca.crt /etc/kubernetes/pki/
mv /${USER}/front-proxy-ca.key /etc/kubernetes/pki/
mv /${USER}/etcd-ca.crt /etc/kubernetes/pki/etcd/ca.crt
mv /${USER}/etcd-ca.key /etc/kubernetes/pki/etcd/ca.key
mv /${USER}/admin.conf /etc/kubernetes/admin.conf

# Run on master2 and master3 to join them to the cluster; note the --experimental-control-plane flag
kubeadm join csapi.ejuops.com:8443 --token dagkeg.wurlnm9icfauj0qu --discovery-token-ca-cert-hash sha256:d5ad0684a249e2f482be7e74b0883768ff08c21732e0d7f14cc00bf77ac768eb --experimental-control-plane

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config


kubectl get nodes
NAME                 STATUS   	ROLES    AGE     VERSION
ejucsmaster-shqs-1   NotReady    master   24m     v1.13.0
ejucsmaster-shqs-2   NotReady    master   4m34s   v1.13.0
ejucsmaster-shqs-3   NotReady    master   3m49s   v1.13.0

Part 5: Install the Calico network plugin [run on one master node]

Pull the Calico images on all nodes. Some of them are only needed on the master nodes, but for simplicity they are pulled everywhere.

[All nodes]
docker pull quay.io/calico/typha:v3.3.2
docker pull quay.io/calico/node:v3.3.2
docker pull quay.io/calico/cni:v3.3.2

[Any one of the master nodes]
wget https://docs.projectcalico.org/v3.3/getting-started/kubernetes/installation/hosted/rbac-kdd.yaml
wget https://docs.projectcalico.org/v3.3/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calico-networking/1.7/calico.yaml

kubectl apply -f rbac-kdd.yaml
kubectl apply -f calico.yaml
kubectl get pods --all-namespaces
NAMESPACE     NAME                                         READY   STATUS    RESTARTS   AGE
kube-system   calico-node-6k6f5                            1/2     Running   0          4m24s
kube-system   calico-node-b9fnw                            1/2     Running   0          3m39s
kube-system   calico-node-j8hn4                            1/2     Running   0          9m43s
kube-system   coredns-86c58d9df4-88vhb                     1/1     Running   0          24m
kube-system   coredns-86c58d9df4-8vwm9                     1/1     Running   0          24m
kube-system   etcd-ejucsmaster-shqs-1                      1/1     Running   0          23m
kube-system   etcd-ejucsmaster-shqs-2                      1/1     Running   0          4m22s
kube-system   etcd-ejucsmaster-shqs-3                      1/1     Running   0          3m37s
kube-system   kube-apiserver-ejucsmaster-shqs-1            1/1     Running   0          23m
kube-system   kube-apiserver-ejucsmaster-shqs-2            1/1     Running   0          4m23s
kube-system   kube-apiserver-ejucsmaster-shqs-3            1/1     Running   0          3m38s
kube-system   kube-controller-manager-ejucsmaster-shqs-1   1/1     Running   1          23m
kube-system   kube-controller-manager-ejucsmaster-shqs-2   1/1     Running   0          4m23s
kube-system   kube-controller-manager-ejucsmaster-shqs-3   1/1     Running   0          3m38s
kube-system   kube-proxy-8przj                             1/1     Running   0          3m39s
kube-system   kube-proxy-xwchh                             1/1     Running   0          24m
kube-system   kube-proxy-xz6rl                             1/1     Running   0          4m24s
kube-system   kube-scheduler-ejucsmaster-shqs-1            1/1     Running   1          23m
kube-system   kube-scheduler-ejucsmaster-shqs-2            1/1     Running   0          4m23s
kube-system   kube-scheduler-ejucsmaster-shqs-3            1/1     Running   0          3m38s

Part 6: Join the worker nodes to the cluster

Join the worker nodes to the cluster.

# [Run on every worker node]
kubeadm join csapi.ejuops.com:8443 --token dagkeg.wurlnm9icfauj0qu --discovery-token-ca-cert-hash sha256:d5ad0684a249e2f482be7e74b0883768ff08c21732e0d7f14cc00bf77ac768eb

[preflight] Running pre-flight checks
[discovery] Trying to connect to API Server "csapi.ejuops.com:8443"
[discovery] Created cluster-info discovery client, requesting info from "https://csapi.ejuops.com:8443"
[discovery] Requesting info from "https://csapi.ejuops.com:8443" again to validate TLS against the pinned public key
[discovery] Cluster info signature and contents are valid and TLS certificate validates against pinned roots, will use API Server "csapi.ejuops.com:8443"
[discovery] Successfully established connection with API Server "csapi.ejuops.com:8443"
[join] Reading configuration from the cluster...
[join] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet] Downloading configuration for the kubelet from the "kubelet-config-1.13" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Activating the kubelet service
[tlsbootstrap] Waiting for the kubelet to perform the TLS Bootstrap...
[patchnode] Uploading the CRI Socket information "/var/run/dockershim.sock" to the Node API object "ejucsnode-shqs-1" as an annotation

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the master to see this node join the cluster.
kubectl get nodes
NAME                 STATUS   ROLES    AGE     VERSION
ejucsmaster-shqs-1   Ready    master   29m     v1.13.0
ejucsmaster-shqs-2   Ready    master   9m11s   v1.13.0
ejucsmaster-shqs-3   Ready    master   8m26s   v1.13.0
ejucsnode-shqs-1     Ready    <none>   36s     v1.13.0
ejucsnode-shqs-2     Ready    <none>   28s     v1.13.0
ejucsnode-shqs-3     Ready    <none>   26s     v1.13.0

[master]
kubectl label node ejucsnode-shqs-1 node-role.kubernetes.io/node=
kubectl label node ejucsnode-shqs-2 node-role.kubernetes.io/node=
kubectl label node ejucsnode-shqs-3 node-role.kubernetes.io/node=

kubectl get nodes
NAME                 STATUS   ROLES    AGE     VERSION
ejucsmaster-shqs-1   Ready    master   30m     v1.13.0
ejucsmaster-shqs-2   Ready    master   10m     v1.13.0
ejucsmaster-shqs-3   Ready    master   9m34s   v1.13.0
ejucsnode-shqs-1     Ready    node     104s    v1.13.0
ejucsnode-shqs-2     Ready    node     96s     v1.13.0
ejucsnode-shqs-3     Ready    node     94s     v1.13.0

kubectl get nodes --show-labels
NAME                 STATUS   ROLES    AGE     VERSION   LABELS
ejucsmaster-shqs-1   Ready    master   30m     v1.13.0   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/hostname=ejucsmaster-shqs-1,node-role.kubernetes.io/master=
ejucsmaster-shqs-2   Ready    master   10m     v1.13.0   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/hostname=ejucsmaster-shqs-2,node-role.kubernetes.io/master=
ejucsmaster-shqs-3   Ready    master   9m47s   v1.13.0   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/hostname=ejucsmaster-shqs-3,node-role.kubernetes.io/master=
ejucsnode-shqs-1     Ready    node     117s    v1.13.0   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/hostname=ejucsnode-shqs-1,node-role.kubernetes.io/node=
ejucsnode-shqs-2     Ready    node     109s    v1.13.0   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/hostname=ejucsnode-shqs-2,node-role.kubernetes.io/node=
ejucsnode-shqs-3     Ready    node     107s    v1.13.0   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/hostname=ejucsnode-shqs-3,node-role.kubernetes.io/node=

Part 7: Deploy kubernetes-dashboard

For the dashboard deployment, refer to section 4 ("install the kubernetes-dashboard add-on [master nodes]") of the earlier post "Kubernetes初體驗(1) 使用kubeadm配置3節點kubernets 1.12.0集群".
The dashboard deployed that way uses a self-signed certificate and is accessed over HTTPS. Note that the yaml differs from the official download in a few places:

  1. The dashboard pods run only on the master nodes
  2. The self-signed certificate is copied to all master nodes and mounted into the dashboard container
  3. The service is exposed via NodePort
  4. A third-party image mirror is used, since the Google registry is blocked
  5. The token lifetime is extended with --token-ttl=10800
  6. A custom ServiceAccount is used; its permissions are broader than strictly necessary

The configuration can also be changed so the dashboard itself serves plain HTTP, with HTTPS terminated at the load balancer in front or at a dedicated nginx, which is arguably the cleaner setup. See https://blog.heptio.com/on-securing-the-kubernetes-dashboard-16b09b1b7aca for a detailed discussion.
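
The modified dashboard yaml itself is not reproduced here, but the effect of items 1, 3 and 6 in the list above can be sketched with plain kubectl commands. The dashboard-admin ServiceAccount name is illustrative rather than taken from the original yaml, and pinning the pods to the masters additionally requires a toleration for the master taint:

# 3. expose the dashboard as a NodePort service instead of ClusterIP
kubectl -n kube-system patch svc kubernetes-dashboard -p '{"spec":{"type":"NodePort"}}'
# 1. pin the pods to the master nodes
kubectl -n kube-system patch deployment kubernetes-dashboard \
  -p '{"spec":{"template":{"spec":{"nodeSelector":{"node-role.kubernetes.io/master":""}}}}}'
# 6. an (over-privileged) account whose token can be used to log in; tighten this for production
kubectl -n kube-system create serviceaccount dashboard-admin
kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin \
  --serviceaccount=kube-system:dashboard-admin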

Part 8: Demo

This example was found online (it appears to come from a Kubernetes book) and is used here purely as a smoke test: a Tomcat application backed by a MySQL database.

# cat myweb-dm.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myweb
spec:
  replicas: 2
  selector:
    matchLabels:
      app: myweb
  template:
    metadata:
      labels:
        app: myweb
    spec:
      containers:
      - name: myweb
        image: kubeguide/tomcat-app:v1
        ports:
        - containerPort: 8080
        env:
        - name: MYSQL_SERVICE_HOST
          value: 'mysql'
        - name: MYSQL_SERVICE_PORT
          value: '3306'

# cat myweb-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: myweb
spec:
  type: NodePort
  ports:
  - port: 8080
    nodePort: 30001
  selector:
    app: myweb

# cat mysql-dm.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mysql
spec:
  replicas: 1
  selector:
    matchLabels:
      app: mysql
  template:
    metadata:
      labels:
        app: mysql
    spec:
      containers:
      - name: mysql
        image: mysql:5.7
        ports:
        - containerPort: 3306 
        env:
        - name: MYSQL_ROOT_PASSWORD
          value: "123456"

# cat mysql-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: mysql
spec:
  ports:
  - port: 3306
  selector:
    app: mysql
kubectl apply -f mysql-dm.yaml
kubectl apply -f mysql-svc.yaml
kubectl apply -f myweb-dm.yaml
kubectl apply -f myweb-svc.yaml

# kubectl get deployment -n default
NAME    READY   UP-TO-DATE   AVAILABLE   AGE
mysql   1/1     1            1           3m
myweb   2/2     2            2           3m

# kubectl get pods -n default
NAME                     READY   STATUS    RESTARTS   AGE
mysql-76999dd7c8-l96w9   1/1     Running   0          3m
myweb-76c7df5b6c-hjw7p   1/1     Running   0          3m
myweb-76c7df5b6c-zdzbb   1/1     Running   0          3m

# kubectl get svc -n default
NAME         TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)          AGE
kubernetes   ClusterIP   10.96.0.1        <none>        443/TCP          2h
mysql        ClusterIP   10.109.235.163   <none>        3306/TCP         4m
myweb        NodePort    10.96.26.71      <none>        8080:30001/TCP   4m

Access the demo application:
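
Without a browser, the NodePort can be checked from anywhere that reaches the nodes; any node IP works, 10.99.12.204 is just an example:

curl -sI http://10.99.12.204:30001/ | head -n 1
# HTTP/1.1 200 OK  -> the demo Tomcat page is being served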


Part 9: Continuing

At this point the Kubernetes 1.13.0 HA cluster is deployed and basically usable. For production use quite a few pieces are still missing, such as shared storage volumes, monitoring, metrics collection, and a logging solution.
