Installing Kubernetes v1.13.4 on CentOS 7

Part 1: Environment Initialization

Environment preparation:

k8s-master1	10.3.8.101	etcd/docker/kube-apiserver/kube-controller-manager/kube-scheduler/flannel
k8s-worker1	10.3.8.104	etcd/docker/kube-proxy/kubelet/flannel
k8s-worker2	10.3.8.105	etcd/docker/kube-proxy/kubelet/flannel

All of the above hosts are minimal installs. Set the hostnames, disable SELinux, turn off the firewall, and populate /etc/hosts:

# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
10.3.8.101  k8s-master1
10.3.8.104  k8s-worker1
10.3.8.105  k8s-worker2

Set up passwordless SSH login

Here k8s-master1 doubles as the deployment node:

[root@k8s-master1 ~]# ssh-keygen -t rsa
[root@k8s-master1 ~]# ssh-copy-id k8s-master1
[root@k8s-master1 ~]# ssh-copy-id k8s-worker1
[root@k8s-master1 ~]# ssh-copy-id k8s-worker2

Configure kernel parameters

[root@k8s-master1 ~]# cat > /etc/sysctl.d/kubernetes.conf << EOF
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
[root@k8s-master1 ~]# sysctl -p /etc/sysctl.d/kubernetes.conf >& /dev/null
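
On a minimal CentOS 7 install, the two bridge-nf-call sysctls above may be rejected until the br_netfilter module is loaded; a minimal sketch to load it now and on every boot (the modules-load.d file name is an assumption):

# modprobe br_netfilter
# echo br_netfilter > /etc/modules-load.d/br_netfilter.conf
# sysctl -p /etc/sysctl.d/kubernetes.conf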

Install Docker (on every node)

# wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -P /etc/yum.repos.d/
# yum install -y docker-ce
# mkdir -p /etc/docker
# cat > /etc/docker/daemon.json << EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "registry-mirrors": ["https://hub-mirror.c.163.com", "https://docker.mirrors.ustc.edu.cn"],
  "log-driver": "json-file",
  "log-opts": {
"max-size": "100m",
"max-file": "3"
  },
  "storage-driver": "overlay2",
  "max-concurrent-downloads": 20
}
EOF
# systemctl enable docker
# systemctl start docker
# docker info
......
Registry Mirrors:
 https://hub-mirror.c.163.com/
 https://docker.mirrors.ustc.edu.cn/

## Prepare the deployment directories (on every node)
# mkdir -p /opt/kubernetes/{cfg,bin/cni,ssl,log}
# vim /etc/profile
export PATH=/opt/kubernetes/bin/:$PATH
# source /etc/profile

Creating certificates

Download and install CFSSL

[root@k8s-master1 ~]# cd /opt/kubernetes/bin/
[root@k8s-master1 bin]# wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 -O cfssl
[root@k8s-master1 bin]# wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 -O cfssljson
[root@k8s-master1 bin]# wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64 -O cfssl-certinfo
[root@k8s-master1 bin]# chmod +x cfssl*

Create the CA certificate

[root@k8s-master1 ~]# mkdir /usr/local/src/ssl && cd /usr/local/src/ssl
[root@k8s-master1 ssl]# cfssl print-defaults config > ca-config.json
[root@k8s-master1 ssl]# cfssl print-defaults csr > ca-csr.json

Edit the CA config file

[root@k8s-master1 ssl]# vi ca-config.json 
{
    "signing": {
        "default": {
            "expiry": "87600h"
        },
        "profiles": {
            "kubernetes": {
                "expiry": "87600h",
                "usages": [
                    "signing",
                    "key encipherment",
                    "server auth",
                    "client auth"
                ]
            }
        }
    }
}

Edit the CA CSR file

[root@k8s-master1 ssl]# vi ca-csr.json 
{
    "CN": "kubernetes",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "Guangdong",
            "L": "Guangzhou",
            "O": "k8s",
            "OU": "System"
        }
    ]
}
[root@k8s-master1 ssl]# cfssl gencert -initca ca-csr.json | cfssljson -bare ca
[root@k8s-master1 ssl]# ls -l
total 20
-rw-r--r-- 1 root root  387 Mar 20 17:00 ca-config.json
-rw-r--r-- 1 root root 1005 Mar 20 17:05 ca.csr
-rw-r--r-- 1 root root  269 Mar 20 17:03 ca-csr.json
-rw------- 1 root root 1679 Mar 20 17:05 ca-key.pem
-rw-r--r-- 1 root root 1371 Mar 20 17:05 ca.pem

Create the etcd certificate

[root@k8s-master1 ssl]# cat etcd-csr.json 
{
    "CN": "etcd",
    "hosts": [
      "127.0.0.1",
      "10.3.8.101",
      "10.3.8.104",
      "10.3.8.105"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "Guangdong",
            "L": "Guangzhou",
            "O": "k8s",
            "OU": "System"
        }
    ]
}
[root@k8s-master1 ssl]# cfssl gencert \
-ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes \
etcd-csr.json | cfssljson -bare etcd
[root@k8s-master1 ssl]# ls -l etcd*
-rw-r--r-- 1 root root 1086 Mar 20 17:27 etcd.csr
-rw-r--r-- 1 root root  402 Mar 20 17:27 etcd-csr.json
-rw------- 1 root root 1679 Mar 20 17:27 etcd-key.pem
-rw-r--r-- 1 root root 1460 Mar 20 17:27 etcd.pem

Create the kubernetes certificate

[root@k8s-master1 ssl]# vi kubernetes-csr.json 
{
    "CN": "kubernetes",
    "hosts": [
      "127.0.0.1",
      "10.3.8.101",
      "10.3.8.102",
      "10.3.8.103",
      "10.1.0.1",
      "10.254.0.2",
      "kubernetes",
      "kubernetes.default",
      "kubernetes.default.svc",
      "kubernetes.default.svc.cluster",
      "kubernetes.default.svc.cluster.local"],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "Guangdong",
            "L": "Guangzhou",
            "O": "k8s",
            "OU": "System"
        }
    ]
}
[root@k8s-master1 ssl]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kubernetes-csr.json | cfssljson -bare kubernetes

Create the admin certificate

[root@k8s-master1 ssl]# vi admin-csr.json
{ 
  "CN": "admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Guangdong",
      "L": "Guangzhou",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}
[root@k8s-master1 ssl]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin

Create the kube-proxy certificate

[root@k8s-master1 ssl]# vi kube-proxy-csr.json
{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Guangdong",
      "L": "Guangzhou",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
[root@k8s-master1 ssl]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy

Create the flanneld certificate

[root@k8s-master1 ssl]# vi flanneld-csr.json
{
  "CN": "flanneld",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Guangdong",
      "L": "Guangzhou",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
[root@k8s-master1 ssl]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes flanneld-csr.json | cfssljson -bare flanneld

Copy the certificates to /opt/kubernetes/ssl:

[root@k8s-master1 ssl]# cp *.pem /opt/kubernetes/ssl
[root@k8s-master1 ssl]# scp *.pem k8s-worker1:/opt/kubernetes/ssl
[root@k8s-master1 ssl]# scp *.pem k8s-worker2:/opt/kubernetes/ssl

Inspect and verify a certificate:

openssl x509 -noout -text -in kubernetes.pem
cfssl-certinfo -cert kubernetes.pem

Deploying the etcd cluster

Prepare the etcd binaries

# cd /usr/local/src
# wget https://github.com/etcd-io/etcd/releases/download/v3.3.12/etcd-v3.3.12-linux-amd64.tar.gz
# tar zxf etcd-v3.3.12-linux-amd64.tar.gz
# cd etcd-v3.3.12-linux-amd64/
# cp etcd etcdctl /opt/kubernetes/bin/
# scp etcd etcdctl k8s-worker1:/opt/kubernetes/bin
# scp etcd etcdctl k8s-worker2:/opt/kubernetes/bin

Configure etcd parameters

[root@k8s-master1 ~]# vi /opt/kubernetes/cfg/etcd.conf
#[member]
ETCD_NAME="k8s-master1"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
#ETCD_SNAPSHOT_COUNTER="10000"
#ETCD_HEARTBEAT_INTERVAL="100"
#ETCD_ELECTION_TIMEOUT="1000"
ETCD_LISTEN_PEER_URLS="https://10.3.8.101:2380"
ETCD_LISTEN_CLIENT_URLS="https://10.3.8.101:2379,https://127.0.0.1:2379"
#ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
#ETCD_CORS=""
#[cluster]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://10.3.8.101:2380"
# if you use different ETCD_NAME (e.g. test),
# set ETCD_INITIAL_CLUSTER value for this name, i.e. "test=http://..."
ETCD_INITIAL_CLUSTER="k8s-master1=https://10.3.8.101:2380,k8s-worker1=https://10.3.8.104:2380,k8s-worker2=https://10.3.8.105:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="k8s-etcd-cluster"
ETCD_ADVERTISE_CLIENT_URLS="https://10.3.8.101:2379"
#[security]
CLIENT_CERT_AUTH="true"
ETCD_CA_FILE="/opt/kubernetes/ssl/ca.pem"
ETCD_CERT_FILE="/opt/kubernetes/ssl/etcd.pem"
ETCD_KEY_FILE="/opt/kubernetes/ssl/etcd-key.pem"
PEER_CLIENT_CERT_AUTH="true"
ETCD_PEER_CA_FILE="/opt/kubernetes/ssl/ca.pem"
ETCD_PEER_CERT_FILE="/opt/kubernetes/ssl/etcd.pem"
ETCD_PEER_KEY_FILE="/opt/kubernetes/ssl/etcd-key.pem"

Create the etcd data directory

[root@k8s-master1 ~]# mkdir -p /var/lib/etcd/default.etcd

Create the etcd systemd service

[root@k8s-master1 ~]# vi /etc/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd
EnvironmentFile=-/opt/kubernetes/cfg/etcd.conf
# set GOMAXPROCS to number of processors
ExecStart=/bin/bash -c "GOMAXPROCS=$(nproc) /opt/kubernetes/bin/etcd"

[Install]
WantedBy=multi-user.target

Distribute the files to the two worker nodes

[root@k8s-master1 ~]# scp /opt/kubernetes/cfg/etcd.conf k8s-worker1:/opt/kubernetes/cfg/
[root@k8s-master1 ~]# scp /opt/kubernetes/cfg/etcd.conf k8s-worker2:/opt/kubernetes/cfg/
[root@k8s-master1 ~]# scp /etc/systemd/system/etcd.service k8s-worker1:/etc/systemd/system/
[root@k8s-master1 ~]# scp /etc/systemd/system/etcd.service k8s-worker2:/etc/systemd/system/

Modify etcd.conf on k8s-worker1

[root@k8s-worker1 ~]# vi /opt/kubernetes/cfg/etcd.conf
# Unchanged settings are omitted
ETCD_NAME="k8s-worker1"
ETCD_LISTEN_PEER_URLS="https://10.3.8.104:2380"
ETCD_LISTEN_CLIENT_URLS="https://10.3.8.104:2379,https://127.0.0.1:2379"
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://10.3.8.104:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://10.3.8.104:2379"

Create the etcd data directory

[root@k8s-worker1 ~]# mkdir -p /var/lib/etcd/default.etcd

Modify etcd.conf on k8s-worker2

[root@k8s-worker2 ~]# vi /opt/kubernetes/cfg/etcd.conf
ETCD_NAME="k8s-worker2"
ETCD_LISTEN_PEER_URLS="https://10.3.8.105:2380"
ETCD_LISTEN_CLIENT_URLS="https://10.3.8.105:2379,https://127.0.0.1:2379"
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://10.3.8.105:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://10.3.8.105:2379"

Create the etcd data directory

[root@k8s-worker2 ~]# mkdir -p /var/lib/etcd/default.etcd

Reload systemd and start the etcd service (on all nodes)

# systemctl daemon-reload
# systemctl enable etcd
# systemctl start etcd
# systemctl status etcd

Verify the cluster

On all nodes:

# vi /etc/profile, and append the following at the end:
export ETCDCTL_CERT_FILE=/opt/kubernetes/ssl/etcd.pem
export ETCDCTL_KEY_FILE=/opt/kubernetes/ssl/etcd-key.pem
export ETCDCTL_CA_FILE=/opt/kubernetes/ssl/ca.pem
export ETCDCTL_ENDPOINTS=https://10.3.8.101:2379,https://10.3.8.104:2379,https://10.3.8.105:2379
# source /etc/profile
# etcdctl cluster-health
member 68901cd2c39ac88 is healthy: got healthy result from https://10.3.8.104:2379
member 5b4bf4f7034bb829 is healthy: got healthy result from https://10.3.8.105:2379
member ce825ba3add8b819 is healthy: got healthy result from https://10.3.8.101:2379
cluster is healthy

Deploying the master node

Prepare the Kubernetes packages

Go to https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG.md and pick the corresponding version to download:

[root@k8s-master1 ~]# cd /usr/local/src/

Download the official release bundle, then run the script it contains to fetch the binaries automatically:

[root@k8s-master1 src]# wget https://dl.k8s.io/v1.13.4/kubernetes.tar.gz
[root@k8s-master1 src]# tar zxf kubernetes.tar.gz
[root@k8s-master1 src]# cd kubernetes/cluster
[root@k8s-master1 cluster]# ./get-kube-binaries.sh

Alternatively, download the specific package manually:

[root@k8s-master1 src]# wget https://dl.k8s.io/v1.13.4/kubernetes-server-linux-amd64.tar.gz

There are also client and node tarballs, but they are not needed; the server tarball already contains everything.

Extract kubernetes-server-linux-amd64.tar.gz and copy the binaries to the appropriate directory:

[root@k8s-master1 src]# tar xzf kubernetes-server-linux-amd64.tar.gz
[root@k8s-master1 src]# cd kubernetes/server/bin
[root@k8s-master1 bin]# cp kube-apiserver kube-controller-manager kube-scheduler /opt/kubernetes/bin/

Create the client token file used by kube-apiserver

[root@k8s-master1 ~]# export BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
[root@k8s-master1 ~]# cat > /opt/kubernetes/ssl/bootstrap-token.csv <<EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF

The bootstrap-token.csv file holds the authentication entries, one per line, in the format token,username,uid,"group".

Create the basic username/password authentication file

[root@k8s-master1 ~]# vi /opt/kubernetes/ssl/basic-auth.csv
admin,admin,1
readonly,readonly,2

The format is password,username,uid; it will be used later for user authentication on the dashboard.

Deploy the Kubernetes API Server

[root@k8s-master1 ~]# vi /usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
ExecStart=/opt/kubernetes/bin/kube-apiserver \
  --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota,NodeRestriction \
  --bind-address=0.0.0.0 \
  --insecure-bind-address=127.0.0.1 \
  --authorization-mode=Node,RBAC \
  --runtime-config=rbac.authorization.k8s.io/v1 \
  --kubelet-https=true \
  --anonymous-auth=false \
  --basic-auth-file=/opt/kubernetes/ssl/basic-auth.csv \
  --enable-bootstrap-token-auth \
  --token-auth-file=/opt/kubernetes/ssl/bootstrap-token.csv \
  --service-cluster-ip-range=10.1.0.0/16 \
  --service-node-port-range=30000-32767 \
  --tls-cert-file=/opt/kubernetes/ssl/kubernetes.pem \
  --tls-private-key-file=/opt/kubernetes/ssl/kubernetes-key.pem \
  --client-ca-file=/opt/kubernetes/ssl/ca.pem \
  --service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \
  --etcd-cafile=/opt/kubernetes/ssl/ca.pem \
  --etcd-certfile=/opt/kubernetes/ssl/kubernetes.pem \
  --etcd-keyfile=/opt/kubernetes/ssl/kubernetes-key.pem \
  --etcd-servers=https://10.3.8.101:2379,https://10.3.8.104:2379,https://10.3.8.105:2379 \
  --enable-swagger-ui=true \
  --allow-privileged=true \
  --audit-log-maxage=30 \
  --audit-log-maxbackup=3 \
  --audit-log-maxsize=100 \
  --audit-log-path=/opt/kubernetes/log/api-audit.log \
  --event-ttl=1h \
  --v=2 \
  --logtostderr=false \
  --log-dir=/opt/kubernetes/log
Restart=on-failure
RestartSec=5
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

Start the API Server service

[root@k8s-master1 ~]# systemctl daemon-reload
[root@k8s-master1 ~]# systemctl enable kube-apiserver
[root@k8s-master1 ~]# systemctl start kube-apiserver
[root@k8s-master1 ~]# systemctl status kube-apiserver

Check the API versions

[root@k8s-master1 ~]# curl localhost:8080/api
{
  "kind": "APIVersions",
  "versions": [
    "v1"
  ],
  "serverAddressByClientCIDRs": [
    {
      "clientCIDR": "0.0.0.0/0",
      "serverAddress": "10.3.8.101:6443"
    }
  ]
}

Deploy the Controller Manager service

[root@k8s-master1 ~]# vi /usr/lib/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
ExecStart=/opt/kubernetes/bin/kube-controller-manager \
  --address=127.0.0.1 \
  --master=http://127.0.0.1:8080 \
  --allocate-node-cidrs=true \
  --service-cluster-ip-range=10.1.0.0/16 \
  --cluster-cidr=10.2.0.0/16 \
  --cluster-name=kubernetes \
  --cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \
  --cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem \
  --service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem \
  --root-ca-file=/opt/kubernetes/ssl/ca.pem \
  --leader-elect=true \
  --v=2 \
  --logtostderr=false \
  --log-dir=/opt/kubernetes/log

Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target

Start the Controller Manager

[root@k8s-master1 ~]# systemctl daemon-reload
[root@k8s-master1 ~]# systemctl enable kube-controller-manager
[root@k8s-master1 ~]# systemctl start kube-controller-manager
[root@k8s-master1 ~]# systemctl status kube-controller-manager

Deploy the Kubernetes Scheduler

[root@k8s-master1 ~]# vi /usr/lib/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
ExecStart=/opt/kubernetes/bin/kube-scheduler \
  --address=127.0.0.1 \
  --master=http://127.0.0.1:8080 \
  --leader-elect=true \
  --v=2 \
  --logtostderr=false \
  --log-dir=/opt/kubernetes/log

Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target

Start the Kubernetes Scheduler

[root@k8s-master1 ~]# systemctl daemon-reload
[root@k8s-master1 ~]# systemctl enable kube-scheduler
[root@k8s-master1 ~]# systemctl start kube-scheduler
[root@k8s-master1 ~]# systemctl status kube-scheduler

Deploy the kubectl command-line tool

[root@k8s-master1 ~]# cp /usr/local/src/kubernetes/server/bin/kubectl /opt/kubernetes/bin/

Configure command completion:

[root@k8s-master1 ~]# yum install bash-completion
[root@k8s-master1 ~]# source /usr/share/bash-completion/bash_completion
[root@k8s-master1 ~]# source <(kubectl completion bash)

By default (i.e., when --kubeconfig= is not specified), kubectl looks for a file named config under $HOME/.kube. This file contains the cluster address and port, user name, password, certificates, namespace, and so on, and kubectl uses it to build the context for accessing the cluster. None of the kubectl config commands below specify --kubeconfig=.

Set the cluster parameters

[root@k8s-master1 ~]# kubectl config set-cluster kubernetes \
--certificate-authority=/opt/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=https://10.3.8.101:6443

Set the client authentication parameters

[root@k8s-master1 ~]# kubectl config set-credentials admin \
--client-certificate=/opt/kubernetes/ssl/admin.pem \
--embed-certs=true \
--client-key=/opt/kubernetes/ssl/admin-key.pem

Set the context parameters

[root@k8s-master1 ~]# kubectl config set-context kubernetes \
--cluster=kubernetes \
--user=admin

Set the default context

[root@k8s-master1 ~]# kubectl config use-context kubernetes

View the kubeconfig contents

[root@k8s-master1 ~]# kubectl config view
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: DATA+OMITTED
    server: https://10.3.8.101:6443
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: admin
  name: kubernetes
current-context: kubernetes
kind: Config
preferences: {}
users:
- name: admin
  user:
    client-certificate-data: REDACTED
    client-key-data: REDACTED

Verify the master node is functional

[root@k8s-master1 ~]# kubectl get cs
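If the control-plane components and etcd are healthy, the output looks roughly like this (illustrative):

NAME                 STATUS    MESSAGE             ERROR
scheduler            Healthy   ok
controller-manager   Healthy   ok
etcd-0               Healthy   {"health": "true"}
etcd-1               Healthy   {"health": "true"}
etcd-2               Healthy   {"health": "true"}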

Prepare to deploy the worker nodes
Copy the relevant binaries to the worker nodes

[root@k8s-master1 ~]# cd /usr/local/src/kubernetes/server/bin/
[root@k8s-master1 bin]# scp kubelet kube-proxy k8s-worker1:/opt/kubernetes/bin/
[root@k8s-master1 bin]# scp kubelet kube-proxy k8s-worker2:/opt/kubernetes/bin/

Create the role binding

[root@k8s-master1 ssl]# kubectl create clusterrolebinding kubelet-bootstrap \
--clusterrole=system:node-bootstrapper \
--user=kubelet-bootstrap

Create the kubeconfig files and set the cluster parameters

[root@k8s-master1 ssl]# kubectl config set-cluster kubernetes \
  --certificate-authority=/opt/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=https://10.3.8.101:6443 \
  --kubeconfig=bootstrap.kubeconfig
[root@k8s-master1 ssl]# kubectl config set-cluster kubernetes \
  --certificate-authority=/opt/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=https://10.3.8.101:6443 \
  --kubeconfig=kube-proxy.kubeconfig

Set the client authentication parameters; the token is the one generated earlier

[root@k8s-master1 ssl]# kubectl config set-credentials kubelet-bootstrap \
   --token=${BOOTSTRAP_TOKEN} \
   --kubeconfig=bootstrap.kubeconfig
[root@k8s-master1 ssl]# kubectl config set-credentials kube-proxy \
   --client-certificate=/opt/kubernetes/ssl/kube-proxy.pem \
   --client-key=/opt/kubernetes/ssl/kube-proxy-key.pem \
   --embed-certs=true \
   --kubeconfig=kube-proxy.kubeconfig

Set the context parameters

[root@k8s-master1 ssl]# kubectl config set-context default \
   --cluster=kubernetes \
   --user=kubelet-bootstrap \
   --kubeconfig=bootstrap.kubeconfig
[root@k8s-master1 ssl]# kubectl config set-context default \
   --cluster=kubernetes \
   --user=kube-proxy \
   --kubeconfig=kube-proxy.kubeconfig

Select the default context, then distribute the bootstrap.kubeconfig and kube-proxy.kubeconfig files generated on the master to the worker nodes:
[root@k8s-master1 ssl]# kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
[root@k8s-master1 ssl]# kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig

[root@k8s-master1 ssl]# cp *.kubeconfig /opt/kubernetes/cfg
[root@k8s-master1 ssl]# scp *.kubeconfig k8s-worker1:/opt/kubernetes/cfg
[root@k8s-master1 ssl]# scp *.kubeconfig k8s-worker2:/opt/kubernetes/cfg

Deploying the worker nodes

Worker nodes carry the cluster's workloads; the master assigns work to each of them, and every worker runs the following key services:
kubelet: creates, starts, and stops the containers of the node's pods, and cooperates closely with the master to provide basic cluster management.
kube-proxy: the component that implements communication and load balancing for Kubernetes services.
Docker Engine (docker): the container engine, responsible for creating and managing containers on the local host.

Deploy kubelet

In a Kubernetes cluster, every worker node runs the kubelet process, which handles the tasks the master assigns to that node and manages the pods and their containers. The kubelet registers node information with the API Server, periodically reports the node's resource usage to the master, and monitors containers and node resources via cAdvisor.
Set up CNI support

[root@k8s-worker1 ~]# mkdir -p /etc/cni/net.d
[root@k8s-worker1 ~]# vi /etc/cni/net.d/10-default.conf
{
        "name": "flannel",
        "type": "flannel",
        "delegate": {
            "bridge": "docker0",
            "isDefaultGateway": true,
            "mtu": 1400
        }
}

Create the kubelet service configuration

[root@k8s-worker1 ~]# mkdir /var/lib/kubelet
[root@k8s-worker1 ~]# vi /usr/lib/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service
Requires=docker.service

[Service]
WorkingDirectory=/var/lib/kubelet
ExecStart=/opt/kubernetes/bin/kubelet \
  --address=10.3.8.104 \
  --hostname-override=10.3.8.104 \
  --pod-infra-container-image=mirrorgooglecontainers/pause-amd64 \
  --experimental-bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \
  --kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \
  --cert-dir=/opt/kubernetes/ssl \
  --network-plugin=cni \
  --cni-conf-dir=/etc/cni/net.d \
  --cni-bin-dir=/opt/kubernetes/bin/cni \
  --cluster-dns=10.1.0.2 \
  --cluster-domain=cluster.local. \
  --hairpin-mode hairpin-veth \
  --allow-privileged=true \
  --fail-swap-on=false \
  --cgroup-driver=systemd \
  --runtime-cgroups=/systemd/system.slice \
  --kubelet-cgroups=/systemd/system.slice \
  --v=2 \
  --logtostderr=false \
  --log-dir=/opt/kubernetes/log
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target

Start kubelet

[root@k8s-worker1 ~]# systemctl daemon-reload
[root@k8s-worker1 ~]# systemctl enable kubelet
[root@k8s-worker1 ~]# systemctl start kubelet
[root@k8s-worker1 ~]# systemctl status kubelet

View the CSR requests (note: run this on k8s-master1)

[root@k8s-master1 ~]# kubectl get csr
NAME                                                   AGE   REQUESTOR           CONDITION
node-csr-THuGyzjc4RyGvpPH3iiutbvegrRZX-Zyf_KJGhd1WhA   28s   kubelet-bootstrap   Pending

Approve the kubelet TLS certificate requests

[root@k8s-master1 ~]# kubectl get csr|grep 'Pending' | awk 'NR>0{print $1}'| xargs kubectl certificate approve

Afterwards, check the node status; if it shows Ready, everything is working:

[root@k8s-master1 ~]# kubectl get node
NAME         STATUS   ROLES    AGE   VERSION
10.3.8.104   Ready    <none>   54s   v1.13.4

Deploy Kubernetes Proxy (kube-proxy)

Kube-proxy's IPVS mode has been generally available since Kubernetes 1.11 and can be used instead of the older iptables mode; it improves performance and stability in large clusters, so we enable it here.
Configure kube-proxy to use IPVS

[root@k8s-worker1 ~]# yum install -y ipvsadm ipset conntrack
[root@k8s-worker1 ~]# lsmod|grep ip_vs
ip_vs_sh               12688  0 
ip_vs_wrr              12697  0 
ip_vs_rr               12600  1 
ip_vs                 145497  7 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack          133095  7 ip_vs,......,nf_conntrack_ipv4
libcrc32c              12644  4 xfs,ip_vs,nf_nat,nf_conntrack
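
If lsmod shows nothing, a minimal sketch to load the IPVS modules by hand (these are the standard CentOS 7 module names; persist them via modules-load.d if you want them loaded on boot):

[root@k8s-worker1 ~]# modprobe -a ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh nf_conntrack_ipv4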

Create the kube-proxy service configuration

[root@k8s-worker1 ~]# mkdir /var/lib/kube-proxy
[root@k8s-worker1 ~]# vi /usr/lib/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target
[Service]
WorkingDirectory=/var/lib/kube-proxy
ExecStart=/opt/kubernetes/bin/kube-proxy \
  --bind-address=10.3.8.104 \
  --hostname-override=10.3.8.104 \
  --kubeconfig=/opt/kubernetes/cfg/kube-proxy.kubeconfig \
  --masquerade-all \
  --feature-gates=SupportIPVSProxyMode=true \
  --proxy-mode=ipvs \
  --ipvs-min-sync-period=5s \
  --ipvs-sync-period=5s \
  --ipvs-scheduler=rr \
  --v=2 \
  --logtostderr=false \
  --log-dir=/opt/kubernetes/log
Restart=on-failure
RestartSec=5
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target

Start Kubernetes Proxy

[root@k8s-worker1 ~]# systemctl daemon-reload
[root@k8s-worker1 ~]# systemctl enable kube-proxy
[root@k8s-worker1 ~]# systemctl start kube-proxy
[root@k8s-worker1 ~]# systemctl status kube-proxy

Although status shows active (running) in green, there is still a problem in the logs:

Failed to execute iptables-restore for nat: exit status 1 (iptables-restore: line 7 failed

Check the LVS state

[root@k8s-worker1 ~]# ipvsadm -L -n
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  10.1.0.1:443 rr
  -> 10.3.8.101:6443              Masq    1      0          0     

At this point kubelet and kube-proxy are deployed on k8s-worker1. Repeat the same steps on k8s-worker2 (adjusting the IP addresses), then go back to k8s-master1 and check the cluster status:

[root@k8s-master1 ~]# kubectl get nodes
NAME       STATUS   ROLES    AGE   VERSION
10.3.8.104   Ready    <none>   65m   v1.13.4
10.3.8.105   Ready    <none>   14s   v1.13.4

Because kubelet is not installed on the master, kubectl get nodes does not show the master.
Label the nodes:

[root@k8s-master1 docker]# kubectl label node 10.3.8.104  node-role.kubernetes.io/node='node'
[root@k8s-master1 docker]# kubectl label node 10.3.8.105  node-role.kubernetes.io/node='node'
[root@k8s-master1 docker]# kubectl get node
NAME       STATUS   ROLES   AGE   VERSION
10.3.8.104   Ready    node    12d   v1.13.4
10.3.8.105   Ready    node    12d   v1.13.4

Deploying the flannel network

Install the flannel package

[root@k8s-master1 ~]# cd /usr/local/src
[root@k8s-master1 src]# wget \
https://github.com/coreos/flannel/releases/download/v0.11.0/flannel-v0.11.0-linux-amd64.tar.gz
[root@k8s-master1 src]# tar zxvf flannel-v0.11.0-linux-amd64.tar.gz
[root@k8s-master1 src]# cp flanneld mk-docker-opts.sh /opt/kubernetes/bin/
[root@k8s-master1 src]# scp flanneld mk-docker-opts.sh k8s-worker1:/opt/kubernetes/bin/
[root@k8s-master1 src]# scp flanneld mk-docker-opts.sh k8s-worker2:/opt/kubernetes/bin/
[root@k8s-master1 src]# cd kubernetes/cluster/centos/node/bin/
[root@k8s-master1 bin]# cp remove-docker0.sh /opt/kubernetes/bin/
[root@k8s-master1 bin]# scp remove-docker0.sh k8s-worker1:/opt/kubernetes/bin/
[root@k8s-master1 bin]# scp remove-docker0.sh k8s-worker2:/opt/kubernetes/bin/

Configure flannel

[root@k8s-master1 bin]# vi /opt/kubernetes/cfg/flannel
FLANNEL_ETCD="-etcd-endpoints=https://10.3.8.101:2379,https://10.3.8.104:2379,https://10.3.8.105:2379"
FLANNEL_ETCD_KEY="-etcd-prefix=/kubernetes/network"
FLANNEL_ETCD_CAFILE="-etcd-cafile=/opt/kubernetes/ssl/ca.pem"
FLANNEL_ETCD_CERTFILE="-etcd-certfile=/opt/kubernetes/ssl/flanneld.pem"
FLANNEL_ETCD_KEYFILE="-etcd-keyfile=/opt/kubernetes/ssl/flanneld-key.pem"

Create the flannel systemd service

[root@k8s-master1 ~]# vi /usr/lib/systemd/system/flannel.service
[Unit]
Description=Flanneld overlay address etcd agent
After=network.target
Before=docker.service
[Service]
EnvironmentFile=-/opt/kubernetes/cfg/flannel
ExecStartPre=/opt/kubernetes/bin/remove-docker0.sh
ExecStart=/opt/kubernetes/bin/flanneld ${FLANNEL_ETCD} ${FLANNEL_ETCD_KEY} ${FLANNEL_ETCD_CAFILE} ${FLANNEL_ETCD_CERTFILE} ${FLANNEL_ETCD_KEYFILE}
ExecStartPost=/opt/kubernetes/bin/mk-docker-opts.sh -d /run/flannel/docker
Type=notify
[Install]
WantedBy=multi-user.target
RequiredBy=docker.service

The mk-docker-opts.sh script writes the pod subnet allocated to flanneld into /run/flannel/docker; Docker later uses the environment variables in that file to configure the docker0 bridge.
flanneld communicates with other nodes over the interface that carries the system default route; on nodes with multiple interfaces (e.g. internal and public), the -iface flag can pin the interface, as in the sketch below.
flanneld must run as root.
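
A minimal sketch of pinning flanneld to a specific interface; ens192 is assumed here because that is the interface carrying 10.3.8.0/24 in the route table shown later, so substitute your own name. The flag is added to the ExecStart line of flannel.service:

ExecStart=/opt/kubernetes/bin/flanneld -iface=ens192 ${FLANNEL_ETCD} ${FLANNEL_ETCD_KEY} ${FLANNEL_ETCD_CAFILE} ${FLANNEL_ETCD_CERTFILE} ${FLANNEL_ETCD_KEYFILE}
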
Copy the configuration to the other nodes

[root@k8s-master1 ~]# scp /opt/kubernetes/cfg/flannel k8s-worker1:/opt/kubernetes/cfg/
[root@k8s-master1 ~]# scp /opt/kubernetes/cfg/flannel k8s-worker2:/opt/kubernetes/cfg/
[root@k8s-master1 ~]# scp /usr/lib/systemd/system/flannel.service k8s-worker1:/usr/lib/systemd/system/
[root@k8s-master1 ~]# scp /usr/lib/systemd/system/flannel.service k8s-worker2:/usr/lib/systemd/system/

Install the CNI plugins

CNI plugin releases:

https://github.com/containernetworking/plugins/releases
[root@k8s-master1 ~]# cd /usr/local/src/
[root@k8s-master1 src]# wget \
https://github.com/containernetworking/plugins/releases/download/v0.7.5/cni-plugins-amd64-v0.7.5.tgz
[root@k8s-master1 src]# tar zxf cni-plugins-amd64-v0.7.5.tgz -C /opt/kubernetes/bin/cni/
[root@k8s-master1 src]# scp -r /opt/kubernetes/bin/cni/* k8s-worker1:/opt/kubernetes/bin/cni/
[root@k8s-master1 src]# scp -r /opt/kubernetes/bin/cni/* k8s-worker2:/opt/kubernetes/bin/cni/

Create the flannel network key in etcd on the master node

[root@k8s-master1 ~]# etcdctl --ca-file /opt/kubernetes/ssl/ca.pem \
--cert-file /opt/kubernetes/ssl/flanneld.pem --key-file /opt/kubernetes/ssl/flanneld-key.pem \
--no-sync -C https://10.3.8.101:2379,https://10.3.8.104:2379,https://10.3.8.105:2379 \
mk /kubernetes/network/config '{ "Network": "10.2.0.0/16", "Backend": { "Type": "vxlan", "VNI": 1 }}'

Start flannel

[root@k8s-master1 ~]# systemctl daemon-reload
[root@k8s-master1 ~]# systemctl enable flannel
[root@k8s-master1 ~]# systemctl start flannel
[root@k8s-master1 ~]# systemctl status flannel

View the network configuration

[root@k8s-master1 ~]# etcdctl ls /kubernetes/network -r
/kubernetes/network/config
/kubernetes/network/subnets
/kubernetes/network/subnets/10.2.42.0-24
/kubernetes/network/subnets/10.2.52.0-24
/kubernetes/network/subnets/10.2.63.0-24

View the subnet records

[root@k8s-master1 ~]# etcdctl get /kubernetes/network/subnets/10.2.52.0-24
{"PublicIP":"10.3.8.104","BackendType":"vxlan","BackendData":{"VtepMAC":"36:3a:ec:77:84:66"}}
[root@k8s-master1 ~]# etcdctl get /kubernetes/network/subnets/10.2.42.0-24
{"PublicIP":"10.3.8.105","BackendType":"vxlan","BackendData":{"VtepMAC":"12:ef:62:03:5c:cb"}}
[root@k8s-master1 ~]# etcdctl get /kubernetes/network/subnets/10.2.63.0-24
{"PublicIP":"10.3.8.101","BackendType":"vxlan","BackendData":{"VtepMAC":"96:33:67:a3:f4:b0"}}

When the flannel service starts it mainly does the following:
fetches the network configuration from etcd;
allocates a subnet and registers it in etcd;
writes the subnet information to /run/flannel/subnet.env.

[root@k8s-master1 ~]# cat /run/flannel/subnet.env 
FLANNEL_NETWORK=10.2.0.0/16
FLANNEL_SUBNET=10.2.63.1/24
FLANNEL_MTU=1450
FLANNEL_IPMASQ=false

The mk-docker-opts.sh script (run via ExecStartPost) then converts subnet.env into a Docker environment file, /run/flannel/docker:

[root@k8s-master1 ~]# cat /run/flannel/docker 
DOCKER_OPT_BIP="--bip=10.2.63.1/24"
DOCKER_OPT_IPMASQ="--ip-masq=true"
DOCKER_OPT_MTU="--mtu=1450"
DOCKER_OPTS=" --bip=10.2.63.1/24 --ip-masq=true --mtu=1450"

Configure Docker to use flannel
In the [Unit] section, append flannel.service to the After= line and add Requires=flannel.service below Wants=.
In the [Service] section, add EnvironmentFile=-/run/flannel/docker after Type= and append $DOCKER_OPTS to ExecStart.
The resulting configuration:

[root@k8s-master1 ~]# vi /usr/lib/systemd/system/docker.service 
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service containerd.service flannel.service
Wants=network-online.target
Requires=docker.socket flannel.service

[Service]
Type=notify
EnvironmentFile=-/run/flannel/docker
ExecStart=/usr/bin/dockerd $DOCKER_OPTS
...

Distribute the configuration to the other two nodes (rsync must be installed on both source and destination machines):

[root@k8s-master1 ~]# rsync -av /usr/lib/systemd/system/docker.service  k8s-worker1:/usr/lib/systemd/system/docker.service 
[root@k8s-master1 ~]# rsync -av /usr/lib/systemd/system/docker.service  k8s-worker2:/usr/lib/systemd/system/docker.service

Restart the Docker service on all nodes

# systemctl daemon-reload
# systemctl restart docker

Run ip a; if docker0 and flannel.1 are in the same subnet, everything is working:

[root@k8s-master1 ~]# ip a | egrep "flannel|docker" | grep inet
    inet 10.2.63.0/32 scope global flannel.1
    inet 10.2.63.1/24 brd 10.2.63.255 scope global docker0

View the host routing table:

[root@k8s-master1 ~]# ip route
default via 10.3.8.254 dev ens192 proto static metric 100 
10.2.42.0/24 via 10.2.42.0 dev flannel.1 onlink 
10.2.52.0/24 via 10.2.52.0 dev flannel.1 onlink 
10.2.63.0/24 dev docker0 proto kernel scope link src 10.2.63.1 
10.3.8.0/24 dev ens192 proto kernel scope link src 10.3.8.101 metric 100 

At this point the flannel network is configured and the Kubernetes cluster is deployed. Next, create some pods to test network connectivity across the cluster.
Deploy a test application

[root@k8s-master1 ~]# kubectl run my-nginx --image=nginx --port=80 --replicas=3

Fourteen minutes in and the nginx image still has not been pulled? Something is wrong; check the events:

[root@k8s-master1 ~]# kubectl describe pod my-nginx-64fc468bd4-7gbck
......
...... mirrorgooglecontainers/pause-amd64:latest not found

The pause-amd64 image cannot be pulled. Switch to the worker node, pull it from Aliyun, and retag it:

[root@k8s-worker1 ~]# docker pull registry.cn-beijing.aliyuncs.com/zhoujun/pause-amd64:3.1
[root@k8s-worker1 ~]# docker tag registry.cn-beijing.aliyuncs.com/zhoujun/pause-amd64:3.1 mirrorgooglecontainers/pause-amd64:latest
[root@k8s-worker1 ~]# systemctl restart docker

Do the same on k8s-worker2.

[root@k8s-worker1 ~]# docker ps -a

You can see two containers: one is nginx and the other is pause-amd64.

On k8s-master1, check the pod IPs:
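
A quick way to see them (the pod IPs pinged below come from this output):

[root@k8s-master1 ~]# kubectl get pod -o wide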

Test connectivity:

[root@k8s-master1 ~]# ping 10.2.90.5 -c 2
[root@k8s-master1 ~]# ping 10.2.85.3 -c 2

Expose the deployment by creating a service

[root@k8s-master1 ~]# kubectl expose deployment my-nginx --port=8080 --target-port=80 --external-ip=10.3.8.104

The external-ip is the externally reachable IP of one of the worker nodes.

[root@k8s-master1 ~]# curl -I http://10.3.8.104:8080
HTTP/1.1 200 OK

With that, the Kubernetes cluster is fully up.

Delete the pods:

[root@k8s-master1 ~]# kubectl scale deployment/my-nginx --replicas=0
[root@k8s-master1 ~]# kubectl delete deployment/my-nginx

The following curl commands return the cluster's pod list, service list, and replication controller list, respectively:

curl localhost:8080/api/v1/pods
curl localhost:8080/api/v1/services
curl localhost:8080/api/v1/replicationcontrollers

Deploying CoreDNS and the Dashboard

Deploy CoreDNS

[root@k8s-master1 ~]# cd /usr/local/src/kubernetes/cluster/addons/dns/coredns
[root@k8s-master1 coredns]# cp coredns.yaml.base coredns.yaml
[root@k8s-master1 coredns]# vi coredns.yaml
# Change the following two places to your own domain and cluster IP:
1. kubernetes __PILLAR__DNS__DOMAIN__
   change to: kubernetes cluster.local.
2. clusterIP: __PILLAR__DNS__SERVER__
   change to: clusterIP: 10.1.0.2
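
Equivalently, a hedged one-liner for the two substitutions above (it assumes the placeholders appear exactly as shown in coredns.yaml.base):

[root@k8s-master1 coredns]# sed -i 's/__PILLAR__DNS__DOMAIN__/cluster.local./g; s/__PILLAR__DNS__SERVER__/10.1.0.2/g' coredns.yaml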

Create the CoreDNS service:

[root@k8s-master1 coredns]# kubectl apply -f coredns.yaml
[root@k8s-master1 coredns]# kubectl get pod -n kube-system -o wide
NAME                READY   STATUS           RESTARTS   AGE     IP          NODE
coredns-fff89c9b9-5tttj   0/1     ImagePullBackOff   0          3m10s   10.2.52.6   10.3.8.104 

The status is ImagePullBackOff; check the events:

[root@k8s-master1 coredns]# kubectl describe pod coredns-fff89c9b9-5tttj -n kube-system

The last few lines show: 10.3.8.104 Back-off pulling image "k8s.gcr.io/coredns:1.2.6"
On the worker nodes (both k8s-worker1 and k8s-worker2), pull the coredns image from an alternative source and retag it:

# docker pull coredns/coredns:1.2.6
# docker tag coredns/coredns:1.2.6 k8s.gcr.io/coredns:1.2.6

After a short while the pod shows a Running status.

[root@k8s-master1 coredns]# kubectl scale deploy coredns --replicas=2 -n kube-system
[root@k8s-master1 coredns]# kubectl get pod -o wide -n kube-system
NAME                      READY   STATUS    RESTARTS   AGE   IP          NODE
coredns-fff89c9b9-5tttj   1/1     Running   0          50m   10.2.52.6   10.3.8.104
coredns-fff89c9b9-lv65z   1/1     Running   0          26s   10.2.42.6   10.3.8.105
[root@k8s-master1 coredns]# kubectl get svc --all-namespaces
NAMESPACE     NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)         AGE
default       kubernetes   ClusterIP   10.1.0.1     <none>        443/TCP         5d23h
kube-system   kube-dns     ClusterIP   10.1.0.2     <none>        53/UDP,53/TCP   32m

CoreDNS resolution test
Do not use image=docker.io/busybox; nslookup in that image fails this test.

[root@k8s-master1 coredns]# kubectl run dig --rm -it --image=docker.io/azukiapp/dig /bin/sh
kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.
If you don't see a command prompt, try pressing enter.
/ # nslookup www.baidu.com
Server:         10.1.0.2
Address:        10.1.0.2#53

Non-authoritative answer:
www.baidu.com   canonical name = www.a.shifen.com.
Name:   www.a.shifen.com
Address: 163.177.151.109
Name:   www.a.shifen.com
Address: 163.177.151.110

Deploy the Dashboard

Download the dashboard manifests; the repository below has already adapted them, so we can apply them directly:

[root@k8s-master1 ~]# mkdir /opt/kubernetes/dashboard && cd /opt/kubernetes/dashboard
[root@k8s-master1 dashboard]# git clone https://github.com/unixhot/salt-kubernetes.git
[root@k8s-master1 dashboard]# cd salt-kubernetes/addons
[root@k8s-master1 addons]# kubectl apply -f dashboard/
[root@k8s-master1 addons]# kubectl cluster-info
Kubernetes master is running at https://10.3.8.101:6443
CoreDNS is running at https://10.3.8.101:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
kubernetes-dashboard is running at https://10.3.8.101:6443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy

There are three ways to access the dashboard:
via kube-apiserver, see the kubectl cluster-info output above;
via kubectl proxy (see the sketch below);
via https://NodeIP:NodePort.
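
For the kubectl proxy route, a minimal sketch (8001 is kubectl proxy's default local port):

[root@k8s-master1 ~]# kubectl proxy
Starting to serve on 127.0.0.1:8001

Then open http://127.0.0.1:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/ in a browser on that host.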

Check the dashboard's exposed NodePort

[root@k8s-master1 addons]# kubectl get svc  --all-namespaces
NAMESPACE     NAME                   TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)         AGE
default       kubernetes             ClusterIP   10.1.0.1       <none>        443/TCP         6d
kube-system   kube-dns               ClusterIP   10.1.0.2       <none>        53/UDP,53/TCP   75m
kube-system   kubernetes-dashboard   NodePort    10.1.146.241   <none>        443:30001/TCP   3m1s

The dashboard can then be reached at https://10.3.8.104:30001/ or https://10.3.8.105:30001.
On the login screen choose Token, then run the following command on the master to obtain the authentication token:

[root@k8s-master1 ~]# kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')

Copy the value after the token: line into the token field to log in to the dashboard.

Deploying the Traefik Ingress
Understanding Ingress
Simply put, an Ingress is the entry point for traffic reaching the Kubernetes cluster from outside; it forwards a user's URL request to the appropriate service. An Ingress plays the role of a reverse-proxy load balancer such as nginx or apache, and it also carries the rule definitions, i.e. the URL routing information; the routing information is refreshed by the Ingress controller.

Understanding the Ingress Controller
An Ingress controller is essentially a watcher: it keeps talking to the Kubernetes API and reacts in real time to changes in services, pods, and so on, such as pods or services being added or removed. Combining this information with the Ingress rules described below, it generates configuration, updates the reverse-proxy load balancer, and reloads it, thereby providing service discovery. Traefik is one such Ingress controller implementation.

About Traefik
Traefik is an open-source reverse proxy and load balancer. Its biggest strength is that it integrates directly with common microservice systems and supports automatic, dynamic configuration. It currently supports backends such as Docker, Swarm, Mesos/Marathon, Kubernetes, Consul, Etcd, Zookeeper, BoltDB, and a REST API.

Deploy Traefik Ingress
This article deploys Traefik Ingress as a DaemonSet to publish services.
The Traefik deployment manifests can be found in the following GitHub repository:

https://github.com/rootsongjc/kubernetes-handbook/tree/master/manifests/traefik-ingress

Download the relevant YAML files:

mkdir /opt/kubernetes/Traefik/ && cd /opt/kubernetes/Traefik/
traefik_url="https://raw.githubusercontent.com/rootsongjc/kubernetes-handbook/master/manifests/traefik-ingress"
wget $traefik_url/ingress-rbac.yaml
wget $traefik_url/ingress.yaml
wget $traefik_url/traefik.yaml
wget $traefik_url/ui.yaml

Among them, ingress-rbac.yaml sets up the service account and its authorization and needs no modification; its contents are:

[root@k8s-master Traefik]# vim ingress-rbac.yaml 
apiVersion: v1
kind: ServiceAccount
metadata:
  name: ingress
  namespace: kube-system

---

kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: ingress
subjects:
  - kind: ServiceAccount
    name: ingress
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io
[root@k8s-master Traefik]# kubectl apply -f ingress-rbac.yaml

Create the DaemonSet
Since Traefik is deployed only on designated edge nodes, label those nodes first:

[root@k8s-master ~]# kubectl get nodes --show-labels
[root@k8s-master ~]# kubectl label node 10.3.8.104 traefik=proxy
[root@k8s-master ~]# kubectl label node 10.3.8.105 traefik=proxy
[root@k8s-master ~]# kubectl get nodes --show-labels

Deploy the Traefik service as a DaemonSet:

[root@k8s-master Traefik]# vi traefik.yaml
# Modify the nodeSelector at the end of the file.
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: traefik-ingress-lb
  namespace: kube-system
  labels:
    k8s-app: traefik-ingress-lb
spec:
  template:
    metadata:
      labels:
        k8s-app: traefik-ingress-lb
        name: traefik-ingress-lb
    spec:
      terminationGracePeriodSeconds: 60
      hostNetwork: true
      restartPolicy: Always
      serviceAccountName: ingress
      containers:
      - image: traefik
        name: traefik-ingress-lb
        resources:
          limits:
            cpu: 200m
            memory: 30Mi
          requests:
            cpu: 100m
            memory: 20Mi
        ports:
        - name: http
          containerPort: 80
          hostPort: 80
        - name: admin
          containerPort: 8580
          hostPort: 8580
        args:
        - --web
        - --web.address=:8580
        - --kubernetes
      nodeSelector:
        # edgenode: "true"
        traefik: "proxy"

Traefik listens on ports 80 and 8580 of the node: 80 serves regular traffic and 8580 serves its built-in UI. The UI port defaults to 8080, but it conflicted with something else in this environment, so it is changed here.

[root@k8s-master Traefik]# kubectl apply -f traefik.yaml

Deploy the Traefik UI

[root@k8s-master Traefik]# cat ui.yaml
# No modification needed
apiVersion: v1
kind: Service
metadata:
  name: traefik-web-ui
  namespace: kube-system
spec:
  selector:
    k8s-app: traefik-ingress-lb
  ports:
  - name: web
    port: 80
    targetPort: 8580
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: traefik-web-ui
  namespace: kube-system
spec:
  rules:
  - host: traefik-ui.local
    http:
      paths:
      - path: /
        backend:
          serviceName: traefik-web-ui
          servicePort: web
[root@k8s-master Traefik]# kubectl apply -f ui.yaml

Prepare two example services
Example one:
kubectl run my-nginx --image=nginx --replicas=2   # listens on port 80 by default
kubectl expose deploy my-nginx --port=88 --target-port=80 --name=my-nginx
Example two:
kubectl run whats-my-ip --image=cloudnativelabs/whats-my-ip --replicas=2  # listens on port 8080 by default
kubectl expose deploy whats-my-ip --target-port=8080 --port=8080 --name=whats-my-ip
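
A quick sanity check that both services exist with the expected ports (output omitted):

kubectl get svc my-nginx whats-my-ip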


Create the rules in ingress.yaml
Ingress supports two routing styles: by hostname and by path. Hostname routing uses different hostnames such as xxx.domain.com and yyy.domain.com; path routing uses the same hostname with different paths such as name.domain.com/path1 and name.domain.com/path2. The hostname style is used here; a path-based example is sketched below for reference.
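
For reference, a path-based rule would look roughly like this (the hostname and paths are placeholders, not part of this deployment; the backends reuse the two services above):

apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: path-demo
  namespace: default
spec:
  rules:
  - host: name.domain.com
    http:
      paths:
      - path: /path1
        backend:
          serviceName: my-nginx
          servicePort: 88
      - path: /path2
        backend:
          serviceName: whats-my-ip
          servicePort: 8080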
[root@k8s-master Traefik]# vi ingress.yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: traefik-ingress
  namespace: default
spec:
  rules:
  - host: mynginx.linuxs.top    # the hostname to access
    http:
      paths:
      - path: /
        backend:
          serviceName: my-nginx     # the associated service name and port
          servicePort: 88
  - host: whatsmyip.linuxs.top
    http:
      paths:
      - path: /
        backend:
          serviceName: whats-my-ip
          servicePort: 8080

When a new service is added, edit this file and run kubectl replace -f ingress.yaml to update the rules.

[root@k8s-master Traefik]# kubectl apply -f ingress.yaml

Check which services Traefik has picked up:

[root@k8s-master1 traefik]# kubectl get ing
NAME              HOSTS                                     ADDRESS   PORTS   AGE
traefik-ingress   mynginx.linuxs.top,whatsmyip.linuxs.top             80      56m

You can also view which services Traefik has picked up via its UI at http://10.3.8.104:8580/dashboard/:

Test:
Run the following on any node in the cluster:

[root@k8s-master1 traefik]# curl -I -H Host:mynginx.linuxs.top http://10.3.8.104
HTTP/1.1 200 OK
Accept-Ranges: bytes
Content-Length: 612
Content-Type: text/html
Date: Thu, 04 Apr 2019 13:25:47 GMT
Etag: "5c9a3176-264"
Last-Modified: Tue, 26 Mar 2019 14:04:38 GMT
Server: nginx/1.15.10

[root@k8s-master1 traefik]# curl -I -H Host:whatsmyip.linuxs.top http://10.3.8.104
HTTP/1.1 200 OK
Content-Length: 51
Content-Type: text/plain; charset=utf-8
Date: Thu, 04 Apr 2019 13:26:15 GMT

To access from outside the Kubernetes cluster, set up DNS or add the following entries to your local hosts file:
10.3.8.104 mynginx.linuxs.top
10.3.8.104 whatsmyip.linuxs.top

Visit mynginx.linuxs.top in a browser:

Visit whatsmyip.linuxs.top in a browser:

Refresh the page and the request round-robins to the other backend:
