kubernetes 1.8 + calico

docker-ce Version:      17.09.0-ce

This Docker version has a bug: memory limits cannot be specified in the yml or json manifests, otherwise the container fails to start when the service is launched. So before importing the yml files below, remove the memory resource-limit settings and everything will run normally.

kube-apiserver, kube-controller-manager and kube-scheduler are deployed locally from binaries; 10.64.221.244 acts as both master and node.


master:

master-2347205.lvs01.dev.ebayc3.com    10.64.221.244

node:

node01-2646823.slc01.dev.ebayc3.com    10.65.146.152

node002-1934005.phx02.dev.ebayc3.com    10.147.190.81


* Initialize the environment

If DNS can already resolve the hostnames this step can be skipped; otherwise add the hostname-to-IP mappings to /etc/hosts.
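For example, the /etc/hosts entries on each machine would look like the following (using the hostnames and IPs listed above):

10.64.221.244    master-2347205.lvs01.dev.ebayc3.com
10.65.146.152    node01-2646823.slc01.dev.ebayc3.com
10.147.190.81    node002-1934005.phx02.dev.ebayc3.com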

Tune the kernel parameters (a sketch for applying them persistently follows the list):

net.ipv4.ip_local_port_range = 30000    60999

net.netfilter.nf_conntrack_max = 26214400

net.netfilter.nf_conntrack_tcp_timeout_established = 86400

net.netfilter.nf_conntrack_tcp_timeout_close_wait = 3600
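A minimal sketch for making these settings persistent, assuming the nf_conntrack module is loaded and using an arbitrary drop-in file name /etc/sysctl.d/k8s.conf:

# load the conntrack module so the net.netfilter.* keys exist
modprobe nf_conntrack

# write the parameters to a sysctl drop-in and apply them
cat << EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_local_port_range = 30000 60999
net.netfilter.nf_conntrack_max = 26214400
net.netfilter.nf_conntrack_tcp_timeout_established = 86400
net.netfilter.nf_conntrack_tcp_timeout_close_wait = 3600
EOF
sysctl --system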

* Create the certificates

Install cfssl

mkdir -p /opt/local/cfssl

cd /opt/local/cfssl

wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64

mv cfssl_linux-amd64 cfssl

wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64

mv cfssljson_linux-amd64 cfssljson

wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64

mv cfssl-certinfo_linux-amd64 cfssl-certinfo

chmod +x *

* Create the CA certificate configuration

mkdir /opt/ssl

cd /opt/ssl

# config.json file

vi  config.json

{

  "signing": {

    "default": {

      "expiry": "87600h"

    },

    "profiles": {

      "kubernetes": {

        "usages": [

            "signing",

            "key encipherment",

            "server auth",

            "client auth"

        ],

        "expiry": "87600h"

      }

    }

  }

}

# csr.json file

vi csr.json

{

  "CN": "kubernetes",

  "key": {

    "algo": "rsa",

    "size": 2048

  },

  "names": [

    {

      "C": "CN",

      "ST": "Shanghai",

      "L": "Shanghai",

      "O": "k8s",

      "OU": "System"

    }

  ]

}

* Generate the CA certificate and private key

cd /opt/ssl/

/opt/local/cfssl/cfssl gencert -initca csr.json | /opt/local/cfssl/cfssljson -bare ca

[root@node1-2325419 ssl]# ls -lt

total 20

-rw-r--r-- 1 root root 1005 Oct 16 17:41 ca.csr

-rw------- 1 root root 1675 Oct 16 17:41 ca-key.pem

-rw-r--r-- 1 root root 1363 Oct 16 17:41 ca.pem

-rw-r--r-- 1 root root  210 Oct 16 17:39 csr.json

-rw-r--r-- 1 root root  292 Oct 16 17:38 config.json

* Distribute the certificates

# Create the certificate directory

mkdir -p /etc/kubernetes/ssl

# Copy all the pem files into the directory

cp *.pem /etc/kubernetes/ssl

# Copy the files to all of the other machines in the cluster as well (the target directory must exist there first, see the note after these commands)

scp *.pem node01-2646823.slc01.dev.ebayc3.com:/etc/kubernetes/ssl/

scp *.pem node002-1934005.phx02.dev.ebayc3.com:/etc/kubernetes/ssl/
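Note: /etc/kubernetes/ssl must already exist on the target machines before the scp above can succeed; a small preparatory step, assuming ssh access to the nodes:

ssh node01-2646823.slc01.dev.ebayc3.com "mkdir -p /etc/kubernetes/ssl"
ssh node002-1934005.phx02.dev.ebayc3.com "mkdir -p /etc/kubernetes/ssl"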

Configure the yum repositories:

# vim /etc/yum.repos.d/kubernetes.repo 

[kubernetes]

name=Kubernetes

baseurl=http://yum.kubernetes.io/repos/kubernetes-el7-x86_64

enabled=1

gpgcheck=0

[virt7-docker-common-release]

name=virt7-docker-common-release

baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64

enabled=1

gpgcheck=0

[centos]

name=extras

baseurl=http://mirror.centos.org/centos/7/extras/x86_64/

enabled=1

gpgcheck=0

[docker-ce-stable]

name=Docker CE Stable - $basearch

baseurl=https://download.docker.com/linux/centos/7/$basearch/stable

enabled=1

gpgcheck=0

[nginx]

name=nginx

baseurl=https://nginx.org/packages/rhel/7/x86_64/

enabled=1

gpgcheck=0

    

# Install

yum makecache


yum install docker-ce conntrack-tools -y

* Modify the Docker configuration

# Make Docker use systemd as its cgroup driver

vi /etc/docker/daemon.json

{

     "exec-opts":["native.cgroupdriver=systemd"]

}
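Once Docker has been restarted (later in this section), the change can be verified with a quick check, not part of the original steps:

docker info 2>/dev/null | grep -i cgroup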


# Modify the configuration (edit the ExecStart line)

vi /usr/lib/systemd/system/docker.service

ExecStart=/usr/bin/dockerd $DOCKER_NETWORK_OPTIONS $DOCKER_OPTS $DOCKER_DNS_OPTIONS


# Additional configuration

mkdir -p /usr/lib/systemd/system/docker.service.d/

vi /usr/lib/systemd/system/docker.service.d/docker-options.conf


# Add the following (note: the Environment value must stay on a single line; a line break prevents it from loading)

[Service]

Environment="DOCKER_OPTS=--insecure-registry=10.254.0.0/16 --graph=/opt/docker --registry-mirror=http://b438f72b.m.daocloud.io --disable-legacy-registry --iptables=false"

* Reload the configuration and start docker

systemctl daemon-reload

systemctl start docker

systemctl enable docker

* etcd cluster

yum -y install etcd

* Create the etcd certificates

cd /opt/ssl/

vi etcd-csr.json

{

  "CN": "etcd",

  "hosts": [

    "127.0.0.1",

    "10.64.221.244",

    "10.64.221.245",

    "10.9.219.148"

  ],

  "key": {

    "algo": "rsa",

    "size": 2048

  },

  "names": [

    {

      "C": "CN",

      "ST": "Shanghai",

      "L": "Shanghai",

      "O": "k8s",

      "OU": "System"

    }

  ]

}

# Generate the etcd certificate and key

/opt/local/cfssl/cfssl gencert -ca=/opt/ssl/ca.pem \

  -ca-key=/opt/ssl/ca-key.pem \

  -config=/opt/ssl/config.json \

  -profile=kubernetes etcd-csr.json | /opt/local/cfssl/cfssljson -bare etcd

# Check the generated files

[root@master-2347205 ssl]# ls etcd* -l

-rw-r--r-- 1 root root 1050 Oct 30 00:17 etcd.csr

-rw-r--r-- 1 root root  259 Oct 30 00:16 etcd-csr.json

-rw------- 1 root root 1679 Oct 30 00:17 etcd-key.pem

-rw-r--r-- 1 root root 1424 Oct 30 00:17 etcd.pem

# Copy to the etcd server

# etcd 

[root@master-2347205 ssl]# cp etcd*.pem /etc/kubernetes/ssl/

# If etcd runs as a non-root user, it will get a permission error reading the key

[root@master-2347205 ssl]# chmod 644 /etc/kubernetes/ssl/etcd-key.pem

* Modify the etcd configuration

Edit the etcd unit file /usr/lib/systemd/system/etcd.service

# etcd1

[root@master-2347205 ssl]# vi /usr/lib/systemd/system/etcd.service

[Unit]

Description=Etcd Server

After=network.target

After=network-online.target

Wants=network-online.target

[Service]

Type=notify

WorkingDirectory=/var/lib/etcd/

#EnvironmentFile=-/etc/etcd/etcd.conf

User=etcd

# set GOMAXPROCS to number of processors

ExecStart=/usr/bin/etcd \

  --name=etcd1 \

  --cert-file=/etc/kubernetes/ssl/etcd.pem \

  --key-file=/etc/kubernetes/ssl/etcd-key.pem \

  --peer-cert-file=/etc/kubernetes/ssl/etcd.pem \

  --peer-key-file=/etc/kubernetes/ssl/etcd-key.pem \

  --trusted-ca-file=/etc/kubernetes/ssl/ca.pem \

  --peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem \

  --initial-advertise-peer-urls=https://10.64.241.244:2380 \

  --listen-peer-urls=https://10.64.241.244:2380 \

  --listen-client-urls=https://10.64.241.244:2379,http://127.0.0.1:2379 \

  --advertise-client-urls=https://10.64.241.244:2379 \

  --initial-cluster-token=k8s-etcd-cluster \

  --initial-cluster=etcd1=https://10.64.241.244:2380 \

  --initial-cluster-state=new \

  --data-dir=/var/lib/etcd

Restart=on-failure

RestartSec=5

LimitNOFILE=65536

[Install]

WantedBy=multi-user.target
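Before starting etcd, make sure the data directory referenced by WorkingDirectory and --data-dir exists and is owned by the etcd user (a small preparatory step, not shown above):

mkdir -p /var/lib/etcd
chown -R etcd:etcd /var/lib/etcd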

* Start etcd

[root@master-2347205 ssl]# systemctl enable etcd

[root@master-2347205 ssl]# systemctl start etcd

[root@master-2347205 ssl]# systemctl status etcd

# If startup fails, use

journalctl -f -t etcd  or journalctl -u etcd  to locate the problem

* Verify the etcd cluster status

Check the etcd cluster status:

# etcdctl --endpoints=https://10.64.241.244:2379 \

         --cert-file=/etc/kubernetes/ssl/etcd.pem \

        --ca-file=/etc/kubernetes/ssl/ca.pem \

         --key-file=/etc/kubernetes/ssl/etcd-key.pem \

         member list

f2f13b8cab1c8b9a: name=etcd1 peerURLs=https://10.64.241.244:2380 clientURLs=https://10.64.241.244:2379 isLeader=true
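The overall health can be checked with the same flags (a quick extra check, etcdctl v2 syntax assumed):

etcdctl --endpoints=https://10.64.241.244:2379 \
        --cert-file=/etc/kubernetes/ssl/etcd.pem \
        --ca-file=/etc/kubernetes/ssl/ca.pem \
        --key-file=/etc/kubernetes/ssl/etcd-key.pem \
        cluster-health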

* Install the kubectl tool

** Master-side configuration

# First install kubectl

wget https://dl.k8s.io/v1.8.0/kubernetes-client-linux-amd64.tar.gz

tar -xzvf kubernetes-client-linux-amd64.tar.gz

cp kubernetes/client/bin/* /usr/bin/

chmod a+x /usr/bin/kube*

# Verify the installation

kubectl version

Client Version: version.Info{Major:"1", Minor:"8", GitVersion:"v1.8.0", GitCommit:"6e937839ac04a38cac63e6a7a306c5d035fe7b0a", GitTreeState:"clean", BuildDate:"2017-09-28T22:57:57Z", GoVersion:"go1.8.3", Compiler:"gc", Platform:"linux/amd64"}

The connection to the server localhost:8080 was refused - did you specify the right host or port?

* Create the admin certificate

kubectl talks to kube-apiserver over the secure port, so TLS certificates and keys must be provided for the secure communication.

cd /opt/ssl/

vi admin-csr.json

{

  "CN": "admin",

  "hosts": [],

  "key": {

    "algo": "rsa",

    "size": 2048

  },

  "names": [

    {

      "C": "CN",

      "ST": "ShenZhen",

      "L": "ShenZhen",

      "O": "system:masters",

      "OU": "System"

    }

  ]

}

# Generate the admin certificate and private key

cd /opt/ssl/

/opt/local/cfssl/cfssl gencert -ca=/etc/kubernetes/ssl/ca.pem \

  -ca-key=/etc/kubernetes/ssl/ca-key.pem \

  -config=/opt/ssl/config.json \

  -profile=kubernetes admin-csr.json | /opt/local/cfssl/cfssljson -bare admin

# Check the generated files

# ls admin*

admin.csr  admin-csr.json  admin-key.pem  admin.pem

cp admin*.pem /etc/kubernetes/ssl/

scp admin*.pem node01-2646823.slc01.dev.ebayc3.com:/etc/kubernetes/ssl/

scp admin*.pem node002-1934005.phx02.dev.ebayc3.com:/etc/kubernetes/ssl/

* Configure the kubectl kubeconfig file

Set server to the local IP so each machine talks to its own API server.

# Configure the kubernetes cluster

kubectl config set-cluster kubernetes \

  --certificate-authority=/etc/kubernetes/ssl/ca.pem \

  --embed-certs=true \

  --server=https://10.64.221.244:6443

# Configure client authentication

kubectl config set-credentials admin \

  --client-certificate=/etc/kubernetes/ssl/admin.pem \

  --embed-certs=true \

  --client-key=/etc/kubernetes/ssl/admin-key.pem

  

kubectl config set-context kubernetes \

  --cluster=kubernetes \

  --user=admin

kubectl config use-context kubernetes

* kubectl config file

# The kubeconfig file is located at:

ls /root/.kube/config  -l

-rw------- 1 root root 6303 Oct 30 01:13 /root/.kube/config

* Deploy the Kubernetes master node

The master needs three components: kube-apiserver, kube-scheduler and kube-controller-manager. kube-scheduler decides which node each pod is scheduled onto; in short, resource scheduling. kube-controller-manager runs the control loops for the deployment controller, replication controller, endpoints controller, namespace controller, serviceaccounts controller and so on, and talks to kube-apiserver.

Install the components

# Download the release binaries

cd /tmp

wget https://dl.k8s.io/v1.8.0/kubernetes-server-linux-amd64.tar.gz

tar -xzvf kubernetes-server-linux-amd64.tar.gz

cd kubernetes

\cp -r server/bin/{kube-apiserver,kube-controller-manager,kube-scheduler,kubectl,kube-proxy,kubelet} /usr/bin/

* Create the kubernetes certificate

cd /opt/ssl

vi kubernetes-csr.json

{

  "CN": "kubernetes",

  "hosts": [

    "127.0.0.1",

    "10.64.241.244",

    "10.254.0.1",

    "kubernetes",

    "kubernetes.default",

    "kubernetes.default.svc",

    "kubernetes.default.svc.cluster",

    "kubernetes.default.svc.cluster.local"

  ],

  "key": {

    "algo": "rsa",

    "size": 2048

  },

  "names": [

    {

      "C": "CN",

      "ST": "Shanghai",

      "L": "Shanghai",

      "O": "k8s",

      "OU": "System"

    }

  ]

}

## The three IPs in the hosts field are: 127.0.0.1 for the local host; 10.64.221.244 for the master (with multiple masters, add one entry per master); and 10.254.0.1 for the kubernetes service IP, usually the first IP of the service network. After startup you can see it with kubectl get svc.

Generate the kubernetes certificate and private key

/opt/local/cfssl/cfssl gencert -ca=/etc/kubernetes/ssl/ca.pem \

  -ca-key=/etc/kubernetes/ssl/ca-key.pem \

  -config=/opt/ssl/config.json \

  -profile=kubernetes kubernetes-csr.json | /opt/local/cfssl/cfssljson -bare kubernetes

# Check the generated files

[root@master-2347205 ssl]# ls -lt kubernetes*

-rw-r--r-- 1 root root 1245 Oct 30 01:16 kubernetes.csr

-rw------- 1 root root 1679 Oct 30 01:16 kubernetes-key.pem

-rw-r--r-- 1 root root 1619 Oct 30 01:16 kubernetes.pem

-rw-r--r-- 1 root root  440 Oct 30 01:16 kubernetes-csr.json

# Copy to the directory

cp -r kubernetes*.pem /etc/kubernetes/ssl/

scp -r kubernetes*.pem node01-2646823.slc01.dev.ebayc3.com:/etc/kubernetes/ssl/

scp -r kubernetes*.pem node002-1934005.phx02.dev.ebayc3.com:/etc/kubernetes/ssl/

Configure kube-apiserver

When kubelet starts for the first time it sends a TLS bootstrapping request to kube-apiserver, which checks that the token in the request matches its configured token; if it matches, kube-apiserver automatically generates a certificate and key for the kubelet.

# Generate a token

[root@master-2347205 ssl]# head -c 16 /dev/urandom | od -An -t x | tr -d ' '

c4d059eb65035a8c33a7f017f2763834

# Create the token.csv file

cd /opt/ssl

vi token.csv

c4d059eb65035a8c33a7f017f2763834,kubelet-bootstrap,10001,"system:kubelet-bootstrap"

# Copy

cp token.csv /etc/kubernetes/

* Create the kube-apiserver.service file

# New in 1.8: (Node) in --authorization-mode=Node,RBAC

# Custom systemd service files are usually kept under /etc/systemd/system/

# Configure with each machine's own local IP

vi /etc/systemd/system/kube-apiserver.service

[Unit]

Description=Kubernetes API Server

Documentation=https://github.com/GoogleCloudPlatform/kubernetes

After=network.target

[Service]

User=root

ExecStart=/usr/bin/kube-apiserver \

  --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \

  --advertise-address=10.64.241.244 \

  --allow-privileged=true \

  --apiserver-count=1 \

  --audit-log-maxage=30 \

  --audit-log-maxbackup=3 \

  --audit-log-maxsize=100 \

  --audit-log-path=/var/lib/audit.log \

  --authorization-mode=Node,RBAC \

  --bind-address=10.64.241.244 \

  --client-ca-file=/etc/kubernetes/ssl/ca.pem \

  --enable-swagger-ui=true \

  --etcd-cafile=/etc/kubernetes/ssl/ca.pem \

  --etcd-certfile=/etc/kubernetes/ssl/etcd.pem \

  --etcd-keyfile=/etc/kubernetes/ssl/etcd-key.pem \

  --etcd-servers=https://10.64.241.244:2379 \

  --event-ttl=1h \

  --kubelet-https=true \

  --insecure-bind-address=10.64.241.244 \

  --runtime-config=rbac.authorization.k8s.io/v1alpha1 \

  --service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \

  --service-cluster-ip-range=10.254.0.0/16 \

  --service-node-port-range=30000-32000 \

  --tls-cert-file=/etc/kubernetes/ssl/kubernetes.pem \

  --tls-private-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \

  --enable-bootstrap-token-auth \

  --token-auth-file=/etc/kubernetes/token.csv \

  --v=2

Restart=on-failure

RestartSec=5

Type=notify

LimitNOFILE=65536

[Install]

WantedBy=multi-user.target

# Note --service-node-port-range=30000-32000

# This is the port range used when exposing services externally; randomly assigned NodePorts are allocated from this range, and explicitly requested NodePorts must also fall within it.

* Start kube-apiserver

systemctl daemon-reload

systemctl enable kube-apiserver

systemctl start kube-apiserver

systemctl status kube-apiserver

* Configure kube-controller-manager

Set master to the local IP.

# Create the kube-controller-manager.service file

vi /etc/systemd/system/kube-controller-manager.service

[Unit]

Description=Kubernetes Controller Manager

Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]

ExecStart=/usr/bin/kube-controller-manager \

  --address=127.0.0.1 \

  --master=http://10.64.241.244:8080 \

  --allocate-node-cidrs=true \

  --service-cluster-ip-range=10.254.0.0/16 \

  --cluster-cidr=10.233.0.0/16 \

  --cluster-name=kubernetes \

  --cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \

  --cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \

  --service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem \

  --root-ca-file=/etc/kubernetes/ssl/ca.pem \

  --leader-elect=true \

  --v=2

Restart=on-failure

RestartSec=5

[Install]

WantedBy=multi-user.target


* Start kube-controller-manager

systemctl daemon-reload

systemctl enable kube-controller-manager

systemctl start kube-controller-manager

systemctl status kube-controller-manager


* Configure kube-scheduler

Set master to the local IP.

# Create the kube-scheduler.service file

vi /etc/systemd/system/kube-scheduler.service

[Unit]

Description=Kubernetes Scheduler

Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]

ExecStart=/usr/bin/kube-scheduler \

  --address=127.0.0.1 \

  --master=http://10.64.241.244:8080 \

  --leader-elect=true \

  --v=2

Restart=on-failure

RestartSec=5

[Install]

WantedBy=multi-user.target

* Start kube-scheduler

systemctl daemon-reload

systemctl enable kube-scheduler

systemctl start kube-scheduler

systemctl status kube-scheduler


* Verify the master node

[root@master-2347205 ssl]# kubectl get componentstatuses

NAME                 STATUS    MESSAGE              ERROR

controller-manager   Healthy   ok                   

scheduler            Healthy   ok                   

etcd-0               Healthy   {"health": "true"} 


Deploy the node components on the master

The node part needs these components: docker, calico, kubectl, kubelet and kube-proxy.

Configure kubelet

When kubelet starts it sends a TLS bootstrapping request to kube-apiserver. The kubelet-bootstrap user from the bootstrap token file must first be bound to the system:node-bootstrapper role; only then does the kubelet have permission to create certificate signing requests (certificatesigningrequests).

# First create the role binding

# The user is the one configured in the master's token.csv file

# This only needs to be done once

kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap
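The binding can be inspected afterwards (a quick verification, not part of the original steps):

kubectl get clusterrolebinding kubelet-bootstrap -o yaml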

* Create the kubelet kubeconfig file

Set server to the master's IP.

# Configure the cluster

kubectl config set-cluster kubernetes \

  --certificate-authority=/etc/kubernetes/ssl/ca.pem \

  --embed-certs=true \

  --server=https://10.64.221.244:6443 \

  --kubeconfig=bootstrap.kubeconfig

# Configure client authentication

kubectl config set-credentials kubelet-bootstrap \

  --token=c4d059eb65035a8c33a7f017f2763834 \

  --kubeconfig=bootstrap.kubeconfig

# Configure the context

kubectl config set-context default \

  --cluster=kubernetes \

  --user=kubelet-bootstrap \

  --kubeconfig=bootstrap.kubeconfig

# Set the default context

kubectl config use-context default --kubeconfig=bootstrap.kubeconfig

# Move the generated bootstrap.kubeconfig file into place

mv bootstrap.kubeconfig /etc/kubernetes/

* Create the kubelet.service file

# Create the kubelet working directory

> Configure with the node's own IP (every node is configured the same way)

mkdir /var/lib/kubelet

vi /etc/systemd/system/kubelet.service

[Unit]

Description=Kubernetes Kubelet

Documentation=https://github.com/GoogleCloudPlatform/kubernetes

After=docker.service

Requires=docker.service

[Service]

WorkingDirectory=/var/lib/kubelet

ExecStart=/usr/bin/kubelet \

  --address=10.64.241.244 \

  --hostname-override=10.64.241.244 \

  --cgroup-driver=systemd \

  --network-plugin=cni \

  --pod-infra-container-image=jicki/pause-amd64:3.0 \

  --experimental-bootstrap-kubeconfig=/etc/kubernetes/bootstrap.kubeconfig \

  --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \

  --cert-dir=/etc/kubernetes/ssl \

  --cluster_dns=10.254.0.2 \

  --cluster_domain=cluster.local. \

  --hairpin-mode promiscuous-bridge \

  --allow-privileged=true \

  --fail-swap-on=false \

  --serialize-image-pulls=false \

  --logtostderr=true \

  --max-pods=512 \

  --v=2

ExecStopPost=/sbin/iptables -A INPUT -s 10.0.0.0/8 -p tcp --dport 4194 -j ACCEPT

ExecStopPost=/sbin/iptables -A INPUT -s 172.16.0.0/12 -p tcp --dport 4194 -j ACCEPT

ExecStopPost=/sbin/iptables -A INPUT -s 10.0.0.0/8 -p tcp --dport 4194 -j ACCEPT

ExecStopPost=/sbin/iptables -A INPUT -p tcp --dport 4194 -j DROP

Restart=on-failure

RestartSec=5

[Install]

WantedBy=multi-user.target

# Notes on the configuration above:

10.64.221.244      is the local machine's IP

10.254.0.2       is the pre-allocated DNS address

cluster.local.   is the kubernetes cluster domain

jicki/pause-amd64:3.0  is the pod infrastructure image, i.e. gcr.io/google_containers/pause-amd64:3.0 from gcr; pulling it once and re-tagging it into your own registry is faster.
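A minimal sketch of that pull-and-retag step (registry.example.com is a placeholder for your own registry):

# pull the mirrored pause image
docker pull jicki/pause-amd64:3.0

# re-tag it for your private registry and push (hypothetical registry name)
docker tag jicki/pause-amd64:3.0 registry.example.com/pause-amd64:3.0
docker push registry.example.com/pause-amd64:3.0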


Start kubelet

systemctl daemon-reload

systemctl enable kubelet

systemctl start kubelet

systemctl status kubelet

# If startup fails, use

journalctl -f -t kubelet  or journalctl -u kubelet  to locate the problem


* Configure TLS authentication (approve the CSR)

[root@master-2347205 ssl]# kubectl get csr

NAME                                                   AGE       REQUESTOR           CONDITION

node-csr-jeCesmBKEc2_7Bt4FHDNbbQXXy7syzJP3P2HfGPePi8   1d        kubelet-bootstrap   Pending


# Approve the request

[root@master-2347205 ssl]# kubectl certificate approve node-csr-jeCesmBKEc2_7Bt4FHDNbbQXXy7syzJP3P2HfGPePi8

certificatesigningrequest "node-csr-jeCesmBKEc2_7Bt4FHDNbbQXXy7syzJP3P2HfGPePi8" approved


* Verify the nodes

[root@master-2347205 ssl]# kubectl get nodes

NAME            STATUS    ROLES     AGE       VERSION

10.64.241.244   Ready     <none>    1d        v1.8.0


# After approval, the config file and keys are generated automatically

# Config file

ls /etc/kubernetes/kubelet.kubeconfig   

/etc/kubernetes/kubelet.kubeconfig

# Key files

# ls /etc/kubernetes/ssl/kubelet*

/etc/kubernetes/ssl/kubelet-client.crt  /etc/kubernetes/ssl/kubelet-client.key  /etc/kubernetes/ssl/kubelet.crt  /etc/kubernetes/ssl/kubelet.key


* Configure kube-proxy

Create the kube-proxy certificate

# Since cfssl is not installed on the node side,


# go back to the master machine to generate the certificates, then copy them over

# cd /opt/ssl

vi kube-proxy-csr.json


{

  "CN": "system:kube-proxy",

  "hosts": [],

  "key": {

    "algo": "rsa",

    "size": 2048

  },

  "names": [

    {

      "C": "CN",

      "ST": "Shanghai",

      "L": "Shanghai",

      "O": "k8s",

      "OU": "System"

    }

  ]

}

Generate the kube-proxy certificate and private key

/opt/local/cfssl/cfssl gencert -ca=/etc/kubernetes/ssl/ca.pem \

  -ca-key=/etc/kubernetes/ssl/ca-key.pem \

  -config=/opt/ssl/config.json \

  -profile=kubernetes  kube-proxy-csr.json | /opt/local/cfssl/cfssljson -bare kube-proxy


# Check the generated files

ls kube-proxy*

kube-proxy.csr  kube-proxy-csr.json  kube-proxy-key.pem  kube-proxy.pem


# Copy to the directory

cp kube-proxy*.pem /etc/kubernetes/ssl/

scp kube-proxy*.pem node01-2646823.slc01.dev.ebayc3.com:/etc/kubernetes/ssl/

scp kube-proxy*.pem node002-1934005.phx02.dev.ebayc3.com:/etc/kubernetes/ssl/


Create the kube-proxy kubeconfig file

Set server to each machine's own IP; on the nodes set it to the server's IP, and with multiple masters set it to the proxy IP in front of them


# Configure the cluster

kubectl config set-cluster kubernetes \

  --certificate-authority=/etc/kubernetes/ssl/ca.pem \

  --embed-certs=true \

  --server=https://10.64.221.244:6443 \

  --kubeconfig=kube-proxy.kubeconfig


# Configure client authentication

kubectl config set-credentials kube-proxy \

  --client-certificate=/etc/kubernetes/ssl/kube-proxy.pem \

  --client-key=/etc/kubernetes/ssl/kube-proxy-key.pem \

  --embed-certs=true \

  --kubeconfig=kube-proxy.kubeconfig

  

# Configure the context

kubectl config set-context default \

  --cluster=kubernetes \

  --user=kube-proxy \

  --kubeconfig=kube-proxy.kubeconfig


# Set the default context

kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig

# Move to the directory

mv kube-proxy.kubeconfig /etc/kubernetes/

* Create the kube-proxy.service file

Configure with each machine's own IP.

# Create the kube-proxy working directory

mkdir -p /var/lib/kube-proxy

vi /etc/systemd/system/kube-proxy.service

[Unit]

Description=Kubernetes Kube-Proxy Server

Documentation=https://github.com/GoogleCloudPlatform/kubernetes

After=network.target

[Service]

WorkingDirectory=/var/lib/kube-proxy

ExecStart=/usr/bin/kube-proxy \

  --bind-address=10.64.241.244 \

  --hostname-override=10.64.241.244 \

  --cluster-cidr=10.254.0.0/16 \

  --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig \

  --logtostderr=true \

  --v=2

Restart=on-failure

RestartSec=5

LimitNOFILE=65536

[Install]

WantedBy=multi-user.target

* Start kube-proxy

systemctl daemon-reload

systemctl enable kube-proxy

systemctl start kube-proxy

systemctl status kube-proxy

# If startup fails, use

journalctl -f -t kube-proxy  or journalctl -u kube-proxy  to locate the problem


# Configuration for ALL nodes

------------------------------------

cd /tmp

wget https://dl.k8s.io/v1.8.0/kubernetes-server-linux-amd64.tar.gz

tar -xzvf kubernetes-server-linux-amd64.tar.gz

cd kubernetes

cp -r server/bin/{kube-proxy,kubelet} /usr/bin/


mkdir -p /etc/kubernetes/ssl/

scp ca.pem kube-proxy.pem kube-proxy-key.pem  node-*:/etc/kubernetes/ssl/

# kubelet

# First create the kubelet kubeconfig file

kubectl config set-cluster kubernetes \

  --certificate-authority=/etc/kubernetes/ssl/ca.pem \

  --embed-certs=true \

  --server=https://10.64.241.244:6443 \

  --kubeconfig=bootstrap.kubeconfig

# Configure client authentication (the token must match the one in token.csv on the master)

kubectl config set-credentials kubelet-bootstrap \

  --token=446d97ddb6b3a4cb363a34cf29afc86d \

  --kubeconfig=bootstrap.kubeconfig

# Configure the context

kubectl config set-context default \

  --cluster=kubernetes \

  --user=kubelet-bootstrap \

  --kubeconfig=bootstrap.kubeconfig

  

# Set the default context

kubectl config use-context default --kubeconfig=bootstrap.kubeconfig

# Move the generated bootstrap.kubeconfig file into place

mv bootstrap.kubeconfig /etc/kubernetes/

      

# Create the kube-proxy kubeconfig file

kubectl config set-cluster kubernetes \

  --certificate-authority=/etc/kubernetes/ssl/ca.pem \

  --embed-certs=true \

  --server=https://10.64.241.244:6443 \

  --kubeconfig=kube-proxy.kubeconfig

# Configure client authentication

kubectl config set-credentials kube-proxy \

  --client-certificate=/etc/kubernetes/ssl/kube-proxy.pem \

  --client-key=/etc/kubernetes/ssl/kube-proxy-key.pem \

  --embed-certs=true \

  --kubeconfig=kube-proxy.kubeconfig

   

# Configure the context

kubectl config set-context default \

  --cluster=kubernetes \

  --user=kube-proxy \

  --kubeconfig=kube-proxy.kubeconfig

# Set the default context

kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig

# Move to the directory

mv kube-proxy.kubeconfig /etc/kubernetes/  

------------------------------------


Create the Nginx proxy

An Nginx proxy must be created on every node. Note in particular that when the master also acts as a node, the nginx-proxy does not need to be configured on it.

# Create the configuration directory

mkdir -p /etc/nginx

# Write the proxy configuration

cat << EOF > /etc/nginx/nginx.conf

error_log stderr notice;

worker_processes auto;

events {

  multi_accept on;

  use epoll;

  worker_connections 1024;

}

stream {

    upstream kube_apiserver {

        least_conn;

        server 10.64.221.244:6443;

    }

    server {

        listen        0.0.0.0:6443;

        proxy_pass    kube_apiserver;

        proxy_timeout 10m;

        proxy_connect_timeout 1s;

    }

}

EOF
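The generated config can be sanity-checked before wiring it into systemd (a quick test using the same image as below):

docker run --rm -v /etc/nginx:/etc/nginx nginx:1.13.3-alpine nginx -t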

# Run Nginx as a Docker container, managed and started by systemd

cat << EOF > /etc/systemd/system/nginx-proxy.service

[Unit]

Description=kubernetes apiserver docker wrapper

Wants=docker.socket

After=docker.service

[Service]

User=root

PermissionsStartOnly=true

ExecStart=/usr/bin/docker run -p 6443:6443 \\

                              -v /etc/nginx:/etc/nginx \\

                              --name nginx-proxy \\

                              --net=host \\

                              --restart=on-failure:5 \\

                              --memory=512M \\

                              nginx:1.13.3-alpine

ExecStartPre=-/usr/bin/docker rm -f nginx-proxy

ExecStop=/usr/bin/docker stop nginx-proxy

Restart=always

RestartSec=15s

TimeoutStartSec=30s

[Install]

WantedBy=multi-user.target

EOF

# Start Nginx

systemctl daemon-reload

systemctl start nginx-proxy

systemctl enable nginx-proxy

systemctl status nginx-proxy
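A quick way to confirm the proxy is working (not part of the original steps): check that something is listening on 6443 and that the apiserver answers through it.

ss -tlnp | grep 6443

curl -k https://127.0.0.1:6443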

# Restart kubelet and kube-proxy on the node

systemctl restart kubelet

systemctl status kubelet

systemctl restart kube-proxy

systemctl status kube-proxy

On the master, configure TLS authentication (approve the node CSRs)

[root@master-2347205 ~]# kubectl get csr

NAME                                                   AGE       REQUESTOR           CONDITION

node-csr-ab7uNyElYjU2cVKcUQWLCSynBaTBD38eGLetav-Hkjc   23h       kubelet-bootstrap   Pending

node-csr-jeCesmBKEc2_7Bt4FHDNbbQXXy7syzJP3P2HfGPePi8   1d        kubelet-bootstrap   Approved,Issued

node-csr-rNEoTvYkwQrruBnc_kAFJ1WRNVv-5vopplJzbVevTHw   23h       kubelet-bootstrap   Pending

# Approve each pending CSR by name

[root@master-2347205 ~]# kubectl certificate approve node-csr-ab7uNyElYjU2cVKcUQWLCSynBaTBD38eGLetav-Hkjc

[root@master-2347205 ~]# kubectl certificate approve node-csr-rNEoTvYkwQrruBnc_kAFJ1WRNVv-5vopplJzbVevTHw

[root@master-2347205 ~]# kubectl get nodes

NAME            STATUS    ROLES     AGE       VERSION

10.147.190.81   Ready     <none>    23h       v1.8.0

10.64.241.244   Ready     <none>    1d        v1.8.0

10.65.146.152   Ready     <none>    23h       v1.8.0

Calico network

Modify kubelet.service

vi /etc/systemd/system/kubelet.service

# Add the following setting

--network-plugin=cni \

# Reload the configuration

systemctl daemon-reload

systemctl restart kubelet.service

systemctl status kubelet.service

Modify kube-proxy.service

# Reload the configuration

systemctl daemon-reload

systemctl restart kube-proxy.service

systemctl status kube-proxy.service

Install Calico

(official docs: http://docs.projectcalico.org/v2.6/getting-started/kubernetes/installation/hosted/hosted)

wget http://docs.projectcalico.org/v2.6/getting-started/kubernetes/installation/hosted/calico.yaml

wget http://docs.projectcalico.org/v2.6/getting-started/kubernetes/installation/rbac.yaml

# Download the images

# Upstream images (hard to reach from mainland China)

quay.io/calico/node:v2.6.0

quay.io/calico/cni:v1.11.0

quay.io/calico/kube-controllers:v1.0.0

# Domestic mirror images

jicki/node:v2.6.0

jicki/cni:v1.11.0

jicki/kube-controllers:v1.0.0

Configure calico

# Note: modify the following options:

 data:

  # Configure this with the location of your etcd cluster.

  etcd_endpoints: "https://10.64.241.244:2379"

 

# The following values must be the base64-encoded file contents

data:

  etcd-key: (cat /etc/kubernetes/ssl/etcd-key.pem | base64 | tr -d '\n')

  etcd-cert: (cat /etc/kubernetes/ssl/etcd.pem | base64 | tr -d '\n')

  etcd-ca: (cat /etc/kubernetes/ssl/ca.pem | base64 | tr -d '\n')
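For example, the three base64 strings can be produced on the master like this (a small helper; paste each output into the corresponding Secret field):

cat /etc/kubernetes/ssl/etcd-key.pem | base64 | tr -d '\n'
cat /etc/kubernetes/ssl/etcd.pem | base64 | tr -d '\n'
cat /etc/kubernetes/ssl/ca.pem | base64 | tr -d '\n'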

The resulting configuration looks like this:

 data:

  # Populate the following files with etcd TLS configuration if desired, but leave blank if

  # not using TLS for etcd.

  # This self-hosted install expects three files with the following names.  The values

  # should be base64 encoded strings of the entire contents of each file.

   etcd-key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBMWpqRVE2bWRnYXc1WUhvemxadUpwVlMzb1QxVDk5aDV1Mm1JcDhHN3BrQW4rVUloCklrRjIrOG93OTZxUkJERzlaa1V2R3h3ZXJtVWx2Y21LOFFlQ1BGKzFHYUZ5U3doVG52dTNLRW5FcXhNaHMzbmcKelM5QXI4SDVDejNDZ05jSldhais3ZTFaU2FBRStKTFF4SzZXdmRadnlSTXdVMnZiVldQMDlvV2hRaTJxUkJJYwpnVFhEMk9XNXhyakgrSzd3K1JnaERsM0NNNmlyTnQrM0xLMWtySlNwdStVeVVlRGZqbWtDWnVIUnBnMUVDTUJ5Ckxrc3ZwZVFlYTZNTHFDMVBIV1ZzQmdPWWFPYU45Z2doQ2FWMUQ4M094QmFBa2QzTmF0aHpDZ0RLb3g5TTNuWVoKL2hrSW1xYkthS1pkbmxWT2pqY2hOdzQwT2R0dHYxT3ROYjF5L1FJREFRQUJBb0lCQURpZURqazMxZm9VczZGdApDcjhxdVlscHg1S2s2OUZnZEdQMDgzb3ErTlpxdEdYcmRXaGhWdm9iS1MvNzg1SGhONGFWR0dzWTFuOFBHcGQ3CmNlNWNKcTZnT1BRS09IUGVRb09OVVk0aldRaEI2VEZQTC92cTFqVGRsbDNETitubmE0QTM2eENsSDJrWEV0bEQKckV1ZlBSWDFXOXdQNTBEVkM4WVl3Z0I1dHZJUE5hRHNFaExFZDBSZlJQWXZ2NkxJS1pZMGhHcXhvMjFPYThpZApiZEEwK3I2dm9OV043TVFENDFHN09EM0t4S3V3OG0zU1FDTTR1ZDVVd25FWTFrOU8xWExqZi9mQTRxRTNNNjI4ClFFaURjVnVDNTJjd1ZQb0M3QmdsV2ZnTUNPK0FnUjVDMnQzblpxNEdUQWJjMWhFYmJsTUdDbVJxbzB5SUt2S0QKdjhtL2kzMENnWUVBM0QrZDlLbUxQRU44UlYzVE9WOUt1ajQ3NXVXaGdlOXBXWGtJRWkrOTRZYjhyYzRXWUFXawo3YVhUdW95OW4yaTkvdE85VGV3cUtvcm9WeTQ4MHd3OFJPTzdMbnNRdFZhMGFjYktVaS96NUc4bkxQSnkvb1FQCjljRXNUbklET0NPWURRV0l0b1hCaS9qRGZCOURWY1RCTkJaaENKc05ySEdQc2xRM2owWkRySWNDZ1lFQStQNjEKZDZmTkpLNFNSMkpIbC8zRGdoWWZxSEZHQnF1MU00UTJoaTE1c0Zrbm15STRiczBwdXpxbHFETjhIVGRGTitUYQpCbVZ3bDcvTVI0OUtMWTAvbFViM3NEcllQNHZsRVFmcDM2RDgzcUsrNnA5Z2IySE1ncnpDNHlTaWNNcmFyQUpQCm5PU1pkSHVCV0VwS1dYcXpNWS9vT1lncXpmK2NDY1ZCaU9kbXFWc0NnWUVBcTYwNjVsYXhuVXZOTmhTb2JIUUcKdjUxNis4UEtYSW5OU0Z5N0dkSDA1REpnQ3VvMUxxdTNuelkvRDQ1YzBFREl1RFU3dmQxMEVLMHQ1YWE0NnBrQwo4WC9CaTcySmpKQUZHTjVISGtFYWp0akZaSmVuWXhSMWpFNEN4eksvcDZhM0FvOXB2VHdOWFptT1p0WEhtcmMvCkVlYlB3L0Y0ZXc0WndCczlJNFRWZzBrQ2dZRUFuNzNoTkFQT1k1TGRuVGhiRUJLNHBET0hXZXhaN1p1cFk0U1YKNmwwd2JjZTAxaDBOTVdDa1BqN3cya2sxZmhUZ2JteCt4NDI2djFCSVFUeVhiVVBxSDkzb3VBSDRqRVEyMEVkUQpicXo4cmVTaE1TclFsb1Nhek9GRzhxeWxIbEcyR3BaYzl4M2RxVkkyK0NxcEttRENwN0JZWHRpU0JaalFSYUJzClcrV0JBbk1DZ1lBNXdKcmZPenBZb08vYkZHWXFPSkY5QmxOT0E4eG42T2w2RDBmZDY0bGZJTDJTUEg3MnNna3gKN1lHTHZGdThvNWhoYml4ejJUQ3ZSRUw0Z1NGWVRhcEcwN0haWEFtbkFqcDAyN3dSWlR4YXdWZ29RTE5DOU9TTwpydmhXSWZjSjMzQWdHWUFNSVVaWWpvL2RQLzRpOWF0VGFQdmI5bmNzaHVNVXkxTURXSnIxOEE9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=

   etcd-cert: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUQ3akNDQXRhZ0F3SUJBZ0lVY1RaMjVEWWNQcGU2MXBuWkZqeU5jVHNqRURRd0RRWUpLb1pJaHZjTkFRRUwKQlFBd1p6RUxNQWtHQTFVRUJoTUNRMDR4RVRBUEJnTlZCQWdUQ0ZOb1pXNWFhR1Z1TVJFd0R3WURWUVFIRXdoVAphR1Z1V21obGJqRU1NQW9HQTFVRUNoTURhemh6TVE4d0RRWURWUVFMRXdaVGVYTjBaVzB4RXpBUkJnTlZCQU1UCkNtdDFZbVZ5Ym1WMFpYTXdIaGNOTVRjeE1ETXdNRGN4TWpBd1doY05NamN4TURJNE1EY3hNakF3V2pCaE1Rc3cKQ1FZRFZRUUdFd0pEVGpFUk1BOEdBMVVFQ0JNSVUyaGxibHBvWlc0eEVUQVBCZ05WQkFjVENGTm9aVzVhYUdWdQpNUXd3Q2dZRFZRUUtFd05yT0hNeER6QU5CZ05WQkFzVEJsTjVjM1JsYlRFTk1Bc0dBMVVFQXhNRVpYUmpaRENDCkFTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTlk0eEVPcG5ZR3NPV0I2TTVXYmlhVlUKdDZFOVUvZlllYnRwaUtmQnU2WkFKL2xDSVNKQmR2dktNUGVxa1FReHZXWkZMeHNjSHE1bEpiM0ppdkVIZ2p4Zgp0Um1oY2tzSVU1Nzd0eWhKeEtzVEliTjU0TTB2UUsvQitRczl3b0RYQ1Ztby91M3RXVW1nQlBpUzBNU3VscjNXCmI4a1RNRk5yMjFWajlQYUZvVUl0cWtRU0hJRTF3OWpsdWNhNHgvaXU4UGtZSVE1ZHdqT29xemJmdHl5dFpLeVUKcWJ2bE1sSGczNDVwQW1iaDBhWU5SQWpBY2k1TEw2WGtIbXVqQzZndFR4MWxiQVlEbUdqbWpmWUlJUW1sZFEvTgp6c1FXZ0pIZHpXclljd29BeXFNZlRONTJHZjRaQ0pxbXltaW1YWjVWVG80M0lUY09ORG5iYmI5VHJUVzljdjBDCkF3RUFBYU9CbHpDQmxEQU9CZ05WSFE4QkFmOEVCQU1DQmFBd0hRWURWUjBsQkJZd0ZBWUlLd1lCQlFVSEF3RUcKQ0NzR0FRVUZCd01DTUF3R0ExVWRFd0VCL3dRQ01BQXdIUVlEVlIwT0JCWUVGSFFjQnRoTXRXcnlPaG5nSUQwZQpCTE4wR0M2Yk1COEdBMVVkSXdRWU1CYUFGSWw3M2JJQ1RqandkdFNtcGNSMlNOVDNPREpHTUJVR0ExVWRFUVFPCk1BeUhCSDhBQUFHSEJBcEE4ZlF3RFFZSktvWklodmNOQVFFTEJRQURnZ0VCQUlVdlFyN0ZYNGtiTVM3VXU4b2wKSVlLV1JLZG5ZS0FzWTdybGlIaklOUTg1QTh4dDZ2aU9RS2t2bGRXRVBYUGxpcHJUZjFkVzdKbVhRbjZyUExNMQp4NzRhREZGUm5maHFoeWhMWjY3TGRBQzE3cGlGNHJVczNzQ280Ymd1RURyaGpWOWk0aFRCcU0xK0pucmRENVhKCmtoUExZdkdMS3UzODB1ZzhHU3VIamRFd2VGWHFtTkNoVDNiL2syTjBtalhLTGNZOGRsalpWT1NFZFhhSHZKM1gKQ3l1emE1YWJDbnVsZDhZMUJycEUvZXEvU29sMHBqa0c5NDZ6U1lwaEpsS0JadGRmbElYRGg0ZG91RlRKd2RxeAprMEl3NVA3Y21SZW5ieGJFMCs1YUV6MDFVWVJ1ZnlMK1dGU3VZTWU3cUovV3NPVUxxVWVkbUpocE9yN29NK0tmCnpxUT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=

   etcd-ca: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUR3akNDQXFxZ0F3SUJBZ0lVQXZJRGN1U1FBTnhxV1pZZC9XSGE3OER0OFdvd0RRWUpLb1pJaHZjTkFRRUwKQlFBd1p6RUxNQWtHQTFVRUJoTUNRMDR4RVRBUEJnTlZCQWdUQ0ZOb1pXNWFhR1Z1TVJFd0R3WURWUVFIRXdoVAphR1Z1V21obGJqRU1NQW9HQTFVRUNoTURhemh6TVE4d0RRWURWUVFMRXdaVGVYTjBaVzB4RXpBUkJnTlZCQU1UCkNtdDFZbVZ5Ym1WMFpYTXdIaGNOTVRjeE1ETXdNRGN3T0RBd1doY05Nakl4TURJNU1EY3dPREF3V2pCbk1Rc3cKQ1FZRFZRUUdFd0pEVGpFUk1BOEdBMVVFQ0JNSVUyaGxibHBvWlc0eEVUQVBCZ05WQkFjVENGTm9aVzVhYUdWdQpNUXd3Q2dZRFZRUUtFd05yT0hNeER6QU5CZ05WQkFzVEJsTjVjM1JsYlRFVE1CRUdBMVVFQXhNS2EzVmlaWEp1ClpYUmxjekNDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQkFMejJxb0VTTGxTZjl3ZGsKb05DNnpYUDUyVzhHZ0p6TWFTQWVKNzgrbElvMG5YOWFyeWlheCtDOHg4dG1VcU1sMklzRzhmc01NY3JOM3VOeApRZjF5MXRBZGJHQ0lCR2Q0d1FNcnc3ZU1QNld6L285SFpKMW1lcmlPTHF6YlNqc2VSYmY3VloxZGZHYkxZTDN6CkI2WGZJam5MbjRlS2YrdHIxdkEvc3ppZmIvMjkyYW1jd3RZVER4NkJ1WFlaY1F0ZktBeExNY3psSVNKaEx2cWQKYkpWQkRnYWVPRTMzeTFYeG0vWkR3cnN2VjdIZUhjczY3OUdVdU1QVE9ZRkZQRnFNRGlSam9acWkzOWV6MU9kUgpHKzdIcTJOMDVzQVpTMkJ1eFE5RjZsZFVTTTRZNkdobkVWejRKZzVmVElHdlVWZ2FOR1F0aGpzZ2JvUnVYNXppCkZvSzI2RzhDQXdFQUFhTm1NR1F3RGdZRFZSMFBBUUgvQkFRREFnRUdNQklHQTFVZEV3RUIvd1FJTUFZQkFmOEMKQVFJd0hRWURWUjBPQkJZRUZJbDczYklDVGpqd2R0U21wY1IyU05UM09ESkdNQjhHQTFVZEl3UVlNQmFBRklsNwozYklDVGpqd2R0U21wY1IyU05UM09ESkdNQTBHQ1NxR1NJYjNEUUVCQ3dVQUE0SUJBUUI5UWY3Z004K0pNakZECnBRYm9HM2g2NkNyMC9xOEZNbzlGaURjWlYxb1dpejQvVEdGK1phU1hSOFFxWmRTK0dNQU1xaGUydGdLbTZ3TngKK3VvS2w5TDEzUjkrSmVWWFdvSWpmV3V0SFNyNk5NbVdLMnJjVVNoTjg0cW1vMW81ZjBta3paUWxHZU5OT3V3cAp5ZzBtY2***TcxbDFVeVY0RzR5YWRyQVFHRHhvbDhud0ViZXBKdlkwU2FkVXFIRjVsYW4ySEd5VVV3UXpsdlMwCjRDbVFHQUJNclY5M1phbTBaZTNWbU1WK3BFeVhRbnNXS2lqTG5tSjd5WU5vYjZFd2VXcS93eFc4Y2dCVEpncW0KNnUzRDRVc3NWRXZhQXlsL3F3Zkx6Zkk1NTNxc0kySTg4T1BQd0M5SE1IUU9pWTFja3FIMnlWRm5Pd3htRUphVAo4YzJZbU95TgotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==

 

             # Configure the IP Pool from which Pod IPs will be chosen.

             - name: CALICO_IPV4POOL_CIDR

              value: "10.233.0.0/16"

Import the yaml files

[root@master-2347205 ~]# kubectl apply -f calico.yaml 

configmap "calico-config" created

secret "calico-etcd-secrets" created

daemonset "calico-node" created

deployment "calico-policy-controller" created

serviceaccount "calico-policy-controller" created

serviceaccount "calico-node" created

[root@master-2347205 ~]# kubectl apply -f rbac.yaml

clusterrole "calico-policy-controller" created

clusterrolebinding "calico-policy-controller" created

clusterrole "calico-node" created

clusterrolebinding "calico-node" created

[root@master-2347205 ~]# kubectl get pods -n kube-system

NAME                                      READY     STATUS    RESTARTS   AGE

calico-kube-controllers-f58c74bfd-xmwjk   1/1       Running   1          8h

calico-node-4b79k                         2/2       Running   4          8h

calico-node-9b68d                         2/2       Running   2          8h

calico-node-mgxjw                         2/2       Running   2          8h

Install calicoctl

cd /usr/bin/

wget -c  https://github.com/projectcalico/calicoctl/releases/download/v1.3.0/calicoctl

chmod +x calicoctl

## Create the calicoctl configuration file

# The config file, on machines where the calico network is installed

mkdir /etc/calico

vi /etc/calico/calicoctl.cfg

apiVersion: v1

kind: calicoApiConfig

metadata:

spec:

  datastoreType: "etcdv2"

  etcdEndpoints: "https://10.64.241.244:2379"

  etcdKeyFile: "/etc/kubernetes/ssl/etcd-key.pem"

  etcdCertFile: "/etc/kubernetes/ssl/etcd.pem"

  etcdCACertFile: "/etc/kubernetes/ssl/ca.pem"

 

# Check the calico status

 [root@master-2347205 ~]#  calicoctl node status

Calico process is running.

IPv4 BGP status

+---------------+-------------------+-------+----------+-------------+

| PEER ADDRESS  |     PEER TYPE     | STATE |  SINCE   |    INFO     |

+---------------+-------------------+-------+----------+-------------+

| 10.147.190.81 | node-to-node mesh | up    | 05:50:54 | Established |

| 10.65.146.152 | node-to-node mesh | up    | 05:50:53 | Established |

+---------------+-------------------+-------+----------+-------------+

IPv6 BGP status

No IPv6 peers found.

Test the cluster

# Create an nginx deployment

apiVersion: extensions/v1beta1 

kind: Deployment 

metadata: 

  name: nginx-dm

spec: 

  replicas: 2

  template: 

    metadata: 

      labels: 

        name: nginx 

    spec: 

      containers: 

        - name: nginx 

          image: nginx:alpine 

          imagePullPolicy: IfNotPresent

          ports: 

            - containerPort: 80         

---

apiVersion: v1 

kind: Service

metadata: 

  name: nginx-svc 

spec: 

  ports: 

    - port: 80

      targetPort: 80

      protocol: TCP 

  selector: 

    name: nginx
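Save the manifest above to a file (the name nginx-dm.yaml is arbitrary) and create it; remember the Docker 17.09.0-ce caveat from the top of this page about removing memory limits before importing yml files.

kubectl apply -f nginx-dm.yaml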

    

[root@master-2347205 ~]# kubectl get pods -o wide

NAME                        READY     STATUS    RESTARTS   AGE       IP               NODE

nginx-dm-55b58f68b6-fdqlv   1/1       Running   1          7h        10.233.165.193   10.147.190.81

nginx-dm-55b58f68b6-mp9vj   1/1       Running   2          7h        10.233.103.9     10.64.241.244    

# curl the service from a node

[root@master-2347205 ~]# curl http://10.254.49.180:80 -I

HTTP/1.1 200 OK

Server: nginx/1.13.6

Date: Tue, 31 Oct 2017 10:00:09 GMT

Content-Type: text/html

Content-Length: 612

Last-Modified: Fri, 27 Oct 2017 22:20:30 GMT

Connection: keep-alive

ETag: "59f3b12e-264"

Accept-Ranges: bytes

[root@node01-2646823 ~]# curl http://10.254.49.180:80 -I

HTTP/1.1 200 OK

Server: nginx/1.13.6

Date: Tue, 31 Oct 2017 09:59:48 GMT

Content-Type: text/html

Content-Length: 612

Last-Modified: Fri, 27 Oct 2017 22:20:30 GMT

Connection: keep-alive

ETag: "59f3b12e-264"

Accept-Ranges: bytes

Configure KubeDNS

Official yaml on GitHub: https://github.com/kubernetes/kubernetes/tree/master/cluster/addons/dns

Download the images

# Official images

gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.5

gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.5

gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.5

 

# My mirrored images

jicki/k8s-dns-sidecar-amd64:1.14.5

jicki/k8s-dns-kube-dns-amd64:1.14.5

jicki/k8s-dns-dnsmasq-nanny-amd64:1.14.5

Download the yaml file

curl -O https://raw.githubusercontent.com/kubernetes/kubernetes/master/cluster/addons/dns/kube-dns.yaml.base

# Drop the .base suffix

mv kube-dns.yaml.base kube-dns.yaml

* Predefined system RoleBinding

The predefined RoleBinding system:kube-dns binds the kube-dns ServiceAccount in the kube-system namespace to the system:kube-dns Role, which has permission to access the DNS-related kube-apiserver APIs.

[root@master-2347205 ~]# kubectl get clusterrolebindings system:kube-dns -o yaml

apiVersion: rbac.authorization.k8s.io/v1

kind: ClusterRoleBinding

metadata:

  annotations:

    rbac.authorization.kubernetes.io/autoupdate: "true"

  creationTimestamp: 2017-10-30T08:25:06Z

  labels:

    kubernetes.io/bootstrapping: rbac-defaults

  name: system:kube-dns

  resourceVersion: "78"

  selfLink: /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system%3Akube-dns

  uid: d5af7ba9-bd4b-11e7-85cf-74dbd18002e4

roleRef:

  apiGroup: rbac.authorization.k8s.io

  kind: ClusterRole

  name: system:kube-dns

subjects:

- kind: ServiceAccount

  name: kube-dns

  namespace: kube-system

  

Modify kube-dns.yaml (the sed sketch after this list can apply these substitutions)

1. # Change clusterIP: __PILLAR__DNS__SERVER__ to the DNS IP we defined earlier, 10.254.0.2

2. # Change --domain=__PILLAR__DNS__DOMAIN__. to the domain chosen earlier: --domain=cluster.local.

3. # Change --server=/__PILLAR__DNS__DOMAIN__/127.0.0.1#10053 to --server=/cluster.local./127.0.0.1#10053

4. # Change --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.__PILLAR__DNS__DOMAIN__, to --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local.,

5. # Change --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.__PILLAR__DNS__DOMAIN__, to --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local.,
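A minimal sed sketch that applies these substitutions to kube-dns.yaml (it inserts cluster.local without the extra trailing dot shown in items 3-5, which resolves the same way; check the result by hand afterwards):

sed -i 's/__PILLAR__DNS__SERVER__/10.254.0.2/g' kube-dns.yaml
sed -i 's/__PILLAR__DNS__DOMAIN__/cluster.local/g' kube-dns.yaml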

Import the yaml file

# Replace all the images with the domestic mirrors

sed -i 's/gcr\.io\/google_containers/jicki/g' *

# Import

[root@master-2347205 ~]# kubectl create -f .

service "kube-dns" created

serviceaccount "kube-dns" created

configmap "kube-dns" created

deployment "kube-dns" created

Check the kubedns service

[root@master-2347205 ~]# kubectl get all --namespace=kube-system

NAME             DESIRED   CURRENT   READY     UP-TO-DATE   AVAILABLE   NODE SELECTOR   AGE

ds/calico-node   3         3         3         3            3           <none>          9h

NAME                              DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE

deploy/calico-kube-controllers    1         1         1            1           9h

deploy/calico-policy-controller   0         0         0            0           9h

deploy/kube-dns                   1         1         1            1           3h

NAME                                     DESIRED   CURRENT   READY     AGE

rs/calico-kube-controllers-f58c74bfd     1         1         1         9h

rs/calico-policy-controller-566dc8d645   0         0         0         9h

rs/kube-dns-ff55764f4                    1         1         1         3h


NAME                                         READY     STATUS    RESTARTS   AGE

po/calico-kube-controllers-f58c74bfd-xmwjk   1/1       Running   1          9h

po/calico-node-4b79k                         2/2       Running   4          9h

po/calico-node-9b68d                         2/2       Running   2          9h

po/calico-node-mgxjw                         2/2       Running   2          9h

po/kube-dns-ff55764f4-qprf4                  3/3       Running   0          3h

NAME                       TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)         AGE

svc/kube-dns               ClusterIP   10.254.0.2       <none>        53/UDP,53/TCP   3h

Verify the DNS service

Before verifying DNS, any pods and deployments created before kube-dns was deployed must be deleted and re-created, otherwise name resolution will not work for them.

[root@master-2347205 ~]# kubectl get pods -o wide

NAME                        READY     STATUS    RESTARTS   AGE       IP               NODE

alpine                      1/1       Running   1          7h        10.233.131.130   10.65.146.152

nginx-dm-55b58f68b6-fdqlv   1/1       Running   1          7h        10.233.165.193   10.147.190.81

nginx-dm-55b58f68b6-mp9vj   1/1       Running   2          7h        10.233.103.9     10.64.241.244

 

[root@master-2347205 ~]# kubectl get svc -o wide

NAME         TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)   AGE       SELECTOR

kubernetes   ClusterIP   10.254.0.1      <none>        443/TCP   1d        <none>

nginx-svc    ClusterIP   10.254.49.180   <none>        80/TCP    7h        name=nginx

# Create a pod to test the nameserver

apiVersion: v1

kind: Pod

metadata:

  name: alpine

spec:

  containers:

  - name: alpine

    image: alpine

    command:

    - sh

    - -c

    - while true; do sleep 1; done
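Save the pod spec to a file (alpine.yaml is an arbitrary name) and create it:

kubectl create -f alpine.yaml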

[root@master-2347205 ~]# kubectl get pods

NAME                        READY     STATUS    RESTARTS   AGE

alpine                      1/1       Running   1          7h

[root@master-2347205 ~]# kubectl exec -it alpine nslookup nginx-svc

nslookup: can't resolve '(null)': Name does not resolve

Name:      nginx-svc

Address 1: 10.254.49.180 nginx-svc.default.svc.cluster.local

Deploy Ingress and Dashboard

Deploy dashboard && heapster

Official dashboard GitHub: https://github.com/kubernetes/dashboard

Download the dashboard image

# Official image

gcr.io/google_containers/kubernetes-dashboard-amd64:v1.6.3

# Domestic mirror

jicki/kubernetes-dashboard-amd64:v1.6.3

 

Download the yaml files

curl -O https://raw.githubusercontent.com/kubernetes/kubernetes/master/cluster/addons/dashboard/dashboard-controller.yaml

curl -O https://raw.githubusercontent.com/kubernetes/kubernetes/master/cluster/addons/dashboard/dashboard-service.yaml

# RBAC is enabled, so an RBAC binding needs to be created here

vi dashboard-rbac.yaml

apiVersion: v1

kind: ServiceAccount

metadata:

  name: dashboard

  namespace: kube-system

---

kind: ClusterRoleBinding

apiVersion: rbac.authorization.k8s.io/v1alpha1

metadata:

  name: dashboard

subjects:

  - kind: ServiceAccount

    name: dashboard

    namespace: kube-system

roleRef:

  kind: ClusterRole

  name: cluster-admin

  apiGroup: rbac.authorization.k8s.io

 

Import the yaml

# Replace all the images

sed -i 's/gcr\.io\/google_containers/jicki/g' *

# Add RBAC authorization to dashboard-controller.yaml

# Add the following under the second spec

    spec:

      serviceAccountName: dashboard

# Import the files

[root@master-2347205 dashboard]# kubectl apply -f .

deployment "kubernetes-dashboard" created

serviceaccount "dashboard" created

clusterrolebinding "dashboard" created

service "kubernetes-dashboard" created

# Check the svc and pods

[root@master-2347205 ~]# kubectl get svc -n kube-system

NAME                   TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)         AGE

kube-dns               ClusterIP   10.254.0.2       <none>        53/UDP,53/TCP   4h

kubernetes-dashboard   ClusterIP   10.254.171.137   <none>        80/TCP          3h

* Deploy Nginx Ingress

Kubernetes currently exposes services in only three ways: LoadBalancer Service, NodePort Service and Ingress. What is Ingress? Ingress uses a load balancer such as Nginx or HAProxy to expose Kubernetes services.

Official Nginx Ingress GitHub: https://github.com/kubernetes/ingress-nginx

# The ingress controller can run in several ways: 1.  as a deployment, freely scheduled via replicas

#                                                 2.  as a daemonset, scheduled globally onto every node

# With the deployment approach the controller must be constrained to specific nodes, so those nodes need a label

# The defaults are:

[root@master-2347205 ~]# kubectl get nodes

NAME            STATUS    ROLES     AGE       VERSION

10.147.190.81   Ready     <none>    1d        v1.8.0

10.64.241.244   Ready     <none>    1d        v1.8.0

10.65.146.152   Ready     <none>    1d        v1.8.0

 

# Apply the label

[root@master-2347205 ingress-nginx]# kubectl label nodes 10.64.241.244 ingress=proxy

node "10.64.241.244" labeled

# After labelling

[root@master-2347205 ingress-nginx]# kubectl get nodes --show-labels

NAME            STATUS    ROLES     AGE       VERSION   LABELS

10.147.190.81   Ready     <none>    1d        v1.8.0    beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/hostname=10.147.190.81

10.64.241.244   Ready     <none>    1d        v1.8.0    beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,ingress=proxy,kubernetes.io/hostname=10.64.241.244

10.65.146.152   Ready     <none>    1d        v1.8.0    beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/hostname=10.65.146.152

 

# Download the images

# Official images

gcr.io/google_containers/defaultbackend:1.0

gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.13

 

# Domestic mirrors

jicki/defaultbackend:1.0

jicki/nginx-ingress-controller:0.9.0-beta.13

 

* Deploy the Nginx backend; it forwards requests for unmatched domains to a default page.

curl -O https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/namespace.yaml

curl -O https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/default-backend.yaml

# Replace all the images

sed -i 's/gcr\.io\/google_containers/jicki/g' *

# Import directly; nothing needs to be modified here

[root@master-2347205 ingress-nginx]# kubectl apply -f default-backend.yaml 

deployment "default-http-backend" created

service "default-http-backend" created

# Check the service

[root@master-2347205 ingress-nginx]# kubectl get deployment -n ingress-nginx default-http-backend

NAME                   DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE

default-http-backend   1         1         1            1           3h

# Deploy the Ingress RBAC configuration

curl -O https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/rbac.yaml

# Import the yaml file

[root@master-2347205 ingress-nginx]# kubectl apply -f rbac.yaml 

namespace "nginx-ingress" created

serviceaccount "nginx-ingress-serviceaccount" created

clusterrole "nginx-ingress-clusterrole" created

role "nginx-ingress-role" created

rolebinding "nginx-ingress-role-nisa-binding" created

clusterrolebinding "nginx-ingress-clusterrole-nisa-binding" created

# Deploy the Ingress Controller component

# Download the yaml file

curl -O https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/with-rbac.yaml

# Replace all the images

sed -i 's/gcr\.io\/google_containers/jicki/g' *

# The nodes were labelled above, so configure replicas: 2

# Modify the yaml file: add the RBAC service account, hostNetwork and nodeSelector under the second spec.

spec:

  replicas: 2

  ....

    spec:

      hostNetwork: true

      serviceAccountName: nginx-ingress-serviceaccount

      nodeSelector:

        ingress: proxy

    ....

# Import the yaml file

[root@master-2347205 ingress-nginx]# kubectl apply -f with-rbac.yaml

deployment "nginx-ingress-controller" created

# Check the service (with multiple labelled nodes the pods are scheduled onto different nodes)

[root@master-2347205 ingress-nginx]# kubectl get pods -n ingress-nginx -o wide

NAME                                        READY     STATUS    RESTARTS   AGE       IP              NODE

default-http-backend-845497d9ff-qcwxc       1/1       Running   0          3h        10.233.103.13   10.64.241.244

nginx-ingress-controller-5576895f6b-5ck4v   1/1       Running   0          3h        10.64.241.244   10.64.241.244

 

# Check the existing svc

 [root@master-2347205 ingress-nginx]# kubectl get svc -n ingress-nginx

NAME                   TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)   AGE

default-http-backend   ClusterIP   10.254.89.2   <none>        80/TCP    3h

# Create the yaml file

vi dashboard-ingress.yaml

apiVersion: extensions/v1beta1

kind: Ingress

metadata:

  name: dashboard-ingress

  namespace: kube-system

spec:

  rules:

  - host: master-2347205.lvs01.dev.ebayc3.com

    http:

      paths:

      - backend:

          serviceName: kubernetes-dashboard

          servicePort: 80

 

# Import the yaml

[root@master-2347205 dashboard]# kubectl apply -f dashboard-ingress.yaml 

ingress "dashboard-ingress" created

# Check the ingress

[root@master-2347205 dashboard]# kubectl get ingress -n kube-system -o wide

NAME                HOSTS                                 ADDRESS         PORTS     AGE

dashboard-ingress   master-2347205.lvs01.dev.ebayc3.com   10.64.241.244   80        3h

# Test access

[root@master-2347205 dashboard]#  curl -I   master-2347205.lvs01.dev.ebayc3.com

HTTP/1.1 200 OK

Server: nginx/1.13.5

Date: Tue, 31 Oct 2017 10:23:36 GMT

Content-Type: text/html; charset=utf-8

Content-Length: 848

Connection: keep-alive

Accept-Ranges: bytes

Cache-Control: no-store

Last-Modified: Fri, 28 Jul 2017 12:38:51 GMT




Update Version

Master or Node: update the binaries

cd /tmp

wget https://dl.k8s.io/v1.8.0/kubernetes-server-linux-amd64.tar.gz

systemctl stop kube-apiserver

systemctl stop kube-controller-manager

systemctl stop kube-scheduler

systemctl stop kubelet

systemctl stop kube-proxy

tar zxvf kubernetes-server-linux-amd64.tar.gz

cd kubernetes

cp -r server/bin/{kube-apiserver,kube-controller-manager,kube-scheduler,kubectl,kube-proxy,kubelet} /usr/local/bin/

systemctl start kube-apiserver

systemctl start kube-controller-manager

systemctl start kube-scheduler

systemctl start kubelet

systemctl start kube-proxy

systemctl status kube-apiserver

systemctl status kube-controller-manager

systemctl status kube-scheduler

systemctl status kubelet

systemctl status kube-proxy

cd ..

rm -rf kubernetes*

Node: update the binaries

cd /tmp

wget https://dl.k8s.io/v1.8.0/kubernetes-server-linux-amd64.tar.gz

systemctl stop kubelet

systemctl stop kube-proxy

tar zxvf kubernetes-server-linux-amd64.tar.gz

cd kubernetes

cp -r server/bin/{kubectl,kube-proxy,kubelet} /usr/local/bin/

systemctl start kubelet

systemctl start kube-proxy

systemctl status kubelet

systemctl status kube-proxy

cd ..

rm -rf kubernetes*


Special env

# Some special env entries that can be used in yaml

    env:

    - name: MY_POD_NAME

      valueFrom:

        fieldRef:

          fieldPath: metadata.name

    - name: MY_POD_NAMESPACE

      valueFrom:

        fieldRef:

          fieldPath: metadata.namespace

    - name: MY_POD_IP

      valueFrom:

        fieldRef:

          fieldPath: status.podIP
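A minimal pod sketch showing these env entries in context (the pod name, image and command are illustrative only):

apiVersion: v1
kind: Pod
metadata:
  name: env-demo
spec:
  containers:
  - name: env-demo
    image: alpine
    # print the injected values, then keep the pod alive for inspection
    command: ["sh", "-c", "env | grep MY_POD_ && sleep 3600"]
    env:
    - name: MY_POD_NAME
      valueFrom:
        fieldRef:
          fieldPath: metadata.name
    - name: MY_POD_NAMESPACE
      valueFrom:
        fieldRef:
          fieldPath: metadata.namespace
    - name: MY_POD_IP
      valueFrom:
        fieldRef:
          fieldPath: status.podIP

The values can then be checked with, for example: kubectl logs env-demo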

