Kubernetes: Simple Installation and Deployment

Resource allocation

Prepare several virtual or physical machines and divide the resources according to actual needs. In this lab environment the resources are allocated as follows:

  • Hostname - IP - Role
  • k8s-master01 - 192.168.0.200 - Kubernetes master node 01
  • k8s-node01 - 192.168.0.210 - Kubernetes worker node 01
  • k8s-node02 - 192.168.0.211 - Kubernetes worker node 02

Pin the hostname-to-IP mapping

The purpose is to make the hosts easy to identify and to simplify access between them.
Here this is done by editing the hosts file; in a production environment a DNS service would be used instead.

# Set the hostname
hostnamectl set-hostname k8s-master01
# Configure a static IP address
[root@k8s-master01 ~]# cat /etc/sysconfig/network-scripts/ifcfg-ens33
TYPE=Ethernet
PROXY_METHOD=none
BROWSER_ONLY=no
BOOTPROTO=static
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
NAME=ens33
DEVICE=ens33
ONBOOT=yes
IPADDR=192.168.0.200
NETMASK=255.255.255.0
GATEWAY=192.168.0.1
DNS1=192.168.0.1
# Restart the network service so the static IP takes effect
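# (assumption: on this CentOS 7 host the legacy network service applies the ifcfg changes)
systemctl restart network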
# Pin the hostname-to-IP mapping
[root@k8s-master01 ~]# cat /etc/hosts
192.168.0.200  k8s-master01
192.168.0.210  k8s-node01
192.168.0.211  k8s-node02
# Test connectivity
[root@k8s-master01 ~]# ping k8s-master01
PING k8s-master01 (192.168.0.200) 56(84) bytes of data.
64 bytes from k8s-master01 (192.168.0.200): icmp_seq=1 ttl=64 time=0.061 ms

Install dependency packages

yum install -y conntrack ntpdate ntp ipvsadm ipset jq iptables curl sysstat libseccomp wget vim net-tools git

Stop firewalld and disable it at boot; install iptables-services, flush the rules, save them, and enable iptables at boot

systemctl stop firewalld && systemctl disable firewalld
yum -y install iptables-services
systemctl start iptables
systemctl enable iptables
iptables -F
service iptables save

Disable swap and SELinux

# Disable the swap partition
swapoff -a && sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
setenforce 0 && sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
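
A quick way to confirm both changes took effect (standard commands, nothing specific to this setup):

# swap should show 0 used and list no active swap devices
free -m
swapon -s
# SELinux reports Permissive now and Disabled after the next reboot
getenforce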

Tune kernel parameters

[root@k8s-master01 etc]# cat /etc/sysctl.d/kubernetes.conf 
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
net.ipv4.tcp_tw_recycle=0
vm.swappiness=0
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max=2310720
# Load the required kernel modules
modprobe ip_conntrack
modprobe br_netfilter
# Verify
lsmod | grep br_netfilter
# Apply the settings
sysctl -p /etc/sysctl.d/kubernetes.conf

net.ipv6.conf.all.disable_ipv6=1 : disable the IPv6 protocol
net.bridge.bridge-nf-call-iptables=1 : pass bridged traffic through the iptables chains
vm.swappiness=0 : avoid using swap; it is only touched when the system is about to run out of memory
vm.overcommit_memory=1 : do not check whether enough physical memory is available before allocating
vm.panic_on_oom=0 : do not panic on OOM; let the kernel's OOM killer handle it
fs.file-max=52706963 : system-wide maximum number of file handles
fs.nr_open=52706963 : maximum number of file descriptors a process may open
/etc/sysctl.d/ : files in this directory are applied automatically at boot
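
After sysctl -p has been run, a few of the values can be spot-checked; a minimal sketch:

sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward vm.swappiness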

Set the time zone and restart time-related services

# Set the system time zone to Asia/Shanghai
timedatectl set-timezone Asia/Shanghai
# Keep the hardware clock (RTC) in UTC
timedatectl set-local-rtc 0
systemctl restart rsyslog
systemctl restart crond
# Stop and disable services that are not needed
systemctl stop postfix
systemctl disable postfix
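
To confirm the result, timedatectl prints the current time zone and RTC settings; ntpdate (installed earlier) can do a one-off clock sync. The NTP server below is only an example, substitute your own:

timedatectl
ntpdate ntp1.aliyun.com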

Configure rsyslogd and systemd-journald

# Create the directory used for persistent log storage
mkdir /var/log/journal
mkdir /etc/systemd/journald.conf.d
[root@k8s-master01 etc]# cat /etc/systemd/journald.conf.d/99-prophet.conf
[Journal]
# Persist logs to disk
Storage=persistent
# Compress historical logs
Compress=yes
SyncIntervalSec=5m
RateLimitInterval=30s
RateLimitBurst=1000
# Maximum disk space used: 10G
SystemMaxUse=10G
# Maximum size of a single journal file: 200M
SystemMaxFileSize=200M
# Keep logs for 2 weeks
MaxRetentionSec=2week
# Do not forward logs to syslog
ForwardToSyslog=no
# Restart the service so the settings take effect
systemctl restart systemd-journald
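
Whether the journal is actually being persisted can be checked after the restart:

journalctl --disk-usage
ls /var/log/journal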

Upgrade the system kernel to 4.4

The purpose is to avoid bugs that make Kubernetes and Docker unstable on CentOS 7's stock 3.10 kernel.

rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm
# After installation, check that the new kernel's menuentry in /boot/grub2/grub.cfg contains an initrd16 line; if it does not, install again
yum --enablerepo=elrepo-kernel install -y kernel-lt
# Set the new kernel as the default boot entry
grub2-set-default "CentOS Linux (4.4.224-1.el7.elrepo.x86_64) 7 (Core)"
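
The exact menuentry title depends on the kernel-lt version elrepo currently ships, so it is safer to read it out of grub.cfg than to hard-code it; a sketch:

# list the titles present in grub.cfg and pass the new kernel's title to grub2-set-default
awk -F\' '$1=="menuentry " {print $2}' /boot/grub2/grub.cfg
# after the reboot, confirm the running kernel
uname -r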

Prerequisites for enabling IPVS in kube-proxy

modprobe  br_netfilter
[root@k8s-master01 etc]# cat /etc/sysconfig/modules/ipvs.modules
#!/bin/bash
modprobe  --  ip_vs
modprobe  --  ip_vs_rr
modprobe  --  ip_vs_wrr
modprobe  --  ip_vs_sh
modprobe  --  nf_conntrack_ipv4
# Make the script executable
chmod 755 /etc/sysconfig/modules/ipvs.modules
# Run it
bash /etc/sysconfig/modules/ipvs.modules
# Verify
lsmod | grep -e ip_vs -e nf_conn
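
The /etc/sysconfig/modules mechanism is inherited from older RHEL releases; on a systemd-based CentOS 7 host the same modules can also be declared under /etc/modules-load.d so systemd loads them on every boot. A sketch (the file name chosen here is arbitrary):

cat > /etc/modules-load.d/ipvs.conf <<EOF
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack_ipv4
EOF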

Install Docker

yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum update -y  &&  yum install -y  docker-ce
# Create the configuration directory
mkdir /etc/docker
# Configure the Docker daemon
[root@k8s-master01 local]# cat /etc/docker/daemon.json 
{
    "exec-opts": ["native.cgroupdriver=systemd"],
    "log-driver": "json-file",
    "log-opts": {
        "max-size": "100m"
    }
}
# Create a directory for docker service drop-in configuration files
mkdir -p /etc/systemd/system/docker.service.d
systemctl daemon-reload && systemctl restart docker && systemctl enable docker
# Reboot so the new kernel and the configuration take effect
grub2-set-default "CentOS Linux (4.4.224-1.el7.elrepo.x86_64) 7 (Core)" && reboot
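
After the host comes back up, it is worth confirming that Docker picked up the systemd cgroup driver from daemon.json, since a cgroup-driver mismatch with the kubelet is a common source of install failures:

docker info | grep -i cgroup
# expected to report the systemd cgroup driver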

Install kubeadm on the master and worker nodes

[root@k8s-master01 local]# cat /etc/yum.repos.d/kubernetes.repo 
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg

yum  -y  install  kubeadm-1.15.1 kubectl-1.15.1 kubelet-1.15.1
systemctl enable kubelet.service
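
A quick check that the pinned 1.15.1 packages were installed:

kubeadm version -o short
kubelet --version
kubectl version --client --short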

Import the base images (optional)

# Download the basic Docker image tarballs (placed under /usr/local here)
[root@k8s-master01 etc]# cd /usr/local/
# Import the images with the following script
[root@k8s-master01 local]# cat load_images.sh 
#!/bin/bash

# List the image tarballs shipped with the installer
ls /usr/local/kubeadm-basic.images > /tmp/image-list.txt

cd /usr/local/kubeadm-basic.images

# Load every tarball into the local Docker image cache
for i in $(cat /tmp/image-list.txt)
do
    docker load -i "$i"
done

rm -f /tmp/image-list.txt
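
A sketch of running the script, assuming the image tarballs have already been unpacked into /usr/local/kubeadm-basic.images; the worker nodes (IPs from the table above) need the same images loaded, so the directory and script can simply be copied over and run there as well:

bash /usr/local/load_images.sh
# copy the tarballs and the script to a worker node and load the images there too
scp -r /usr/local/kubeadm-basic.images /usr/local/load_images.sh root@192.168.0.210:/usr/local/
ssh root@192.168.0.210 "bash /usr/local/load_images.sh"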

Initialize the master node

kubeadm config print init-defaults > kubeadm-config.yaml
[root@k8s-master01 local]# cat /usr/local/install-k8s/core/kubeadm-config.yaml 
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.0.200
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: k8s-master01
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: k8s.gcr.io
kind: ClusterConfiguration
kubernetesVersion: v1.15.1
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
featureGates:
  SupportIPVSProxyMode: true
mode: ipvs

# To re-run the initialization, run the reset commands below first
#kubeadm reset
#iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
# Initialize the control plane (kubeadm 1.15 uses --upload-certs; older releases called it --experimental-upload-certs)
kubeadm init --config=kubeadm-config.yaml --upload-certs | tee kubeadm-init.log
cat kubeadm-init.log
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
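
At this point kubectl can talk to the API server; the master will stay NotReady until a pod network add-on is installed in the next step:

kubectl get nodes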


Deploy the pod network

wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
kubectl create -f kube-flannel.yml
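
Flannel runs as a DaemonSet; once its pods are Running the master switches to Ready:

kubectl get pods --all-namespaces -o wide
kubectl get nodes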

Join the other nodes to the cluster

kubeadm join 192.168.0.200:6443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:566f8c22bade6a88bf3a697f2d6aefc284d13dbd7a2331785eaf52f9b572fa2c
# If the join hangs, generate a new token and try again
#kubeadm token create --ttl 0
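
If the original token has expired (the default TTL is 24 hours), the master can print a complete, fresh join command:

kubeadm token create --print-join-command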