Rancher HA



Environment

IP             Alias  Spec  Application
172.19.129.68  do     2U4G  harbor / pg / nginx
172.19.201.241 m1     2U8G  k3s master
172.19.201.242 m2     2U8G  k3s master
172.19.201.243 m3     2U8G  k3s master

Harbor Installation

Harbor offline installation with Docker

Software

K3s(1.18.2)

https://github.com/rancher/k3s/releases/download/v1.18.2%2Bk3s1/k3s
https://github.com/rancher/k3s/releases/download/v1.18.2%2Bk3s1/k3s-airgap-images-amd64.tar
curl -sfL https://get.k3s.io -o install.sh
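
The binary and the air-gapped image tarball can be fetched on any machine with internet access and copied to the nodes later; a minimal sketch (the output file names are assumptions matching the URLs above):

# Download the k3s binary and the air-gap image tarball
curl -sfL -o ./k3s "https://github.com/rancher/k3s/releases/download/v1.18.2%2Bk3s1/k3s"
curl -sfL -o ./k3s-airgap-images-amd64.tar "https://github.com/rancher/k3s/releases/download/v1.18.2%2Bk3s1/k3s-airgap-images-amd64.tar"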

Rancher

rancher-images.txt
rancher-load-images.sh
rancher-save-images.sh

Package the images

chmod a+x ./rancher-save-images.sh
./rancher-save-images.sh --image-list ./rancher-images.txt
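
rancher-save-images.sh pulls every image listed in rancher-images.txt and normally writes them into a single rancher-images.tar.gz in the working directory (that file name is the script's usual default and is treated here as an assumption); a quick sanity check:

# Confirm the archive exists and contains layers (rancher-images.tar.gz is the assumed default output name)
ls -lh ./rancher-images.tar.gz
tar -tzf ./rancher-images.tar.gz | head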

Push the images to the private registry

Create a rancher project in the Harbor registry first.
docker login -u admin -p Harbor12345 harbor.${domain}
# Create the project in Harbor: rancher
sh ./rancher-load-images.sh --image-list ./rancher-images.txt --registry harbor.${domain}
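
To confirm the push worked, the Harbor API can be queried for the repositories under the rancher project; a sketch, assuming Harbor 2.x (the API path differs on 1.x):

# List repositories in the rancher project (Harbor 2.x API path is an assumption)
curl -sk -u admin:Harbor12345 "https://harbor.${domain}/api/v2.0/projects/rancher/repositories?page_size=20"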

Install the K3s Cluster

Mount the ISO media repository (CentOS 8)

mkdir -p /media/cdrom
mount /dev/sr0 -t iso9660 /media/cdrom

rm -f /etc/yum.repos.d/*.repo
cat > /etc/yum.repos.d/CentOS-Media.repo << EOF
[c8-media-BaseOS]
name=CentOS-BaseOS-\$releasever - Media
baseurl=file:///media/cdrom/BaseOS
gpgcheck=0
enabled=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial

[c8-media-AppStream]
name=CentOS-AppStream-\$releasever - Media
baseurl=file:///media/cdrom/AppStream
gpgcheck=0
enabled=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial
EOF

## Rebuild the cache
yum clean all
yum makecache
## Persist the mount across reboots
echo "/dev/sr0    /media/cdrom     auto    defaults    0  0" >> /etc/fstab

All K3s Nodes

Script variables

# ip
export do=172.19.129.68
export m1=172.19.201.241
export m2=172.19.201.242
export m3=172.19.201.243
export domain=do.io

Script

# Customize the shell prompt
echo "export PS1='[\u@\h \t \w]\\$ '" >> /etc/profile
# System locale
echo 'LANG="en_US.UTF-8"' >> /etc/profile
# Harbor access: export the domain and add host entries
echo "export domain=${domain}" >> /etc/profile
echo "${do} harbor.${domain}" >> /etc/hosts
echo "${do} do" >> /etc/hosts
echo "${m1} m1" >> /etc/hosts
echo "${m2} m2" >> /etc/hosts
echo "${m3} m3" >> /etc/hosts
source /etc/profile
# OS tuning (sysctl)
cat > /etc/sysctl.conf << EOF
net.bridge.bridge-nf-call-ip6tables=1
net.bridge.bridge-nf-call-iptables=1
net.ipv4.ip_forward=1
net.ipv4.conf.all.forwarding=1
net.ipv4.neigh.default.gc_thresh1=4096
net.ipv4.neigh.default.gc_thresh2=6144
net.ipv4.neigh.default.gc_thresh3=8192
net.ipv4.neigh.default.gc_interval=60
net.ipv4.neigh.default.gc_stale_time=120

# 參考 https://github.com/prometheus/node_exporter#disabled-by-default
kernel.perf_event_paranoid=-1

#sysctls for k8s node config
net.ipv4.tcp_slow_start_after_idle=0
net.core.rmem_max=16777216
fs.inotify.max_user_watches=524288
kernel.softlockup_all_cpu_backtrace=1

kernel.softlockup_panic=0

kernel.watchdog_thresh=30
fs.file-max=2097152
fs.inotify.max_user_instances=8192
fs.inotify.max_queued_events=16384
vm.max_map_count=262144
fs.may_detach_mounts=1
net.core.netdev_max_backlog=16384
net.ipv4.tcp_wmem=4096 12582912 16777216
net.core.wmem_max=16777216
net.core.somaxconn=32768
net.ipv4.ip_forward=1
net.ipv4.tcp_max_syn_backlog=8096
net.ipv4.tcp_rmem=4096 12582912 16777216

net.ipv6.conf.all.disable_ipv6=1
net.ipv6.conf.default.disable_ipv6=1
net.ipv6.conf.lo.disable_ipv6=1

kernel.yama.ptrace_scope=0
vm.swappiness=0

# Controls whether the PID is appended to core dump file names
kernel.core_uses_pid=1

# Do not accept source routing
net.ipv4.conf.default.accept_source_route=0
net.ipv4.conf.all.accept_source_route=0

# Promote secondary addresses when the primary address is removed
net.ipv4.conf.default.promote_secondaries=1
net.ipv4.conf.all.promote_secondaries=1

# Enable hard and soft link protection
fs.protected_hardlinks=1
fs.protected_symlinks=1

# Reverse path (source route) validation
# see details in https://help.aliyun.com/knowledge_detail/39428.html
net.ipv4.conf.all.rp_filter=0
net.ipv4.conf.default.rp_filter=0
net.ipv4.conf.default.arp_announce = 2
net.ipv4.conf.lo.arp_announce=2
net.ipv4.conf.all.arp_announce=2

# see details in https://help.aliyun.com/knowledge_detail/41334.html
net.ipv4.tcp_max_tw_buckets=5000
net.ipv4.tcp_syncookies=1
net.ipv4.tcp_fin_timeout=30
net.ipv4.tcp_synack_retries=2
kernel.sysrq=1
EOF

cat > /etc/security/limits.conf << EOF
root soft nofile 65535
root hard nofile 65535
* soft nofile 65535
* hard nofile 65535
EOF
sysctl -p

# Disable SELinux
sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
setenforce 0
# Disable the firewall
systemctl stop firewalld.service && systemctl disable firewalld.service
# Set the timezone
ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
# Disable swap
swapoff -a && sysctl -w vm.swappiness=0 && sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
# Reboot
reboot
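
After the node comes back up, the settings can be spot-checked; a short sketch:

# SELinux, swap, and the key sysctls should reflect the changes above
getenforce                      # expect Disabled
swapon --show                   # expect no output
sysctl net.ipv4.ip_forward      # expect 1
ulimit -n                       # expect 65535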

Prepare the images

cd /home/k3s_1.18.2
mkdir -p /var/lib/rancher/k3s/agent/images/
\cp ./k3s-airgap-images-amd64.tar /var/lib/rancher/k3s/agent/images/
chmod a+x ./k3s ./install.sh

Use Containerd

# docker
echo "alias docker='k3s crictl'" >> /etc/profile
## Harbor CA certificate
\cp /home/certs/ca.crt /etc/ssl/certs/
## Configure /etc/rancher/k3s/registries.yaml
mkdir -p /etc/rancher/k3s
cat > /etc/rancher/k3s/registries.yaml << EOF
mirrors:
  docker.io:  # if docker.io causes problems, try changing this key to harbor.do.io
    endpoint:
      - "https://harbor.${domain}"
configs:
  docker.io: 
    auth:
      username: admin # registry username
      password: Harbor12345 # registry password
    tls:
      cert_file: /home/certs/${domain}.cert # <path to the client certificate for the registry>
      key_file: /home/certs/${domain}.key # <path to the client key for the registry>
      ca_file: /home/certs/ca.crt # <path to the CA certificate for the registry>
EOF
## Verify
cat /etc/rancher/k3s/registries.yaml

Dqlite or PostgreSQL

Dqlite

Master-1
export INSTALL_K3S_SKIP_DOWNLOAD=true
export INSTALL_K3S_EXEC="server --cluster-init --write-kubeconfig ~/.kube/config --write-kubeconfig-mode 666"
\cp ./k3s /usr/local/bin/ && ./install.sh
cat /var/lib/rancher/k3s/server/node-token
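
Before joining the remaining masters (next step), the token can be captured over ssh instead of pasting it by hand; a sketch, assuming password-less root ssh from m2/m3 to m1:

# Pull the join token from master-1
export K3S_TOKEN=$(ssh root@m1 cat /var/lib/rancher/k3s/server/node-token)
echo "$K3S_TOKEN"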
Master-N

export K3S_TOKEN=

export INSTALL_K3S_SKIP_DOWNLOAD=true
export INSTALL_K3S_EXEC="server --server https://m1:6443 --write-kubeconfig ~/.kube/config --write-kubeconfig-mode 666"
\cp ./k3s /usr/local/bin/ && ./install.sh

Postgres 11.8

docker-compose.yml
version: '3.1'

services:

  db:
    image: postgres:11.8
    container_name: pg
    restart: always
    environment:
      POSTGRES_PASSWORD: "12345"
    ports:
      - "5432:5432"
    volumes:
      - /home/pgdata:/var/lib/postgresql/data

  adminer:
    image: adminer
    container_name: adminer
    restart: always
    ports:
      - 8888:8080
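
Bring the database up on the op node and make sure it answers before pointing the masters at it; a sketch (pre-creating the kubernetes database is a precaution, in case K3s does not create it itself):

# Start postgres + adminer and check connectivity
docker-compose up -d
docker exec pg psql -U postgres -c '\l'
# Pre-create the datastore database referenced in the endpoint below (assumption: it may not be auto-created)
docker exec pg psql -U postgres -c 'CREATE DATABASE kubernetes;'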
Master-1-N
export INSTALL_K3S_SKIP_DOWNLOAD=true
export INSTALL_K3S_EXEC="server --datastore-endpoint=postgres://postgres:12345@harbor.${domain}:5432/kubernetes?sslmode=disable --write-kubeconfig ~/.kube/config --write-kubeconfig-mode 666"
\cp ./k3s /usr/local/bin/ && ./install.sh

Verify

# Checks
echo $INSTALL_K3S_EXEC
# Is the image registry pointed at harbor?
cat /var/lib/rancher/k3s/agent/etc/containerd/config.toml
kubectl get nodes
kubectl get pods --all-namespaces
# Point kubeconfig at rancher.do.io to go through the load balancer
## sed -i "s#server: https://127.0.0.1:6443#server: https://rancher.${domain}:6443#g"  /etc/rancher/k3s/k3s.yaml
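
With the mirror in registries.yaml active, a pull of a docker.io image should actually be served by Harbor; a quick test (the image and tag are only examples of something already pushed into the rancher project):

# Pull through containerd; per registries.yaml the request should hit harbor.${domain}
k3s crictl pull rancher/rancher-agent:v2.4.3
k3s crictl images | grep rancher-agent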

Worker-N

Variables
cat /var/lib/rancher/k3s/server/node-token # run on master1 to fetch the token
export K3S_TOKEN=
Script
export INSTALL_K3S_SKIP_DOWNLOAD=true
export K3S_URL=https://do:6443
export INSTALL_K3S_EXEC="agent"
chmod a+x ./k3s ./install.sh 
cp ./k3s /usr/local/bin/ && ./install.sh
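
The new agent should appear on the server side shortly after install.sh finishes; check from any master:

# Run on a master node
kubectl get nodes -o wide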

Install Rancher

Fetch the packages from the public internet

# Add the Helm chart repository
helm repo add rancher-stable https://releases.rancher.com/server-charts/stable
# Download rancher-2.4.3.tgz locally
helm fetch rancher-stable/rancher
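
Without a version flag, helm fetch grabs the newest stable chart, which may not match the 2.4.3 images mirrored earlier; the chart version can be pinned to keep them in sync (2.4.3 as the version string is an assumption matching the tgz name used below):

# Pin the chart version to match the mirrored images
helm fetch rancher-stable/rancher --version 2.4.3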

Installation script

Master-1

cd /home/rancher243
# 1. Install Helm 3.2.1
chmod a+x ./helm
\cp ./helm /usr/local/bin/
# 2. Render the Rancher Helm template with the self-signed certificate settings
helm template rancher ./rancher-2.4.3.tgz --output-dir . \
--namespace cattle-system \
--set hostname=rancher.${domain} \
--set rancherImage=harbor.${domain}/rancher/rancher \
--set privateCA=true \
--set ingress.tls.source=secret \
--set systemDefaultRegistry=harbor.${domain} \
--set useBundledSystemChart=true

# 3. Install
kubectl create namespace cattle-system
## TLS secret for the ingress
kubectl -n cattle-system create secret tls tls-rancher-ingress --cert=/home/certs/do.io.crt --key=/home/certs/do.io.key
## Privately issued CA certificate
\cp /home/certs/do.io.crt ./cacerts.pem
kubectl -n cattle-system create secret generic tls-ca --from-file=cacerts.pem
## Install rancher
kubectl -n cattle-system apply -R -f ./rancher
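
The rollout can take a few minutes while the bundled system charts are applied; it can be watched with:

# Blocks until all rancher replicas are available
kubectl -n cattle-system rollout status deploy/rancher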

Verify

### View pod events
kubectl -n cattle-system describe pod
### Is rancher running?
kubectl -n cattle-system get pods
## Access
https://rancher.do.io
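
The browser host has to resolve rancher.do.io to the Nginx load balancer (172.19.129.68 in this environment); on a Linux client a hosts entry is enough:

# On the client machine, not on the cluster nodes
echo "172.19.129.68 rancher.do.io" >> /etc/hosts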

Nginx

docker-compose.yml

services:
  nginx:
    image: nginx
    container_name: nginx
    restart: always
    ports:
      - "80:80"
      - "6443:6443"
      - "8443:8443"
    volumes:
      - /home/nginx.conf:/etc/nginx/nginx.conf:ro
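
Start the load balancer and validate the mounted configuration; a sketch:

# Bring nginx up and run its built-in config check inside the container
docker-compose up -d
docker exec nginx nginx -t
docker logs --tail 20 nginx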

nginx.conf

worker_processes 4;
worker_rlimit_nofile 40000;

events {
    worker_connections 8192;
}

stream {
    upstream rancher_servers_http {
        least_conn;
        server 172.19.201.241:80 max_fails=3 fail_timeout=5s;
        server 172.19.201.242:80 max_fails=3 fail_timeout=5s;
        server 172.19.201.243:80 max_fails=3 fail_timeout=5s;
    }
    server {
        listen 80;
        proxy_pass rancher_servers_http;
    }

    upstream rancher_servers_https {
        least_conn;
        server 172.19.201.241:443 max_fails=3 fail_timeout=5s;
        server 172.19.201.242:443 max_fails=3 fail_timeout=5s;
        server 172.19.201.243:443 max_fails=3 fail_timeout=5s;
    }
    server {
        listen     8443;
        proxy_pass rancher_servers_https;
    }

    upstream apiserver_https {
        least_conn;
        server 172.19.201.241:6443 max_fails=3 fail_timeout=5s;
        server 172.19.201.242:6443 max_fails=3 fail_timeout=5s;
        server 172.19.201.243:6443 max_fails=3 fail_timeout=5s;
    }
    server {
        listen     6443;
        proxy_pass apiserver_https;
    }
}
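
A quick end-to-end check from any host that can reach the op node, without touching /etc/hosts (the self-signed certificate requires -k):

# Resolve rancher.do.io to the load balancer just for this request
curl -kI --resolve rancher.do.io:8443:172.19.129.68 https://rancher.do.io:8443/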

WebSocket errors

nginx.conf
worker_processes 4;
worker_rlimit_nofile 40000;

events {
    worker_connections 8192;
}

http {
    include ./conf.d/*.conf;
}
./conf.d/rancher.conf
upstream rancher {
    least_conn;
    server 10.10.10.10:443 max_fails=3 fail_timeout=5s;
    server 10.10.10.11:443 max_fails=3 fail_timeout=5s;
    server 10.10.10.12:443 max_fails=3 fail_timeout=5s;
}

server {
    listen 80;
    server_name rancher.do.io;
    return 301 https://$server_name$request_uri;
}

server {
    listen 443 ssl ;
    server_name rancher.do.io;

    ssl_certificate /home/certs/cert.pem;
    ssl_certificate_key /home/certs/cert.key;

    location / {
        proxy_pass https://rancher;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "Upgrade";
    }
}
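
For this layout the extra file has to be visible inside the container and the configuration reloaded; a sketch, assuming conf.d is mounted into the container at /etc/nginx/conf.d (e.g. an additional volume entry like /home/conf.d:/etc/nginx/conf.d:ro):

# Validate and reload without restarting the container
docker exec nginx nginx -t && docker exec nginx nginx -s reload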