環境準備:k8s集羣,1個master節點,3個node節點
[root@k8s-master mongo-cluster]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.1.120 k8s-master
192.168.1.121 k8s-node1
192.168.1.122 k8s-node2
192.168.1.123 k8s-node3
docker版本:
[root@k8s-master ~]# docker --version
Docker version 20.10.1, build 831ebea
kubernetes版本:
[root@k8s-master ~]# kubelet --version
Kubernetes v1.16.0
kubelet-1.16.0 kubeadm-1.16.0 kubectl-1.16.0
首先創建好NFS存儲
三臺機器都安裝並啓動服務:
[root@k8s-master ~]# yum install nfs-utils rpcbind -y
[root@k8s-master mongo-cluster]# systemctl restart nfs-server
[root@k8s-master mongo-cluster]# systemctl restart rpcbind
###創建NFS掛載目錄:
[root@k8s-master mongo-cluster]# mkdir /home/pvdata/share -p
NFS服務器上編寫NFS配置文件:
[root@k8s-master mongo-cluster]# cat /etc/exports
/home/pvdata/share *(rw,sync,insecure,no_subtree_check,no_root_squash)
#########服務器端加載配置文件
[root@k8s-master mongo-cluster]# exportfs -r
########查看掛載詳情
[root@k8s-master mongo-cluster]# exportfs
/home/pvdata/share  <world>
########
[root@k8s-master mongo-cluster]# cat serviceaccount.yaml #配置nfs-provisioner 的SA賬號
# ServiceAccount used by the nfs-client-provisioner pod
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-provisioner
[root@k8s-master mongo-cluster]# kubectl apply -f serviceaccount.yaml #執行該文件
[root@k8s-master mongo-cluster]# cat service-rbac.yaml #對sa賬號做rbac授權,RBAC(Role-Based Access control)基於角色的訪問控制
# Cluster-wide permissions the provisioner needs to manage PVs/PVCs,
# read StorageClasses and record events.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
  - apiGroups: [""]
    resources: ["services", "endpoints"]
    verbs: ["get"]
  - apiGroups: ["extensions"]
    resources: ["podsecuritypolicies"]
    resourceNames: ["nfs-provisioner"]
    verbs: ["use"]
---
# Bind the ClusterRole to the nfs-provisioner ServiceAccount
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-provisioner
    namespace: default
roleRef:
  kind: ClusterRole
  name: nfs-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
# Namespaced permissions for leader election (endpoints-based lock)
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-provisioner
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-provisioner
    namespace: default
roleRef:
  kind: Role
  name: leader-locking-nfs-provisioner
  apiGroup: rbac.authorization.k8s.io
[root@k8s-master mongo-cluster]# kubectl apply -f service-rbac.yaml #執行該文件
[root@k8s-master mongo-cluster]# cat service-rbac.yaml #再次查看rbac授權配置文件內容
# Cluster-wide permissions the provisioner needs to manage PVs/PVCs,
# read StorageClasses and record events.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
  - apiGroups: [""]
    resources: ["services", "endpoints"]
    verbs: ["get"]
  - apiGroups: ["extensions"]
    resources: ["podsecuritypolicies"]
    resourceNames: ["nfs-provisioner"]
    verbs: ["use"]
---
# Bind the ClusterRole to the nfs-provisioner ServiceAccount
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-provisioner
    namespace: default
roleRef:
  kind: ClusterRole
  name: nfs-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
# Namespaced permissions for leader election (endpoints-based lock)
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-provisioner
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-provisioner
    namespace: default
roleRef:
  kind: Role
  name: leader-locking-nfs-provisioner
  apiGroup: rbac.authorization.k8s.io
[root@k8s-master mongo-cluster]# cat nfs-provisioner-deploy.yaml #創建pod用來運行nfs-provisioner
# Deployment running the nfs-client-provisioner; it watches PVCs that
# reference the StorageClass below and carves out subdirectories of the
# NFS export as dynamically provisioned PVs.
kind: Deployment
apiVersion: apps/v1
metadata:
  name: nfs-provisioner
spec:
  selector:
    matchLabels:
      app: nfs-provisioner
  replicas: 1
  strategy:
    type: Recreate          # only one instance may run at a time
  template:
    metadata:
      labels:
        app: nfs-provisioner
    spec:
      serviceAccount: nfs-provisioner
      containers:
        - name: nfs-provisioner
          image: registry.cn-hangzhou.aliyuncs.com/open-ali/nfs-client-provisioner:latest
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: jim.com/nfs          # customizable; must match the StorageClass provisioner
            - name: NFS_SERVER
              value: 192.168.1.120        # NFS server IP address
            - name: NFS_PATH
              value: /home/pvdata/share   # NFS exported shared directory
      volumes:
        - name: nfs-client-root
          nfs:
            server: 192.168.1.120         # NFS server IP address
            path: /home/pvdata/share      # NFS exported shared directory
[root@k8s-master mongo-cluster]# kubectl apply -f nfs-provisioner-deploy.yaml #執行該文件
[root@k8s-master mongo-cluster]# kubectl get pod -l app=nfs-provisioner #檢查生成的pod;
NAME READY STATUS RESTARTS AGE
nfs-provisioner-7f866ccccf-8tvr2 1/1 Running 0 95m
[root@k8s-master mongo-cluster]# cat storageclass.yaml
# StorageClass backed by the nfs-client-provisioner; PVCs reference it by name
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: nfs-storageclass    # referenced later by the PVC templates
provisioner: jim.com/nfs    # must equal the PROVISIONER_NAME env value
[root@k8s-master mongo-cluster]# kubectl apply -f storageclass.yaml #執行該文件
[root@k8s-master mongo-cluster]# kubectl get storageclasses #檢查該存儲類是否創建成功
NAME PROVISIONER AGE
nfs-storageclass jim.com/nfs 96m
[root@k8s-master mongo-cluster]# cat mongo_sta_nodeport.yaml
# Headless Service: gives each StatefulSet pod a stable DNS identity
# (mongo-0.mongo, mongo-1.mongo, ...) for replica-set membership.
apiVersion: v1
kind: Service
metadata:
  name: mongo
  labels:
    app: mongo
spec:
  ports:
    - name: mongo
      port: 27017
      targetPort: 27017
  clusterIP: None
  selector:
    app: mongo
---
# NodePort Service for external access from mongo client management tools
apiVersion: v1
kind: Service
metadata:
  name: mongo-service
  labels:
    app: mongo
spec:
  ports:
    - name: mongo-http
      port: 27017
  selector:
    app: mongo
  type: NodePort
---
# Grant the default ServiceAccount read ("view") access so the sidecar
# can list pods and maintain replica-set membership.
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: mongo-default-view
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: view
subjects:
  - kind: ServiceAccount
    name: default
    namespace: default
---
# StatefulSet deploying the stateful MongoDB replica set
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: mongo
spec:
  selector:
    matchLabels:
      app: mongo
  serviceName: "mongo"
  replicas: 3                 # three replicas
  template:
    metadata:
      labels:
        app: mongo
        role: mongo
        environment: test
    spec:
      terminationGracePeriodSeconds: 10
      containers:
        - name: mongo
          image: mongo:3.4
          command:
            - mongod
            - "--replSet"
            - rs0
            - "--bind_ip"
            - 0.0.0.0
            - "--smallfiles"
            - "--noprealloc"
          ports:
            - containerPort: 27017
          volumeMounts:
            - name: mongo-persistent-storage
              mountPath: /data/db
        # Sidecar that watches pods via the Headless Service and keeps the
        # MongoDB replica-set configuration in sync as the cluster scales.
        - name: mongo-sidecar
          image: cvallance/mongo-k8s-sidecar
          env:
            - name: MONGO_SIDECAR_POD_LABELS
              value: "role=mongo,environment=test"
  volumeClaimTemplates:
    - metadata:
        name: mongo-persistent-storage
        annotations:
          # Use the NFS StorageClass created above to persist mongoDB data files
          volume.beta.kubernetes.io/storage-class: "nfs-storageclass"
      spec:
        accessModes: [ "ReadWriteOnce" ]
        resources:
          requests:
            storage: 10Gi
[root@k8s-master mongo-cluster]# kubectl apply -f mongo_sta_nodeport.yaml #運行以上yaml文件,創建mongo資源;
[root@k8s-master mongo-cluster]# kubectl get pod -l app=mongo -o wide #查看創建好的mongo pod信息;
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
mongo-0 2/2 Running 0 111m 10.244.2.9 k8s-node2 <none> <none>
mongo-1 2/2 Running 0 53m 10.244.1.14 k8s-node1 <none> <none>
mongo-2 2/2 Running 0 53m 10.244.3.12 k8s-node3 <none> <none>
[root@k8s-master mongo-cluster]# kubectl get svc -l app=mongo -o wide #查看mongo的服務資源,記住nodeport自動分配的端口號(31614),後面客戶端管理軟件連接時需要用到;
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
mongo ClusterIP None <none> 27017/TCP 28s app=mongo
mongo-service NodePort 10.1.101.194 <none> 27017:30448/TCP 28s app=mongo
[root@k8s-master mongo-cluster]# curl http://192.168.1.121:30448 #驗證http訪問,192.168.1.121爲我的node外網IP地址,可以改成你自己的,30448爲nodeport自動生成的端口號;
It looks like you are trying to access MongoDB over HTTP on the native driver port. #集羣訪問正常;