Kubernetes Learning Summary (6): Volumes

In Kubernetes, volumes are declared at the Pod level and share the Pod's lifecycle: an ephemeral volume such as emptyDir is deleted together with its Pod, whereas data on network or persistent storage outlives the Pod that mounts it. K8s volumes are an extension of Docker volumes, and K8s integrates with a wide range of storage systems, including local storage (emptyDir, hostPath), network storage (NFS, GlusterFS, PV/PVC), and cloud storage (GCE PD, Azure Disk, OpenStack Cinder, AWS EBS, vSphere Volume, etc.).
1. Local Storage
1) emptyDir
An emptyDir volume is created on demand when the Pod starts and is deleted along with the Pod, which makes it suitable as scratch space or a cache; all containers in the same Pod can share an emptyDir volume.
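Besides the default disk-backed form, an emptyDir can also be backed by RAM via medium: Memory, optionally capped with sizeLimit; a minimal sketch (both fields are standard API, the size value is only illustrative):

  volumes:
  - name: cache
    emptyDir:
      medium: Memory     # tmpfs; contents count against the container's memory usage
      sizeLimit: 64Mi    # illustrative cap; exceeding it gets the Pod evicted
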
Example 1:

[root@docker79 volume]# vim pod-vol-demo.yaml
[root@docker79 volume]# cat pod-vol-demo.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-demo
  namespace: default
  labels:
    app: myapp
    tier: frontend
  annotations:
    inspiry.com/author: "cluster admin"
spec:
  containers:
  - name: myapp
    image: ikubernetes/myapp:v1
    ports:
    - name: http
      containerPort: 80
    volumeMounts:
    - name: html
      mountPath: /data/web/html/
  - name: busybox
    image: busybox:latest
    imagePullPolicy: IfNotPresent
    volumeMounts:
    - name: html
      mountPath: /data/
    command:
    - "/bin/sh"
    - "-c"
    - "sleep 7200"
  volumes:
  - name: html
    emptyDir: {}
[root@docker79 volume]# kubectl apply -f pod-vol-demo.yaml
pod/pod-demo created
[root@docker79 volume]# kubectl get pods
NAME             READY     STATUS    RESTARTS   AGE
pod-demo         2/2       Running   0          11s
[root@docker79 volume]# kubectl exec -it pod-demo -c busybox  -- /bin/sh
/ # ls
bin   data  dev   etc   home  proc  root  sys   tmp   usr   var
/ # ls /data
/ # echo $(date) >> /data/index.html
/ # 
[root@docker79 volume]# kubectl exec -it pod-demo -c myapp  -- /bin/sh
/ # ls
bin    dev    home   media  proc   run    srv    tmp    var
data   etc    lib    mnt    root   sbin   sys    usr
/ # ls /data/web/html/index.html
/data/web/html/index.html
/ # cat /data/web/html/index.html
Wed Sep 5 02:21:51 UTC 2018
/ #
[root@docker79 volume]# kubectl delete -f pod-vol-demo.yaml
pod "pod-demo" deleted

Example 2:

[root@docker79 volume]# vim pod-vol-demo.yaml
[root@docker79 volume]# cat pod-vol-demo.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-demo
  namespace: default
  labels:
    app: myapp
    tier: frontend
  annotations:
    inspiry.com/author: "cluster admin"
spec:
  containers:
  - name: myapp
    image: ikubernetes/myapp:v1
    imagePullPolicy: IfNotPresent
    ports:
    - name: http
      containerPort: 80
    volumeMounts:
    - name: html
      mountPath: /usr/share/nginx/html/
  - name: busybox
    image: busybox:latest
    imagePullPolicy: IfNotPresent
    volumeMounts:
    - name: html
      mountPath: /data/
    command:
    - "/bin/sh"
    - "-c"
    - "while true; do echo $$(date) >> /data/index.html ; sleep 2; done"
  volumes:
  - name: html
    emptyDir: {}
[root@docker79 volume]# kubectl apply -f pod-vol-demo.yaml
pod/pod-demo created

[root@docker79 volume]# kubectl get pods -o wide
NAME        READY     STATUS    RESTARTS   AGE       IP            NODE       NOMINATED NODE
pod-demo    2/2       Running   0          23s       10.244.2.49   docker78   <none>
[root@docker79 volume]# curl http://10.244.2.49
Wed Sep 5 02:43:32 UTC 2018
Wed Sep 5 02:43:34 UTC 2018
......

2) hostPath
A hostPath volume mounts a file or directory from the host node's filesystem into the Pod. The data lives on the node itself, so it survives Pod deletion but is only visible to Pods scheduled onto that node; the type field (DirectoryOrCreate below) tells the kubelet how to validate, or create, the path before mounting.
Example:

[root@docker79 volume]# cat pod-vol-hostpath.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-vol-hostpath
  namespace: default
spec:
  containers:
  - name: myapp
    image: ikubernetes/myapp:v1
    volumeMounts:
    - name: html
      mountPath: /usr/share/nginx/html/
  volumes:
  - name: html
    hostPath:
      path: /data/pod/volume1/
      type: DirectoryOrCreate
[root@docker79 volume]#

[root@docker79 ~]# ssh docker78
Last login: Tue Sep  4 14:56:29 2018 from docker79
[root@docker78 ~]# mkdir -p /data/pod/volume1
[root@docker78 ~]# echo 78 > /data/pod/volume1/index.html
[root@docker78 ~]# logout
Connection to docker78 closed.
[root@docker79 ~]# ssh docker77
Last login: Tue Aug 28 15:04:15 2018 from docker79
[root@docker77 ~]# mkdir -p /data/pod/volume1
[root@docker77 ~]# echo 77 > /data/pod/volume1/index.html
[root@docker77 ~]# logout
Connection to docker77 closed.

[root@docker79 ~]# cd manifests/volume/
[root@docker79 volume]# kubectl apply -f pod-vol-hostpath.yaml
pod/pod-vol-hostpath created
[root@docker79 volume]# kubectl get pods -o wide
NAME               READY     STATUS    RESTARTS   AGE       IP            NODE       NOMINATED NODE
pod-demo           2/2       Running   0          26m       10.244.2.49   docker78   <none>
pod-vol-hostpath   1/1       Running   0          11s       10.244.2.50   docker78   <none>
[root@docker79 volume]#
[root@docker79 volume]# curl http://10.244.2.50
78
[root@docker79 volume]#
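
Because hostPath data is node-local, the "78" above simply reflects the fact that the scheduler placed the Pod on docker78. To make the result deterministic, the Pod can be pinned to a node, e.g. with nodeName; a minimal sketch reusing the manifest above (the Pod name is hypothetical):

apiVersion: v1
kind: Pod
metadata:
  name: pod-vol-hostpath-pinned
  namespace: default
spec:
  nodeName: docker78           # bypass the scheduler and run on docker78
  containers:
  - name: myapp
    image: ikubernetes/myapp:v1
    volumeMounts:
    - name: html
      mountPath: /usr/share/nginx/html/
  volumes:
  - name: html
    hostPath:
      path: /data/pod/volume1/
      type: DirectoryOrCreate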

2. Network Storage
1) nfs
Pod (Container) ------> NFS storage

[root@docker ~]# echo nfs > /data/volumes/index.html
[root@docker ~]#
[root@docker ~]# ip add show ens192
2: ens192: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
    link/ether 00:0c:29:c7:42:5b brd ff:ff:ff:ff:ff:ff
    inet 192.168.20.223/24 brd 192.168.20.255 scope global noprefixroute ens192
       valid_lft forever preferred_lft forever
    inet6 fe80::bdac:2fd7:290b:aba/64 scope link noprefixroute
       valid_lft forever preferred_lft forever
[root@docker ~]# cat /etc/exports
/data/volumes 192.168.20.0/24(rw,no_root_squash)
[root@docker ~]#
[root@docker79 volume]# vim pod-vol-nfs.yaml
[root@docker79 volume]# cat pod-vol-nfs.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-vol-nfs
  namespace: default
spec:
  containers:
  - name: myapp
    image: ikubernetes/myapp:v1
    volumeMounts:
    - name: html
      mountPath: /usr/share/nginx/html/
  volumes:
  - name: html
    nfs:
      path: /data/volumes
      server: 192.168.20.223
[root@docker79 volume]# kubectl apply -f pod-vol-nfs.yaml
pod/pod-vol-nfs created
[root@docker79 volume]# kubectl get pods -o wide
NAME                             READY     STATUS    RESTARTS   AGE       IP            NODE       NOMINATED NODE
pod-demo                         2/2       Running   0          2h        10.244.2.49   docker78   <none>
pod-vol-hostpath                 1/1       Running   0          1h        10.244.2.50   docker78   <none>
pod-vol-nfs                      1/1       Running   0          6s        10.244.2.51   docker78   <none>
[root@docker79 volume]# curl http://10.244.2.51
nfs
[root@docker79 volume]#
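
Note that the NFS mount is performed by the kubelet on the worker node, not inside the container, so the NFS client utilities must be installed on every node that may run the Pod. On CentOS, as used in these transcripts, that would be:

[root@docker78 ~]# yum install -y nfs-utils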

2) persistentVolumeClaim
persistentVolumeClaim is a way of consuming storage in which distributed storage resources are first exposed as PersistentVolumes (PVs), a PersistentVolumeClaim (PVC) is then created against those PVs, and finally the PVC is mounted into a container in the Pod. The underlying storage can be NFS, iSCSI, Ceph, GlusterFS, and so on.

The PV/PVC lifecycle
Provisioning: persistent storage is supplied by a storage system outside the cluster or by a public-cloud storage offering.
Static provisioning: the administrator creates a number of PVs by hand for PVCs to consume.
Dynamic provisioning: a PV tailored to a specific PVC is created and bound on demand.
Binding: the user creates a PVC specifying the resources and access modes required. Until a suitable PV is found, the PVC remains unbound.
Using: the user consumes the PVC in a Pod just like an ordinary volume.
Releasing: the user deletes the PVC to give the storage back; the PV enters the "Released" state. Since it still holds the previous data, that data must be handled according to the reclaim policy before the storage can serve another PVC.
Reclaiming: a PV can carry one of three reclaim policies: Retain, Recycle, or Delete (see the sketch after this list).
Retain: the data is kept for manual handling.
Delete: the PV and the external storage resource behind it are deleted; requires plugin support.
Recycle: the volume is scrubbed and can then be used by a new PVC; requires plugin support.
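
The policy is set per PV through the persistentVolumeReclaimPolicy field; manually created PVs default to Retain. A minimal sketch of the relevant part of a PV spec, mirroring the NFS PVs used later in this article:

spec:
  capacity:
    storage: 5Gi
  accessModes: ["ReadWriteMany"]
  persistentVolumeReclaimPolicy: Retain   # Retain | Recycle | Delete
  nfs:
    path: /data/volumes/v1
    server: 192.168.20.223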

PV phases
Available – the volume is not yet bound to any PVC
Bound – the volume is bound to a PVC
Released – the PVC was deleted, but the volume has not yet been reclaimed by the cluster
Failed – automatic reclamation of the volume failed

PV access modes
ReadWriteOnce (RWO) – read-write, mountable by a single node
ReadOnlyMany (ROX) – read-only, mountable by many nodes
ReadWriteMany (RWX) – read-write, mountable by many nodes

Procedure
This article demonstrates with NFS, as follows:
Pod (Container) ----> PVC ----> PV ----> NFS storage

First, prepare the underlying storage resources:

[root@docker ~]# cat /etc/exports
/data/volumes/v1 192.168.20.0/24(rw,no_root_squash)
/data/volumes/v2 192.168.20.0/24(rw,no_root_squash)
/data/volumes/v3 192.168.20.0/24(rw,no_root_squash)
/data/volumes/v4 192.168.20.0/24(rw,no_root_squash)
/data/volumes/v5 192.168.20.0/24(rw,no_root_squash)
[root@docker ~]# exportfs -rv
exporting 192.168.20.0/24:/data/volumes/v5
exporting 192.168.20.0/24:/data/volumes/v4
exporting 192.168.20.0/24:/data/volumes/v3
exporting 192.168.20.0/24:/data/volumes/v2
exporting 192.168.20.0/24:/data/volumes/v1
[root@docker ~]#
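
exportfs assumes the exported directories already exist on the NFS server; if they do not, create them first, e.g.:

[root@docker ~]# mkdir -p /data/volumes/v{1,2,3,4,5}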

Then create PersistentVolume (PV) objects on top of the exported storage:

[root@docker79 volume]# vim pv-vol-demo.yaml
[root@docker79 volume]# cat pv-vol-demo.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv01
  labels:
    name: pv01
spec:
  nfs:
    path: /data/volumes/v1
    server: 192.168.20.223
  accessModes: ["ReadWriteMany","ReadWriteOnce"]
  capacity:
    storage: 5Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv02
  labels:
    name: pv02
spec:
  nfs:
    path: /data/volumes/v2
    server: 192.168.20.223
  accessModes: ["ReadWriteMany","ReadWriteOnce"]
  capacity:
    storage: 10Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv03
  labels:
    name: pv03
spec:
  nfs:
    path: /data/volumes/v3
    server: 192.168.20.223
  accessModes: ["ReadWriteMany","ReadWriteOnce"]
  capacity:
    storage: 15Gi
[root@k8s-master-dev volumes]# kubectl apply -f pv-vol-demo.yaml
persistentvolume/pv01 created
persistentvolume/pv02 created
persistentvolume/pv03 created
[root@k8s-master-dev volumes]# kubectl get pv
NAME      CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM     STORAGECLASS   REASON    AGE
pv01      5Gi        RWO,RWX        Retain           Available                                      13s
pv02      10Gi       RWO,RWX        Retain           Available                                      13s
pv03      15Gi       RWO,RWX        Retain           Available                                      13s
[root@k8s-master-dev volumes]#

Finally, create a PersistentVolumeClaim (PVC) and mount it into the container in the Pod:

[root@k8s-master-dev volumes]# vim pod-pvc-vol-demo.yaml
[root@k8s-master-dev volumes]# cat pod-pvc-vol-demo.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mypvc
  namespace: default
spec:
  accessModes: ["ReadWriteMany"]
  resources:
    requests:
      storage: 6Gi
---
apiVersion: v1
kind: Pod
metadata:
  name: pod-pvc-vol
  namespace: default
spec:
  containers:
  - name: myapp
    image: nginx:1.15-alpine
    volumeMounts:
    - name: html
      mountPath: /usr/share/nginx/html/
  volumes:
  - name: html
    persistentVolumeClaim:
      claimName: mypvc
[root@k8s-master-dev volumes]# kubectl apply -f pod-pvc-vol-demo.yaml
persistentvolumeclaim/mypvc created
pod/pod-pvc-vol created
[root@k8s-master-dev volumes]# kubectl get pv
NAME      CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM           STORAGECLASS   REASON    AGE
pv01      5Gi        RWO,RWX        Retain           Available                                            5m
pv02      10Gi       RWO,RWX        Retain           Bound       default/mypvc                            5m
pv03      15Gi       RWO,RWX        Retain           Available                                            5m
[root@k8s-master-dev volumes]# kubectl get pvc
NAME      STATUS    VOLUME    CAPACITY   ACCESS MODES   STORAGECLASS   AGE
mypvc     Bound     pv02      10Gi       RWO,RWX                       13s
[root@k8s-master-dev volumes]#

Because the PVC requests 6Gi of storage, it automatically binds to pv02 (10Gi): pv01 (5Gi) is too small, and pv02 is the smallest PV that satisfies both the requested size and the access modes. In the example above, the PVs had to be created by hand before the PVC could be created and mounted; this is static provisioning. For dynamic provisioning with a StorageClass, see http://docs.kubernetes.org.cn/803.html; a minimal sketch follows.
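
With dynamic provisioning, the hand-made PVs are replaced by a StorageClass plus an external provisioner that creates PVs on demand. A minimal sketch (the class name is hypothetical, and the provisioner value must match a provisioner actually deployed in the cluster):

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-dynamic                 # hypothetical class name
provisioner: example.com/nfs        # must match a deployed external provisioner
reclaimPolicy: Delete
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mypvc-dynamic
  namespace: default
spec:
  accessModes: ["ReadWriteMany"]
  storageClassName: nfs-dynamic     # requests dynamic provisioning from the class
  resources:
    requests:
      storage: 6Gi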
