Thank you for reading this post, don't forget to subscribe!
Запускаем в minikube
Запускаем в кластере
У нас уже установлен NFS сервер (192.168.1.82) и Kubernetes (в моём случае minikube)
1 2 |
df -h | grep nfs
192.168.1.82:/nfs   79G  5.1G   71G   7% /nfs-client |
На все клиенты ставим nfs клиент
# Install the NFS client stack on every node, then enable and start
# the services required for NFS mounts (rpcbind, server, lock, idmap).
yum install nfs-utils

for svc in rpcbind nfs-server nfs-lock nfs-idmap; do
  systemctl enable "$svc"
done

for svc in rpcbind nfs-server nfs-lock nfs-idmap; do
  systemctl start "$svc"
done
выкачиваем
[root@minikub ~]# wget https://bitbucket.org/sysadm-ru/kubernetes/raw/faf2f86a2c1bb82053c5aba9ea7c96463e4e61b0/yamls/nfs-provisioner/rbac.yaml
cat rbac.yaml
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 |
# RBAC for the NFS client provisioner: ServiceAccount, cluster-wide rights
# to manage PVs/PVCs/StorageClasses/events, plus a namespaced Role for
# leader-election endpoints.
kind: ServiceAccount
apiVersion: v1
metadata:
  name: nfs-client-provisioner
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    namespace: default
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
kubectl apply -f rbac.yaml
rm -rf ~/tmp/k8s/dynamic-nfs-provisioning/ && mkdir -p ~/tmp/k8s/dynamic-nfs-provisioning/ && cd ~/tmp/k8s/dynamic-nfs-provisioning/
cat class.yaml
1 2 3 4 5 6 7 8 9 |
# StorageClass backed by the NFS client provisioner.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: managed-nfs-storage
  annotations:
    # NOTE(review): minikube's "standard" class is also marked default (see
    # the `kubectl get storageclass` output below); two simultaneous default
    # StorageClasses is ambiguous — consider unsetting one of them.
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: example.com/nfs  # must match PROVISIONER_NAME in the deployment
parameters:
  archiveOnDelete: "false"
по умолчанию pv будут удаляться поэтому добавим их хранение, для этого поправим файл class.yaml
[codesyntax lang="php"]
1 2 3 4 5 6 7 8 9 10 11 |
# Same StorageClass, with reclaimPolicy Retain so dynamically provisioned
# PVs are kept (not deleted) when their PVC is removed.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: managed-nfs-storage
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: example.com/nfs  # must match PROVISIONER_NAME in the deployment
reclaimPolicy: Retain
parameters:
  archiveOnDelete: "false"
[/codesyntax]
kubectl apply -f class.yaml
1 2 3 4 5 6 |
[root@minikub dynamic-nfs-provisioning]# kubectl get storageclass
NAME                            PROVISIONER                RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
managed-nfs-storage (default)   example.com/nfs            Retain          Immediate           false                  3m42s
standard (default)              k8s.io/minikube-hostpath   Delete          Immediate           false                  54d |
1 |
curl -LJO https://bitbucket.org/sysadm-ru/kubernetes/raw/71509f958c946bf0173392801a7fba45941f5397/yamls/nfs-provisioner/deployment.yaml |
1 2 |
vi deployment.yaml <<NFS Server IP>> меняю на 192.168.1.82 (в 2 местах) |
cat deployment.yaml
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 |
# Deployment of the nfs-client-provisioner pod: mounts the NFS export and
# provisions PVs under it. NFS server address appears twice (env + volume).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nfs-client-provisioner
  strategy:
    type: Recreate  # single writer: replace the pod instead of rolling
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: quay.io/external_storage/nfs-client-provisioner:latest
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: example.com/nfs  # must match the StorageClass provisioner
            - name: NFS_SERVER
              value: 192.168.1.82
            - name: NFS_PATH
              value: /nfs
      volumes:
        - name: nfs-client-root
          nfs:
            server: 192.168.1.82
            path: /nfs
[/codesyntax]
kubectl apply -f deployment.yaml
Создаем PVC
cat pvc.yaml
[codesyntax lang="php"]
1 2 3 4 5 6 7 8 9 10 11 12 |
# Example PVC served by the managed-nfs-storage class.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc1
spec:
  storageClassName: managed-nfs-storage
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 500Mi
[/codesyntax]
1 2 3 4 5 6 7 8 9 10 |
[root@minikub dynamic-nfs-provisioning]# kubectl get pvc NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE pvc-files-of-site-test-ru Bound pv-files-of-site-test-ru 2Gi RWX 14d research-pvc Bound research-vol 2Gi RWX 17d [root@minikub dynamic-nfs-provisioning]# kubectl get pv NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE pv-files-of-site-test-ru 2Gi RWX Retain Bound default/pvc-files-of-site-test-ru 14d research-vol 2Gi RWX Retain Bound default/research-pvc 17d [root@minikub dynamic-nfs-provisioning]# kubectl apply -f pvc.yaml persistentvolumeclaim/pvc1 created |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 |
[root@minikub dynamic-nfs-provisioning]# kubectl apply -f pvc.yaml persistentvolumeclaim/pvc1 created [root@minikub dynamic-nfs-provisioning]# kubectl get pvc NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE pvc-files-of-site-test-ru Bound pv-files-of-site-test-ru 2Gi RWX 14d pvc1 Bound pvc-29b9a2a4-478b-4466-94e6-e665a779440c 500Mi RWX managed-nfs-storage 17s research-pvc Bound research-vol 2Gi RWX 17d [root@minikub dynamic-nfs-provisioning]# kubectl get pv NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE pv-files-of-site-test-ru 2Gi RWX Retain Bound default/pvc-files-of-site-test-ru 14d pvc-29b9a2a4-478b-4466-94e6-e665a779440c 500Mi RWX Retain Bound default/pvc1 managed-nfs-storage 27s research-vol 2Gi RWX Retain Bound default/research-pvc 17d |
======================================================================================================================================================
Запускаем в кластере
У нас есть nfs server 10.242.146.21 на котором мы прописали следующие доступы:
cat /etc/exports | grep prod
/nfs/prod_vsrv_kubernetes 10.242.146.0/24(rw,sync,no_root_squash,no_subtree_check)
На все клиенты ставим nfs клиент
# Install the NFS client stack on every node, then enable and start
# the services required for NFS mounts (rpcbind, server, lock, idmap).
yum install nfs-utils

for svc in rpcbind nfs-server nfs-lock nfs-idmap; do
  systemctl enable "$svc"
done

for svc in rpcbind nfs-server nfs-lock nfs-idmap; do
  systemctl start "$svc"
done
cat rbac.yaml
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 |
# RBAC for the cluster NFS provisioner: ServiceAccount plus cluster-wide
# rights over PVs/PVCs/StorageClasses/events and a namespaced Role for
# leader-election endpoints.
kind: ServiceAccount
apiVersion: v1
metadata:
  name: nfs-pod-provisioner-sa
---
kind: ClusterRole                           # Role of kubernetes
apiVersion: rbac.authorization.k8s.io/v1    # auth API
metadata:
  name: nfs-provisioner-clusterRole
rules:
  - apiGroups: [""]                         # rules on persistentvolumes
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-provisioner-rolebinding
subjects:
  - kind: ServiceAccount
    name: nfs-pod-provisioner-sa            # defined on top of file
    namespace: default
roleRef:                                    # binding cluster role to service account
  kind: ClusterRole
  name: nfs-provisioner-clusterRole         # name defined in clusterRole
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-pod-provisioner-otherRoles
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-pod-provisioner-otherRoles
subjects:
  - kind: ServiceAccount
    name: nfs-pod-provisioner-sa            # same as top of the file
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: Role
  name: nfs-pod-provisioner-otherRoles
  apiGroup: rbac.authorization.k8s.io
cat nfs_class.yaml
1 2 3 4 5 6 7 8 |
# StorageClass for the cluster NFS provisioner.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-storageclass        # IMPORTANT pvc needs to mention this name
provisioner: test-nfs-provisioner  # name can be anything
parameters:
  archiveOnDelete: "false"
cat nfs_provision.yaml
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 |
# Deployment of the cluster NFS provisioner pod. The NFS server address and
# export path appear twice (container env + pod volume) and must match.
kind: Deployment
apiVersion: apps/v1
metadata:
  name: nfs-pod-provisioner
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nfs-pod-provisioner
  strategy:
    type: Recreate  # single writer: replace the pod instead of rolling
  template:
    metadata:
      labels:
        app: nfs-pod-provisioner
    spec:
      serviceAccountName: nfs-pod-provisioner-sa  # name of service account created in rbac.yaml
      containers:
        - name: nfs-pod-provisioner
          image: quay.io/external_storage/nfs-client-provisioner:latest
          volumeMounts:
            - name: nfs-provisioner-v
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME              # do not change
              value: test-nfs-provisioner         # SAME AS PROVISONER NAME VALUE IN STORAGECLASS
            - name: NFS_SERVER                    # do not change
              value: 10.242.146.21                # Ip of the NFS SERVER
            - name: NFS_PATH                      # do not change
              value: /nfs/prod_vsrv_kubernetes    # path to nfs directory setup
      volumes:
        - name: nfs-provisioner-v                 # same as volumemouts name
          nfs:
            server: 10.242.146.21
            path: /nfs/prod_vsrv_kubernetes
kubectl apply -f rbac.yaml
kubectl apply -f nfs_class.yaml
kubectl apply -f nfs_provision.yaml
и пример pvc
cat pvc.yaml
1 2 3 4 5 6 7 8 9 10 11 12 |
# Example PVC bound through the nfs-storageclass StorageClass.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nfs-pvc-test
spec:
  storageClassName: nfs-storageclass  # SAME NAME AS THE STORAGECLASS
  accessModes:
    - ReadWriteMany                   # must be the same as PersistentVolume
  resources:
    requests:
      storage: 50Mi