更新时间:2024-04-23 GMT+08:00
分享

NFS部署

安装NFS-Server

  1. 安装nfs
    yum install -y nfs-common nfs-utils
  2. 创建数据目录
    # mkdir -p /data/nfs
  3. 访问授权

    添加nfs-server访问白名单及文件存储路径

    # vim /etc/exports
    /data/nfs 192.168.0.0/24(no_root_squash,rw,sync,no_subtree_check)

    如果允许所有访问,用/data/nfs *(no_root_squash,rw,sync,no_subtree_check)

  4. 加载配置
    # exportfs -rv
  5. 启动NFS和rpcbind
    # systemctl enable nfs && systemctl enable rpcbind && systemctl start rpcbind nfs
    图1 启动NFS和rpcbind1
    图2 启动NFS和rpcbind2
  6. 查看 RPC 服务的注册状况
    # rpcinfo -p 192.168.0.69
    图3 查看 RPC 服务的注册状况
  7. showmount测试
    # showmount -e 192.168.0.69
    图4 showmount测试

K8s配置NFS-Client

上传“k8s镜像插件”客户端到K8s的所有Node节点服务器的/soft/nfs,并安装。

上传“yaml文件”到k8s的控制节点服务器。

图5 图示1
  1. 安装nfs-client

    上传“nfs非容器客户端”到3台node节点,

    yum install -y nfs-common nfs-utils rpcbind
  2. Node导入镜像

    上传“k8s镜像插件”到3台node节点,并导入镜像

    # docker load -i busybox-latest.tar.gz
    # docker load -i nfs-client-provisioner-v3.2.0.tar.gz
    图6 图示2
  3. Master创建命名空间
    # kubectl create namespace nfs
  4. Master创建NFS服务K8s插件
    # kubectl apply -f rbac.yaml
    # rbac.yaml — ServiceAccount plus cluster/namespace RBAC for the
    # nfs-client-provisioner (namespace: nfs).
    apiVersion: v1
    kind: ServiceAccount
    metadata:
      name: nfs-client-provisioner
      # replace with namespace where provisioner is deployed
      namespace: nfs
    ---
    kind: ClusterRole
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
      name: nfs-client-provisioner-runner
    rules:
      - apiGroups: [""]
        resources: ["persistentvolumes"]
        verbs: ["get", "list", "watch", "create", "delete"]
      - apiGroups: [""]
        resources: ["persistentvolumeclaims"]
        verbs: ["get", "list", "watch", "update"]
      - apiGroups: ["storage.k8s.io"]
        resources: ["storageclasses"]
        verbs: ["get", "list", "watch"]
      - apiGroups: [""]
        resources: ["events"]
        verbs: ["create", "update", "patch"]
    ---
    kind: ClusterRoleBinding
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
      name: run-nfs-client-provisioner
    subjects:
      - kind: ServiceAccount
        name: nfs-client-provisioner
        # replace with namespace where provisioner is deployed
        namespace: nfs
    roleRef:
      kind: ClusterRole
      name: nfs-client-provisioner-runner
      apiGroup: rbac.authorization.k8s.io
    ---
    kind: Role
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
      name: leader-locking-nfs-client-provisioner
      # replace with namespace where provisioner is deployed
      namespace: nfs
    rules:
      - apiGroups: [""]
        resources: ["endpoints"]
        verbs: ["get", "list", "watch", "create", "update", "patch"]
    ---
    kind: RoleBinding
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
      name: leader-locking-nfs-client-provisioner
      # replace with namespace where provisioner is deployed
      namespace: nfs
    subjects:
      - kind: ServiceAccount
        name: nfs-client-provisioner
        # replace with namespace where provisioner is deployed
        namespace: nfs
    roleRef:
      kind: Role
      name: leader-locking-nfs-client-provisioner
      apiGroup: rbac.authorization.k8s.io
    # kubectl apply -f class.yaml
    # class.yaml — StorageClass backed by the NFS provisioner; the
    # provisioner string must match the PROVISIONER_NAME env in deployment.yaml.
    apiVersion: storage.k8s.io/v1
    kind: StorageClass
    metadata:
      name: managed-nfs-storage
    provisioner: fuseim.pri/ifs  # or choose another name, must match deployment's env PROVISIONER_NAME
    parameters:
      archiveOnDelete: "false"  # delete (not archive) backing data when the PVC is removed
    # kubectl apply -f deployment.yaml
    # deployment.yaml — runs the nfs-client-provisioner pod that watches PVCs
    # and provisions subdirectories under the exported NFS path.
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: nfs-client-provisioner
      labels:
        app: nfs-client-provisioner
      # replace with namespace where provisioner is deployed
      namespace: nfs
    spec:
      replicas: 1
      strategy:
        type: Recreate
      selector:
        matchLabels:
          app: nfs-client-provisioner
      template:
        metadata:
          labels:
            app: nfs-client-provisioner
        spec:
          serviceAccountName: nfs-client-provisioner
          containers:
            - name: nfs-client-provisioner
              image: kopkop/nfs-client-provisioner-arm64:v3.1.0-k8s1.11
              volumeMounts:
                - name: nfs-client-root
                  mountPath: /persistentvolumes
              env:
                - name: PROVISIONER_NAME
                  value: fuseim.pri/ifs  # must match the StorageClass provisioner
                - name: NFS_SERVER
                  value: "192.168.0.69"  # NFS server IP address
                - name: NFS_PATH
                  value: /data/nfs  # path exported by the NFS server
          volumes:
            - name: nfs-client-root
              nfs:
                server: 192.168.0.69  # NFS server IP address
                path: /data/nfs  # path exported by the NFS server

    看到以下信息,表明deployment创建成功

    图7 图示3
  5. 测试

    创建NFS服务存储目录虚拟映射

    # kubectl apply -f test-claim.yaml
    # test-claim.yaml — PVC bound through the managed-nfs-storage class;
    # creating it should produce a directory under the NFS export.
    kind: PersistentVolumeClaim
    apiVersion: v1
    metadata:
      name: test-claim
      annotations:
        volume.beta.kubernetes.io/storage-class: "managed-nfs-storage"
    spec:
      accessModes:
        - ReadWriteMany
      resources:
        requests:
          storage: 1000Mi  # requested capacity
    # kubectl apply -f test-pod.yaml
    # test-pod.yaml — one-shot busybox pod that mounts the test-claim PVC and
    # writes a SUCCESS marker file to prove the NFS mount works end to end.
    kind: Pod
    apiVersion: v1
    metadata:
      name: test-pod
    spec:
      containers:
        - name: test-pod
          image: busybox:latest
          command:
            - "/bin/sh"
          args:
            - "-c"
            - "touch /data/nfs/SUCCESS && exit 0 || exit 1"
          volumeMounts:
            - name: nfs-pvc
              mountPath: "/data/nfs"  # mount path inside the container
      restartPolicy: "Never"
      volumes:
        - name: nfs-pvc
          persistentVolumeClaim:
            claimName: test-claim  # must match the name in test-claim.yaml
    图8 图示4

    到NFS-Server服务器(192.168.0.69)所在机器的/data/nfs,可以看到创建了一个随机默认目录和SUCCESS文件

    图9 图示5
  6. 容器挂载

    创建k8s客户端服务,上传daas-claim.yaml到服务器,并执行

    图10 图示6
    # kubectl create ns daas
    # kubectl apply -f daas-claim.yaml
    # daas-claim.yaml — PVC in the daas namespace, provisioned via the
    # managed-nfs-storage class for application mounts.
    kind: PersistentVolumeClaim
    apiVersion: v1
    metadata:
      name: daas-claim
      namespace: daas
      annotations:
        volume.beta.kubernetes.io/storage-class: "managed-nfs-storage"
    spec:
      accessModes:
        - ReadWriteMany
      resources:
        requests:
          storage: 1000Mi  # requested capacity

    挂载容器指定目录(/etc/daas/kerberos)到NFS服务器

    图11 图示7
    # Pod-spec fragment: add these sections to the workload's container spec
    # (volumeMounts) and pod spec (volumes) to mount the daas-claim PVC.
    volumeMounts:
      - name: nfs-pvc
        mountPath: "/etc/daas/kerberos"  # directory inside the container
    volumes:
      - name: nfs-pvc
        persistentVolumeClaim:
          claimName: daas-claim  # must match the PVC name in daas-claim.yaml

    进入NFS服务器,看到test.txt,表明容器到NFS的挂载已成功

    # cd /data/nfs/daas-daas-claim-pvc-xxx
    图12 图示8

相关文档