NFS filesystem
Using an NFS filesystem for dynamic storage provisioning in Kubernetes
1. Install the server and client packages
root@hello:~# apt install nfs-kernel-server nfs-common
Here, nfs-kernel-server is the server package and nfs-common is the client package.
2. Configure the NFS shared directory
root@hello:~# mkdir /nfs
root@hello:~# sudo vim /etc/exports
/nfs *(rw,sync,no_root_squash,no_subtree_check)
The fields are explained below:
/nfs: the directory to share.
*: the clients allowed to access the share; * means any host, 192.168.3. limits access to that subnet, and 192.168.3.29 limits access to a single IP (see the example entries after this list).
rw: read and write. Specify ro instead for read-only access.
sync: data is written synchronously to memory and to disk.
async: data is buffered in memory first rather than written straight to disk.
no_root_squash: if the user accessing the share from an NFS client is root, they keep root privileges on the shared directory. This is highly insecure and generally discouraged, but if you need to write to the NFS directory from clients as root you have to configure it; convenience and security are a trade-off here.
root_squash: if the user accessing the share is root, their privileges are squashed down to an anonymous user, typically with the uid and gid of the nobody system account.
subtree_check: forces NFS to check parent-directory permissions (the default).
no_subtree_check: skips the parent-directory permission check.
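For instance, to restrict access instead of exporting to every host, an /etc/exports file might look like the sketch below (the 192.168.3.0/24 subnet and the second directory are illustrative assumptions, not part of the setup above):

/nfs    192.168.3.0/24(rw,sync,no_root_squash,no_subtree_check)   # read-write for one subnet
/backup 192.168.3.29(ro,sync,root_squash,no_subtree_check)        # hypothetical read-only export for a single host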
Once configured, run the following commands to export the share and restart the NFS service:
root@hello:~# exportfs -a
root@hello:~# systemctl restart nfs-kernel-server
root@hello:~# systemctl enable nfs-kernel-server
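To confirm the share is actually exported, you can list it on the server; an optional check, assuming the export configured above:

root@hello:~# exportfs -v              # show active exports and their options
root@hello:~# showmount -e localhost   # list exported directories as a client would see them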
Client-side mount
root@hello:~# apt install nfs-common
root@hello:~# mkdir -p /nfs/
root@hello:~# mount -t nfs 192.168.1.66:/nfs/ /nfs/
root@hello:~# df -hT
Filesystem                        Type      Size  Used Avail Use% Mounted on
udev                              devtmpfs  7.8G     0  7.8G   0% /dev
tmpfs                             tmpfs     1.6G  2.9M  1.6G   1% /run
/dev/mapper/ubuntu--vg-ubuntu--lv ext4       97G  9.9G   83G  11% /
tmpfs                             tmpfs     7.9G     0  7.9G   0% /dev/shm
tmpfs                             tmpfs     5.0M     0  5.0M   0% /run/lock
tmpfs                             tmpfs     7.9G     0  7.9G   0% /sys/fs/cgroup
/dev/loop0                        squashfs   56M   56M     0 100% /snap/core18/2128
/dev/loop1                        squashfs   56M   56M     0 100% /snap/core18/2246
/dev/loop3                        squashfs   33M   33M     0 100% /snap/snapd/12704
/dev/loop2                        squashfs   62M   62M     0 100% /snap/core20/1169
/dev/loop4                        squashfs   33M   33M     0 100% /snap/snapd/13640
/dev/loop6                        squashfs   68M   68M     0 100% /snap/lxd/21835
/dev/loop5                        squashfs   71M   71M     0 100% /snap/lxd/21029
/dev/sda2                         ext4      976M  107M  803M  12% /boot
tmpfs                             tmpfs     1.6G     0  1.6G   0% /run/user/0
192.168.1.66:/nfs                 nfs4       97G  6.4G   86G   7% /nfs
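If the mount should persist across reboots, an /etc/fstab entry can replace the manual mount command; a minimal sketch assuming the same server and mount point as above:

# /etc/fstab (assumes the 192.168.1.66 server and /nfs mount point used above)
192.168.1.66:/nfs  /nfs  nfs  defaults,_netdev  0  0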
Create and configure the default storage class
[root@k8s-master-node1 ~/yaml]# vim nfs-storage.yaml
[root@k8s-master-node1 ~/yaml]#
[root@k8s-master-node1 ~/yaml]# cat nfs-storage.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-storage
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: k8s-sigs.io/nfs-subdir-external-provisioner
parameters:
  archiveOnDelete: "true" ## whether to archive the PV's contents when the PV is deleted
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: registry.cn-hangzhou.aliyuncs.com/chenby/nfs-subdir-external-provisioner:v4.0.2
          # resources:
          #   limits:
          #     cpu: 10m
          #   requests:
          #     cpu: 10m
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: k8s-sigs.io/nfs-subdir-external-provisioner
            - name: NFS_SERVER
              value: 192.168.1.66 ## address of your own NFS server
            - name: NFS_PATH
              value: /nfs/ ## directory shared by the NFS server
      volumes:
        - name: nfs-client-root
          nfs:
            server: 192.168.1.66
            path: /nfs/
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
Apply the manifest
[root@k8s-master-node1 ~/yaml]# kubectl apply -f nfs-storage.yaml
storageclass.storage.k8s.io/nfs-storage created
deployment.apps/nfs-client-provisioner created
serviceaccount/nfs-client-provisioner created
clusterrole.rbac.authorization.k8s.io/nfs-client-provisioner-runner created
clusterrolebinding.rbac.authorization.k8s.io/run-nfs-client-provisioner created
role.rbac.authorization.k8s.io/leader-locking-nfs-client-provisioner created
rolebinding.rbac.authorization.k8s.io/leader-locking-nfs-client-provisioner created
[root@k8s-master-node1 ~/yaml]#
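Optionally, before checking the storage class you can confirm that the provisioner Pod itself came up, using the app label from the Deployment above:

[root@k8s-master-node1 ~/yaml]# kubectl get pods -l app=nfs-client-provisioner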
Check that the default storage class was created
[root@k8s-master-node1 ~/yaml]# kubectl get storageclasses.storage.k8s.io
NAME                    PROVISIONER                                   RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
nfs-storage (default)   k8s-sigs.io/nfs-subdir-external-provisioner   Delete          Immediate           false                  100s
[root@k8s-master-node1 ~/yaml]#
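The is-default-class annotation in nfs-storage.yaml is what marks the class as default; if you ever need to toggle it on an existing class, a kubectl patch along these lines would do it (a sketch using the same annotation, not a step from the original walkthrough):

[root@k8s-master-node1 ~/yaml]# kubectl patch storageclass nfs-storage -p '{"metadata":{"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'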
Create a PVC to test
[root@k8s-master-node1 ~/yaml]# vim pvc.yaml
[root@k8s-master-node1 ~/yaml]# cat pvc.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: nginx-pvc
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 200Mi
[root@k8s-master-node1 ~/yaml]#
[root@k8s-master-node1 ~/yaml]# kubectl apply -f pvc.yaml
persistentvolumeclaim/nginx-pvc created
[root@k8s-master-node1 ~/yaml]#
View the PVC
[root@k8s-master-node1 ~/yaml]# kubectl get pvc
NAME        STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
nginx-pvc   Bound    pvc-8a4b6065-904a-4bae-bef9-1f3b5612986c   200Mi      RWX            nfs-storage    4s
[root@k8s-master-node1 ~/yaml]#
View the PV
[root@k8s-master-node1 ~/yaml]# kubectl get pv
NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM               STORAGECLASS   REASON   AGE
pvc-8a4b6065-904a-4bae-bef9-1f3b5612986c   200Mi      RWX            Delete           Bound    default/nginx-pvc   nfs-storage             103s
[root@k8s-master-node1 ~/yaml]#
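As a final check you could mount the claim into a Pod and write to it; the sketch below is illustrative only (the Pod name and nginx image are assumptions, not part of the original steps):

apiVersion: v1
kind: Pod
metadata:
  name: nginx-pvc-test   # hypothetical name, not from the original walkthrough
spec:
  containers:
    - name: nginx
      image: nginx
      volumeMounts:
        - name: data
          mountPath: /usr/share/nginx/html   # files written here land on the NFS share
  volumes:
    - name: data
      persistentVolumeClaim:
        claimName: nginx-pvc   # the PVC created above

Once the Pod is Running, anything written under /usr/share/nginx/html should appear in the dynamically provisioned subdirectory on the NFS server.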
That covers the detailed configuration of dynamic storage provisioning in Kubernetes; for more material on dynamic Kubernetes storage, see the other related articles on 代码网.