1. Introduction

When a pod is deleted, its containers are deleted with it, and any data written to the container layer is deleted as well.
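A quick way to see this behavior (a sketch; the pod name pod1 and the file path are just for illustration):

# Write a file into the container's writable layer, then delete the pod:
kubectl run pod1 --image=nginx
kubectl exec pod1 -- sh -c 'echo hello > /tmp/test.txt'
kubectl delete pod pod1
# Recreate the pod; the file written to the container layer is gone:
kubectl run pod1 --image=nginx
kubectl exec pod1 -- cat /tmp/test.txt    # No such file or directory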

2. emptyDir

An emptyDir volume is a randomly named directory created on the host; when the pod is deleted, the directory backing the emptyDir is deleted with it.

apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: pod1
  name: pod1
spec:
  volumes:
  - name: v1
    emptyDir: {}
  containers:
  - image: nginx
    imagePullPolicy: IfNotPresent
    name: c1
    resources: {}
    volumeMounts:
    - name: v1
      mountPath: /data
  - image: alpine
    imagePullPolicy: IfNotPresent
    command: ["sh","-c","sleep 1d"]
    name: c2
    resources: {}
    volumeMounts:
    - name: v1
      mountPath: /data
  dnsPolicy: ClusterFirst
  restartPolicy: Always
status: {}
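Both containers mount the same emptyDir volume at /data, so a file written from c1 is immediately visible in c2. A quick check (a sketch; the file name is illustrative):

kubectl exec pod1 -c c1 -- sh -c 'echo hello > /data/test.txt'
kubectl exec pod1 -c c2 -- cat /data/test.txt    # hello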

3. hostPath

apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: hostpath
  name: hostpath
spec:
  volumes:
  - name: v1
    hostPath:
      path: /xx
  containers:
  - image: nginx
    imagePullPolicy: IfNotPresent
    name: hostpath
    resources: {}
    volumeMounts:
    - name: v1
      mountPath: /data
  dnsPolicy: ClusterFirst
  restartPolicy: Always
status: {}
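Unlike emptyDir, a hostPath volume mounts a directory from the node's own filesystem (here /xx) into the pod, so the data survives pod deletion but is tied to whichever node ran the pod. A quick check (a sketch; the file name is illustrative):

kubectl exec hostpath -- sh -c 'echo hello > /data/test.txt'
kubectl delete pod hostpath
# On the node where the pod was scheduled, the file remains:
cat /xx/test.txt    # hello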

4. NFS shared storage

4.1 Install nfs-server

[root@node001 chap4-valume]# yum install nfs-utils -y

Start the service:

[root@node001 chap4-valume]# systemctl enable nfs-server --now

Create the shared directory and export it:

[root@node001 chap4-valume]# mkdir /nfs
[root@node001 chap4-valume]# vim /etc/exports
[root@node001 chap4-valume]# exportfs -arv
exporting *:/nfs
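The /etc/exports entry edited above is not shown; judging from the exportfs output, /nfs is exported to all hosts. A typical entry would look like this (the mount options are an assumption, not taken from the original):

/nfs *(rw,sync,no_root_squash)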

Install the NFS client on all worker nodes:

[root@node002 ~]# yum install nfs-utils -y
[root@node003 ~]# yum install nfs-utils -y

Verify the export from a worker node:

[root@node003 manifests]# showmount -e 192.168.10.31
Export list for 192.168.10.31:
/nfs *

4.2 Create the YAML

[root@node001 chap4-valume]# cat nfs.yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: nfs
  name: nfs
spec:
  volumes:
  - name: v1
    nfs:
      server: 192.168.10.31
      path: /nfs
  containers:
  - image: nginx
    imagePullPolicy: IfNotPresent
    name: hostpath
    resources: {}
    volumeMounts:
    - name: v1
      mountPath: /data
  dnsPolicy: ClusterFirst
  restartPolicy: Always
status: {}
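Once the pod is running, the NFS export should appear as a network mount inside the container. A quick check (a sketch of the expected output):

kubectl exec nfs -- df -h /data
# Filesystem          Size  Used  Avail  Use%  Mounted on
# 192.168.10.31:/nfs  ...   ...   ...    ...   /data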

5. Kubernetes persistent storage

5.1 Create an NFS PV

apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv0003
spec:
  capacity:
    storage: 5Gi
  volumeMode: Filesystem
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Recycle
  #storageClassName: slow
  nfs:
    path: /nfs2
    server: 192.168.10.31

Reclaim policy:
If the reclaim policy is Recycle, then when the PVC is deleted the PV automatically returns to the Available state, and the data stored in the PV is wiped.
If the reclaim policy is Retain, then when the PVC is deleted the data in the PV is kept, but the PV does not become Available again (it stays Released).
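The transition can be watched directly (a sketch; the STATUS column is what changes):

kubectl get pv pv0003        # STATUS: Bound (while the PVC exists)
kubectl delete pvc mypvc01
kubectl get pv pv0003        # STATUS: Available (Recycle) or Released (Retain)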

5.2 Create a PVC

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mypvc01
spec:
  accessModes:
  - ReadWriteOnce
  volumeMode: Filesystem
  resources:
    requests:
      storage: 4Gi
  #storageClassName: slow

What determines whether a PVC binds to a PV (a quick check follows the list):

1. accessModes
   Make sure the values on the PV and the PVC match.
2. Size
   The PV's capacity must be >= the size requested by the PVC.
3. storageClass
   The storageClassName of the PV and the PVC must match.
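After applying the PVC, confirm that it bound to pv0003 (a sketch of the expected output; CAPACITY shows the PV's size, not the 4Gi requested):

[root@node001 chap4-valume]# kubectl get pvc
NAME      STATUS   VOLUME   CAPACITY   ACCESS MODES   STORAGECLASS   AGE
mypvc01   Bound    pv0003   5Gi        RWO                           5s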

5.3 Mount the PVC

apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: pvc
  name: pvc
spec:
  volumes:
  - name: v1
    persistentVolumeClaim:
      claimName: mypvc01
  containers:
  - image: nginx
    imagePullPolicy: IfNotPresent
    name: hostpath
    resources: {}
    volumeMounts:
    - name: v1
      mountPath: /data
  dnsPolicy: ClusterFirst
  restartPolicy: Always
status: {}
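Anything the pod writes under /data lands in the NFS export backing pv0003, so it outlives the pod. A quick check (a sketch; the file name is illustrative):

kubectl exec pvc -- sh -c 'echo hello > /data/test.txt'
# Then on the NFS server (192.168.10.31):
cat /nfs2/test.txt    # hello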

6. Dynamic volume provisioning

When a user creates a PVC, a matching PV is created automatically.

6.1 Create the NFS provisioner

Download the provisioner (the rbac.yaml/deployment.yaml and the PROVISIONER_NAME used below come from the nfs-subdir-external-provisioner project):
https://github.com/kubernetes-sigs/nfs-subdir-external-provisioner

Create the provisioner:

[root@node001 deploy]# kubectl apply -f rbac.yaml

Modify deployment.yaml (point the env vars and the volume at your NFS server and export):

          env:
            - name: PROVISIONER_NAME
              value: k8s-sigs.io/nfs-subdir-external-provisioner
            - name: NFS_SERVER
              value: 192.168.10.31
            - name: NFS_PATH
              value: /nfs2
      volumes:
        - name: nfs-client-root
          nfs:
            server: 192.168.10.31
            path: /nfs2
[root@node001 deploy]# kubectl apply -f deployment.yaml
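Verify that the provisioner pod is running before moving on (a sketch; the Deployment in the repo is named nfs-client-provisioner, and the generated pod suffix will differ):

[root@node001 deploy]# kubectl get pods
NAME                                      READY   STATUS    RESTARTS   AGE
nfs-client-provisioner-xxxxxxxxxx-xxxxx   1/1     Running   0          10s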

6.2 Create a StorageClass

apiVersion: storage.k8s.io/v1
kind: StorageClass
allowVolumeExpansion: true
metadata:
  name: mysc
provisioner: k8s-sigs.io/nfs-subdir-external-provisioner
parameters:
  archiveOnDelete: "false"
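Confirm that the class exists (a sketch of the expected output):

[root@node001 deploy]# kubectl get sc
NAME   PROVISIONER                                   ...
mysc   k8s-sigs.io/nfs-subdir-external-provisioner   ...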

6.3 Create a PVC

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mypvc02
spec:
  accessModes:
  - ReadWriteOnce
  volumeMode: Filesystem
  resources:
    requests:
      storage: 1Gi
  storageClassName: mysc
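Because this claim names storageClassName: mysc, the provisioner creates a matching PV on the fly and the PVC goes straight to Bound; no PV had to be created by hand. A quick check (a sketch; the generated PV name will differ):

[root@node001 deploy]# kubectl get pvc mypvc02
NAME      STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
mypvc02   Bound    pvc-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx   1Gi        RWO            mysc           5s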