Using a storage volume directly in a Pod takes two steps (a minimal sketch follows the list):
- Define the volume's name and type in the Pod's volumes field.
- Mount the volume defined in volumes inside the container via volumeMounts.
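A minimal sketch of those two steps (the Pod name, image, and paths here are illustrative, not from the original):
apiVersion: v1
kind: Pod
metadata:
  name: volume-two-step-sketch   # hypothetical name
spec:
  containers:
  - name: app
    image: busybox
    command: ["sleep", "3600"]
    volumeMounts:        # step 2: mount the volume by name inside the container
    - name: demo-vol
      mountPath: /cache
  volumes:               # step 1: define the volume name and type at the Pod level
  - name: demo-vol
    emptyDir: {}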
The emptyDir type
An emptyDir volume provides no persistent storage: its lifecycle is tied to the Pod, not the container, so its data survives container restarts but is deleted together with the Pod.
emptyDir example
- Create the manifest
root@k8s-master01:~/yaml/chapter05# vim volumes-emptydir-demo.yaml
apiVersion: v1
kind: Pod
metadata:
  name: volumes-emptydir-demo
  namespace: default
spec:
  initContainers:
  - name: config-file-downloader
    image: ikubernetes/admin-box
    imagePullPolicy: IfNotPresent
    command: ["/bin/sh","-c","wget -O /data/envoy.yaml http://ilinux.io/envoy.yaml"]
    volumeMounts:
    - name: config-file-store
      mountPath: /data
  containers:
  - name: envoy
    image: envoyproxy/envoy-alpine:v1.13.1
    command: ['/bin/sh','-c']
    args: ['envoy -c /etc/envoy/envoy.yaml']
    volumeMounts:               # mount the volume
    - name: config-file-store
      mountPath: /etc/envoy
      readOnly: true
  volumes:                      # define the volume
  - name: config-file-store     # volume name
    emptyDir:                   # volume type
      medium: Memory            # storage medium; the default is the node's disk
      sizeLimit: 16Mi
- Apply the manifest
root@k8s-master01:~/yaml/chapter05# kubectl apply -f volumes-emptydir-demo.yaml
pod/volumes-emptydir-demo created
- Inspect the mounts inside the container
# check whether the config file was successfully mounted into the container
root@k8s-master01:~/yaml/chapter05# kubectl exec volumes-emptydir-demo -- cat /etc/envoy/envoy.yaml
Defaulted container "envoy" out of: envoy, config-file-downloader (init)
admin:
  access_log_path: /tmp/admin_access.log
  address:
    socket_address: { address: 0.0.0.0, port_value: 9901 }
static_resources:
  listeners:
  - name: listener_0
    address:
      socket_address: { address: 0.0.0.0, port_value: 80 }
    filter_chains:
    - filters:
      - name: envoy.http_connection_manager
        config:
          stat_prefix: ingress_http
          codec_type: AUTO
          route_config:
            name: local_route
            virtual_hosts:
            - name: local_service
              domains: ["*"]
              routes:
              - match: { prefix: "/" }
                route: { cluster: local_service }
          http_filters:
          - name: envoy.router
  clusters:
  - name: local_service
    connect_timeout: 0.25s
    type: STATIC
    lb_policy: ROUND_ROBIN
    load_assignment:
      cluster_name: local_service
      endpoints:
      - lb_endpoints:
        - endpoint:
            address:
              socket_address:
                address: 127.0.0.1
                port_value: 8080
# check the listening ports inside the container
root@k8s-master01:~/yaml/chapter05# kubectl exec volumes-emptydir-demo -- netstat -tnl
Defaulted container "envoy" out of: envoy, config-file-downloader (init)
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address           Foreign Address         State
tcp        0      0 0.0.0.0:80              0.0.0.0:*               LISTEN
tcp        0      0 0.0.0.0:9901            0.0.0.0:*               LISTEN
# check the volume mount
root@k8s-master01:~/yaml/chapter05# kubectl exec volumes-emptydir-demo -- mount | grep envoy
Defaulted container "envoy" out of: envoy, config-file-downloader (init)
tmpfs on /etc/envoy type tmpfs (ro,relatime)
# the volume was mounted successfully; the tmpfs type reflects medium: Memory.
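The non-persistence described above can be observed directly (a sketch, assuming the Pod above is still running): delete and recreate the Pod, and the init container has to download envoy.yaml again, because the emptyDir was discarded together with the old Pod.
root@k8s-master01:~/yaml/chapter05# kubectl delete -f volumes-emptydir-demo.yaml
root@k8s-master01:~/yaml/chapter05# kubectl apply -f volumes-emptydir-demo.yaml
# the downloader's logs should show a fresh wget run
root@k8s-master01:~/yaml/chapter05# kubectl logs volumes-emptydir-demo -c config-file-downloader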
The hostPath type
A hostPath volume mounts an existing path from the node's local filesystem into the container at the specified mount point. A hostPath volume may also declare a type, which takes one of the following values (see the snippet after the list):
- File: the file path must already exist;
- Directory: the directory path must already exist;
- DirectoryOrCreate: if the path does not exist, it is created as an empty directory with 0755 permissions, owned by kubelet;
- FileOrCreate: if the path does not exist, it is created as an empty file with 0644 permissions, owned by kubelet;
- Socket: the UNIX socket path must already exist;
- CharDevice: the character device path must already exist;
- BlockDevice: the block device path must already exist;
- "": the empty string is the default; no check is performed before the hostPath volume is mounted.
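For example, a hostPath volume that creates its directory on first use could be declared like this (a minimal sketch; the volume name and path are illustrative):
volumes:
- name: applog
  hostPath:
    path: /var/log/myapp       # hypothetical host path
    type: DirectoryOrCreate    # created as an empty 0755 directory if missing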
hostPath example
- Write the manifest
root@k8s-master01:~/yaml/chapter05# vim volumes-hostpath-demo.yaml
apiVersion: v1
kind: Pod
metadata:
  name: volumes-hostpath-demo
spec:
  containers:
  - name: filebeat
    image: ikubernetes/filebeat:5.6.7-alpine
    env:
    - name: REDIS_HOST
      value: redis.ilinux.io:6379
    - name: LOG_LEVEL
      value: info
    volumeMounts:
    - name: varlog
      mountPath: /var/log
    - name: socket
      mountPath: /var/run/docker.sock
    - name: varlibdockercontainers
      mountPath: /var/lib/docker/containers
      readOnly: true
  volumes:
  - name: varlog
    hostPath:
      path: /var/log
  - name: varlibdockercontainers
    hostPath:
      path: /var/lib/docker/containers
  - name: socket
    hostPath:
      path: /var/run/docker.sock
- Apply the manifest
root@k8s-master01:~/yaml/chapter05# kubectl apply -f volumes-hostpath-demo.yaml
pod/volumes-hostpath-demo created
# view the Pod's details
root@k8s-master01:~/yaml/chapter05# kubectl describe pods volumes-hostpath-demo
Name:         volumes-hostpath-demo
Namespace:    default
Priority:     0
Node:         k8s-node03/172.16.11.83
Start Time:   Tue, 13 Apr 2024 06:28:13 +0000
Labels:       <none>
Annotations:  <none>
Status:       Running
IP:           10.244.3.79
IPs:
  IP:  10.244.3.79
Containers:
  filebeat:
    Container ID:   docker://46a9666f684bb0ac85c71d36c276546ab48b8195efb34ac061bdb26f09468a1a
    Image:          ikubernetes/filebeat:5.6.7-alpine
    Image ID:       docker-pullable://ikubernetes/filebeat@sha256:3957f67b612aa8628f643f8ede02b71bfbabf34892ef136f1e5ee18bbc0775aa
    Port:           <none>
    Host Port:      <none>
    State:          Running
      Started:      Tue, 13 Apr 2024 06:28:24 +0000
    Ready:          True
    Restart Count:  0
    Environment:
      REDIS_HOST:  redis.ilinux.io:6379
      LOG_LEVEL:   info
    Mounts:        # the container's volume mounts
      /var/lib/docker/containers from varlibdockercontainers (ro)
      /var/log from varlog (rw)
      /var/run/docker.sock from socket (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-wqhfx (ro)
Conditions:
  Type              Status
  Initialized       True
  Ready             True
  ContainersReady   True
  PodScheduled      True
Volumes:
  varlog:
    Type:          HostPath (bare host directory volume)
    Path:          /var/log
    HostPathType:
  varlibdockercontainers:
    Type:          HostPath (bare host directory volume)
    Path:          /var/lib/docker/containers
    HostPathType:
  socket:
    Type:          HostPath (bare host directory volume)
    Path:          /var/run/docker.sock
    HostPathType:
  kube-api-access-wqhfx:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
QoS Class:       BestEffort
Node-Selectors:  <none>
Tolerations:     node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                 node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
  Type    Reason     Age   From               Message
  ----    ------     ----  ----               -------
  Normal  Scheduled  55s   default-scheduler  Successfully assigned default/volumes-hostpath-demo to k8s-node03
  Normal  Pulling    53s   kubelet            Pulling image "ikubernetes/filebeat:5.6.7-alpine"
  Normal  Pulled     45s   kubelet            Successfully pulled image "ikubernetes/filebeat:5.6.7-alpine" in 8.108131139s
  Normal  Created    44s   kubelet            Created container filebeat
  Normal  Started    44s   kubelet            Started container filebeat  # the container has started
NFS volumes
NFS is a network-attached volume type; every Kubernetes node that may run the Pod must be able to mount the NFS filesystem.
NFS example
- Set up an nfs-server
# check the NFS server's address
[root@nfs ~]# ip a show eth0
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
link/ether 52:54:00:f5:86:46 brd ff:ff:ff:ff:ff:ff
inet 172.16.11.79/24 brd 172.16.11.255 scope global noprefixroute eth0
valid_lft forever preferred_lft forever
inet6 fe80::128c:e1f8:720f:875f/64 scope link noprefixroute
valid_lft forever preferred_lft forever
# create the shared directory
[root@nfs ~]# mkdir -pv /data/redis
mkdir: created directory '/data'
mkdir: created directory '/data/redis'
# give UID 999 read/write access to /data/redis
[root@nfs ~]# chown 999 /data/redis
[root@nfs ~]# ls -ld /data/redis
drwxr-xr-x 2 systemd-coredump root 4096 Jul 13 14:47 /data/redis
[root@nfs ~]# id systemd-coredump
uid=999(systemd-coredump) gid=997(systemd-coredump) groups=997(systemd-coredump)
# configure the NFS export
[root@nfs ~]# vim /etc/exports
/data/redis 172.16.11.0/24(rw)
# install nfs-utils
[root@nfs ~]# dnf install nfs-utils -y
# start the NFS server
[root@nfs ~]# systemctl start nfs-server.service
# check that port 2049 is listening
[root@nfs ~]# ss -tnl | grep 2049
LISTEN 0 64 0.0.0.0:2049 0.0.0.0:*
LISTEN 0 64 [::]:2049 [::]:*
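Optionally, confirm the export list is visible (showmount ships with nfs-utils):
[root@nfs ~]# showmount -e 127.0.0.1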
- Install the NFS client on every Kubernetes node
root@k8s-node01:~# apt install nfs-common
# test a manual mount
root@k8s-node02:~# mount -t nfs 172.16.11.79:/data/redis /mnt
# confirm it is mounted
root@k8s-node02:~# mount | grep mnt
172.16.11.79:/data/redis on /mnt type nfs4 (rw,relatime,vers=4.2,rsize=1048576,wsize=1048576,namlen=255,hard,proto=tcp,timeo=600,retrans=2,sec=sys,clientaddr=172.16.11.82,local_lock=none,addr=172.16.11.79)
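After the test, unmount so /mnt is free again:
root@k8s-node02:~# umount /mnt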
- Write the manifest
root@k8s-master01:~/yaml/chapter05# vim volumes-nfs-demo.yaml
apiVersion: v1
kind: Pod
metadata:
  name: volumes-nfs-demo
  labels:
    app: redis
spec:
  containers:
  - name: redis
    image: redis:alpine
    ports:
    - containerPort: 6379
      name: redisport
    securityContext:
      runAsUser: 999   # runs as UID 999; UID 999 must have write permission on the exported directory on the NFS server
    volumeMounts:
    - mountPath: /data
      name: redisdata
  volumes:
  - name: redisdata
    nfs:
      server: 172.16.11.79
      path: /data/redis
      readOnly: false
- Apply the manifest
root@k8s-master01:~/yaml/chapter05# kubectl apply -f volumes-nfs-demo.yaml
pod/volumes-nfs-demo created
- View the Pod's details
root@k8s-master01:~/yaml/chapter05# kubectl describe pod volumes-nfs-demo
Name:         volumes-nfs-demo
Namespace:    default
Priority:     0
Node:         k8s-node01/172.16.11.81
Start Time:   Tue, 13 Apr 2024 07:22:24 +0000
Labels:       app=redis
Annotations:  <none>
Status:       Running
IP:           10.244.1.50
IPs:
  IP:  10.244.1.50
Containers:
  redis:
    Container ID:   docker://d06c8e430cb3b38c9e266ee50b1caeaf834f4b51b6270540735eff780bc2f968
    Image:          redis:alpine
    Image ID:       docker-pullable://redis@sha256:442fbfdeccf203c277827cfd8e7e727ce411611e1a6caeda9cca8115ed17b9cc
    Port:           6379/TCP
    Host Port:      0/TCP
    State:          Running
      Started:      Tue, 13 Apr 2024 07:22:40 +0000
    Ready:          True
    Restart Count:  0
    Environment:    <none>
    Mounts:
      /data from redisdata (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-wppkn (ro)
Conditions:
  Type              Status
  Initialized       True
  Ready             True
  ContainersReady   True
  PodScheduled      True
Volumes:
  redisdata:
    Type:      NFS (an NFS mount that lasts the lifetime of a pod)
    Server:    172.16.11.79
    Path:      /data/redis
    ReadOnly:  false
  kube-api-access-wppkn:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
QoS Class:       BestEffort
Node-Selectors:  <none>
Tolerations:     node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                 node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
  Type    Reason     Age   From               Message
  ----    ------     ----  ----               -------
  Normal  Scheduled  87s   default-scheduler  Successfully assigned default/volumes-nfs-demo to k8s-node01
  Normal  Pulling    86s   kubelet            Pulling image "redis:alpine"
  Normal  Pulled     74s   kubelet            Successfully pulled image "redis:alpine" in 11.045849701s
  Normal  Created    73s   kubelet            Created container redis
  Normal  Started    72s   kubelet            Started container redis
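To confirm that writes actually reach the NFS export, one quick check (a sketch; the key name is arbitrary) is to write a key through redis, force an RDB save, and then look for the dump file on the server:
# write a key and trigger a save from inside the Pod
root@k8s-master01:~/yaml/chapter05# kubectl exec volumes-nfs-demo -- redis-cli set testkey testval
root@k8s-master01:~/yaml/chapter05# kubectl exec volumes-nfs-demo -- redis-cli bgsave
# dump.rdb should now appear in the exported directory on the NFS server
[root@nfs ~]# ls -l /data/redis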
Longhorn usage example
After Longhorn is deployed, it automatically creates a StorageClass, so we only need to create a PVC. Note that volumes provisioned through Longhorn's StorageClass use the Delete reclaim policy by default; to keep the data after the PVC is removed, the policy must be changed to Retain manually (a sketch follows).
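A sketch of how to inspect and change the policy (most StorageClass fields are immutable once created, so for volumes that already exist the documented route is to patch the PV itself; the PV name below is a placeholder):
# see which reclaim policy the longhorn StorageClass hands out
root@k8s-master01:~/yaml/chapter05# kubectl get sc longhorn -o jsonpath='{.reclaimPolicy}'
# switch an already-provisioned PV to Retain
root@k8s-master01:~/yaml/chapter05# kubectl patch pv <pv-name> -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}'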
Creating a PVC
- Write the PVC manifest
root@k8s-master01:~/yaml/chapter05# vim pvc-dyn-longhorn-demo.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-dyn-longhorn-demo
  namespace: default
spec:
  accessModes: ["ReadWriteOnce"]
  volumeMode: Filesystem
  resources:
    requests:
      storage: 2Gi
    limits:
      storage: 10Gi
  storageClassName: longhorn
- Apply the manifest
root@k8s-master01:~/yaml/chapter05# kubectl apply -f pvc-dyn-longhorn-demo.yaml
persistentvolumeclaim/pvc-dyn-longhorn-demo created
root@k8s-master01:~/yaml/chapter05# kubectl get pvc
NAME                    STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
pvc-dyn-longhorn-demo   Bound    pvc-5db5bc10-5277-4452-bf3b-d821fa31cde1   2Gi        RWO            longhorn       2m10s
Using the PVC in a Pod
- Create the manifest
root@k8s-master01:~/yaml/chapter05# vim volumes-pvc-longhorn-demo.yaml
apiVersion: v1
kind: Pod
metadata:
  name: volumes-pvc-longhorn-demo
  namespace: default
spec:
  containers:
  - name: redis
    image: redis:alpine
    imagePullPolicy: IfNotPresent
    ports:
    - containerPort: 6379
      name: redisport
    volumeMounts:
    - name: redis-data-vol
      mountPath: /data
  volumes:
  - name: redis-data-vol
    persistentVolumeClaim:
      claimName: pvc-dyn-longhorn-demo
- Apply the manifest
root@k8s-master01:~/yaml/chapter05# kubectl apply -f volumes-pvc-longhorn-demo.yaml
pod/volumes-pvc-longhorn-demo created
root@k8s-master01:~/yaml/chapter05# kubectl get pods
NAME                        READY   STATUS    RESTARTS   AGE
volumes-pvc-longhorn-demo   1/1     Running   0          3m34s
- Check the mounted volume
root@k8s-master01:~/yaml/chapter05# kubectl exec volumes-pvc-longhorn-demo -- mount | grep data
/dev/longhorn/pvc-5db5bc10-5277-4452-bf3b-d821fa31cde1 on /data type ext4 (rw,relatime)
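Tying back to the reclaim-policy note above, the policy the provisioned PV actually received can be checked like this (a sketch, reusing the PV name from the output above):
root@k8s-master01:~/yaml/chapter05# kubectl get pv pvc-5db5bc10-5277-4452-bf3b-d821fa31cde1 -o jsonpath='{.spec.persistentVolumeReclaimPolicy}'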
PV and PVC examples
This example creates several PVs and PVCs to demonstrate how a PVC selects a PV; the PVs are backed by NFS.
Creating the NFS shared directories
Create the shared directories on the nfs-server:
# create the directories
[root@nfs ~]# mkdir /data/redis00{1,2,3,4,5}
# update the NFS exports
[root@nfs data]# vim /etc/exports
/data/redis 172.16.11.0/24(rw)
/data/redis001 172.16.11.0/24(rw)
/data/redis002 172.16.11.0/24(rw)
/data/redis003 172.16.11.0/24(rw)
/data/redis004 172.16.11.0/24(rw)
/data/redis005 172.16.11.0/24(rw)
# re-export the shares
[root@nfs data]# exportfs -ar
[root@nfs data]# exportfs
/data/redis 172.16.11.0/24
/data/redis001 172.16.11.0/24
/data/redis002 172.16.11.0/24
/data/redis003 172.16.11.0/24
/data/redis004 172.16.11.0/24
/data/redis005 172.16.11.0/24
# change the owner of these directories to UID 999
[root@nfs data]# chown 999 ./*
[root@nfs data]# ll
total 24
drwxr-xr-x 2 systemd-coredump root 4096 Jul 13 14:47 redis
drwxr-xr-x 2 systemd-coredump root 4096 Jul 13 17:49 redis001
drwxr-xr-x 2 systemd-coredump root 4096 Jul 13 17:47 redis002
drwxr-xr-x 2 systemd-coredump root 4096 Jul 13 17:49 redis003
drwxr-xr-x 2 systemd-coredump root 4096 Jul 13 17:49 redis004
drwxr-xr-x 2 systemd-coredump root 4096 Jul 13 17:49 redis005
PV definition examples
- Create the manifests
# pv-nfs-demo manifest: 5Gi, ReadWriteMany (RWX)
root@k8s-master01:~/yaml/chapter05# vim pv-nfs-demo.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-nfs-demo
spec:
  capacity:
    storage: 5Gi
  volumeMode: Filesystem
  accessModes:
  - ReadWriteMany                        # many nodes may mount it read-write
  persistentVolumeReclaimPolicy: Retain  # keep the PV (and its data) after release
  mountOptions:
  - hard
  - nfsvers=4.1
  nfs:
    path: "/data/redis001"
    server: 172.16.11.79
# pv-nfs-002 manifest: ReadWriteMany (RWX), 10Gi
root@k8s-master01:~/yaml/chapter05# vim pv-nfs-002.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-nfs-002
spec:
  capacity:
    storage: 10Gi
  volumeMode: Filesystem
  accessModes:
  - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  mountOptions:
  - hard
  - nfsvers=4.1
  nfs:
    path: "/data/redis002"
    server: 172.16.11.79
# pv-nfs-003 manifest: ReadWriteOnce (RWO), 1Gi
root@k8s-master01:~/yaml/chapter05# vim pv-nfs-003.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-nfs-003
spec:
  capacity:
    storage: 1Gi
  volumeMode: Filesystem
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  mountOptions:
  - hard
  - nfsvers=4.1
  nfs:
    path: "/data/redis003"
    server: 172.16.11.79
- Create the PVs
root@k8s-master01:~/yaml/chapter05# kubectl apply -f pv-nfs-demo.yaml -f pv-nfs-002.yaml -f pv-nfs-003.yaml
persistentvolume/pv-nfs-demo created
persistentvolume/pv-nfs-002 created
persistentvolume/pv-nfs-003 created
# list the PV resources
root@k8s-master01:~/yaml/chapter05# kubectl get pv
NAME          CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM   STORAGECLASS   REASON   AGE
pv-nfs-002    10Gi       RWX            Retain           Available                                   98m
pv-nfs-003    1Gi        RWO            Retain           Available                                   98m
pv-nfs-demo   5Gi        RWX            Retain           Available                                   98m
PVC definition examples
- Create the manifests
# pvc-demo-001: requires ReadWriteMany (RWX); capacity between 3Gi (request) and 10Gi (limit)
root@k8s-master01:~/yaml/chapter05# vim pvc-demo-001.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-demo-001
  namespace: default
spec:
  accessModes: ["ReadWriteMany"]
  volumeMode: Filesystem
  resources:
    requests:
      storage: 3Gi   # at least 3Gi
    limits:
      storage: 10Gi  # at most 10Gi
# pvc-demo-002: requires ReadWriteOnce (RWO); capacity between 2Gi (request) and 5Gi (limit), and a PV labeled usedof=redisdata
root@k8s-master01:~/yaml/chapter05# vim pvc-demo-002.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-demo-002
spec:
  accessModes: ["ReadWriteOnce"]
  volumeMode: Filesystem
  resources:
    requests:
      storage: 2Gi
    limits:
      storage: 5Gi
  selector:
    matchLabels:
      usedof: "redisdata"
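Note that pvc-demo-002's selector only matches PVs carrying the label usedof=redisdata; none of the PVs created above carries it (and none is both RWO and at least 2Gi), so as written this claim would stay Pending. A label can be attached to a suitable PV like so (the PV name is a placeholder):
root@k8s-master01:~/yaml/chapter05# kubectl label pv <pv-name> usedof=redisdata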
- Apply the PVC manifest
root@k8s-master01:~/yaml/chapter05# kubectl apply -f pvc-demo-001.yaml
persistentvolumeclaim/pvc-demo-001 created
# the PVC resource has been created
root@k8s-master01:~/yaml/chapter05# kubectl get pvc
NAME           STATUS   VOLUME        CAPACITY   ACCESS MODES   STORAGECLASS   AGE
pvc-demo-001   Bound    pv-nfs-demo   5Gi        RWX                           6s
# see which PV was bound
root@k8s-master01:~/yaml/chapter05# kubectl get pv
NAME          CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM                  STORAGECLASS   REASON   AGE
pv-nfs-002    10Gi       RWX            Retain           Available                                                  117m
pv-nfs-003    1Gi        RWO            Retain           Available                                                  117m
pv-nfs-demo   5Gi        RWX            Retain           Bound       default/pvc-demo-001                           117m
# the 5Gi PV was bound: pvc-demo-001 requests at least 3Gi with RWX and Filesystem mode, and pv-nfs-demo is the smallest Available PV that satisfies all of those requirements (pv-nfs-003 is too small and only RWO).
Mounting the volume in a Pod via a PVC
- Create the manifest
root@k8s-master01:~/yaml/chapter05# vim volumes-pvc-demo.yaml
apiVersion: v1
kind: Pod
metadata:
  name: volumes-pvc-demo
  namespace: default
spec:
  containers:
  - name: redis
    image: redis:alpine
    imagePullPolicy: IfNotPresent
    ports:
    - containerPort: 6379
      name: redisport
    volumeMounts:
    - mountPath: /data
      name: redis-rbd-vol
    securityContext:
      runAsUser: 999
      runAsGroup: 999
  volumes:
  - name: redis-rbd-vol          # volume name
    persistentVolumeClaim:       # consume a PVC
      claimName: pvc-demo-001    # bind to the pvc-demo-001 claim created above
- Apply the manifest
root@k8s-master01:~/yaml/chapter05# kubectl apply -f volumes-pvc-demo.yaml
pod/volumes-pvc-demo created
# created successfully
root@k8s-master01:~/yaml/chapter05# kubectl get pods volumes-pvc-demo
NAME               READY   STATUS    RESTARTS   AGE
volumes-pvc-demo   1/1     Running   0          10s
- Verify inside the container
# the volume has been mounted
root@k8s-master01:~/yaml/chapter05# kubectl exec volumes-pvc-demo -- mount | grep "data"
172.16.11.79:/data/redis001 on /data type nfs4 (rw,relatime,vers=4.1,rsize=1048576,wsize=1048576,namlen=255,hard,proto=tcp,timeo=600,retrans=2,sec=sys,clientaddr=172.16.11.83,local_lock=none,addr=172.16.11.79)
Summary
As these examples show, a user only needs to define PVCs and Pods; the storage details can be left to the administrator.