Create RBD Image

##
#  Create the RBD image, on the ceph-client node
#  --image-feature layering  specifies which image features to enable
#  Default features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
#
[root@ec-k8s-m2 mnt]# rbd create k8s-ceph-rbd --size 5G 
[root@ec-k8s-m2 mnt]# 
[root@ec-k8s-m2 mnt]# 
[root@ec-k8s-m2 mnt]# rbd info k8s-ceph-rbd
rbd image 'k8s-ceph-rbd':
	size 5 GiB in 1280 objects
	order 22 (4 MiB objects)
	id: d6126b8b4567
	block_name_prefix: rbd_data.d6126b8b4567
	format: 2
	features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
	op_features: 
	flags: 
	create_timestamp: Wed Nov  7 19:41:03 2018


##
#  Map the image
#
[root@ec-k8s-m2 mnt]# rbd map k8s-ceph-rbd 
rbd: sysfs write failed
RBD image feature set mismatch. You can disable features unsupported by the kernel with "rbd feature disable k8s-ceph-rbd object-map fast-diff deep-flatten".
In some cases useful info is found in syslog - try "dmesg | tail".
rbd: map failed: (6) No such device or address
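
##
#  Alternative (a sketch, not part of the transcript above): create the image
#  with only the layering feature in the first place, so the kernel client can
#  map it without disabling anything afterwards. This is how k8s-ceph-rbd2 is
#  created later in this guide; <image-name> is a placeholder.
#
rbd create <image-name> --size 5G --image-feature layering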

##
#  Temporarily disable the features the kernel does not support
#
[root@ec-k8s-m2 mnt]# rbd feature disable k8s-ceph-rbd exclusive-lock, object-map, fast-diff, deep-flatten

[root@ec-k8s-m2 mnt]# rbd info k8s-ceph-rbd
rbd image 'k8s-ceph-rbd':
	size 5 GiB in 1280 objects
	order 22 (4 MiB objects)
	id: d6126b8b4567
	block_name_prefix: rbd_data.d6126b8b4567
	format: 2
	features: layering
	op_features: 
	flags: 
	create_timestamp: Wed Nov  7 19:41:03 2018

##
#  Map the image again
#
[root@ec-k8s-m2 mnt]# rbd map k8s-ceph-rbd 
/dev/rbd2

[root@ec-k8s-m2 mnt]# rbd showmapped
id pool image        snap device    
0  rbd  foo          -    /dev/rbd0 
1  rbd  k8s-foo      -    /dev/rbd1 
2  rbd  k8s-ceph-rbd -    /dev/rbd2 
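
##
#  Optional (a sketch): format and mount the mapped device locally to verify
#  it works. mkfs destroys any data already on the image, and the device must
#  be unmounted and unmapped again before Kubernetes uses it (see below).
#
mkfs.xfs /dev/rbd2                      # or mkfs.ext4; match the fsType used in the PV later
mkdir -p /mnt/ceph-block-device
mount /dev/rbd2 /mnt/ceph-block-device
umount /mnt/ceph-block-device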

##
#  Unmap the image; otherwise the image stays in use and Kubernetes cannot mount it when the Pod is created:
#  Warning  FailedMount  
#  4s (x2 over 58s)  kubelet, ec-k8s-n3
#  MountVolume.WaitForAttach failed for volume "ceph-rbd-pv" : rbd image rbd/k8s-ceph-rbd is still being used

[root@ec-k8s-m2 mnt]# rbd unmap k8s-ceph-rbd
[root@ec-k8s-m2 mnt]#

Create Ceph-Secret

##
#  Get the client.admin key

[root@ec-k8s-m2 ~]# ceph auth get-key client.admin 

[root@ec-k8s-m2 ~]# cat /etc/ceph/ceph.client.admin.keyring 
[client.admin]
	key = AQD/YOFbiy3LMBAARetHV3/Wu1omqDEXrIK68Q==
	caps mds = "allow *"
	caps mgr = "allow *"
	caps mon = "allow *"
	caps osd = "allow *"

[root@ec-k8s-m2 ~]# echo AQD/YOFbiy3LMBAARetHV3/Wu1omqDEXrIK68Q== | base64 
QVFEL1lPRmJpeTNMTUJBQVJldEhWMy9XdTFvbXFERVhySUs2OFE9PQo=
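
##
#  A shorter variant (sketch): ceph auth get-key prints the key without a
#  trailing newline, so it can be piped straight into base64 (echo without
#  -n adds a newline to the encoded value).
#
ceph auth get-key client.admin | base64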

##
#  On the k8s-master node
#  vim 00-ceph-secret.yaml
#  key: the base64-encoded client.admin key

# ceph-secret.yaml
apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret
type: "kubernetes.io/rbd"
data:
  key: QVFEL1lPRmJpeTNMTUJBQVJldEhWMy9XdTFvbXFERVhySUs2OFE9PQo=

##
#  Create the Secret
[root@ec-k8s-m1 ceph-rbd]# kubectl create -f 00-ceph-secret.yaml 
secret/ceph-secret created

[root@ec-k8s-m1 ceph-rbd]# kubectl get secrets 
NAME                 TYPE                     DATA      AGE
ceph-secret          Opaque                   1         13s
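
##
#  Optional check (sketch): decode the stored key and compare it with the
#  output of "ceph auth get-key client.admin".
#
kubectl get secret ceph-secret -o jsonpath='{.data.key}' | base64 -d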

Create PV

##
#  11-rbd-pv.yaml
#
apiVersion: v1
kind: PersistentVolume
metadata:
  name: ceph-rbd-pv
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  rbd:
    monitors:
      - 172.16.0.61:6789
      - 172.16.0.62:6789
      - 172.16.0.51:6789
    pool: rbd
    image: k8s-ceph-rbd
    user: admin
    secretRef:
      name: ceph-secret
    fsType: xfs #ext4
    readOnly: false
  persistentVolumeReclaimPolicy: Recycle

##
#  Create the PV
[root@ec-k8s-m1 ceph-rbd]# kubectl create -f 11-rbd-pv.yaml 
persistentvolume/ceph-rbd-pv created

[root@ec-k8s-m1 ceph-rbd]# kubectl get pv
NAME          CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM   STORAGECLASS   REASON   AGE
ceph-rbd-pv   5Gi        RWO            Recycle          Available                                   10s

Create PVC

##
#  12-rbd-pv-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: ceph-rbd-pv-claim
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 5Gi
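
##
#  No storageClassName is set, so the claim binds to a matching Available PV
#  by capacity and access mode. If several PVs could match, the claim can be
#  pinned to one explicitly with spec.volumeName (a hedged variant, not used
#  in this walkthrough):
#
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: ceph-rbd-pv-claim
spec:
  volumeName: ceph-rbd-pv      # bind explicitly to the PV created above
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 5Gi
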
##
#  Create the PVC
[root@ec-k8s-m1 ceph-rbd]# kubectl create -f 12-rbd-pv-claim.yaml 
persistentvolumeclaim/ceph-rbd-pv-claim created

[root@ec-k8s-m1 ceph-rbd]# kubectl get pvc
NAME                STATUS    VOLUME         CAPACITY   ACCESS MODES   STORAGECLASS   AGE
ceph-rbd-pv-claim   Bound     ceph-rbd-pv    5Gi        RWO                           16s

Create Pod

##
#  20-rbd-pvc-pod.yaml
#  
apiVersion: v1
kind: Pod
metadata:
  name: ceph-rbd-pvc-busybox1
spec:
  containers:
    - name: ceph-rbd-pvc-busybox1
      image: busybox
      command: ["sleep", "60000"]
      volumeMounts:
      - name: cephrbd-vol1
        mountPath: "/mnt/k8s-ceph-rbd-vol"
        readOnly: false
  volumes:
    - name: cephrbd-vol1
      persistentVolumeClaim:
        claimName: ceph-rbd-pv-claim
##
#  Create the K8s Pod
#  
[root@ec-k8s-m1 ceph-rbd]# kubectl create -f 20-rbd-pvc-pod.yaml 
pod/ceph-rbd-pvc-busybox1 created

[root@ec-k8s-m1 ceph-rbd]# kubectl get pods -o wide
NAME                    READY     STATUS    RESTARTS   AGE       IP                NODE        NOMINATED NODE
ceph-rbd-pvc-busybox1   1/1       Running   0          9s        192.168.136.150   ec-k8s-n3   <none>

Check Volume

##
#  Check the volume inside the Pod
[root@ec-k8s-m1 ceph-rbd]# kubectl exec -it ceph-rbd-pvc-busybox1 /bin/sh
/ # ls
bin   dev   etc   home  mnt   proc  root  sys   tmp   usr   var
/ # 
/ # cd mnt/
/mnt # ls
k8s-ceph-rbd-vol
/mnt # 
/mnt # cd k8s-ceph-rbd-vol/
/mnt/k8s-ceph-rbd-vol # 
/mnt/k8s-ceph-rbd-vol # echo e.c. >> text.txt

##
#  Ceph-Client Node
#
[root@ec-k8s-m2 ~]# rbd ls
foo
k8s-ceph-rbd

[root@ec-k8s-m2 ~]# rbd map k8s-ceph-rbd
/dev/rbd2

[root@ec-k8s-m2 ~]# mount /dev/rbd2 /mnt/ceph-block-device/

[root@ec-k8s-m2 ~]# cd /mnt

[root@ec-k8s-m2 mnt]# ll ceph-block-device/
total 20
drwx------ 2 root root 16384 Nov  7 19:52 lost+found
-rw-r--r-- 1 root root     5 Nov  7 22:05 text.txt
[root@ec-k8s-m2 mnt]# 
[root@ec-k8s-m2 mnt]# cat ceph-block-device/text.txt 
e.c.

##
#  Delete the Pod
[root@ec-k8s-m1 ceph-rbd]# kubectl delete -f 20-rbd-pvc-pod.yaml 
pod "ceph-rbd-pvc-busybox1" deleted

##
#  Ceph-Client Node
#

[root@ec-k8s-m2 mnt]# echo $(date)" by "$(hostname) >> ceph-block-device/text.txt 

[root@ec-k8s-m2 mnt]# cat ceph-block-device/text.txt 
e.c.
Wed Nov 7 22:13:57 CST 2018 by ec-k8s-m2

[root@ec-k8s-m2 mnt]# umount /mnt/ceph-block-device/

[root@ec-k8s-m2 mnt]# rbd unmap k8s-ceph-rbd
##
#  Recreate the Pod and verify the data is still there
[root@ec-k8s-m1 ceph-rbd]# kubectl create -f 20-rbd-pvc-pod.yaml 
pod/ceph-rbd-pvc-busybox1 created
[root@ec-k8s-m1 ceph-rbd]# 
[root@ec-k8s-m1 ceph-rbd]# kubectl exec -it ceph-rbd-pvc-busybox1 /bin/sh
/ # 
/ # ls
bin   dev   etc   home  mnt   proc  root  sys   tmp   usr   var
/ # 
/ # cat mnt/k8s-ceph-rbd-vol/
lost+found/  text.txt
/ # cat mnt/k8s-ceph-rbd-vol/text.txt 
e.c.
Wed Nov 7 22:13:57 CST 2018 by ec-k8s-m2


Success: the data written on the Ceph client node is visible in the recreated Pod.

Define RBD in the Pod Volumes

##
#  Create an RBD image with only the layering feature
#
[root@ec-k8s-m2 ~]# rbd create k8s-ceph-rbd2 --size 2G --image-feature layering

[root@ec-k8s-m2 ~]# rbd info k8s-ceph-rbd2
rbd image 'k8s-ceph-rbd2':
	size 2 GiB in 512 objects
	order 22 (4 MiB objects)
	id: 13f86b8b4567
	block_name_prefix: rbd_data.13f86b8b4567
	format: 2
	features: layering
	op_features: 
	flags: 
	create_timestamp: Wed Nov  7 22:30:42 2018

##
#  Define the Ceph RBD information directly in spec.volumes of 21-rbd-pod.yaml (no PV/PVC needed).
#   
apiVersion: v1
kind: Pod
metadata:
  name: ceph-rbd-pvc-busybox2
spec:
  containers:
    - image: busybox
      name: ceph-rbd-pvc-busybox2-rw
      command: ["sleep", "60000"]
      volumeMounts:
      - name: rbdpd
        mountPath: /mnt/rbd
  volumes:
    - name: rbdpd
      rbd:
        monitors: 
        - '172.16.0.61:6789'
        - '172.16.0.62:6789'
        pool: rbd
        image: k8s-ceph-rbd2
        fsType: xfs #ext4
        readOnly: false
        user: admin
        secretRef:
          name: ceph-secret
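
##
#  Alternative (sketch): instead of secretRef, the rbd volume source can read
#  the admin key from a keyring file on the node (the field defaults to
#  /etc/ceph/keyring). This assumes the keyring file exists on every node
#  that may run the Pod.
#
  volumes:
    - name: rbdpd
      rbd:
        monitors:
        - '172.16.0.61:6789'
        - '172.16.0.62:6789'
        pool: rbd
        image: k8s-ceph-rbd2
        fsType: xfs
        readOnly: false
        user: admin
        keyring: /etc/ceph/keyring    # read the key from this file instead of a Secret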
      

##
#  Create the Pod
[root@ec-k8s-m1 ceph-rbd]# kubectl create -f 21-rbd-pod.yaml 
pod/ceph-rbd-pvc-busybox2 created
[root@ec-k8s-m1 ceph-rbd]# 
[root@ec-k8s-m1 ceph-rbd]# kubectl get pods -o wide
NAME                               READY     STATUS    RESTARTS   AGE       IP                NODE        NOMINATED NODE
ceph-rbd-pvc-busybox1              1/1       Running   0          32m       192.168.136.149   ec-k8s-n3   <none>
ceph-rbd-pvc-busybox2              1/1       Running   0          25s       192.168.136.152   ec-k8s-n3   <none>

K8S PV Access Modes

The access modes are:

  • ReadWriteOnce – the volume can be mounted as read-write by a single node
  • ReadOnlyMany – the volume can be mounted read-only by many nodes
  • ReadWriteMany – the volume can be mounted as read-write by many nodes

In the CLI, the access modes are abbreviated to:

  • RWO – ReadWriteOnce
  • ROX – ReadOnlyMany
  • RWX – ReadWriteMany

Important! A volume can only be mounted using one access mode at a time, even if it supports many. For example, a GCEPersistentDisk can be mounted as ReadWriteOnce by a single node or ReadOnlyMany by many nodes, but not at the same time.
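
For example, the RBD PV above could also list ReadOnlyMany next to ReadWriteOnce (RBD supports both, per the table below); any given mount still uses only one of those modes at a time. A small sketch of such an accessModes list:

  accessModes:
    - ReadWriteOnce   # one node mounts it read-write, or
    - ReadOnlyMany    # many nodes mount it read-only, but not both simultaneously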

Volume Plugin          ReadWriteOnce   ReadOnlyMany   ReadWriteMany
AWSElasticBlockStore   ✓               -              -
AzureFile              ✓               ✓              ✓
AzureDisk              ✓               -              -
CephFS                 ✓               ✓              ✓
Cinder                 ✓               -              -
FC                     ✓               ✓              -
Flexvolume             ✓               ✓              depends on the driver
Flocker                ✓               -              -
GCEPersistentDisk      ✓               ✓              -
Glusterfs              ✓               ✓              ✓
HostPath               ✓               -              -
iSCSI                  ✓               ✓              -
Quobyte                ✓               ✓              ✓
NFS                    ✓               ✓              ✓
RBD                    ✓               ✓              -
VsphereVolume          ✓               -              - (works when pods are collocated)
PortworxVolume         ✓               -              ✓
ScaleIO                ✓               ✓              -
StorageOS              ✓               -              -

 

##
#  References
#
https://github.com/kubernetes/examples/tree/master/staging/volumes/rbd
http://docs.ceph.com/docs/master/start/quick-rbd/
https://kubernetes.io/docs/concepts/storage/persistent-volumes/