Introduction

A Ceph cluster supports only one CephFS.

Plugin    ReadWriteOnce    ReadOnlyMany    ReadWriteMany
cephfs    y                y               y
rbd       y                y               n

Install rbd-provisioner

rbd-provisioner is installed into the kube-system namespace:

kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-provisioner
subjects:
  - kind: ServiceAccount
    name: rbd-provisioner
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: rbd-provisioner
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: rbd-provisioner
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: rbd-provisioner
subjects:
- kind: ServiceAccount
  name: rbd-provisioner
  namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: rbd-provisioner
  namespace: kube-system
spec:
  replicas: 1
  selector:
    matchLabels:
      app: rbd-provisioner
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: rbd-provisioner
    spec:
      containers:
      - name: rbd-provisioner
        image: "quay.io/external_storage/rbd-provisioner:v2.1.1-k8s1.11"
        env:
        - name: PROVISIONER_NAME
          value: ceph.com/rbd
      serviceAccountName: rbd-provisioner
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: rbd-provisioner
  namespace: kube-system
rules:
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["get"]
- apiGroups: [""]
  resources: ["endpoints"]
  verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-provisioner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["kube-dns","coredns"]
    verbs: ["list", "get"]
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get", "create", "delete"]
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: rbd-provisioner
  namespace: kube-system
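
Apply the manifest and check that the provisioner Pod comes up (a minimal sketch; the file name rbd-provisioner.yaml is an assumption):

kubectl apply -f rbd-provisioner.yaml
kubectl -n kube-system get pods -l app=rbd-provisioner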

Install ceph-common on the Kubernetes nodes

yum install ceph-common -y
# the version should match the Ceph cluster's version
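
To confirm the versions actually match, compare the client on the node with the cluster daemons (a sketch; ceph versions requires Luminous or newer):

ceph --version    # client version on the k8s node
ceph versions     # run on a Ceph node: versions of the running cluster daemons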

Create the storage pool

# Create a storage pool
# ceph osd pool create {pool-name} {pg-num} {pgp-num}
ceph osd pool create kube 64 64
# Delete a storage pool
ceph osd pool rm kube-test kube-test --yes-i-really-really-mean-it

# If the pool cannot be deleted:
# edit /etc/ceph/ceph.conf
[mon]
mon allow pool delete = true
# then restart the monitors
systemctl restart ceph-mon.target
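
On Luminous and newer releases, a newly created pool should also be initialized for RBD before images are written to it; a minimal sketch:

# tag the pool with the rbd application and initialize it
rbd pool init kube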

View the Ceph keys

ceph auth list

Create the Ceph user and Secrets

Create the Ceph user

ceph auth get-or-create client.kube mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=kube'
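
To confirm the user and its caps were created as intended (a sketch):

ceph auth get client.kube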

Create the Secrets

Create secrets holding the keys of the client.admin and client.kube users. Any other namespace that wants to use Ceph RBD dynamic provisioning needs a secret with the client.kube user's key created in that namespace.
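
The key fields below are the base64-encoded keys of those two users; a minimal sketch of how to produce them on a Ceph node:

# encode the admin key for ceph-secret
ceph auth get-key client.admin | base64
# encode the kube user's key for each ceph-user-secret
ceph auth get-key client.kube | base64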

[root@node1 ~]# cat ceph-user.yaml
apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret
  namespace: kube-system
data:
  key: QVFBaFNwVmp1emNHQVJBQTNZa1ZnUHlkaXhkWlVYektiQ3lENHc9PQ==
type: kubernetes.io/rbd
---
apiVersion: v1
kind: Secret
metadata:
  name: ceph-user-secret
  namespace: kube-system
data:
  key: QVFCb1hKVmpFVEpEQXhBQTJVSUJReDNieEJnZWhkRm9QRXdHNFE9PQ==
type: kubernetes.io/rbd
---
apiVersion: v1
kind: Secret
metadata:
  name: ceph-user-secret
  namespace: default
data:
  key: QVFCb1hKVmpFVEpEQXhBQTJVSUJReDNieEJnZWhkRm9QRXdHNFE9PQ==
type: kubernetes.io/rbd
---
apiVersion: v1
kind: Secret
metadata:
  name: ceph-user-secret
  namespace: tool
data:
  key: QVFCb1hKVmpFVEpEQXhBQTJVSUJReDNieEJnZWhkRm9QRXdHNFE9PQ==
type: kubernetes.io/rbd
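
Apply the file and check that the secrets landed in each namespace (a sketch; file name as in the listing above):

kubectl apply -f ceph-user.yaml
kubectl -n kube-system get secret ceph-secret ceph-user-secret
kubectl -n default get secret ceph-user-secret
kubectl -n tool get secret ceph-user-secret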

Create the StorageClass

[root@node1 ~]# cat storageclass.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ceph-rbd
  annotations:
     storageclass.beta.kubernetes.io/is-default-class: "true" # <= make this the default StorageClass
provisioner: ceph.com/rbd # must match the PROVISIONER_NAME set in the rbd-provisioner Deployment
reclaimPolicy: Retain # reclaim policy for the PVs it creates; the alternative is Delete
parameters:
  monitors: 192.168.1.105:6789,192.168.1.106:6789,192.168.1.107:6789
  adminId: admin
  adminSecretName: ceph-secret
  adminSecretNamespace: kube-system
  pool: kube
  userId: kube
  userSecretName: ceph-user-secret
  fsType: xfs
  imageFormat: "2"
  imageFeatures: "layering"
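
Apply it and confirm it is registered as the default StorageClass (a sketch):

kubectl apply -f storageclass.yaml
kubectl get storageclass    # ceph-rbd should be marked (default)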

Create a test PVC and Pod

[root@node1 ~]# cat test-pvc.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: test-claim
  namespace: tool
#  namespace: kube-system
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
---
apiVersion: v1
kind: Pod
metadata:
  name: test
  namespace: tool
#  namespace: kube-system
spec:
  containers:
  - name: ceph-busybox
    image: busybox
    command: ["sleep", "60000"]
    volumeMounts:
    - name: ceph-vol1
      mountPath: /usr/share/busybox
      readOnly: false
  volumes:
  - name: ceph-vol1
    persistentVolumeClaim:
      claimName: test-claim
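
After applying, the PVC should bind within a few seconds and a matching image should appear in the kube pool; a quick check (a sketch; rbd is run on a Ceph node):

kubectl apply -f test-pvc.yaml
kubectl -n tool get pvc test-claim                      # STATUS should become Bound
kubectl -n tool exec test -- df -h /usr/share/busybox   # the RBD mount inside the Pod
rbd ls kube                                             # the dynamically provisioned image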