# cluster.yaml
---
# Source: rook-ceph-cluster/templates/cephblockpool.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ceph-block
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: rook-ceph.rbd.csi.ceph.com
parameters:
  pool: ceph-blockpool
  clusterID: rook-ceph
  csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
  csi.storage.k8s.io/controller-expand-secret-namespace: 'rook-ceph'
  csi.storage.k8s.io/fstype: ext4
  csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
  csi.storage.k8s.io/node-stage-secret-namespace: 'rook-ceph'
  csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
  csi.storage.k8s.io/provisioner-secret-namespace: 'rook-ceph'
  imageFeatures: layering
  imageFormat: "2"
reclaimPolicy: Delete
allowVolumeExpansion: true
volumeBindingMode: Immediate
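# Usage sketch (illustrative, not part of the rendered chart output): a PVC
# bound to this class. Because is-default-class is "true", storageClassName
# could also be omitted; the claim name and size below are assumptions.
#
# apiVersion: v1
# kind: PersistentVolumeClaim
# metadata:
#   name: example-rbd-pvc
# spec:
#   accessModes:
#     - ReadWriteOnce
#   resources:
#     requests:
#       storage: 10Gi
#   storageClassName: ceph-block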
---
# Source: rook-ceph-cluster/templates/cephfilesystem.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ceph-filesystem
  annotations:
    storageclass.kubernetes.io/is-default-class: "false"
provisioner: rook-ceph.cephfs.csi.ceph.com
parameters:
  fsName: ceph-filesystem
  pool: ceph-filesystem-data0
  clusterID: rook-ceph
  csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
  csi.storage.k8s.io/controller-expand-secret-namespace: 'rook-ceph'
  csi.storage.k8s.io/fstype: ext4
  csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
  csi.storage.k8s.io/node-stage-secret-namespace: 'rook-ceph'
  csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
  csi.storage.k8s.io/provisioner-secret-namespace: 'rook-ceph'
reclaimPolicy: Delete
allowVolumeExpansion: true
volumeBindingMode: Immediate
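# Usage sketch (illustrative, not part of the rendered chart output): unlike
# RBD block volumes, CephFS volumes can be mounted by many pods at once, so
# ReadWriteMany is the typical access mode here.
#
# apiVersion: v1
# kind: PersistentVolumeClaim
# metadata:
#   name: example-cephfs-pvc
# spec:
#   accessModes:
#     - ReadWriteMany
#   resources:
#     requests:
#       storage: 10Gi
#   storageClassName: ceph-filesystem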
---
# Source: rook-ceph-cluster/templates/cephobjectstore.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ceph-bucket
provisioner: rook-ceph.ceph.rook.io/bucket
reclaimPolicy: Delete
volumeBindingMode: Immediate
parameters:
  objectStoreName: ceph-objectstore
  objectStoreNamespace: rook-ceph
  region: us-east-1
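# Usage sketch (illustrative, not part of the rendered chart output): this
# class is consumed through an ObjectBucketClaim rather than a PVC; the claim
# creates a bucket plus a Secret and ConfigMap holding the S3 credentials and
# endpoint.
#
# apiVersion: objectbucket.io/v1alpha1
# kind: ObjectBucketClaim
# metadata:
#   name: example-bucket
# spec:
#   generateBucketName: example-bucket
#   storageClassName: ceph-bucket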
---
# Source: rook-ceph-cluster/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: rook-ceph-tools
  namespace: rook-ceph # namespace:cluster
  labels:
    app: rook-ceph-tools
spec:
  replicas: 1
  selector:
    matchLabels:
      app: rook-ceph-tools
  template:
    metadata:
      labels:
        app: rook-ceph-tools
    spec:
      dnsPolicy: ClusterFirstWithHostNet
      containers:
        - name: rook-ceph-tools
          image: quay.io/ceph/ceph:v18.2.0
          command:
            - /bin/bash
            - -c
            - |
              # Replicate the script from toolbox.sh inline so the ceph image
              # can be run directly, instead of requiring the rook toolbox
              CEPH_CONFIG="/etc/ceph/ceph.conf"
              MON_CONFIG="/etc/rook/mon-endpoints"
              KEYRING_FILE="/etc/ceph/keyring"

              # create a ceph config file in its default location so ceph/rados tools can be used
              # without specifying any arguments
              write_endpoints() {
                endpoints=$(cat ${MON_CONFIG})

                # filter out the mon names
                # external cluster can have numbers or hyphens in mon names, handling them in regex
                # shellcheck disable=SC2001
                mon_endpoints=$(echo "${endpoints}" | sed 's/[a-z0-9_-]\+=//g')

                DATE=$(date)
                echo "$DATE writing mon endpoints to ${CEPH_CONFIG}: ${endpoints}"
                cat <<EOF > ${CEPH_CONFIG}
              [global]
              mon_host = ${mon_endpoints}

              [client.admin]
              keyring = ${KEYRING_FILE}
              EOF
              }

              # watch the endpoints config file and update if the mon endpoints ever change
              watch_endpoints() {
                # get the timestamp for the target of the soft link
                real_path=$(realpath ${MON_CONFIG})
                initial_time=$(stat -c %Z "${real_path}")
                while true; do
                  real_path=$(realpath ${MON_CONFIG})
                  latest_time=$(stat -c %Z "${real_path}")
                  if [[ "${latest_time}" != "${initial_time}" ]]; then
                    write_endpoints
                    initial_time=${latest_time}
                  fi
                  sleep 10
                done
              }

              # read the secret from an env var (for backward compatibility), or from the secret file
              ceph_secret=${ROOK_CEPH_SECRET}
              if [[ "$ceph_secret" == "" ]]; then
                ceph_secret=$(cat /var/lib/rook-ceph-mon/secret.keyring)
              fi

              # create the keyring file
              cat <<EOF > ${KEYRING_FILE}
              [${ROOK_CEPH_USERNAME}]
              key = ${ceph_secret}
              EOF

              # write the initial config file
              write_endpoints

              # continuously update the mon endpoints if they fail over
              watch_endpoints
          imagePullPolicy: IfNotPresent
          tty: true
          securityContext:
            capabilities:
              drop:
                - ALL
            runAsGroup: 2016
            runAsNonRoot: true
            runAsUser: 2016
          env:
            - name: ROOK_CEPH_USERNAME
              valueFrom:
                secretKeyRef:
                  name: rook-ceph-mon
                  key: ceph-username
          resources:
            limits:
              cpu: 500m
              memory: 1Gi
            requests:
              cpu: 100m
              memory: 128Mi
          volumeMounts:
            - mountPath: /etc/ceph
              name: ceph-config
            - name: mon-endpoint-volume
              mountPath: /etc/rook
            - name: ceph-admin-secret
              mountPath: /var/lib/rook-ceph-mon
      volumes:
        - name: ceph-admin-secret
          secret:
            secretName: rook-ceph-mon
            optional: false
            items:
              - key: ceph-secret
                path: secret.keyring
        - name: mon-endpoint-volume
          configMap:
            name: rook-ceph-mon-endpoints
            items:
              - key: data
                path: mon-endpoints
        - name: ceph-config
          emptyDir: {}
      tolerations:
        - key: "node.kubernetes.io/unreachable"
          operator: "Exists"
          effect: "NoExecute"
          tolerationSeconds: 5
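# Once this pod is running, Ceph CLI commands can be run inside it, e.g.:
#   kubectl -n rook-ceph exec -it deploy/rook-ceph-tools -- ceph status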
---
# Source: rook-ceph-cluster/templates/securityContextConstraints.yaml
# SCC for the Rook and Ceph daemons,
# for creating the cluster on OpenShift
---
# Source: rook-ceph-cluster/templates/volumesnapshotclass.yaml
---
---
# Source: rook-ceph-cluster/templates/cephblockpool.yaml
apiVersion: ceph.rook.io/v1
kind: CephBlockPool
metadata:
  name: ceph-blockpool
  namespace: rook-ceph # namespace:cluster
spec:
  failureDomain: host
  replicated:
    size: 3
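# With replicated size 3 and failureDomain host, each object is stored on three
# distinct hosts; data survives the loss of up to two hosts, and usable
# capacity is roughly one third of raw capacity.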
---
# Source: rook-ceph-cluster/templates/cephcluster.yaml
apiVersion: ceph.rook.io/v1
kind: CephCluster
metadata:
  name: rook-ceph
  namespace: rook-ceph # namespace:cluster
spec:
  monitoring:
    enabled: false
  cephVersion:
    allowUnsupported: false
    image: quay.io/ceph/ceph:v18.2.0
  cleanupPolicy:
    allowUninstallWithVolumes: false
    confirmation: ""
    sanitizeDisks:
      dataSource: zero
      iteration: 1
      method: quick
  continueUpgradeAfterChecksEvenIfNotHealthy: false
  crashCollector:
    disable: true
  dashboard:
    enabled: true
    port: 8080
    ssl: false
    urlPrefix: /ceph-dashboard
  dataDirHostPath: /var/lib/rook
  disruptionManagement:
    managePodBudgets: true
    osdMaintenanceTimeout: 30
    pgHealthCheckTimeout: 0
  healthCheck:
    daemonHealth:
      mon:
        disabled: false
        interval: 45s
      osd:
        disabled: false
        interval: 60s
      status:
        disabled: false
        interval: 60s
    livenessProbe:
      mgr:
        disabled: false
      mon:
        disabled: false
      osd:
        disabled: false
  logCollector:
    enabled: true
    maxLogSize: 500M
    periodicity: daily
  mgr:
    allowMultiplePerNode: false
    count: 2
    modules:
      - enabled: true
        name: pg_autoscaler
      - enabled: true
        name: rook
  mon:
    allowMultiplePerNode: false
    count: 3
  network:
    connections:
      compression:
        enabled: false
      encryption:
        enabled: false
      requireMsgr2: false
    hostNetwork: true
  placement:
    all:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
            - matchExpressions:
                - key: all
                  operator: In
                  values:
                    - all
      podAffinity: null
      podAntiAffinity: null
      tolerations:
        - key: all
          operator: Exists
      topologySpreadConstraints: null
    mgr:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
            - matchExpressions:
                - key: mgr
                  operator: In
                  values:
                    - mgr
      podAffinity: null
      podAntiAffinity: null
      tolerations:
        - key: mgr
          operator: Exists
      topologySpreadConstraints: null
    mon:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
            - matchExpressions:
                - key: mon
                  operator: In
                  values:
                    - mon
      podAffinity: null
      podAntiAffinity: null
      tolerations:
        - key: mon
          operator: Exists
      topologySpreadConstraints: null
    osd:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
            - matchExpressions:
                - key: osd
                  operator: In
                  values:
                    - osd
      podAffinity: null
      podAntiAffinity: null
      tolerations:
        - key: osd
          operator: Exists
      topologySpreadConstraints: null
  priorityClassNames:
    mgr: system-cluster-critical
    mon: system-node-critical
    osd: system-node-critical
  removeOSDsIfOutAndSafeToRemove: false
  resources:
    cleanup:
      limits:
        cpu: 500m
        memory: 1Gi
      requests:
        cpu: 500m
        memory: 100Mi
    crashcollector:
      limits:
        cpu: 500m
        memory: 60Mi
      requests:
        cpu: 100m
        memory: 60Mi
    exporter:
      limits:
        cpu: 250m
        memory: 128Mi
      requests:
        cpu: 50m
        memory: 50Mi
    logcollector:
      limits:
        cpu: 500m
        memory: 1Gi
      requests:
        cpu: 100m
        memory: 100Mi
    mgr:
      limits:
        cpu: 1000m
        memory: 1Gi
      requests:
        cpu: 500m
        memory: 512Mi
    mgr-sidecar:
      limits:
        cpu: 500m
        memory: 100Mi
      requests:
        cpu: 100m
        memory: 40Mi
    mon:
      limits:
        cpu: 2000m
        memory: 2Gi
      requests:
        cpu: 1000m
        memory: 1Gi
    osd:
      limits:
        cpu: 2000m
        memory: 3072Mi
      requests:
        cpu: 750m
        memory: 2048Mi
    prepareosd:
      requests:
        cpu: 500m
        memory: 50Mi
  skipUpgradeChecks: false
  storage:
    useAllDevices: true
    useAllNodes: true
  waitTimeoutForHealthyOSDInMinutes: 10
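# Note: the placement rules above pin daemons to labeled nodes (mons to
# mon=mon, mgrs to mgr=mgr, OSDs to osd=osd, and the `all` rules to all=all),
# so nodes must carry the matching labels before the daemons can schedule.
# For example, to label one node for every role:
#   kubectl label node <node-name> all=all mon=mon mgr=mgr osd=osd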
---
# Source: rook-ceph-cluster/templates/cephfilesystem.yaml
apiVersion: ceph.rook.io/v1
kind: CephFilesystem
metadata:
  name: ceph-filesystem
  namespace: rook-ceph # namespace:cluster
spec:
  dataPools:
    - failureDomain: host
      name: data0
      replicated:
        size: 3
  metadataPool:
    replicated:
      size: 3
  metadataServer:
    activeCount: 1
    activeStandby: true
    priorityClassName: system-cluster-critical
    resources:
      limits:
        cpu: 2000m
        memory: 4Gi
      requests:
        cpu: 1000m
        memory: 4Gi
---
# Source: rook-ceph-cluster/templates/cephfilesystem.yaml
apiVersion: ceph.rook.io/v1
kind: CephFilesystemSubVolumeGroup
metadata:
  name: ceph-filesystem-csi # keep the subvolume group CR name as `<filesystem name>-csi` for the default CSI subvolume group
  namespace: rook-ceph # namespace:cluster
spec:
  # The name of the subvolume group. If not set, the default is the name of the subvolumeGroup CR.
  name: csi
  # filesystemName is the metadata name of the CephFilesystem CR where the subvolume group will be created
  filesystemName: ceph-filesystem
  # reference https://docs.ceph.com/en/latest/cephfs/fs-volumes/#pinning-subvolumes-and-subvolume-groups
  # only one of (export, distributed, random) can be set at a time
  # by default, pinning is set to distributed=1
  # to disable the default, set distributed=0
  pinning:
    distributed: 1 # distributed=<0, 1> (disabled=0)
    # export: # export=<0-256> (disabled=-1)
    # random: # random=[0.0, 1.0] (disabled=0.0)
---
# Source: rook-ceph-cluster/templates/cephobjectstore.yaml
apiVersion: ceph.rook.io/v1
kind: CephObjectStore
metadata:
  name: ceph-objectstore
  namespace: rook-ceph # namespace:cluster
spec:
  dataPool:
    erasureCoded:
      codingChunks: 1
      dataChunks: 2
    failureDomain: host
  gateway:
    instances: 1
    port: 80
    priorityClassName: system-cluster-critical
    resources:
      limits:
        cpu: 2000m
        memory: 2Gi
      requests:
        cpu: 1000m
        memory: 1Gi
  metadataPool:
    failureDomain: host
    replicated:
      size: 3
  preservePoolsOnDelete: true
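# Note on the erasure-coded data pool above: dataChunks: 2 plus codingChunks: 1
# is a 2+1 profile, so each object is split across three hosts
# (failureDomain: host), tolerates the loss of any one of them, and carries a
# 1.5x storage overhead versus 3x for the replicated metadata pool.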