# cluster.yaml

---
# Source: rook-ceph-cluster/templates/cephblockpool.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ceph-block
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: rook-ceph.rbd.csi.ceph.com
parameters:
  pool: ceph-blockpool
  clusterID: rook-ceph
  csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
  csi.storage.k8s.io/controller-expand-secret-namespace: 'rook-ceph'
  csi.storage.k8s.io/fstype: ext4
  csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
  csi.storage.k8s.io/node-stage-secret-namespace: 'rook-ceph'
  csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
  csi.storage.k8s.io/provisioner-secret-namespace: 'rook-ceph'
  imageFeatures: layering
  imageFormat: "2"
reclaimPolicy: Delete
allowVolumeExpansion: true
volumeBindingMode: Immediate
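# Example (not applied; name is illustrative): a PVC that binds to the default
# "ceph-block" class above. RBD images are single-writer block devices, so
# ReadWriteOnce is the appropriate access mode.
#
# apiVersion: v1
# kind: PersistentVolumeClaim
# metadata:
#   name: rbd-test-pvc
# spec:
#   accessModes:
#     - ReadWriteOnce
#   resources:
#     requests:
#       storage: 1Gi
#   storageClassName: ceph-block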
---
# Source: rook-ceph-cluster/templates/cephfilesystem.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ceph-filesystem
  annotations:
    storageclass.kubernetes.io/is-default-class: "false"
provisioner: rook-ceph.cephfs.csi.ceph.com
parameters:
  fsName: ceph-filesystem
  pool: ceph-filesystem-data0
  clusterID: rook-ceph
  csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
  csi.storage.k8s.io/controller-expand-secret-namespace: 'rook-ceph'
  csi.storage.k8s.io/fstype: ext4
  csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
  csi.storage.k8s.io/node-stage-secret-namespace: 'rook-ceph'
  csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
  csi.storage.k8s.io/provisioner-secret-namespace: 'rook-ceph'
reclaimPolicy: Delete
allowVolumeExpansion: true
volumeBindingMode: Immediate
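# Example (not applied; name is illustrative): unlike RBD, CephFS volumes can
# be mounted by many pods at once, so a claim on "ceph-filesystem" may use
# ReadWriteMany.
#
# apiVersion: v1
# kind: PersistentVolumeClaim
# metadata:
#   name: cephfs-shared-pvc
# spec:
#   accessModes:
#     - ReadWriteMany
#   resources:
#     requests:
#       storage: 10Gi
#   storageClassName: ceph-filesystem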
---
# Source: rook-ceph-cluster/templates/cephobjectstore.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ceph-bucket
provisioner: rook-ceph.ceph.rook.io/bucket
reclaimPolicy: Delete
volumeBindingMode: Immediate
parameters:
  objectStoreName: ceph-objectstore
  objectStoreNamespace: rook-ceph
  region: us-east-1
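# Example (not applied; names are illustrative): buckets on this class are
# requested with an ObjectBucketClaim rather than a PVC; Rook provisions the
# bucket in ceph-objectstore and writes S3 credentials to a Secret and
# ConfigMap named after the claim.
#
# apiVersion: objectbucket.io/v1alpha1
# kind: ObjectBucketClaim
# metadata:
#   name: ceph-bucket-claim
# spec:
#   generateBucketName: ceph-bkt
#   storageClassName: ceph-bucket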
---
# Source: rook-ceph-cluster/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: rook-ceph-tools
  labels:
    app: rook-ceph-tools
spec:
  replicas: 1
  selector:
    matchLabels:
      app: rook-ceph-tools
  template:
    metadata:
      labels:
        app: rook-ceph-tools
    spec:
      dnsPolicy: ClusterFirstWithHostNet
      containers:
        - name: rook-ceph-tools
          image: quay.io/ceph/ceph:v18.2.0
          command:
            - /bin/bash
            - -c
            - |
              # Replicate the script from toolbox.sh inline so the ceph image
              # can be run directly, instead of requiring the rook toolbox
              CEPH_CONFIG="/etc/ceph/ceph.conf"
              MON_CONFIG="/etc/rook/mon-endpoints"
              KEYRING_FILE="/etc/ceph/keyring"

              # create a ceph config file in its default location so ceph/rados tools can be used
              # without specifying any arguments
              write_endpoints() {
                endpoints=$(cat ${MON_CONFIG})

                # filter out the mon names
                # external cluster can have numbers or hyphens in mon names, handling them in regex
                # shellcheck disable=SC2001
                mon_endpoints=$(echo "${endpoints}" | sed 's/[a-z0-9_-]\+=//g')

                DATE=$(date)
                echo "$DATE writing mon endpoints to ${CEPH_CONFIG}: ${endpoints}"
                cat <<EOF > ${CEPH_CONFIG}
              [global]
              mon_host = ${mon_endpoints}

              [client.admin]
              keyring = ${KEYRING_FILE}
              EOF
              }

              # watch the endpoints config file and update if the mon endpoints ever change
              watch_endpoints() {
                # get the timestamp for the target of the soft link
                real_path=$(realpath ${MON_CONFIG})
                initial_time=$(stat -c %Z "${real_path}")
                while true; do
                  real_path=$(realpath ${MON_CONFIG})
                  latest_time=$(stat -c %Z "${real_path}")
                  if [[ "${latest_time}" != "${initial_time}" ]]; then
                    write_endpoints
                    initial_time=${latest_time}
                  fi
                  sleep 10
                done
              }

              # read the secret from an env var (for backward compatibility), or from the secret file
              ceph_secret=${ROOK_CEPH_SECRET}
              if [[ "$ceph_secret" == "" ]]; then
                ceph_secret=$(cat /var/lib/rook-ceph-mon/secret.keyring)
              fi

              # create the keyring file
              cat <<EOF > ${KEYRING_FILE}
              [${ROOK_CEPH_USERNAME}]
              key = ${ceph_secret}
              EOF
              # write the initial config file
              write_endpoints

              # continuously update the mon endpoints if they fail over
              watch_endpoints
          imagePullPolicy: IfNotPresent
          tty: true
          env:
            - name: ROOK_CEPH_USERNAME
              valueFrom:
                secretKeyRef:
                  name: rook-ceph-mon
                  key: ceph-username
          resources:
            limits:
              cpu: 500m
              memory: 1Gi
            requests:
              cpu: 100m
              memory: 128Mi
          volumeMounts:
            - mountPath: /etc/ceph
              name: ceph-config
            - name: mon-endpoint-volume
              mountPath: /etc/rook
            - name: ceph-admin-secret
              mountPath: /var/lib/rook-ceph-mon
      volumes:
        - name: ceph-admin-secret
          secret:
            secretName: rook-ceph-mon
            optional: false
            items:
              - key: ceph-secret
                path: secret.keyring
        - name: mon-endpoint-volume
          configMap:
            name: rook-ceph-mon-endpoints
            items:
              - key: data
                path: mon-endpoints
        - name: ceph-config
          emptyDir: {}
      tolerations:
        - key: "node.kubernetes.io/unreachable"
          operator: "Exists"
          effect: "NoExecute"
          tolerationSeconds: 5
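# Once the toolbox pod is running, Ceph commands can be run through it
# (assuming the release is installed in the rook-ceph namespace), e.g.:
#   kubectl -n rook-ceph exec -it deploy/rook-ceph-tools -- ceph status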
---
# Source: rook-ceph-cluster/templates/securityContextConstraints.yaml
# scc for the Rook and Ceph daemons
# for creating cluster in openshift
---
# Source: rook-ceph-cluster/templates/volumesnapshotclass.yaml
---
---
# Source: rook-ceph-cluster/templates/cephblockpool.yaml
apiVersion: ceph.rook.io/v1
kind: CephBlockPool
metadata:
  name: ceph-blockpool
spec:
  failureDomain: host
  replicated:
    size: 3
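# Note: replicated size 3 with failureDomain host places each copy on a
# different node, so this pool needs at least three nodes running OSDs.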
---
# Source: rook-ceph-cluster/templates/cephcluster.yaml
apiVersion: ceph.rook.io/v1
kind: CephCluster
metadata:
  name: rook-ceph
spec:
  monitoring:
    enabled: false
  cephVersion:
    allowUnsupported: false
    image: quay.io/ceph/ceph:v18.2.0
  cleanupPolicy:
    allowUninstallWithVolumes: false
    confirmation: ""
    sanitizeDisks:
      dataSource: zero
      iteration: 1
      method: quick
  continueUpgradeAfterChecksEvenIfNotHealthy: false
  crashCollector:
    disable: true
  dashboard:
    enabled: true
    port: 8080
    ssl: false
    urlPrefix: /ceph-dashboard
  dataDirHostPath: /var/lib/rook
  disruptionManagement:
    managePodBudgets: true
    osdMaintenanceTimeout: 30
    pgHealthCheckTimeout: 0
  healthCheck:
    daemonHealth:
      mon:
        disabled: false
        interval: 45s
      osd:
        disabled: false
        interval: 60s
      status:
        disabled: false
        interval: 60s
    livenessProbe:
      mgr:
        disabled: false
      mon:
        disabled: false
      osd:
        disabled: false
  logCollector:
    enabled: true
    maxLogSize: 500M
    periodicity: daily
  mgr:
    allowMultiplePerNode: false
    count: 2
    modules:
      - enabled: true
        name: pg_autoscaler
      - enabled: true
        name: rook
  mon:
    allowMultiplePerNode: false
    count: 3
  network:
    connections:
      compression:
        enabled: false
      encryption:
        enabled: false
      requireMsgr2: false
    hostNetwork: true
  placement:
    all:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
            - matchExpressions:
                - key: all
                  operator: In
                  values:
                    - all
      podAffinity: null
      podAntiAffinity: null
      tolerations:
        - key: all
          operator: Exists
      topologySpreadConstraints: null
    mgr:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
            - matchExpressions:
                - key: mgr
                  operator: In
                  values:
                    - mgr
      podAffinity: null
      podAntiAffinity: null
      tolerations:
        - key: mgr
          operator: Exists
      topologySpreadConstraints: null
    mon:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
            - matchExpressions:
                - key: mon
                  operator: In
                  values:
                    - mon
      podAffinity: null
      podAntiAffinity: null
      tolerations:
        - key: mon
          operator: Exists
      topologySpreadConstraints: null
    osd:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
            - matchExpressions:
                - key: osd
                  operator: In
                  values:
                    - osd
      podAffinity: null
      podAntiAffinity: null
      tolerations:
        - key: osd
          operator: Exists
      topologySpreadConstraints: null
  priorityClassNames:
    mgr: system-cluster-critical
    mon: system-node-critical
    osd: system-node-critical
  removeOSDsIfOutAndSafeToRemove: false
  resources:
    cleanup:
      limits:
        cpu: 500m
        memory: 1Gi
      requests:
        cpu: 500m
        memory: 100Mi
    crashcollector:
      limits:
        cpu: 500m
        memory: 60Mi
      requests:
        cpu: 100m
        memory: 60Mi
    exporter:
      limits:
        cpu: 250m
        memory: 128Mi
      requests:
        cpu: 50m
        memory: 50Mi
    logcollector:
      limits:
        cpu: 500m
        memory: 1Gi
      requests:
        cpu: 100m
        memory: 100Mi
    mgr:
      limits:
        cpu: 1000m
        memory: 1Gi
      requests:
        cpu: 500m
        memory: 512Mi
    mgr-sidecar:
      limits:
        cpu: 500m
        memory: 100Mi
      requests:
        cpu: 100m
        memory: 40Mi
    mon:
      limits:
        cpu: 2000m
        memory: 2Gi
      requests:
        cpu: 1000m
        memory: 1Gi
    osd:
      limits:
        cpu: 2000m
        memory: 2048Mi
      requests:
        cpu: 1000m
        memory: 2048Mi
    prepareosd:
      requests:
        cpu: 500m
        memory: 50Mi
  skipUpgradeChecks: false
  storage:
    useAllDevices: true
    useAllNodes: true
  waitTimeoutForHealthyOSDInMinutes: 10
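# Note: the placement rules above pin each daemon type to nodes carrying a
# matching label, so nodes must be labeled before daemons will schedule
# (node name is illustrative):
#   kubectl label node node1 all=all mon=mon mgr=mgr osd=osd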
---
# Source: rook-ceph-cluster/templates/cephfilesystem.yaml
apiVersion: ceph.rook.io/v1
kind: CephFilesystem
metadata:
  name: ceph-filesystem
spec:
  dataPools:
    - failureDomain: host
      name: data0
      replicated:
        size: 3
  metadataPool:
    replicated:
      size: 3
  metadataServer:
    activeCount: 1
    activeStandby: true
    priorityClassName: system-cluster-critical
    resources:
      limits:
        cpu: 2000m
        memory: 4Gi
      requests:
        cpu: 1000m
        memory: 4Gi
---
# Source: rook-ceph-cluster/templates/cephobjectstore.yaml
apiVersion: ceph.rook.io/v1
kind: CephObjectStore
metadata:
  name: ceph-objectstore
spec:
  dataPool:
    erasureCoded:
      codingChunks: 1
      dataChunks: 2
    failureDomain: host
  gateway:
    instances: 1
    port: 80
    priorityClassName: system-cluster-critical
    resources:
      limits:
        cpu: 2000m
        memory: 2Gi
      requests:
        cpu: 1000m
        memory: 1Gi
  metadataPool:
    failureDomain: host
    replicated:
      size: 3
  preservePoolsOnDelete: true
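# Note: the 2+1 erasure-coded data pool splits each object into two data
# chunks plus one coding chunk on three different hosts, tolerating the loss
# of one host at 1.5x raw-space overhead (vs. 3x for the replicated pools).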