# cluster.yaml
---
# Source: rook-ceph-cluster/templates/cephblockpool.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ceph-block
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: rook-ceph.rbd.csi.ceph.com
parameters:
  pool: ceph-blockpool
  clusterID: rook-ceph
  csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
  csi.storage.k8s.io/controller-expand-secret-namespace: 'rook-ceph'
  csi.storage.k8s.io/fstype: ext4
  csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
  csi.storage.k8s.io/node-stage-secret-namespace: 'rook-ceph'
  csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
  csi.storage.k8s.io/provisioner-secret-namespace: 'rook-ceph'
  imageFeatures: layering
  imageFormat: "2"
reclaimPolicy: Delete
allowVolumeExpansion: true
volumeBindingMode: Immediate
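# Illustrative only (not rendered by the chart): a PersistentVolumeClaim that
# would bind a volume from the ceph-block StorageClass above. The claim name
# and size below are hypothetical.
#
# apiVersion: v1
# kind: PersistentVolumeClaim
# metadata:
#   name: rbd-pvc
# spec:
#   accessModes:
#     - ReadWriteOnce
#   resources:
#     requests:
#       storage: 10Gi
#   storageClassName: ceph-block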
---
# Source: rook-ceph-cluster/templates/cephfilesystem.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ceph-filesystem
  annotations:
    storageclass.kubernetes.io/is-default-class: "false"
provisioner: rook-ceph.cephfs.csi.ceph.com
parameters:
  fsName: ceph-filesystem
  pool: ceph-filesystem-data0
  clusterID: rook-ceph
  csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
  csi.storage.k8s.io/controller-expand-secret-namespace: 'rook-ceph'
  csi.storage.k8s.io/fstype: ext4
  csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
  csi.storage.k8s.io/node-stage-secret-namespace: 'rook-ceph'
  csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
  csi.storage.k8s.io/provisioner-secret-namespace: 'rook-ceph'
reclaimPolicy: Delete
allowVolumeExpansion: true
volumeBindingMode: Immediate
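# Illustrative only (not rendered by the chart): a shared PVC against the
# ceph-filesystem StorageClass above; CephFS supports ReadWriteMany, so the
# same volume can be mounted by many pods at once. Name and size are
# hypothetical.
#
# apiVersion: v1
# kind: PersistentVolumeClaim
# metadata:
#   name: cephfs-pvc
# spec:
#   accessModes:
#     - ReadWriteMany
#   resources:
#     requests:
#       storage: 5Gi
#   storageClassName: ceph-filesystem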
---
# Source: rook-ceph-cluster/templates/cephobjectstore.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ceph-bucket
provisioner: rook-ceph.ceph.rook.io/bucket
reclaimPolicy: Delete
volumeBindingMode: Immediate
parameters:
  objectStoreName: ceph-objectstore
  objectStoreNamespace: rook-ceph
  region: us-east-1
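# Illustrative only (not rendered by the chart): an ObjectBucketClaim that
# requests an S3 bucket through this class; the claim name and bucket-name
# prefix are hypothetical.
#
# apiVersion: objectbucket.io/v1alpha1
# kind: ObjectBucketClaim
# metadata:
#   name: ceph-bucket-claim
# spec:
#   generateBucketName: ceph-bkt
#   storageClassName: ceph-bucket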
---
# Source: rook-ceph-cluster/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: rook-ceph-tools
  labels:
    app: rook-ceph-tools
spec:
  replicas: 1
  selector:
    matchLabels:
      app: rook-ceph-tools
  template:
    metadata:
      labels:
        app: rook-ceph-tools
    spec:
      dnsPolicy: ClusterFirstWithHostNet
      containers:
        - name: rook-ceph-tools
          image: quay.io/ceph/ceph:v18.2.0
          command:
            - /bin/bash
            - -c
            - |
              # Replicate the script from toolbox.sh inline so the ceph image
              # can be run directly, instead of requiring the rook toolbox
              CEPH_CONFIG="/etc/ceph/ceph.conf"
              MON_CONFIG="/etc/rook/mon-endpoints"
              KEYRING_FILE="/etc/ceph/keyring"

              # create a ceph config file in its default location so ceph/rados tools can be used
              # without specifying any arguments
              write_endpoints() {
                endpoints=$(cat ${MON_CONFIG})

                # filter out the mon names
                # external cluster can have numbers or hyphens in mon names, handling them in regex
                # shellcheck disable=SC2001
                mon_endpoints=$(echo "${endpoints}" | sed 's/[a-z0-9_-]\+=//g')

                DATE=$(date)
                echo "$DATE writing mon endpoints to ${CEPH_CONFIG}: ${endpoints}"
                cat <<EOF > ${CEPH_CONFIG}
              [global]
              mon_host = ${mon_endpoints}

              [client.admin]
              keyring = ${KEYRING_FILE}
              EOF
              }

              # watch the endpoints config file and update if the mon endpoints ever change
              watch_endpoints() {
                # get the timestamp for the target of the soft link
                real_path=$(realpath ${MON_CONFIG})
                initial_time=$(stat -c %Z "${real_path}")
                while true; do
                  real_path=$(realpath ${MON_CONFIG})
                  latest_time=$(stat -c %Z "${real_path}")
                  if [[ "${latest_time}" != "${initial_time}" ]]; then
                    write_endpoints
                    initial_time=${latest_time}
                  fi
                  sleep 10
                done
              }

              # read the secret from an env var (for backward compatibility), or from the secret file
              ceph_secret=${ROOK_CEPH_SECRET}
              if [[ "$ceph_secret" == "" ]]; then
                ceph_secret=$(cat /var/lib/rook-ceph-mon/secret.keyring)
              fi
              # create the keyring file
              cat <<EOF > ${KEYRING_FILE}
              [${ROOK_CEPH_USERNAME}]
              key = ${ceph_secret}
              EOF
              # write the initial config file
              write_endpoints

              # continuously update the mon endpoints if they fail over
              watch_endpoints
          imagePullPolicy: IfNotPresent
          tty: true
          env:
            - name: ROOK_CEPH_USERNAME
              valueFrom:
                secretKeyRef:
                  name: rook-ceph-mon
                  key: ceph-username
          resources:
            limits:
              cpu: 500m
              memory: 1Gi
            requests:
              cpu: 100m
              memory: 128Mi
          volumeMounts:
            - mountPath: /etc/ceph
              name: ceph-config
            - name: mon-endpoint-volume
              mountPath: /etc/rook
            - name: ceph-admin-secret
              mountPath: /var/lib/rook-ceph-mon
      volumes:
        - name: ceph-admin-secret
          secret:
            secretName: rook-ceph-mon
            optional: false
            items:
              - key: ceph-secret
                path: secret.keyring
        - name: mon-endpoint-volume
          configMap:
            name: rook-ceph-mon-endpoints
            items:
              - key: data
                path: mon-endpoints
        - name: ceph-config
          emptyDir: {}
      tolerations:
        - key: "node.kubernetes.io/unreachable"
          operator: "Exists"
          effect: "NoExecute"
          tolerationSeconds: 5
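# Once this Deployment is ready, Ceph can be administered from the toolbox
# pod, e.g. (assuming the manifest is installed in the rook-ceph namespace):
#
#   kubectl -n rook-ceph exec -it deploy/rook-ceph-tools -- ceph status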
---
# Source: rook-ceph-cluster/templates/securityContextConstraints.yaml
# scc for the Rook and Ceph daemons
# for creating cluster in openshift
---
# Source: rook-ceph-cluster/templates/volumesnapshotclass.yaml
---
---
# Source: rook-ceph-cluster/templates/cephblockpool.yaml
apiVersion: ceph.rook.io/v1
kind: CephBlockPool
metadata:
  name: ceph-blockpool
spec:
  failureDomain: host
  replicated:
    size: 3
---
# Source: rook-ceph-cluster/templates/cephcluster.yaml
apiVersion: ceph.rook.io/v1
kind: CephCluster
metadata:
  name: rook-ceph
spec:
  monitoring:
    enabled: false
  cephVersion:
    allowUnsupported: false
    image: quay.io/ceph/ceph:v18.2.0
  cleanupPolicy:
    allowUninstallWithVolumes: false
    confirmation: ""
    sanitizeDisks:
      dataSource: zero
      iteration: 1
      method: quick
  continueUpgradeAfterChecksEvenIfNotHealthy: false
  crashCollector:
    disable: true
  dashboard:
    enabled: true
    port: 8080
    ssl: false
    urlPrefix: /ceph-dashboard
  dataDirHostPath: /var/lib/rook
  disruptionManagement:
    managePodBudgets: true
    osdMaintenanceTimeout: 30
    pgHealthCheckTimeout: 0
  healthCheck:
    daemonHealth:
      mon:
        disabled: false
        interval: 45s
      osd:
        disabled: false
        interval: 60s
      status:
        disabled: false
        interval: 60s
    livenessProbe:
      mgr:
        disabled: false
      mon:
        disabled: false
      osd:
        disabled: false
  logCollector:
    enabled: true
    maxLogSize: 500M
    periodicity: daily
  mgr:
    allowMultiplePerNode: false
    count: 2
    modules:
      - enabled: true
        name: pg_autoscaler
      - enabled: true
        name: rook
  mon:
    allowMultiplePerNode: false
    count: 3
  network:
    connections:
      compression:
        enabled: false
      encryption:
        enabled: false
      requireMsgr2: false
    hostNetwork: true
    provider: host
  placement:
    all:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
            - matchExpressions:
                - key: all
                  operator: In
                  values:
                    - all
      podAffinity: null
      podAntiAffinity: null
      tolerations:
        - key: all
          operator: Exists
      topologySpreadConstraints: null
    mgr:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
            - matchExpressions:
                - key: mgr
                  operator: In
                  values:
                    - mgr
      podAffinity: null
      podAntiAffinity: null
      tolerations:
        - key: mgr
          operator: Exists
      topologySpreadConstraints: null
    mon:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
            - matchExpressions:
                - key: mon
                  operator: In
                  values:
                    - mon
      podAffinity: null
      podAntiAffinity: null
      tolerations:
        - key: mon
          operator: Exists
      topologySpreadConstraints: null
    osd:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
            - matchExpressions:
                - key: osd
                  operator: In
                  values:
                    - osd
      podAffinity: null
      podAntiAffinity: null
      tolerations:
        - key: osd
          operator: Exists
      topologySpreadConstraints: null
  priorityClassNames:
    mgr: system-cluster-critical
    mon: system-node-critical
    osd: system-node-critical
  removeOSDsIfOutAndSafeToRemove: false
  resources:
    cleanup:
      limits:
        cpu: 500m
        memory: 1Gi
      requests:
        cpu: 500m
        memory: 100Mi
    crashcollector:
      limits:
        cpu: 500m
        memory: 60Mi
      requests:
        cpu: 100m
        memory: 60Mi
    exporter:
      limits:
        cpu: 250m
        memory: 128Mi
      requests:
        cpu: 50m
        memory: 50Mi
    logcollector:
      limits:
        cpu: 500m
        memory: 1Gi
      requests:
        cpu: 100m
        memory: 100Mi
    mgr:
      limits:
        cpu: 1000m
        memory: 1Gi
      requests:
        cpu: 500m
        memory: 512Mi
    mgr-sidecar:
      limits:
        cpu: 500m
        memory: 100Mi
      requests:
        cpu: 100m
        memory: 40Mi
    mon:
      limits:
        cpu: 2000m
        memory: 2Gi
      requests:
        cpu: 1000m
        memory: 1Gi
    osd:
      limits:
        cpu: 2000m
        memory: 2560Mi
      requests:
        cpu: 1000m
        memory: 2560Mi
    prepareosd:
      requests:
        cpu: 500m
        memory: 50Mi
  skipUpgradeChecks: false
  storage:
    useAllDevices: true
    useAllNodes: true
  waitTimeoutForHealthyOSDInMinutes: 10
---
# Source: rook-ceph-cluster/templates/cephfilesystem.yaml
apiVersion: ceph.rook.io/v1
kind: CephFilesystem
metadata:
  name: ceph-filesystem
spec:
  dataPools:
    - failureDomain: host
      name: data0
      replicated:
        size: 3
  metadataPool:
    replicated:
      size: 3
  metadataServer:
    activeCount: 1
    activeStandby: true
    priorityClassName: system-cluster-critical
    resources:
      limits:
        cpu: 2000m
        memory: 4Gi
      requests:
        cpu: 1000m
        memory: 4Gi
---
# Source: rook-ceph-cluster/templates/cephobjectstore.yaml
apiVersion: ceph.rook.io/v1
kind: CephObjectStore
metadata:
  name: ceph-objectstore
spec:
  dataPool:
    erasureCoded:
      codingChunks: 1
      dataChunks: 2
    failureDomain: host
  gateway:
    instances: 1
    port: 80
    priorityClassName: system-cluster-critical
    resources:
      limits:
        cpu: 2000m
        memory: 2Gi
      requests:
        cpu: 1000m
        memory: 1Gi
  metadataPool:
    failureDomain: host
    replicated:
      size: 3
  preservePoolsOnDelete: true
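# Note on the erasure-coded data pool above: with dataChunks: 2 and
# codingChunks: 1, each object is stored as 2 data chunks plus 1 parity chunk
# on distinct hosts (failureDomain: host), so the object store needs at least
# 3 hosts and survives the loss of any one of them, at a raw-space overhead
# of (2+1)/2 = 1.5x versus 3x for size-3 replication.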