pool.yaml

#################################################################################################################
# Create a Ceph pool with settings for replication in production environments. A minimum of 3 OSDs on
# different hosts is required in this example.
#   kubectl create -f pool.yaml
#################################################################################################################
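# After creation, the pool can be inspected with, for example:
#   kubectl -n supernova-ns get cephblockpool replicapool
# (this assumes the Rook operator is already running and watching this namespace)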
apiVersion: ceph.rook.io/v1
kind: CephBlockPool
metadata:
  name: replicapool
  namespace: supernova-ns # namespace:cluster
spec:
  # The failure domain will spread the replicas of the data across different failure zones
  failureDomain: host
  # For a pool based on raw copies, specify the number of copies. A size of 1 indicates no redundancy.
  replicated:
    size: 3
    # Disallow setting a pool with replica size 1, as this could lead to data loss without recovery.
    # Make sure you're *ABSOLUTELY CERTAIN* that this is what you want.
    requireSafeReplicaSize: true
    # hybridStorage:
    #   primaryDeviceClass: ssd
    #   secondaryDeviceClass: hdd
    # The number of replicas per failure domain. The value must be a divisor of the replica count.
    # If specified, the most common value is 2 for stretch clusters, where the replica count would be 4.
    # replicasPerFailureDomain: 2
    # The name of the failure domain to place further down replicas
    # subFailureDomain: host
  # Ceph CRUSH root location of the rule
  # For reference: https://docs.ceph.com/docs/master/rados/operations/crush-map/#types-and-buckets
  # crushRoot: my-root
  # The Ceph CRUSH device class associated with the CRUSH replicated rule
  # For reference: https://docs.ceph.com/docs/master/rados/operations/crush-map/#device-classes
  # deviceClass: my-class
  # Enables collecting RBD per-image IO statistics by enabling dynamic OSD performance counters. Defaults to false.
  # For reference: https://docs.ceph.com/docs/master/mgr/prometheus/#rbd-io-statistics
  # enableRBDStats: true
  # Set any property on a given pool
  # see https://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values
  parameters:
    # Inline compression mode for the data pool
    # Further reference: https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#inline-compression
    compression_mode: none
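    # (valid compression modes are none, passive, aggressive, and force)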
    # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool
    # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size
    # target_size_ratio: ".5"
  mirroring:
    enabled: false
    # mirroring mode: pool level or per image
    # for more details see: https://docs.ceph.com/docs/master/rbd/rbd-mirroring/#enable-mirroring
    mode: image
    # specify the schedule(s) on which snapshots should be taken
    # snapshotSchedules:
    #   - interval: 24h # daily snapshots
    #     startTime: 14:00:00-05:00
  # reports pool mirroring status if enabled
  statusCheck:
    mirror:
      disabled: false
      interval: 60s
  # quota in bytes and/or objects, default value is 0 (unlimited)
  # see https://docs.ceph.com/en/latest/rados/operations/pools/#set-pool-quotas
  # quotas:
  #   maxSize: "10Gi" # valid suffixes include k, M, G, T, P, E, Ki, Mi, Gi, Ti, Pi, Ei
  #   maxObjects: 1000000000 # 1 billion objects
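# To check the resulting pool from inside the cluster, the Ceph CLI can be run via the Rook toolbox,
# for example (assuming a rook-ceph-tools deployment is installed in the same namespace):
#   kubectl -n supernova-ns exec deploy/rook-ceph-tools -- ceph osd pool ls detail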