# Default values for a single rook-ceph cluster
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

# -- Namespace of the main rook operator
operatorNamespace: rook-ceph

# -- The metadata.name of the CephCluster CR
# @default -- The same as the namespace
clusterName:

# -- Optional override of the target kubernetes version
kubeVersion:

# -- Cluster ceph.conf override
configOverride:
# configOverride: |
#   [global]
#   mon_allow_pool_delete = true
#   osd_pool_default_size = 3
#   osd_pool_default_min_size = 2

# Installs a debugging toolbox deployment
toolbox:
  # -- Enable Ceph debugging pod deployment. See [toolbox](../Troubleshooting/ceph-toolbox.md)
  enabled: true
  # -- Toolbox image, defaults to the image used by the Ceph cluster
  image: quay.io/ceph/ceph:v18.2.0
  # -- Toolbox tolerations
  tolerations: []
  # -- Toolbox affinity
  affinity: {}
  # -- Toolbox container security context
  containerSecurityContext:
    runAsNonRoot: true
    runAsUser: 2016
    runAsGroup: 2016
    capabilities:
      drop: ["ALL"]
  # -- Toolbox resources
  resources:
    limits:
      cpu: "500m"
      memory: "1Gi"
    requests:
      cpu: "100m"
      memory: "128Mi"
  # -- Set the priority class for the toolbox if desired
  priorityClassName:
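
# Once the toolbox deployment is running, cluster health can be checked with,
# for example (a sketch assuming the chart's default deployment name and namespace):
#   kubectl -n rook-ceph exec -it deploy/rook-ceph-tools -- ceph status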

monitoring:
  # -- Enable Prometheus integration; this also creates the RBAC rules needed to allow the operator to create ServiceMonitors.
  # Monitoring requires Prometheus to be pre-installed.
  enabled: false
  # -- Whether to create the Prometheus rules for Ceph alerts
  createPrometheusRules: false
  # -- The namespace in which to create the prometheus rules, if different from the rook cluster namespace.
  # If you have multiple rook-ceph clusters in the same k8s cluster, set rulesNamespaceOverride to the same namespace
  # (ideally, the namespace where prometheus is deployed) for all the clusters.
  # Otherwise, you will get duplicate alerts with multiple alert definitions.
  rulesNamespaceOverride:
  # Monitoring settings for external clusters:
  # externalMgrEndpoints: <list of endpoints>
  # externalMgrPrometheusPort: <port>
  # allow adding custom labels and annotations to the prometheus rule
  prometheusRule:
    # -- Labels applied to PrometheusRule
    labels: {}
    # -- Annotations applied to PrometheusRule
    annotations: {}
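
# For example, enabling the integration once a Prometheus operator is present
# could look like this (a sketch; the "monitoring" namespace is an assumption):
# monitoring:
#   enabled: true
#   createPrometheusRules: true
#   rulesNamespaceOverride: monitoring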

# -- Create & use PSP resources. Set this to the same value as the rook-ceph chart.
pspEnable: false

# The imagePullSecrets option allows pulling images from a private registry; it is passed to all service accounts.
# imagePullSecrets:
# - name: my-registry-secret

# All values below are taken from the CephCluster CRD
# -- Cluster configuration.
# @default -- See [below](#ceph-cluster-spec)
cephClusterSpec:
  # This cluster spec example is for a converged cluster where all the Ceph daemons are running locally,
  # as in the host-based example (cluster.yaml). For a different configuration such as a
  # PVC-based cluster (cluster-on-pvc.yaml), external cluster (cluster-external.yaml),
  # or stretch cluster (cluster-stretched.yaml), replace this entire `cephClusterSpec`
  # with the specs from those examples.

  # For more details, check https://rook.io/docs/rook/v1.10/CRDs/Cluster/ceph-cluster-crd/
  cephVersion:
    # The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw).
    # v17 is Quincy, v18 is Reef.
    # RECOMMENDATION: In production, use a specific version tag instead of the general v18 tag, which pulls the latest release and could result in different
    # versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/.
    # If you want to be more precise, you can always use a timestamp tag such as quay.io/ceph/ceph:v15.2.11-20200419.
    # This tag might not contain a new Ceph version, just security fixes from the underlying operating system, which will reduce vulnerabilities.
    image: quay.io/ceph/ceph:v18.2.0
    # Whether to allow unsupported versions of Ceph. Currently `pacific`, `quincy`, and `reef` are supported.
    # Future versions such as `squid` (v19) would require this to be set to `true`.
    # Do not set to true in production.
    allowUnsupported: false

  # The path on the host where configuration files will be persisted. Must be specified.
  # Important: if you reinstall the cluster, make sure you delete this directory from each host, or else the mons will fail to start on the new cluster.
  # In Minikube, the '/data' directory is configured to persist across reboots. Use "/data/rook" in a Minikube environment.
  dataDirHostPath: /var/lib/rook

  # Whether or not the upgrade should continue even if a check fails.
  # This means Ceph's status could be degraded and we don't recommend upgrading, but you might decide otherwise.
  # Use at your OWN risk.
  # To understand Rook's upgrade process of Ceph, read https://rook.io/docs/rook/v1.10/Upgrade/ceph-upgrade/
  skipUpgradeChecks: false

  # Whether or not to continue if PGs are not clean during an upgrade
  continueUpgradeAfterChecksEvenIfNotHealthy: false

  # WaitTimeoutForHealthyOSDInMinutes defines the time (in minutes) the operator will wait before an OSD can be stopped for upgrade or restart.
  # If the timeout is exceeded and the OSD is not ok to stop, the operator will skip the upgrade for the current OSD and proceed with the next one
  # if `continueUpgradeAfterChecksEvenIfNotHealthy` is `false`. If `continueUpgradeAfterChecksEvenIfNotHealthy` is `true`, the operator will
  # continue with the upgrade of an OSD even if it's not ok to stop after the timeout. This timeout won't be applied if `skipUpgradeChecks` is `true`.
  # The default wait timeout is 10 minutes.
  waitTimeoutForHealthyOSDInMinutes: 10

  mon:
    # Set the number of mons to be started. Generally recommended to be 3.
    # For highest availability, an odd number of mons should be specified.
    count: 3
    # The mons should be on unique nodes. For production, at least 3 nodes are recommended for this reason.
    # Mons should only be allowed on the same node for test environments where data loss is acceptable.
    allowMultiplePerNode: false

  mgr:
    # When higher availability of the mgr is needed, increase the count to 2.
    # In that case, one mgr will be active and one in standby. When Ceph updates which
    # mgr is active, Rook will update the mgr services to match the active mgr.
    count: 2
    allowMultiplePerNode: false
    modules:
      # Several modules should not need to be included in this list. The "dashboard" and "monitoring" modules
      # are already enabled by other settings in the cluster CR.
      - name: pg_autoscaler
        enabled: true
      - name: rook
        enabled: true

  # enable the ceph dashboard for viewing cluster status
  dashboard:
    enabled: true
    # serve the dashboard under a subpath (useful when you are accessing the dashboard via a reverse proxy)
    urlPrefix: /ceph-dashboard
    # serve the dashboard at the given port
    port: 8080
    # Serve the dashboard using SSL (if using ingress to expose the dashboard and `ssl: true`, you need to set
    # the corresponding "backend protocol" annotation(s) for your ingress controller of choice)
    ssl: false
    # ssl: true
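
  # With the settings above, the dashboard can be reached for a quick check via
  # a port-forward (a sketch; the service name assumes the default Rook naming):
  #   kubectl -n rook-ceph port-forward svc/rook-ceph-mgr-dashboard 8080:8080
  # and then browsing to http://localhost:8080/ceph-dashboard/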

  # Network configuration, see: https://github.com/rook/rook/blob/master/Documentation/CRDs/ceph-cluster-crd.md#network-configuration-settings
  network:
    # enable host networking
    provider: host
    connections:
      # Whether to encrypt the data in transit across the wire to prevent eavesdropping on the data on the network.
      # The default is false. When encryption is enabled, all communication between clients and Ceph daemons, or between Ceph daemons, will be encrypted.
      # When encryption is not enabled, clients still establish a strong initial authentication and data integrity is still validated with a crc check.
      # IMPORTANT: Encryption requires the 5.11 kernel for the latest nbd and cephfs drivers. Alternatively, for testing only,
      # you can set "mounter: rbd-nbd" in the rbd storage class, or "mounter: fuse" in the cephfs storage class.
      # The nbd and fuse drivers are *not* recommended in production since restarting the csi driver pod will disconnect the volumes.
      encryption:
        enabled: false
      # Whether to compress the data in transit across the wire. The default is false.
      # Requires Ceph Quincy (v17) or newer. Also see the kernel requirements above for encryption.
      compression:
        enabled: false
      # Whether to require communication over msgr2. If true, the msgr v1 port (6789) will be disabled
      # and clients will be required to connect to the Ceph cluster with the v2 port (3300).
      # Requires a kernel that supports msgr v2 (kernel 5.11 or CentOS 8.4 or newer).
      requireMsgr2: false
  #   # EXPERIMENTAL: enable the Multus network provider
  #   provider: multus
  #   selectors:
  #     # The selector keys are required to be `public` and `cluster`.
  #     # Based on the configuration, the operator will do the following:
  #     #   1. if only the `public` selector key is specified, both the public_network and cluster_network Ceph settings will listen on that interface
  #     #   2. if both `public` and `cluster` selector keys are specified, the first one will point to the 'public_network' flag and the second one to the 'cluster_network' flag
  #     #
  #     # In order to work, each selector value must match a NetworkAttachmentDefinition object in Multus
  #     #
  #     # public: public-conf --> NetworkAttachmentDefinition object name in Multus
  #     # cluster: cluster-conf --> NetworkAttachmentDefinition object name in Multus
  #   # Provide the internet protocol version. IPv6, IPv4, or an empty string are valid options. An empty string means IPv4.
  #   ipFamily: "IPv6"
  #   # Ceph daemons to listen on both IPv4 and IPv6 networks
  #   dualStack: false
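
  # If on-the-wire encryption is wanted later (kernel 5.11+ on all clients),
  # only the connections block needs to change; a sketch:
  #   network:
  #     provider: host
  #     connections:
  #       encryption:
  #         enabled: true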

  # enable the crash collector for ceph daemon crash collection
  crashCollector:
    disable: true
    # Uncomment daysToRetain to prune ceph crash entries older than the
    # specified number of days.
    # daysToRetain: 30

  # enable the log collector; daemons will log to files and rotate
  logCollector:
    enabled: true
    periodicity: daily # one of: hourly, daily, weekly, monthly
    maxLogSize: 500M # SUFFIX may be 'M' or 'G'. Must be at least 1M.

  # automate the [data cleanup process](https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/ceph-teardown.md#delete-the-data-on-hosts) in cluster destruction
  cleanupPolicy:
    # Since cluster cleanup is destructive to data, confirmation is required.
    # To destroy all Rook data on hosts during uninstall, confirmation must be set to "yes-really-destroy-data".
    # This value should only be set when the cluster is about to be deleted. After the confirmation is set,
    # Rook will immediately stop configuring the cluster and only wait for the delete command.
    # If the empty string is set, Rook will not destroy any data on hosts during uninstall.
    confirmation: ""
    # sanitizeDisks represents settings for sanitizing OSD disks on cluster deletion
    sanitizeDisks:
      # method indicates if the entire disk should be sanitized or simply ceph's metadata
      # in both cases, re-installation is possible
      # possible choices are 'complete' or 'quick' (default)
      method: quick
      # dataSource indicates where to get random bytes from to write on the disk
      # possible choices are 'zero' (default) or 'random'
      # using random sources will consume entropy from the system and will take much more time than the zero source
      dataSource: zero
      # iteration overwrites the disk N times instead of the default (1)
      # takes an integer value
      iteration: 1
    # allowUninstallWithVolumes defines how the uninstall should be performed
    # If set to true, cephCluster deletion does not wait for the PVs to be deleted.
    allowUninstallWithVolumes: false

  # To control where various services will be scheduled by kubernetes, use the placement configuration sections below.
  # The configuration under 'all' schedules all services on nodes labeled 'all=all' and
  # tolerates taints with a key of 'all'; the per-daemon sections below follow the same pattern.
  placement:
    all:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
            - matchExpressions:
              - key: all
                operator: In
                values:
                - all
      podAffinity:
      podAntiAffinity:
      topologySpreadConstraints:
      tolerations:
      - key: all
        operator: Exists
    # The same placement information can also be specified for the mon, osd, and mgr components
    mon:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
            - matchExpressions:
              - key: mon
                operator: In
                values:
                - mon
      podAffinity:
      podAntiAffinity:
      topologySpreadConstraints:
      tolerations:
      - key: mon
        operator: Exists
    # Monitor deployments may contain an anti-affinity rule for avoiding monitor
    # collocation on the same node. This is a required rule when host network is used
    # or when AllowMultiplePerNode is false. Otherwise this anti-affinity rule is a
    # preferred rule with weight: 50.
    osd:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
            - matchExpressions:
              - key: osd
                operator: In
                values:
                - osd
      podAffinity:
      podAntiAffinity:
      topologySpreadConstraints:
      tolerations:
      - key: osd
        operator: Exists
    mgr:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
            - matchExpressions:
              - key: mgr
                operator: In
                values:
                - mgr
      podAffinity:
      podAntiAffinity:
      topologySpreadConstraints:
      tolerations:
      - key: mgr
        operator: Exists
  #   cleanup:
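
  # For the placement rules above to match, nodes must carry the corresponding
  # labels (and taints, if used); illustrative commands, names and the taint
  # value/effect are assumptions to adapt:
  #   kubectl label node <node-name> all=all mon=mon osd=osd mgr=mgr
  #   kubectl taint node <node-name> all=true:NoSchedule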

  # annotations:
  #   all:
  #   mon:
  #   osd:
  #   cleanup:
  #   prepareosd:
  #   # If no mgr annotations are set, prometheus scrape annotations will be set by default.
  #   mgr:

  # labels:
  #   all:
  #   mon:
  #   osd:
  #   cleanup:
  #   mgr:
  #   prepareosd:
  #   # monitoring is a list of key-value pairs. It is injected into all the monitoring resources created by the operator.
  #   # These labels can be passed as a LabelSelector to Prometheus.
  #   monitoring:

  resources:
    mgr:
      limits:
        cpu: "1000m"
        memory: "1Gi"
      requests:
        cpu: "500m"
        memory: "512Mi"
    mon:
      limits:
        cpu: "2000m"
        memory: "2Gi"
      requests:
        cpu: "1000m"
        memory: "1Gi"
    osd:
      limits:
        cpu: "2000m"
        memory: "5120Mi"
      requests:
        cpu: "1000m"
        memory: "4096Mi"
    prepareosd:
      # limits: It is not recommended to set limits on the OSD prepare job
      #         since it's a one-time burst for memory that must be allowed to
      #         complete without an OOM kill. Note however that if a k8s
      #         limitRange guardrail is defined external to Rook, the lack of
      #         a limit here may result in a sync failure, in which case a
      #         limit should be added. 1200Mi may suffice for up to 15Ti
      #         OSDs; for larger devices 2Gi may be required.
      #         cf. https://github.com/rook/rook/pull/11103
      requests:
        cpu: "500m"
        memory: "50Mi"
    mgr-sidecar:
      limits:
        cpu: "500m"
        memory: "100Mi"
      requests:
        cpu: "100m"
        memory: "40Mi"
    crashcollector:
      limits:
        cpu: "500m"
        memory: "60Mi"
      requests:
        cpu: "100m"
        memory: "60Mi"
    logcollector:
      limits:
        cpu: "500m"
        memory: "1Gi"
      requests:
        cpu: "100m"
        memory: "100Mi"
    cleanup:
      limits:
        cpu: "500m"
        memory: "1Gi"
      requests:
        cpu: "500m"
        memory: "100Mi"
    exporter:
      limits:
        cpu: "250m"
        memory: "128Mi"
      requests:
        cpu: "50m"
        memory: "50Mi"

  # The option to automatically remove OSDs that are out and are safe to destroy.
  removeOSDsIfOutAndSafeToRemove: false

  # priority classes to apply to ceph resources
  priorityClassNames:
    mon: system-node-critical
    osd: system-node-critical
    mgr: system-cluster-critical

  storage: # cluster level storage configuration and selection
    useAllNodes: true
    useAllDevices: true
    # deviceFilter:
    # config:
    #   crushRoot: "custom-root" # specify a non-default root label for the CRUSH map
    #   metadataDevice: "md0" # specify a non-rotational storage so ceph-volume will use it as the block db device of bluestore.
    #   databaseSizeMB: "1024" # uncomment if the disks are smaller than 100 GB
    #   osdsPerDevice: "1" # this value can be overridden at the node or device level
    #   encryptedDevice: "true" # the default value for this option is "false"
    # # Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named
    # # nodes below will be used as storage resources. Each node's 'name' field should match its 'kubernetes.io/hostname' label.
    # nodes:
    #   - name: "172.17.4.201"
    #     devices: # specific devices to use for storage can be specified for each node
    #       - name: "sdb"
    #       - name: "nvme01" # multiple osds can be created on high performance devices
    #         config:
    #           osdsPerDevice: "5"
    #       - name: "/dev/disk/by-id/ata-ST4000DM004-XXXX" # devices can be specified using full udev paths
    #     config: # configuration can be specified at the node level which overrides the cluster level config
    #   - name: "172.17.4.301"
    #     deviceFilter: "^sd."
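
  # For example, to create OSDs only on NVMe devices while still using every
  # node, a sketch:
  #   storage:
  #     useAllNodes: true
  #     useAllDevices: false
  #     deviceFilter: "^nvme."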

  # The section for configuring management of daemon disruptions during upgrade or fencing.
  disruptionManagement:
    # If true, the operator will create and manage PodDisruptionBudgets for OSD, Mon, RGW, and MDS daemons. OSD PDBs are managed dynamically
    # via the strategy outlined in the [design](https://github.com/rook/rook/blob/master/design/ceph/ceph-managed-disruptionbudgets.md). The operator will
    # block eviction of OSDs by default and unblock them safely when drains are detected.
    managePodBudgets: true
    # A duration in minutes that determines how long an entire failureDomain like `region/zone/host` will be held in `noout` (in addition to the
    # default DOWN/OUT interval) when it is draining. This is only relevant when `managePodBudgets` is `true`. The default value is `30` minutes.
    osdMaintenanceTimeout: 30
    # A duration in minutes that the operator will wait for the placement groups to become healthy (active+clean) after a drain was completed and OSDs came back up.
    # The operator will continue with the next drain if the timeout is exceeded. It only works if `managePodBudgets` is `true`.
    # No value or 0 means that the operator will wait until the placement groups are healthy before unblocking the next drain.
    pgHealthCheckTimeout: 0

  # Configure the healthcheck and liveness probes for ceph pods.
  # Valid values for daemons are 'mon', 'osd', 'status'
  healthCheck:
    daemonHealth:
      mon:
        disabled: false
        interval: 45s
      osd:
        disabled: false
        interval: 60s
      status:
        disabled: false
        interval: 60s
    # Change the pod liveness probe; it works for all mon, mgr, and osd pods.
    livenessProbe:
      mon:
        disabled: false
      mgr:
        disabled: false
      osd:
        disabled: false
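
  # Individual probes can also be tuned if the defaults prove too aggressive;
  # an illustrative sketch (field names follow the standard Kubernetes probe spec):
  #   livenessProbe:
  #     mon:
  #       disabled: false
  #       probe:
  #         initialDelaySeconds: 10
  #         failureThreshold: 5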

ingress:
  # -- Enable an ingress for the ceph-dashboard
  dashboard:
    {}
    # annotations:
    #   external-dns.alpha.kubernetes.io/hostname: dashboard.example.com
    #   nginx.ingress.kubernetes.io/rewrite-target: /ceph-dashboard/$2
    # If the dashboard has ssl: true, the following will make sure the NGINX Ingress controller can expose the dashboard correctly
    #   nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
    #   nginx.ingress.kubernetes.io/server-snippet: |
    #     proxy_ssl_verify off;
    # host:
    #   name: dashboard.example.com
    #   path: "/ceph-dashboard(/|$)(.*)"
    # tls:
    # - hosts:
    #     - dashboard.example.com
    #   secretName: testsecret-tls
    ## Note: Only one of the ingress class annotation or the `ingressClassName:` can be used at a time
    ## to set the ingress class
    # ingressClassName: nginx

# -- A list of CephBlockPool configurations to deploy
# @default -- See [below](#ceph-block-pools)
cephBlockPools:
  - name: ceph-blockpool
    # see https://github.com/rook/rook/blob/master/Documentation/CRDs/Block-Storage/ceph-block-pool-crd.md#spec for available configuration
    spec:
      failureDomain: host
      replicated:
        size: 3
      # Enables collecting RBD per-image IO statistics by enabling dynamic OSD performance counters. Defaults to false.
      # For reference: https://docs.ceph.com/docs/master/mgr/prometheus/#rbd-io-statistics
      # enableRBDStats: true
    storageClass:
      enabled: true
      name: ceph-block
      isDefault: true
      reclaimPolicy: Delete
      allowVolumeExpansion: true
      volumeBindingMode: "Immediate"
      mountOptions: []
      # see https://kubernetes.io/docs/concepts/storage/storage-classes/#allowed-topologies
      allowedTopologies: []
      #        - matchLabelExpressions:
      #            - key: rook-ceph-role
      #              values:
      #                - storage-node
      # see https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/Block-Storage-RBD/block-storage.md#provision-storage for available configuration
      parameters:
        # (optional) mapOptions is a comma-separated list of map options.
        # For krbd options refer to
        # https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options
        # For nbd options refer to
        # https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options
        # mapOptions: lock_on_read,queue_depth=1024

        # (optional) unmapOptions is a comma-separated list of unmap options.
        # For krbd options refer to
        # https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options
        # For nbd options refer to
        # https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options
        # unmapOptions: force

        # RBD image format. Defaults to "2".
        imageFormat: "2"

        # RBD image features, equivalent to the OR'd bitfield value: 63
        # Available with imageFormat: "2". Older releases of CSI RBD
        # support only the `layering` feature. The Linux kernel (KRBD) supports the
        # full feature complement as of 5.4.
        imageFeatures: layering

        # These secrets contain Ceph admin credentials.
        csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
        csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
        csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
        csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
        csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
        csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
        # Specify the filesystem type of the volume. If not specified, csi-provisioner
        # will set the default as `ext4`. Note that `xfs` is not recommended due to potential deadlocks
        # in hyperconverged settings where the volume is mounted on the same node as the osds.
        csi.storage.k8s.io/fstype: ext4
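
# A claim against the "ceph-block" class above would look like this
# (an illustrative, standard PersistentVolumeClaim):
#   apiVersion: v1
#   kind: PersistentVolumeClaim
#   metadata:
#     name: rbd-pvc
#   spec:
#     accessModes:
#       - ReadWriteOnce
#     resources:
#       requests:
#         storage: 10Gi
#     storageClassName: ceph-block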

# -- A list of CephFileSystem configurations to deploy
# @default -- See [below](#ceph-file-systems)
cephFileSystems:
  - name: ceph-filesystem
    # see https://github.com/rook/rook/blob/master/Documentation/CRDs/Shared-Filesystem/ceph-filesystem-crd.md#filesystem-settings for available configuration
    spec:
      metadataPool:
        replicated:
          size: 3
      dataPools:
        - failureDomain: host
          replicated:
            size: 3
          # Optional and highly recommended, 'data0' by default, see https://github.com/rook/rook/blob/master/Documentation/CRDs/Shared-Filesystem/ceph-filesystem-crd.md#pools
          name: data0
      metadataServer:
        activeCount: 1
        activeStandby: true
        resources:
          limits:
            cpu: "2000m"
            memory: "4Gi"
          requests:
            cpu: "1000m"
            memory: "4Gi"
        priorityClassName: system-cluster-critical
    storageClass:
      enabled: true
      isDefault: false
      name: ceph-filesystem
      # (Optional) specify a data pool to use; must be the name of one of the data pools above, 'data0' by default
      pool: data0
      reclaimPolicy: Delete
      allowVolumeExpansion: true
      volumeBindingMode: "Immediate"
      mountOptions: []
      # see https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/Shared-Filesystem-CephFS/filesystem-storage.md#provision-storage for available configuration
      parameters:
        # These secrets contain Ceph admin credentials.
        csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
        csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
        csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
        csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
        csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
        csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
        # Specify the filesystem type of the volume. If not specified, csi-provisioner
        # will set the default as `ext4`. Note that `xfs` is not recommended due to potential deadlocks
        # in hyperconverged settings where the volume is mounted on the same node as the osds.
        csi.storage.k8s.io/fstype: ext4
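
# Unlike RBD, CephFS-backed volumes can be mounted read-write by many pods at
# once; an illustrative ReadWriteMany claim:
#   apiVersion: v1
#   kind: PersistentVolumeClaim
#   metadata:
#     name: cephfs-pvc
#   spec:
#     accessModes:
#       - ReadWriteMany
#     resources:
#       requests:
#         storage: 10Gi
#     storageClassName: ceph-filesystem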

# -- Settings for the filesystem snapshot class
# @default -- See [CephFS Snapshots](../Storage-Configuration/Ceph-CSI/ceph-csi-snapshot.md#cephfs-snapshots)
cephFileSystemVolumeSnapshotClass:
  enabled: false
  name: ceph-filesystem
  isDefault: true
  deletionPolicy: Delete
  annotations: {}
  labels: {}
  # see https://rook.io/docs/rook/v1.10/Storage-Configuration/Ceph-CSI/ceph-csi-snapshot/#cephfs-snapshots for available configuration
  parameters: {}

# -- Settings for the block pool snapshot class
# @default -- See [RBD Snapshots](../Storage-Configuration/Ceph-CSI/ceph-csi-snapshot.md#rbd-snapshots)
cephBlockPoolsVolumeSnapshotClass:
  enabled: false
  name: ceph-block
  isDefault: false
  deletionPolicy: Delete
  annotations: {}
  labels: {}
  # see https://rook.io/docs/rook/v1.10/Storage-Configuration/Ceph-CSI/ceph-csi-snapshot/#rbd-snapshots for available configuration
  parameters: {}

# -- A list of CephObjectStore configurations to deploy
# @default -- See [below](#ceph-object-stores)
cephObjectStores:
  - name: ceph-objectstore
    # see https://github.com/rook/rook/blob/master/Documentation/CRDs/Object-Storage/ceph-object-store-crd.md#object-store-settings for available configuration
    spec:
      metadataPool:
        failureDomain: host
        replicated:
          size: 3
      dataPool:
        failureDomain: host
        erasureCoded:
          dataChunks: 2
          codingChunks: 1
      preservePoolsOnDelete: true
      gateway:
        port: 80
        resources:
          limits:
            cpu: "2000m"
            memory: "2Gi"
          requests:
            cpu: "1000m"
            memory: "1Gi"
        # securePort: 443
        # sslCertificateRef:
        instances: 1
        priorityClassName: system-cluster-critical
    storageClass:
      enabled: true
      name: ceph-bucket
      reclaimPolicy: Delete
      volumeBindingMode: "Immediate"
      # see https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/Object-Storage-RGW/ceph-object-bucket-claim.md#storageclass for available configuration
      parameters:
        # note: objectStoreNamespace and objectStoreName are configured by the chart
        region: us-east-1
    ingress:
      # Enable an ingress for the ceph-objectstore
      enabled: false
      # annotations: {}
      # host:
      #   name: objectstore.example.com
      #   path: /
      # tls:
      # - hosts:
      #     - objectstore.example.com
      #   secretName: ceph-objectstore-tls
      # ingressClassName: nginx
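
# With the "ceph-bucket" class enabled above, S3 buckets can be requested
# through an ObjectBucketClaim (the claim name is illustrative):
#   apiVersion: objectbucket.io/v1alpha1
#   kind: ObjectBucketClaim
#   metadata:
#     name: my-bucket
#   spec:
#     generateBucketName: my-bucket
#     storageClassName: ceph-bucket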

# cephECBlockPools are disabled by default; please remove the comments and set desired values to enable them
#cephECBlockPools:
#  # For erasure coding, a replicated metadata pool is required.
#  # https://rook.io/docs/rook/latest/CRDs/Shared-Filesystem/ceph-filesystem-crd/#erasure-coded
#  - name: ec-metadata-pool
#    # see https://github.com/rook/rook/blob/master/Documentation/CRDs/Block-Storage/ceph-block-pool-crd.md#spec for available configuration
#    spec:
#      replicated:
#        size: 2
#  - name: ec-data-pool
#    spec:
#      failureDomain: osd
#      erasureCoded:
#        dataChunks: 2
#        codingChunks: 1
#      deviceClass: hdd

# cephECStorageClass is also disabled by default; please remove the comments and set desired values to enable it.
# If cephECBlockPools are uncommented, you must remove the comments of cephECStorageClass as well.
#cephECStorageClass:
#  name: rook-ceph-block
#  # Change the "rook-ceph" provisioner prefix to match the operator namespace if needed
#  provisioner: rook-ceph.rbd.csi.ceph.com # driver:namespace:operator
#  parameters:
#    # clusterID is the namespace where the rook cluster is running
#    # If you change this namespace, also change the namespace below where the secret namespaces are defined
#    clusterID: rook-ceph # namespace:cluster
#
#    # If you want to use an erasure coded pool with RBD, you need to create
#    # two pools: one erasure coded and one replicated.
#    # You need to specify the replicated pool here in the `pool` parameter; it is
#    # used for the metadata of the images.
#    # The erasure coded pool must be set as the `dataPool` parameter below.
#    dataPool: ec-data-pool
#    pool: ec-metadata-pool
#
#    # (optional) mapOptions is a comma-separated list of map options.
#    # For krbd options refer to
#    # https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options
#    # For nbd options refer to
#    # https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options
#    # mapOptions: lock_on_read,queue_depth=1024
#
#    # (optional) unmapOptions is a comma-separated list of unmap options.
#    # For krbd options refer to
#    # https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options
#    # For nbd options refer to
#    # https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options
#    # unmapOptions: force
#
#    # RBD image format. Defaults to "2".
#    imageFormat: "2"
#
#    # RBD image features, equivalent to the OR'd bitfield value: 63
#    # Available with imageFormat: "2". Older releases of CSI RBD
#    # support only the `layering` feature. The Linux kernel (KRBD) supports the
#    # full feature complement as of 5.4.
#    # imageFeatures: layering,fast-diff,object-map,deep-flatten,exclusive-lock
#    imageFeatures: layering
#  allowVolumeExpansion: true
#  reclaimPolicy: Delete