# Default values for rook-ceph-operator
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

image:
  # -- Image
  repository: rook/ceph
  # -- Image tag
  # @default -- `master`
  tag: master
  # -- Image pull policy
  pullPolicy: IfNotPresent
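# Example (hypothetical tag shown): production deployments typically pin the operator image
# to a released version rather than `master`, e.g. in a custom values file:
#   image:
#     repository: rook/ceph
#     tag: v1.12.0
#     pullPolicy: IfNotPresent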

crds:
  # -- Whether the helm chart should create and update the CRDs. If false, the CRDs must be
  # managed independently with deploy/examples/crds.yaml.
  # **WARNING** Only set during first deployment. If later disabled the cluster may be DESTROYED.
  # If the CRDs are deleted in this case, see
  # [the disaster recovery guide](https://rook.io/docs/rook/latest/Troubleshooting/disaster-recovery/#restoring-crds-after-deletion)
  # to restore them.
  enabled: true

# -- Pod resource requests & limits
resources:
  limits:
    cpu: 500m
    memory: 512Mi
  requests:
    cpu: 100m
    memory: 128Mi

# -- Kubernetes [`nodeSelector`](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector) to add to the Deployment.
nodeSelector: {}
# Constrain the rook-ceph-operator Deployment to nodes with the label `disktype: ssd`.
# For more info, see https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
#  disktype: ssd

# -- List of Kubernetes [`tolerations`](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) to add to the Deployment.
tolerations: []

# -- Delay to use for the `node.kubernetes.io/unreachable` pod failure toleration to override
# the Kubernetes default of 5 minutes
unreachableNodeTolerationSeconds: 5

# -- Whether the operator should watch the cluster CRD in its own namespace or not
currentNamespaceOnly: false

# -- Pod annotations
annotations: {}

# -- Global log level for the operator.
# Options: `ERROR`, `WARNING`, `INFO`, `DEBUG`
logLevel: INFO

# -- If true, create & use RBAC resources
rbacEnable: true

# -- If true, create & use PSP resources
pspEnable: false

# -- Set the priority class for the rook operator deployment if desired
priorityClassName:

# -- Set the container security context for the operator
containerSecurityContext:
  runAsNonRoot: true
  runAsUser: 2016
  runAsGroup: 2016
  capabilities:
    drop: ["ALL"]

# -- If true, loop devices are allowed to be used for OSDs in test clusters
allowLoopDevices: false

# Settings for whether to disable the drivers or other daemons if they are not
# needed
csi:
  # -- Enable Ceph CSI RBD driver
  enableRbdDriver: true
  # -- Enable Ceph CSI CephFS driver
  enableCephfsDriver: true
  # -- Enable Ceph CSI GRPC metrics
  enableGrpcMetrics: false
  # -- Enable host networking for the CSI CephFS and RBD nodeplugins. This may be necessary
  # in some network configurations where the SDN does not provide access to an external cluster or
  # there is a significant drop in read/write performance
  enableCSIHostNetwork: true
  # -- Enable Snapshotter in the CephFS provisioner pod
  enableCephfsSnapshotter: true
  # -- Enable Snapshotter in the NFS provisioner pod
  enableNFSSnapshotter: true
  # -- Enable Snapshotter in the RBD provisioner pod
  enableRBDSnapshotter: true
  # -- Enable host mount of the `/etc/selinux` directory for Ceph CSI nodeplugins
  enablePluginSelinuxHostMount: false
  # -- Enable Ceph CSI PVC encryption support
  enableCSIEncryption: false
  # -- PriorityClassName to be set on the CSI driver plugin pods
  pluginPriorityClassName: system-node-critical
  # -- PriorityClassName to be set on the CSI driver provisioner pods
  provisionerPriorityClassName: system-cluster-critical
  # -- Policy for modifying a volume's ownership or permissions when the RBD PVC is being mounted.
  # Supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
  rbdFSGroupPolicy: "File"
  # -- Policy for modifying a volume's ownership or permissions when the CephFS PVC is being mounted.
  # Supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
  cephFSFSGroupPolicy: "File"
  # -- Policy for modifying a volume's ownership or permissions when the NFS PVC is being mounted.
  # Supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
  nfsFSGroupPolicy: "File"
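  # Example (hypothetical override): volumes shared by many pods with mixed fsGroups can
  # skip recursive ownership changes on mount by relaxing the policy, e.g.:
  #   cephFSFSGroupPolicy: "None"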
  # -- The OMAP generator generates the omap mapping between the PV name and the RBD image,
  # which helps CSI identify the RBD images for CSI operations.
  # `CSI_ENABLE_OMAP_GENERATOR` must be enabled when the RBD mirroring feature is used.
  # The OMAP generator is disabled by default; when enabled, it is deployed as a
  # sidecar with the CSI provisioner pod. Set to `true` to enable it.
  enableOMAPGenerator: false

  # -- Set CephFS kernel mount options to use, see https://docs.ceph.com/en/latest/man/8/mount.ceph/#options.
  # Set to "ms_mode=secure" when connections.encrypted is enabled in the CephCluster CR
  cephFSKernelMountOptions:

  # -- Enable adding volume metadata on the CephFS subvolumes and RBD images.
  # Not all users might be interested in getting volume/snapshot details as metadata on CephFS subvolumes and RBD images.
  # Hence, adding metadata is disabled by default
  enableMetadata: false

  # -- Set replicas for the CSI provisioner deployment
  provisionerReplicas: 2

  # -- Cluster name identifier to set as metadata on the CephFS subvolumes and RBD images. This is useful
  # when, for example, two container orchestrator clusters (Kubernetes/OCP) are using a single Ceph cluster
  clusterName:

  # -- Set logging level for the cephCSI containers maintained by cephCSI.
  # Supported values from 0 to 5. 0 for general useful logs, 5 for trace level verbosity.
  logLevel: 0

  # -- Set logging level for the Kubernetes CSI sidecar containers.
  # Supported values from 0 to 5. 0 for general useful logs (the default), 5 for trace level verbosity.
  # @default -- `0`
  sidecarLogLevel:

  # -- CSI RBD plugin DaemonSet update strategy; supported values are OnDelete and RollingUpdate
  # @default -- `RollingUpdate`
  rbdPluginUpdateStrategy:

  # -- The maxUnavailable parameter of the CSI RBD plugin DaemonSet update strategy.
  # @default -- `1`
  rbdPluginUpdateStrategyMaxUnavailable:

  # -- CSI CephFS plugin DaemonSet update strategy; supported values are OnDelete and RollingUpdate
  # @default -- `RollingUpdate`
  cephFSPluginUpdateStrategy:

  # -- The maxUnavailable parameter of the CSI CephFS plugin DaemonSet update strategy.
  # @default -- `1`
  cephFSPluginUpdateStrategyMaxUnavailable:

  # -- CSI NFS plugin DaemonSet update strategy; supported values are OnDelete and RollingUpdate
  # @default -- `RollingUpdate`
  nfsPluginUpdateStrategy:
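  # Example (hypothetical override): stage RBD plugin restarts one node at a time during
  # chart upgrades by pinning the update strategy explicitly:
  #   rbdPluginUpdateStrategy: RollingUpdate
  #   rbdPluginUpdateStrategyMaxUnavailable: 1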
  # -- Set the GRPC timeout for CSI containers (in seconds). It should be >= 120.
  # If this value is not set or is invalid, it defaults to 150
  grpcTimeoutInSeconds: 150

  # -- Allow starting an unsupported ceph-csi image
  allowUnsupportedVersion: false

  # -- The volume of the CephCSI RBD plugin DaemonSet
  csiRBDPluginVolume:
  #  - name: lib-modules
  #    hostPath:
  #      path: /run/booted-system/kernel-modules/lib/modules/
  #  - name: host-nix
  #    hostPath:
  #      path: /nix

  # -- The volume mounts of the CephCSI RBD plugin DaemonSet
  csiRBDPluginVolumeMount:
  #  - name: host-nix
  #    mountPath: /nix
  #    readOnly: true

  # -- The volume of the CephCSI CephFS plugin DaemonSet
  csiCephFSPluginVolume:
  #  - name: lib-modules
  #    hostPath:
  #      path: /run/booted-system/kernel-modules/lib/modules/
  #  - name: host-nix
  #    hostPath:
  #      path: /nix

  # -- The volume mounts of the CephCSI CephFS plugin DaemonSet
  csiCephFSPluginVolumeMount:
  #  - name: host-nix
  #    mountPath: /nix
  #    readOnly: true

  # -- CEPH CSI RBD provisioner resource requirement list;
  # csi-omap-generator resources will be applied only if `enableOMAPGenerator` is set to `true`
  # @default -- see values.yaml
  csiRBDProvisionerResource: |
    - name : csi-provisioner
      resource:
        requests:
          memory: 128Mi
          cpu: 100m
        limits:
          memory: 256Mi
          cpu: 200m
    - name : csi-resizer
      resource:
        requests:
          memory: 128Mi
          cpu: 100m
        limits:
          memory: 256Mi
          cpu: 200m
    - name : csi-attacher
      resource:
        requests:
          memory: 128Mi
          cpu: 100m
        limits:
          memory: 256Mi
          cpu: 200m
    - name : csi-snapshotter
      resource:
        requests:
          memory: 128Mi
          cpu: 100m
        limits:
          memory: 256Mi
          cpu: 200m
    - name : csi-rbdplugin
      resource:
        requests:
          memory: 512Mi
          cpu: 250m
        limits:
          memory: 1Gi
          cpu: 500m
    - name : csi-omap-generator
      resource:
        requests:
          memory: 512Mi
          cpu: 250m
        limits:
          memory: 1Gi
          cpu: 500m
    - name : liveness-prometheus
      resource:
        requests:
          memory: 128Mi
          cpu: 50m
        limits:
          memory: 256Mi
          cpu: 100m

  # -- CEPH CSI RBD plugin resource requirement list
  # @default -- see values.yaml
  csiRBDPluginResource: |
    - name : driver-registrar
      resource:
        requests:
          memory: 128Mi
          cpu: 50m
        limits:
          memory: 256Mi
          cpu: 100m
    - name : csi-rbdplugin
      resource:
        requests:
          memory: 512Mi
          cpu: 250m
        limits:
          memory: 1Gi
          cpu: 500m
    - name : liveness-prometheus
      resource:
        requests:
          memory: 128Mi
          cpu: 50m
        limits:
          memory: 256Mi
          cpu: 100m
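  # Example (hypothetical sizes): these resource lists are plain YAML strings, so a Helm
  # override replaces the whole string rather than merging with it. A trimmed block for a
  # small test cluster might look like:
  #   csiRBDPluginResource: |
  #     - name : csi-rbdplugin
  #       resource:
  #         requests:
  #           memory: 256Mi
  #           cpu: 100m
  #         limits:
  #           memory: 512Mi
  #           cpu: 250m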
  # -- CEPH CSI CephFS provisioner resource requirement list
  # @default -- see values.yaml
  csiCephFSProvisionerResource: |
    - name : csi-provisioner
      resource:
        requests:
          memory: 128Mi
          cpu: 100m
        limits:
          memory: 256Mi
          cpu: 200m
    - name : csi-resizer
      resource:
        requests:
          memory: 128Mi
          cpu: 100m
        limits:
          memory: 256Mi
          cpu: 200m
    - name : csi-attacher
      resource:
        requests:
          memory: 128Mi
          cpu: 100m
        limits:
          memory: 256Mi
          cpu: 200m
    - name : csi-snapshotter
      resource:
        requests:
          memory: 128Mi
          cpu: 100m
        limits:
          memory: 256Mi
          cpu: 200m
    - name : csi-cephfsplugin
      resource:
        requests:
          memory: 512Mi
          cpu: 250m
        limits:
          memory: 1Gi
          cpu: 500m
    - name : liveness-prometheus
      resource:
        requests:
          memory: 128Mi
          cpu: 50m
        limits:
          memory: 256Mi
          cpu: 100m

  # -- CEPH CSI CephFS plugin resource requirement list
  # @default -- see values.yaml
  csiCephFSPluginResource: |
    - name : driver-registrar
      resource:
        requests:
          memory: 128Mi
          cpu: 50m
        limits:
          memory: 256Mi
          cpu: 100m
    - name : csi-cephfsplugin
      resource:
        requests:
          memory: 512Mi
          cpu: 250m
        limits:
          memory: 1Gi
          cpu: 500m
    - name : liveness-prometheus
      resource:
        requests:
          memory: 128Mi
          cpu: 50m
        limits:
          memory: 256Mi
          cpu: 100m

  # -- CEPH CSI NFS provisioner resource requirement list
  # @default -- see values.yaml
  csiNFSProvisionerResource: |
    - name : csi-provisioner
      resource:
        requests:
          memory: 128Mi
          cpu: 100m
        limits:
          memory: 256Mi
          cpu: 200m
    - name : csi-nfsplugin
      resource:
        requests:
          memory: 512Mi
          cpu: 250m
        limits:
          memory: 1Gi
          cpu: 500m
    - name : csi-attacher
      resource:
        requests:
          memory: 512Mi
          cpu: 250m
        limits:
          memory: 1Gi
          cpu: 500m

  # -- CEPH CSI NFS plugin resource requirement list
  # @default -- see values.yaml
  csiNFSPluginResource: |
    - name : driver-registrar
      resource:
        requests:
          memory: 128Mi
          cpu: 50m
        limits:
          memory: 256Mi
          cpu: 100m
    - name : csi-nfsplugin
      resource:
        requests:
          memory: 512Mi
          cpu: 250m
        limits:
          memory: 1Gi
          cpu: 500m

  # Set provisionerTolerations and provisionerNodeAffinity for the provisioner pod.
  # The CSI provisioner is best started on the same nodes as the other Ceph daemons.
  # -- Array of tolerations in YAML format which will be added to the CSI provisioner deployment
  provisionerTolerations:
  #    - key: key
  #      operator: Exists
  #      effect: NoSchedule
  # -- The node labels for affinity of the CSI provisioner deployment [^1]
  provisionerNodeAffinity: #key1=value1,value2; key2=value3

  # Set pluginTolerations and pluginNodeAffinity for the plugin DaemonSet pods.
  # The CSI plugins need to be started on all the nodes where the clients need to mount the storage.
  # -- Array of tolerations in YAML format which will be added to the CephCSI plugin DaemonSet
  pluginTolerations:
  #    - key: key
  #      operator: Exists
  #      effect: NoSchedule
  # -- The node labels for affinity of the CephCSI RBD plugin DaemonSet [^1]
  pluginNodeAffinity: # key1=value1,value2; key2=value3

  # -- Enable the Ceph CSI liveness sidecar deployment
  enableLiveness: false

  # -- CSI CephFS driver GRPC metrics port
  # @default -- `9091`
  cephfsGrpcMetricsPort:
  # -- CSI CephFS driver metrics port
  # @default -- `9081`
  cephfsLivenessMetricsPort:
  # -- Ceph CSI RBD driver GRPC metrics port
  # @default -- `9090`
  rbdGrpcMetricsPort:
  # -- CSI Addons server port
  # @default -- `9070`
  csiAddonsPort:
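  # Example (hypothetical labels): the affinity values above use the footnote [^1] format,
  # so pinning provisioners to dedicated storage nodes could look like:
  #   provisionerNodeAffinity: "role=storage; storage=ceph"
  #   pluginNodeAffinity: "storage=;"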
  # -- Enable Ceph kernel clients on kernel < 4.17. If your kernel does not support quotas for CephFS
  # you may want to disable this setting. However, this will cause an issue during upgrades
  # with the FUSE client. See the [upgrade guide](https://rook.io/docs/rook/v1.2/ceph-upgrade.html)
  forceCephFSKernelClient: true

  # -- Ceph CSI RBD driver metrics port
  # @default -- `8080`
  rbdLivenessMetricsPort:

  serviceMonitor:
    # -- Enable ServiceMonitor for Ceph CSI drivers
    enabled: false
    # -- ServiceMonitor scrape interval
    interval: 5s
    # -- ServiceMonitor additional labels
    labels: {}
    # -- Use a different namespace for the ServiceMonitor
    namespace:

  # -- Kubelet root directory path (if the Kubelet uses a different path for the `--root-dir` flag)
  # @default -- `/var/lib/kubelet`
  kubeletDirPath:

  cephcsi:
    # -- Ceph CSI image
    # @default -- `quay.io/cephcsi/cephcsi:v3.9.0`
    image:

  registrar:
    # -- Kubernetes CSI registrar image
    # @default -- `registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.8.0`
    image:

  provisioner:
    # -- Kubernetes CSI provisioner image
    # @default -- `registry.k8s.io/sig-storage/csi-provisioner:v3.5.0`
    image:

  snapshotter:
    # -- Kubernetes CSI snapshotter image
    # @default -- `registry.k8s.io/sig-storage/csi-snapshotter:v6.2.2`
    image:

  attacher:
    # -- Kubernetes CSI attacher image
    # @default -- `registry.k8s.io/sig-storage/csi-attacher:v4.3.0`
    image:

  resizer:
    # -- Kubernetes CSI resizer image
    # @default -- `registry.k8s.io/sig-storage/csi-resizer:v1.8.0`
    image:

  # -- Image pull policy
  imagePullPolicy: IfNotPresent

  # -- Labels to add to the CSI CephFS Deployment and DaemonSet pods
  cephfsPodLabels: #"key1=value1,key2=value2"

  # -- Labels to add to the CSI NFS Deployment and DaemonSet pods
  nfsPodLabels: #"key1=value1,key2=value2"

  # -- Labels to add to the CSI RBD Deployment and DaemonSet pods
  rbdPodLabels: #"key1=value1,key2=value2"

  csiAddons:
    # -- Enable CSIAddons
    enabled: false
    # -- CSIAddons sidecar image
    image: "quay.io/csiaddons/k8s-sidecar:v0.7.0"

  nfs:
    # -- Enable the NFS CSI driver
    enabled: false

  topology:
    # -- Enable topology-based provisioning
    enabled: false
    # NOTE: the value here serves as an example and needs to be
    # updated with node labels that define domains of interest
    # -- domainLabels define which node labels to use as domains
    # for CSI nodeplugins to advertise their domains
    domainLabels:
    # - kubernetes.io/hostname
    # - topology.kubernetes.io/zone
    # - topology.rook.io/rack

  readAffinity:
    # -- Enable read affinity for RBD volumes. Recommended to
    # set to true if running kernel 5.8 or newer.
    # @default -- `false`
    enabled: false
    # -- Define which node labels to use
    # as CRUSH location. This should correspond to the values set
    # in the CRUSH map.
    # @default -- labels listed [here](../CRDs/Cluster/ceph-cluster-crd.md#osd-topology)
    crushLocationLabels:

  # -- Whether to skip any attach operation altogether for CephFS PVCs. See more details
  # [here](https://kubernetes-csi.github.io/docs/skip-attach.html#skip-attach-with-csi-driver-object).
  # If cephFSAttachRequired is set to false it skips the volume attachments and makes the creation
  # of pods using the CephFS PVC fast. **WARNING** It's highly discouraged to use this for
  # CephFS RWO volumes. Refer to this [issue](https://github.com/kubernetes/kubernetes/issues/103305) for more details.
  cephFSAttachRequired: true
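  # Example (hypothetical labels): enable read affinity so RBD reads are served from OSDs
  # in the client's failure domain, assuming the CRUSH map uses zone and rack labels:
  #   readAffinity:
  #     enabled: true
  #     crushLocationLabels:
  #       - topology.kubernetes.io/zone
  #       - topology.rook.io/rack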
  # -- Whether to skip any attach operation altogether for RBD PVCs. See more details
  # [here](https://kubernetes-csi.github.io/docs/skip-attach.html#skip-attach-with-csi-driver-object).
  # If set to false it skips the volume attachments and makes the creation of pods using the RBD PVC fast.
  # **WARNING** It's highly discouraged to use this for RWO volumes as it can cause data corruption.
  # csi-addons operations like ReclaimSpace and PVC key rotation will also not be supported if set
  # to false since we'll have no VolumeAttachments to determine which node the PVC is mounted on.
  # Refer to this [issue](https://github.com/kubernetes/kubernetes/issues/103305) for more details.
  rbdAttachRequired: true

  # -- Whether to skip any attach operation altogether for NFS PVCs. See more details
  # [here](https://kubernetes-csi.github.io/docs/skip-attach.html#skip-attach-with-csi-driver-object).
  # If nfsAttachRequired is set to false it skips the volume attachments and makes the creation
  # of pods using the NFS PVC fast. **WARNING** It's highly discouraged to use this for
  # NFS RWO volumes. Refer to this [issue](https://github.com/kubernetes/kubernetes/issues/103305) for more details.
  nfsAttachRequired: true

# -- Enable the discovery daemon
enableDiscoveryDaemon: false

# -- Set the discovery daemon device discovery interval (defaults to 60m)
discoveryDaemonInterval: 60m

# -- The timeout for Ceph commands in seconds
cephCommandsTimeoutSeconds: "15"

# -- If true, run the rook operator on the host network
useOperatorHostNetwork:

# -- If true, scale down the rook operator.
# This is useful for administrative actions where the rook operator must be scaled down
# while using gitops-style tooling to deploy your helm charts.
scaleDownOperator: false

## Rook Discover configuration
## toleration: NoSchedule, PreferNoSchedule or NoExecute
## tolerationKey: Set this to the specific key of the taint to tolerate
## tolerations: Array of tolerations in YAML format which will be added to agent deployment
## nodeAffinity: Set to labels of the node to match
discover:
  # -- Toleration for the discover pods.
  # Options: `NoSchedule`, `PreferNoSchedule` or `NoExecute`
  toleration:
  # -- The specific key of the taint to tolerate
  tolerationKey:
  # -- Array of tolerations in YAML format which will be added to the discover deployment
  tolerations:
  #   - key: key
  #     operator: Exists
  #     effect: NoSchedule
  # -- The node labels for affinity of `discover-agent` [^1]
  nodeAffinity: # key1=value1,value2; key2=value3
  # -- Labels to add to the discover pods
  podLabels: # "key1=value1,key2=value2"
  # -- Add resources to the discover daemon pods
  resources:
  #   - limits:
  #       cpu: 500m
  #       memory: 512Mi
  #   - requests:
  #       cpu: 100m
  #       memory: 128Mi

# -- Whether to disable the admission controller
disableAdmissionController: true

# -- Runs Ceph pods as privileged to be able to write to `hostPaths` in OpenShift with SELinux restrictions.
hostpathRequiresPrivileged: false

# -- Disable automatic orchestration when new devices are discovered.
disableDeviceHotplug: false

# -- Blacklist certain disks according to the regex provided.
discoverDaemonUdev:
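# Example (hypothetical regex): skip device-mapper and network block devices during
# discovery; the value is interpreted as a regular expression over device names:
#   discoverDaemonUdev: "(?i)dm-[0-9]+,(?i)rbd[0-9]+,(?i)nbd[0-9]+"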
# -- The imagePullSecrets option allows pulling container images from a private registry.
# The option will be passed to all service accounts.
imagePullSecrets:
# - name: my-registry-secret

# -- Whether the OBC provisioner should watch on the operator namespace or not;
# if not, the namespace of the cluster will be used
enableOBCWatchOperatorNamespace: true

# -- Set tolerations and nodeAffinity [^1] for the admission controller pod.
# The admission controller is best started on the same nodes as the other Ceph daemons.
admissionController:
  # tolerations:
  #    - key: key
  #      operator: Exists
  #      effect: NoSchedule
  # nodeAffinity: key1=value1,value2; key2=value3

# [^1]: `nodeAffinity` and `*NodeAffinity` options should have the format `"role=storage,rook; storage=ceph"` or `storage=;role=rook-example` or `storage=;` (_checks only for presence of key_)

monitoring:
  # -- Enable monitoring. Requires Prometheus to be pre-installed.
  # Enabling will also create RBAC rules to allow the operator to create ServiceMonitors
  enabled: false

# All values below are taken from the CephCluster CRD
# -- Cluster configuration.
# @default -- See [below](#ceph-cluster-spec)
cephClusterSpec:
  external:
    enable: false
  crashCollector:
    disable: true
  healthCheck:
    daemonHealth:
      mon:
        disabled: false
        interval: 45s

# -- A list of CephBlockPool configurations to deploy
# @default -- See [below](#ceph-block-pools)
cephBlockPools: {}

# -- A list of CephFileSystem configurations to deploy
# @default -- See [below](#ceph-file-systems)
cephFileSystems: {}

# -- A list of CephObjectStore configurations to deploy
# @default -- See [below](#ceph-object-stores)
cephObjectStores: {}
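# Example (hypothetical spec): a minimal replicated block pool with a matching
# StorageClass could be declared by overriding cephBlockPools in a custom values file:
#   cephBlockPools:
#     - name: replicapool
#       spec:
#         failureDomain: host
#         replicated:
#           size: 3
#       storageClass:
#         enabled: true
#         name: rook-ceph-block
#         reclaimPolicy: Delete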