---
# Source: rook-ceph-cluster/templates/cephblockpool.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ceph-block
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: rook-ceph.rbd.csi.ceph.com
parameters:
  pool: ceph-blockpool
  clusterID: rook-ceph
  csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
  csi.storage.k8s.io/controller-expand-secret-namespace: 'rook-ceph'
  csi.storage.k8s.io/fstype: ext4
  csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
  csi.storage.k8s.io/node-stage-secret-namespace: 'rook-ceph'
  csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
  csi.storage.k8s.io/provisioner-secret-namespace: 'rook-ceph'
  imageFeatures: layering
  imageFormat: "2"
reclaimPolicy: Delete
allowVolumeExpansion: true
volumeBindingMode: Immediate
---
# Source: rook-ceph-cluster/templates/cephfilesystem.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ceph-filesystem
  annotations:
    storageclass.kubernetes.io/is-default-class: "false"
provisioner: rook-ceph.cephfs.csi.ceph.com
parameters:
  fsName: ceph-filesystem
  pool: ceph-filesystem-data0
  clusterID: rook-ceph
  csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
  csi.storage.k8s.io/controller-expand-secret-namespace: 'rook-ceph'
  csi.storage.k8s.io/fstype: ext4
  csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
  csi.storage.k8s.io/node-stage-secret-namespace: 'rook-ceph'
  csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
  csi.storage.k8s.io/provisioner-secret-namespace: 'rook-ceph'
reclaimPolicy: Delete
allowVolumeExpansion: true
volumeBindingMode: Immediate
---
# Source: rook-ceph-cluster/templates/cephobjectstore.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ceph-bucket
provisioner: rook-ceph.ceph.rook.io/bucket
reclaimPolicy: Delete
volumeBindingMode: Immediate
parameters:
  objectStoreName: ceph-objectstore
  objectStoreNamespace: rook-ceph
  region: us-east-1
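# Illustrative sketch (not part of the rendered chart output): PersistentVolumeClaims
# that would consume the two StorageClasses above. The claim names and sizes are
# placeholders chosen for this example; adjust them to the workload.
#
# apiVersion: v1
# kind: PersistentVolumeClaim
# metadata:
#   name: example-rbd-pvc          # hypothetical name
# spec:
#   accessModes:
#     - ReadWriteOnce              # RBD-backed volumes are typically single-node
#   storageClassName: ceph-block
#   resources:
#     requests:
#       storage: 10Gi
# ---
# apiVersion: v1
# kind: PersistentVolumeClaim
# metadata:
#   name: example-cephfs-pvc       # hypothetical name
# spec:
#   accessModes:
#     - ReadWriteMany              # CephFS supports shared read-write access
#   storageClassName: ceph-filesystem
#   resources:
#     requests:
#       storage: 10Gi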
---
# Source: rook-ceph-cluster/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: rook-ceph-tools
  namespace: rook-ceph # namespace:cluster
  labels:
    app: rook-ceph-tools
spec:
  replicas: 1
  selector:
    matchLabels:
      app: rook-ceph-tools
  template:
    metadata:
      labels:
        app: rook-ceph-tools
    spec:
      dnsPolicy: ClusterFirstWithHostNet
      containers:
        - name: rook-ceph-tools
          image: quay.io/ceph/ceph:v18.2.0
          command:
            - /bin/bash
            - -c
            - |
              # Replicate the script from toolbox.sh inline so the ceph image
              # can be run directly, instead of requiring the rook toolbox
              CEPH_CONFIG="/etc/ceph/ceph.conf"
              MON_CONFIG="/etc/rook/mon-endpoints"
              KEYRING_FILE="/etc/ceph/keyring"

              # create a ceph config file in its default location so ceph/rados tools can be used
              # without specifying any arguments
              write_endpoints() {
                endpoints=$(cat ${MON_CONFIG})

                # filter out the mon names
                # external cluster can have numbers or hyphens in mon names, handling them in regex
                # shellcheck disable=SC2001
                mon_endpoints=$(echo "${endpoints}" | sed 's/[a-z0-9_-]\+=//g')

                DATE=$(date)
                echo "$DATE writing mon endpoints to ${CEPH_CONFIG}: ${endpoints}"
                cat <<EOF > ${CEPH_CONFIG}
              [global]
              mon_host = ${mon_endpoints}

              [client.admin]
              keyring = ${KEYRING_FILE}
              EOF
              }

              # watch the endpoints config file and update if the mon endpoints ever change
              watch_endpoints() {
                # get the timestamp for the target of the soft link
                real_path=$(realpath ${MON_CONFIG})
                initial_time=$(stat -c %Z "${real_path}")
                while true; do
                  real_path=$(realpath ${MON_CONFIG})
                  latest_time=$(stat -c %Z "${real_path}")
                  if [[ "${latest_time}" != "${initial_time}" ]]; then
                    write_endpoints
                    initial_time=${latest_time}
                  fi
                  sleep 10
                done
              }

              # read the secret from an env var (for backward compatibility), or from the secret file
              ceph_secret=${ROOK_CEPH_SECRET}
              if [[ "$ceph_secret" == "" ]]; then
                ceph_secret=$(cat /var/lib/rook-ceph-mon/secret.keyring)
              fi

              # create the keyring file
              cat <<EOF > ${KEYRING_FILE}
              [${ROOK_CEPH_USERNAME}]
              key = ${ceph_secret}
              EOF

              # write the initial config file
              write_endpoints

              # continuously update the mon endpoints if they fail over
              watch_endpoints
          imagePullPolicy: IfNotPresent
          tty: true
          securityContext:
            capabilities:
              drop:
              - ALL
            runAsGroup: 2016
            runAsNonRoot: true
            runAsUser: 2016
          env:
            - name: ROOK_CEPH_USERNAME
              valueFrom:
                secretKeyRef:
                  name: rook-ceph-mon
                  key: ceph-username
          resources:
            limits:
              cpu: 500m
              memory: 1Gi
            requests:
              cpu: 100m
              memory: 128Mi
          volumeMounts:
            - mountPath: /etc/ceph
              name: ceph-config
            - name: mon-endpoint-volume
              mountPath: /etc/rook
            - name: ceph-admin-secret
              mountPath: /var/lib/rook-ceph-mon
      volumes:
        - name: ceph-admin-secret
          secret:
            secretName: rook-ceph-mon
            optional: false
            items:
            - key: ceph-secret
              path: secret.keyring
        - name: mon-endpoint-volume
          configMap:
            name: rook-ceph-mon-endpoints
            items:
            - key: data
              path: mon-endpoints
        - name: ceph-config
          emptyDir: {}
      tolerations:
        - key: "node.kubernetes.io/unreachable"
          operator: "Exists"
          effect: "NoExecute"
          tolerationSeconds: 5
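# Illustrative usage (not part of the manifest): once the toolbox deployment above is
# running, Ceph CLI commands can be executed from it, for example:
#   kubectl -n rook-ceph exec -it deploy/rook-ceph-tools -- ceph status
#   kubectl -n rook-ceph exec -it deploy/rook-ceph-tools -- ceph osd status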
---
# Source: rook-ceph-cluster/templates/securityContextConstraints.yaml
# scc for the Rook and Ceph daemons
# for creating cluster in openshift
---
# Source: rook-ceph-cluster/templates/volumesnapshotclass.yaml
---
---
# Source: rook-ceph-cluster/templates/cephblockpool.yaml
apiVersion: ceph.rook.io/v1
kind: CephBlockPool
metadata:
  name: ceph-blockpool
  namespace: rook-ceph # namespace:cluster
spec:
  failureDomain: host
  replicated:
    size: 3
---
# Source: rook-ceph-cluster/templates/cephcluster.yaml
apiVersion: ceph.rook.io/v1
kind: CephCluster
metadata:
  name: rook-ceph
  namespace: rook-ceph # namespace:cluster
spec:
  monitoring:
    enabled: false
  cephVersion:
    allowUnsupported: false
    image: quay.io/ceph/ceph:v18.2.0
  cleanupPolicy:
    allowUninstallWithVolumes: false
    confirmation: ""
    sanitizeDisks:
      dataSource: zero
      iteration: 1
      method: quick
  continueUpgradeAfterChecksEvenIfNotHealthy: false
  crashCollector:
    disable: true
  dashboard:
    enabled: true
    port: 8080
    ssl: false
    urlPrefix: /ceph-dashboard
  dataDirHostPath: /var/lib/rook
  disruptionManagement:
    managePodBudgets: true
    osdMaintenanceTimeout: 30
    pgHealthCheckTimeout: 0
  healthCheck:
    daemonHealth:
      mon:
        disabled: false
        interval: 45s
      osd:
        disabled: false
        interval: 60s
      status:
        disabled: false
        interval: 60s
    livenessProbe:
      mgr:
        disabled: false
      mon:
        disabled: false
      osd:
        disabled: false
  logCollector:
    enabled: true
    maxLogSize: 500M
    periodicity: daily
  mgr:
    allowMultiplePerNode: false
    count: 2
    modules:
    - enabled: true
      name: pg_autoscaler
    - enabled: true
      name: rook
  mon:
    allowMultiplePerNode: false
    count: 3
  network:
    connections:
      compression:
        enabled: false
      encryption:
        enabled: false
      requireMsgr2: false
    hostNetwork: true
  placement:
    all:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
          - matchExpressions:
            - key: all
              operator: In
              values:
              - all
      podAffinity: null
      podAntiAffinity: null
      tolerations:
      - key: all
        operator: Exists
      topologySpreadConstraints: null
    mgr:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
          - matchExpressions:
            - key: mgr
              operator: In
              values:
              - mgr
      podAffinity: null
      podAntiAffinity: null
      tolerations:
      - key: mgr
        operator: Exists
      topologySpreadConstraints: null
    mon:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
          - matchExpressions:
            - key: mon
              operator: In
              values:
              - mon
      podAffinity: null
      podAntiAffinity: null
      tolerations:
      - key: mon
        operator: Exists
      topologySpreadConstraints: null
    osd:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
          - matchExpressions:
            - key: osd
              operator: In
              values:
              - osd
      podAffinity: null
      podAntiAffinity: null
      tolerations:
      - key: osd
        operator: Exists
      topologySpreadConstraints: null
  priorityClassNames:
    mgr: system-cluster-critical
    mon: system-node-critical
    osd: system-node-critical
  removeOSDsIfOutAndSafeToRemove: false
  resources:
    cleanup:
      limits:
        cpu: 500m
        memory: 1Gi
      requests:
        cpu: 500m
        memory: 100Mi
    crashcollector:
      limits:
        cpu: 500m
        memory: 60Mi
      requests:
        cpu: 100m
        memory: 60Mi
    exporter:
      limits:
        cpu: 250m
        memory: 128Mi
      requests:
        cpu: 50m
        memory: 50Mi
    logcollector:
      limits:
        cpu: 500m
        memory: 1Gi
      requests:
        cpu: 100m
        memory: 100Mi
    mgr:
      limits:
        cpu: 1000m
        memory: 1Gi
      requests:
        cpu: 500m
        memory: 512Mi
    mgr-sidecar:
      limits:
        cpu: 500m
        memory: 100Mi
      requests:
        cpu: 100m
        memory: 40Mi
    mon:
      limits:
        cpu: 2000m
        memory: 2Gi
      requests:
        cpu: 1000m
        memory: 1Gi
    osd:
      limits:
        cpu: 2000m
        memory: 3072Mi
      requests:
        cpu: 750m
        memory: 2048Mi
    prepareosd:
      requests:
        cpu: 500m
        memory: 50Mi
  skipUpgradeChecks: false
  storage:
    useAllDevices: true
    useAllNodes: true
  waitTimeoutForHealthyOSDInMinutes: 10
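# Illustrative note (not part of the manifest): with `dashboard.enabled: true`, `ssl: false`
# and `urlPrefix: /ceph-dashboard` set in the CephCluster above, the operator exposes the
# dashboard through an in-cluster Service (typically named rook-ceph-mgr-dashboard). A
# port-forward such as
#   kubectl -n rook-ceph port-forward svc/rook-ceph-mgr-dashboard 8080:8080
# would make it reachable at http://localhost:8080/ceph-dashboard. The Service name and port
# mapping are assumptions based on Rook defaults; verify with `kubectl -n rook-ceph get svc`.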
---
# Source: rook-ceph-cluster/templates/cephfilesystem.yaml
apiVersion: ceph.rook.io/v1
kind: CephFilesystem
metadata:
  name: ceph-filesystem
  namespace: rook-ceph # namespace:cluster
spec:
  dataPools:
  - failureDomain: host
    name: data0
    replicated:
      size: 3
  metadataPool:
    replicated:
      size: 3
  metadataServer:
    activeCount: 1
    activeStandby: true
    priorityClassName: system-cluster-critical
    resources:
      limits:
        cpu: 2000m
        memory: 4Gi
      requests:
        cpu: 1000m
        memory: 4Gi
---
# Source: rook-ceph-cluster/templates/cephfilesystem.yaml
apiVersion: ceph.rook.io/v1
kind: CephFilesystemSubVolumeGroup
metadata:
  name: ceph-filesystem-csi    # let's keep the svg crd name the same as `filesystem name + csi` for the default csi svg
  namespace: rook-ceph # namespace:cluster
spec:
  # The name of the subvolume group. If not set, the default is the name of the subvolumeGroup CR.
  name: csi
  # filesystemName is the metadata name of the CephFilesystem CR where the subvolume group will be created
  filesystemName: ceph-filesystem
  # reference https://docs.ceph.com/en/latest/cephfs/fs-volumes/#pinning-subvolumes-and-subvolume-groups
  # only one of (export, distributed, random) can be set at a time
  # by default, pinning is set with distributed=1
  # to disable the default, set distributed=0
  pinning:
    distributed: 1            # distributed=<0, 1> (disabled=0)
    # export:                 # export=<0-256> (disabled=-1)
    # random:                 # random=[0.0, 1.0] (disabled=0.0)
---
# Source: rook-ceph-cluster/templates/cephobjectstore.yaml
apiVersion: ceph.rook.io/v1
kind: CephObjectStore
metadata:
  name: ceph-objectstore
  namespace: rook-ceph # namespace:cluster
spec:
  dataPool:
    erasureCoded:
      codingChunks: 1
      dataChunks: 2
    failureDomain: host
  gateway:
    instances: 1
    port: 80
    priorityClassName: system-cluster-critical
    resources:
      limits:
        cpu: 2000m
        memory: 2Gi
      requests:
        cpu: 1000m
        memory: 1Gi
  metadataPool:
    failureDomain: host
    replicated:
      size: 3
  preservePoolsOnDelete: true
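# Illustrative sketch (not part of the rendered chart output): an ObjectBucketClaim against
# the ceph-bucket StorageClass defined earlier, which provisions an S3 bucket on the
# ceph-objectstore above. The claim name, bucket name, and namespace are placeholders.
#
# apiVersion: objectbucket.io/v1alpha1
# kind: ObjectBucketClaim
# metadata:
#   name: example-bucket-claim       # hypothetical name
#   namespace: default               # any application namespace
# spec:
#   generateBucketName: example-bucket
#   storageClassName: ceph-bucket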