releasing docker.dezendorf.net/code-server:43d66c7

Breandan Dezendorf, 2 years ago
Parent commit: 4dd3a6545c

+ 1 - 1
dezendorf/homelab/k3s/codeserver/codeserver.yaml

@@ -17,7 +17,7 @@ spec:
     spec:
       containers:
       - name: codeserver
-        image: docker.dezendorf.net/code-server:96bac41
+        image: docker.dezendorf.net/code-server:43d66c7
         ports:
         - name: web
           containerPort: 8080

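The tag bump above only takes effect once the manifest is re-applied; a minimal sketch, assuming the Deployment is also named codeserver and lives in the namespace the manifest targets (neither is shown in this hunk):

    kubectl apply -f dezendorf/homelab/k3s/codeserver/codeserver.yaml
    kubectl rollout status deployment/codeserver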
+ 5 - 0
dezendorf/homelab/talos/bin/bootstrap.sh

@@ -0,0 +1,5 @@
+helm repo add kubernetes-dashboard https://kubernetes.github.io/dashboard/
+helm template kubernetes-dashboard kubernetes-dashboard/kubernetes-dashboard > kubernetes-dashboard.yaml
+kubectl create ns kubernetes-dashboard
+kubectl apply --namespace kubernetes-dashboard -f kubernetes-dashboard.yaml
+

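bootstrap.sh renders the kubernetes-dashboard chart to a static manifest and applies it into a freshly created namespace. A quick post-run check might look like the following; the Service name and ports are assumptions based on the chart's defaults, not something this diff pins down:

    kubectl --namespace kubernetes-dashboard get pods
    kubectl --namespace kubernetes-dashboard port-forward svc/kubernetes-dashboard 8443:443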
+ 32 - 0
dezendorf/homelab/talos/bin/ceph-down.sh

@@ -0,0 +1,32 @@
+#!/bin/bash
+
+#kubectl --namespace rook-ceph delete cephcluster rook-ceph --wait
+
+#helm --namespace rook-ceph uninstall rook-ceph
+
+CLUSTER=$1-c
+OPERATOR=$1-o
+NAMESPACE=$1-ns
+
+echo "Patching cluster $CLUSTER to allow deletes"
+echo kubectl --namespace ${NAMESPACE} patch cephcluster ${CLUSTER} --type merge -p '{"spec":{"cleanupPolicy":{"confirmation":"yes-really-destroy-data"}}}'
+kubectl --namespace ${NAMESPACE} patch cephcluster ${CLUSTER} --type merge -p '{"spec":{"cleanupPolicy":{"confirmation":"yes-really-destroy-data"}}}'
+
+kubectl delete storageclasses ceph-block ceph-bucket ceph-filesystem --wait
+
+kubectl --namespace ${NAMESPACE} delete cephobjectstore ceph-objectstore --force
+kubectl --namespace ${NAMESPACE} delete cephfilesystem ceph-filesystem --force
+kubectl --namespace ${NAMESPACE} delete cephblockpools ceph-blockpool --force
+
+kubectl --namespace ${NAMESPACE} delete cephcluster ${CLUSTER} --wait
+
+helm --namespace ${NAMESPACE} uninstall ${CLUSTER}
+
+kubectl delete ns ${NAMESPACE} --wait
+
+for i in $(kubectl get crd | grep ceph | awk '{print $1}'); do kubectl delete crd $i ; done
+for i in $(kubectl get crd | grep objectbucket | awk '{print $1}'); do kubectl delete crd $i ; done
+for i in $(kubectl get clusterrole | grep ceph | awk '{print $1}'); do kubectl delete clusterrole $i ; done
+for i in $(kubectl get clusterrole | grep rbd | awk '{print $1}'); do kubectl delete clusterrole $i ; done
+for i in $(kubectl get clusterrole | grep objectstorage- | awk '{print $1}'); do kubectl delete clusterrole $i ; done
+for i in $(kubectl get clusterrolebinding | grep -E '(ceph|rook|rbd-csi|objectstorage-prov)' | awk '{print $1}' ) ; do kubectl delete clusterrolebinding $i ; done

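ceph-down.sh derives every name it touches from its single argument ($1-c for the CephCluster release, $1-o for the operator release, $1-ns for the namespace), so tearing down a cluster created with a hypothetical prefix rook would be:

    ./ceph-down.sh rook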
+ 39 - 0
dezendorf/homelab/talos/bin/ceph-up.sh

@@ -0,0 +1,39 @@
+#!/bin/bash
+
+CLUSTER=$1-c
+OPERATOR=$1-o
+NAMESPACE=$1-ns
+
+echo $CLUSTER
+echo $NAMESPACE
+
+echo "Approving outstanding CSRS"
+
+for i in $(kubectl get csr  --sort-by=.metadata.creationTimestamp | grep -E '^csr-' | grep Pending | awk '{print $1}') ; do kubectl certificate approve $i ; done
+
+echo "Creating namespace ${NAMESPACE}"
+kubectl create ns ${NAMESPACE}
+
+echo "Adding 'rook-release' helm chart"
+helm repo add rook-release "https://charts.rook.io/release"
+
+echo "Setting ${NAMESPACE} to enforce=privileged"
+kubectl label ns ${NAMESPACE} pod-security.kubernetes.io/enforce=privileged
+kubectl label ns default pod-security.kubernetes.io/enforce=privileged
+
+echo "Installing operator ${OPERATOR} for cluster ${CLUSTER} in namespace ${NAMESPACE}" 
+echo helm install --create-namespace --namespace ${NAMESPACE} ${OPERATOR} rook-release/rook-ceph --values operator-values.yaml
+helm install --create-namespace --namespace ${NAMESPACE} ${OPERATOR} rook-release/rook-ceph --values operator-values.yaml
+
+echo "sleeping for 30 seconds"
+sleep 30
+echo "resuming"
+
+echo "Telling operator ${OPERATOR} to create cluster ${CLUSTER} in namespace ${NAMESPACE}"
+echo helm install --create-namespace --namespace ${NAMESPACE} ${OPERATOR}-operator --set operatorNamespace=${NAMESPACE} rook-release/rook-ceph-cluster --values ceph-values.yaml
+helm install --create-namespace --namespace ${NAMESPACE} ${OPERATOR}-operator --set operatorNamespace=${NAMESPACE} rook-release/rook-ceph-cluster --values ceph-values.yaml
+
+
+#echo "Setting up ceph-toolbox"
+#echo kubectl apply --namespace ${NAMESPACE} -f toolbox.yaml
+#kubectl apply --namespace ${NAMESPACE} -f toolbox.yaml

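ceph-up.sh follows the same naming convention and expects operator-values.yaml and ceph-values.yaml in the working directory (not shown in this diff). A sketch of a full up/down cycle with a hypothetical prefix rook:

    cd dezendorf/homelab/talos/bin
    ./ceph-up.sh rook      # installs the rook-ceph operator and rook-ceph-cluster charts into rook-ns
    ./ceph-down.sh rook    # patches, deletes, and sweeps everything back out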
+ 1861 - 0
dezendorf/homelab/talos/bin/create-external-cluster-resources.py
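This is Rook's upstream helper for generating the resources a consumer of an external Ceph cluster needs; its own help text (in the argparse definitions below) gives sample invocations such as:

    python3 create-external-cluster-resources.py --cephfs-filesystem-name myfs --rbd-data-pool-name replicapool --cluster-name rookstorage --restricted-auth-permission true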

@@ -0,0 +1,1861 @@
+"""
+Copyright 2020 The Rook Authors. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import errno
+import sys
+import json
+import argparse
+import re
+import subprocess
+import hmac
+from hashlib import sha1 as sha
+from os import linesep as LINESEP
+from os import path
+from email.utils import formatdate
+import requests
+from requests.auth import AuthBase
+
+py3k = False
+if sys.version_info.major >= 3:
+    py3k = True
+    import urllib.parse
+    from ipaddress import ip_address, IPv4Address
+
+ModuleNotFoundError = ImportError
+
+try:
+    import rados
+except ModuleNotFoundError as noModErr:
+    print(f"Error: {noModErr}\nExiting the script...")
+    sys.exit(1)
+
+try:
+    import rbd
+except ModuleNotFoundError as noModErr:
+    print(f"Error: {noModErr}\nExiting the script...")
+    sys.exit(1)
+
+try:
+    # for 2.7.x
+    from StringIO import StringIO
+except ModuleNotFoundError:
+    # for 3.x
+    from io import StringIO
+
+try:
+    # for 2.7.x
+    from urlparse import urlparse
+except ModuleNotFoundError:
+    # for 3.x
+    from urllib.parse import urlparse
+
+try:
+    from base64 import encodestring
+except:
+    from base64 import encodebytes as encodestring
+
+
+class ExecutionFailureException(Exception):
+    pass
+
+
+################################################
+################## DummyRados ##################
+################################################
+# this is mainly for testing and could be used where 'rados' is not available
+
+
+class DummyRados(object):
+    def __init__(self):
+        self.return_val = 0
+        self.err_message = ""
+        self.state = "connected"
+        self.cmd_output_map = {}
+        self.cmd_names = {}
+        self._init_cmd_output_map()
+        self.dummy_host_ip_map = {}
+
+    def _init_cmd_output_map(self):
+        json_file_name = "test-data/ceph-status-out"
+        script_dir = path.abspath(path.dirname(__file__))
+        ceph_status_str = ""
+        with open(
+            path.join(script_dir, json_file_name), mode="r", encoding="UTF-8"
+        ) as json_file:
+            ceph_status_str = json_file.read()
+        self.cmd_names["fs ls"] = """{"format": "json", "prefix": "fs ls"}"""
+        self.cmd_names[
+            "quorum_status"
+        ] = """{"format": "json", "prefix": "quorum_status"}"""
+        self.cmd_names[
+            "mgr services"
+        ] = """{"format": "json", "prefix": "mgr services"}"""
+        # all the commands and their output
+        self.cmd_output_map[
+            self.cmd_names["fs ls"]
+        ] = """[{"name":"myfs","metadata_pool":"myfs-metadata","metadata_pool_id":2,"data_pool_ids":[3],"data_pools":["myfs-replicated"]}]"""
+        self.cmd_output_map[
+            self.cmd_names["quorum_status"]
+        ] = """{"election_epoch":3,"quorum":[0],"quorum_names":["a"],"quorum_leader_name":"a","quorum_age":14385,"features":{"quorum_con":"4540138292836696063","quorum_mon":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus"]},"monmap":{"epoch":1,"fsid":"af4e1673-0b72-402d-990a-22d2919d0f1c","modified":"2020-05-07T03:36:39.918035Z","created":"2020-05-07T03:36:39.918035Z","min_mon_release":15,"min_mon_release_name":"octopus","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus"],"optional":[]},"mons":[{"rank":0,"name":"a","public_addrs":{"addrvec":[{"type":"v2","addr":"10.110.205.174:3300","nonce":0},{"type":"v1","addr":"10.110.205.174:6789","nonce":0}]},"addr":"10.110.205.174:6789/0","public_addr":"10.110.205.174:6789/0","priority":0,"weight":0}]}}"""
+        self.cmd_output_map[
+            self.cmd_names["mgr services"]
+        ] = """{"dashboard":"https://ceph-dashboard:8443/","prometheus":"http://ceph-dashboard-db:9283/"}"""
+        self.cmd_output_map[
+            """{"caps": ["mon", "allow r, allow command quorum_status", "osd", "allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow x pool=default.rgw.buckets.index"], "entity": "client.healthchecker", "format": "json", "prefix": "auth get-or-create"}"""
+        ] = """[{"entity":"client.healthchecker","key":"AQDFkbNeft5bFRAATndLNUSEKruozxiZi3lrdA==","caps":{"mon":"allow r, allow command quorum_status","osd":"allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow x pool=default.rgw.buckets.index"}}]"""
+        self.cmd_output_map[
+            """{"caps": ["mon", "profile rbd, allow command 'osd blocklist'", "osd", "profile rbd"], "entity": "client.csi-rbd-node", "format": "json", "prefix": "auth get-or-create"}"""
+        ] = """[{"entity":"client.csi-rbd-node","key":"AQBOgrNeHbK1AxAAubYBeV8S1U/GPzq5SVeq6g==","caps":{"mon":"profile rbd, allow command 'osd blocklist'","osd":"profile rbd"}}]"""
+        self.cmd_output_map[
+            """{"caps": ["mon", "profile rbd, allow command 'osd blocklist'", "mgr", "allow rw", "osd", "profile rbd"], "entity": "client.csi-rbd-provisioner", "format": "json", "prefix": "auth get-or-create"}"""
+        ] = """[{"entity":"client.csi-rbd-provisioner","key":"AQBNgrNe1geyKxAA8ekViRdE+hss5OweYBkwNg==","caps":{"mgr":"allow rw","mon":"profile rbd, allow command 'osd blocklist'","osd":"profile rbd"}}]"""
+        self.cmd_output_map[
+            """{"caps": ["mon", "allow r, allow command 'osd blocklist'", "mgr", "allow rw", "osd", "allow rw tag cephfs *=*", "mds", "allow rw"], "entity": "client.csi-cephfs-node", "format": "json", "prefix": "auth get-or-create"}"""
+        ] = """[{"entity":"client.csi-cephfs-node","key":"AQBOgrNeENunKxAAPCmgE7R6G8DcXnaJ1F32qg==","caps":{"mds":"allow rw","mgr":"allow rw","mon":"allow r, allow command 'osd blocklist'","osd":"allow rw tag cephfs *=*"}}]"""
+        self.cmd_output_map[
+            """{"caps": ["mon", "allow r, allow command 'osd blocklist'", "mgr", "allow rw", "osd", "allow rw tag cephfs metadata=*"], "entity": "client.csi-cephfs-provisioner", "format": "json", "prefix": "auth get-or-create"}"""
+        ] = """[{"entity":"client.csi-cephfs-provisioner","key":"AQBOgrNeAFgcGBAAvGqKOAD0D3xxmVY0R912dg==","caps":{"mgr":"allow rw","mon":"allow r, allow command 'osd blocklist'","osd":"allow rw tag cephfs metadata=*"}}]"""
+        self.cmd_output_map[
+            """{"caps": ["mon", "allow r, allow command 'osd blocklist'", "mgr", "allow rw", "osd", "allow rw tag cephfs metadata=*"], "entity": "client.csi-cephfs-provisioner-openshift-storage", "format": "json", "prefix": "auth get-or-create"}"""
+        ] = """[{"entity":"client.csi-cephfs-provisioner-openshift-storage","key":"BQBOgrNeAFgcGBAAvGqKOAD0D3xxmVY0R912dg==","caps":{"mgr":"allow rw","mon":"allow r, allow command 'osd blocklist'","osd":"allow rw tag cephfs metadata=*"}}]"""
+        self.cmd_output_map[
+            """{"caps": ["mon", "allow r, allow command 'osd blocklist'", "mgr", "allow rw", "osd", "allow rw tag cephfs metadata=myfs"], "entity": "client.csi-cephfs-provisioner-openshift-storage-myfs", "format": "json", "prefix": "auth get-or-create"}"""
+        ] = """[{"entity":"client.csi-cephfs-provisioner-openshift-storage-myfs","key":"CQBOgrNeAFgcGBAAvGqKOAD0D3xxmVY0R912dg==","caps":{"mgr":"allow rw","mon":"allow r, allow command 'osd blocklist'","osd":"allow rw tag cephfs metadata=myfs"}}]"""
+        self.cmd_output_map[
+            """{"caps": ["mon", "allow r, allow command quorum_status, allow command version", "mgr", "allow command config", "osd", "allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow rx pool=default.rgw.log, allow x pool=default.rgw.buckets.index"], "entity": "client.healthchecker", "format": "json", "prefix": "auth get-or-create"}"""
+        ] = """[{"entity":"client.healthchecker","key":"AQDFkbNeft5bFRAATndLNUSEKruozxiZi3lrdA==","caps":{"mon": "allow r, allow command quorum_status, allow command version", "mgr": "allow command config", "osd": "allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow rx pool=default.rgw.log, allow x pool=default.rgw.buckets.index"}}]"""
+        self.cmd_output_map[
+            """{"format": "json", "prefix": "mgr services"}"""
+        ] = """{"dashboard": "http://rook-ceph-mgr-a-57cf9f84bc-f4jnl:7000/", "prometheus": "http://rook-ceph-mgr-a-57cf9f84bc-f4jnl:9283/"}"""
+        self.cmd_output_map[
+            """{"entity": "client.healthchecker", "format": "json", "prefix": "auth get"}"""
+        ] = """{"dashboard": "http://rook-ceph-mgr-a-57cf9f84bc-f4jnl:7000/", "prometheus": "http://rook-ceph-mgr-a-57cf9f84bc-f4jnl:9283/"}"""
+        self.cmd_output_map[
+            """{"entity": "client.healthchecker", "format": "json", "prefix": "auth get"}"""
+        ] = """[{"entity":"client.healthchecker","key":"AQDFkbNeft5bFRAATndLNUSEKruozxiZi3lrdA==","caps":{"mon": "allow r, allow command quorum_status, allow command version", "mgr": "allow command config", "osd": "allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow rx pool=default.rgw.log, allow x pool=default.rgw.buckets.index"}}]"""
+        self.cmd_output_map[
+            """{"entity": "client.csi-cephfs-node", "format": "json", "prefix": "auth get"}"""
+        ] = """[]"""
+        self.cmd_output_map[
+            """{"entity": "client.csi-rbd-node", "format": "json", "prefix": "auth get"}"""
+        ] = """[]"""
+        self.cmd_output_map[
+            """{"entity": "client.csi-rbd-provisioner", "format": "json", "prefix": "auth get"}"""
+        ] = """[]"""
+        self.cmd_output_map[
+            """{"entity": "client.csi-cephfs-provisioner", "format": "json", "prefix": "auth get"}"""
+        ] = """[]"""
+        self.cmd_output_map[
+            """{"entity": "client.csi-cephfs-provisioner-openshift-storage", "format": "json", "prefix": "auth get"}"""
+        ] = """[]"""
+        self.cmd_output_map[
+            """{"entity": "client.csi-cephfs-provisioner-openshift-storage-myfs", "format": "json", "prefix": "auth get"}"""
+        ] = """[]"""
+        self.cmd_output_map[
+            """{"entity": "client.csi-cephfs-provisioner", "format": "json", "prefix": "auth get"}"""
+        ] = """[{"entity":"client.csi-cephfs-provisioner","key":"AQDFkbNeft5bFRAATndLNUSEKruozxiZi3lrdA==","caps":{"mon":"allow r", "mgr":"allow rw", "osd":"allow rw tag cephfs metadata=*"}}]"""
+        self.cmd_output_map[
+            """{"caps": ["mon", "allow r, allow command 'osd blocklist'", "mgr", "allow rw", "osd", "allow rw tag cephfs metadata=*"], "entity": "client.csi-cephfs-provisioner", "format": "json", "prefix": "auth caps"}"""
+        ] = """[{"entity":"client.csi-cephfs-provisioner","key":"AQDFkbNeft5bFRAATndLNUSEKruozxiZi3lrdA==","caps":{"mon":"allow r,  allow command 'osd blocklist'", "mgr":"allow rw", "osd":"allow rw tag cephfs metadata=*"}}]"""
+        self.cmd_output_map['{"format": "json", "prefix": "status"}'] = ceph_status_str
+
+    def shutdown(self):
+        pass
+
+    def get_fsid(self):
+        return "af4e1673-0b72-402d-990a-22d2919d0f1c"
+
+    def conf_read_file(self):
+        pass
+
+    def connect(self):
+        pass
+
+    def pool_exists(self, pool_name):
+        return True
+
+    def mon_command(self, cmd, out):
+        json_cmd = json.loads(cmd)
+        json_cmd_str = json.dumps(json_cmd, sort_keys=True)
+        cmd_output = self.cmd_output_map[json_cmd_str]
+        return self.return_val, cmd_output, str(self.err_message.encode("utf-8"))
+
+    def _convert_hostname_to_ip(self, host_name):
+        ip_reg_x = re.compile(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}")
+        # if provided host is directly an IP address, return the same
+        if ip_reg_x.match(host_name):
+            return host_name
+        import random
+
+        host_ip = self.dummy_host_ip_map.get(host_name, "")
+        if not host_ip:
+            host_ip = f"172.9.{random.randint(0, 254)}.{random.randint(0, 254)}"
+            self.dummy_host_ip_map[host_name] = host_ip
+        del random
+        return host_ip
+
+    @classmethod
+    def Rados(conffile=None):
+        return DummyRados()
+
+
+class S3Auth(AuthBase):
+
+    """Attaches AWS Authentication to the given Request object."""
+
+    service_base_url = "s3.amazonaws.com"
+
+    def __init__(self, access_key, secret_key, service_url=None):
+        if service_url:
+            self.service_base_url = service_url
+        self.access_key = str(access_key)
+        self.secret_key = str(secret_key)
+
+    def __call__(self, r):
+        # Create date header if it is not created yet.
+        if "date" not in r.headers and "x-amz-date" not in r.headers:
+            r.headers["date"] = formatdate(timeval=None, localtime=False, usegmt=True)
+        signature = self.get_signature(r)
+        if py3k:
+            signature = signature.decode("utf-8")
+        r.headers["Authorization"] = f"AWS {self.access_key}:{signature}"
+        return r
+
+    def get_signature(self, r):
+        canonical_string = self.get_canonical_string(r.url, r.headers, r.method)
+        if py3k:
+            key = self.secret_key.encode("utf-8")
+            msg = canonical_string.encode("utf-8")
+        else:
+            key = self.secret_key
+            msg = canonical_string
+        h = hmac.new(key, msg, digestmod=sha)
+        return encodestring(h.digest()).strip()
+
+    def get_canonical_string(self, url, headers, method):
+        parsedurl = urlparse(url)
+        objectkey = parsedurl.path[1:]
+
+        bucket = parsedurl.netloc[: -len(self.service_base_url)]
+        if len(bucket) > 1:
+            # remove last dot
+            bucket = bucket[:-1]
+
+        interesting_headers = {"content-md5": "", "content-type": "", "date": ""}
+        for key in headers:
+            lk = key.lower()
+            try:
+                lk = lk.decode("utf-8")
+            except:
+                pass
+            if headers[key] and (
+                lk in interesting_headers.keys() or lk.startswith("x-amz-")
+            ):
+                interesting_headers[lk] = headers[key].strip()
+
+        # If x-amz-date is used it supersedes the date header.
+        if not py3k:
+            if "x-amz-date" in interesting_headers:
+                interesting_headers["date"] = ""
+        else:
+            if "x-amz-date" in interesting_headers:
+                interesting_headers["date"] = ""
+
+        buf = f"{method}\n"
+        for key in sorted(interesting_headers.keys()):
+            val = interesting_headers[key]
+            if key.startswith("x-amz-"):
+                buf += f"{key}:{val}\n"
+            else:
+                buf += f"{val}\n"
+
+        # append the bucket if it exists
+        if bucket != "":
+            buf += f"/{bucket}"
+
+        # add the objectkey. even if it doesn't exist, add the slash
+        buf += f"/{objectkey}"
+
+        return buf
+
+
+class RadosJSON:
+    EXTERNAL_USER_NAME = "client.healthchecker"
+    EXTERNAL_RGW_ADMIN_OPS_USER_NAME = "rgw-admin-ops-user"
+    EMPTY_OUTPUT_LIST = "Empty output list"
+    DEFAULT_RGW_POOL_PREFIX = "default"
+    DEFAULT_MONITORING_ENDPOINT_PORT = "9283"
+
+    @classmethod
+    def gen_arg_parser(cls, args_to_parse=None):
+        argP = argparse.ArgumentParser()
+
+        common_group = argP.add_argument_group("common")
+        common_group.add_argument("--verbose", "-v", action="store_true", default=False)
+        common_group.add_argument(
+            "--ceph-conf", "-c", help="Provide a ceph conf file.", type=str
+        )
+        common_group.add_argument(
+            "--keyring", "-k", help="Path to ceph keyring file.", type=str
+        )
+        common_group.add_argument(
+            "--run-as-user",
+            "-u",
+            default="",
+            type=str,
+            help="Provides a user name to check the cluster's health status, must be prefixed by 'client.'",
+        )
+        common_group.add_argument(
+            "--cluster-name", default="", help="Ceph cluster name"
+        )
+        common_group.add_argument(
+            "--namespace",
+            default="",
+            help="Namespace where CephCluster is running",
+        )
+        common_group.add_argument(
+            "--rgw-pool-prefix", default="", help="RGW Pool prefix"
+        )
+        common_group.add_argument(
+            "--restricted-auth-permission",
+            default=False,
+            help="Restrict cephCSIKeyrings auth permissions to specific pools, cluster."
+            + "Mandatory flags that need to be set are --rbd-data-pool-name, and --cluster-name."
+            + "--cephfs-filesystem-name flag can also be passed in case of cephfs user restriction, so it can restrict user to particular cephfs filesystem"
+            + "sample run: `python3 /etc/ceph/create-external-cluster-resources.py --cephfs-filesystem-name myfs --rbd-data-pool-name replicapool --cluster-name rookstorage --restricted-auth-permission true`"
+            + "Note: Restricting the csi-users per pool, and per cluster will require creating new csi-users and new secrets for that csi-users."
+            + "So apply these secrets only to new `Consumer cluster` deployment while using the same `Source cluster`.",
+        )
+
+        output_group = argP.add_argument_group("output")
+        output_group.add_argument(
+            "--format",
+            "-t",
+            choices=["json", "bash"],
+            default="json",
+            help="Provides the output format (json | bash)",
+        )
+        output_group.add_argument(
+            "--output",
+            "-o",
+            default="",
+            help="Output will be stored into the provided file",
+        )
+        output_group.add_argument(
+            "--cephfs-filesystem-name",
+            default="",
+            help="Provides the name of the Ceph filesystem",
+        )
+        output_group.add_argument(
+            "--cephfs-metadata-pool-name",
+            default="",
+            help="Provides the name of the cephfs metadata pool",
+        )
+        output_group.add_argument(
+            "--cephfs-data-pool-name",
+            default="",
+            help="Provides the name of the cephfs data pool",
+        )
+        output_group.add_argument(
+            "--rbd-data-pool-name",
+            default="",
+            required=False,
+            help="Provides the name of the RBD datapool",
+        )
+        output_group.add_argument(
+            "--alias-rbd-data-pool-name",
+            default="",
+            required=False,
+            help="Provides an alias for the  RBD data pool name, necessary if a special character is present in the pool name such as a period or underscore",
+        )
+        output_group.add_argument(
+            "--rgw-endpoint",
+            default="",
+            required=False,
+            help="RADOS Gateway endpoint (in `<IPv4>:<PORT>` or `<[IPv6]>:<PORT>` or `<FQDN>:<PORT>` format)",
+        )
+        output_group.add_argument(
+            "--rgw-tls-cert-path",
+            default="",
+            required=False,
+            help="RADOS Gateway endpoint TLS certificate",
+        )
+        output_group.add_argument(
+            "--rgw-skip-tls",
+            required=False,
+            default=False,
+            help="Ignore TLS certification validation when a self-signed certificate is provided (NOT RECOMMENDED)",
+        )
+        output_group.add_argument(
+            "--monitoring-endpoint",
+            default="",
+            required=False,
+            help="Ceph Manager prometheus exporter endpoints (comma separated list of (format `<IPv4>` or `<[IPv6]>` or `<FQDN>`) entries of active and standby mgrs)",
+        )
+        output_group.add_argument(
+            "--monitoring-endpoint-port",
+            default="",
+            required=False,
+            help="Ceph Manager prometheus exporter port",
+        )
+        output_group.add_argument(
+            "--skip-monitoring-endpoint",
+            default=False,
+            action="store_true",
+            help="Do not check for a monitoring endpoint for the Ceph cluster",
+        )
+        output_group.add_argument(
+            "--rbd-metadata-ec-pool-name",
+            default="",
+            required=False,
+            help="Provides the name of erasure coded RBD metadata pool",
+        )
+        output_group.add_argument(
+            "--dry-run",
+            default=False,
+            action="store_true",
+            help="Dry run prints the executed commands without running them",
+        )
+        output_group.add_argument(
+            "--rados-namespace",
+            default="",
+            required=False,
+            help="divides a pool into separate logical namespaces",
+        )
+        output_group.add_argument(
+            "--subvolume-group",
+            default="",
+            required=False,
+            help="provides the name of the subvolume group",
+        )
+        output_group.add_argument(
+            "--rgw-realm-name",
+            default="",
+            required=False,
+            help="provides the name of the rgw-realm",
+        )
+        output_group.add_argument(
+            "--rgw-zone-name",
+            default="",
+            required=False,
+            help="provides the name of the rgw-zone",
+        )
+        output_group.add_argument(
+            "--rgw-zonegroup-name",
+            default="",
+            required=False,
+            help="provides the name of the rgw-zonegroup",
+        )
+
+        upgrade_group = argP.add_argument_group("upgrade")
+        upgrade_group.add_argument(
+            "--upgrade",
+            action="store_true",
+            default=False,
+            help="Upgrades the cephCSIKeyrings(For example: client.csi-cephfs-provisioner) with new permissions needed for the new cluster version and older permission will still be applied."
+            + "Sample run: `python3 /etc/ceph/create-external-cluster-resources.py --upgrade`, this will upgrade all the default csi users(non-restricted)"
+            + "For restricted users(For example: client.csi-cephfs-provisioner-openshift-storage-myfs), users created using --restricted-auth-permission flag need to pass mandatory flags"
+            + "mandatory flags: '--rbd-data-pool-name, --cluster-name and --run-as-user' flags while upgrading"
+            + "in case of cephfs users if you have passed --cephfs-filesystem-name flag while creating user then while upgrading it will be mandatory too"
+            + "Sample run: `python3 /etc/ceph/create-external-cluster-resources.py --upgrade --rbd-data-pool-name replicapool --cluster-name rookstorage  --run-as-user client.csi-rbd-node-rookstorage-replicapool`"
+            + "PS: An existing non-restricted user cannot be converted to a restricted user by upgrading."
+            + "Upgrade flag should only be used to append new permissions to users, it shouldn't be used for changing user already applied permission, for example you shouldn't change in which pool user has access",
+        )
+
+        if args_to_parse:
+            assert (
+                type(args_to_parse) == list
+            ), "Argument to 'gen_arg_parser' should be a list"
+        else:
+            args_to_parse = sys.argv[1:]
+        return argP.parse_args(args_to_parse)
+
+    def validate_rbd_metadata_ec_pool_name(self):
+        if self._arg_parser.rbd_metadata_ec_pool_name:
+            rbd_metadata_ec_pool_name = self._arg_parser.rbd_metadata_ec_pool_name
+            rbd_pool_name = self._arg_parser.rbd_data_pool_name
+
+            if rbd_pool_name == "":
+                raise ExecutionFailureException(
+                    "Flag '--rbd-data-pool-name' should not be empty"
+                )
+
+            if rbd_metadata_ec_pool_name == "":
+                raise ExecutionFailureException(
+                    "Flag '--rbd-metadata-ec-pool-name' should not be empty"
+                )
+
+            cmd_json = {"prefix": "osd dump", "format": "json"}
+            ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
+            if ret_val != 0 or len(json_out) == 0:
+                raise ExecutionFailureException(
+                    f"{cmd_json['prefix']} command failed.\n"
+                    f"Error: {err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST}"
+                )
+            metadata_pool_exist, pool_exist = False, False
+
+            for key in json_out["pools"]:
+                # if erasure_code_profile is empty and pool name exists then it is a replica pool
+                if (
+                    key["erasure_code_profile"] == ""
+                    and key["pool_name"] == rbd_metadata_ec_pool_name
+                ):
+                    metadata_pool_exist = True
+                # if erasure_code_profile is not empty and pool name exists then it is ec pool
+                if key["erasure_code_profile"] and key["pool_name"] == rbd_pool_name:
+                    pool_exist = True
+
+            if not metadata_pool_exist:
+                raise ExecutionFailureException(
+                    "Provided rbd_ec_metadata_pool name,"
+                    f" {rbd_metadata_ec_pool_name}, does not exist"
+                )
+            if not pool_exist:
+                raise ExecutionFailureException(
+                    f"Provided rbd_data_pool name, {rbd_pool_name}, does not exist"
+                )
+            return rbd_metadata_ec_pool_name
+
+    def dry_run(self, msg):
+        if self._arg_parser.dry_run:
+            print("Execute: " + "'" + msg + "'")
+
+    def validate_rgw_endpoint_tls_cert(self):
+        if self._arg_parser.rgw_tls_cert_path:
+            with open(self._arg_parser.rgw_tls_cert_path, encoding="utf8") as f:
+                contents = f.read()
+                return contents.rstrip()
+
+    def _check_conflicting_options(self):
+        if not self._arg_parser.upgrade and not self._arg_parser.rbd_data_pool_name:
+            raise ExecutionFailureException(
+                "Either '--upgrade' or '--rbd-data-pool-name <pool_name>' should be specified"
+            )
+
+    def _invalid_endpoint(self, endpoint_str):
+        # separating the port by taking the last split on the ':' delimiter
+        try:
+            endpoint_str_ip, port = endpoint_str.rsplit(":", 1)
+        except ValueError:
+            raise ExecutionFailureException(f"Not a proper endpoint: {endpoint_str}")
+
+        try:
+            if endpoint_str_ip[0] == "[":
+                endpoint_str_ip = endpoint_str_ip[1 : len(endpoint_str_ip) - 1]
+            ip_type = (
+                "IPv4" if type(ip_address(endpoint_str_ip)) is IPv4Address else "IPv6"
+            )
+        except ValueError:
+            ip_type = "FQDN"
+        if not port.isdigit():
+            raise ExecutionFailureException(f"Port not valid: {port}")
+        intPort = int(port)
+        if intPort < 1 or intPort > 2**16 - 1:
+            raise ExecutionFailureException(f"Out of range port number: {port}")
+
+        return ip_type
+
+    def endpoint_dial(self, endpoint_str, ip_type, timeout=3, cert=None):
+        # if the 'cluster' instance is a dummy one,
+        # don't try to reach out to the endpoint
+        if isinstance(self.cluster, DummyRados):
+            return "", "", ""
+        if ip_type == "IPv6":
+            try:
+                endpoint_str_ip, endpoint_str_port = endpoint_str.rsplit(":", 1)
+            except ValueError:
+                raise ExecutionFailureException(
+                    f"Not a proper endpoint: {endpoint_str}"
+                )
+            if endpoint_str_ip[0] != "[":
+                endpoint_str_ip = "[" + endpoint_str_ip + "]"
+            endpoint_str = ":".join([endpoint_str_ip, endpoint_str_port])
+
+        protocols = ["http", "https"]
+        response_error = None
+        for prefix in protocols:
+            try:
+                ep = f"{prefix}://{endpoint_str}"
+                verify = None
+                # If verify is set to a path to a directory,
+                # the directory must have been processed using the c_rehash utility supplied with OpenSSL.
+                if prefix == "https" and self._arg_parser.rgw_skip_tls:
+                    verify = False
+                    r = requests.head(ep, timeout=timeout, verify=False)
+                elif prefix == "https" and cert:
+                    verify = cert
+                    r = requests.head(ep, timeout=timeout, verify=cert)
+                else:
+                    r = requests.head(ep, timeout=timeout)
+                if r.status_code == 200:
+                    return prefix, verify, ""
+            except Exception as err:
+                response_error = err
+                continue
+        sys.stderr.write(
+            f"unable to connect to endpoint: {endpoint_str}, failed error: {response_error}"
+        )
+        return (
+            "",
+            "",
+            ("-1"),
+        )
+
+    def __init__(self, arg_list=None):
+        self.out_map = {}
+        self._excluded_keys = set()
+        self._arg_parser = self.gen_arg_parser(args_to_parse=arg_list)
+        self._check_conflicting_options()
+        self.run_as_user = self._arg_parser.run_as_user
+        self.output_file = self._arg_parser.output
+        self.ceph_conf = self._arg_parser.ceph_conf
+        self.ceph_keyring = self._arg_parser.keyring
+        self.MIN_USER_CAP_PERMISSIONS = {
+            "mgr": "allow command config",
+            "mon": "allow r, allow command quorum_status, allow command version",
+            "osd": "allow rwx pool={0}.rgw.meta, "
+            + "allow r pool=.rgw.root, "
+            + "allow rw pool={0}.rgw.control, "
+            + "allow rx pool={0}.rgw.log, "
+            + "allow x pool={0}.rgw.buckets.index",
+        }
+        # if user not provided, give a default user
+        if not self.run_as_user and not self._arg_parser.upgrade:
+            self.run_as_user = self.EXTERNAL_USER_NAME
+        if not self._arg_parser.rgw_pool_prefix and not self._arg_parser.upgrade:
+            self._arg_parser.rgw_pool_prefix = self.DEFAULT_RGW_POOL_PREFIX
+        if self.ceph_conf:
+            kwargs = {}
+            if self.ceph_keyring:
+                kwargs["conf"] = {"keyring": self.ceph_keyring}
+            self.cluster = rados.Rados(conffile=self.ceph_conf, **kwargs)
+        else:
+            self.cluster = rados.Rados()
+            self.cluster.conf_read_file()
+        self.cluster.connect()
+
+    def shutdown(self):
+        if self.cluster.state == "connected":
+            self.cluster.shutdown()
+
+    def get_fsid(self):
+        if self._arg_parser.dry_run:
+            return self.dry_run("ceph fsid")
+        return str(self.cluster.get_fsid())
+
+    def _common_cmd_json_gen(self, cmd_json):
+        cmd = json.dumps(cmd_json, sort_keys=True)
+        ret_val, cmd_out, err_msg = self.cluster.mon_command(cmd, b"")
+        if self._arg_parser.verbose:
+            print(f"Command Input: {cmd}")
+            print(
+                f"Return Val: {ret_val}\nCommand Output: {cmd_out}\n"
+                f"Error Message: {err_msg}\n----------\n"
+            )
+        json_out = {}
+        # if there is no error (i.e; ret_val is ZERO) and 'cmd_out' is not empty
+        # then convert 'cmd_out' to a json output
+        if ret_val == 0 and cmd_out:
+            json_out = json.loads(cmd_out)
+        return ret_val, json_out, err_msg
+
+    def get_ceph_external_mon_data(self):
+        cmd_json = {"prefix": "quorum_status", "format": "json"}
+        if self._arg_parser.dry_run:
+            return self.dry_run("ceph " + cmd_json["prefix"])
+        ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
+        # if there is an unsuccessful attempt,
+        if ret_val != 0 or len(json_out) == 0:
+            raise ExecutionFailureException(
+                "'quorum_status' command failed.\n"
+                f"Error: {err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST}"
+            )
+        q_leader_name = json_out["quorum_leader_name"]
+        q_leader_details = {}
+        q_leader_matching_list = [
+            l for l in json_out["monmap"]["mons"] if l["name"] == q_leader_name
+        ]
+        if len(q_leader_matching_list) == 0:
+            raise ExecutionFailureException("No matching 'mon' details found")
+        q_leader_details = q_leader_matching_list[0]
+        # get the address vector of the quorum-leader
+        q_leader_addrvec = q_leader_details.get("public_addrs", {}).get("addrvec", [])
+        # if the quorum-leader has only one address in the address-vector
+        # and it is of type 'v2' (ie; with <IP>:3300),
+        # raise an exception to make user aware that
+        # they have to enable 'v1' (ie; with <IP>:6789) type as well
+        if len(q_leader_addrvec) == 1 and q_leader_addrvec[0]["type"] == "v2":
+            raise ExecutionFailureException(
+                "Only 'v2' address type is enabled, user should also enable 'v1' type as well"
+            )
+        ip_port = str(q_leader_details["public_addr"].split("/")[0])
+        return f"{str(q_leader_name)}={ip_port}"
+
+    def _convert_hostname_to_ip(self, host_name, port, ip_type):
+        # if 'cluster' instance is a dummy type,
+        # call the dummy instance's "convert" method
+        if not host_name:
+            raise ExecutionFailureException("Empty hostname provided")
+        if isinstance(self.cluster, DummyRados):
+            return self.cluster._convert_hostname_to_ip(host_name)
+
+        if ip_type == "FQDN":
+            # check which ip FQDN should be converted to, IPv4 or IPv6
+            # check the host ip, the endpoint ip type would be similar to host ip
+            cmd_json = {"prefix": "orch host ls", "format": "json"}
+            ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
+            # if there is an unsuccessful attempt,
+            if ret_val != 0 or len(json_out) == 0:
+                raise ExecutionFailureException(
+                    "'orch host ls' command failed.\n"
+                    f"Error: {err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST}"
+                )
+            host_addr = json_out[0]["addr"]
+            # append a sample :80 port before calling _invalid_endpoint, since it also verifies the port
+            host_ip_type = self._invalid_endpoint(host_addr + ":80")
+            import socket
+
+            # example output [(<AddressFamily.AF_INET: 2>, <SocketKind.SOCK_STREAM: 1>, 6, '', ('93.184.216.34', 80)), ...]
+            # we need to get 93.184.216.34 so it would be ip[0][4][0]
+            if host_ip_type == "IPv6":
+                ip = socket.getaddrinfo(
+                    host_name, port, family=socket.AF_INET6, proto=socket.IPPROTO_TCP
+                )
+            elif host_ip_type == "IPv4":
+                ip = socket.getaddrinfo(
+                    host_name, port, family=socket.AF_INET, proto=socket.IPPROTO_TCP
+                )
+            del socket
+            return ip[0][4][0]
+        return host_name
+
+    def get_active_and_standby_mgrs(self):
+        if self._arg_parser.dry_run:
+            return "", self.dry_run("ceph status")
+        monitoring_endpoint_port = self._arg_parser.monitoring_endpoint_port
+        monitoring_endpoint_ip_list = self._arg_parser.monitoring_endpoint
+        standby_mgrs = []
+        if not monitoring_endpoint_ip_list:
+            cmd_json = {"prefix": "status", "format": "json"}
+            ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
+            # if there is an unsuccessful attempt,
+            if ret_val != 0 or len(json_out) == 0:
+                raise ExecutionFailureException(
+                    "'mgr services' command failed.\n"
+                    f"Error: {err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST}"
+                )
+            monitoring_endpoint = (
+                json_out.get("mgrmap", {}).get("services", {}).get("prometheus", "")
+            )
+            if not monitoring_endpoint:
+                return "", ""
+            # now check the stand-by mgr-s
+            standby_arr = json_out.get("mgrmap", {}).get("standbys", [])
+            for each_standby in standby_arr:
+                if "name" in each_standby.keys():
+                    standby_mgrs.append(each_standby["name"])
+            try:
+                parsed_endpoint = urlparse(monitoring_endpoint)
+            except ValueError:
+                raise ExecutionFailureException(
+                    f"invalid endpoint: {monitoring_endpoint}"
+                )
+            monitoring_endpoint_ip_list = parsed_endpoint.hostname
+            if not monitoring_endpoint_port:
+                monitoring_endpoint_port = str(parsed_endpoint.port)
+
+        # if monitoring endpoint port is not set, put a default mon port
+        if not monitoring_endpoint_port:
+            monitoring_endpoint_port = self.DEFAULT_MONITORING_ENDPOINT_PORT
+
+        # user could give comma and space separated inputs (like --monitoring-endpoint="<ip1>, <ip2>")
+        monitoring_endpoint_ip_list = monitoring_endpoint_ip_list.replace(",", " ")
+        monitoring_endpoint_ip_list_split = monitoring_endpoint_ip_list.split()
+        # if monitoring-endpoint could not be found, raise an error
+        if len(monitoring_endpoint_ip_list_split) == 0:
+            raise ExecutionFailureException("No 'monitoring-endpoint' found")
+        # first ip is treated as the main monitoring-endpoint
+        monitoring_endpoint_ip = monitoring_endpoint_ip_list_split[0]
+        # rest of the ip-s are added to the 'standby_mgrs' list
+        standby_mgrs.extend(monitoring_endpoint_ip_list_split[1:])
+        failed_ip = monitoring_endpoint_ip
+
+        monitoring_endpoint = ":".join(
+            [monitoring_endpoint_ip, monitoring_endpoint_port]
+        )
+        ip_type = self._invalid_endpoint(monitoring_endpoint)
+        try:
+            monitoring_endpoint_ip = self._convert_hostname_to_ip(
+                monitoring_endpoint_ip, monitoring_endpoint_port, ip_type
+            )
+            # collect all the 'stand-by' mgr ips
+            mgr_ips = []
+            for each_standby_mgr in standby_mgrs:
+                failed_ip = each_standby_mgr
+                mgr_ips.append(
+                    self._convert_hostname_to_ip(
+                        each_standby_mgr, monitoring_endpoint_port, ip_type
+                    )
+                )
+        except:
+            raise ExecutionFailureException(
+                f"Conversion of host: {failed_ip} to IP failed. "
+                "Please enter the IP addresses of all the ceph-mgrs with the '--monitoring-endpoint' flag"
+            )
+
+        _, _, err = self.endpoint_dial(monitoring_endpoint, ip_type)
+        if err == "-1":
+            raise ExecutionFailureException(err)
+        # add the validated active mgr IP into the first index
+        mgr_ips.insert(0, monitoring_endpoint_ip)
+        all_mgr_ips_str = ",".join(mgr_ips)
+        return all_mgr_ips_str, monitoring_endpoint_port
+
+    def check_user_exist(self, user):
+        cmd_json = {"prefix": "auth get", "entity": f"{user}", "format": "json"}
+        ret_val, json_out, _ = self._common_cmd_json_gen(cmd_json)
+        if ret_val != 0 or len(json_out) == 0:
+            return ""
+        return str(json_out[0]["key"])
+
+    def get_cephfs_provisioner_caps_and_entity(self):
+        entity = "client.csi-cephfs-provisioner"
+        caps = {
+            "mon": "allow r, allow command 'osd blocklist'",
+            "mgr": "allow rw",
+            "osd": "allow rw tag cephfs metadata=*",
+        }
+        if self._arg_parser.restricted_auth_permission:
+            cluster_name = self._arg_parser.cluster_name
+            if cluster_name == "":
+                raise ExecutionFailureException(
+                    "cluster_name not found, please set the '--cluster-name' flag"
+                )
+            cephfs_filesystem = self._arg_parser.cephfs_filesystem_name
+            if cephfs_filesystem == "":
+                entity = f"{entity}-{cluster_name}"
+            else:
+                entity = f"{entity}-{cluster_name}-{cephfs_filesystem}"
+                caps["osd"] = f"allow rw tag cephfs metadata={cephfs_filesystem}"
+
+        return caps, entity
+
+    def get_cephfs_node_caps_and_entity(self):
+        entity = "client.csi-cephfs-node"
+        caps = {
+            "mon": "allow r, allow command 'osd blocklist'",
+            "mgr": "allow rw",
+            "osd": "allow rw tag cephfs *=*",
+            "mds": "allow rw",
+        }
+        if self._arg_parser.restricted_auth_permission:
+            cluster_name = self._arg_parser.cluster_name
+            if cluster_name == "":
+                raise ExecutionFailureException(
+                    "cluster_name not found, please set the '--cluster-name' flag"
+                )
+            cephfs_filesystem = self._arg_parser.cephfs_filesystem_name
+            if cephfs_filesystem == "":
+                entity = f"{entity}-{cluster_name}"
+            else:
+                entity = f"{entity}-{cluster_name}-{cephfs_filesystem}"
+                caps["osd"] = f"allow rw tag cephfs *={cephfs_filesystem}"
+
+        return caps, entity
+
+    def get_entity(self, entity, rbd_pool_name, alias_rbd_pool_name, cluster_name):
+        if (
+            rbd_pool_name.count(".") != 0
+            or rbd_pool_name.count("_") != 0
+            or alias_rbd_pool_name != ""
+            # checking alias_rbd_pool_name is not empty, as there may be a special character used other than . or _
+        ):
+            if alias_rbd_pool_name == "":
+                raise ExecutionFailureException(
+                    "please set the '--alias-rbd-data-pool-name' flag as the rbd data pool name contains '.' or '_'"
+                )
+            if (
+                alias_rbd_pool_name.count(".") != 0
+                or alias_rbd_pool_name.count("_") != 0
+            ):
+                raise ExecutionFailureException(
+                    "'--alias-rbd-data-pool-name' flag value should not contain '.' or '_'"
+                )
+            entity = f"{entity}-{cluster_name}-{alias_rbd_pool_name}"
+        else:
+            entity = f"{entity}-{cluster_name}-{rbd_pool_name}"
+
+        return entity
+
+    def get_rbd_provisioner_caps_and_entity(self):
+        entity = "client.csi-rbd-provisioner"
+        caps = {
+            "mon": "profile rbd, allow command 'osd blocklist'",
+            "mgr": "allow rw",
+            "osd": "profile rbd",
+        }
+        if self._arg_parser.restricted_auth_permission:
+            rbd_pool_name = self._arg_parser.rbd_data_pool_name
+            alias_rbd_pool_name = self._arg_parser.alias_rbd_data_pool_name
+            cluster_name = self._arg_parser.cluster_name
+            if rbd_pool_name == "":
+                raise ExecutionFailureException(
+                    "mandatory flag not found, please set the '--rbd-data-pool-name' flag"
+                )
+            if cluster_name == "":
+                raise ExecutionFailureException(
+                    "mandatory flag not found, please set the '--cluster-name' flag"
+                )
+            entity = self.get_entity(
+                entity, rbd_pool_name, alias_rbd_pool_name, cluster_name
+            )
+            caps["osd"] = f"profile rbd pool={rbd_pool_name}"
+
+        return caps, entity
+
+    def get_rbd_node_caps_and_entity(self):
+        entity = "client.csi-rbd-node"
+        caps = {
+            "mon": "profile rbd, allow command 'osd blocklist'",
+            "osd": "profile rbd",
+        }
+        if self._arg_parser.restricted_auth_permission:
+            rbd_pool_name = self._arg_parser.rbd_data_pool_name
+            alias_rbd_pool_name = self._arg_parser.alias_rbd_data_pool_name
+            cluster_name = self._arg_parser.cluster_name
+            if rbd_pool_name == "":
+                raise ExecutionFailureException(
+                    "mandatory flag not found, please set the '--rbd-data-pool-name' flag"
+                )
+            if cluster_name == "":
+                raise ExecutionFailureException(
+                    "mandatory flag not found, please set the '--cluster-name' flag"
+                )
+            entity = self.get_entity(
+                entity, rbd_pool_name, alias_rbd_pool_name, cluster_name
+            )
+            caps["osd"] = f"profile rbd pool={rbd_pool_name}"
+
+        return caps, entity
+
+    def get_caps_and_entity(self, user_name):
+        if "client.csi-cephfs-provisioner" in user_name:
+            if "client.csi-cephfs-provisioner" != user_name:
+                self._arg_parser.restricted_auth_permission = True
+            return self.get_cephfs_provisioner_caps_and_entity()
+        if "client.csi-cephfs-node" in user_name:
+            if "client.csi-cephfs-node" != user_name:
+                self._arg_parser.restricted_auth_permission = True
+            return self.get_cephfs_node_caps_and_entity()
+        if "client.csi-rbd-provisioner" in user_name:
+            if "client.csi-rbd-provisioner" != user_name:
+                self._arg_parser.restricted_auth_permission = True
+            return self.get_rbd_provisioner_caps_and_entity()
+        if "client.csi-rbd-node" in user_name:
+            if "client.csi-rbd-node" != user_name:
+                self._arg_parser.restricted_auth_permission = True
+            return self.get_rbd_node_caps_and_entity()
+
+        raise ExecutionFailureException(
+            f"no user found with user_name: {user_name}, "
+            "get_caps_and_entity command failed.\n"
+        )
+
+    def create_cephCSIKeyring_user(self, user):
+        """
+        command: ceph auth get-or-create client.csi-cephfs-provisioner mon 'allow r' mgr 'allow rw' osd 'allow rw tag cephfs metadata=*'
+        """
+        caps, entity = self.get_caps_and_entity(user)
+        cmd_json = {
+            "prefix": "auth get-or-create",
+            "entity": entity,
+            "caps": [cap for cap_list in list(caps.items()) for cap in cap_list],
+            "format": "json",
+        }
+
+        if self._arg_parser.dry_run:
+            return (
+                self.dry_run(
+                    "ceph "
+                    + cmd_json["prefix"]
+                    + " "
+                    + cmd_json["entity"]
+                    + " "
+                    + " ".join(cmd_json["caps"])
+                ),
+                "",
+            )
+        # check if user already exist
+        user_key = self.check_user_exist(entity)
+        if user_key != "":
+            return user_key, f"{entity.split('.', 1)[1]}"
+            # entity.split('.',1)[1] to rename entity(client.csi-rbd-node) as csi-rbd-node
+
+        ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
+        # if there is an unsuccessful attempt,
+        if ret_val != 0 or len(json_out) == 0:
+            raise ExecutionFailureException(
+                f"'auth get-or-create {user}' command failed.\n"
+                f"Error: {err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST}"
+            )
+        return str(json_out[0]["key"]), f"{entity.split('.', 1)[1]}"
+        # entity.split('.',1)[1] to rename entity(client.csi-rbd-node) as csi-rbd-node
+
+    def get_cephfs_data_pool_details(self):
+        cmd_json = {"prefix": "fs ls", "format": "json"}
+        if self._arg_parser.dry_run:
+            return self.dry_run("ceph " + cmd_json["prefix"])
+        ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
+        # if there is an unsuccessful attempt, report an error
+        if ret_val != 0:
+            # if fs and data_pool arguments are not set, silently return
+            if (
+                self._arg_parser.cephfs_filesystem_name == ""
+                and self._arg_parser.cephfs_data_pool_name == ""
+            ):
+                return
+            # if user has provided any of the
+            # '--cephfs-filesystem-name' or '--cephfs-data-pool-name' arguments,
+            # raise an exception as we are unable to verify the args
+            raise ExecutionFailureException(
+                f"'fs ls' ceph call failed with error: {err_msg}"
+            )
+
+        matching_json_out = {}
+        # if '--cephfs-filesystem-name' argument is provided,
+        # check whether the provided filesystem-name exists or not
+        if self._arg_parser.cephfs_filesystem_name:
+            # get the matching list
+            matching_json_out_list = [
+                matched
+                for matched in json_out
+                if str(matched["name"]) == self._arg_parser.cephfs_filesystem_name
+            ]
+            # unable to find a matching fs-name, raise an error
+            if len(matching_json_out_list) == 0:
+                raise ExecutionFailureException(
+                    f"Filesystem provided, '{self._arg_parser.cephfs_filesystem_name}', "
+                    f"is not found in the fs-list: {[str(x['name']) for x in json_out]}"
+                )
+            matching_json_out = matching_json_out_list[0]
+        # if cephfs filesystem name is not provided,
+        # try to get a default fs name by doing the following
+        else:
+            # a. check if there is only one filesystem is present
+            if len(json_out) == 1:
+                matching_json_out = json_out[0]
+            # b. or else, check if data_pool name is provided
+            elif self._arg_parser.cephfs_data_pool_name:
+                # and if present, check whether there exists a fs which has the data_pool
+                for eachJ in json_out:
+                    if self._arg_parser.cephfs_data_pool_name in eachJ["data_pools"]:
+                        matching_json_out = eachJ
+                        break
+                # if there is no matching fs exists, that means provided data_pool name is invalid
+                if not matching_json_out:
+                    raise ExecutionFailureException(
+                        f"Provided data_pool name, {self._arg_parser.cephfs_data_pool_name},"
+                        " does not exist"
+                    )
+            # c. if nothing is set and couldn't find a default,
+            else:
+                # just return silently
+                return
+
+        if matching_json_out:
+            self._arg_parser.cephfs_filesystem_name = str(matching_json_out["name"])
+            self._arg_parser.cephfs_metadata_pool_name = str(
+                matching_json_out["metadata_pool"]
+            )
+
+        if isinstance(matching_json_out["data_pools"], list):
+            # if the user has already provided data-pool-name,
+            # through --cephfs-data-pool-name
+            if self._arg_parser.cephfs_data_pool_name:
+                # if the provided name is not matching with the one in the list
+                if (
+                    self._arg_parser.cephfs_data_pool_name
+                    not in matching_json_out["data_pools"]
+                ):
+                    raise ExecutionFailureException(
+                        f"Provided data-pool-name: '{self._arg_parser.cephfs_data_pool_name}', "
+                        "doesn't match from the data-pools list: "
+                        f"{[str(x) for x in matching_json_out['data_pools']]}"
+                    )
+            # if data_pool name is not provided,
+            # then try to find a default data pool name
+            else:
+                # if no data_pools exist, silently return
+                if len(matching_json_out["data_pools"]) == 0:
+                    return
+                self._arg_parser.cephfs_data_pool_name = str(
+                    matching_json_out["data_pools"][0]
+                )
+            # if more than one data pool exists,
+            # warn the user about which one was selected
+            if len(matching_json_out["data_pools"]) > 1:
+                print(
+                    "WARNING: Multiple data pools detected: "
+                    f"{[str(x) for x in matching_json_out['data_pools']]}\n"
+                    f"Using the data-pool: '{self._arg_parser.cephfs_data_pool_name}'\n"
+                )
+
+    def create_checkerKey(self):
+        cmd_json = {
+            "prefix": "auth get-or-create",
+            "entity": self.run_as_user,
+            "caps": [
+                "mon",
+                self.MIN_USER_CAP_PERMISSIONS["mon"],
+                "mgr",
+                self.MIN_USER_CAP_PERMISSIONS["mgr"],
+                "osd",
+                self.MIN_USER_CAP_PERMISSIONS["osd"].format(
+                    self._arg_parser.rgw_pool_prefix
+                ),
+            ],
+            "format": "json",
+        }
+        if self._arg_parser.dry_run:
+            return self.dry_run(
+                "ceph "
+                + cmd_json["prefix"]
+                + " "
+                + cmd_json["entity"]
+                + " "
+                + " ".join(cmd_json["caps"])
+            )
+        ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
+        # if there is an unsuccessful attempt,
+        if ret_val != 0 or len(json_out) == 0:
+            raise ExecutionFailureException(
+                f"'auth get-or-create {self.run_as_user}' command failed\n"
+                f"Error: {err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST}"
+            )
+        return str(json_out[0]["key"])
+
+    def get_ceph_dashboard_link(self):
+        cmd_json = {"prefix": "mgr services", "format": "json"}
+        if self._arg_parser.dry_run:
+            return self.dry_run("ceph " + cmd_json["prefix"])
+        ret_val, json_out, _ = self._common_cmd_json_gen(cmd_json)
+        # if there is an unsuccessful attempt,
+        if ret_val != 0 or len(json_out) == 0:
+            return None
+        if "dashboard" not in json_out:
+            return None
+        return json_out["dashboard"]
+
+    def create_rgw_admin_ops_user(self):
+        cmd = [
+            "radosgw-admin",
+            "user",
+            "create",
+            "--uid",
+            self.EXTERNAL_RGW_ADMIN_OPS_USER_NAME,
+            "--display-name",
+            "Rook RGW Admin Ops user",
+            "--caps",
+            "buckets=*;users=*;usage=read;metadata=read;zone=read",
+            "--rgw-realm",
+            self._arg_parser.rgw_realm_name,
+            "--rgw-zonegroup",
+            self._arg_parser.rgw_zonegroup_name,
+            "--rgw-zone",
+            self._arg_parser.rgw_zone_name,
+        ]
+        if self._arg_parser.dry_run:
+            return self.dry_run("ceph " + " ".join(cmd))
+        try:
+            output = subprocess.check_output(cmd, stderr=subprocess.PIPE)
+        except subprocess.CalledProcessError as execErr:
+            # if the user already exists, we just query it
+            if execErr.returncode == errno.EEXIST:
+                cmd = [
+                    "radosgw-admin",
+                    "user",
+                    "info",
+                    "--uid",
+                    self.EXTERNAL_RGW_ADMIN_OPS_USER_NAME,
+                    "--rgw-realm",
+                    self._arg_parser.rgw_realm_name,
+                    "--rgw-zonegroup",
+                    self._arg_parser.rgw_zonegroup_name,
+                    "--rgw-zone",
+                    self._arg_parser.rgw_zone_name,
+                ]
+                try:
+                    output = subprocess.check_output(cmd, stderr=subprocess.PIPE)
+                except subprocess.CalledProcessError as execErr:
+                    err_msg = (
+                        f"failed to execute command {cmd}. Output: {execErr.output}. "
+                        f"Code: {execErr.returncode}. Error: {execErr.stderr}"
+                    )
+                    sys.stderr.write(err_msg)
+                    return None, None, False, "-1"
+            else:
+                err_msg = (
+                    f"failed to execute command {cmd}. Output: {execErr.output}. "
+                    f"Code: {execErr.returncode}. Error: {execErr.stderr}"
+                )
+                sys.stderr.write(err_msg)
+                return None, None, False, "-1"
+
+        # on python2, skip the ceph-version check for adding the `info=read` cap (rgw validation)
+        if sys.version_info.major < 3:
+            jsonoutput = json.loads(output)
+            return (
+                jsonoutput["keys"][0]["access_key"],
+                jsonoutput["keys"][0]["secret_key"],
+                False,
+                "",
+            )
+
+        # separately add info=read caps for rgw-endpoint ip validation
+        info_cap_supported = True
+        cmd = [
+            "radosgw-admin",
+            "caps",
+            "add",
+            "--uid",
+            self.EXTERNAL_RGW_ADMIN_OPS_USER_NAME,
+            "--caps",
+            "info=read",
+            "--rgw-realm",
+            self._arg_parser.rgw_realm_name,
+            "--rgw-zonegroup",
+            self._arg_parser.rgw_zonegroup_name,
+            "--rgw-zone",
+            self._arg_parser.rgw_zone_name,
+        ]
+        try:
+            output = subprocess.check_output(cmd, stderr=subprocess.PIPE)
+        except subprocess.CalledProcessError as execErr:
+            # the ceph version does not support adding the `info=read` cap (rgw validation)
+            if (
+                "could not add caps: unable to add caps: info=read\n"
+                in execErr.stderr.decode("utf-8")
+                and execErr.returncode == 244
+            ):
+                info_cap_supported = False
+            else:
+                err_msg = (
+                    f"failed to execute command {cmd}. Output: {execErr.output}. "
+                    f"Code: {execErr.returncode}. Error: {execErr.stderr}"
+                )
+                sys.stderr.write(err_msg)
+                return None, None, False, "-1"
+
+        jsonoutput = json.loads(output)
+        return (
+            jsonoutput["keys"][0]["access_key"],
+            jsonoutput["keys"][0]["secret_key"],
+            info_cap_supported,
+            "",
+        )
+
+    def validate_rbd_pool(self):
+        if not self.cluster.pool_exists(self._arg_parser.rbd_data_pool_name):
+            raise ExecutionFailureException(
+                f"The provided pool, '{self._arg_parser.rbd_data_pool_name}', does not exist"
+            )
+
+    def validate_rados_namespace(self):
+        rbd_pool_name = self._arg_parser.rbd_data_pool_name
+        rados_namespace = self._arg_parser.rados_namespace
+        if rados_namespace == "":
+            return
+        rbd_inst = rbd.RBD()
+        ioctx = self.cluster.open_ioctx(rbd_pool_name)
+        if rbd_inst.namespace_exists(ioctx, rados_namespace) is False:
+            raise ExecutionFailureException(
+                f"The provided rados Namespace, '{rados_namespace}', "
+                f"is not found in the pool '{rbd_pool_name}'"
+            )
+
+    def get_or_create_subvolume_group(self, subvolume_group, cephfs_filesystem_name):
+        cmd = [
+            "ceph",
+            "fs",
+            "subvolumegroup",
+            "getpath",
+            cephfs_filesystem_name,
+            subvolume_group,
+        ]
+        try:
+            _ = subprocess.check_output(cmd, stderr=subprocess.PIPE)
+        except subprocess.CalledProcessError:
+            cmd = [
+                "ceph",
+                "fs",
+                "subvolumegroup",
+                "create",
+                cephfs_filesystem_name,
+                subvolume_group,
+            ]
+            try:
+                _ = subprocess.check_output(cmd, stderr=subprocess.PIPE)
+            except subprocess.CalledProcessError:
+                raise ExecutionFailureException(
+                    f"subvolume group {subvolume_group} could not be created"
+                )
+
+    def pin_subvolume(
+        self, subvolume_group, cephfs_filesystem_name, pin_type, pin_setting
+    ):
+        cmd = [
+            "ceph",
+            "fs",
+            "subvolumegroup",
+            "pin",
+            cephfs_filesystem_name,
+            subvolume_group,
+            pin_type,
+            pin_setting,
+        ]
+        try:
+            _ = subprocess.check_output(cmd, stderr=subprocess.PIPE)
+        except subprocess.CalledProcessError:
+            raise ExecutionFailureException(
+                f"subvolume group {subvolume_group} could not be pinned"
+            )
+
+    def get_rgw_fsid(self, base_url, verify):
+        access_key = self.out_map["RGW_ADMIN_OPS_USER_ACCESS_KEY"]
+        secret_key = self.out_map["RGW_ADMIN_OPS_USER_SECRET_KEY"]
+        rgw_endpoint = self._arg_parser.rgw_endpoint
+        base_url = base_url + "://" + rgw_endpoint + "/admin/info?"
+        params = {"format": "json"}
+        request_url = base_url + urllib.parse.urlencode(params)
+
+        try:
+            r = requests.get(
+                request_url,
+                auth=S3Auth(access_key, secret_key, rgw_endpoint),
+                verify=verify,
+            )
+        except requests.exceptions.Timeout:
+            sys.stderr.write(
+                f"invalid endpoint: timed out calling the admin-ops api at {rgw_endpoint}"
+            )
+            return "", "-1"
+        r1 = r.json()
+        if r1 is None or r1.get("info") is None:
+            sys.stderr.write(
+                f"The provided rgw Endpoint, '{self._arg_parser.rgw_endpoint}', is invalid."
+            )
+            return (
+                "",
+                "-1",
+            )
+
+        return r1["info"]["storage_backends"][0]["cluster_id"], ""
+
+    def validate_rgw_endpoint(self, info_cap_supported):
+        # if the 'cluster' instance is a dummy one,
+        # don't try to reach out to the endpoint
+        if isinstance(self.cluster, DummyRados):
+            return
+
+        rgw_endpoint = self._arg_parser.rgw_endpoint
+
+        # validate rgw endpoint only if ip address is passed
+        ip_type = self._invalid_endpoint(rgw_endpoint)
+
+        # check if the rgw endpoint is reachable
+        cert = None
+        if not self._arg_parser.rgw_skip_tls and self.validate_rgw_endpoint_tls_cert():
+            cert = self._arg_parser.rgw_tls_cert_path
+        base_url, verify, err = self.endpoint_dial(rgw_endpoint, ip_type, cert=cert)
+        if err != "":
+            return "-1"
+
+        # check if the rgw endpoint belongs to the same cluster
+        # only check if `info` cap is supported
+        if info_cap_supported:
+            fsid = self.get_fsid()
+            rgw_fsid, err = self.get_rgw_fsid(base_url, verify)
+            if err == "-1":
+                return "-1"
+            if fsid != rgw_fsid:
+                sys.stderr.write(
+                    f"The provided rgw Endpoint, '{self._arg_parser.rgw_endpoint}', is invalid: "
+                    f"the cluster_id '{rgw_fsid}' returned by the admin-ops API does not match the ceph cluster fsid '{fsid}'"
+                )
+                return "-1"
+
+        # check whether the rgw endpoint pools exist
+        # only validate when a non-default rgw_pool_prefix is passed; with the default prefix these pools are not created, so they are not checked
+        if self._arg_parser.rgw_pool_prefix != "default":
+            rgw_pools_to_validate = [
+                f"{self._arg_parser.rgw_pool_prefix}.rgw.meta",
+                ".rgw.root",
+                f"{self._arg_parser.rgw_pool_prefix}.rgw.control",
+                f"{self._arg_parser.rgw_pool_prefix}.rgw.log",
+            ]
+            for _rgw_pool_to_validate in rgw_pools_to_validate:
+                if not self.cluster.pool_exists(_rgw_pool_to_validate):
+                    sys.stderr.write(
+                        f"The provided pool, '{_rgw_pool_to_validate}', does not exist"
+                    )
+                    return "-1"
+
+        return ""
+
+    def validate_rgw_multisite(self, rgw_multisite_config_name, rgw_multisite_config):
+        if rgw_multisite_config != "":
+            cmd = [
+                "radosgw-admin",
+                rgw_multisite_config,
+                "get",
+                "--rgw-" + rgw_multisite_config,
+                rgw_multisite_config_name,
+            ]
+            try:
+                _ = subprocess.check_output(cmd, stderr=subprocess.PIPE)
+            except subprocess.CalledProcessError as execErr:
+                err_msg = (
+                    f"failed to execute command {cmd}. Output: {execErr.output}. "
+                    f"Code: {execErr.returncode}. Error: {execErr.stderr}"
+                )
+                sys.stderr.write(err_msg)
+                return "-1"
+        return ""
+
+    def _gen_output_map(self):
+        if self.out_map:
+            return
+        self._arg_parser.cluster_name = (
+            self._arg_parser.cluster_name.lower()
+        )  # always convert cluster name to lowercase characters
+        self.validate_rbd_pool()
+        self.validate_rados_namespace()
+        self._excluded_keys.add("CLUSTER_NAME")
+        self.get_cephfs_data_pool_details()
+        self.out_map["NAMESPACE"] = self._arg_parser.namespace
+        self.out_map["CLUSTER_NAME"] = self._arg_parser.cluster_name
+        self.out_map["ROOK_EXTERNAL_FSID"] = self.get_fsid()
+        self.out_map["ROOK_EXTERNAL_USERNAME"] = self.run_as_user
+        self.out_map["ROOK_EXTERNAL_CEPH_MON_DATA"] = self.get_ceph_external_mon_data()
+        self.out_map["ROOK_EXTERNAL_USER_SECRET"] = self.create_checkerKey()
+        self.out_map["ROOK_EXTERNAL_DASHBOARD_LINK"] = self.get_ceph_dashboard_link()
+        (
+            self.out_map["CSI_RBD_NODE_SECRET"],
+            self.out_map["CSI_RBD_NODE_SECRET_NAME"],
+        ) = self.create_cephCSIKeyring_user("client.csi-rbd-node")
+        (
+            self.out_map["CSI_RBD_PROVISIONER_SECRET"],
+            self.out_map["CSI_RBD_PROVISIONER_SECRET_NAME"],
+        ) = self.create_cephCSIKeyring_user("client.csi-rbd-provisioner")
+        self.out_map["CEPHFS_POOL_NAME"] = self._arg_parser.cephfs_data_pool_name
+        self.out_map[
+            "CEPHFS_METADATA_POOL_NAME"
+        ] = self._arg_parser.cephfs_metadata_pool_name
+        self.out_map["CEPHFS_FS_NAME"] = self._arg_parser.cephfs_filesystem_name
+        self.out_map[
+            "RESTRICTED_AUTH_PERMISSION"
+        ] = self._arg_parser.restricted_auth_permission
+        self.out_map["RADOS_NAMESPACE"] = self._arg_parser.rados_namespace
+        self.out_map["SUBVOLUME_GROUP"] = self._arg_parser.subvolume_group
+        self.out_map["CSI_CEPHFS_NODE_SECRET"] = ""
+        self.out_map["CSI_CEPHFS_PROVISIONER_SECRET"] = ""
+        # create CephFS node and provisioner keyring only when MDS exists
+        if self.out_map["CEPHFS_FS_NAME"] and self.out_map["CEPHFS_POOL_NAME"]:
+            (
+                self.out_map["CSI_CEPHFS_NODE_SECRET"],
+                self.out_map["CSI_CEPHFS_NODE_SECRET_NAME"],
+            ) = self.create_cephCSIKeyring_user("client.csi-cephfs-node")
+            (
+                self.out_map["CSI_CEPHFS_PROVISIONER_SECRET"],
+                self.out_map["CSI_CEPHFS_PROVISIONER_SECRET_NAME"],
+            ) = self.create_cephCSIKeyring_user("client.csi-cephfs-provisioner")
+            # create the default "csi" subvolumegroup
+            self.get_or_create_subvolume_group(
+                "csi", self._arg_parser.cephfs_filesystem_name
+            )
+            # pin the default "csi" subvolumegroup
+            self.pin_subvolume(
+                "csi", self._arg_parser.cephfs_filesystem_name, "distributed", "1"
+            )
+            if self.out_map["SUBVOLUME_GROUP"]:
+                self.get_or_create_subvolume_group(
+                    self._arg_parser.subvolume_group,
+                    self._arg_parser.cephfs_filesystem_name,
+                )
+                self.pin_subvolume(
+                    self._arg_parser.subvolume_group,
+                    self._arg_parser.cephfs_filesystem_name,
+                    "distributed",
+                    "1",
+                )
+        self.out_map["RGW_TLS_CERT"] = ""
+        self.out_map["MONITORING_ENDPOINT"] = ""
+        self.out_map["MONITORING_ENDPOINT_PORT"] = ""
+        if not self._arg_parser.skip_monitoring_endpoint:
+            (
+                self.out_map["MONITORING_ENDPOINT"],
+                self.out_map["MONITORING_ENDPOINT_PORT"],
+            ) = self.get_active_and_standby_mgrs()
+        self.out_map["RBD_POOL_NAME"] = self._arg_parser.rbd_data_pool_name
+        self.out_map[
+            "RBD_METADATA_EC_POOL_NAME"
+        ] = self.validate_rbd_metadata_ec_pool_name()
+        self.out_map["RGW_POOL_PREFIX"] = self._arg_parser.rgw_pool_prefix
+        self.out_map["RGW_ENDPOINT"] = ""
+        if self._arg_parser.rgw_endpoint:
+            if self._arg_parser.dry_run:
+                self.create_rgw_admin_ops_user()
+            else:
+                if (
+                    self._arg_parser.rgw_realm_name != ""
+                    and self._arg_parser.rgw_zonegroup_name != ""
+                    and self._arg_parser.rgw_zone_name != ""
+                ):
+                    err = self.validate_rgw_multisite(
+                        self._arg_parser.rgw_realm_name, "realm"
+                    )
+                    err = self.validate_rgw_multisite(
+                        self._arg_parser.rgw_zonegroup_name, "zonegroup"
+                    )
+                    err = self.validate_rgw_multisite(
+                        self._arg_parser.rgw_zone_name, "zone"
+                    )
+
+                if (
+                    self._arg_parser.rgw_realm_name == ""
+                    and self._arg_parser.rgw_zonegroup_name == ""
+                    and self._arg_parser.rgw_zone_name == ""
+                ) or (
+                    self._arg_parser.rgw_realm_name != ""
+                    and self._arg_parser.rgw_zonegroup_name != ""
+                    and self._arg_parser.rgw_zone_name != ""
+                ):
+                    (
+                        self.out_map["RGW_ADMIN_OPS_USER_ACCESS_KEY"],
+                        self.out_map["RGW_ADMIN_OPS_USER_SECRET_KEY"],
+                        info_cap_supported,
+                        err,
+                    ) = self.create_rgw_admin_ops_user()
+                    err = self.validate_rgw_endpoint(info_cap_supported)
+                    if self._arg_parser.rgw_tls_cert_path:
+                        self.out_map[
+                            "RGW_TLS_CERT"
+                        ] = self.validate_rgw_endpoint_tls_cert()
+                    # if there is no error, set the RGW_ENDPOINT
+                    if err != "-1":
+                        self.out_map["RGW_ENDPOINT"] = self._arg_parser.rgw_endpoint
+                else:
+                    err = "Please provide all the RGW multisite parameters or none of them"
+                    sys.stderr.write(err)
+
+    def gen_shell_out(self):
+        self._gen_output_map()
+        shOutIO = StringIO()
+        for k, v in self.out_map.items():
+            if v and k not in self._excluded_keys:
+                shOutIO.write(f"export {k}={v}{LINESEP}")
+        shOut = shOutIO.getvalue()
+        shOutIO.close()
+        return shOut
+
+    def gen_json_out(self):
+        self._gen_output_map()
+        if self._arg_parser.dry_run:
+            return ""
+        json_out = [
+            {
+                "name": "rook-ceph-mon-endpoints",
+                "kind": "ConfigMap",
+                "data": {
+                    "data": self.out_map["ROOK_EXTERNAL_CEPH_MON_DATA"],
+                    "maxMonId": "0",
+                    "mapping": "{}",
+                },
+            },
+            {
+                "name": "rook-ceph-mon",
+                "kind": "Secret",
+                "data": {
+                    "admin-secret": "admin-secret",
+                    "fsid": self.out_map["ROOK_EXTERNAL_FSID"],
+                    "mon-secret": "mon-secret",
+                },
+            },
+            {
+                "name": "rook-ceph-operator-creds",
+                "kind": "Secret",
+                "data": {
+                    "userID": self.out_map["ROOK_EXTERNAL_USERNAME"],
+                    "userKey": self.out_map["ROOK_EXTERNAL_USER_SECRET"],
+                },
+            },
+        ]
+
+        # only add 'monitoring-endpoint' to the CephCluster when 'MONITORING_ENDPOINT' exists
+        if (
+            self.out_map["MONITORING_ENDPOINT"]
+            and self.out_map["MONITORING_ENDPOINT_PORT"]
+        ):
+            json_out.append(
+                {
+                    "name": "monitoring-endpoint",
+                    "kind": "CephCluster",
+                    "data": {
+                        "MonitoringEndpoint": self.out_map["MONITORING_ENDPOINT"],
+                        "MonitoringPort": self.out_map["MONITORING_ENDPOINT_PORT"],
+                    },
+                }
+            )
+
+        # only add the 'rook-csi-rbd-node' Secret when 'CSI_RBD_NODE_SECRET' exists
+        if (
+            self.out_map["CSI_RBD_NODE_SECRET"]
+            and self.out_map["CSI_RBD_NODE_SECRET_NAME"]
+        ):
+            json_out.append(
+                {
+                    "name": f"rook-{self.out_map['CSI_RBD_NODE_SECRET_NAME']}",
+                    "kind": "Secret",
+                    "data": {
+                        "userID": self.out_map["CSI_RBD_NODE_SECRET_NAME"],
+                        "userKey": self.out_map["CSI_RBD_NODE_SECRET"],
+                    },
+                }
+            )
+        # if 'CSI_RBD_PROVISIONER_SECRET' exists, then only add 'rook-csi-rbd-provisioner' Secret
+        if (
+            self.out_map["CSI_RBD_PROVISIONER_SECRET"]
+            and self.out_map["CSI_RBD_PROVISIONER_SECRET_NAME"]
+        ):
+            json_out.append(
+                {
+                    "name": f"rook-{self.out_map['CSI_RBD_PROVISIONER_SECRET_NAME']}",
+                    "kind": "Secret",
+                    "data": {
+                        "userID": self.out_map["CSI_RBD_PROVISIONER_SECRET_NAME"],
+                        "userKey": self.out_map["CSI_RBD_PROVISIONER_SECRET"],
+                    },
+                }
+            )
+        # if 'CSI_CEPHFS_PROVISIONER_SECRET' exists, then only add 'rook-csi-cephfs-provisioner' Secret
+        if (
+            self.out_map["CSI_CEPHFS_PROVISIONER_SECRET"]
+            and self.out_map["CSI_CEPHFS_PROVISIONER_SECRET_NAME"]
+        ):
+            json_out.append(
+                {
+                    "name": f"rook-{self.out_map['CSI_CEPHFS_PROVISIONER_SECRET_NAME']}",
+                    "kind": "Secret",
+                    "data": {
+                        "adminID": self.out_map["CSI_CEPHFS_PROVISIONER_SECRET_NAME"],
+                        "adminKey": self.out_map["CSI_CEPHFS_PROVISIONER_SECRET"],
+                    },
+                }
+            )
+        # if 'CSI_CEPHFS_NODE_SECRET' exists, then only add 'rook-csi-cephfs-node' Secret
+        if (
+            self.out_map["CSI_CEPHFS_NODE_SECRET"]
+            and self.out_map["CSI_CEPHFS_NODE_SECRET_NAME"]
+        ):
+            json_out.append(
+                {
+                    "name": f"rook-{self.out_map['CSI_CEPHFS_NODE_SECRET_NAME']}",
+                    "kind": "Secret",
+                    "data": {
+                        "adminID": self.out_map["CSI_CEPHFS_NODE_SECRET_NAME"],
+                        "adminKey": self.out_map["CSI_CEPHFS_NODE_SECRET"],
+                    },
+                }
+            )
+        # if 'ROOK_EXTERNAL_DASHBOARD_LINK' exists, then only add 'rook-ceph-dashboard-link' Secret
+        if self.out_map["ROOK_EXTERNAL_DASHBOARD_LINK"]:
+            json_out.append(
+                {
+                    "name": "rook-ceph-dashboard-link",
+                    "kind": "Secret",
+                    "data": {
+                        "userID": "ceph-dashboard-link",
+                        "userKey": self.out_map["ROOK_EXTERNAL_DASHBOARD_LINK"],
+                    },
+                }
+            )
+        if self.out_map["RBD_METADATA_EC_POOL_NAME"]:
+            json_out.append(
+                {
+                    "name": "ceph-rbd",
+                    "kind": "StorageClass",
+                    "data": {
+                        "dataPool": self.out_map["RBD_POOL_NAME"],
+                        "pool": self.out_map["RBD_METADATA_EC_POOL_NAME"],
+                        "csi.storage.k8s.io/provisioner-secret-name": f"rook-{self.out_map['CSI_RBD_PROVISIONER_SECRET_NAME']}",
+                        "csi.storage.k8s.io/controller-expand-secret-name": f"rook-{self.out_map['CSI_RBD_PROVISIONER_SECRET_NAME']}",
+                        "csi.storage.k8s.io/node-stage-secret-name": f"rook-{self.out_map['CSI_RBD_NODE_SECRET_NAME']}",
+                    },
+                }
+            )
+        else:
+            json_out.append(
+                {
+                    "name": "ceph-rbd",
+                    "kind": "StorageClass",
+                    "data": {
+                        "pool": self.out_map["RBD_POOL_NAME"],
+                        "csi.storage.k8s.io/provisioner-secret-name": f"rook-{self.out_map['CSI_RBD_PROVISIONER_SECRET_NAME']}",
+                        "csi.storage.k8s.io/controller-expand-secret-name": f"rook-{self.out_map['CSI_RBD_PROVISIONER_SECRET_NAME']}",
+                        "csi.storage.k8s.io/node-stage-secret-name": f"rook-{self.out_map['CSI_RBD_NODE_SECRET_NAME']}",
+                    },
+                }
+            )
+        # if 'CEPHFS_FS_NAME' exists, then only add 'cephfs' StorageClass
+        if self.out_map["CEPHFS_FS_NAME"]:
+            json_out.append(
+                {
+                    "name": "cephfs",
+                    "kind": "StorageClass",
+                    "data": {
+                        "fsName": self.out_map["CEPHFS_FS_NAME"],
+                        "pool": self.out_map["CEPHFS_POOL_NAME"],
+                        "csi.storage.k8s.io/provisioner-secret-name": f"rook-{self.out_map['CSI_CEPHFS_PROVISIONER_SECRET_NAME']}",
+                        "csi.storage.k8s.io/controller-expand-secret-name": f"rook-{self.out_map['CSI_CEPHFS_PROVISIONER_SECRET_NAME']}",
+                        "csi.storage.k8s.io/node-stage-secret-name": f"rook-{self.out_map['CSI_CEPHFS_NODE_SECRET_NAME']}",
+                    },
+                }
+            )
+        # if 'RGW_ENDPOINT' exists, then only add 'ceph-rgw' StorageClass
+        if self.out_map["RGW_ENDPOINT"]:
+            json_out.append(
+                {
+                    "name": "ceph-rgw",
+                    "kind": "StorageClass",
+                    "data": {
+                        "endpoint": self.out_map["RGW_ENDPOINT"],
+                        "poolPrefix": self.out_map["RGW_POOL_PREFIX"],
+                    },
+                }
+            )
+            json_out.append(
+                {
+                    "name": "rgw-admin-ops-user",
+                    "kind": "Secret",
+                    "data": {
+                        "accessKey": self.out_map["RGW_ADMIN_OPS_USER_ACCESS_KEY"],
+                        "secretKey": self.out_map["RGW_ADMIN_OPS_USER_SECRET_KEY"],
+                    },
+                }
+            )
+        # if 'RGW_TLS_CERT' exists, then only add the "ceph-rgw-tls-cert" secret
+        if self.out_map["RGW_TLS_CERT"]:
+            json_out.append(
+                {
+                    "name": "ceph-rgw-tls-cert",
+                    "kind": "Secret",
+                    "data": {
+                        "cert": self.out_map["RGW_TLS_CERT"],
+                    },
+                }
+            )
+
+        return json.dumps(json_out) + LINESEP
+
+    def upgrade_users_permissions(self):
+        users = [
+            "client.csi-cephfs-node",
+            "client.csi-cephfs-provisioner",
+            "client.csi-rbd-node",
+            "client.csi-rbd-provisioner",
+        ]
+        if self.run_as_user != "" and self.run_as_user not in users:
+            users.append(self.run_as_user)
+        for user in users:
+            self.upgrade_user_permissions(user)
+
+    def upgrade_user_permissions(self, user):
+        # check whether the given user exists or not
+        cmd_json = {"prefix": "auth get", "entity": f"{user}", "format": "json"}
+        ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
+        if ret_val != 0 or len(json_out) == 0:
+            print(f"user {user} not found for upgrading.")
+            return
+        existing_caps = json_out[0]["caps"]
+        new_cap, _ = self.get_caps_and_entity(user)
+        cap_keys = ["mon", "mgr", "osd", "mds"]
+        caps = []
+        for eachCap in cap_keys:
+            cur_cap_values = existing_caps.get(eachCap, "")
+            new_cap_values = new_cap.get(eachCap, "")
+            cur_cap_perm_list = [
+                x.strip() for x in cur_cap_values.split(",") if x.strip()
+            ]
+            new_cap_perm_list = [
+                x.strip() for x in new_cap_values.split(",") if x.strip()
+            ]
+            # append new_cap_list to cur_cap_list to maintain the order of caps
+            cur_cap_perm_list.extend(new_cap_perm_list)
+            # eliminate duplicates without using 'set'
+            # (set would re-order the items and the cap order must be preserved)
+            new_cap_list = []
+            for cap in cur_cap_perm_list:
+                if cap not in new_cap_list:
+                    new_cap_list.append(cap)
+            existing_caps[eachCap] = ", ".join(new_cap_list)
+            if existing_caps[eachCap]:
+                caps.append(eachCap)
+                caps.append(existing_caps[eachCap])
+        cmd_json = {
+            "prefix": "auth caps",
+            "entity": user,
+            "caps": caps,
+            "format": "json",
+        }
+        ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
+        if ret_val != 0:
+            raise ExecutionFailureException(
+                f"'auth caps {user}' command failed.\n Error: {err_msg}"
+            )
+        print(f"Updated user {user} successfully.")
+
+    def main(self):
+        generated_output = ""
+        if self._arg_parser.upgrade:
+            self.upgrade_users_permissions()
+        elif self._arg_parser.format == "json":
+            generated_output = self.gen_json_out()
+        elif self._arg_parser.format == "bash":
+            generated_output = self.gen_shell_out()
+        else:
+            raise ExecutionFailureException(
+                f"Unsupported format: {self._arg_parser.format}"
+            )
+        print(generated_output)
+        if self.output_file and generated_output:
+            with open(self.output_file, mode="w", encoding="UTF-8") as fOut:
+                fOut.write(generated_output)
+
+
+################################################
+##################### MAIN #####################
+################################################
+if __name__ == "__main__":
+    rjObj = RadosJSON()
+    try:
+        rjObj.main()
+    except ExecutionFailureException as err:
+        print(f"Execution Failed: {err}")
+        raise err
+    except KeyError as kErr:
+        print(f"KeyError: {kErr}")
+    except OSError as osErr:
+        print(f"Error while trying to output the data: {osErr}")
+    finally:
+        rjObj.shutdown()
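
Usage sketch for the exporter above (assumptions: the script is saved under the name Rook ships it as, create-external-cluster-resources.py, it runs on a host with admin access to the external Ceph cluster, and the pool/filesystem names are placeholders):

  python3 create-external-cluster-resources.py --rbd-data-pool-name ceph-blockpool --cephfs-filesystem-name ceph-filesystem --format bash --dry-run
  python3 create-external-cluster-resources.py --rbd-data-pool-name ceph-blockpool --cephfs-filesystem-name ceph-filesystem --format bash

The first call only reports the ceph/radosgw-admin commands it would run; the second prints 'export KEY=value' lines that can be sourced before the matching import step on the Rook consumer side.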

+ 4 - 0
dezendorf/homelab/talos/bin/kga

@@ -0,0 +1,4 @@
+#!/bin/bash
+
+kubectl api-resources --verbs=list --namespaced -o name | xargs -n 1 kubectl get --show-kind --ignore-not-found -n $1 | sort | awk '{print $1}'  | grep \/ 
+
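
Example use (the namespace is a placeholder), listing every namespaced object still present before deleting a namespace:

  ./bin/kga rook-ceph-ns

The output is kind/name pairs for all list-able namespaced resources, which is handy for confirming a namespace is actually empty.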

+ 1 - 0
dezendorf/homelab/talos/bin/list-owned-objects.sh

@@ -0,0 +1 @@
+kubectl -n c3 get secret,configmap,service,deployment,pvc -o jsonpath={range .items[?(@.metadata.ownerReferences[*].uid=="1358c4e5-915c-4652-9f91-e991360fd760")]}{.kind}{"/"}{.metadata.name}{"\n"}{end}
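
The UID in the jsonpath filter is specific to one owning object. A sketch for looking it up first, assuming the owner is the CephCluster in the 'c3' namespace:

  kubectl -n c3 get cephcluster -o jsonpath='{.items[0].metadata.uid}'

and then substitute that value into the ownerReferences query above.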

+ 1 - 0
dezendorf/homelab/talos/bin/remove-finalizers.sh

@@ -0,0 +1 @@
+kubectl -n clean-ns patch cephcluster/clean-ns --type json --patch='[ { "op":"remove","path": "/metadata/finalizers"} ]'
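
Before force-removing finalizers, the ones still set can be inspected, for example:

  kubectl -n clean-ns get cephcluster clean-ns -o jsonpath='{.metadata.finalizers}'

Stripping finalizers skips whatever cleanup the controller would normally perform, so it is best reserved for objects whose operator is already gone.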

+ 5 - 0
dezendorf/homelab/talos/bin/wipe-all

@@ -0,0 +1,5 @@
+for i in $(kubectl get nodes | grep -v STATUS | grep -v control-plane | grep -v NotReady | awk '{print $1}') ; do ./bin/wipe-node $i ; done
+
+NODES=()
+for i in $(kubectl get nodes -o wide | grep -v STATUS | grep -v control-plane | grep -v NotReady | awk '{print " -n " $6}') ; do NODES+=($i);  done
+talosctl reboot ${NODES[@]}

+ 42 - 0
dezendorf/homelab/talos/bin/wipe-node

@@ -0,0 +1,42 @@
+#!/bin/bash
+
+if [ $# -ne 1 ]; then
+  echo "Incorrect number of arguments. Usage: $0 <node-name>" >&2
+  exit 1
+fi
+
+NODE=$1
+
+echo "Resetting ${NODE}"
+
+NODEIP=$(kubectl get nodes -o wide | grep ${NODE} | grep -v "INTERNAL-IP" | awk '{print $6}')
+DISKS=$(talosctl -n ${NODEIP} disks | grep usb | awk '{print $2}' | grep -v DEV | grep -v "/dev/sdg" | grep -v "/dev/mmc")
+
+echo "Disks: $DISKS"
+echo "Node IP: $NODEIP"
+
+for d in $DISKS ; do
+
+echo "Creating disk-wipe pod to clear $d on $NODE (${NODEIP})"
+cat <<EOF | kubectl apply -f -
+apiVersion: v1
+kind: Pod
+metadata:
+  name: disk-wipe-${NODE}
+spec:
+  restartPolicy: Never
+  nodeName: ${NODE}
+  containers:
+  - name: disk-wipe-${NODE}
+    image: busybox
+    securityContext:
+      privileged: true
+    command: ["/bin/sh", "-c", "dd if=/dev/zero bs=1M count=100 oflag=direct of=${d}"]
+EOF
+
+kubectl wait --timeout=900s --for=jsonpath='{.status.phase}=Succeeded' pod disk-wipe-${NODE}
+
+kubectl delete pod disk-wipe-${NODE}
+
+done
+
+#talosctl -n ${NODEIP} reboot
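
A possible invocation (the node name is a placeholder); the candidate disks can be reviewed first with the same talosctl call the script uses:

  talosctl -n <node-ip> disks
  ./bin/wipe-node talos-worker-1

Each matching disk has its first 100 MiB zeroed by a privileged busybox pod pinned to the node, so confirm the DISKS filter really excludes the OS disk before running it.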

+ 730 - 0
dezendorf/homelab/talos/ceph-values.yaml

@@ -0,0 +1,730 @@
+# Default values for a single rook-ceph cluster
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+# -- Namespace of the main rook operator
+operatorNamespace: rook-ceph
+
+# -- The metadata.name of the CephCluster CR
+# @default -- The same as the namespace
+clusterName:
+
+# -- Optional override of the target kubernetes version
+kubeVersion:
+
+# -- Cluster ceph.conf override
+configOverride:
+# configOverride: |
+#   [global]
+#   mon_allow_pool_delete = true
+#   osd_pool_default_size = 3
+#   osd_pool_default_min_size = 2
+
+# Installs a debugging toolbox deployment
+toolbox:
+  # -- Enable Ceph debugging pod deployment. See [toolbox](../Troubleshooting/ceph-toolbox.md)
+  enabled: true
+  # -- Toolbox image, defaults to the image used by the Ceph cluster
+  image: #quay.io/ceph/ceph:v17.2.6
+  # -- Toolbox tolerations
+  tolerations: []
+  # -- Toolbox affinity
+  affinity: {}
+  # -- Toolbox container security context
+  containerSecurityContext:
+    runAsNonRoot: true
+    runAsUser: 2016
+    runAsGroup: 2016
+    capabilities:
+      drop: ["ALL"]
+  # -- Toolbox resources
+  resources:
+    limits:
+      cpu: "500m"
+      memory: "1Gi"
+    requests:
+      cpu: "100m"
+      memory: "128Mi"
+  # -- Set the priority class for the toolbox if desired
+  priorityClassName:
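+  # Usage sketch once the toolbox pod is running (assumes the chart's default
+  # deployment name, rook-ceph-tools, in the rook-ceph namespace):
+  #   kubectl -n rook-ceph exec -it deploy/rook-ceph-tools -- ceph status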
+
+monitoring:
+  # -- Enable Prometheus integration, will also create necessary RBAC rules to allow Operator to create ServiceMonitors.
+  # Monitoring requires Prometheus to be pre-installed
+  enabled: false
+  # -- Whether to create the Prometheus rules for Ceph alerts
+  createPrometheusRules: false
+  # -- The namespace in which to create the prometheus rules, if different from the rook cluster namespace.
+  # If you have multiple rook-ceph clusters in the same k8s cluster, choose the same namespace (ideally, namespace with prometheus
+  # deployed) to set rulesNamespaceOverride for all the clusters. Otherwise, you will get duplicate alerts with multiple alert definitions.
+  rulesNamespaceOverride:
+  # Monitoring settings for external clusters:
+  # externalMgrEndpoints: <list of endpoints>
+  # externalMgrPrometheusPort: <port>
+  # allow adding custom labels and annotations to the prometheus rule
+  prometheusRule:
+    # -- Labels applied to PrometheusRule
+    labels: {}
+    # -- Annotations applied to PrometheusRule
+    annotations: {}
+
+# -- Create & use PSP resources. Set this to the same value as the rook-ceph chart.
+pspEnable: false
+
+# imagePullSecrets option allow to pull docker images from private docker registry. Option will be passed to all service accounts.
+# imagePullSecrets:
+# - name: my-registry-secret
+
+# All values below are taken from the CephCluster CRD
+# -- Cluster configuration.
+# @default -- See [below](#ceph-cluster-spec)
+cephClusterSpec:
+  # This cluster spec example is for a converged cluster where all the Ceph daemons are running locally,
+  # as in the host-based example (cluster.yaml). For a different configuration such as a
+  # PVC-based cluster (cluster-on-pvc.yaml), external cluster (cluster-external.yaml),
+  # or stretch cluster (cluster-stretched.yaml), replace this entire `cephClusterSpec`
+  # with the specs from those examples.
+
+  # For more details, check https://rook.io/docs/rook/v1.10/CRDs/Cluster/ceph-cluster-crd/
+  cephVersion:
+    # The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw).
+    # v16 is Pacific, v17 is Quincy.
+    # RECOMMENDATION: In production, use a specific version tag instead of the general v16 flag, which pulls the latest release and could result in different
+    # versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/.
+    # If you want to be more precise, you can always use a timestamp tag such as quay.io/ceph/ceph:v15.2.11-20200419
+    # This tag might not contain a new Ceph version, just security fixes from the underlying operating system, which will reduce vulnerabilities
+    image: quay.io/ceph/ceph:v17.2.6
+    # Whether to allow unsupported versions of Ceph. Currently `pacific`, `quincy`, and `reef` are supported.
+    # Future versions such as `squid` (v19) would require this to be set to `true`.
+    # Do not set to true in production.
+    allowUnsupported: false
+
+  # The path on the host where configuration files will be persisted. Must be specified.
+  # Important: if you reinstall the cluster, make sure you delete this directory from each host or else the mons will fail to start on the new cluster.
+  # In Minikube, the '/data' directory is configured to persist across reboots. Use "/data/rook" in Minikube environment.
+  dataDirHostPath: /var/lib/rook
+
+  # Whether or not upgrade should continue even if a check fails
+  # This means Ceph's status could be degraded and we don't recommend upgrading but you might decide otherwise
+  # Use at your OWN risk
+  # To understand Rook's upgrade process of Ceph, read https://rook.io/docs/rook/v1.10/Upgrade/ceph-upgrade/
+  skipUpgradeChecks: false
+
+  # Whether or not continue if PGs are not clean during an upgrade
+  continueUpgradeAfterChecksEvenIfNotHealthy: false
+
+  # WaitTimeoutForHealthyOSDInMinutes defines the time (in minutes) the operator would wait before an OSD can be stopped for upgrade or restart.
+  # If the timeout is exceeded and the OSD is not ok to stop, the operator will skip the upgrade for the current OSD and proceed with the next one
+  # if `continueUpgradeAfterChecksEvenIfNotHealthy` is `false`. If `continueUpgradeAfterChecksEvenIfNotHealthy` is `true`, the operator will
+  # continue with the upgrade of an OSD even if it is not ok to stop after the timeout. This timeout won't be applied if `skipUpgradeChecks` is `true`.
+  # The default wait timeout is 10 minutes.
+  waitTimeoutForHealthyOSDInMinutes: 10
+
+  mon:
+    # Set the number of mons to be started. Generally recommended to be 3.
+    # For highest availability, an odd number of mons should be specified.
+    count: 3
+    # The mons should be on unique nodes. For production, at least 3 nodes are recommended for this reason.
+    # Mons should only be allowed on the same node for test environments where data loss is acceptable.
+    allowMultiplePerNode: false
+
+  mgr:
+    # When higher availability of the mgr is needed, increase the count to 2.
+    # In that case, one mgr will be active and one in standby. When Ceph updates which
+    # mgr is active, Rook will update the mgr services to match the active mgr.
+    count: 2
+    allowMultiplePerNode: false
+    modules:
+      # Several modules should not need to be included in this list. The "dashboard" and "monitoring" modules
+      # are already enabled by other settings in the cluster CR.
+      - name: pg_autoscaler
+        enabled: true
+
+  # enable the ceph dashboard for viewing cluster status
+  dashboard:
+    enabled: true
+    # serve the dashboard under a subpath (useful when you are accessing the dashboard via a reverse proxy)
+    urlPrefix: /ceph-dashboard
+    # serve the dashboard at the given port.
+    port: 8443
+    # Serve the dashboard using SSL (if using ingress to expose the dashboard and `ssl: true` you need to set
+    # the corresponding "backend protocol" annotation(s) for your ingress controller of choice)
+    ssl: false
+    # ssl: true
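+    # Without an ingress, the dashboard can be reached ad hoc via a port-forward
+    # (assumes the default mgr dashboard service name, rook-ceph-mgr-dashboard,
+    # and the port configured above):
+    #   kubectl -n rook-ceph port-forward svc/rook-ceph-mgr-dashboard 8443:8443
+    #   then browse to http://localhost:8443/ceph-dashboard/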
+
+  # Network configuration, see: https://github.com/rook/rook/blob/master/Documentation/CRDs/ceph-cluster-crd.md#network-configuration-settings
+  network:
+    connections:
+      # Whether to encrypt the data in transit across the wire to prevent eavesdropping the data on the network.
+      # The default is false. When encryption is enabled, all communication between clients and Ceph daemons, or between Ceph daemons will be encrypted.
+      # When encryption is not enabled, clients still establish a strong initial authentication and data integrity is still validated with a crc check.
+      # IMPORTANT: Encryption requires the 5.11 kernel for the latest nbd and cephfs drivers. Alternatively for testing only,
+      # you can set the "mounter: rbd-nbd" in the rbd storage class, or "mounter: fuse" in the cephfs storage class.
+      # The nbd and fuse drivers are *not* recommended in production since restarting the csi driver pod will disconnect the volumes.
+      encryption:
+        enabled: false
+      # Whether to compress the data in transit across the wire. The default is false.
+      # Requires Ceph Quincy (v17) or newer. Also see the kernel requirements above for encryption.
+      compression:
+        enabled: false
+      # Whether to require communication over msgr2. If true, the msgr v1 port (6789) will be disabled
+      # and clients will be required to connect to the Ceph cluster with the v2 port (3300).
+      # Requires a kernel that supports msgr v2 (kernel 5.11 or CentOS 8.4 or newer).
+      requireMsgr2: false
+    # enable host networking
+    provider: host
+  #   # EXPERIMENTAL: enable the Multus network provider
+  #   provider: multus
+  #   selectors:
+  #     # The selector keys are required to be `public` and `cluster`.
+  #     # Based on the configuration, the operator will do the following:
+  #     #   1. if only the `public` selector key is specified both public_network and cluster_network Ceph settings will listen on that interface
+  #     #   2. if both `public` and `cluster` selector keys are specified the first one will point to 'public_network' flag and the second one to 'cluster_network'
+  #     #
+  #     # In order to work, each selector value must match a NetworkAttachmentDefinition object in Multus
+  #     #
+  #     # public: public-conf --> NetworkAttachmentDefinition object name in Multus
+  #     # cluster: cluster-conf --> NetworkAttachmentDefinition object name in Multus
+  #   # Provide internet protocol version. IPv6, IPv4 or empty string are valid options. Empty string would mean IPv4
+  #   ipFamily: "IPv6"
+  #   # Ceph daemons to listen on both IPv4 and Ipv6 networks
+  #   dualStack: false
+
+  # enable the crash collector for ceph daemon crash collection
+  crashCollector:
+    disable: true
+    # Uncomment daysToRetain to prune ceph crash entries older than the
+    # specified number of days.
+    # daysToRetain: 30
+
+  # enable log collector, daemons will log on files and rotate
+  logCollector:
+    enabled: true
+    periodicity: daily # one of: hourly, daily, weekly, monthly
+    maxLogSize: 500M # SUFFIX may be 'M' or 'G'. Must be at least 1M.
+
+  # automate [data cleanup process](https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/ceph-teardown.md#delete-the-data-on-hosts) in cluster destruction.
+  cleanupPolicy:
+    # Since cluster cleanup is destructive to data, confirmation is required.
+    # To destroy all Rook data on hosts during uninstall, confirmation must be set to "yes-really-destroy-data".
+    # This value should only be set when the cluster is about to be deleted. After the confirmation is set,
+    # Rook will immediately stop configuring the cluster and only wait for the delete command.
+    # If the empty string is set, Rook will not destroy any data on hosts during uninstall.
+    confirmation: ""
+    # sanitizeDisks represents settings for sanitizing OSD disks on cluster deletion
+    sanitizeDisks:
+      # method indicates if the entire disk should be sanitized or simply ceph's metadata
+      # in both case, re-install is possible
+      # possible choices are 'complete' or 'quick' (default)
+      method: quick
+      # dataSource indicate where to get random bytes from to write on the disk
+      # possible choices are 'zero' (default) or 'random'
+      # using random sources will consume entropy from the system and will take much more time than the zero source
+      dataSource: zero
+      # iteration overwrite N times instead of the default (1)
+      # takes an integer value
+      iteration: 1
+    # allowUninstallWithVolumes defines how the uninstall should be performed
+    # If set to true, cephCluster deletion does not wait for the PVs to be deleted.
+    allowUninstallWithVolumes: false
+
+  # To control where various services will be scheduled by kubernetes, use the placement configuration sections below.
+  # The example under 'all' would have all services scheduled on kubernetes nodes labeled with 'role=storage-node' and
+  # tolerate taints with a key of 'storage-node'.
+  placement:
+    all:
+      nodeAffinity:
+        requiredDuringSchedulingIgnoredDuringExecution:
+          nodeSelectorTerms:
+            - matchExpressions:
+              - key: role
+                operator: In
+                values:
+                - storage-node
+      podAffinity:
+      podAntiAffinity:
+      topologySpreadConstraints:
+      tolerations:
+      - key: storage-node
+        operator: Exists
+    # The above placement information can also be specified for mon, osd, and mgr components
+    mon:
+      nodeAffinity:
+        requiredDuringSchedulingIgnoredDuringExecution:
+          nodeSelectorTerms:
+            - matchExpressions:
+              - key: ceph-node-type
+                operator: In
+                values:
+                - mon
+      podAffinity:
+      podAntiAffinity:
+      topologySpreadConstraints:
+      tolerations:
+      - key: mon
+        operator: Exists
+    # Monitor deployments may contain an anti-affinity rule for avoiding monitor
+    # collocation on the same node. This is a required rule when host network is used
+    # or when AllowMultiplePerNode is false. Otherwise this anti-affinity rule is a
+    # preferred rule with weight: 50.
+    osd:
+      nodeAffinity:
+        requiredDuringSchedulingIgnoredDuringExecution:
+          nodeSelectorTerms:
+            - matchExpressions:
+              - key: ceph-node-type
+                operator: In
+                values:
+                - osd
+      podAffinity:
+      podAntiAffinity:
+      topologySpreadConstraints:
+      tolerations:
+      - key: osd
+        operator: Exists
+    mgr:
+      nodeAffinity:
+        requiredDuringSchedulingIgnoredDuringExecution:
+          nodeSelectorTerms:
+            - matchExpressions:
+              - key: ceph-node-type
+                operator: In
+                values:
+                - mon
+      podAffinity:
+      podAntiAffinity:
+      topologySpreadConstraints:
+      tolerations:
+      - key: mon
+        operator: Exists
+  #   cleanup:
+
+  # annotations:
+  #   all:
+  #   mon:
+  #   osd:
+  #   cleanup:
+  #   prepareosd:
+  #   # If no mgr annotations are set, prometheus scrape annotations will be set by default.
+  #   mgr:
+
+  # labels:
+  #   all:
+  #   mon:
+  #   osd:
+  #   cleanup:
+  #   mgr:
+  #   prepareosd:
+  #   # monitoring is a list of key-value pairs. It is injected into all the monitoring resources created by operator.
+  #   # These labels can be passed as LabelSelector to Prometheus
+  #   monitoring:
+
+  resources:
+    mgr:
+      limits:
+        cpu: "1000m"
+        memory: "1Gi"
+      requests:
+        cpu: "500m"
+        memory: "512Mi"
+    mon:
+      limits:
+        cpu: "2000m"
+        memory: "2Gi"
+      requests:
+        cpu: "1000m"
+        memory: "1Gi"
+    osd:
+      limits:
+        cpu: "2000m"
+        memory: "2560Mi"
+      requests:
+        cpu: "1000m"
+        memory: "2560Mi"
+    prepareosd:
+      # limits: It is not recommended to set limits on the OSD prepare job
+      #         since it's a one-time burst for memory that must be allowed to
+      #         complete without an OOM kill.  Note however that if a k8s
+      #         limitRange guardrail is defined external to Rook, the lack of
+      #         a limit here may result in a sync failure, in which case a
+      #         limit should be added.  1200Mi may suffice for up to 15Ti
+      #         OSDs ; for larger devices 2Gi may be required.
+      #         cf. https://github.com/rook/rook/pull/11103
+      requests:
+        cpu: "500m"
+        memory: "50Mi"
+    mgr-sidecar:
+      limits:
+        cpu: "500m"
+        memory: "100Mi"
+      requests:
+        cpu: "100m"
+        memory: "40Mi"
+    crashcollector:
+      limits:
+        cpu: "500m"
+        memory: "60Mi"
+      requests:
+        cpu: "100m"
+        memory: "60Mi"
+    logcollector:
+      limits:
+        cpu: "500m"
+        memory: "1Gi"
+      requests:
+        cpu: "100m"
+        memory: "100Mi"
+    cleanup:
+      limits:
+        cpu: "500m"
+        memory: "1Gi"
+      requests:
+        cpu: "500m"
+        memory: "100Mi"
+    exporter:
+      limits:
+        cpu: "250m"
+        memory: "128Mi"
+      requests:
+        cpu: "50m"
+        memory: "50Mi"
+
+  # The option to automatically remove OSDs that are out and are safe to destroy.
+  removeOSDsIfOutAndSafeToRemove: false
+
+  # priority classes to apply to ceph resources
+  priorityClassNames:
+    mon: system-node-critical
+    osd: system-node-critical
+    mgr: system-cluster-critical
+
+  storage: # cluster level storage configuration and selection
+    useAllNodes: true
+    useAllDevices: true
+    # deviceFilter:
+    # config:
+    #   crushRoot: "custom-root" # specify a non-default root label for the CRUSH map
+    #   metadataDevice: "md0" # specify a non-rotational storage so ceph-volume will use it as block db device of bluestore.
+    #   databaseSizeMB: "1024" # uncomment if the disks are smaller than 100 GB
+    #   osdsPerDevice: "1" # this value can be overridden at the node or device level
+    #   encryptedDevice: "true" # the default value for this option is "false"
+    # # Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named
+    # # nodes below will be used as storage resources. Each node's 'name' field should match their 'kubernetes.io/hostname' label.
+    # nodes:
+    #   - name: "172.17.4.201"
+    #     devices: # specific devices to use for storage can be specified for each node
+    #       - name: "sdb"
+    #       - name: "nvme01" # multiple osds can be created on high performance devices
+    #         config:
+    #           osdsPerDevice: "5"
+    #       - name: "/dev/disk/by-id/ata-ST4000DM004-XXXX" # devices can be specified using full udev paths
+    #     config: # configuration can be specified at the node level which overrides the cluster level config
+    #   - name: "172.17.4.301"
+    #     deviceFilter: "^sd."
+
+  # The section for configuring management of daemon disruptions during upgrade or fencing.
+  disruptionManagement:
+    # If true, the operator will create and manage PodDisruptionBudgets for OSD, Mon, RGW, and MDS daemons. OSD PDBs are managed dynamically
+    # via the strategy outlined in the [design](https://github.com/rook/rook/blob/master/design/ceph/ceph-managed-disruptionbudgets.md). The operator will
+    # block eviction of OSDs by default and unblock them safely when drains are detected.
+    managePodBudgets: true
+    # A duration in minutes that determines how long an entire failureDomain like `region/zone/host` will be held in `noout` (in addition to the
+    # default DOWN/OUT interval) when it is draining. This is only relevant when  `managePodBudgets` is `true`. The default value is `30` minutes.
+    osdMaintenanceTimeout: 30
+    # A duration in minutes that the operator will wait for the placement groups to become healthy (active+clean) after a drain was completed and OSDs came back up.
+    # The operator will continue with the next drain if the timeout is exceeded. It only works if `managePodBudgets` is `true`.
+    # No values or 0 means that the operator will wait until the placement groups are healthy before unblocking the next drain.
+    pgHealthCheckTimeout: 0
+
+  # Configure the healthcheck and liveness probes for ceph pods.
+  # Valid values for daemons are 'mon', 'osd', 'status'
+  healthCheck:
+    daemonHealth:
+      mon:
+        disabled: false
+        interval: 45s
+      osd:
+        disabled: false
+        interval: 60s
+      status:
+        disabled: false
+        interval: 60s
+    # Change pod liveness probe, it works for all mon, mgr, and osd pods.
+    livenessProbe:
+      mon:
+        disabled: false
+      mgr:
+        disabled: false
+      osd:
+        disabled: false
+
+ingress:
+  # -- Enable an ingress for the ceph-dashboard
+  dashboard:
+    {}
+    # annotations:
+    #   external-dns.alpha.kubernetes.io/hostname: dashboard.example.com
+    #   nginx.ingress.kubernetes.io/rewrite-target: /ceph-dashboard/$2
+    # If the dashboard has ssl: true the following will make sure the NGINX Ingress controller can expose the dashboard correctly
+    #   nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
+    #   nginx.ingress.kubernetes.io/server-snippet: |
+    #     proxy_ssl_verify off;
+    # host:
+    #   name: dashboard.example.com
+    #   path: "/ceph-dashboard(/|$)(.*)"
+    # tls:
+    # - hosts:
+    #     - dashboard.example.com
+    #   secretName: testsecret-tls
+    ## Note: Only one of ingress class annotation or the `ingressClassName:` can be used at a time
+    ## to set the ingress class
+    # ingressClassName: nginx
+
+# -- A list of CephBlockPool configurations to deploy
+# @default -- See [below](#ceph-block-pools)
+cephBlockPools:
+  - name: ceph-blockpool
+    # see https://github.com/rook/rook/blob/master/Documentation/CRDs/Block-Storage/ceph-block-pool-crd.md#spec for available configuration
+    spec:
+      failureDomain: host
+      replicated:
+        size: 3
+      # Enables collecting RBD per-image IO statistics by enabling dynamic OSD performance counters. Defaults to false.
+      # For reference: https://docs.ceph.com/docs/master/mgr/prometheus/#rbd-io-statistics
+      # enableRBDStats: true
+    storageClass:
+      enabled: true
+      name: ceph-block
+      isDefault: true
+      reclaimPolicy: Delete
+      allowVolumeExpansion: true
+      volumeBindingMode: "Immediate"
+      mountOptions: []
+      # see https://kubernetes.io/docs/concepts/storage/storage-classes/#allowed-topologies
+      allowedTopologies: []
+      #        - matchLabelExpressions:
+      #            - key: rook-ceph-role
+      #              values:
+      #                - storage-node
+      # see https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/Block-Storage-RBD/block-storage.md#provision-storage for available configuration
+      parameters:
+        # (optional) mapOptions is a comma-separated list of map options.
+        # For krbd options refer
+        # https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options
+        # For nbd options refer
+        # https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options
+        # mapOptions: lock_on_read,queue_depth=1024
+
+        # (optional) unmapOptions is a comma-separated list of unmap options.
+        # For krbd options refer
+        # https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options
+        # For nbd options refer
+        # https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options
+        # unmapOptions: force
+
+        # RBD image format. Defaults to "2".
+        imageFormat: "2"
+
+        # RBD image features, equivalent to OR'd bitfield value: 63
+        # Available for imageFormat: "2". Older releases of CSI RBD
+        # support only the `layering` feature. The Linux kernel (KRBD) supports the
+        # full feature complement as of 5.4
+        imageFeatures: layering
+
+        # These secrets contain Ceph admin credentials.
+        csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
+        csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
+        csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
+        csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
+        csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
+        csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
+        # Specify the filesystem type of the volume. If not specified, csi-provisioner
+        # will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock
+        # in hyperconverged settings where the volume is mounted on the same node as the osds.
+        csi.storage.k8s.io/fstype: ext4
+
+# -- A list of CephFileSystem configurations to deploy
+# @default -- See [below](#ceph-file-systems)
+cephFileSystems:
+  - name: ceph-filesystem
+    # see https://github.com/rook/rook/blob/master/Documentation/CRDs/Shared-Filesystem/ceph-filesystem-crd.md#filesystem-settings for available configuration
+    spec:
+      metadataPool:
+        replicated:
+          size: 3
+      dataPools:
+        - failureDomain: host
+          replicated:
+            size: 3
+          # Optional and highly recommended, 'data0' by default, see https://github.com/rook/rook/blob/master/Documentation/CRDs/Shared-Filesystem/ceph-filesystem-crd.md#pools
+          name: data0
+      metadataServer:
+        activeCount: 1
+        activeStandby: true
+        resources:
+          limits:
+            cpu: "2000m"
+            memory: "4Gi"
+          requests:
+            cpu: "1000m"
+            memory: "4Gi"
+        priorityClassName: system-cluster-critical
+    storageClass:
+      enabled: true
+      isDefault: false
+      name: ceph-filesystem
+      # (Optional) specify a data pool to use, must be the name of one of the data pools above, 'data0' by default
+      pool: data0
+      reclaimPolicy: Delete
+      allowVolumeExpansion: true
+      volumeBindingMode: "Immediate"
+      mountOptions: []
+      # see https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/Shared-Filesystem-CephFS/filesystem-storage.md#provision-storage for available configuration
+      parameters:
+        # The secrets contain Ceph admin credentials.
+        csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
+        csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
+        csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
+        csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
+        csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
+        csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
+        # Specify the filesystem type of the volume. If not specified, csi-provisioner
+        # will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock
+        # in hyperconverged settings where the volume is mounted on the same node as the osds.
+        csi.storage.k8s.io/fstype: ext4
+
+# -- Settings for the filesystem snapshot class
+# @default -- See [CephFS Snapshots](../Storage-Configuration/Ceph-CSI/ceph-csi-snapshot.md#cephfs-snapshots)
+cephFileSystemVolumeSnapshotClass:
+  enabled: false
+  name: ceph-filesystem
+  isDefault: true
+  deletionPolicy: Delete
+  annotations: {}
+  labels: {}
+  # see https://rook.io/docs/rook/v1.10/Storage-Configuration/Ceph-CSI/ceph-csi-snapshot/#cephfs-snapshots for available configuration
+  parameters: {}
+
+# -- Settings for the block pool snapshot class
+# @default -- See [RBD Snapshots](../Storage-Configuration/Ceph-CSI/ceph-csi-snapshot.md#rbd-snapshots)
+cephBlockPoolsVolumeSnapshotClass:
+  enabled: false
+  name: ceph-block
+  isDefault: false
+  deletionPolicy: Delete
+  annotations: {}
+  labels: {}
+  # see https://rook.io/docs/rook/v1.10/Storage-Configuration/Ceph-CSI/ceph-csi-snapshot/#rbd-snapshots for available configuration
+  parameters: {}
+
+# -- A list of CephObjectStore configurations to deploy
+# @default -- See [below](#ceph-object-stores)
+cephObjectStores:
+  - name: ceph-objectstore
+    # see https://github.com/rook/rook/blob/master/Documentation/CRDs/Object-Storage/ceph-object-store-crd.md#object-store-settings for available configuration
+    spec:
+      metadataPool:
+        failureDomain: host
+        replicated:
+          size: 3
+      dataPool:
+        failureDomain: host
+        erasureCoded:
+          dataChunks: 2
+          codingChunks: 1
+      preservePoolsOnDelete: true
+      gateway:
+        port: 80
+        resources:
+          limits:
+            cpu: "2000m"
+            memory: "2Gi"
+          requests:
+            cpu: "1000m"
+            memory: "1Gi"
+        # securePort: 443
+        # sslCertificateRef:
+        instances: 1
+        priorityClassName: system-cluster-critical
+    storageClass:
+      enabled: true
+      name: ceph-bucket
+      reclaimPolicy: Delete
+      volumeBindingMode: "Immediate"
+      # see https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/Object-Storage-RGW/ceph-object-bucket-claim.md#storageclass for available configuration
+      parameters:
+        # note: objectStoreNamespace and objectStoreName are configured by the chart
+        region: us-east-1
+    ingress:
+      # Enable an ingress for the ceph-objectstore
+      enabled: false
+      # annotations: {}
+      # host:
+      #   name: objectstore.example.com
+      #   path: /
+      # tls:
+      # - hosts:
+      #     - objectstore.example.com
+      #   secretName: ceph-objectstore-tls
+      # ingressClassName: nginx
+# cephECBlockPools are disabled by default; remove the comments and set the desired values to enable them
+#cephECBlockPools:
+#  # For erasure coding, a replicated metadata pool is required.
+#  # https://rook.io/docs/rook/latest/CRDs/Shared-Filesystem/ceph-filesystem-crd/#erasure-coded
+#  - name: ec-metadata-pool
+#    # see https://github.com/rook/rook/blob/master/Documentation/CRDs/Block-Storage/ceph-block-pool-crd.md#spec for available configuration
+#    spec:
+#      replicated:
+#        size: 2
+#  - name: ec-data-pool
+#    spec:
+#      failureDomain: osd
+#      erasureCoded:
+#        dataChunks: 2
+#        codingChunks: 1
+#      deviceClass: hdd
+
+# cephECStorageClass is also disabled by default; remove the comments and set the desired values to enable it.
+# If cephECBlockPools is uncommented, the comments on cephECStorageClass must be removed as well.
+#cephECStorageClass:
+#  name: rook-ceph-block
+#  # Change "rook-ceph" provisioner prefix to match the operator namespace if needed
+#  provisioner: rook-ceph.rbd.csi.ceph.com # driver:namespace:operator
+#  parameters:
+#    # clusterID is the namespace where the rook cluster is running
+#    # If you change this namespace, also change the namespace below where the secret namespaces are defined
+#    clusterID: rook-ceph # namespace:cluster
+#
+#    # If you want to use erasure coded pool with RBD, you need to create
+#    # two pools. one erasure coded and one replicated.
+#    # You need to specify the replicated pool here in the `pool` parameter, it is
+#    # used for the metadata of the images.
+#    # The erasure coded pool must be set as the `dataPool` parameter below.
+#    dataPool: ec-data-pool
+#    pool: ec-metadata-pool
+#
+#    # (optional) mapOptions is a comma-separated list of map options.
+#    # For krbd options refer
+#    # https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options
+#    # For nbd options refer
+#    # https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options
+#    # mapOptions: lock_on_read,queue_depth=1024
+#
+#    # (optional) unmapOptions is a comma-separated list of unmap options.
+#    # For krbd options refer
+#    # https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options
+#    # For nbd options refer
+#    # https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options
+#    # unmapOptions: force
+#
+#    # RBD image format. Defaults to "2".
+#    imageFormat: "2"
+#
+#    # RBD image features, equivalent to OR'd bitfield value: 63
+#    # Available for imageFormat: "2". Older releases of CSI RBD
+#    # support only the `layering` feature. The Linux kernel (KRBD) supports the
+#    # full feature complement as of 5.4
+#    # imageFeatures: layering,fast-diff,object-map,deep-flatten,exclusive-lock
+#    imageFeatures: layering
+#  allowVolumeExpansion: true
+#  reclaimPolicy: Delete

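The block pools, filesystem, and object store defined above correspond to the values consumed by the rook-ceph-cluster Helm chart. A minimal sketch of installing and verifying a cluster release with such a values file follows; the namespace, release name, and cluster-values.yaml filename are placeholders rather than anything taken from this commit.

# Sketch only: adjust the namespace, release name, and values filename to match the environment.
helm repo add rook-release https://charts.rook.io/release
helm install --namespace rook-ceph-ns rook-ceph-c rook-release/rook-ceph-cluster \
  --set operatorNamespace=rook-ceph-ns -f cluster-values.yaml

# Watch the CephCluster converge, then confirm the storage classes defined above exist.
kubectl --namespace rook-ceph-ns get cephcluster -w
kubectl get storageclass ceph-block ceph-filesystem ceph-bucket
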
+ 523 - 0
dezendorf/homelab/talos/nodeconfig/controlplane.yaml

@@ -0,0 +1,523 @@
+version: v1alpha1 # Indicates the schema used to decode the contents.
+debug: false # Enable verbose logging to the console.
+persist: true
+# Provides machine specific configuration options.
+machine:
+    type: controlplane # Defines the role of the machine within the cluster.
+    token: u7240y.plogoeorz04f09sw # The `token` is used by a machine to join the PKI of the cluster.
+    # The root certificate authority of the PKI.
+    ca:
+        crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJQakNCOGFBREFnRUNBaEFCdE5hMWNDcXBMUTl5RHh4Vm1hVmJNQVVHQXl0bGNEQVFNUTR3REFZRFZRUUsKRXdWMFlXeHZjekFlRncweU16QTVNRE14TmpNMk5UVmFGdzB6TXpBNE16RXhOak0yTlRWYU1CQXhEakFNQmdOVgpCQW9UQlhSaGJHOXpNQ293QlFZREsyVndBeUVBQW5qWmpDRmRpdTIvNUJNSlI2QWRWTWhwUEQ5MzgxTnYrWnA5Cm1mRndFTENqWVRCZk1BNEdBMVVkRHdFQi93UUVBd0lDaERBZEJnTlZIU1VFRmpBVUJnZ3JCZ0VGQlFjREFRWUkKS3dZQkJRVUhBd0l3RHdZRFZSMFRBUUgvQkFVd0F3RUIvekFkQmdOVkhRNEVGZ1FVMFU0OVEzK2lZY3k1TTBUSwphWnVBUjIwUlRNVXdCUVlESzJWd0EwRUF6TEtTdG1FQ1BHNzZpYUp5Z3Nmdzc1Z0tqVEVmODFCS0NOVFBBWkJzCnM2alRySktlRVA0SHpzOFIvZStyTGc4ZSszNDZWSXhoY1FMQWVKV21qc2VQRGc9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
+        key: LS0tLS1CRUdJTiBFRDI1NTE5IFBSSVZBVEUgS0VZLS0tLS0KTUM0Q0FRQXdCUVlESzJWd0JDSUVJRExDOExUQVZ5bVl6Mk02dXU3VnVsUmNUZk1WbHBtdFptb1dxZUtoNjJSYgotLS0tLUVORCBFRDI1NTE5IFBSSVZBVEUgS0VZLS0tLS0K
+    # Extra certificate subject alternative names for the machine's certificate.
+    certSANs: []
+    #   # Uncomment this to enable SANs.
+    #   - 10.0.0.10
+    #   - 172.16.0.10
+    #   - 192.168.0.10
+
+    # Used to provide additional options to the kubelet.
+    kubelet:
+        image: ghcr.io/siderolabs/kubelet:v1.28.0 # The `image` field is an optional reference to an alternative kubelet image.
+        defaultRuntimeSeccompProfileEnabled: true # Enable container runtime default Seccomp profile.
+        disableManifestsDirectory: true # The `disableManifestsDirectory` field disables loading static pod manifests from the /etc/kubernetes/manifests directory.
+        
+        # # The `ClusterDNS` field is an optional reference to an alternative kubelet clusterDNS ip list.
+        # clusterDNS:
+        #     - 10.96.0.10
+        #     - 169.254.2.53
+
+        # # The `extraArgs` field is used to provide additional flags to the kubelet.
+        # extraArgs:
+        #     key: value
+
+        # # The `extraMounts` field is used to add additional mounts to the kubelet container.
+        # extraMounts:
+        #     - destination: /var/lib/example
+        #       type: bind
+        #       source: /var/lib/example
+        #       options:
+        #         - bind
+        #         - rshared
+        #         - rw
+
+        # # The `extraConfig` field is used to provide kubelet configuration overrides.
+        # extraConfig:
+        #     serverTLSBootstrap: true
+
+        # # The `nodeIP` field is used to configure `--node-ip` flag for the kubelet.
+        # nodeIP:
+        #     # The `validSubnets` field configures the networks to pick kubelet node IP from.
+        #     validSubnets:
+        #         - 10.0.0.0/8
+        #         - '!10.0.0.3/32'
+        #         - fdc7::/16
+    # Provides machine specific network configuration options.
+    network:
+      interfaces:
+      - interface: eth0
+        dhcp: true
+        vip:
+          ip: 192.168.0.10
+    # # `interfaces` is used to define the network interface configuration.
+    #  interfaces:
+    #     - interface: enp0s1 # The interface name.
+    #       # Assigns static IP addresses to the interface.
+    #       addresses:
+    #         - 192.168.2.0/24
+    #       # A list of routes associated with the interface.
+    #       routes:
+    #         - network: 0.0.0.0/0 # The route's network (destination).
+    #           gateway: 192.168.2.1 # The route's gateway (if empty, creates link scope route).
+    #           metric: 1024 # The optional metric for the route.
+    #       mtu: 1500 # The interface's MTU.
+    #       
+    #       # # Picks a network device using the selector.
+
+    #       # # select a device with bus prefix 00:*.
+    #       # deviceSelector:
+    #       #     busPath: 00:* # PCI, USB bus prefix, supports matching by wildcard.
+    #       # # select a device with mac address matching `*:f0:ab` and `virtio` kernel driver.
+    #       # deviceSelector:
+    #       #     hardwareAddr: '*:f0:ab' # Device hardware address, supports matching by wildcard.
+    #       #     driver: virtio # Kernel driver, supports matching by wildcard.
+    #       # # select a device with bus prefix 00:*, a device with mac address matching `*:f0:ab` and `virtio` kernel driver.
+    #       # deviceSelector:
+    #       #     - busPath: 00:* # PCI, USB bus prefix, supports matching by wildcard.
+    #       #     - hardwareAddr: '*:f0:ab' # Device hardware address, supports matching by wildcard.
+    #       #       driver: virtio # Kernel driver, supports matching by wildcard.
+
+    #       # # Bond specific options.
+    #       # bond:
+    #       #     # The interfaces that make up the bond.
+    #       #     interfaces:
+    #       #         - enp2s0
+    #       #         - enp2s1
+    #       #     # Picks a network device using the selector.
+    #       #     deviceSelectors:
+    #       #         - busPath: 00:* # PCI, USB bus prefix, supports matching by wildcard.
+    #       #         - hardwareAddr: '*:f0:ab' # Device hardware address, supports matching by wildcard.
+    #       #           driver: virtio # Kernel driver, supports matching by wildcard.
+    #       #     mode: 802.3ad # A bond option.
+    #       #     lacpRate: fast # A bond option.
+
+    #       # # Bridge specific options.
+    #       # bridge:
+    #       #     # The interfaces that make up the bridge.
+    #       #     interfaces:
+    #       #         - enxda4042ca9a51
+    #       #         - enxae2a6774c259
+    #       #     # A bridge option.
+    #       #     stp:
+    #       #         enabled: true # Whether Spanning Tree Protocol (STP) is enabled.
+
+    #       # # Indicates if DHCP should be used to configure the interface.
+    #       # dhcp: true
+
+    #       # # DHCP specific options.
+    #       # dhcpOptions:
+    #       #     routeMetric: 1024 # The priority of all routes received via DHCP.
+
+    #       # # Wireguard specific configuration.
+
+    #       # # wireguard server example
+    #       # wireguard:
+    #       #     privateKey: ABCDEF... # Specifies a private key configuration (base64 encoded).
+    #       #     listenPort: 51111 # Specifies a device's listening port.
+    #       #     # Specifies a list of peer configurations to apply to a device.
+    #       #     peers:
+    #       #         - publicKey: ABCDEF... # Specifies the public key of this peer.
+    #       #           endpoint: 192.168.1.3 # Specifies the endpoint of this peer entry.
+    #       #           # AllowedIPs specifies a list of allowed IP addresses in CIDR notation for this peer.
+    #       #           allowedIPs:
+    #       #             - 192.168.1.0/24
+    #       # # wireguard peer example
+    #       # wireguard:
+    #       #     privateKey: ABCDEF... # Specifies a private key configuration (base64 encoded).
+    #       #     # Specifies a list of peer configurations to apply to a device.
+    #       #     peers:
+    #       #         - publicKey: ABCDEF... # Specifies the public key of this peer.
+    #       #           endpoint: 192.168.1.2:51822 # Specifies the endpoint of this peer entry.
+    #       #           persistentKeepaliveInterval: 10s # Specifies the persistent keepalive interval for this peer.
+    #       #           # AllowedIPs specifies a list of allowed IP addresses in CIDR notation for this peer.
+    #       #           allowedIPs:
+    #       #             - 192.168.1.0/24
+
+    #       # # Virtual (shared) IP address configuration.
+
+    #       # # layer2 vip example
+    #        vip:
+    #            ip: 192.168.0.10
+                
+    # # Used to statically set the nameservers for the machine.
+    # nameservers:
+    #     - 8.8.8.8
+    #     - 1.1.1.1
+
+    # # Allows for extra entries to be added to the `/etc/hosts` file
+    # extraHostEntries:
+    #     - ip: 192.168.1.100 # The IP of the host.
+    #       # The host alias.
+    #       aliases:
+    #         - example
+    #         - example.domain.tld
+
+    # # Configures KubeSpan feature.
+    #kubespan:
+    #    enabled: true # Enable the KubeSpan feature.
+
+    # Used to provide instructions for installations.
+    install:
+        disk: /dev/sda # The disk used for installations.
+        image: ghcr.io/siderolabs/installer:v1.5.1 # Allows for supplying the image used to perform the installation.
+        wipe: true # Indicates if the installation disk should be wiped at installation time.
+        
+        # # Look up disk using disk attributes like model, size, serial and others.
+        # diskSelector:
+        #     size: 4GB # Disk size.
+        #     model: WDC* # Disk model `/sys/block/<dev>/device/model`.
+        #     busPath: /pci0000:00/0000:00:17.0/ata1/host0/target0:0:0/0:0:0:0 # Disk bus path.
+
+        # # Allows for supplying extra kernel args via the bootloader.
+        # extraKernelArgs:
+        #     - talos.platform=metal
+        #     - reboot=k
+
+        # # Allows for supplying additional system extension images to install on top of base Talos image.
+        # extensions:
+        #     - image: ghcr.io/siderolabs/gvisor:20220117.0-v1.0.0 # System extension image.
+    # Used to configure the machine's container image registry mirrors.
+    registries: {}
+    # # Specifies mirror configuration for each registry host namespace.
+    # mirrors:
+    #     ghcr.io:
+    #         # List of endpoints (URLs) for registry mirrors to use.
+    #         endpoints:
+    #             - https://registry.insecure
+    #             - https://ghcr.io/v2/
+
+    # # Specifies TLS & auth configuration for HTTPS image registries.
+    # config:
+    #     registry.insecure:
+    #         # The TLS configuration for the registry.
+    #         tls:
+    #             insecureSkipVerify: true # Skip TLS server certificate verification (not recommended).
+    #             
+    #             # # Enable mutual TLS authentication with the registry.
+    #             # clientIdentity:
+    #             #     crt: LS0tIEVYQU1QTEUgQ0VSVElGSUNBVEUgLS0t
+    #             #     key: LS0tIEVYQU1QTEUgS0VZIC0tLQ==
+    #         
+    #         # # The auth configuration for this registry.
+    #         # auth:
+    #         #     username: username # Optional registry authentication.
+    #         #     password: password # Optional registry authentication.
+
+    # Features describe individual Talos features that can be switched on or off.
+    features:
+        rbac: true # Enable role-based access control (RBAC).
+        stableHostname: true # Enable stable default hostname.
+        apidCheckExtKeyUsage: true # Enable checks for extended key usage of client certificates in apid.
+        diskQuotaSupport: true # Enable XFS project quota support for EPHEMERAL partition and user disks.
+        
+        # # Configure Talos API access from Kubernetes pods.
+        # kubernetesTalosAPIAccess:
+        #     enabled: true # Enable Talos API access from Kubernetes pods.
+        #     # The list of Talos API roles which can be granted for access from Kubernetes pods.
+        #     allowedRoles:
+        #         - os:reader
+        #     # The list of Kubernetes namespaces Talos API access is available from.
+        #     allowedKubernetesNamespaces:
+        #         - kube-system
+    
+    # # Provides machine specific control plane configuration options.
+
+    # # ControlPlane definition example.
+    # controlPlane:
+    #     # Controller manager machine specific configuration options.
+    #     controllerManager:
+    #         disabled: false # Disable kube-controller-manager on the node.
+    #     # Scheduler machine specific configuration options.
+    #     scheduler:
+    #         disabled: true # Disable kube-scheduler on the node.
+
+    # # Used to provide static pod definitions to be run by the kubelet directly bypassing the kube-apiserver.
+
+    # # nginx static pod.
+    # pods:
+    #     - apiVersion: v1
+    #       kind: pod
+    #       metadata:
+    #         name: nginx
+    #       spec:
+    #         containers:
+    #             - image: nginx
+    #               name: nginx
+
+    # # Used to partition, format and mount additional disks.
+
+    # # MachineDisks list example.
+    # disks:
+    #     - device: /dev/sdb # The name of the disk to use.
+    #       # A list of partitions to create on the disk.
+    #       partitions:
+    #         - mountpoint: /var/mnt/extra # Where to mount the partition.
+    #           
+    #           # # The size of partition: either bytes or human readable representation. If `size:` is omitted, the partition is sized to occupy the full disk.
+
+    #           # # Human readable representation.
+    #           # size: 100 MB
+    #           # # Precise value in bytes.
+    #           # size: 1073741824
+
+    # # Allows the addition of user specified files.
+
+    # # MachineFiles usage example.
+    # files:
+    #     - content: '...' # The contents of the file.
+    #       permissions: 0o666 # The file's permissions in octal.
+    #       path: /tmp/file.txt # The path of the file.
+    #       op: append # The operation to use
+
+    # # The `env` field allows for the addition of environment variables.
+
+    # # Environment variables definition examples.
+    # env:
+    #     GRPC_GO_LOG_SEVERITY_LEVEL: info
+    #     GRPC_GO_LOG_VERBOSITY_LEVEL: "99"
+    #     https_proxy: http://SERVER:PORT/
+    # env:
+    #     GRPC_GO_LOG_SEVERITY_LEVEL: error
+    #     https_proxy: https://USERNAME:PASSWORD@SERVER:PORT/
+    # env:
+    #     https_proxy: http://DOMAIN\USERNAME:PASSWORD@SERVER:PORT/
+
+    # # Used to configure the machine's time settings.
+
+    # # Example configuration for cloudflare ntp server.
+    # time:
+    #     disabled: false # Indicates if the time service is disabled for the machine.
+    #     # Specifies time (NTP) servers to use for setting the system time.
+    #     servers:
+    #         - time.cloudflare.com
+    #     bootTimeout: 2m0s # Specifies the timeout when the node time is considered to be in sync unlocking the boot sequence.
+
+    # # Used to configure the machine's sysctls.
+
+    # # MachineSysctls usage example.
+    # sysctls:
+    #     kernel.domainname: talos.dev
+    #     net.ipv4.ip_forward: "0"
+
+    # # Used to configure the machine's sysfs.
+
+    # # MachineSysfs usage example.
+    # sysfs:
+    #     devices.system.cpu.cpu0.cpufreq.scaling_governor: performance
+
+    # # Machine system disk encryption configuration.
+    # systemDiskEncryption:
+    #     # Ephemeral partition encryption.
+    #     ephemeral:
+    #         provider: luks2 # Encryption provider to use for the encryption.
+    #         # Defines the encryption keys generation and storage method.
+    #         keys:
+    #             - # Deterministically generated key from the node UUID and PartitionLabel.
+    #               nodeID: {}
+    #               slot: 0 # Key slot number for LUKS2 encryption.
+    #               
+    #               # # KMS managed encryption key.
+    #               # kms:
+    #               #     endpoint: https://192.168.88.21:4443 # KMS endpoint to Seal/Unseal the key.
+    #         
+    #         # # Cipher kind to use for the encryption. Depends on the encryption provider.
+    #         # cipher: aes-xts-plain64
+
+    #         # # Defines the encryption sector size.
+    #         # blockSize: 4096
+
+    #         # # Additional --perf parameters for the LUKS2 encryption.
+    #         # options:
+    #         #     - no_read_workqueue
+    #         #     - no_write_workqueue
+
+    # # Configures the udev system.
+    # udev:
+    #     # List of udev rules to apply to the udev system
+    #     rules:
+    #         - SUBSYSTEM=="drm", KERNEL=="renderD*", GROUP="44", MODE="0660"
+
+    # # Configures the logging system.
+    # logging:
+    #     # Logging destination.
+    #     destinations:
+    #         - endpoint: tcp://1.2.3.4:12345 # Where to send logs. Supported protocols are "tcp" and "udp".
+    #           format: json_lines # Logs format.
+
+    # # Configures the kernel.
+    # kernel:
+    #     # Kernel modules to load.
+    #     modules:
+    #         - name: btrfs # Module name.
+
+    # # Configures the seccomp profiles for the machine.
+    # seccompProfiles:
+    #     - name: audit.json # The `name` field is used to provide the file name of the seccomp profile.
+    #       # The `value` field is used to provide the seccomp profile.
+    #       value:
+    #         defaultAction: SCMP_ACT_LOG
+
+    # # Configures the node labels for the machine.
+
+    # # node labels example.
+    # nodeLabels:
+    #     exampleLabel: exampleLabelValue
+# Provides cluster specific configuration options.
+cluster:
+    id: DnLy-yXXaIqQRaELsr5VZj-rnVj7jUxyyv69GkmgDIQ= # Globally unique identifier for this cluster (base64 encoded random 32 bytes).
+    secret: ssgf5bbwHcen8w5i48nlDIdsUcTf9O3AoO3EQDTbpmo= # Shared secret of cluster (base64 encoded random 32 bytes).
+    # Provides control plane specific configuration options.
+    controlPlane:
+        endpoint: https://talos-master-vm-01.dezendorf.net:6443 # Endpoint is the canonical controlplane endpoint, which can be an IP address or a DNS hostname.
+    clusterName: talos-cluster # Configures the cluster's name.
+    # Provides cluster specific network configuration options.
+    network:
+        dnsDomain: cluster.local # The domain used by Kubernetes DNS.
+        # The pod subnet CIDR.
+        podSubnets:
+            - 10.244.0.0/16
+        # The service subnet CIDR.
+        serviceSubnets:
+            - 10.96.0.0/12
+        
+        # # The CNI used.
+        # cni:
+        #     name: custom # Name of CNI to use.
+        #     # URLs containing manifests to apply for the CNI.
+        #     urls:
+        #         - https://docs.projectcalico.org/archive/v3.20/manifests/canal.yaml
+    token: zszgpu.9yqk443feyidmnx3 # The [bootstrap token](https://kubernetes.io/docs/reference/access-authn-authz/bootstrap-tokens/) used to join the cluster.
+    secretboxEncryptionSecret: M0Xyw+c3iyQ6wo64UpJpYP03DZW0SL+NLjugUzkRGGc= # A key used for the [encryption of secret data at rest](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/).
+    # The base64 encoded root certificate authority used by Kubernetes.
+    ca:
+        crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJpekNDQVRDZ0F3SUJBZ0lSQUp4ZS9tbGpNUENkaXBFTDBrenZtNjB3Q2dZSUtvWkl6ajBFQXdJd0ZURVQKTUJFR0ExVUVDaE1LYTNWaVpYSnVaWFJsY3pBZUZ3MHlNekE1TURNeE5qTTJOVFZhRncwek16QTRNekV4TmpNMgpOVFZhTUJVeEV6QVJCZ05WQkFvVENtdDFZbVZ5Ym1WMFpYTXdXVEFUQmdjcWhrak9QUUlCQmdncWhrak9QUU1CCkJ3TkNBQVMza29QWERNeFE5c2UyUlhnZklFZXZLR0N6alRjcjNLZ1hEYjBkaW9ueHlRSXJNWGpTOUFWNEJ2aE4KRldKTHpRLy9WTjZJRUdGTEZOb1NjUnpJaURVNG8yRXdYekFPQmdOVkhROEJBZjhFQkFNQ0FvUXdIUVlEVlIwbApCQll3RkFZSUt3WUJCUVVIQXdFR0NDc0dBUVVGQndNQ01BOEdBMVVkRXdFQi93UUZNQU1CQWY4d0hRWURWUjBPCkJCWUVGS1F4eWVkOHdRUnplcFRwcFFZMkZIUHVOTTdjTUFvR0NDcUdTTTQ5QkFNQ0Ewa0FNRVlDSVFDNGFWTmoKN01ONEloMFdyNWcyK01oa1FBbzZUMzVYU0ZrVEozbjFaRDd1TGdJaEFJZC9PTGVFQkRQb2FQTUFMTk5Sc01hQQpTOWpSMHR5MTBCcTFzZVhuYVpJdAotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
+        key: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSUJEK0pjNjZmeWR5QmpVcHdLWm9wLzhma0tMSnEvTHh1c2w1azE4UTd5djRvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFdDVLRDF3ek1VUGJIdGtWNEh5QkhyeWhnczQwM0s5eW9GdzI5SFlxSjhja0NLekY0MHZRRgplQWI0VFJWaVM4MFAvMVRlaUJCaFN4VGFFbkVjeUlnMU9BPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo=
+    # The base64 encoded aggregator certificate authority used by Kubernetes for front-proxy certificate generation.
+    aggregatorCA:
+        crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJZVENDQVFhZ0F3SUJBZ0lSQU5mOFFscXA1eWlWRGVZVlBybGVVOTR3Q2dZSUtvWkl6ajBFQXdJd0FEQWUKRncweU16QTVNRE14TmpNMk5UVmFGdzB6TXpBNE16RXhOak0yTlRWYU1BQXdXVEFUQmdjcWhrak9QUUlCQmdncQpoa2pPUFFNQkJ3TkNBQVFPUkdBeUNnUk0yL0ltbFpod2d6cnIxOVBNSnNJSTJaNHhVVVpGWVpjdGxBMFIzRWR2CkxnTHo1ZzVqZVdFZWk4V2x4UEk4Ylkyd2F2MmY5dExDZ2RPeW8yRXdYekFPQmdOVkhROEJBZjhFQkFNQ0FvUXcKSFFZRFZSMGxCQll3RkFZSUt3WUJCUVVIQXdFR0NDc0dBUVVGQndNQ01BOEdBMVVkRXdFQi93UUZNQU1CQWY4dwpIUVlEVlIwT0JCWUVGTGtscGpwdTlwSENrS1YvaTFlVGl4UVVjMDZJTUFvR0NDcUdTTTQ5QkFNQ0Ewa0FNRVlDCklRQ0Zmc3ZhK3dGWDhKT2JqYjhnOEJCK1VpK3lBWDJJRG54M1d1cmZCS2p0blFJaEFLblppS0dBeTBaRG0vR2wKblprclh1dTAxSmQwSVIzQU5oUUFKUGNXbitOWQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
+        key: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSUdpYzB5Uzl6NTB1SnZVNkwvNE52cUVRMFh0Z1piQ1NjWk9jbGx2Tld4cnBvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFRGtSZ01nb0VUTnZ5SnBXWWNJTTY2OWZUekNiQ0NObWVNVkZHUldHWExaUU5FZHhIYnk0Qwo4K1lPWTNsaEhvdkZwY1R5UEcyTnNHcjluL2JTd29IVHNnPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo=
+    # The base64 encoded private key for service account token generation.
+    serviceAccount:
+        key: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSVBwYVVDT1VielJ0azc2OVpMRmZnWVo3NEFQbWV3ejhPUlptbWZzeXM3d3JvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFZUh0YjBwQWs0Y0JtV041OEl4eGpKUlVMaExvcWVxVERTK1NHVnRSNUJneDQ0UExrWnVlKwpsUjRwMEtLc2w3UVlSNW5UdkEzRnJjQThZTi9mek9KTkpBPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo=
+    # API server specific configuration options.
+    apiServer:
+        image: registry.k8s.io/kube-apiserver:v1.28.0 # The container image used in the API server manifest.
+        # Extra certificate subject alternative names for the API server's certificate.
+        certSANs:
+            - talos-master-vm-01.dezendorf.net
+        disablePodSecurityPolicy: true # Disable PodSecurityPolicy in the API server and default manifests.
+        # Configure the API server admission plugins.
+        admissionControl:
+            - name: PodSecurity # Name is the name of the admission controller.
+              # Configuration is an embedded configuration object to be used as the plugin's configuration.
+              configuration:
+                apiVersion: pod-security.admission.config.k8s.io/v1alpha1
+                defaults:
+                    audit: restricted
+                    audit-version: latest
+                    enforce: baseline
+                    enforce-version: latest
+                    warn: restricted
+                    warn-version: latest
+                exemptions:
+                    namespaces:
+                        - kube-system
+                    runtimeClasses: []
+                    usernames: []
+                kind: PodSecurityConfiguration
+        # Configure the API server audit policy.
+        auditPolicy:
+            apiVersion: audit.k8s.io/v1
+            kind: Policy
+            rules:
+                - level: Metadata
+    # Controller manager server specific configuration options.
+    controllerManager:
+        image: registry.k8s.io/kube-controller-manager:v1.28.0 # The container image used in the controller manager manifest.
+    # Kube-proxy server-specific configuration options
+    proxy:
+        image: registry.k8s.io/kube-proxy:v1.28.0 # The container image used in the kube-proxy manifest.
+        
+        # # Disable kube-proxy deployment on cluster bootstrap.
+        # disabled: false
+    # Scheduler server specific configuration options.
+    scheduler:
+        image: registry.k8s.io/kube-scheduler:v1.28.0 # The container image used in the scheduler manifest.
+    # Configures cluster member discovery.
+    discovery:
+        enabled: true # Enable the cluster membership discovery feature.
+        # Configure registries used for cluster member discovery.
+        registries:
+            # Kubernetes registry uses Kubernetes API server to discover cluster members and stores additional information
+            kubernetes:
+                disabled: true # Disable Kubernetes discovery registry.
+            # The service registry uses an external service to push and pull information about cluster members.
+            service: {}
+            # # External service endpoint.
+            # endpoint: https://discovery.talos.dev/
+    # Etcd specific configuration options.
+    etcd:
+        # The `ca` is the root certificate authority of the PKI.
+        ca:
+            crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJmakNDQVNPZ0F3SUJBZ0lRSVloZ3BvV0lhMXpsK2xnUklYWWpYakFLQmdncWhrak9QUVFEQWpBUE1RMHcKQ3dZRFZRUUtFd1JsZEdOa01CNFhEVEl6TURrd016RTJNelkxTlZvWERUTXpNRGd6TVRFMk16WTFOVm93RHpFTgpNQXNHQTFVRUNoTUVaWFJqWkRCWk1CTUdCeXFHU000OUFnRUdDQ3FHU000OUF3RUhBMElBQkhKUDhuT2VueEdhCnE1d1NqMGJHR0FhcjJFSDFKWjlEK2tqQVE5Y0FudEtPOTNSM3ZFbit6UDZDZTFPRUp3OGsyU1dXa1l0bmQ2dFIKUkt1U3dBeDBnelNqWVRCZk1BNEdBMVVkRHdFQi93UUVBd0lDaERBZEJnTlZIU1VFRmpBVUJnZ3JCZ0VGQlFjRApBUVlJS3dZQkJRVUhBd0l3RHdZRFZSMFRBUUgvQkFVd0F3RUIvekFkQmdOVkhRNEVGZ1FVMCtuak9iaHcrRkMwCitEazBuVWtzaG1HYU5VSXdDZ1lJS29aSXpqMEVBd0lEU1FBd1JnSWhBS3pRaFJhb1hYeVV1RklSYkx6aldSSmkKRSszVGtEeVZqZjNxYnVnUHlaeXlBaUVBcHoyZEVNUVpDc1l5R2g5VDRQU2VXRDEyTlVYNlI4NWEycnlZbUJPZgp6d0k9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
+            key: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSUllWTFCbGhaaDB6QTZySXE1WEVlc1BwemVlZ0xXWDBJVTZkcktURUdEaW5vQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFY2sveWM1NmZFWnFybkJLUFJzWVlCcXZZUWZVbG4wUDZTTUJEMXdDZTBvNzNkSGU4U2Y3TQovb0o3VTRRbkR5VFpKWmFSaTJkM3ExRkVxNUxBREhTRE5BPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo=
+        
+        # # The container image used to create the etcd service.
+        # image: gcr.io/etcd-development/etcd:v3.5.9
+
+        # # The `advertisedSubnets` field configures the networks to pick etcd advertised IP from.
+        # advertisedSubnets:
+        #     - 10.0.0.0/8
+    # A list of urls that point to additional manifests.
+    extraManifests: []
+    #   - https://www.example.com/manifest1.yaml
+    #   - https://www.example.com/manifest2.yaml
+
+    # A list of inline Kubernetes manifests.
+    inlineManifests: []
+    #   - name: namespace-ci # Name of the manifest.
+    #     contents: |- # Manifest contents as a string.
+    #       apiVersion: v1
+    #       kind: Namespace
+    #       metadata:
+    #       	name: ci
+
+    
+    # # A key used for the [encryption of secret data at rest](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/).
+
+    # # Decryption secret example (do not use in production!).
+    # aescbcEncryptionSecret: z01mye6j16bspJYtTB/5SFX8j7Ph4JXxM2Xuu4vsBPM=
+
+    # # Core DNS specific configuration options.
+    # coreDNS:
+    #     image: registry.k8s.io/coredns/coredns:v1.10.1 # The `image` field is an override to the default coredns image.
+
+    # # External cloud provider configuration.
+    # externalCloudProvider:
+    #     enabled: true # Enable external cloud provider.
+    #     # A list of urls that point to additional manifests for an external cloud provider.
+    #     manifests:
+    #         - https://raw.githubusercontent.com/kubernetes/cloud-provider-aws/v1.20.0-alpha.0/manifests/rbac.yaml
+    #         - https://raw.githubusercontent.com/kubernetes/cloud-provider-aws/v1.20.0-alpha.0/manifests/aws-cloud-controller-manager-daemonset.yaml
+
+    # # A map of key value pairs that will be added while fetching the extraManifests.
+    # extraManifestHeaders:
+    #     Token: "1234567"
+    #     X-ExtraInfo: info
+
+    # # Settings for admin kubeconfig generation.
+    # adminKubeconfig:
+    #     certLifetime: 1h0m0s # Admin kubeconfig certificate lifetime (default is 1 year).
+
+    # # Allows running workload on control-plane nodes.
+    # allowSchedulingOnControlPlanes: true

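A hedged sketch of how a generated machine config such as controlplane.yaml is typically pushed to a node and bootstrapped with talosctl; the node address below is a placeholder and none of these commands appear in this commit.

# Sketch only: replace 192.168.0.11 with the address of the control-plane node.
talosctl apply-config --insecure --nodes 192.168.0.11 --file controlplane.yaml

# Bootstrap etcd once, against a single control-plane node, then pull a kubeconfig.
talosctl bootstrap --nodes 192.168.0.11 --endpoints 192.168.0.11
talosctl kubeconfig --nodes 192.168.0.11 --endpoints 192.168.0.11
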
+ 547 - 0
dezendorf/homelab/talos/nodeconfig/pi.yaml

@@ -0,0 +1,547 @@
+version: v1alpha1 # Indicates the schema used to decode the contents.
+debug: false # Enable verbose logging to the console.
+persist: true
+# Provides machine specific configuration options.
+machine:
+    type: worker # Defines the role of the machine within the cluster.
+    token: u7240y.plogoeorz04f09sw # The `token` is used by a machine to join the PKI of the cluster.
+    # The root certificate authority of the PKI.
+    ca:
+        crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJQakNCOGFBREFnRUNBaEFCdE5hMWNDcXBMUTl5RHh4Vm1hVmJNQVVHQXl0bGNEQVFNUTR3REFZRFZRUUsKRXdWMFlXeHZjekFlRncweU16QTVNRE14TmpNMk5UVmFGdzB6TXpBNE16RXhOak0yTlRWYU1CQXhEakFNQmdOVgpCQW9UQlhSaGJHOXpNQ293QlFZREsyVndBeUVBQW5qWmpDRmRpdTIvNUJNSlI2QWRWTWhwUEQ5MzgxTnYrWnA5Cm1mRndFTENqWVRCZk1BNEdBMVVkRHdFQi93UUVBd0lDaERBZEJnTlZIU1VFRmpBVUJnZ3JCZ0VGQlFjREFRWUkKS3dZQkJRVUhBd0l3RHdZRFZSMFRBUUgvQkFVd0F3RUIvekFkQmdOVkhRNEVGZ1FVMFU0OVEzK2lZY3k1TTBUSwphWnVBUjIwUlRNVXdCUVlESzJWd0EwRUF6TEtTdG1FQ1BHNzZpYUp5Z3Nmdzc1Z0tqVEVmODFCS0NOVFBBWkJzCnM2alRySktlRVA0SHpzOFIvZStyTGc4ZSszNDZWSXhoY1FMQWVKV21qc2VQRGc9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
+        key: ""
+    # Extra certificate subject alternative names for the machine's certificate.
+    certSANs: []
+    #   # Uncomment this to enable SANs.
+    #   - 10.0.0.10
+    #   - 172.16.0.10
+    #   - 192.168.0.10
+
+    # Used to provide additional options to the kubelet.
+    kubelet:
+        image: ghcr.io/siderolabs/kubelet:v1.28.0 # The `image` field is an optional reference to an alternative kubelet image.
+        defaultRuntimeSeccompProfileEnabled: true # Enable container runtime default Seccomp profile.
+        disableManifestsDirectory: true # The `disableManifestsDirectory` field disables loading static pod manifests from the /etc/kubernetes/manifests directory.
+        
+        # # The `ClusterDNS` field is an optional reference to an alternative kubelet clusterDNS ip list.
+        # clusterDNS:
+        #     - 10.96.0.10
+        #     - 169.254.2.53
+
+        # # The `extraArgs` field is used to provide additional flags to the kubelet.
+        # extraArgs:
+        #     key: value
+
+        # # The `extraMounts` field is used to add additional mounts to the kubelet container.
+        # extraMounts:
+        #     - destination: /var/lib/example
+        #       type: bind
+        #       source: /var/lib/example
+        #       options:
+        #         - bind
+        #         - rshared
+        #         - rw
+
+        # # The `extraConfig` field is used to provide kubelet configuration overrides.
+        # extraConfig:
+        #     serverTLSBootstrap: true
+
+        # # The `nodeIP` field is used to configure `--node-ip` flag for the kubelet.
+        # nodeIP:
+        #     # The `validSubnets` field configures the networks to pick kubelet node IP from.
+        #     validSubnets:
+        #         - 10.0.0.0/8
+        #         - '!10.0.0.3/32'
+        #         - fdc7::/16
+    # Provides machine specific network configuration options.
+    network: {}
+    # # `interfaces` is used to define the network interface configuration.
+    # interfaces:
+    #     - interface: enp0s1 # The interface name.
+    #       # Assigns static IP addresses to the interface.
+    #       addresses:
+    #         - 192.168.2.0/24
+    #       # A list of routes associated with the interface.
+    #       routes:
+    #         - network: 0.0.0.0/0 # The route's network (destination).
+    #           gateway: 192.168.2.1 # The route's gateway (if empty, creates link scope route).
+    #           metric: 1024 # The optional metric for the route.
+    #       mtu: 1500 # The interface's MTU.
+    #       
+    #       # # Picks a network device using the selector.
+
+    #       # # select a device with bus prefix 00:*.
+    #       # deviceSelector:
+    #       #     busPath: 00:* # PCI, USB bus prefix, supports matching by wildcard.
+    #       # # select a device with mac address matching `*:f0:ab` and `virtio` kernel driver.
+    #       # deviceSelector:
+    #       #     hardwareAddr: '*:f0:ab' # Device hardware address, supports matching by wildcard.
+    #       #     driver: virtio # Kernel driver, supports matching by wildcard.
+    #       # # select a device with bus prefix 00:*, a device with mac address matching `*:f0:ab` and `virtio` kernel driver.
+    #       # deviceSelector:
+    #       #     - busPath: 00:* # PCI, USB bus prefix, supports matching by wildcard.
+    #       #     - hardwareAddr: '*:f0:ab' # Device hardware address, supports matching by wildcard.
+    #       #       driver: virtio # Kernel driver, supports matching by wildcard.
+
+    #       # # Bond specific options.
+    #       # bond:
+    #       #     # The interfaces that make up the bond.
+    #       #     interfaces:
+    #       #         - enp2s0
+    #       #         - enp2s1
+    #       #     # Picks a network device using the selector.
+    #       #     deviceSelectors:
+    #       #         - busPath: 00:* # PCI, USB bus prefix, supports matching by wildcard.
+    #       #         - hardwareAddr: '*:f0:ab' # Device hardware address, supports matching by wildcard.
+    #       #           driver: virtio # Kernel driver, supports matching by wildcard.
+    #       #     mode: 802.3ad # A bond option.
+    #       #     lacpRate: fast # A bond option.
+
+    #       # # Bridge specific options.
+    #       # bridge:
+    #       #     # The interfaces that make up the bridge.
+    #       #     interfaces:
+    #       #         - enxda4042ca9a51
+    #       #         - enxae2a6774c259
+    #       #     # A bridge option.
+    #       #     stp:
+    #       #         enabled: true # Whether Spanning Tree Protocol (STP) is enabled.
+
+    #       # # Indicates if DHCP should be used to configure the interface.
+    #       # dhcp: true
+
+    #       # # DHCP specific options.
+    #       # dhcpOptions:
+    #       #     routeMetric: 1024 # The priority of all routes received via DHCP.
+
+    #       # # Wireguard specific configuration.
+
+    #       # # wireguard server example
+    #       # wireguard:
+    #       #     privateKey: ABCDEF... # Specifies a private key configuration (base64 encoded).
+    #       #     listenPort: 51111 # Specifies a device's listening port.
+    #       #     # Specifies a list of peer configurations to apply to a device.
+    #       #     peers:
+    #       #         - publicKey: ABCDEF... # Specifies the public key of this peer.
+    #       #           endpoint: 192.168.1.3 # Specifies the endpoint of this peer entry.
+    #       #           # AllowedIPs specifies a list of allowed IP addresses in CIDR notation for this peer.
+    #       #           allowedIPs:
+    #       #             - 192.168.1.0/24
+    #       # # wireguard peer example
+    #       # wireguard:
+    #       #     privateKey: ABCDEF... # Specifies a private key configuration (base64 encoded).
+    #       #     # Specifies a list of peer configurations to apply to a device.
+    #       #     peers:
+    #       #         - publicKey: ABCDEF... # Specifies the public key of this peer.
+    #       #           endpoint: 192.168.1.2:51822 # Specifies the endpoint of this peer entry.
+    #       #           persistentKeepaliveInterval: 10s # Specifies the persistent keepalive interval for this peer.
+    #       #           # AllowedIPs specifies a list of allowed IP addresses in CIDR notation for this peer.
+    #       #           allowedIPs:
+    #       #             - 192.168.1.0/24
+
+    #       # # Virtual (shared) IP address configuration.
+
+    #       # # layer2 vip example
+    #       # vip:
+    #       #     ip: 172.16.199.55 # Specifies the IP address to be used.
+
+    # # Used to statically set the nameservers for the machine.
+    # nameservers:
+    #     - 8.8.8.8
+    #     - 1.1.1.1
+
+    # # Allows for extra entries to be added to the `/etc/hosts` file
+    # extraHostEntries:
+    #     - ip: 192.168.1.100 # The IP of the host.
+    #       # The host alias.
+    #       aliases:
+    #         - example
+    #         - example.domain.tld
+
+    # # Configures KubeSpan feature.
+    # kubespan:
+    #     enabled: true # Enable the KubeSpan feature.
+
+    # Used to provide instructions for installations.
+    install:
+        disk: /dev/mmcblk0 # The disk used for installations.
+        image: ghcr.io/siderolabs/installer:v1.5.1 # Allows for supplying the image used to perform the installation.
+        wipe: false # Indicates if the installation disk should be wiped at installation time.
+        
+        # # Look up disk using disk attributes like model, size, serial and others.
+        # diskSelector:
+        #     size: 4GB # Disk size.
+        #     model: WDC* # Disk model `/sys/block/<dev>/device/model`.
+        #     busPath: /pci0000:00/0000:00:17.0/ata1/host0/target0:0:0/0:0:0:0 # Disk bus path.
+
+        # # Allows for supplying extra kernel args via the bootloader.
+        # extraKernelArgs:
+        #     - talos.platform=metal
+        #     - reboot=k
+
+        # # Allows for supplying additional system extension images to install on top of base Talos image.
+        # extensions:
+        #     - image: ghcr.io/siderolabs/gvisor:20220117.0-v1.0.0 # System extension image.
+    # Used to configure the machine's container image registry mirrors.
+    registries: {}
+    # # Specifies mirror configuration for each registry host namespace.
+    # mirrors:
+    #     ghcr.io:
+    #         # List of endpoints (URLs) for registry mirrors to use.
+    #         endpoints:
+    #             - https://registry.insecure
+    #             - https://ghcr.io/v2/
+
+    # # Specifies TLS & auth configuration for HTTPS image registries.
+    # config:
+    #     registry.insecure:
+    #         # The TLS configuration for the registry.
+    #         tls:
+    #             insecureSkipVerify: true # Skip TLS server certificate verification (not recommended).
+    #             
+    #             # # Enable mutual TLS authentication with the registry.
+    #             # clientIdentity:
+    #             #     crt: LS0tIEVYQU1QTEUgQ0VSVElGSUNBVEUgLS0t
+    #             #     key: LS0tIEVYQU1QTEUgS0VZIC0tLQ==
+    #         
+    #         # # The auth configuration for this registry.
+    #         # auth:
+    #         #     username: username # Optional registry authentication.
+    #         #     password: password # Optional registry authentication.
+
+    # Features describe individual Talos features that can be switched on or off.
+    features:
+        rbac: true # Enable role-based access control (RBAC).
+        stableHostname: true # Enable stable default hostname.
+        apidCheckExtKeyUsage: true # Enable checks for extended key usage of client certificates in apid.
+          #diskQuotaSupport: true # Enable XFS project quota support for EPHEMERAL partition and user disks.
+        
+        # # Configure Talos API access from Kubernetes pods.
+        # kubernetesTalosAPIAccess:
+        #     enabled: true # Enable Talos API access from Kubernetes pods.
+        #     # The list of Talos API roles which can be granted for access from Kubernetes pods.
+        #     allowedRoles:
+        #         - os:reader
+        #     # The list of Kubernetes namespaces Talos API access is available from.
+        #     allowedKubernetesNamespaces:
+        #         - kube-system
+    
+    # # Provides machine specific control plane configuration options.
+
+    # # ControlPlane definition example.
+    # controlPlane:
+    #     # Controller manager machine specific configuration options.
+    #     controllerManager:
+    #         disabled: false # Disable kube-controller-manager on the node.
+    #     # Scheduler machine specific configuration options.
+    #     scheduler:
+    #         disabled: true # Disable kube-scheduler on the node.
+
+    # # Used to provide static pod definitions to be run by the kubelet directly bypassing the kube-apiserver.
+
+    # # nginx static pod.
+    # pods:
+    #     - apiVersion: v1
+    #       kind: pod
+    #       metadata:
+    #         name: nginx
+    #       spec:
+    #         containers:
+    #             - image: nginx
+    #               name: nginx
+
+    # # Used to partition, format and mount additional disks.
+
+    # # MachineDisks list example.
+    # disks:
+    #     - device: /dev/sdb # The name of the disk to use.
+    #       # A list of partitions to create on the disk.
+    #       partitions:
+    #         - mountpoint: /var/mnt/extra # Where to mount the partition.
+    #           
+    #           # # The size of partition: either bytes or human readable representation. If `size:` is omitted, the partition is sized to occupy the full disk.
+
+    #           # # Human readable representation.
+    #           # size: 100 MB
+    #           # # Precise value in bytes.
+    #           # size: 1073741824
+
+    # # Allows the addition of user specified files.
+
+    # # MachineFiles usage example.
+    # files:
+    #     - content: '...' # The contents of the file.
+    #       permissions: 0o666 # The file's permissions in octal.
+    #       path: /tmp/file.txt # The path of the file.
+    #       op: append # The operation to use
+
+    # # The `env` field allows for the addition of environment variables.
+
+    # # Environment variables definition examples.
+    # env:
+    #     GRPC_GO_LOG_SEVERITY_LEVEL: info
+    #     GRPC_GO_LOG_VERBOSITY_LEVEL: "99"
+    #     https_proxy: http://SERVER:PORT/
+    # env:
+    #     GRPC_GO_LOG_SEVERITY_LEVEL: error
+    #     https_proxy: https://USERNAME:PASSWORD@SERVER:PORT/
+    # env:
+    #     https_proxy: http://DOMAIN\USERNAME:PASSWORD@SERVER:PORT/
+
+    # # Used to configure the machine's time settings.
+
+    # # Example configuration for cloudflare ntp server.
+    # time:
+    #     disabled: false # Indicates if the time service is disabled for the machine.
+    #     # Specifies time (NTP) servers to use for setting the system time.
+    #     servers:
+    #         - time.cloudflare.com
+    #     bootTimeout: 2m0s # Specifies the timeout when the node time is considered to be in sync unlocking the boot sequence.
+
+    # # Used to configure the machine's sysctls.
+
+    # # MachineSysctls usage example.
+    # sysctls:
+    #     kernel.domainname: talos.dev
+    #     net.ipv4.ip_forward: "0"
+
+    # # Used to configure the machine's sysfs.
+
+    # # MachineSysfs usage example.
+    # sysfs:
+    #     devices.system.cpu.cpu0.cpufreq.scaling_governor: performance
+
+    # # Machine system disk encryption configuration.
+    # systemDiskEncryption:
+    #     # Ephemeral partition encryption.
+    #     ephemeral:
+    #         provider: luks2 # Encryption provider to use for the encryption.
+    #         # Defines the encryption keys generation and storage method.
+    #         keys:
+    #             - # Deterministically generated key from the node UUID and PartitionLabel.
+    #               nodeID: {}
+    #               slot: 0 # Key slot number for LUKS2 encryption.
+    #               
+    #               # # KMS managed encryption key.
+    #               # kms:
+    #               #     endpoint: https://192.168.88.21:4443 # KMS endpoint to Seal/Unseal the key.
+    #         
+    #         # # Cipher kind to use for the encryption. Depends on the encryption provider.
+    #         # cipher: aes-xts-plain64
+
+    #         # # Defines the encryption sector size.
+    #         # blockSize: 4096
+
+    #         # # Additional --perf parameters for the LUKS2 encryption.
+    #         # options:
+    #         #     - no_read_workqueue
+    #         #     - no_write_workqueue
+
+    # # Configures the udev system.
+    # udev:
+    #     # List of udev rules to apply to the udev system
+    #     rules:
+    #         - SUBSYSTEM=="drm", KERNEL=="renderD*", GROUP="44", MODE="0660"
+
+    # # Configures the logging system.
+    # logging:
+    #     # Logging destination.
+    #     destinations:
+    #         - endpoint: tcp://1.2.3.4:12345 # Where to send logs. Supported protocols are "tcp" and "udp".
+    #           format: json_lines # Logs format.
+
+    # # Configures the kernel.
+    # kernel:
+    #     # Kernel modules to load.
+    #     modules:
+    #         - name: btrfs # Module name.
+
+    # # Configures the seccomp profiles for the machine.
+    # seccompProfiles:
+    #     - name: audit.json # The `name` field is used to provide the file name of the seccomp profile.
+    #       # The `value` field is used to provide the seccomp profile.
+    #       value:
+    #         defaultAction: SCMP_ACT_LOG
+
+    # # Configures the node labels for the machine.
+
+    # # node labels example.
+    nodeLabels:
+        ceph-osd-node: "true"
+        ceph-mon-node: "false"
+        ceph-mgr-node: "false"
+        ceph-rgw-node: "false"
+# Provides cluster specific configuration options.
+cluster:
+    id: DnLy-yXXaIqQRaELsr5VZj-rnVj7jUxyyv69GkmgDIQ= # Globally unique identifier for this cluster (base64 encoded random 32 bytes).
+    secret: ssgf5bbwHcen8w5i48nlDIdsUcTf9O3AoO3EQDTbpmo= # Shared secret of cluster (base64 encoded random 32 bytes).
+    # Provides control plane specific configuration options.
+    controlPlane:
+        endpoint: https://talos-master-vm-01.dezendorf.net:6443 # Endpoint is the canonical controlplane endpoint, which can be an IP address or a DNS hostname.
+    # Provides cluster specific network configuration options.
+    network:
+        dnsDomain: cluster.local # The domain used by Kubernetes DNS.
+        # The pod subnet CIDR.
+        podSubnets:
+            - 10.244.0.0/16
+        # The service subnet CIDR.
+        serviceSubnets:
+            - 10.96.0.0/12
+        
+        # # The CNI used.
+        # cni:
+        #     name: custom # Name of CNI to use.
+        #     # URLs containing manifests to apply for the CNI.
+        #     urls:
+        #         - https://docs.projectcalico.org/archive/v3.20/manifests/canal.yaml
+    token: zszgpu.9yqk443feyidmnx3 # The [bootstrap token](https://kubernetes.io/docs/reference/access-authn-authz/bootstrap-tokens/) used to join the cluster.
+    # The base64 encoded root certificate authority used by Kubernetes.
+    ca:
+        crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJpekNDQVRDZ0F3SUJBZ0lSQUp4ZS9tbGpNUENkaXBFTDBrenZtNjB3Q2dZSUtvWkl6ajBFQXdJd0ZURVQKTUJFR0ExVUVDaE1LYTNWaVpYSnVaWFJsY3pBZUZ3MHlNekE1TURNeE5qTTJOVFZhRncwek16QTRNekV4TmpNMgpOVFZhTUJVeEV6QVJCZ05WQkFvVENtdDFZbVZ5Ym1WMFpYTXdXVEFUQmdjcWhrak9QUUlCQmdncWhrak9QUU1CCkJ3TkNBQVMza29QWERNeFE5c2UyUlhnZklFZXZLR0N6alRjcjNLZ1hEYjBkaW9ueHlRSXJNWGpTOUFWNEJ2aE4KRldKTHpRLy9WTjZJRUdGTEZOb1NjUnpJaURVNG8yRXdYekFPQmdOVkhROEJBZjhFQkFNQ0FvUXdIUVlEVlIwbApCQll3RkFZSUt3WUJCUVVIQXdFR0NDc0dBUVVGQndNQ01BOEdBMVVkRXdFQi93UUZNQU1CQWY4d0hRWURWUjBPCkJCWUVGS1F4eWVkOHdRUnplcFRwcFFZMkZIUHVOTTdjTUFvR0NDcUdTTTQ5QkFNQ0Ewa0FNRVlDSVFDNGFWTmoKN01ONEloMFdyNWcyK01oa1FBbzZUMzVYU0ZrVEozbjFaRDd1TGdJaEFJZC9PTGVFQkRQb2FQTUFMTk5Sc01hQQpTOWpSMHR5MTBCcTFzZVhuYVpJdAotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
+        key: ""
+    # Configures cluster member discovery.
+    discovery:
+        enabled: true # Enable the cluster membership discovery feature.
+        # Configure registries used for cluster member discovery.
+        registries:
+            # Kubernetes registry uses Kubernetes API server to discover cluster members and stores additional information
+            kubernetes:
+                disabled: true # Disable Kubernetes discovery registry.
+            # Service registry is using an external service to push and pull information about cluster members.
+            service: {}
+            # # External service endpoint.
+            # endpoint: https://discovery.talos.dev/
+    
+    # # A key used for the [encryption of secret data at rest](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/).
+
+    # # Decryption secret example (do not use in production!).
+    # aescbcEncryptionSecret: z01mye6j16bspJYtTB/5SFX8j7Ph4JXxM2Xuu4vsBPM=
+
+    # # A key used for the [encryption of secret data at rest](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/).
+
+    # # Decryption secret example (do not use in production!).
+    # secretboxEncryptionSecret: z01mye6j16bspJYtTB/5SFX8j7Ph4JXxM2Xuu4vsBPM=
+
+    # # The base64 encoded aggregator certificate authority used by Kubernetes for front-proxy certificate generation.
+
+    # # AggregatorCA example.
+    # aggregatorCA:
+    #     crt: LS0tIEVYQU1QTEUgQ0VSVElGSUNBVEUgLS0t
+    #     key: LS0tIEVYQU1QTEUgS0VZIC0tLQ==
+
+    # # The base64 encoded private key for service account token generation.
+
+    # # AggregatorCA example.
+    # serviceAccount:
+    #     key: LS0tIEVYQU1QTEUgS0VZIC0tLQ==
+
+    # # API server specific configuration options.
+    # apiServer:
+    #     image: registry.k8s.io/kube-apiserver:v1.28.0 # The container image used in the API server manifest.
+    #     # Extra arguments to supply to the API server.
+    #     extraArgs:
+    #         feature-gates: ServerSideApply=true
+    #         http2-max-streams-per-connection: "32"
+    #     # Extra certificate subject alternative names for the API server's certificate.
+    #     certSANs:
+    #         - 1.2.3.4
+    #         - 4.5.6.7
+    #     # Configure the API server admission plugins.
+    #     admissionControl:
+    #         - name: PodSecurity # Name is the name of the admission controller.
+    #           # Configuration is an embedded configuration object to be used as the plugin's
+    #           configuration:
+    #             apiVersion: pod-security.admission.config.k8s.io/v1alpha1
+    #             defaults:
+    #                 audit: restricted
+    #                 audit-version: latest
+    #                 enforce: baseline
+    #                 enforce-version: latest
+    #                 warn: restricted
+    #                 warn-version: latest
+    #             exemptions:
+    #                 namespaces:
+    #                     - kube-system
+    #                 runtimeClasses: []
+    #                 usernames: []
+    #             kind: PodSecurityConfiguration
+    #     # Configure the API server audit policy.
+    #     auditPolicy:
+    #         apiVersion: audit.k8s.io/v1
+    #         kind: Policy
+    #         rules:
+    #             - level: Metadata
+
+    # # Controller manager server specific configuration options.
+    # controllerManager:
+    #     image: registry.k8s.io/kube-controller-manager:v1.28.0 # The container image used in the controller manager manifest.
+    #     # Extra arguments to supply to the controller manager.
+    #     extraArgs:
+    #         feature-gates: ServerSideApply=true
+
+    # # Kube-proxy server-specific configuration options
+    # proxy:
+    #     disabled: false # Disable kube-proxy deployment on cluster bootstrap.
+    #     image: registry.k8s.io/kube-proxy:v1.28.0 # The container image used in the kube-proxy manifest.
+    #     mode: ipvs # proxy mode of kube-proxy.
+    #     # Extra arguments to supply to kube-proxy.
+    #     extraArgs:
+    #         proxy-mode: iptables
+
+    # # Scheduler server specific configuration options.
+    # scheduler:
+    #     image: registry.k8s.io/kube-scheduler:v1.28.0 # The container image used in the scheduler manifest.
+    #     # Extra arguments to supply to the scheduler.
+    #     extraArgs:
+    #         feature-gates: AllBeta=true
+
+    # # Etcd specific configuration options.
+    # etcd:
+    #     image: gcr.io/etcd-development/etcd:v3.5.9 # The container image used to create the etcd service.
+    #     # The `ca` is the root certificate authority of the PKI.
+    #     ca:
+    #         crt: LS0tIEVYQU1QTEUgQ0VSVElGSUNBVEUgLS0t
+    #         key: LS0tIEVYQU1QTEUgS0VZIC0tLQ==
+    #     # Extra arguments to supply to etcd.
+    #     extraArgs:
+    #         election-timeout: "5000"
+    #     # The `advertisedSubnets` field configures the networks to pick etcd advertised IP from.
+    #     advertisedSubnets:
+    #         - 10.0.0.0/8
+
+    # # Core DNS specific configuration options.
+    # coreDNS:
+    #     image: registry.k8s.io/coredns/coredns:v1.10.1 # The `image` field is an override to the default coredns image.
+
+    # # External cloud provider configuration.
+    # externalCloudProvider:
+    #     enabled: true # Enable external cloud provider.
+    #     # A list of urls that point to additional manifests for an external cloud provider.
+    #     manifests:
+    #         - https://raw.githubusercontent.com/kubernetes/cloud-provider-aws/v1.20.0-alpha.0/manifests/rbac.yaml
+    #         - https://raw.githubusercontent.com/kubernetes/cloud-provider-aws/v1.20.0-alpha.0/manifests/aws-cloud-controller-manager-daemonset.yaml
+
+    # # A list of urls that point to additional manifests.
+    # extraManifests:
+    #     - https://www.example.com/manifest1.yaml
+    #     - https://www.example.com/manifest2.yaml
+
+    # # A map of key value pairs that will be added while fetching the extraManifests.
+    # extraManifestHeaders:
+    #     Token: "1234567"
+    #     X-ExtraInfo: info
+
+    # # A list of inline Kubernetes manifests.
+    # inlineManifests:
+    #     - name: namespace-ci # Name of the manifest.
+    #       contents: |- # Manifest contents as a string.
+    #         apiVersion: v1
+    #         kind: Namespace
+    #         metadata:
+    #         	name: ci
+
+    # # Settings for admin kubeconfig generation.
+    # adminKubeconfig:
+    #     certLifetime: 1h0m0s # Admin kubeconfig certificate lifetime (default is 1 year).
+
+    # # Allows running workload on control-plane nodes.
+    # allowSchedulingOnControlPlanes: true
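Assuming the file ending here is the control-plane machine config (worker.yaml follows), the usual follow-up once it has been applied is a one-time etcd bootstrap and pulling a kubeconfig. A minimal sketch: the node address reuses the cluster endpoint host from the config, and talosconfig endpoints are presumed to be set already:

# bootstrap etcd on the first control-plane node (run exactly once per cluster)
talosctl --nodes talos-master-vm-01.dezendorf.net bootstrap
# write an admin kubeconfig for the new cluster into the current directory
talosctl --nodes talos-master-vm-01.dezendorf.net kubeconfig .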

+ 547 - 0
dezendorf/homelab/talos/nodeconfig/worker.yaml

@@ -0,0 +1,547 @@
+version: v1alpha1 # Indicates the schema used to decode the contents.
+debug: false # Enable verbose logging to the console.
+persist: true
+# Provides machine specific configuration options.
+machine:
+    type: worker # Defines the role of the machine within the cluster.
+    token: u7240y.plogoeorz04f09sw # The `token` is used by a machine to join the PKI of the cluster.
+    # The root certificate authority of the PKI.
+    ca:
+        crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJQakNCOGFBREFnRUNBaEFCdE5hMWNDcXBMUTl5RHh4Vm1hVmJNQVVHQXl0bGNEQVFNUTR3REFZRFZRUUsKRXdWMFlXeHZjekFlRncweU16QTVNRE14TmpNMk5UVmFGdzB6TXpBNE16RXhOak0yTlRWYU1CQXhEakFNQmdOVgpCQW9UQlhSaGJHOXpNQ293QlFZREsyVndBeUVBQW5qWmpDRmRpdTIvNUJNSlI2QWRWTWhwUEQ5MzgxTnYrWnA5Cm1mRndFTENqWVRCZk1BNEdBMVVkRHdFQi93UUVBd0lDaERBZEJnTlZIU1VFRmpBVUJnZ3JCZ0VGQlFjREFRWUkKS3dZQkJRVUhBd0l3RHdZRFZSMFRBUUgvQkFVd0F3RUIvekFkQmdOVkhRNEVGZ1FVMFU0OVEzK2lZY3k1TTBUSwphWnVBUjIwUlRNVXdCUVlESzJWd0EwRUF6TEtTdG1FQ1BHNzZpYUp5Z3Nmdzc1Z0tqVEVmODFCS0NOVFBBWkJzCnM2alRySktlRVA0SHpzOFIvZStyTGc4ZSszNDZWSXhoY1FMQWVKV21qc2VQRGc9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
+        key: ""
+    # Extra certificate subject alternative names for the machine's certificate.
+    certSANs: []
+    #   # Uncomment this to enable SANs.
+    #   - 10.0.0.10
+    #   - 172.16.0.10
+    #   - 192.168.0.10
+
+    # Used to provide additional options to the kubelet.
+    kubelet:
+        image: ghcr.io/siderolabs/kubelet:v1.28.0 # The `image` field is an optional reference to an alternative kubelet image.
+        defaultRuntimeSeccompProfileEnabled: true # Enable container runtime default Seccomp profile.
+        disableManifestsDirectory: true # The `disableManifestsDirectory` field configures the kubelet to get static pod manifests from the /etc/kubernetes/manifests directory.
+        
+        # # The `ClusterDNS` field is an optional reference to an alternative kubelet clusterDNS ip list.
+        # clusterDNS:
+        #     - 10.96.0.10
+        #     - 169.254.2.53
+
+        # # The `extraArgs` field is used to provide additional flags to the kubelet.
+        extraArgs:
+            rotate-server-certificates: true
+        
+        # # The `extraMounts` field is used to add additional mounts to the kubelet container.
+        # extraMounts:
+        #     - destination: /var/lib/example
+        #       type: bind
+        #       source: /var/lib/example
+        #       options:
+        #         - bind
+        #         - rshared
+        #         - rw
+
+        # # The `extraConfig` field is used to provide kubelet configuration overrides.
+        # extraConfig:
+        #     serverTLSBootstrap: true
+
+        # # The `nodeIP` field is used to configure `--node-ip` flag for the kubelet.
+        # nodeIP:
+        #     # The `validSubnets` field configures the networks to pick kubelet node IP from.
+        #     validSubnets:
+        #         - 10.0.0.0/8
+        #         - '!10.0.0.3/32'
+        #         - fdc7::/16
+    # Provides machine specific network configuration options.
+    network: {}
+    # # `interfaces` is used to define the network interface configuration.
+    # interfaces:
+    #     - interface: enp0s1 # The interface name.
+    #       # Assigns static IP addresses to the interface.
+    #       addresses:
+    #         - 192.168.2.0/24
+    #       # A list of routes associated with the interface.
+    #       routes:
+    #         - network: 0.0.0.0/0 # The route's network (destination).
+    #           gateway: 192.168.2.1 # The route's gateway (if empty, creates link scope route).
+    #           metric: 1024 # The optional metric for the route.
+    #       mtu: 1500 # The interface's MTU.
+    #       
+    #       # # Picks a network device using the selector.
+
+    #       # # select a device with bus prefix 00:*.
+    #       # deviceSelector:
+    #       #     busPath: 00:* # PCI, USB bus prefix, supports matching by wildcard.
+    #       # # select a device with mac address matching `*:f0:ab` and `virtio` kernel driver.
+    #       # deviceSelector:
+    #       #     hardwareAddr: '*:f0:ab' # Device hardware address, supports matching by wildcard.
+    #       #     driver: virtio # Kernel driver, supports matching by wildcard.
+    #       # # select a device with bus prefix 00:*, a device with mac address matching `*:f0:ab` and `virtio` kernel driver.
+    #       # deviceSelector:
+    #       #     - busPath: 00:* # PCI, USB bus prefix, supports matching by wildcard.
+    #       #     - hardwareAddr: '*:f0:ab' # Device hardware address, supports matching by wildcard.
+    #       #       driver: virtio # Kernel driver, supports matching by wildcard.
+
+    #       # # Bond specific options.
+    #       # bond:
+    #       #     # The interfaces that make up the bond.
+    #       #     interfaces:
+    #       #         - enp2s0
+    #       #         - enp2s1
+    #       #     # Picks a network device using the selector.
+    #       #     deviceSelectors:
+    #       #         - busPath: 00:* # PCI, USB bus prefix, supports matching by wildcard.
+    #       #         - hardwareAddr: '*:f0:ab' # Device hardware address, supports matching by wildcard.
+    #       #           driver: virtio # Kernel driver, supports matching by wildcard.
+    #       #     mode: 802.3ad # A bond option.
+    #       #     lacpRate: fast # A bond option.
+
+    #       # # Bridge specific options.
+    #       # bridge:
+    #       #     # The interfaces that make up the bridge.
+    #       #     interfaces:
+    #       #         - enxda4042ca9a51
+    #       #         - enxae2a6774c259
+    #       #     # A bridge option.
+    #       #     stp:
+    #       #         enabled: true # Whether Spanning Tree Protocol (STP) is enabled.
+
+    #       # # Indicates if DHCP should be used to configure the interface.
+    #       # dhcp: true
+
+    #       # # DHCP specific options.
+    #       # dhcpOptions:
+    #       #     routeMetric: 1024 # The priority of all routes received via DHCP.
+
+    #       # # Wireguard specific configuration.
+
+    #       # # wireguard server example
+    #       # wireguard:
+    #       #     privateKey: ABCDEF... # Specifies a private key configuration (base64 encoded).
+    #       #     listenPort: 51111 # Specifies a device's listening port.
+    #       #     # Specifies a list of peer configurations to apply to a device.
+    #       #     peers:
+    #       #         - publicKey: ABCDEF... # Specifies the public key of this peer.
+    #       #           endpoint: 192.168.1.3 # Specifies the endpoint of this peer entry.
+    #       #           # AllowedIPs specifies a list of allowed IP addresses in CIDR notation for this peer.
+    #       #           allowedIPs:
+    #       #             - 192.168.1.0/24
+    #       # # wireguard peer example
+    #       # wireguard:
+    #       #     privateKey: ABCDEF... # Specifies a private key configuration (base64 encoded).
+    #       #     # Specifies a list of peer configurations to apply to a device.
+    #       #     peers:
+    #       #         - publicKey: ABCDEF... # Specifies the public key of this peer.
+    #       #           endpoint: 192.168.1.2:51822 # Specifies the endpoint of this peer entry.
+    #       #           persistentKeepaliveInterval: 10s # Specifies the persistent keepalive interval for this peer.
+    #       #           # AllowedIPs specifies a list of allowed IP addresses in CIDR notation for this peer.
+    #       #           allowedIPs:
+    #       #             - 192.168.1.0/24
+
+    #       # # Virtual (shared) IP address configuration.
+
+    #       # # layer2 vip example
+    #       # vip:
+    #       #     ip: 172.16.199.55 # Specifies the IP address to be used.
+
+    # # Used to statically set the nameservers for the machine.
+    # nameservers:
+    #     - 8.8.8.8
+    #     - 1.1.1.1
+
+    # # Allows for extra entries to be added to the `/etc/hosts` file
+    # extraHostEntries:
+    #     - ip: 192.168.1.100 # The IP of the host.
+    #       # The host alias.
+    #       aliases:
+    #         - example
+    #         - example.domain.tld
+
+    # # Configures KubeSpan feature.
+    # kubespan:
+    #     enabled: true # Enable the KubeSpan feature.
+
+    # Used to provide instructions for installations.
+    install:
+        disk: /dev/sda # The disk used for installations.
+        image: ghcr.io/siderolabs/installer:v1.5.1 # Allows for supplying the image used to perform the installation.
+        wipe: false # Indicates if the installation disk should be wiped at installation time.
+        
+        # # Look up disk using disk attributes like model, size, serial and others.
+        # diskSelector:
+        #     size: 4GB # Disk size.
+        #     model: WDC* # Disk model `/sys/block/<dev>/device/model`.
+        #     busPath: /pci0000:00/0000:00:17.0/ata1/host0/target0:0:0/0:0:0:0 # Disk bus path.
+
+        # # Allows for supplying extra kernel args via the bootloader.
+        # extraKernelArgs:
+        #     - talos.platform=metal
+        #     - reboot=k
+
+        # # Allows for supplying additional system extension images to install on top of base Talos image.
+        # extensions:
+        #     - image: ghcr.io/siderolabs/gvisor:20220117.0-v1.0.0 # System extension image.
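Before trusting `disk: /dev/sda`, it can help to list what the node actually exposes while it is still in maintenance mode. A sketch only; the address is a placeholder and the exact subcommand varies between talosctl releases:

# list block devices on a node booted into maintenance mode (no client certificate yet)
talosctl disks --insecure --nodes 192.168.2.50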
+    # Used to configure the machine's container image registry mirrors.
+    registries: {}
+    # # Specifies mirror configuration for each registry host namespace.
+    # mirrors:
+    #     ghcr.io:
+    #         # List of endpoints (URLs) for registry mirrors to use.
+    #         endpoints:
+    #             - https://registry.insecure
+    #             - https://ghcr.io/v2/
+
+    # # Specifies TLS & auth configuration for HTTPS image registries.
+    # config:
+    #     registry.insecure:
+    #         # The TLS configuration for the registry.
+    #         tls:
+    #             insecureSkipVerify: true # Skip TLS server certificate verification (not recommended).
+    #             
+    #             # # Enable mutual TLS authentication with the registry.
+    #             # clientIdentity:
+    #             #     crt: LS0tIEVYQU1QTEUgQ0VSVElGSUNBVEUgLS0t
+    #             #     key: LS0tIEVYQU1QTEUgS0VZIC0tLQ==
+    #         
+    #         # # The auth configuration for this registry.
+    #         # auth:
+    #         #     username: username # Optional registry authentication.
+    #         #     password: password # Optional registry authentication.
+
+    # Features describe individual Talos features that can be switched on or off.
+    features:
+        rbac: true # Enable role-based access control (RBAC).
+        stableHostname: true # Enable stable default hostname.
+        apidCheckExtKeyUsage: true # Enable checks for extended key usage of client certificates in apid.
+        # diskQuotaSupport: true # Enable XFS project quota support for EPHEMERAL partition and user disks.
+        
+        # # Configure Talos API access from Kubernetes pods.
+        # kubernetesTalosAPIAccess:
+        #     enabled: true # Enable Talos API access from Kubernetes pods.
+        #     # The list of Talos API roles which can be granted for access from Kubernetes pods.
+        #     allowedRoles:
+        #         - os:reader
+        #     # The list of Kubernetes namespaces Talos API access is available from.
+        #     allowedKubernetesNamespaces:
+        #         - kube-system
+    
+    # # Provides machine specific control plane configuration options.
+
+    # # ControlPlane definition example.
+    # controlPlane:
+    #     # Controller manager machine specific configuration options.
+    #     controllerManager:
+    #         disabled: false # Disable kube-controller-manager on the node.
+    #     # Scheduler machine specific configuration options.
+    #     scheduler:
+    #         disabled: true # Disable kube-scheduler on the node.
+
+    # # Used to provide static pod definitions to be run by the kubelet directly bypassing the kube-apiserver.
+
+    # # nginx static pod.
+    # pods:
+    #     - apiVersion: v1
+    #       kind: Pod
+    #       metadata:
+    #         name: nginx
+    #       spec:
+    #         containers:
+    #             - image: nginx
+    #               name: nginx
+
+    # # Used to partition, format and mount additional disks.
+
+    # # MachineDisks list example.
+    # disks:
+    #     - device: /dev/sdb # The name of the disk to use.
+    #       # A list of partitions to create on the disk.
+    #       partitions:
+    #         - mountpoint: /var/mnt/extra # Where to mount the partition.
+    #           
+    #           # # The size of partition: either bytes or human readable representation. If `size:` is omitted, the partition is sized to occupy the full disk.
+
+    #           # # Human readable representation.
+    #           # size: 100 MB
+    #           # # Precise value in bytes.
+    #           # size: 1073741824
+
+    # # Allows the addition of user specified files.
+
+    # # MachineFiles usage example.
+    # files:
+    #     - content: '...' # The contents of the file.
+    #       permissions: 0o666 # The file's permissions in octal.
+    #       path: /tmp/file.txt # The path of the file.
+    #       op: append # The operation to use
+
+    # # The `env` field allows for the addition of environment variables.
+
+    # # Environment variables definition examples.
+    # env:
+    #     GRPC_GO_LOG_SEVERITY_LEVEL: info
+    #     GRPC_GO_LOG_VERBOSITY_LEVEL: "99"
+    #     https_proxy: http://SERVER:PORT/
+    # env:
+    #     GRPC_GO_LOG_SEVERITY_LEVEL: error
+    #     https_proxy: https://USERNAME:PASSWORD@SERVER:PORT/
+    # env:
+    #     https_proxy: http://DOMAIN\USERNAME:PASSWORD@SERVER:PORT/
+
+    # # Used to configure the machine's time settings.
+
+    # # Example configuration for cloudflare ntp server.
+    # time:
+    #     disabled: false # Indicates if the time service is disabled for the machine.
+    #     # Specifies time (NTP) servers to use for setting the system time.
+    #     servers:
+    #         - time.cloudflare.com
+    #     bootTimeout: 2m0s # Specifies the timeout when the node time is considered to be in sync unlocking the boot sequence.
+
+    # # Used to configure the machine's sysctls.
+
+    # # MachineSysctls usage example.
+    # sysctls:
+    #     kernel.domainname: talos.dev
+    #     net.ipv4.ip_forward: "0"
+
+    # # Used to configure the machine's sysfs.
+
+    # # MachineSysfs usage example.
+    # sysfs:
+    #     devices.system.cpu.cpu0.cpufreq.scaling_governor: performance
+
+    # # Machine system disk encryption configuration.
+    # systemDiskEncryption:
+    #     # Ephemeral partition encryption.
+    #     ephemeral:
+    #         provider: luks2 # Encryption provider to use for the encryption.
+    #         # Defines the encryption keys generation and storage method.
+    #         keys:
+    #             - # Deterministically generated key from the node UUID and PartitionLabel.
+    #               nodeID: {}
+    #               slot: 0 # Key slot number for LUKS2 encryption.
+    #               
+    #               # # KMS managed encryption key.
+    #               # kms:
+    #               #     endpoint: https://192.168.88.21:4443 # KMS endpoint to Seal/Unseal the key.
+    #         
+    #         # # Cipher kind to use for the encryption. Depends on the encryption provider.
+    #         # cipher: aes-xts-plain64
+
+    #         # # Defines the encryption sector size.
+    #         # blockSize: 4096
+
+    #         # # Additional --perf parameters for the LUKS2 encryption.
+    #         # options:
+    #         #     - no_read_workqueue
+    #         #     - no_write_workqueue
+
+    # # Configures the udev system.
+    # udev:
+    #     # List of udev rules to apply to the udev system
+    #     rules:
+    #         - SUBSYSTEM=="drm", KERNEL=="renderD*", GROUP="44", MODE="0660"
+
+    # # Configures the logging system.
+    logging:
+        # Logging destination.
+        destinations:
+            - endpoint: tcp://1.2.3.4:12345 # Where to send logs. Supported protocols are "tcp" and "udp".
+              format: json_lines # Logs format.
+
+    # # Configures the kernel.
+    # kernel:
+    #     # Kernel modules to load.
+    #     modules:
+    #         - name: btrfs # Module name.
+
+    # # Configures the seccomp profiles for the machine.
+    # seccompProfiles:
+    #     - name: audit.json # The `name` field is used to provide the file name of the seccomp profile.
+    #       # The `value` field is used to provide the seccomp profile.
+    #       value:
+    #         defaultAction: SCMP_ACT_LOG
+
+    # # Configures the node labels for the machine.
+
+    # # node labels example.
+    nodeLabels:
+        ceph-osd-node: "false"
+        ceph-mon-node: "true"
+        ceph-mgr-node: "true"
+        ceph-rgw-node: "true"
+# Provides cluster specific configuration options.
+cluster:
+    id: DnLy-yXXaIqQRaELsr5VZj-rnVj7jUxyyv69GkmgDIQ= # Globally unique identifier for this cluster (base64 encoded random 32 bytes).
+    secret: ssgf5bbwHcen8w5i48nlDIdsUcTf9O3AoO3EQDTbpmo= # Shared secret of cluster (base64 encoded random 32 bytes).
+    # Provides control plane specific configuration options.
+    controlPlane:
+        endpoint: https://talos-master-vm-01.dezendorf.net:6443 # Endpoint is the canonical controlplane endpoint, which can be an IP address or a DNS hostname.
+    # Provides cluster specific network configuration options.
+    network:
+        dnsDomain: cluster.local # The domain used by Kubernetes DNS.
+        # The pod subnet CIDR.
+        podSubnets:
+            - 10.244.0.0/16
+        # The service subnet CIDR.
+        serviceSubnets:
+            - 10.96.0.0/12
+        
+        # # The CNI used.
+        # cni:
+        #     name: custom # Name of CNI to use.
+        #     # URLs containing manifests to apply for the CNI.
+        #     urls:
+        #         - https://docs.projectcalico.org/archive/v3.20/manifests/canal.yaml
+    token: zszgpu.9yqk443feyidmnx3 # The [bootstrap token](https://kubernetes.io/docs/reference/access-authn-authz/bootstrap-tokens/) used to join the cluster.
+    # The base64 encoded root certificate authority used by Kubernetes.
+    ca:
+        crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJpekNDQVRDZ0F3SUJBZ0lSQUp4ZS9tbGpNUENkaXBFTDBrenZtNjB3Q2dZSUtvWkl6ajBFQXdJd0ZURVQKTUJFR0ExVUVDaE1LYTNWaVpYSnVaWFJsY3pBZUZ3MHlNekE1TURNeE5qTTJOVFZhRncwek16QTRNekV4TmpNMgpOVFZhTUJVeEV6QVJCZ05WQkFvVENtdDFZbVZ5Ym1WMFpYTXdXVEFUQmdjcWhrak9QUUlCQmdncWhrak9QUU1CCkJ3TkNBQVMza29QWERNeFE5c2UyUlhnZklFZXZLR0N6alRjcjNLZ1hEYjBkaW9ueHlRSXJNWGpTOUFWNEJ2aE4KRldKTHpRLy9WTjZJRUdGTEZOb1NjUnpJaURVNG8yRXdYekFPQmdOVkhROEJBZjhFQkFNQ0FvUXdIUVlEVlIwbApCQll3RkFZSUt3WUJCUVVIQXdFR0NDc0dBUVVGQndNQ01BOEdBMVVkRXdFQi93UUZNQU1CQWY4d0hRWURWUjBPCkJCWUVGS1F4eWVkOHdRUnplcFRwcFFZMkZIUHVOTTdjTUFvR0NDcUdTTTQ5QkFNQ0Ewa0FNRVlDSVFDNGFWTmoKN01ONEloMFdyNWcyK01oa1FBbzZUMzVYU0ZrVEozbjFaRDd1TGdJaEFJZC9PTGVFQkRQb2FQTUFMTk5Sc01hQQpTOWpSMHR5MTBCcTFzZVhuYVpJdAotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
+        key: ""
+    # Configures cluster member discovery.
+    discovery:
+        enabled: true # Enable the cluster membership discovery feature.
+        # Configure registries used for cluster member discovery.
+        registries:
+            # Kubernetes registry uses Kubernetes API server to discover cluster members and stores additional information
+            kubernetes:
+                disabled: true # Disable Kubernetes discovery registry.
+            # Service registry is using an external service to push and pull information about cluster members.
+            service: {}
+            # # External service endpoint.
+            # endpoint: https://discovery.talos.dev/
+    
+    # # A key used for the [encryption of secret data at rest](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/).
+
+    # # Decryption secret example (do not use in production!).
+    # aescbcEncryptionSecret: z01mye6j16bspJYtTB/5SFX8j7Ph4JXxM2Xuu4vsBPM=
+
+    # # A key used for the [encryption of secret data at rest](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/).
+
+    # # Decryption secret example (do not use in production!).
+    # secretboxEncryptionSecret: z01mye6j16bspJYtTB/5SFX8j7Ph4JXxM2Xuu4vsBPM=
+
+    # # The base64 encoded aggregator certificate authority used by Kubernetes for front-proxy certificate generation.
+
+    # # AggregatorCA example.
+    # aggregatorCA:
+    #     crt: LS0tIEVYQU1QTEUgQ0VSVElGSUNBVEUgLS0t
+    #     key: LS0tIEVYQU1QTEUgS0VZIC0tLQ==
+
+    # # The base64 encoded private key for service account token generation.
+
+    # # AggregatorCA example.
+    # serviceAccount:
+    #     key: LS0tIEVYQU1QTEUgS0VZIC0tLQ==
+
+    # # API server specific configuration options.
+    # apiServer:
+    #     image: registry.k8s.io/kube-apiserver:v1.28.0 # The container image used in the API server manifest.
+    #     # Extra arguments to supply to the API server.
+    #     extraArgs:
+    #         feature-gates: ServerSideApply=true
+    #         http2-max-streams-per-connection: "32"
+    #     # Extra certificate subject alternative names for the API server's certificate.
+    #     certSANs:
+    #         - 1.2.3.4
+    #         - 4.5.6.7
+    #     # Configure the API server admission plugins.
+    #     admissionControl:
+    #         - name: PodSecurity # Name is the name of the admission controller.
+    #           # Configuration is an embedded configuration object to be used as the plugin's
+    #           configuration:
+    #             apiVersion: pod-security.admission.config.k8s.io/v1alpha1
+    #             defaults:
+    #                 audit: restricted
+    #                 audit-version: latest
+    #                 enforce: baseline
+    #                 enforce-version: latest
+    #                 warn: restricted
+    #                 warn-version: latest
+    #             exemptions:
+    #                 namespaces:
+    #                     - kube-system
+    #                 runtimeClasses: []
+    #                 usernames: []
+    #             kind: PodSecurityConfiguration
+    #     # Configure the API server audit policy.
+    #     auditPolicy:
+    #         apiVersion: audit.k8s.io/v1
+    #         kind: Policy
+    #         rules:
+    #             - level: Metadata
+
+    # # Controller manager server specific configuration options.
+    # controllerManager:
+    #     image: registry.k8s.io/kube-controller-manager:v1.28.0 # The container image used in the controller manager manifest.
+    #     # Extra arguments to supply to the controller manager.
+    #     extraArgs:
+    #         feature-gates: ServerSideApply=true
+
+    # # Kube-proxy server-specific configuration options
+    # proxy:
+    #     disabled: false # Disable kube-proxy deployment on cluster bootstrap.
+    #     image: registry.k8s.io/kube-proxy:v1.28.0 # The container image used in the kube-proxy manifest.
+    #     mode: ipvs # proxy mode of kube-proxy.
+    #     # Extra arguments to supply to kube-proxy.
+    #     extraArgs:
+    #         proxy-mode: iptables
+
+    # # Scheduler server specific configuration options.
+    # scheduler:
+    #     image: registry.k8s.io/kube-scheduler:v1.28.0 # The container image used in the scheduler manifest.
+    #     # Extra arguments to supply to the scheduler.
+    #     extraArgs:
+    #         feature-gates: AllBeta=true
+
+    # # Etcd specific configuration options.
+    # etcd:
+    #     image: gcr.io/etcd-development/etcd:v3.5.9 # The container image used to create the etcd service.
+    #     # The `ca` is the root certificate authority of the PKI.
+    #     ca:
+    #         crt: LS0tIEVYQU1QTEUgQ0VSVElGSUNBVEUgLS0t
+    #         key: LS0tIEVYQU1QTEUgS0VZIC0tLQ==
+    #     # Extra arguments to supply to etcd.
+    #     extraArgs:
+    #         election-timeout: "5000"
+    #     # The `advertisedSubnets` field configures the networks to pick etcd advertised IP from.
+    #     advertisedSubnets:
+    #         - 10.0.0.0/8
+
+    # # Core DNS specific configuration options.
+    # coreDNS:
+    #     image: registry.k8s.io/coredns/coredns:v1.10.1 # The `image` field is an override to the default coredns image.
+
+    # # External cloud provider configuration.
+    # externalCloudProvider:
+    #     enabled: true # Enable external cloud provider.
+    #     # A list of urls that point to additional manifests for an external cloud provider.
+    #     manifests:
+    #         - https://raw.githubusercontent.com/kubernetes/cloud-provider-aws/v1.20.0-alpha.0/manifests/rbac.yaml
+    #         - https://raw.githubusercontent.com/kubernetes/cloud-provider-aws/v1.20.0-alpha.0/manifests/aws-cloud-controller-manager-daemonset.yaml
+
+    # # A list of urls that point to additional manifests.
+    # extraManifests:
+    #     - https://www.example.com/manifest1.yaml
+    #     - https://www.example.com/manifest2.yaml
+
+    # # A map of key value pairs that will be added while fetching the extraManifests.
+    # extraManifestHeaders:
+    #     Token: "1234567"
+    #     X-ExtraInfo: info
+
+    # # A list of inline Kubernetes manifests.
+    # inlineManifests:
+    #     - name: namespace-ci # Name of the manifest.
+    #       contents: |- # Manifest contents as a string.
+    #         apiVersion: v1
+    #         kind: Namespace
+    #         metadata:
+    #         	name: ci
+
+    # # Settings for admin kubeconfig generation.
+    # adminKubeconfig:
+    #     certLifetime: 1h0m0s # Admin kubeconfig certificate lifetime (default is 1 year).
+
+    # # Allows running workload on control-plane nodes.
+    # allowSchedulingOnControlPlanes: true
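To hand this file to a freshly booted worker, something along these lines is typical; the node address is a placeholder, and --insecure only works while the node is still in maintenance mode:

# push the worker machine config to a node waiting in maintenance mode
talosctl apply-config --insecure --nodes 192.168.2.51 --file dezendorf/homelab/talos/nodeconfig/worker.yaml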

+ 661 - 0
dezendorf/homelab/talos/operator-values.yaml

@@ -0,0 +1,661 @@
+# Default values for rook-ceph-operator
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+image:
+  # -- Image
+  repository: rook/ceph
+  # -- Image tag
+  # @default -- `master`
+  tag: master
+  # -- Image pull policy
+  pullPolicy: IfNotPresent
+
+crds:
+  # -- Whether the helm chart should create and update the CRDs. If false, the CRDs must be
+  # managed independently with deploy/examples/crds.yaml.
+  # **WARNING** Only set during first deployment. If later disabled the cluster may be DESTROYED.
+  # If the CRDs are deleted in this case, see
+  # [the disaster recovery guide](https://rook.io/docs/rook/latest/Troubleshooting/disaster-recovery/#restoring-crds-after-deletion)
+  # to restore them.
+  enabled: true
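Given the warning above, a cheap safeguard before chart upgrades or any change to crds.enabled is to snapshot the CRDs first; the output file name is arbitrary:

# dump every CRD in the cluster, Rook's included, to a local backup file
kubectl get crd -o yaml > crds-backup.yaml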
+
+# -- Pod resource requests & limits
+resources:
+  limits:
+    cpu: 500m
+    memory: 512Mi
+  requests:
+    cpu: 100m
+    memory: 128Mi
+
+# -- Kubernetes [`nodeSelector`](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector) to add to the Deployment.
+nodeSelector: {}
+# Constrain the rook-ceph-operator Deployment to nodes with the label `disktype: ssd`.
+# For more info, see https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
+#  disktype: ssd
+
+# -- List of Kubernetes [`tolerations`](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) to add to the Deployment.
+tolerations: []
+
+# -- Delay to use for the `node.kubernetes.io/unreachable` pod failure toleration to override
+# the Kubernetes default of 5 minutes
+unreachableNodeTolerationSeconds: 5
+
+# -- Whether the operator should watch cluster CRD in its own namespace or not
+currentNamespaceOnly: false
+
+# -- Pod annotations
+annotations: {}
+
+# -- Global log level for the operator.
+# Options: `ERROR`, `WARNING`, `INFO`, `DEBUG`
+logLevel: INFO
+
+# -- If true, create & use RBAC resources
+rbacEnable: true
+
+# -- If true, create & use PSP resources
+pspEnable: false
+
+# -- Set the priority class for the rook operator deployment if desired
+priorityClassName:
+
+# -- Set the container security context for the operator
+containerSecurityContext:
+  runAsNonRoot: true
+  runAsUser: 2016
+  runAsGroup: 2016
+  capabilities:
+    drop: ["ALL"]
+# -- If true, loop devices are allowed to be used for osds in test clusters
+allowLoopDevices: false
+
+# Settings for whether to disable the drivers or other daemons if they are not
+# needed
+csi:
+  # -- Enable Ceph CSI RBD driver
+  enableRbdDriver: false
+  # -- Enable Ceph CSI CephFS driver
+  enableCephfsDriver: true
+  # -- Enable Ceph CSI GRPC Metrics
+  enableGrpcMetrics: false
+  # -- Enable host networking for CSI CephFS and RBD nodeplugins. This may be necessary
+  # in some network configurations where the SDN does not provide access to an external cluster or
+  # there is significant drop in read/write performance
+  enableCSIHostNetwork: true
+  # -- Enable Snapshotter in CephFS provisioner pod
+  enableCephfsSnapshotter: true
+  # -- Enable Snapshotter in NFS provisioner pod
+  enableNFSSnapshotter: true
+  # -- Enable Snapshotter in RBD provisioner pod
+  enableRBDSnapshotter: true
+  # -- Enable Host mount for `/etc/selinux` directory for Ceph CSI nodeplugins
+  enablePluginSelinuxHostMount: false
+  # -- Enable Ceph CSI PVC encryption support
+  enableCSIEncryption: false
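With the RBD driver disabled and CephFS/NFS enabled as above, only CephFS and NFS CSI pods should appear once the operator reconciles. A quick sanity check, assuming the chart was installed into the upstream default rook-ceph namespace:

# list CSI pods; expect cephfs/nfs plugins and provisioners, and no rbd ones
kubectl --namespace rook-ceph get pods | grep csi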
+
+  # -- PriorityClassName to be set on csi driver plugin pods
+  pluginPriorityClassName: system-node-critical
+
+  # -- PriorityClassName to be set on csi driver provisioner pods
+  provisionerPriorityClassName: system-cluster-critical
+
+  # -- Policy for modifying a volume's ownership or permissions when the RBD PVC is being mounted.
+  # supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
+  rbdFSGroupPolicy: "File"
+
+  # -- Policy for modifying a volume's ownership or permissions when the CephFS PVC is being mounted.
+  # supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
+  cephFSFSGroupPolicy: "File"
+
+  # -- Policy for modifying a volume's ownership or permissions when the NFS PVC is being mounted.
+  # supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
+  nfsFSGroupPolicy: "File"
+
+  # -- OMAP generator generates the omap mapping between the PV name and the RBD image
+  # which helps CSI to identify the rbd images for CSI operations.
+  # `CSI_ENABLE_OMAP_GENERATOR` needs to be enabled when we are using rbd mirroring feature.
+  # By default OMAP generator is disabled and when enabled, it will be deployed as a
+  # sidecar with CSI provisioner pod, to enable set it to true.
+  enableOMAPGenerator: false
+
+  # -- Set CephFS Kernel mount options to use https://docs.ceph.com/en/latest/man/8/mount.ceph/#options.
+  # Set to "ms_mode=secure" when connections.encrypted is enabled in CephCluster CR
+  cephFSKernelMountOptions:
+
+  # -- Enable adding volume metadata on the CephFS subvolumes and RBD images.
+  # Not all users might be interested in getting volume/snapshot details as metadata on CephFS subvolume and RBD images.
+  # Hence enable metadata is false by default
+  enableMetadata: false
+
+  # -- Set replicas for csi provisioner deployment
+  provisionerReplicas: 2
+
+  # -- Cluster name identifier to set as metadata on the CephFS subvolume and RBD images. This will be useful
+  # in cases like for example, when two container orchestrator clusters (Kubernetes/OCP) are using a single ceph cluster
+  clusterName:
+
+  # -- Set logging level for cephCSI containers maintained by the cephCSI.
+  # Supported values from 0 to 5. 0 for general useful logs, 5 for trace level verbosity.
+  logLevel: 0
+
+  # -- Set logging level for Kubernetes-csi sidecar containers.
+  # Supported values from 0 to 5. 0 for general useful logs (the default), 5 for trace level verbosity.
+  # @default -- `0`
+  sidecarLogLevel:
+
+  # -- CSI RBD plugin daemonset update strategy, supported values are OnDelete and RollingUpdate
+  # @default -- `RollingUpdate`
+  rbdPluginUpdateStrategy:
+
+  # -- A maxUnavailable parameter of CSI RBD plugin daemonset update strategy.
+  # @default -- `1`
+  rbdPluginUpdateStrategyMaxUnavailable:
+
+  # -- CSI CephFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate
+  # @default -- `RollingUpdate`
+  cephFSPluginUpdateStrategy:
+
+  # -- A maxUnavailable parameter of CSI cephFS plugin daemonset update strategy.
+  # @default -- `1`
+  cephFSPluginUpdateStrategyMaxUnavailable:
+
+  # -- CSI NFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate
+  # @default -- `RollingUpdate`
+  nfsPluginUpdateStrategy:
+
+  # -- Set GRPC timeout for csi containers (in seconds). It should be >= 120. If this value is not set or is invalid, it defaults to 150
+  grpcTimeoutInSeconds: 150
+
+  # -- Allow starting an unsupported ceph-csi image
+  allowUnsupportedVersion: false
+
+  # -- The volume of the CephCSI RBD plugin DaemonSet
+  csiRBDPluginVolume:
+  #  - name: lib-modules
+  #    hostPath:
+  #      path: /run/booted-system/kernel-modules/lib/modules/
+  #  - name: host-nix
+  #    hostPath:
+  #      path: /nix
+
+  # -- The volume mounts of the CephCSI RBD plugin DaemonSet
+  csiRBDPluginVolumeMount:
+  #  - name: host-nix
+  #    mountPath: /nix
+  #    readOnly: true
+
+  # -- The volume of the CephCSI CephFS plugin DaemonSet
+  csiCephFSPluginVolume:
+  #  - name: lib-modules
+  #    hostPath:
+  #      path: /run/booted-system/kernel-modules/lib/modules/
+  #  - name: host-nix
+  #    hostPath:
+  #      path: /nix
+
+  # -- The volume mounts of the CephCSI CephFS plugin DaemonSet
+  csiCephFSPluginVolumeMount:
+  #  - name: host-nix
+  #    mountPath: /nix
+  #    readOnly: true
+
+  # -- CEPH CSI RBD provisioner resource requirement list
+  # csi-omap-generator resources will be applied only if `enableOMAPGenerator` is set to `true`
+  # @default -- see values.yaml
+  csiRBDProvisionerResource: |
+    - name : csi-provisioner
+      resource:
+        requests:
+          memory: 128Mi
+          cpu: 100m
+        limits:
+          memory: 256Mi
+          cpu: 200m
+    - name : csi-resizer
+      resource:
+        requests:
+          memory: 128Mi
+          cpu: 100m
+        limits:
+          memory: 256Mi
+          cpu: 200m
+    - name : csi-attacher
+      resource:
+        requests:
+          memory: 128Mi
+          cpu: 100m
+        limits:
+          memory: 256Mi
+          cpu: 200m
+    - name : csi-snapshotter
+      resource:
+        requests:
+          memory: 128Mi
+          cpu: 100m
+        limits:
+          memory: 256Mi
+          cpu: 200m
+    - name : csi-rbdplugin
+      resource:
+        requests:
+          memory: 512Mi
+          cpu: 250m
+        limits:
+          memory: 1Gi
+          cpu: 500m
+    - name : csi-omap-generator
+      resource:
+        requests:
+          memory: 512Mi
+          cpu: 250m
+        limits:
+          memory: 1Gi
+          cpu: 500m
+    - name : liveness-prometheus
+      resource:
+        requests:
+          memory: 128Mi
+          cpu: 50m
+        limits:
+          memory: 256Mi
+          cpu: 100m
+
+  # -- CEPH CSI RBD plugin resource requirement list
+  # @default -- see values.yaml
+  csiRBDPluginResource: |
+    - name : driver-registrar
+      resource:
+        requests:
+          memory: 128Mi
+          cpu: 50m
+        limits:
+          memory: 256Mi
+          cpu: 100m
+    - name : csi-rbdplugin
+      resource:
+        requests:
+          memory: 512Mi
+          cpu: 250m
+        limits:
+          memory: 1Gi
+          cpu: 500m
+    - name : liveness-prometheus
+      resource:
+        requests:
+          memory: 128Mi
+          cpu: 50m
+        limits:
+          memory: 256Mi
+          cpu: 100m
+
+  # -- CEPH CSI CephFS provisioner resource requirement list
+  # @default -- see values.yaml
+  csiCephFSProvisionerResource: |
+    - name : csi-provisioner
+      resource:
+        requests:
+          memory: 128Mi
+          cpu: 100m
+        limits:
+          memory: 256Mi
+          cpu: 200m
+    - name : csi-resizer
+      resource:
+        requests:
+          memory: 128Mi
+          cpu: 100m
+        limits:
+          memory: 256Mi
+          cpu: 200m
+    - name : csi-attacher
+      resource:
+        requests:
+          memory: 128Mi
+          cpu: 100m
+        limits:
+          memory: 256Mi
+          cpu: 200m
+    - name : csi-snapshotter
+      resource:
+        requests:
+          memory: 128Mi
+          cpu: 100m
+        limits:
+          memory: 256Mi
+          cpu: 200m
+    - name : csi-cephfsplugin
+      resource:
+        requests:
+          memory: 512Mi
+          cpu: 250m
+        limits:
+          memory: 1Gi
+          cpu: 500m
+    - name : liveness-prometheus
+      resource:
+        requests:
+          memory: 128Mi
+          cpu: 50m
+        limits:
+          memory: 256Mi
+          cpu: 100m
+
+  # -- CEPH CSI CephFS plugin resource requirement list
+  # @default -- see values.yaml
+  csiCephFSPluginResource: |
+    - name : driver-registrar
+      resource:
+        requests:
+          memory: 128Mi
+          cpu: 50m
+        limits:
+          memory: 256Mi
+          cpu: 100m
+    - name : csi-cephfsplugin
+      resource:
+        requests:
+          memory: 512Mi
+          cpu: 250m
+        limits:
+          memory: 1Gi
+          cpu: 500m
+    - name : liveness-prometheus
+      resource:
+        requests:
+          memory: 128Mi
+          cpu: 50m
+        limits:
+          memory: 256Mi
+          cpu: 100m
+
+  # -- CEPH CSI NFS provisioner resource requirement list
+  # @default -- see values.yaml
+  csiNFSProvisionerResource: |
+    - name : csi-provisioner
+      resource:
+        requests:
+          memory: 128Mi
+          cpu: 100m
+        limits:
+          memory: 256Mi
+          cpu: 200m
+    - name : csi-nfsplugin
+      resource:
+        requests:
+          memory: 512Mi
+          cpu: 250m
+        limits:
+          memory: 1Gi
+          cpu: 500m
+    - name : csi-attacher
+      resource:
+        requests:
+          memory: 512Mi
+          cpu: 250m
+        limits:
+          memory: 1Gi
+          cpu: 500m
+
+  # -- CEPH CSI NFS plugin resource requirement list
+  # @default -- see values.yaml
+  csiNFSPluginResource: |
+    - name : driver-registrar
+      resource:
+        requests:
+          memory: 128Mi
+          cpu: 50m
+        limits:
+          memory: 256Mi
+          cpu: 100m
+    - name : csi-nfsplugin
+      resource:
+        requests:
+          memory: 512Mi
+          cpu: 250m
+        limits:
+          memory: 1Gi
+          cpu: 500m
+
+  # Set provisionerTolerations and provisionerNodeAffinity for provisioner pod.
+  # The CSI provisioner would be best to start on the same nodes as other ceph daemons.
+
+  # -- Array of tolerations in YAML format which will be added to CSI provisioner deployment
+  provisionerTolerations:
+  #    - key: key
+  #      operator: Exists
+  #      effect: NoSchedule
+
+  # -- The node labels for affinity of the CSI provisioner deployment [^1]
+  provisionerNodeAffinity: #key1=value1,value2; key2=value3
+  # Set pluginTolerations and pluginNodeAffinity for plugin daemonset pods.
+  # The CSI plugins need to be started on all the nodes where the clients need to mount the storage.
+
+  # -- Array of tolerations in YAML format which will be added to CephCSI plugin DaemonSet
+  pluginTolerations:
+  #    - key: key
+  #      operator: Exists
+  #      effect: NoSchedule
+
+  # -- The node labels for affinity of the CephCSI RBD plugin DaemonSet [^1]
+  pluginNodeAffinity: # key1=value1,value2; key2=value3
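These affinity strings pair naturally with the ceph-* node labels set in the Talos machine configs above. One way to wire them up without editing this file, shown as a sketch with the upstream default release and namespace names:

# pin CSI provisioners to the nodes labelled for Ceph mon duty
helm upgrade --namespace rook-ceph rook-ceph rook-release/rook-ceph \
  --reuse-values \
  --set csi.provisionerNodeAffinity="ceph-mon-node=true"

The plugin affinity is left alone on purpose: the node plugins have to run on every node that will mount storage.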
+
+  # -- Enable Ceph CSI Liveness sidecar deployment
+  enableLiveness: false
+
+  # -- CSI CephFS driver GRPC metrics port
+  # @default -- `9091`
+  cephfsGrpcMetricsPort:
+
+  # -- CSI CephFS driver metrics port
+  # @default -- `9081`
+  cephfsLivenessMetricsPort:
+
+  # -- Ceph CSI RBD driver GRPC metrics port
+  # @default -- `9090`
+  rbdGrpcMetricsPort:
+
+  # -- CSI Addons server port
+  # @default -- `9070`
+  csiAddonsPort:
+
+  # -- Enable Ceph Kernel clients on kernel < 4.17. If your kernel does not support quotas for CephFS
+  # you may want to disable this setting. However, this will cause an issue during upgrades
+  # with the FUSE client. See the [upgrade guide](https://rook.io/docs/rook/v1.2/ceph-upgrade.html)
+  forceCephFSKernelClient: true
+
+  # -- Ceph CSI RBD driver metrics port
+  # @default -- `8080`
+  rbdLivenessMetricsPort:
+
+  serviceMonitor:
+    # -- Enable ServiceMonitor for Ceph CSI drivers
+    enabled: false
+    # -- Service monitor scrape interval
+    interval: 5s
+    # -- ServiceMonitor additional labels
+    labels: {}
+    # -- Use a different namespace for the ServiceMonitor
+    namespace:
+
+  # -- Kubelet root directory path (if the Kubelet uses a different path for the `--root-dir` flag)
+  # @default -- `/var/lib/kubelet`
+  kubeletDirPath:
+
+  cephcsi:
+    # -- Ceph CSI image
+    # @default -- `quay.io/cephcsi/cephcsi:v3.9.0`
+    image:
+
+  registrar:
+    # -- Kubernetes CSI registrar image
+    # @default -- `registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.8.0`
+    image:
+
+  provisioner:
+    # -- Kubernetes CSI provisioner image
+    # @default -- `registry.k8s.io/sig-storage/csi-provisioner:v3.5.0`
+    image:
+
+  snapshotter:
+    # -- Kubernetes CSI snapshotter image
+    # @default -- `registry.k8s.io/sig-storage/csi-snapshotter:v6.2.2`
+    image:
+
+  attacher:
+    # -- Kubernetes CSI Attacher image
+    # @default -- `registry.k8s.io/sig-storage/csi-attacher:v4.3.0`
+    image:
+
+  resizer:
+    # -- Kubernetes CSI resizer image
+    # @default -- `registry.k8s.io/sig-storage/csi-resizer:v1.8.0`
+    image:
+
+  # -- Image pull policy
+  imagePullPolicy: IfNotPresent
+
+  # -- Labels to add to the CSI CephFS Deployments and DaemonSets Pods
+  cephfsPodLabels: #"key1=value1,key2=value2"
+
+  # -- Labels to add to the CSI NFS Deployments and DaemonSets Pods
+  nfsPodLabels: #"key1=value1,key2=value2"
+
+  # -- Labels to add to the CSI RBD Deployments and DaemonSets Pods
+  rbdPodLabels: #"key1=value1,key2=value2"
+
+  csiAddons:
+    # -- Enable CSIAddons
+    enabled: false
+    # -- CSIAddons Sidecar image
+    image: "quay.io/csiaddons/k8s-sidecar:v0.7.0"
+
+  nfs:
+    # -- Enable the nfs csi driver
+    enabled: true
+
+  topology:
+    # -- Enable topology based provisioning
+    enabled: false
+    # NOTE: the value here serves as an example and needs to be
+    # updated with node labels that define domains of interest
+    # -- domainLabels define which node labels to use as domains
+    # for CSI nodeplugins to advertise their domains
+    domainLabels:
+    # - kubernetes.io/hostname
+    # - topology.kubernetes.io/zone
+    # - topology.rook.io/rack
+
+  readAffinity:
+    # -- Enable read affinity for RBD volumes. Recommended to
+    # set to true if running kernel 5.8 or newer.
+    # @default -- `false`
+    enabled: false
+    # -- Define which node labels to use
+    # as CRUSH location. This should correspond to the values set
+    # in the CRUSH map.
+    # @default -- labels listed [here](../CRDs/Cluster/ceph-cluster-crd.md#osd-topology)
+    crushLocationLabels:
+
+  # -- Whether to skip any attach operation altogether for CephFS PVCs. See more details
+  # [here](https://kubernetes-csi.github.io/docs/skip-attach.html#skip-attach-with-csi-driver-object).
+  # If cephFSAttachRequired is set to false it skips the volume attachments and makes the creation
+  # of pods using the CephFS PVC fast. **WARNING** It's highly discouraged to use this for
+  # CephFS RWO volumes. Refer to this [issue](https://github.com/kubernetes/kubernetes/issues/103305) for more details.
+  cephFSAttachRequired: true
+  # -- Whether to skip any attach operation altogether for RBD PVCs. See more details
+  # [here](https://kubernetes-csi.github.io/docs/skip-attach.html#skip-attach-with-csi-driver-object).
+  # If set to false it skips the volume attachments and makes the creation of pods using the RBD PVC fast.
+  # **WARNING** It's highly discouraged to use this for RWO volumes as it can cause data corruption.
+  # csi-addons operations like Reclaimspace and PVC Keyrotation will also not be supported if set
+  # to false since we'll have no VolumeAttachments to determine which node the PVC is mounted on.
+  # Refer to this [issue](https://github.com/kubernetes/kubernetes/issues/103305) for more details.
+  rbdAttachRequired: true
+  # -- Whether to skip any attach operation altogether for NFS PVCs. See more details
+  # [here](https://kubernetes-csi.github.io/docs/skip-attach.html#skip-attach-with-csi-driver-object).
+  # If cephFSAttachRequired is set to false it skips the volume attachments and makes the creation
+  # of pods using the NFS PVC fast. **WARNING** It's highly discouraged to use this for
+  # NFS RWO volumes. Refer to this [issue](https://github.com/kubernetes/kubernetes/issues/103305) for more details.
+  nfsAttachRequired: true
+
+# -- Enable discovery daemon
+enableDiscoveryDaemon: false
+# -- Set the discovery daemon device discovery interval (default to 60m)
+discoveryDaemonInterval: 60m
+
+# -- The timeout for ceph commands in seconds
+cephCommandsTimeoutSeconds: "15"
+
+# -- If true, run rook operator on the host network
+useOperatorHostNetwork:
+
+# -- If true, scale down the rook operator.
+# This is useful for administrative actions where the rook operator must be scaled down, while using gitops style tooling
+# to deploy your helm charts.
+scaleDownOperator: false
+
+## Rook Discover configuration
+## toleration: NoSchedule, PreferNoSchedule or NoExecute
+## tolerationKey: Set this to the specific key of the taint to tolerate
+## tolerations: Array of tolerations in YAML format which will be added to agent deployment
+## nodeAffinity: Set to labels of the node to match
+
+discover:
+  # -- Toleration for the discover pods.
+  # Options: `NoSchedule`, `PreferNoSchedule` or `NoExecute`
+  toleration:
+  # -- The specific key of the taint to tolerate
+  tolerationKey:
+  # -- Array of tolerations in YAML format which will be added to discover deployment
+  tolerations:
+  #   - key: key
+  #     operator: Exists
+  #     effect: NoSchedule
+  # -- The node labels for affinity of `discover-agent` [^1]
+  nodeAffinity: # key1=value1,value2; key2=value3
+  # -- Labels to add to the discover pods
+  podLabels: # "key1=value1,key2=value2"
+  # -- Add resources to discover daemon pods
+  resources:
+  #   - limits:
+  #       cpu: 500m
+  #       memory: 512Mi
+  #   - requests:
+  #       cpu: 100m
+  #       memory: 128Mi
+
+# -- Whether to disable the admission controller
+disableAdmissionController: true
+
+# -- Runs Ceph Pods as privileged to be able to write to `hostPaths` in OpenShift with SELinux restrictions.
+hostpathRequiresPrivileged: false
+
+# -- Disable automatic orchestration when new devices are discovered.
+disableDeviceHotplug: false
+
+# -- Blacklist certain disks according to the regex provided.
+discoverDaemonUdev:
+
+# -- imagePullSecrets option allow to pull docker images from private docker registry. Option will be passed to all service accounts.
+imagePullSecrets:
+# - name: my-registry-secret
+
+# -- Whether the OBC provisioner should watch on the operator namespace or not, if not the namespace of the cluster will be used
+enableOBCWatchOperatorNamespace: true
+
+# -- Set tolerations and nodeAffinity [^1] for admission controller pod.
+# The admission controller would be best to start on the same nodes as other ceph daemons.
+admissionController:
+  # tolerations:
+  #    - key: key
+  #      operator: Exists
+  #      effect: NoSchedule
+  # nodeAffinity: key1=value1,value2; key2=value3
+
+# [^1]: `nodeAffinity` and `*NodeAffinity` options should have the format `"role=storage,rook; storage=ceph"` or `storage=;role=rook-example` or `storage=;` (_checks only for presence of key_)
+
+monitoring:
+  # -- Enable monitoring. Requires Prometheus to be pre-installed.
+  # Enabling will also create RBAC rules to allow Operator to create ServiceMonitors
+  enabled: false
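If Prometheus is added to the cluster later, monitoring can be switched on at upgrade time instead of editing this file; the release and namespace names below are the upstream defaults, not values taken from this repo:

# re-render the operator with monitoring enabled (requires the Prometheus CRDs to exist)
helm upgrade --namespace rook-ceph rook-ceph rook-release/rook-ceph \
  --values operator-values.yaml \
  --set monitoring.enabled=true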