Browse Source

Added initial ceph-up/down and helm values

Breandan Dezendorf 2 years ago
parent
commit
df4f5093d8

+ 1 - 4
dezendorf/homelab/talos/bin/ceph-up.sh

@@ -21,6 +21,7 @@ echo "Setting ${NAMESPACE} to enforce=privileged"
 kubectl label ns ${NAMESPACE} pod-security.kubernetes.io/enforce=privileged
 kubectl label ns default pod-security.kubernetes.io/enforce=privileged
 
+
 echo "Installing operator ${OPERATOR} for cluster ${CLUSTER} in namespace ${NAMESPACE}" 
 echo helm install --create-namespace --namespace ${NAMESPACE} ${CLUSTER} rook-release/rook-ceph --values operator-values.yaml
 helm install --create-namespace --namespace ${NAMESPACE} ${OPERATOR} rook-release/rook-ceph --values operator-values.yaml
@@ -33,7 +34,3 @@ echo "Telling operator ${OPERATOR} to create cluster ${CLUSTER} in namespace ${N
 echo helm install --create-namespace --namespace ${NAMESPACE} ${CLUSTER}-operator --set operatorNamespace=${NAMESPACE} rook-release/rook-ceph-cluster --values ceph-values.yaml
 helm install --create-namespace --namespace ${NAMESPACE} ${OPERATOR}-operator --set operatorNamespace=${NAMESPACE} rook-release/rook-ceph-cluster --values ceph-values.yaml
 
-
-#echo "Setting up ceph-toolbox"
-#echo kubectl apply --namespace ${NAMESPACE} -f toolbox.yaml
-#kubectl apply --namespace ${NAMESPACE} -f toolbox.yaml

+ 62 - 62
dezendorf/homelab/talos/ceph-values.yaml

@@ -146,7 +146,7 @@ cephClusterSpec:
     # serve the dashboard under a subpath (useful when you are accessing the dashboard via a reverse proxy)
     urlPrefix: /ceph-dashboard
     # serve the dashboard at the given port.
-    port: 8443
+    port: 8080
     # Serve the dashboard using SSL (if using ingress to expose the dashboard and `ssl: true` you need to set
     # the corresponding "backend protocol" annotation(s) for your ingress controller of choice)
     ssl: false
@@ -231,72 +231,72 @@ cephClusterSpec:
   # To control where various services will be scheduled by kubernetes, use the placement configuration sections below.
   # The example under 'all' would have all services scheduled on kubernetes nodes labeled with 'role=storage-node' and
   # tolerate taints with a key of 'storage-node'.
-    placement:
-      all:
-        nodeAffinity:
-          requiredDuringSchedulingIgnoredDuringExecution:
-            nodeSelectorTerms:
-              - matchExpressions:
-                - key: role
-                  operator: In
-                  values:
-                  - storage-node
-        podAffinity:
-        podAntiAffinity:
-        topologySpreadConstraints:
-        tolerations:
-        - key: storage-node
-          operator: Exists
+  placement:
+    all:
+      nodeAffinity:
+        requiredDuringSchedulingIgnoredDuringExecution:
+          nodeSelectorTerms:
+            - matchExpressions:
+              - key: all
+                operator: In
+                values:
+                - all
+      podAffinity:
+      podAntiAffinity:
+      topologySpreadConstraints:
+      tolerations:
+      - key: all
+        operator: Exists
   #   # The above placement information can also be specified for mon, osd, and mgr components
-      mon:
-        nodeAffinity:
-          requiredDuringSchedulingIgnoredDuringExecution:
-            nodeSelectorTerms:
-              - matchExpressions:
-                - key: ceph-node-type
-                  operator: In
-                  values:
-                  - mon
-        podAffinity:
-        podAntiAffinity:
-        topologySpreadConstraints:
-        tolerations:
-        - key: mon
-          operator: Exists
+    mon:
+      nodeAffinity:
+        requiredDuringSchedulingIgnoredDuringExecution:
+          nodeSelectorTerms:
+            - matchExpressions:
+              - key: mon
+                operator: In
+                values:
+                - mon
+      podAffinity:
+      podAntiAffinity:
+      topologySpreadConstraints:
+      tolerations:
+      - key: mon
+        operator: Exists
   #   # Monitor deployments may contain an anti-affinity rule for avoiding monitor
   #   # collocation on the same node. This is a required rule when host network is used
   #   # or when AllowMultiplePerNode is false. Otherwise this anti-affinity rule is a
   #   # preferred rule with weight: 50.
-      osd:
-        nodeAffinity:
-          requiredDuringSchedulingIgnoredDuringExecution:
-            nodeSelectorTerms:
-              - matchExpressions:
-                - key: ceph-node-type
-                  operator: In
-                  values:
-                  - osd
-        podAffinity:
-        podAntiAffinity:
-        topologySpreadConstraints:
-        tolerations:
-        - key: osd
-          operator: Exists
-      mgr:
-        nodeAffinity:
-          requiredDuringSchedulingIgnoredDuringExecution:
-            nodeSelectorTerms:
-              - matchExpressions:
-                - key: ceph-node-type
-                  operator: In
-                  values:
-                  - mon
-        podAffinity:
-        podAntiAffinity:
-        topologySpreadConstraints:
-        tolerations:
-        - key: mon
-          operator: Exists
+    osd:
+      nodeAffinity:
+        requiredDuringSchedulingIgnoredDuringExecution:
+          nodeSelectorTerms:
+            - matchExpressions:
+              - key: osd
+                operator: In
+                values:
+                - osd
+      podAffinity:
+      podAntiAffinity:
+      topologySpreadConstraints:
+      tolerations:
+      - key: osd
+        operator: Exists
+    mgr:
+      nodeAffinity:
+        requiredDuringSchedulingIgnoredDuringExecution:
+          nodeSelectorTerms:
+            - matchExpressions:
+              - key: mgr
+                operator: In
+                values:
+                - mgr
+      podAffinity:
+      podAntiAffinity:
+      topologySpreadConstraints:
+      tolerations:
+      - key: mgr
+        operator: Exists
   #   cleanup:
 
   # annotations:

+ 6 - 6
dezendorf/homelab/talos/operator-values.yaml

@@ -486,32 +486,32 @@ csi:
   cephcsi:
     # -- Ceph CSI image
     # @default -- `quay.io/cephcsi/cephcsi:v3.9.0`
-    image:
+    image: "docker.dezendorf.net/quay.io/cephcsi/cephcsi:v3.9.0"
 
   registrar:
     # -- Kubernetes CSI registrar image
     # @default -- `registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.8.0`
-    image:
+    image: "docker.dezendorf.net/registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.8.0"
 
   provisioner:
     # -- Kubernetes CSI provisioner image
     # @default -- `registry.k8s.io/sig-storage/csi-provisioner:v3.5.0`
-    image:
+    image: "docker.dezendorf.net/registry.k8s.io/sig-storage/csi-provisioner:v3.5.0"
 
   snapshotter:
     # -- Kubernetes CSI snapshotter image
     # @default -- `registry.k8s.io/sig-storage/csi-snapshotter:v6.2.2`
-    image:
+    image: "docker.dezendorf.net/registry.k8s.io/sig-storage/csi-snapshotter:v6.2.2"
 
   attacher:
     # -- Kubernetes CSI Attacher image
     # @default -- `registry.k8s.io/sig-storage/csi-attacher:v4.3.0`
-    image:
+    image: "docker.dezendorf.net/registry.k8s.io/sig-storage/csi-attacher:v4.3.0"
 
   resizer:
     # -- Kubernetes CSI resizer image
     # @default -- `registry.k8s.io/sig-storage/csi-resizer:v1.8.0`
-    image:
+    image: "docker.dezendorf.net/registry.k8s.io/sig-storage/csi-resizer:v1.8.0"
 
   # -- Image pull policy
   imagePullPolicy: IfNotPresent
   imagePullPolicy: IfNotPresent