--- # Source: mastodon/charts/minio/templates/serviceaccount.yaml apiVersion: v1 kind: ServiceAccount metadata: name: mastodon-minio namespace: "mastodon" labels: app.kubernetes.io/name: minio helm.sh/chart: minio-12.0.0 app.kubernetes.io/instance: mastodon app.kubernetes.io/managed-by: Helm automountServiceAccountToken: true secrets: - name: mastodon-minio --- # Source: mastodon/charts/redis/templates/serviceaccount.yaml apiVersion: v1 kind: ServiceAccount automountServiceAccountToken: true metadata: name: mastodon-redis namespace: "mastodon" labels: app.kubernetes.io/name: redis helm.sh/chart: redis-17.4.3 app.kubernetes.io/instance: mastodon app.kubernetes.io/managed-by: Helm --- # Source: mastodon/templates/service-account.yaml apiVersion: v1 kind: ServiceAccount metadata: name: mastodon namespace: "mastodon" labels: app.kubernetes.io/name: mastodon helm.sh/chart: mastodon-1.0.1 app.kubernetes.io/instance: mastodon app.kubernetes.io/managed-by: Helm app.kubernetes.io/part-of: mastodon automountServiceAccountToken: true --- # Source: mastodon/charts/minio/templates/secrets.yaml apiVersion: v1 kind: Secret metadata: name: mastodon-minio namespace: "mastodon" labels: app.kubernetes.io/name: minio helm.sh/chart: minio-12.0.0 app.kubernetes.io/instance: mastodon app.kubernetes.io/managed-by: Helm type: Opaque data: root-user: "YWRtaW4=" root-password: "U1lNNDBMeXRjVg==" key.json: "" --- # Source: mastodon/charts/postgresql/templates/secrets.yaml apiVersion: v1 kind: Secret metadata: name: mastodon-postgresql namespace: "mastodon" labels: app.kubernetes.io/name: postgresql helm.sh/chart: postgresql-12.1.9 app.kubernetes.io/instance: mastodon app.kubernetes.io/managed-by: Helm type: Opaque data: postgres-password: "cHVaRkp2c1VCOQ==" password: "aGtJUGFJZGg1Vg==" # We don't auto-generate LDAP password when it's not provided as we do for other passwords --- # Source: mastodon/charts/redis/templates/secret.yaml apiVersion: v1 kind: Secret metadata: name: mastodon-redis 
namespace: "mastodon" labels: app.kubernetes.io/name: redis helm.sh/chart: redis-17.4.3 app.kubernetes.io/instance: mastodon app.kubernetes.io/managed-by: Helm type: Opaque data: redis-password: "RVdvWWdYNzI1Sg==" --- # Source: mastodon/templates/default-secret.yaml apiVersion: v1 kind: Secret metadata: name: mastodon-default namespace: "mastodon" labels: app.kubernetes.io/name: mastodon helm.sh/chart: mastodon-1.0.1 app.kubernetes.io/instance: mastodon app.kubernetes.io/managed-by: Helm app.kubernetes.io/part-of: mastodon data: MASTODON_ADMIN_PASSWORD: "ZEJ2OGp3aVkxUA==" SECRET_KEY_BASE: "TFZtQXZkMW9Caw==" OTP_SECRET: "dWlmdzhSSUpDbQ==" --- # Source: mastodon/charts/minio/templates/provisioning-configmap.yaml apiVersion: v1 kind: ConfigMap metadata: name: mastodon-minio-provisioning namespace: "mastodon" labels: app.kubernetes.io/name: minio helm.sh/chart: minio-12.0.0 app.kubernetes.io/instance: mastodon app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: minio-provisioning data: --- # Source: mastodon/charts/redis/templates/configmap.yaml apiVersion: v1 kind: ConfigMap metadata: name: mastodon-redis-configuration namespace: "mastodon" labels: app.kubernetes.io/name: redis helm.sh/chart: redis-17.4.3 app.kubernetes.io/instance: mastodon app.kubernetes.io/managed-by: Helm data: redis.conf: |- # User-supplied common configuration: # Enable AOF https://redis.io/topics/persistence#append-only-file appendonly yes # Disable RDB persistence, AOF persistence already enabled. 
save "" # End of common configuration master.conf: |- dir /data # User-supplied master configuration: rename-command FLUSHDB "" rename-command FLUSHALL "" # End of master configuration replica.conf: |- dir /data # User-supplied replica configuration: rename-command FLUSHDB "" rename-command FLUSHALL "" # End of replica configuration --- # Source: mastodon/charts/redis/templates/health-configmap.yaml apiVersion: v1 kind: ConfigMap metadata: name: mastodon-redis-health namespace: "mastodon" labels: app.kubernetes.io/name: redis helm.sh/chart: redis-17.4.3 app.kubernetes.io/instance: mastodon app.kubernetes.io/managed-by: Helm data: ping_readiness_local.sh: |- #!/bin/bash [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD" response=$( timeout -s 3 $1 \ redis-cli \ -h localhost \ -p $REDIS_PORT \ ping ) if [ "$?" -eq "124" ]; then echo "Timed out" exit 1 fi if [ "$response" != "PONG" ]; then echo "$response" exit 1 fi ping_liveness_local.sh: |- #!/bin/bash [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD" response=$( timeout -s 3 $1 \ redis-cli \ -h localhost \ -p $REDIS_PORT \ ping ) if [ "$?" -eq "124" ]; then echo "Timed out" exit 1 fi responseFirstWord=$(echo $response | head -n1 | awk '{print $1;}') if [ "$response" != "PONG" ] && [ "$responseFirstWord" != "LOADING" ] && [ "$responseFirstWord" != "MASTERDOWN" ]; then echo "$response" exit 1 fi ping_readiness_master.sh: |- #!/bin/bash [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")" [[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD" response=$( timeout -s 3 $1 \ redis-cli \ -h $REDIS_MASTER_HOST \ -p $REDIS_MASTER_PORT_NUMBER \ ping ) if [ "$?" 
-eq "124" ]; then echo "Timed out" exit 1 fi if [ "$response" != "PONG" ]; then echo "$response" exit 1 fi ping_liveness_master.sh: |- #!/bin/bash [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")" [[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD" response=$( timeout -s 3 $1 \ redis-cli \ -h $REDIS_MASTER_HOST \ -p $REDIS_MASTER_PORT_NUMBER \ ping ) if [ "$?" -eq "124" ]; then echo "Timed out" exit 1 fi responseFirstWord=$(echo $response | head -n1 | awk '{print $1;}') if [ "$response" != "PONG" ] && [ "$responseFirstWord" != "LOADING" ]; then echo "$response" exit 1 fi ping_readiness_local_and_master.sh: |- script_dir="$(dirname "$0")" exit_status=0 "$script_dir/ping_readiness_local.sh" $1 || exit_status=$? "$script_dir/ping_readiness_master.sh" $1 || exit_status=$? exit $exit_status ping_liveness_local_and_master.sh: |- script_dir="$(dirname "$0")" exit_status=0 "$script_dir/ping_liveness_local.sh" $1 || exit_status=$? "$script_dir/ping_liveness_master.sh" $1 || exit_status=$? 
exit $exit_status --- # Source: mastodon/charts/redis/templates/scripts-configmap.yaml apiVersion: v1 kind: ConfigMap metadata: name: mastodon-redis-scripts namespace: "mastodon" labels: app.kubernetes.io/name: redis helm.sh/chart: redis-17.4.3 app.kubernetes.io/instance: mastodon app.kubernetes.io/managed-by: Helm data: start-master.sh: | #!/bin/bash [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" if [[ -f /opt/bitnami/redis/mounted-etc/master.conf ]];then cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf fi if [[ -f /opt/bitnami/redis/mounted-etc/redis.conf ]];then cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf fi ARGS=("--port" "${REDIS_PORT}") ARGS+=("--requirepass" "${REDIS_PASSWORD}") ARGS+=("--masterauth" "${REDIS_PASSWORD}") ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf") exec redis-server "${ARGS[@]}" --- # Source: mastodon/templates/apache-configmap.yaml apiVersion: v1 kind: ConfigMap metadata: name: mastodon-apache-mastodon-vhost namespace: "mastodon" labels: app.kubernetes.io/name: mastodon helm.sh/chart: mastodon-1.0.1 app.kubernetes.io/instance: mastodon app.kubernetes.io/managed-by: Helm app.kubernetes.io/part-of: mastodon data: mastodon-vhost.conf: |- ServerName mastodon.dezendorf.net ServerAlias * ProxyPass http://mastodon-web:80/ ProxyPassReverse mastodon.dezendorf.net Order allow,deny Allow from all # Streaming uses normal API calls and websockets. 
We used this configuration # based on https://stackoverflow.com/questions/27526281/websockets-and-apache-proxy-how-to-configure-mod-proxy-wstunnel RewriteEngine On RewriteCond %{HTTP:Upgrade} =websocket [NC] RewriteRule /api/(.*) ws://mastodon-streaming:80/api/$1 [P,L] RewriteCond %{HTTP:Upgrade} !=websocket [NC] RewriteRule /api/(.*) http://mastodon-streaming:80/api/$1 [P,L] ProxyPassReverse mastodon.dezendorf.net Order allow,deny Allow from all ProxyPass http://mastodon-minio:80/s3storage/ ProxyPassReverse mastodon.dezendorf.net Order allow,deny Allow from all --- # Source: mastodon/templates/default-configmap.yaml apiVersion: v1 kind: ConfigMap metadata: name: mastodon-default namespace: "mastodon" labels: app.kubernetes.io/name: mastodon helm.sh/chart: mastodon-1.0.1 app.kubernetes.io/instance: mastodon app.kubernetes.io/managed-by: Helm app.kubernetes.io/part-of: mastodon data: MASTODON_ADMIN_USERNAME: "breandan" MASTODON_ADMIN_EMAIL: "breandan@dezendorf.com" DB_HOST: "mastodon-postgresql" DB_PORT: "5432" DB_NAME: "bitnami_mastodon" DB_USER: "bn_mastodon" ES_ENABLED: "true" ES_HOST: "mastodon-elasticsearch" ES_PORT: "9200" WEB_DOMAIN: "mastodon.dezendorf.net" STREAMING_API_BASE_URL: "ws://mastodon.dezendorf.net" REDIS_HOST: "mastodon-redis-master" REDIS_PORT: "6379" S3_ENABLED: "true" S3_BUCKET: "s3storage" S3_ENDPOINT: "http://mastodon-minio" S3_HOSTNAME: "mastodon-minio" S3_REGION: "us-east-1" S3_ALIAS_HOST: "mastodon.dezendorf.net/s3storage" S3_PROTOCOL: "http" --- # Source: mastodon/templates/init-job/init-job-configmap.yaml apiVersion: v1 kind: ConfigMap metadata: name: mastodon-init-scripts namespace: "mastodon" labels: app.kubernetes.io/name: mastodon helm.sh/chart: mastodon-1.0.1 app.kubernetes.io/instance: mastodon app.kubernetes.io/managed-by: Helm app.kubernetes.io/part-of: mastodon data: # All these operations require access to PostgreSQL (including Elasticsearch migration) and Redis. 
In order to avoid # potential race conditions we include them in the same script. migrate-and-create-admin.sh: |- #!/bin/bash set -o errexit set -o nounset set -o pipefail # Load libraries . /opt/bitnami/scripts/liblog.sh . /opt/bitnami/scripts/libos.sh . /opt/bitnami/scripts/libvalidations.sh . /opt/bitnami/scripts/libmastodon.sh # Load Mastodon environment variables . /opt/bitnami/scripts/mastodon-env.sh info "Migrating database" psql_connection_string="postgresql://${MASTODON_DATABASE_USERNAME}:${MASTODON_DATABASE_PASSWORD}@${MASTODON_DATABASE_HOST}:${MASTODON_DATABASE_PORT_NUMBER}/${MASTODON_DATABASE_NAME}" mastodon_wait_for_postgresql_connection "$psql_connection_string" mastodon_rake_execute db:migrate elasticsearch_connection_string="http://${MASTODON_ELASTICSEARCH_HOST}:${MASTODON_ELASTICSEARCH_PORT_NUMBER}" mastodon_wait_for_elasticsearch_connection "$elasticsearch_connection_string" info "Migrating Elasticsearch" mastodon_rake_execute chewy:upgrade mastodon_ensure_admin_user_exists precompile-assets.sh: |- #!/bin/bash set -o errexit set -o nounset set -o pipefail # Load libraries . /opt/bitnami/scripts/liblog.sh . /opt/bitnami/scripts/libos.sh . /opt/bitnami/scripts/libvalidations.sh . /opt/bitnami/scripts/libmastodon.sh # Load Mastodon environment variables . 
/opt/bitnami/scripts/mastodon-env.sh mastodon_wait_for_s3_connection "$MASTODON_S3_HOSTNAME" "$MASTODON_S3_PORT_NUMBER" info "Precompiling assets" mastodon_rake_execute assets:precompile --- # Source: mastodon/charts/minio/templates/pvc.yaml kind: PersistentVolumeClaim apiVersion: v1 metadata: name: mastodon-minio namespace: "mastodon" labels: app.kubernetes.io/name: minio helm.sh/chart: minio-12.0.0 app.kubernetes.io/instance: mastodon app.kubernetes.io/managed-by: Helm spec: accessModes: - "ReadWriteOnce" resources: requests: storage: "8Gi" storageClassName: longhorn --- # Source: mastodon/charts/apache/templates/svc.yaml apiVersion: v1 kind: Service metadata: name: mastodon-apache namespace: "mastodon" labels: app.kubernetes.io/name: apache helm.sh/chart: apache-9.2.11 app.kubernetes.io/instance: mastodon app.kubernetes.io/managed-by: Helm spec: type: LoadBalancer externalTrafficPolicy: "Cluster" loadBalancerSourceRanges: [] sessionAffinity: None ports: - name: http port: 80 targetPort: http - name: https port: 443 targetPort: https selector: app.kubernetes.io/name: apache app.kubernetes.io/instance: mastodon --- # Source: mastodon/charts/elasticsearch/templates/coordinating/svc-headless.yaml apiVersion: v1 kind: Service metadata: name: mastodon-elasticsearch-coordinating-hl namespace: "mastodon" labels: app.kubernetes.io/name: elasticsearch helm.sh/chart: elasticsearch-19.5.8 app.kubernetes.io/instance: mastodon app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: coordinating-only spec: type: ClusterIP publishNotReadyAddresses: true ports: - name: tcp-rest-api port: 9200 targetPort: rest-api - name: tcp-transport port: 9300 targetPort: transport selector: app.kubernetes.io/name: elasticsearch app.kubernetes.io/instance: mastodon app.kubernetes.io/component: coordinating-only --- # Source: mastodon/charts/elasticsearch/templates/data/svc-headless.yaml apiVersion: v1 kind: Service metadata: name: mastodon-elasticsearch-data-hl namespace: "mastodon" 
labels: app.kubernetes.io/name: elasticsearch helm.sh/chart: elasticsearch-19.5.8 app.kubernetes.io/instance: mastodon app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: data spec: type: ClusterIP publishNotReadyAddresses: true ports: - name: tcp-rest-api port: 9200 targetPort: rest-api - name: tcp-transport port: 9300 targetPort: transport selector: app.kubernetes.io/name: elasticsearch app.kubernetes.io/instance: mastodon app.kubernetes.io/component: data --- # Source: mastodon/charts/elasticsearch/templates/ingest/svc-headless.yaml apiVersion: v1 kind: Service metadata: name: mastodon-elasticsearch-ingest-hl namespace: "mastodon" labels: app.kubernetes.io/name: elasticsearch helm.sh/chart: elasticsearch-19.5.8 app.kubernetes.io/instance: mastodon app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: ingest spec: type: ClusterIP publishNotReadyAddresses: true ports: - name: tcp-rest-api port: 9200 targetPort: rest-api - name: tcp-transport port: 9300 targetPort: transport selector: app.kubernetes.io/name: elasticsearch app.kubernetes.io/instance: mastodon app.kubernetes.io/component: ingest --- # Source: mastodon/charts/elasticsearch/templates/master/svc-headless.yaml apiVersion: v1 kind: Service metadata: name: mastodon-elasticsearch-master-hl namespace: "mastodon" labels: app.kubernetes.io/name: elasticsearch helm.sh/chart: elasticsearch-19.5.8 app.kubernetes.io/instance: mastodon app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: master spec: type: ClusterIP publishNotReadyAddresses: true ports: - name: tcp-rest-api port: 9200 targetPort: rest-api - name: tcp-transport port: 9300 targetPort: transport selector: app.kubernetes.io/name: elasticsearch app.kubernetes.io/instance: mastodon app.kubernetes.io/component: master --- # Source: mastodon/charts/elasticsearch/templates/service.yaml apiVersion: v1 kind: Service metadata: name: mastodon-elasticsearch namespace: "mastodon" labels: app.kubernetes.io/name: elasticsearch 
helm.sh/chart: elasticsearch-19.5.8 app.kubernetes.io/instance: mastodon app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: coordinating-only annotations: spec: type: ClusterIP sessionAffinity: None ports: - name: tcp-rest-api port: 9200 targetPort: rest-api nodePort: null - name: tcp-transport port: 9300 nodePort: null selector: app.kubernetes.io/name: elasticsearch app.kubernetes.io/instance: mastodon app.kubernetes.io/component: coordinating-only --- # Source: mastodon/charts/minio/templates/service.yaml apiVersion: v1 kind: Service metadata: name: mastodon-minio namespace: "mastodon" labels: app.kubernetes.io/name: minio helm.sh/chart: minio-12.0.0 app.kubernetes.io/instance: mastodon app.kubernetes.io/managed-by: Helm spec: type: ClusterIP ports: - name: minio-api port: 80 targetPort: minio-api nodePort: null - name: minio-console port: 9001 targetPort: minio-console nodePort: null selector: app.kubernetes.io/name: minio app.kubernetes.io/instance: mastodon --- # Source: mastodon/charts/postgresql/templates/primary/svc-headless.yaml apiVersion: v1 kind: Service metadata: name: mastodon-postgresql-hl namespace: "mastodon" labels: app.kubernetes.io/name: postgresql helm.sh/chart: postgresql-12.1.9 app.kubernetes.io/instance: mastodon app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: primary # Use this annotation in addition to the actual publishNotReadyAddresses # field below because the annotation will stop being respected soon but the # field is broken in some versions of Kubernetes: # https://github.com/kubernetes/kubernetes/issues/58662 service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" spec: type: ClusterIP clusterIP: None # We want all pods in the StatefulSet to have their addresses published for # the sake of the other Postgresql pods even before they're ready, since they # have to be able to talk to each other in order to become ready. 
publishNotReadyAddresses: true ports: - name: tcp-postgresql port: 5432 targetPort: tcp-postgresql selector: app.kubernetes.io/name: postgresql app.kubernetes.io/instance: mastodon app.kubernetes.io/component: primary --- # Source: mastodon/charts/postgresql/templates/primary/svc.yaml apiVersion: v1 kind: Service metadata: name: mastodon-postgresql namespace: "mastodon" labels: app.kubernetes.io/name: postgresql helm.sh/chart: postgresql-12.1.9 app.kubernetes.io/instance: mastodon app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: primary annotations: spec: type: ClusterIP sessionAffinity: None ports: - name: tcp-postgresql port: 5432 targetPort: tcp-postgresql nodePort: null selector: app.kubernetes.io/name: postgresql app.kubernetes.io/instance: mastodon app.kubernetes.io/component: primary --- # Source: mastodon/charts/redis/templates/headless-svc.yaml apiVersion: v1 kind: Service metadata: name: mastodon-redis-headless namespace: "mastodon" labels: app.kubernetes.io/name: redis helm.sh/chart: redis-17.4.3 app.kubernetes.io/instance: mastodon app.kubernetes.io/managed-by: Helm annotations: spec: type: ClusterIP clusterIP: None ports: - name: tcp-redis port: 6379 targetPort: redis selector: app.kubernetes.io/name: redis app.kubernetes.io/instance: mastodon --- # Source: mastodon/charts/redis/templates/master/service.yaml apiVersion: v1 kind: Service metadata: name: mastodon-redis-master namespace: "mastodon" labels: app.kubernetes.io/name: redis helm.sh/chart: redis-17.4.3 app.kubernetes.io/instance: mastodon app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: master spec: type: ClusterIP sessionAffinity: None ports: - name: tcp-redis port: 6379 targetPort: redis nodePort: null selector: app.kubernetes.io/name: redis app.kubernetes.io/instance: mastodon app.kubernetes.io/component: master --- # Source: mastodon/templates/streaming/service.yaml apiVersion: v1 kind: Service metadata: name: mastodon-streaming namespace: "mastodon" labels: 
app.kubernetes.io/name: mastodon helm.sh/chart: mastodon-1.0.1 app.kubernetes.io/instance: mastodon app.kubernetes.io/managed-by: Helm app.kubernetes.io/part-of: mastodon app.kubernetes.io/component: streaming spec: type: ClusterIP sessionAffinity: None ports: - name: http port: 80 targetPort: http protocol: TCP nodePort: null selector: app.kubernetes.io/name: mastodon app.kubernetes.io/instance: mastodon app.kubernetes.io/component: streaming --- # Source: mastodon/templates/web/service.yaml apiVersion: v1 kind: Service metadata: name: mastodon-web namespace: "mastodon" labels: app.kubernetes.io/name: mastodon helm.sh/chart: mastodon-1.0.1 app.kubernetes.io/instance: mastodon app.kubernetes.io/managed-by: Helm app.kubernetes.io/part-of: mastodon app.kubernetes.io/component: web spec: type: ClusterIP sessionAffinity: None ports: - name: http port: 80 protocol: TCP targetPort: http nodePort: null selector: app.kubernetes.io/name: mastodon app.kubernetes.io/instance: mastodon app.kubernetes.io/component: web --- # Source: mastodon/charts/apache/templates/deployment.yaml apiVersion: apps/v1 kind: Deployment metadata: name: mastodon-apache namespace: "mastodon" labels: app.kubernetes.io/name: apache helm.sh/chart: apache-9.2.11 app.kubernetes.io/instance: mastodon app.kubernetes.io/managed-by: Helm spec: selector: matchLabels: app.kubernetes.io/name: apache app.kubernetes.io/instance: mastodon replicas: 1 strategy: type: RollingUpdate template: metadata: labels: app.kubernetes.io/name: apache helm.sh/chart: apache-9.2.11 app.kubernetes.io/instance: mastodon app.kubernetes.io/managed-by: Helm spec: # yamllint disable rule:indentation hostAliases: - hostnames: - status.localhost ip: 127.0.0.1 # yamllint enable rule:indentation priorityClassName: "" affinity: podAffinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - podAffinityTerm: labelSelector: matchLabels: app.kubernetes.io/name: apache app.kubernetes.io/instance: mastodon topologyKey: 
kubernetes.io/hostname weight: 1 nodeAffinity: securityContext: fsGroup: 1001 containers: - name: apache image: docker.io/bitnami/apache:2.4.55-debian-11-r0 imagePullPolicy: "IfNotPresent" securityContext: runAsNonRoot: true runAsUser: 1001 env: - name: BITNAMI_DEBUG value: "false" - name: APACHE_HTTP_PORT_NUMBER value: "8080" - name: APACHE_HTTPS_PORT_NUMBER value: "8443" envFrom: ports: - name: http containerPort: 8080 - name: https containerPort: 8443 livenessProbe: httpGet: path: /api/v1/streaming/health port: http initialDelaySeconds: 180 periodSeconds: 20 timeoutSeconds: 5 successThreshold: 1 failureThreshold: 6 readinessProbe: httpGet: path: /api/v1/streaming/health port: http initialDelaySeconds: 30 periodSeconds: 10 timeoutSeconds: 5 successThreshold: 1 failureThreshold: 6 resources: limits: {} requests: {} volumeMounts: - name: vhosts mountPath: /vhosts volumes: - name: vhosts configMap: name: mastodon-apache-mastodon-vhost --- # Source: mastodon/charts/minio/templates/standalone/deployment.yaml apiVersion: apps/v1 kind: Deployment metadata: name: mastodon-minio namespace: "mastodon" labels: app.kubernetes.io/name: minio helm.sh/chart: minio-12.0.0 app.kubernetes.io/instance: mastodon app.kubernetes.io/managed-by: Helm spec: selector: matchLabels: app.kubernetes.io/name: minio app.kubernetes.io/instance: mastodon strategy: type: Recreate template: metadata: labels: app.kubernetes.io/name: minio helm.sh/chart: minio-12.0.0 app.kubernetes.io/instance: mastodon app.kubernetes.io/managed-by: Helm annotations: checksum/credentials-secret: 2db43b4c4682cc08fe1b802debca807f538d84655d6d362d0cbd488873cf2364 spec: serviceAccountName: mastodon-minio affinity: podAffinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - podAffinityTerm: labelSelector: matchLabels: app.kubernetes.io/name: minio app.kubernetes.io/instance: mastodon topologyKey: kubernetes.io/hostname weight: 1 nodeAffinity: securityContext: fsGroup: 1001 containers: - name: minio 
image: docker.io/bitnami/minio:2023.1.12-debian-11-r0 imagePullPolicy: "IfNotPresent" securityContext: runAsNonRoot: true runAsUser: 1001 env: - name: BITNAMI_DEBUG value: "false" - name: MINIO_SCHEME value: "http" - name: MINIO_FORCE_NEW_KEYS value: "no" - name: MINIO_ROOT_USER valueFrom: secretKeyRef: name: mastodon-minio key: root-user - name: MINIO_ROOT_PASSWORD valueFrom: secretKeyRef: name: mastodon-minio key: root-password - name: MINIO_DEFAULT_BUCKETS value: s3storage - name: MINIO_BROWSER value: "on" - name: MINIO_PROMETHEUS_AUTH_TYPE value: "public" - name: MINIO_CONSOLE_PORT_NUMBER value: "9001" envFrom: ports: - name: minio-api containerPort: 9000 protocol: TCP - name: minio-console containerPort: 9001 protocol: TCP livenessProbe: httpGet: path: /minio/health/live port: minio-api scheme: "HTTP" initialDelaySeconds: 5 periodSeconds: 5 timeoutSeconds: 5 successThreshold: 1 failureThreshold: 5 readinessProbe: tcpSocket: port: minio-api initialDelaySeconds: 5 periodSeconds: 5 timeoutSeconds: 1 successThreshold: 1 failureThreshold: 5 resources: limits: {} requests: {} volumeMounts: - name: data mountPath: /data volumes: - name: data persistentVolumeClaim: claimName: mastodon-minio --- # Source: mastodon/templates/sidekiq/deployment.yaml apiVersion: apps/v1 kind: Deployment metadata: name: mastodon-sidekiq namespace: "mastodon" labels: app.kubernetes.io/name: mastodon helm.sh/chart: mastodon-1.0.1 app.kubernetes.io/instance: mastodon app.kubernetes.io/managed-by: Helm app.kubernetes.io/part-of: mastodon app.kubernetes.io/component: sidekiq spec: replicas: 1 strategy: type: RollingUpdate selector: matchLabels: app.kubernetes.io/name: mastodon app.kubernetes.io/instance: mastodon app.kubernetes.io/component: sidekiq template: metadata: labels: app.kubernetes.io/name: mastodon helm.sh/chart: mastodon-1.0.1 app.kubernetes.io/instance: mastodon app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: sidekiq spec: serviceAccountName: mastodon affinity: 
podAffinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - podAffinityTerm: labelSelector: matchLabels: app.kubernetes.io/name: mastodon app.kubernetes.io/instance: mastodon app.kubernetes.io/component: sidekiq topologyKey: kubernetes.io/hostname weight: 1 nodeAffinity: securityContext: fsGroup: 1001 seccompProfile: type: RuntimeDefault initContainers: - name: wait-for-web image: docker.io/bitnami/mastodon:4.0.2-debian-11-r18 imagePullPolicy: IfNotPresent securityContext: allowPrivilegeEscalation: false capabilities: drop: - ALL readOnlyRootFilesystem: false runAsNonRoot: true runAsUser: 1001 command: - bash - -ec - | #!/bin/bash set -o errexit set -o nounset set -o pipefail . /opt/bitnami/scripts/liblog.sh . /opt/bitnami/scripts/libvalidations.sh . /opt/bitnami/scripts/libmastodon.sh . /opt/bitnami/scripts/mastodon-env.sh mastodon_wait_for_web_connection "http://${MASTODON_WEB_HOST}:${MASTODON_WEB_PORT}" info "Mastodon web is ready" env: - name: BITNAMI_DEBUG value: "false" - name: MASTODON_WEB_HOST value: "mastodon-web" - name: MASTODON_WEB_PORT value: "80" - name: wait-for-s3 image: docker.io/bitnami/mastodon:4.0.2-debian-11-r18 imagePullPolicy: IfNotPresent securityContext: allowPrivilegeEscalation: false capabilities: drop: - ALL readOnlyRootFilesystem: false runAsNonRoot: true runAsUser: 1001 command: - bash - -ec - | #!/bin/bash set -o errexit set -o nounset set -o pipefail . /opt/bitnami/scripts/liblog.sh . /opt/bitnami/scripts/libvalidations.sh . /opt/bitnami/scripts/libmastodon.sh . 
/opt/bitnami/scripts/mastodon-env.sh mastodon_wait_for_s3_connection "$MASTODON_S3_HOSTNAME" "$MASTODON_S3_PORT_NUMBER" info "S3 is ready" env: - name: BITNAMI_DEBUG value: "false" - name: MASTODON_S3_HOSTNAME value: "mastodon-minio" - name: MASTODON_S3_PORT_NUMBER value: "80" containers: - name: mastodon image: docker.io/bitnami/mastodon:4.0.2-debian-11-r18 imagePullPolicy: IfNotPresent securityContext: allowPrivilegeEscalation: false capabilities: drop: - ALL readOnlyRootFilesystem: false runAsNonRoot: true runAsUser: 1001 command: - /opt/bitnami/scripts/mastodon/run.sh env: - name: BITNAMI_DEBUG value: "false" - name: MASTODON_MODE value: "sidekiq" - name: MASTODON_DATABASE_PASSWORD valueFrom: secretKeyRef: name: mastodon-postgresql key: "password" - name: MASTODON_REDIS_PASSWORD valueFrom: secretKeyRef: name: mastodon-redis key: "redis-password" - name: MASTODON_AWS_ACCESS_KEY_ID valueFrom: secretKeyRef: name: mastodon-minio key: "root-user" - name: MASTODON_AWS_SECRET_ACCESS_KEY valueFrom: secretKeyRef: name: mastodon-minio key: "root-password" envFrom: - configMapRef: name: mastodon-default - secretRef: name: mastodon-default resources: limits: {} requests: {} livenessProbe: failureThreshold: 6 initialDelaySeconds: 10 periodSeconds: 10 successThreshold: 1 timeoutSeconds: 5 exec: command: - /bin/sh - -c - pgrep -f ^sidekiq readinessProbe: failureThreshold: 6 initialDelaySeconds: 10 periodSeconds: 10 successThreshold: 1 timeoutSeconds: 5 exec: command: - /bin/sh - -c - pgrep -f ^sidekiq volumeMounts: volumes: --- # Source: mastodon/templates/streaming/deployment.yaml apiVersion: apps/v1 kind: Deployment metadata: name: mastodon-streaming namespace: "mastodon" labels: app.kubernetes.io/name: mastodon helm.sh/chart: mastodon-1.0.1 app.kubernetes.io/instance: mastodon app.kubernetes.io/managed-by: Helm app.kubernetes.io/part-of: mastodon app.kubernetes.io/component: streaming spec: replicas: 1 strategy: type: RollingUpdate selector: matchLabels: 
app.kubernetes.io/name: mastodon app.kubernetes.io/instance: mastodon app.kubernetes.io/component: streaming template: metadata: labels: app.kubernetes.io/name: mastodon helm.sh/chart: mastodon-1.0.1 app.kubernetes.io/instance: mastodon app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: streaming spec: serviceAccountName: mastodon affinity: podAffinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - podAffinityTerm: labelSelector: matchLabels: app.kubernetes.io/name: mastodon app.kubernetes.io/instance: mastodon app.kubernetes.io/component: streaming topologyKey: kubernetes.io/hostname weight: 1 nodeAffinity: securityContext: fsGroup: 1001 seccompProfile: type: RuntimeDefault initContainers: # We need to wait for the PostgreSQL database to be ready in order to start with Mastodon. # As it is a ReplicaSet, we need that all nodes are configured in order to start with # the application or race conditions can occur - name: wait-for-db image: docker.io/bitnami/mastodon:4.0.2-debian-11-r18 imagePullPolicy: IfNotPresent securityContext: allowPrivilegeEscalation: false capabilities: drop: - ALL readOnlyRootFilesystem: false runAsNonRoot: true runAsUser: 1001 command: - bash - -ec - | #!/bin/bash set -o errexit set -o nounset set -o pipefail . /opt/bitnami/scripts/liblog.sh . /opt/bitnami/scripts/libvalidations.sh . /opt/bitnami/scripts/libmastodon.sh . 
/opt/bitnami/scripts/mastodon-env.sh mastodon_wait_for_postgresql_connection "postgresql://${MASTODON_DATABASE_USER}:${MASTODON_DATABASE_PASSWORD:-}@${MASTODON_DATABASE_HOST}:${MASTODON_DATABASE_PORT_NUMBER}/${MASTODON_DATABASE_NAME}" info "Database is ready" env: - name: BITNAMI_DEBUG value: "false" - name: MASTODON_DATABASE_HOST value: "mastodon-postgresql" - name: MASTODON_DATABASE_PORT_NUMBER value: "5432" - name: MASTODON_DATABASE_PASSWORD valueFrom: secretKeyRef: name: mastodon-postgresql key: password - name: MASTODON_DATABASE_USER value: bn_mastodon - name: MASTODON_DATABASE_NAME value: bitnami_mastodon - name: wait-for-web image: docker.io/bitnami/mastodon:4.0.2-debian-11-r18 imagePullPolicy: IfNotPresent securityContext: allowPrivilegeEscalation: false capabilities: drop: - ALL readOnlyRootFilesystem: false runAsNonRoot: true runAsUser: 1001 command: - bash - -ec - | #!/bin/bash set -o errexit set -o nounset set -o pipefail . /opt/bitnami/scripts/liblog.sh . /opt/bitnami/scripts/libvalidations.sh . /opt/bitnami/scripts/libmastodon.sh . 
/opt/bitnami/scripts/mastodon-env.sh mastodon_wait_for_web_connection "http://${MASTODON_WEB_HOST}:${MASTODON_WEB_PORT}" info "Mastodon web is ready" env: - name: BITNAMI_DEBUG value: "false" - name: MASTODON_WEB_HOST value: "mastodon-web" - name: MASTODON_WEB_PORT value: "80" containers: - name: mastodon image: docker.io/bitnami/mastodon:4.0.2-debian-11-r18 imagePullPolicy: IfNotPresent securityContext: allowPrivilegeEscalation: false capabilities: drop: - ALL readOnlyRootFilesystem: false runAsNonRoot: true runAsUser: 1001 command: - /opt/bitnami/scripts/mastodon/run.sh env: - name: BITNAMI_DEBUG value: "false" - name: MASTODON_MODE value: "streaming" - name: MASTODON_STREAMING_PORT_NUMBER value: "8080" - name: MASTODON_DATABASE_PASSWORD valueFrom: secretKeyRef: name: mastodon-postgresql key: "password" - name: MASTODON_REDIS_PASSWORD valueFrom: secretKeyRef: name: mastodon-redis key: "redis-password" - name: MASTODON_AWS_ACCESS_KEY_ID valueFrom: secretKeyRef: name: mastodon-minio key: "root-user" - name: MASTODON_AWS_SECRET_ACCESS_KEY valueFrom: secretKeyRef: name: mastodon-minio key: "root-password" envFrom: - configMapRef: name: mastodon-default - secretRef: name: mastodon-default resources: limits: {} requests: {} ports: - name: http containerPort: 8080 livenessProbe: failureThreshold: 6 initialDelaySeconds: 10 periodSeconds: 10 successThreshold: 1 timeoutSeconds: 5 httpGet: path: /api/v1/streaming/health port: http readinessProbe: failureThreshold: 6 initialDelaySeconds: 10 periodSeconds: 10 successThreshold: 1 timeoutSeconds: 5 httpGet: path: /api/v1/streaming/health port: http volumeMounts: volumes: --- # Source: mastodon/templates/web/deployment.yaml apiVersion: apps/v1 kind: Deployment metadata: name: mastodon-web namespace: "mastodon" labels: app.kubernetes.io/name: mastodon helm.sh/chart: mastodon-1.0.1 app.kubernetes.io/instance: mastodon app.kubernetes.io/managed-by: Helm app.kubernetes.io/part-of: mastodon app.kubernetes.io/component: web spec: 
# --- web Deployment pod template: 1 replica, RollingUpdate, soft pod
# anti-affinity per hostname, runs as uid 1001 with RuntimeDefault seccomp.
# First init container (wait-for-db) repeats the PostgreSQL readiness gate
# used by the streaming Deployment.
replicas: 1 strategy: type: RollingUpdate selector: matchLabels: app.kubernetes.io/name: mastodon app.kubernetes.io/instance: mastodon app.kubernetes.io/component: web template: metadata: labels: app.kubernetes.io/name: mastodon helm.sh/chart: mastodon-1.0.1 app.kubernetes.io/instance: mastodon app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: web spec: serviceAccountName: mastodon affinity: podAffinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - podAffinityTerm: labelSelector: matchLabels: app.kubernetes.io/name: mastodon app.kubernetes.io/instance: mastodon app.kubernetes.io/component: web topologyKey: kubernetes.io/hostname weight: 1 nodeAffinity: securityContext: fsGroup: 1001 seccompProfile: type: RuntimeDefault initContainers: # We need to wait for the PostgreSQL database to be ready in order to start with Mastodon. # As it is a ReplicaSet, we need that all nodes are configured in order to start with # the application or race conditions can occur - name: wait-for-db image: docker.io/bitnami/mastodon:4.0.2-debian-11-r18 imagePullPolicy: IfNotPresent securityContext: allowPrivilegeEscalation: false capabilities: drop: - ALL readOnlyRootFilesystem: false runAsNonRoot: true runAsUser: 1001 command: - bash - -ec - | #!/bin/bash set -o errexit set -o nounset set -o pipefail . /opt/bitnami/scripts/liblog.sh . /opt/bitnami/scripts/libvalidations.sh . /opt/bitnami/scripts/libmastodon.sh . 
# --- wait-for-db script body, then wait-for-redis init container: blocks until
# Redis at mastodon-redis-master:6379 answers, password from Secret
# mastodon-redis/redis-password.
/opt/bitnami/scripts/mastodon-env.sh mastodon_wait_for_postgresql_connection "postgresql://${MASTODON_DATABASE_USER}:${MASTODON_DATABASE_PASSWORD:-}@${MASTODON_DATABASE_HOST}:${MASTODON_DATABASE_PORT_NUMBER}/${MASTODON_DATABASE_NAME}" info "Database is ready" env: - name: BITNAMI_DEBUG value: "false" - name: MASTODON_DATABASE_HOST value: "mastodon-postgresql" - name: MASTODON_DATABASE_PORT_NUMBER value: "5432" - name: MASTODON_DATABASE_PASSWORD valueFrom: secretKeyRef: name: mastodon-postgresql key: password - name: MASTODON_DATABASE_USER value: bn_mastodon - name: MASTODON_DATABASE_NAME value: bitnami_mastodon # We need to wait for the PostgreSQL database to be ready in order to start with Mastodon. # As it is a ReplicaSet, we need that all nodes are configured in order to start with # the application or race conditions can occur - name: wait-for-redis image: docker.io/bitnami/mastodon:4.0.2-debian-11-r18 imagePullPolicy: IfNotPresent securityContext: allowPrivilegeEscalation: false capabilities: drop: - ALL readOnlyRootFilesystem: false runAsNonRoot: true runAsUser: 1001 command: - bash - -ec - | #!/bin/bash set -o errexit set -o nounset set -o pipefail . /opt/bitnami/scripts/liblog.sh . /opt/bitnami/scripts/libvalidations.sh . /opt/bitnami/scripts/libmastodon.sh . 
# --- wait-for-redis script body, then wait-for-elasticsearch and wait-for-s3
# init containers for the web Deployment.
# FIX(review): the wait-for-elasticsearch script checked Elasticsearch but then
# logged `info "Mastodon web is ready"` — a copy-paste of the wait-for-web
# message. Corrected to "Elasticsearch is ready" so init-container logs state
# which dependency actually became available. No other byte changed.
/opt/bitnami/scripts/mastodon-env.sh mastodon_wait_for_redis_connection "redis://${MASTODON_REDIS_PASSWORD:-}@${MASTODON_REDIS_HOST}:${MASTODON_REDIS_PORT_NUMBER}" info "Redis(TM) is ready" env: - name: BITNAMI_DEBUG value: "false" - name: MASTODON_REDIS_HOST value: "mastodon-redis-master" - name: MASTODON_REDIS_PORT_NUMBER value: "6379" - name: MASTODON_REDIS_PASSWORD valueFrom: secretKeyRef: name: mastodon-redis key: redis-password - name: wait-for-elasticsearch image: docker.io/bitnami/mastodon:4.0.2-debian-11-r18 imagePullPolicy: IfNotPresent securityContext: allowPrivilegeEscalation: false capabilities: drop: - ALL readOnlyRootFilesystem: false runAsNonRoot: true runAsUser: 1001 command: - bash - -ec - | #!/bin/bash set -o errexit set -o nounset set -o pipefail . /opt/bitnami/scripts/liblog.sh . /opt/bitnami/scripts/libvalidations.sh . /opt/bitnami/scripts/libmastodon.sh . /opt/bitnami/scripts/mastodon-env.sh mastodon_wait_for_elasticsearch_connection "http://${MASTODON_ELASTICSEARCH_HOST}:${MASTODON_ELASTICSEARCH_PORT_NUMBER}" info "Elasticsearch is ready" env: - name: BITNAMI_DEBUG value: "false" - name: MASTODON_ELASTICSEARCH_HOST value: "mastodon-elasticsearch" - name: MASTODON_ELASTICSEARCH_PORT_NUMBER value: "9200" - name: wait-for-s3 image: docker.io/bitnami/mastodon:4.0.2-debian-11-r18 imagePullPolicy: IfNotPresent securityContext: allowPrivilegeEscalation: false capabilities: drop: - ALL readOnlyRootFilesystem: false runAsNonRoot: true runAsUser: 1001 command: - bash - -ec - | #!/bin/bash set -o errexit set -o nounset set -o pipefail . /opt/bitnami/scripts/liblog.sh . /opt/bitnami/scripts/libvalidations.sh . /opt/bitnami/scripts/libmastodon.sh . 
# --- wait-for-s3 script body (MinIO reachable at mastodon-minio:80), then the
# main web container: MASTODON_MODE=web on port 3000, /health probes, secrets
# wired from the postgresql/redis/minio Secrets plus mastodon-default
# ConfigMap/Secret. NOTE(review): bare `volumeMounts:` / `volumes:` keys parse
# as null. The Elasticsearch coordinating StatefulSet begins mid-line below.
/opt/bitnami/scripts/mastodon-env.sh mastodon_wait_for_s3_connection "$MASTODON_S3_HOSTNAME" "$MASTODON_S3_PORT_NUMBER" info "S3 is ready" env: - name: BITNAMI_DEBUG value: "false" - name: MASTODON_S3_HOSTNAME value: "mastodon-minio" - name: MASTODON_S3_PORT_NUMBER value: "80" containers: - name: mastodon image: docker.io/bitnami/mastodon:4.0.2-debian-11-r18 imagePullPolicy: IfNotPresent securityContext: allowPrivilegeEscalation: false capabilities: drop: - ALL readOnlyRootFilesystem: false runAsNonRoot: true runAsUser: 1001 command: - /opt/bitnami/scripts/mastodon/run.sh env: - name: BITNAMI_DEBUG value: "false" - name: MASTODON_MODE value: "web" - name: MASTODON_WEB_PORT_NUMBER value: "3000" - name: MASTODON_DATABASE_PASSWORD valueFrom: secretKeyRef: name: mastodon-postgresql key: "password" - name: MASTODON_REDIS_PASSWORD valueFrom: secretKeyRef: name: mastodon-redis key: "redis-password" - name: MASTODON_AWS_ACCESS_KEY_ID valueFrom: secretKeyRef: name: mastodon-minio key: "root-user" - name: MASTODON_AWS_SECRET_ACCESS_KEY valueFrom: secretKeyRef: name: mastodon-minio key: "root-password" envFrom: - configMapRef: name: mastodon-default - secretRef: name: mastodon-default resources: limits: {} requests: {} ports: - name: http containerPort: 3000 livenessProbe: failureThreshold: 6 initialDelaySeconds: 10 periodSeconds: 10 successThreshold: 1 timeoutSeconds: 5 httpGet: path: /health port: http readinessProbe: failureThreshold: 6 initialDelaySeconds: 10 periodSeconds: 10 successThreshold: 1 timeoutSeconds: 5 httpGet: path: /health port: http volumeMounts: volumes: --- # Source: mastodon/charts/elasticsearch/templates/coordinating/statefulset.yaml apiVersion: apps/v1 kind: StatefulSet metadata: name: mastodon-elasticsearch-coordinating namespace: "mastodon" labels: app.kubernetes.io/name: elasticsearch helm.sh/chart: elasticsearch-19.5.8 app.kubernetes.io/instance: mastodon app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: coordinating-only ## Istio 
# --- Elasticsearch coordinating-only StatefulSet: 1 replica, Parallel pod
# management, headless service mastodon-elasticsearch-coordinating-hl.
# SECURITY NOTE(review): the sysctl init container runs privileged as root to
# raise vm.max_map_count/fs.file-max on the node — required by Elasticsearch,
# but worth disabling where nodes are pre-tuned.
Labels: https://istio.io/docs/ops/deployment/requirements/ app: coordinating-only spec: replicas: 1 selector: matchLabels: app.kubernetes.io/name: elasticsearch app.kubernetes.io/instance: mastodon app.kubernetes.io/component: coordinating-only updateStrategy: type: RollingUpdate serviceName: mastodon-elasticsearch-coordinating-hl podManagementPolicy: Parallel template: metadata: labels: app.kubernetes.io/name: elasticsearch helm.sh/chart: elasticsearch-19.5.8 app.kubernetes.io/instance: mastodon app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: coordinating-only ## Istio Labels: https://istio.io/docs/ops/deployment/requirements/ app: coordinating-only annotations: spec: serviceAccountName: default affinity: podAffinity: podAntiAffinity: nodeAffinity: securityContext: fsGroup: 1001 initContainers: ## Image that performs the sysctl operation to modify Kernel settings (needed sometimes to avoid boot errors) - name: sysctl image: docker.io/bitnami/bitnami-shell:11-debian-11-r70 imagePullPolicy: "IfNotPresent" command: - /bin/bash - -ec - | CURRENT=`sysctl -n vm.max_map_count`; DESIRED="262144"; if [ "$DESIRED" -gt "$CURRENT" ]; then sysctl -w vm.max_map_count=262144; fi; CURRENT=`sysctl -n fs.file-max`; DESIRED="65536"; if [ "$DESIRED" -gt "$CURRENT" ]; then sysctl -w fs.file-max=65536; fi; securityContext: privileged: true runAsUser: 0 resources: limits: {} requests: {} containers: - name: elasticsearch image: docker.io/bitnami/elasticsearch:8.6.0-debian-11-r0 imagePullPolicy: "IfNotPresent" securityContext: runAsNonRoot: true runAsUser: 1001 env: - name: MY_POD_NAME valueFrom: fieldRef: fieldPath: metadata.name - name: BITNAMI_DEBUG value: "false" - name: ELASTICSEARCH_CLUSTER_NAME value: "elastic" - name: ELASTICSEARCH_IS_DEDICATED_NODE value: "yes" - name: ELASTICSEARCH_NODE_ROLES value: "" - name: ELASTICSEARCH_TRANSPORT_PORT_NUMBER value: "9300" - name: ELASTICSEARCH_HTTP_PORT_NUMBER value: "9200" - name: ELASTICSEARCH_CLUSTER_HOSTS value: 
# NOTE(review): ELASTICSEARCH_CLUSTER_HOSTS ends with a trailing comma —
# presumably tolerated by the Bitnami entrypoint's list parsing, but verify.
# NOTE(review): ELASTICSEARCH_TOTAL_NODES is "2" while four node groups exist
# (master/coordinating/data/ingest, 1 replica each) — confirm the chart counts
# only master+data nodes here. Coordinating pods use an emptyDir for data
# (stateless role), unlike data/master which claim PVCs.
"mastodon-elasticsearch-master-hl.mastodon.svc.cluster.local,mastodon-elasticsearch-coordinating-hl.mastodon.svc.cluster.local,mastodon-elasticsearch-data-hl.mastodon.svc.cluster.local,mastodon-elasticsearch-ingest-hl.mastodon.svc.cluster.local," - name: ELASTICSEARCH_TOTAL_NODES value: "2" - name: ELASTICSEARCH_CLUSTER_MASTER_HOSTS value: mastodon-elasticsearch-master-0 - name: ELASTICSEARCH_MINIMUM_MASTER_NODES value: "1" - name: ELASTICSEARCH_ADVERTISED_HOSTNAME value: "$(MY_POD_NAME).mastodon-elasticsearch-coordinating-hl.mastodon.svc.cluster.local" - name: ELASTICSEARCH_HEAP_SIZE value: "128m" ports: - name: rest-api containerPort: 9200 - name: transport containerPort: 9300 livenessProbe: failureThreshold: 5 initialDelaySeconds: 90 periodSeconds: 10 successThreshold: 1 timeoutSeconds: 5 exec: command: - /opt/bitnami/scripts/elasticsearch/healthcheck.sh readinessProbe: failureThreshold: 5 initialDelaySeconds: 90 periodSeconds: 10 successThreshold: 1 timeoutSeconds: 5 exec: command: - /opt/bitnami/scripts/elasticsearch/healthcheck.sh resources: limits: {} requests: cpu: 25m memory: 256Mi volumeMounts: - name: data mountPath: /bitnami/elasticsearch/data volumes: - name: "data" emptyDir: {} --- # Source: mastodon/charts/elasticsearch/templates/data/statefulset.yaml apiVersion: apps/v1 kind: StatefulSet metadata: name: mastodon-elasticsearch-data namespace: "mastodon" labels: app.kubernetes.io/name: elasticsearch helm.sh/chart: elasticsearch-19.5.8 app.kubernetes.io/instance: mastodon app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: data ## Istio Labels: https://istio.io/docs/ops/deployment/requirements/ app: data spec: replicas: 1 podManagementPolicy: Parallel selector: matchLabels: app.kubernetes.io/name: elasticsearch app.kubernetes.io/instance: mastodon app.kubernetes.io/component: data serviceName: mastodon-elasticsearch-data-hl updateStrategy: type: RollingUpdate template: metadata: labels: app.kubernetes.io/name: elasticsearch helm.sh/chart: 
# --- Elasticsearch data StatefulSet pod spec: same privileged sysctl init
# container pattern as the coordinating role; ES container runs with node role
# "data", 1024m heap, and a 2Gi memory request.
elasticsearch-19.5.8 app.kubernetes.io/instance: mastodon app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: data ## Istio Labels: https://istio.io/docs/ops/deployment/requirements/ app: data annotations: spec: serviceAccountName: default affinity: podAffinity: podAntiAffinity: nodeAffinity: securityContext: fsGroup: 1001 initContainers: ## Image that performs the sysctl operation to modify Kernel settings (needed sometimes to avoid boot errors) - name: sysctl image: docker.io/bitnami/bitnami-shell:11-debian-11-r70 imagePullPolicy: "IfNotPresent" command: - /bin/bash - -ec - | CURRENT=`sysctl -n vm.max_map_count`; DESIRED="262144"; if [ "$DESIRED" -gt "$CURRENT" ]; then sysctl -w vm.max_map_count=262144; fi; CURRENT=`sysctl -n fs.file-max`; DESIRED="65536"; if [ "$DESIRED" -gt "$CURRENT" ]; then sysctl -w fs.file-max=65536; fi; securityContext: privileged: true runAsUser: 0 resources: limits: {} requests: {} containers: - name: elasticsearch image: docker.io/bitnami/elasticsearch:8.6.0-debian-11-r0 imagePullPolicy: "IfNotPresent" securityContext: runAsNonRoot: true runAsUser: 1001 env: - name: BITNAMI_DEBUG value: "false" - name: MY_POD_NAME valueFrom: fieldRef: fieldPath: metadata.name - name: ELASTICSEARCH_IS_DEDICATED_NODE value: "yes" - name: ELASTICSEARCH_NODE_ROLES value: "data" - name: ELASTICSEARCH_TRANSPORT_PORT_NUMBER value: "9300" - name: ELASTICSEARCH_HTTP_PORT_NUMBER value: "9200" - name: ELASTICSEARCH_CLUSTER_NAME value: "elastic" - name: ELASTICSEARCH_CLUSTER_HOSTS value: "mastodon-elasticsearch-master-hl.mastodon.svc.cluster.local,mastodon-elasticsearch-coordinating-hl.mastodon.svc.cluster.local,mastodon-elasticsearch-data-hl.mastodon.svc.cluster.local,mastodon-elasticsearch-ingest-hl.mastodon.svc.cluster.local," - name: ELASTICSEARCH_TOTAL_NODES value: "2" - name: ELASTICSEARCH_CLUSTER_MASTER_HOSTS value: mastodon-elasticsearch-master-0 - name: ELASTICSEARCH_MINIMUM_MASTER_NODES value: "1" - name: ELASTICSEARCH_ADVERTISED_HOSTNAME 
# --- data role persists to an 8Gi longhorn PVC via volumeClaimTemplates
# (unlike coordinating/ingest, which use emptyDir). The ingest StatefulSet
# begins mid-line below.
value: "$(MY_POD_NAME).mastodon-elasticsearch-data-hl.mastodon.svc.cluster.local" - name: ELASTICSEARCH_HEAP_SIZE value: "1024m" ports: - name: rest-api containerPort: 9200 - name: transport containerPort: 9300 livenessProbe: failureThreshold: 5 initialDelaySeconds: 90 periodSeconds: 10 successThreshold: 1 timeoutSeconds: 5 exec: command: - /opt/bitnami/scripts/elasticsearch/healthcheck.sh readinessProbe: failureThreshold: 5 initialDelaySeconds: 90 periodSeconds: 10 successThreshold: 1 timeoutSeconds: 5 exec: command: - /opt/bitnami/scripts/elasticsearch/healthcheck.sh resources: limits: {} requests: cpu: 25m memory: 2048Mi volumeMounts: - name: data mountPath: /bitnami/elasticsearch/data volumes: volumeClaimTemplates: - metadata: name: "data" annotations: spec: accessModes: - "ReadWriteOnce" resources: requests: storage: "8Gi" storageClassName: longhorn --- # Source: mastodon/charts/elasticsearch/templates/ingest/statefulset.yaml apiVersion: apps/v1 kind: StatefulSet metadata: name: mastodon-elasticsearch-ingest namespace: "mastodon" labels: app.kubernetes.io/name: elasticsearch helm.sh/chart: elasticsearch-19.5.8 app.kubernetes.io/instance: mastodon app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: ingest ## Istio Labels: https://istio.io/docs/ops/deployment/requirements/ app: ingest spec: replicas: 1 podManagementPolicy: Parallel selector: matchLabels: app.kubernetes.io/name: elasticsearch app.kubernetes.io/instance: mastodon app.kubernetes.io/component: ingest serviceName: mastodon-elasticsearch-ingest-hl updateStrategy: type: RollingUpdate template: metadata: labels: app.kubernetes.io/name: elasticsearch helm.sh/chart: elasticsearch-19.5.8 app.kubernetes.io/instance: mastodon app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: ingest ## Istio Labels: https://istio.io/docs/ops/deployment/requirements/ app: ingest annotations: spec: serviceAccountName: default affinity: podAffinity: podAntiAffinity: nodeAffinity: securityContext: 
# --- Elasticsearch ingest StatefulSet pod spec: node role "ingest", 128m heap,
# same privileged sysctl init container; advertises itself through the
# ingest headless service.
fsGroup: 1001 initContainers: ## Image that performs the sysctl operation to modify Kernel settings (needed sometimes to avoid boot errors) - name: sysctl image: docker.io/bitnami/bitnami-shell:11-debian-11-r70 imagePullPolicy: "IfNotPresent" command: - /bin/bash - -ec - | CURRENT=`sysctl -n vm.max_map_count`; DESIRED="262144"; if [ "$DESIRED" -gt "$CURRENT" ]; then sysctl -w vm.max_map_count=262144; fi; CURRENT=`sysctl -n fs.file-max`; DESIRED="65536"; if [ "$DESIRED" -gt "$CURRENT" ]; then sysctl -w fs.file-max=65536; fi; securityContext: privileged: true runAsUser: 0 resources: limits: {} requests: {} containers: - name: elasticsearch image: docker.io/bitnami/elasticsearch:8.6.0-debian-11-r0 imagePullPolicy: "IfNotPresent" securityContext: runAsNonRoot: true runAsUser: 1001 env: - name: BITNAMI_DEBUG value: "false" - name: MY_POD_NAME valueFrom: fieldRef: fieldPath: metadata.name - name: ELASTICSEARCH_IS_DEDICATED_NODE value: "yes" - name: ELASTICSEARCH_NODE_ROLES value: "ingest" - name: ELASTICSEARCH_TRANSPORT_PORT_NUMBER value: "9300" - name: ELASTICSEARCH_HTTP_PORT_NUMBER value: "9200" - name: ELASTICSEARCH_CLUSTER_NAME value: "elastic" - name: ELASTICSEARCH_CLUSTER_HOSTS value: "mastodon-elasticsearch-master-hl.mastodon.svc.cluster.local,mastodon-elasticsearch-coordinating-hl.mastodon.svc.cluster.local,mastodon-elasticsearch-data-hl.mastodon.svc.cluster.local,mastodon-elasticsearch-ingest-hl.mastodon.svc.cluster.local," - name: ELASTICSEARCH_TOTAL_NODES value: "2" - name: ELASTICSEARCH_CLUSTER_MASTER_HOSTS value: mastodon-elasticsearch-master-0 - name: ELASTICSEARCH_MINIMUM_MASTER_NODES value: "1" - name: ELASTICSEARCH_ADVERTISED_HOSTNAME value: "$(MY_POD_NAME).mastodon-elasticsearch-ingest-hl.mastodon.svc.cluster.local" - name: ELASTICSEARCH_HEAP_SIZE value: "128m" ports: - name: rest-api containerPort: 9200 - name: transport containerPort: 9300 livenessProbe: failureThreshold: 5 initialDelaySeconds: 90 periodSeconds: 10 successThreshold: 1 timeoutSeconds: 
# --- ingest role uses emptyDir for data (stateless). The master StatefulSet
# begins mid-line below; it is the single eligible master
# (mastodon-elasticsearch-master-0, minimum_master_nodes=1).
5 exec: command: - /opt/bitnami/scripts/elasticsearch/healthcheck.sh readinessProbe: failureThreshold: 5 initialDelaySeconds: 90 periodSeconds: 10 successThreshold: 1 timeoutSeconds: 5 exec: command: - /opt/bitnami/scripts/elasticsearch/healthcheck.sh resources: limits: {} requests: cpu: 25m memory: 256Mi volumeMounts: - name: data mountPath: /bitnami/elasticsearch/data volumes: - name: "data" emptyDir: {} --- # Source: mastodon/charts/elasticsearch/templates/master/statefulset.yaml apiVersion: apps/v1 kind: StatefulSet metadata: name: mastodon-elasticsearch-master namespace: "mastodon" labels: app.kubernetes.io/name: elasticsearch helm.sh/chart: elasticsearch-19.5.8 app.kubernetes.io/instance: mastodon app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: master ## Istio Labels: https://istio.io/docs/ops/deployment/requirements/ app: master spec: replicas: 1 podManagementPolicy: Parallel selector: matchLabels: app.kubernetes.io/name: elasticsearch app.kubernetes.io/instance: mastodon app.kubernetes.io/component: master serviceName: mastodon-elasticsearch-master-hl updateStrategy: type: RollingUpdate template: metadata: labels: app.kubernetes.io/name: elasticsearch helm.sh/chart: elasticsearch-19.5.8 app.kubernetes.io/instance: mastodon app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: master ## Istio Labels: https://istio.io/docs/ops/deployment/requirements/ app: master annotations: spec: serviceAccountName: default affinity: podAffinity: podAntiAffinity: nodeAffinity: securityContext: fsGroup: 1001 initContainers: ## Image that performs the sysctl operation to modify Kernel settings (needed sometimes to avoid boot errors) - name: sysctl image: docker.io/bitnami/bitnami-shell:11-debian-11-r70 imagePullPolicy: "IfNotPresent" command: - /bin/bash - -ec - | CURRENT=`sysctl -n vm.max_map_count`; DESIRED="262144"; if [ "$DESIRED" -gt "$CURRENT" ]; then sysctl -w vm.max_map_count=262144; fi; CURRENT=`sysctl -n fs.file-max`; DESIRED="65536"; if [ 
# --- Elasticsearch master StatefulSet: node role "master", 128m heap, data on
# an 8Gi longhorn PVC (claim template continues onto the next line). A single
# master replica means no quorum redundancy — acceptable for this sizing.
"$DESIRED" -gt "$CURRENT" ]; then sysctl -w fs.file-max=65536; fi; securityContext: privileged: true runAsUser: 0 resources: limits: {} requests: {} containers: - name: elasticsearch image: docker.io/bitnami/elasticsearch:8.6.0-debian-11-r0 imagePullPolicy: "IfNotPresent" securityContext: runAsNonRoot: true runAsUser: 1001 env: - name: BITNAMI_DEBUG value: "false" - name: MY_POD_NAME valueFrom: fieldRef: fieldPath: metadata.name - name: ELASTICSEARCH_IS_DEDICATED_NODE value: "yes" - name: ELASTICSEARCH_NODE_ROLES value: "master" - name: ELASTICSEARCH_TRANSPORT_PORT_NUMBER value: "9300" - name: ELASTICSEARCH_HTTP_PORT_NUMBER value: "9200" - name: ELASTICSEARCH_CLUSTER_NAME value: "elastic" - name: ELASTICSEARCH_CLUSTER_HOSTS value: "mastodon-elasticsearch-master-hl.mastodon.svc.cluster.local,mastodon-elasticsearch-coordinating-hl.mastodon.svc.cluster.local,mastodon-elasticsearch-data-hl.mastodon.svc.cluster.local,mastodon-elasticsearch-ingest-hl.mastodon.svc.cluster.local," - name: ELASTICSEARCH_TOTAL_NODES value: "2" - name: ELASTICSEARCH_CLUSTER_MASTER_HOSTS value: mastodon-elasticsearch-master-0 - name: ELASTICSEARCH_MINIMUM_MASTER_NODES value: "1" - name: ELASTICSEARCH_ADVERTISED_HOSTNAME value: "$(MY_POD_NAME).mastodon-elasticsearch-master-hl.mastodon.svc.cluster.local" - name: ELASTICSEARCH_HEAP_SIZE value: "128m" ports: - name: rest-api containerPort: 9200 - name: transport containerPort: 9300 livenessProbe: failureThreshold: 5 initialDelaySeconds: 90 periodSeconds: 10 successThreshold: 1 timeoutSeconds: 5 exec: command: - /opt/bitnami/scripts/elasticsearch/healthcheck.sh readinessProbe: failureThreshold: 5 initialDelaySeconds: 90 periodSeconds: 10 successThreshold: 1 timeoutSeconds: 5 exec: command: - /opt/bitnami/scripts/elasticsearch/healthcheck.sh resources: limits: {} requests: cpu: 25m memory: 256Mi volumeMounts: - name: data mountPath: /bitnami/elasticsearch/data volumes: volumeClaimTemplates: - metadata: name: "data" annotations: spec: accessModes: - 
# --- PostgreSQL primary StatefulSet begins mid-line below: 1 replica, headless
# service mastodon-postgresql-hl, soft anti-affinity, superuser and app
# passwords from Secret mastodon-postgresql, bn_mastodon/bitnami_mastodon DB.
"ReadWriteOnce" resources: requests: storage: "8Gi" storageClassName: longhorn --- # Source: mastodon/charts/postgresql/templates/primary/statefulset.yaml apiVersion: apps/v1 kind: StatefulSet metadata: name: mastodon-postgresql namespace: "mastodon" labels: app.kubernetes.io/name: postgresql helm.sh/chart: postgresql-12.1.9 app.kubernetes.io/instance: mastodon app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: primary annotations: spec: replicas: 1 serviceName: mastodon-postgresql-hl updateStrategy: rollingUpdate: {} type: RollingUpdate selector: matchLabels: app.kubernetes.io/name: postgresql app.kubernetes.io/instance: mastodon app.kubernetes.io/component: primary template: metadata: name: mastodon-postgresql labels: app.kubernetes.io/name: postgresql helm.sh/chart: postgresql-12.1.9 app.kubernetes.io/instance: mastodon app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: primary annotations: spec: serviceAccountName: default affinity: podAffinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - podAffinityTerm: labelSelector: matchLabels: app.kubernetes.io/name: postgresql app.kubernetes.io/instance: mastodon app.kubernetes.io/component: primary topologyKey: kubernetes.io/hostname weight: 1 nodeAffinity: securityContext: fsGroup: 1001 hostNetwork: false hostIPC: false initContainers: containers: - name: postgresql image: docker.io/bitnami/postgresql:15.1.0-debian-11-r20 imagePullPolicy: "IfNotPresent" securityContext: runAsUser: 1001 env: - name: BITNAMI_DEBUG value: "false" - name: POSTGRESQL_PORT_NUMBER value: "5432" - name: POSTGRESQL_VOLUME_DIR value: "/bitnami/postgresql" - name: PGDATA value: "/bitnami/postgresql/data" # Authentication - name: POSTGRES_USER value: "bn_mastodon" - name: POSTGRES_POSTGRES_PASSWORD valueFrom: secretKeyRef: name: mastodon-postgresql key: postgres-password - name: POSTGRES_PASSWORD valueFrom: secretKeyRef: name: mastodon-postgresql key: password - name: POSTGRES_DB value: 
# --- PostgreSQL container tail: LDAP/TLS disabled, pgaudit preloaded,
# pg_isready liveness; readiness additionally requires the Bitnami
# .initialized marker file. Data on an 8Gi longhorn PVC plus a Memory-backed
# /dev/shm emptyDir. The Redis master StatefulSet begins mid-line below.
"bitnami_mastodon" # Replication # Initdb # Standby # LDAP - name: POSTGRESQL_ENABLE_LDAP value: "no" # TLS - name: POSTGRESQL_ENABLE_TLS value: "no" # Audit - name: POSTGRESQL_LOG_HOSTNAME value: "false" - name: POSTGRESQL_LOG_CONNECTIONS value: "false" - name: POSTGRESQL_LOG_DISCONNECTIONS value: "false" - name: POSTGRESQL_PGAUDIT_LOG_CATALOG value: "off" # Others - name: POSTGRESQL_CLIENT_MIN_MESSAGES value: "error" - name: POSTGRESQL_SHARED_PRELOAD_LIBRARIES value: "pgaudit" ports: - name: tcp-postgresql containerPort: 5432 livenessProbe: failureThreshold: 6 initialDelaySeconds: 30 periodSeconds: 10 successThreshold: 1 timeoutSeconds: 5 exec: command: - /bin/sh - -c - exec pg_isready -U "bn_mastodon" -d "dbname=bitnami_mastodon" -h 127.0.0.1 -p 5432 readinessProbe: failureThreshold: 6 initialDelaySeconds: 5 periodSeconds: 10 successThreshold: 1 timeoutSeconds: 5 exec: command: - /bin/sh - -c - -e - | exec pg_isready -U "bn_mastodon" -d "dbname=bitnami_mastodon" -h 127.0.0.1 -p 5432 [ -f /opt/bitnami/postgresql/tmp/.initialized ] || [ -f /bitnami/postgresql/.initialized ] resources: limits: {} requests: cpu: 250m memory: 256Mi volumeMounts: - name: dshm mountPath: /dev/shm - name: data mountPath: /bitnami/postgresql volumes: - name: dshm emptyDir: medium: Memory volumeClaimTemplates: - metadata: name: data spec: accessModes: - "ReadWriteOnce" resources: requests: storage: "8Gi" storageClassName: longhorn --- # Source: mastodon/charts/redis/templates/master/application.yaml apiVersion: apps/v1 kind: StatefulSet metadata: name: mastodon-redis-master namespace: "mastodon" labels: app.kubernetes.io/name: redis helm.sh/chart: redis-17.4.3 app.kubernetes.io/instance: mastodon app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: master spec: replicas: 1 selector: matchLabels: app.kubernetes.io/name: redis app.kubernetes.io/instance: mastodon app.kubernetes.io/component: master serviceName: mastodon-redis-headless updateStrategy: type: RollingUpdate template: 
# --- Redis master pod template: checksum annotations trigger rolling restarts
# when the referenced ConfigMaps/Secret change; runs start-master.sh with
# ALLOW_EMPTY_PASSWORD=no and the password from Secret mastodon-redis.
metadata: labels: app.kubernetes.io/name: redis helm.sh/chart: redis-17.4.3 app.kubernetes.io/instance: mastodon app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: master annotations: checksum/configmap: 2f15040384162155f37c5089d1a10352963784fb168a605b339e88c8642e7001 checksum/health: 0b8c4cf2e9643861c68f5ce94dc34b6497ef911db5da1c59f51d5f172a4b98dd checksum/scripts: aaa87d91cbed3dc312c3e5b1dab72400a783834667c43a4d19bba0b89be86c63 checksum/secret: 7ad58554d69c8ec88bb5547ce91a036e9612e1db4e16b5faad3181c162e3f776 spec: securityContext: fsGroup: 1001 serviceAccountName: mastodon-redis affinity: podAffinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - podAffinityTerm: labelSelector: matchLabels: app.kubernetes.io/name: redis app.kubernetes.io/instance: mastodon app.kubernetes.io/component: master topologyKey: kubernetes.io/hostname weight: 1 nodeAffinity: terminationGracePeriodSeconds: 30 containers: - name: redis image: docker.io/bitnami/redis:7.0.8-debian-11-r0 imagePullPolicy: "IfNotPresent" securityContext: runAsUser: 1001 command: - /bin/bash args: - -c - /opt/bitnami/scripts/start-scripts/start-master.sh env: - name: BITNAMI_DEBUG value: "false" - name: REDIS_REPLICATION_MODE value: master - name: ALLOW_EMPTY_PASSWORD value: "no" - name: REDIS_PASSWORD valueFrom: secretKeyRef: name: mastodon-redis key: redis-password - name: REDIS_TLS_ENABLED value: "no" - name: REDIS_PORT value: "6379" ports: - name: redis containerPort: 6379 livenessProbe: initialDelaySeconds: 20 periodSeconds: 5 # One second longer than command timeout should prevent generation of zombie processes. 
# --- Redis probes/volumes: health scripts from ConfigMaps mounted with mode
# 0755; AOF data persists on an 8Gi longhorn PVC (redis-data). The MinIO
# provisioning Job (a post-install/post-upgrade Helm hook) begins mid-line
# below.
timeoutSeconds: 6 successThreshold: 1 failureThreshold: 5 exec: command: - sh - -c - /health/ping_liveness_local.sh 5 readinessProbe: initialDelaySeconds: 20 periodSeconds: 5 timeoutSeconds: 2 successThreshold: 1 failureThreshold: 5 exec: command: - sh - -c - /health/ping_readiness_local.sh 1 resources: limits: {} requests: {} volumeMounts: - name: start-scripts mountPath: /opt/bitnami/scripts/start-scripts - name: health mountPath: /health - name: redis-data mountPath: /data - name: config mountPath: /opt/bitnami/redis/mounted-etc - name: redis-tmp-conf mountPath: /opt/bitnami/redis/etc/ - name: tmp mountPath: /tmp volumes: - name: start-scripts configMap: name: mastodon-redis-scripts defaultMode: 0755 - name: health configMap: name: mastodon-redis-health defaultMode: 0755 - name: config configMap: name: mastodon-redis-configuration - name: redis-tmp-conf emptyDir: {} - name: tmp emptyDir: {} volumeClaimTemplates: - metadata: name: redis-data labels: app.kubernetes.io/name: redis app.kubernetes.io/instance: mastodon app.kubernetes.io/component: master spec: accessModes: - "ReadWriteOnce" resources: requests: storage: "8Gi" storageClassName: longhorn --- # Source: mastodon/charts/minio/templates/provisioning-job.yaml apiVersion: batch/v1 kind: Job metadata: name: mastodon-minio-provisioning namespace: "mastodon" labels: app.kubernetes.io/name: minio helm.sh/chart: minio-12.0.0 app.kubernetes.io/instance: mastodon app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: minio-provisioning annotations: helm.sh/hook: post-install,post-upgrade helm.sh/hook-delete-policy: before-hook-creation spec: parallelism: 1 template: metadata: labels: app.kubernetes.io/managed-by: Helm helm.sh/chart: minio-12.0.0 app.kubernetes.io/component: minio-provisioning spec: restartPolicy: OnFailure terminationGracePeriodSeconds: 0 securityContext: fsGroup: 1001 serviceAccountName: mastodon-minio initContainers: - name: wait-for-available-minio image: 
# --- MinIO provisioning Job: init container waits for port 80 on
# mastodon-minio; main container defines mc helper functions, sets the
# `provisioning` alias with the root credentials, restarts the service, and
# makes bucket s3storage anonymously downloadable.
docker.io/bitnami/minio:2023.1.12-debian-11-r0 imagePullPolicy: "IfNotPresent" securityContext: runAsNonRoot: true runAsUser: 1001 command: - /bin/bash - -c - >- set -e; echo "Waiting for Minio"; wait-for-port \ --host=mastodon-minio \ --state=inuse \ --timeout=120 \ 80; echo "Minio is available"; resources: limits: {} requests: {} containers: - name: minio image: docker.io/bitnami/minio:2023.1.12-debian-11-r0 imagePullPolicy: "IfNotPresent" securityContext: runAsNonRoot: true runAsUser: 1001 command: - /bin/bash - -c - >- set -e; echo "Start Minio provisioning"; function addPolicy() { local tmp=$(mc admin $1 info provisioning $2 | sed -n -e 's/^Policy.*: \(.*\)$/\1/p'); IFS=',' read -r -a CURRENT_POLICIES <<< "$tmp"; if [[ ! "${CURRENT_POLICIES[*]}" =~ "$3" ]]; then mc admin policy update provisioning $3 $1=$2; fi; }; function addUsersFromFile() { local username=$(grep -oP '^username=\K.+' $1); local password=$(grep -oP '^password=\K.+' $1); local disabled=$(grep -oP '^disabled=\K.+' $1); local policies_list=$(grep -oP '^policies=\K.+' $1); local set_policies=$(grep -oP '^setPolicies=\K.+' $1); mc admin user add provisioning "${username}" "${password}"; if [ "${set_policies}" == "true" ]; then mc admin policy set provisioning "${policies_list}" user="${username}"; else IFS=',' read -r -a POLICIES <<< "${policies_list}"; for policy in "${POLICIES[@]}"; do addPolicy user "${username}" "${policy}"; done fi; local user_status="enable"; if [[ "${disabled}" != "" && "${disabled,,}" == "true" ]]; then user_status="disable"; fi; mc admin user "${user_status}" provisioning "${username}"; }; mc alias set provisioning $MINIO_SCHEME://mastodon-minio:80 $MINIO_ROOT_USER $MINIO_ROOT_PASSWORD; mc admin service restart provisioning; mc anonymous set download provisioning/s3storage; echo "End Minio provisioning"; env: - name: MINIO_SCHEME value: "http" - name: MINIO_ROOT_USER valueFrom: secretKeyRef: name: mastodon-minio key: root-user - name: MINIO_ROOT_PASSWORD valueFrom: 
# --- mastodon-init Job begins mid-line below (Helm hook).
# NOTE(review): its `helm.sh/hook: post-install, pre-upgrade` contains a space
# after the comma — presumably trimmed by Helm's hook parser, but verify, or
# drop the space to match the MinIO job's `post-install,post-upgrade` style.
# Also note the bare `envFrom:` key (parses as null, tolerated by Kubernetes).
secretKeyRef: name: mastodon-minio key: root-password envFrom: resources: limits: {} requests: {} volumeMounts: - name: minio-provisioning mountPath: /etc/ilm volumes: - name: minio-provisioning configMap: name: mastodon-minio-provisioning --- # Source: mastodon/templates/init-job/init-job.yaml apiVersion: batch/v1 kind: Job metadata: name: mastodon-init namespace: mastodon labels: app.kubernetes.io/name: mastodon helm.sh/chart: mastodon-1.0.1 app.kubernetes.io/instance: mastodon app.kubernetes.io/managed-by: Helm app.kubernetes.io/part-of: mastodon annotations: helm.sh/hook: post-install, pre-upgrade helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded helm.sh/hook-weight: "10" spec: backoffLimit: 10 template: metadata: labels: app.kubernetes.io/name: mastodon helm.sh/chart: mastodon-1.0.1 app.kubernetes.io/instance: mastodon app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: init spec: restartPolicy: OnFailure securityContext: fsGroup: 1001 seccompProfile: type: RuntimeDefault containers: # We separate the job in multiple containers to be able to run them in parallel. 
# --- mastodon-init Job containers: migrate-and-create-admin (rake db:migrate +
# admin creation; needs the PostgreSQL and Redis passwords) and
# mastodon-assets-precompile (uploads precompiled assets to MinIO using the
# root credentials). Both run scripts from ConfigMap mastodon-init-scripts
# mounted at /scripts with mode 0755, as uid 1001.
We put everything on the same job # as it follows the Job Pattern best practices # https://kubernetes.io/docs/concepts/workloads/controllers/job/#job-patterns - name: migrate-and-create-admin image: docker.io/bitnami/mastodon:4.0.2-debian-11-r18 imagePullPolicy: IfNotPresent command: - /bin/bash - -ec args: - /scripts/migrate-and-create-admin.sh securityContext: allowPrivilegeEscalation: false capabilities: drop: - ALL readOnlyRootFilesystem: false runAsNonRoot: true runAsUser: 1001 env: - name: BITNAMI_DEBUG value: "false" - name: MASTODON_DATABASE_PASSWORD valueFrom: secretKeyRef: name: mastodon-postgresql key: "password" # The rake db:migrate job requires access to Redis - name: MASTODON_REDIS_PASSWORD valueFrom: secretKeyRef: name: mastodon-redis key: "redis-password" envFrom: - configMapRef: name: mastodon-default - secretRef: name: mastodon-default volumeMounts: - name: scripts mountPath: /scripts resources: limits: {} requests: {} - name: mastodon-assets-precompile image: docker.io/bitnami/mastodon:4.0.2-debian-11-r18 imagePullPolicy: IfNotPresent command: - /bin/bash - -ec args: - /scripts/precompile-assets.sh securityContext: allowPrivilegeEscalation: false capabilities: drop: - ALL readOnlyRootFilesystem: false runAsNonRoot: true runAsUser: 1001 env: - name: BITNAMI_DEBUG value: "false" - name: MASTODON_S3_HOSTNAME value: "mastodon-minio" - name: MASTODON_S3_PORT_NUMBER value: "80" - name: MASTODON_AWS_ACCESS_KEY_ID valueFrom: secretKeyRef: name: mastodon-minio key: "root-user" - name: MASTODON_AWS_SECRET_ACCESS_KEY valueFrom: secretKeyRef: name: mastodon-minio key: "root-password" envFrom: - configMapRef: name: mastodon-default - secretRef: name: mastodon-default volumeMounts: - name: scripts mountPath: /scripts resources: limits: {} requests: {} volumes: - name: scripts configMap: name: mastodon-init-scripts defaultMode: 0755