## Global Docker image parameters
## Please note that this will override the image parameters, including dependencies, configured to use the global value
## Current available global Docker image parameters: imageRegistry and imagePullSecrets
##
# global:
#   imageRegistry: myRegistryName
#   imagePullSecrets:
#     - myRegistryKeySecretName
#   storageClass: myStorageClass

## Bitnami RabbitMQ image version
## ref: https://hub.docker.com/r/bitnami/rabbitmq/tags/
##
image:
  registry: docker.io
  repository: bitnami/rabbitmq
  tag: 3.8.3-debian-10-r40

  ## Set to true if you would like to see extra information in the logs;
  ## it turns on BASH and NAMI debugging in minideb
  ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging
  debug: false

  ## Specify an imagePullPolicy
  ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
  ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
  ##
  pullPolicy: IfNotPresent
  ## Optionally specify an array of imagePullSecrets.
  ## Secrets must be manually created in the namespace.
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
  ##
  # pullSecrets:
  #   - myRegistryKeySecretName
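  ## For illustration only: a pull secret like the one referenced above can be
  ## created ahead of time with kubectl (all names and credentials here are placeholders):
  ##   kubectl create secret docker-registry myRegistryKeySecretName \
  ##     --docker-server=docker.io --docker-username=someuser --docker-password=somepass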

## String to partially override rabbitmq.fullname template (will maintain the release name)
##
# nameOverride:

## String to fully override rabbitmq.fullname template
##
# fullnameOverride:

## Use an alternate scheduler, e.g. "stork".
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
# schedulerName:

## Does your cluster have RBAC enabled? Assumed yes by default.
rbacEnabled: true

## RabbitMQ nodes should be initialized one by one when the cluster is built for the
## first time, so the default value of podManagementPolicy is 'OrderedReady'.
## Once a RabbitMQ node has joined the cluster, at reboot it waits for a response from
## another node in the same cluster, except for the last node of the cluster to stop.
## If the cluster shuts down gracefully, you do not need to change podManagementPolicy,
## because the first RabbitMQ pod of the statefulset will always be the last one stopped.
## However, if the last node to stop was not the first pod (e.g. due to a failure),
## you must change podManagementPolicy to 'Parallel'.
## ref: https://www.rabbitmq.com/clustering.html#restarting
##
podManagementPolicy: OrderedReady

## section of specific values for rabbitmq
rabbitmq:
  ## RabbitMQ application username
  ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables
  ##
  username: user

  ## RabbitMQ application password
  ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables
  ##
  # password:
  # existingPasswordSecret: name-of-existing-secret

  ## Erlang cookie to determine whether different nodes are allowed to communicate with each other
  ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables
  ##
  # erlangCookie:
  # existingErlangSecret: name-of-existing-secret
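  ## For illustration: the existing secrets referenced above are typically expected to
  ## carry the keys `rabbitmq-password` and `rabbitmq-erlang-cookie` (verify against the
  ## chart templates before relying on this). A sketch with placeholder values:
  ##   kubectl create secret generic name-of-existing-secret \
  ##     --from-literal=rabbitmq-password=changeme \
  ##     --from-literal=rabbitmq-erlang-cookie=SOMESECRETCOOKIE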

  ## Node name to cluster with. e.g.: `clusternode@hostname`
  ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables
  ##
  # rabbitmqClusterNodeName:

  ## Value for the RABBITMQ_LOGS environment variable
  ## ref: https://www.rabbitmq.com/logging.html#log-file-location
  ##
  logs: '-'

  ## RabbitMQ Max File Descriptors
  ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables
  ## ref: https://www.rabbitmq.com/install-debian.html#kernel-resource-limits
  ##
  setUlimitNofiles: true
  ulimitNofiles: '65536'

  ## RabbitMQ maximum available scheduler threads and online scheduler threads
  ## ref: https://hamidreza-s.github.io/erlang/scheduling/real-time/preemptive/migration/2016/02/09/erlang-scheduler-details.html#scheduler-threads
  ##
  maxAvailableSchedulers: 2
  onlineSchedulers: 1

  ## Plugins to enable
  plugins: "rabbitmq_management rabbitmq_peer_discovery_k8s"

  ## Extra plugins to enable
  ## Use this instead of `plugins` to add new plugins
  extraPlugins: "rabbitmq_auth_backend_ldap"

  ## Clustering settings
  clustering:
    address_type: hostname
    k8s_domain: cluster.local
    ## Rebalance master for queues in cluster when new replica is created
    ## ref: https://www.rabbitmq.com/rabbitmq-queues.8.html#rebalance
    rebalance: false

  loadDefinition:
    enabled: false
    secretName: load-definition
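  ## For load definitions to actually be applied, three values must line up (a sketch,
  ## matching the commented examples elsewhere in this file): enable the flag here,
  ## create the secret via `extraSecrets` at the bottom of this file, and point the
  ## broker at the mounted file through `extraConfiguration`, e.g.
  ##   management.load_definitions = /app/load_definition.json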

  ## environment variables to configure rabbitmq
  ## ref: https://www.rabbitmq.com/configure.html#customise-environment
  env: {}
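  ## Illustrative example only (the variable below is a standard RabbitMQ environment
  ## variable, not a chart default); "+S 2:1" mirrors the scheduler settings above:
  # env:
  #   RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS: "+S 2:1"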

  ## Configuration file content: required cluster configuration
  ## Do not override unless you know what you are doing. To add more configuration, use `extraConfiguration` or `advancedConfiguration` instead
  configuration: |-
    ## Clustering
    cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s
    cluster_formation.k8s.host = kubernetes.default.svc.cluster.local
    cluster_formation.node_cleanup.interval = 10
    cluster_formation.node_cleanup.only_log_warning = true
    cluster_partition_handling = autoheal
    # queue master locator
    queue_master_locator=min-masters
    # allow the guest user to connect from outside localhost
    loopback_users.guest = false

  ## Configuration file content: extra configuration
  ## Use this instead of `configuration` to add more configuration
  extraConfiguration: |-
    #disk_free_limit.absolute = 50MB
    #management.load_definitions = /app/load_definition.json

  ## Configuration file content: advanced configuration
  ## Use this as additional configuration in classic config format (Erlang term configuration format)
  ##
  ## If you set LDAP with TLS/SSL enabled and you are using self-signed certificates, uncomment these lines.
  ## advancedConfiguration: |-
  ##   [{
  ##     rabbitmq_auth_backend_ldap,
  ##     [{
  ##         ssl_options,
  ##         [{
  ##             verify, verify_none
  ##         }, {
  ##             fail_if_no_peer_cert,
  ##             false
  ##         }]
  ##     ]}
  ##   }].
  ##
  advancedConfiguration: |-

  ## Enable encryption to rabbitmq
  ## ref: https://www.rabbitmq.com/ssl.html
  ##
  tls:
    enabled: false
    failIfNoPeerCert: true
    sslOptionsVerify: verify_peer
    caCertificate: |-
    serverCertificate: |-
    serverKey: |-
    # existingSecret: name-of-existing-secret-to-rabbitmq
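    ## A sketch of filling the PEM values above without pasting them inline, using
    ## Helm's generic --set-file flag (file names are placeholders):
    ##   helm install my-release . \
    ##     --set rabbitmq.tls.enabled=true \
    ##     --set-file rabbitmq.tls.caCertificate=./ca.pem \
    ##     --set-file rabbitmq.tls.serverCertificate=./tls.pem \
    ##     --set-file rabbitmq.tls.serverKey=./tls.key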

## LDAP configuration
##
ldap:
  enabled: false
  server: ""
  port: "389"
  user_dn_pattern: cn=${username},dc=example,dc=org
  tls:
    # If you enable TLS/SSL you can set advanced options using the advancedConfiguration parameter.
    enabled: false

## Kubernetes service type
service:
  type: ClusterIP
  ## Node port
  ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables
  ##
  # nodePort: 30672

  ## Set the LoadBalancerIP
  ##
  # loadBalancerIP:

  ## Node port for TLS
  ##
  # nodeTlsPort: 30671

  ## AMQP port
  ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables
  ##
  port: 5672

  ## AMQP TLS port
  ##
  tlsPort: 5671

  ## Dist port
  ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables
  ##
  distPort: 25672

  ## RabbitMQ Manager port
  ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables
  ##
  managerPort: 15672

  ## Service annotations
  annotations: {}
  # service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0

  ## Load Balancer sources
  ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
  ##
  # loadBalancerSourceRanges:
  #   - 10.10.10.0/24

  ## Extra ports to expose
  # extraPorts:
  #   - name: new_svc_name
  #     port: 1234
  #     targetPort: 1234

  ## Extra ports to be included in container spec, primarily informational
  # extraContainerPorts:
  #   - name: new_svc_name
  #     port: 1234
  #     targetPort: 1234

# Additional pod labels to apply
podLabels: {}

## Pod Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
##
securityContext:
  enabled: true
  fsGroup: 1001
  runAsUser: 1001
  extra: {}

persistence:
  ## this enables PVC templates that will create one per pod
  enabled: true

  ## rabbitmq data Persistent Volume Storage Class
  ## If defined, storageClassName: <storageClass>
  ## If set to "-", storageClassName: "", which disables dynamic provisioning
  ## If undefined (the default) or set to null, no storageClassName spec is
  ## set, choosing the default provisioner. (gp2 on AWS, standard on
  ## GKE, AWS & OpenStack)
  ##
  # storageClass: "-"
  ## selector can be used to match an existing PersistentVolume
  # selector:
  #   matchLabels:
  #     app: my-app
  accessMode: ReadWriteOnce

  ## Existing PersistentVolumeClaims
  ## The value is evaluated as a template
  ## So, for example, the name can depend on .Release or .Chart
  # existingClaim: ""
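  ## Illustrative example (hypothetical claim name), since the value is templated:
  # existingClaim: "{{ .Release.Name }}-rabbitmq-data"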

  # If you change this value, you might have to adjust `rabbitmq.diskFreeLimit` as well.
  size: 8Gi

  # persistence directory, maps to the rabbitmq data directory
  path: /opt/bitnami/rabbitmq/var/lib/rabbitmq

## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources: {}

networkPolicy:
  ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now.
  ## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
  ##
  enabled: false

  ## The Policy model to apply. When set to false, only pods with the correct
  ## client label will have network access to the ports RabbitMQ is listening
  ## on. When true, RabbitMQ will accept connections from any source
  ## (with the correct destination port).
  ##
  allowExternal: true
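  ## Illustrative note: with enabled=true and allowExternal=false, client pods usually
  ## need a label of the form <fullname>-client=true to reach the broker; check the
  ## chart's NetworkPolicy template for the exact key. A hypothetical example:
  ##   kubectl label pod my-app-pod my-release-rabbitmq-client=true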

  ## Additional NetworkPolicy Ingress "from" rules to set. Note that all rules are OR-ed.
  ##
  # additionalRules:
  #   - matchLabels:
  #     - role: frontend
  #   - matchExpressions:
  #     - key: role
  #       operator: In
  #       values:
  #         - frontend

## Replica count, set to 1 to provide a default available cluster
replicas: 1

## Pod priority
## https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
# priorityClassName: ""

## updateStrategy for RabbitMQ statefulset
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
updateStrategy:
  type: RollingUpdate

## Node labels and tolerations for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature
nodeSelector: {}
tolerations: []
affinity: {}
podDisruptionBudget: {}
  # maxUnavailable: 1
  # minAvailable: 1
## annotations for rabbitmq pods
podAnnotations: {}

## Configure the ingress resource that allows you to access the
## RabbitMQ installation. Set up the URL
## ref: http://kubernetes.io/docs/user-guide/ingress/
##
ingress:
  ## Set to true to enable ingress record generation
  enabled: false

  ## The list of hostnames to be covered with this ingress record.
  ## Most likely this will be just one host, but in the event more hosts are needed, this is an array
  ## hostName: foo.bar.com
  path: /

  ## Set this to true in order to enable TLS on the ingress record
  ## A side effect of this will be that the backend rabbitmq service will be connected at port 443
  tls: false

  ## If TLS is set to true, you must declare what secret will store the key/certificate for TLS
  tlsSecret: myTlsSecret

  ## Ingress annotations done as key:value pairs
  ## If you're using kube-lego, you will want to add:
  ## kubernetes.io/tls-acme: true
  ##
  ## For a full list of possible ingress annotations, please see
  ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md
  ##
  ## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set
  annotations: {}
  # kubernetes.io/ingress.class: nginx
  # kubernetes.io/tls-acme: true
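
  ## Illustrative example (hostname and secret name are placeholders) exposing the
  ## management UI behind an nginx ingress:
  # enabled: true
  # hostName: rabbitmq.example.com
  # tls: true
  # tlsSecret: rabbitmq-tls
  # annotations:
  #   kubernetes.io/ingress.class: nginx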

## The following settings configure the frequency of the liveness and readiness probes
livenessProbe:
  enabled: true
  initialDelaySeconds: 120
  timeoutSeconds: 20
  periodSeconds: 30
  failureThreshold: 6
  successThreshold: 1
  commandOverride: []

readinessProbe:
  enabled: true
  initialDelaySeconds: 10
  timeoutSeconds: 20
  periodSeconds: 30
  failureThreshold: 3
  successThreshold: 1
  commandOverride: []
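  ## Illustrative commandOverride sketch (assumes rabbitmq-diagnostics is on the PATH
  ## inside the container, as it is in the Bitnami image):
  # commandOverride: ['sh', '-c', 'rabbitmq-diagnostics -q ping']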

## Prometheus Metrics
##
metrics:
  enabled: false
  port: 9419
  plugins: "rabbitmq_prometheus"
  ## Prometheus pod annotations
  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
  ##
  podAnnotations:
    prometheus.io/scrape: "true"
    prometheus.io/port: "{{ .Values.metrics.port }}"

  ## Prometheus Service Monitor
  ## ref: https://github.com/coreos/prometheus-operator
  ## https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
  serviceMonitor:
    ## If the operator is installed in your cluster, set to true to create a Service Monitor Entry
    enabled: false
    ## Specify the namespace in which the serviceMonitor resource will be created
    # namespace: ""
    ## Specify the interval at which metrics should be scraped
    interval: 30s
    ## Specify the timeout after which the scrape is ended
    # scrapeTimeout: 30s
    ## Specify Metric Relabellings to add to the scrape endpoint
    # relabellings:
    ## Specify honorLabels parameter to add the scrape endpoint
    honorLabels: false
    ## Specify the release for ServiceMonitor. Sometimes a custom release label is needed for the Prometheus Operator to pick it up
    # release: ""
    ## Labels used by the Prometheus installed in your cluster to select ServiceMonitors to work with
    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec
    additionalLabels: {}
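    ## Illustrative example: many Prometheus Operator installs select monitors by a
    ## `release` label; the value below is a placeholder for your Prometheus release name:
    # additionalLabels:
    #   release: prometheus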

  ## Custom PrometheusRule to be defined
  ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart
  ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions
  prometheusRule:
    enabled: false
    additionalLabels: {}
    namespace: ""
    rules: []
    ## List of rules, used as a template by Helm.
    ## These are just example rules inspired by https://awesome-prometheus-alerts.grep.to/rules.html
    ## Please adapt them to your needs.
    ## Make sure to constrain the rules to the current rabbitmq service.
    ## Also make sure to escape anything that looks like a Helm template.
    # - alert: RabbitmqDown
    #   expr: rabbitmq_up{service="{{ template "rabbitmq.fullname" . }}"} == 0
    #   for: 5m
    #   labels:
    #     severity: error
    #   annotations:
    #     summary: Rabbitmq down (instance {{ "{{ $labels.instance }}" }})
    #     description: RabbitMQ node down

    # - alert: ClusterDown
    #   expr: |
    #     sum(rabbitmq_running{service="{{ template "rabbitmq.fullname" . }}"})
    #     < {{ .Values.replicas }}
    #   for: 5m
    #   labels:
    #     severity: error
    #   annotations:
    #     summary: Cluster down (instance {{ "{{ $labels.instance }}" }})
    #     description: |
    #       Less than {{ .Values.replicas }} nodes running in RabbitMQ cluster
    #       VALUE = {{ "{{ $value }}" }}

    # - alert: ClusterPartition
    #   expr: rabbitmq_partitions{service="{{ template "rabbitmq.fullname" . }}"} > 0
    #   for: 5m
    #   labels:
    #     severity: error
    #   annotations:
    #     summary: Cluster partition (instance {{ "{{ $labels.instance }}" }})
    #     description: |
    #       Cluster partition
    #       VALUE = {{ "{{ $value }}" }}

    # - alert: OutOfMemory
    #   expr: |
    #     rabbitmq_node_mem_used{service="{{ template "rabbitmq.fullname" . }}"}
    #     / rabbitmq_node_mem_limit{service="{{ template "rabbitmq.fullname" . }}"}
    #     * 100 > 90
    #   for: 5m
    #   labels:
    #     severity: warning
    #   annotations:
    #     summary: Out of memory (instance {{ "{{ $labels.instance }}" }})
    #     description: |
    #       Memory available for RabbitMQ is low (< 10%)
    #       VALUE = {{ "{{ $value }}" }}
    #       LABELS: {{ "{{ $labels }}" }}

    # - alert: TooManyConnections
    #   expr: rabbitmq_connectionsTotal{service="{{ template "rabbitmq.fullname" . }}"} > 1000
    #   for: 5m
    #   labels:
    #     severity: warning
    #   annotations:
    #     summary: Too many connections (instance {{ "{{ $labels.instance }}" }})
    #     description: |
    #       RabbitMQ instance has too many connections (> 1000)
    #       VALUE = {{ "{{ $value }}" }}
    #       LABELS: {{ "{{ $labels }}" }}

##
## Init containers parameters:
## volumePermissions: Change the owner of the persistent volume mountpoint to RunAsUser:fsGroup
##
volumePermissions:
  enabled: false
  image:
    registry: docker.io
    repository: bitnami/minideb
    tag: buster
    pullPolicy: Always
    ## Optionally specify an array of imagePullSecrets.
    ## Secrets must be manually created in the namespace.
    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
    ##
    # pullSecrets:
    #   - myRegistryKeySecretName
  resources: {}

## forceBoot: executes 'rabbitmqctl force_boot' to force boot a cluster that was shut down
## unexpectedly or in an unknown order.
## ref: https://www.rabbitmq.com/rabbitmqctl.8.html#force_boot
##
forceBoot:
  enabled: false

## Optionally specify extra secrets to be created by the chart.
## This can be useful when combined with load_definitions to automatically create the secret containing the definitions to be loaded.
##
extraSecrets: {}
# load-definition:
#   load_definition.json: |
#     {
#       ...
#     }

## Optionally add extra volumeMounts
extraVolumeMounts: []
# - name: extras
#   mountPath: /usr/share/extras
#   readOnly: true

extraVolumes: []
# - name: extras
#   emptyDir: {}