From 17250d1413b4e2b66105c5b7e291c2cec69da4ea Mon Sep 17 00:00:00 2001 From: John Peterson Date: Tue, 11 Feb 2020 10:33:42 -0800 Subject: [PATCH] Openshift4 operator for Artifactory HA v7.0.2 --- Openshift4/.gitignore | 1 + .../artifactory-ha-operator/build/Dockerfile | 4 + ...lm.k8s.io_openshiftartifactoryhas_crd.yaml | 22 + ...io_v1alpha1_openshiftartifactoryha_cr.yaml | 997 ++++++++++++ .../deploy/imagestream.yaml | 6 + .../deploy/namespace.yaml | 17 + ...operator.v1.0.0.clusterserviceversion.yaml | 745 +++++++++ .../artifactory-ha-operator.package.yaml | 5 + .../deploy/operator.yaml | 31 + .../deploy/operatorgroup.yaml | 8 + .../deploy/project.yaml | 89 ++ .../artifactory-ha-operator/deploy/role.yaml | 100 ++ .../deploy/role_binding.yaml | 11 + .../deploy/securitycontextconstraints.yaml | 15 + .../deploy/service_account.yaml | 4 + .../openshift-artifactory-ha/CHANGELOG.md | 557 +++++++ .../openshift-artifactory-ha/Chart.yaml | 24 + .../openshift-artifactory-ha/LICENSE | 201 +++ .../openshift-artifactory-ha/README.md | 1266 ++++++++++++++++ .../ReverseProxyConfiguration.md | 140 ++ .../openshift-artifactory-ha/UPGRADE_NOTES.md | 33 + .../charts/postgresql/.helmignore | 2 + .../charts/postgresql/Chart.yaml | 23 + .../charts/postgresql/README.md | 487 ++++++ .../charts/postgresql/files/README.md | 1 + .../charts/postgresql/files/conf.d/README.md | 4 + .../docker-entrypoint-initdb.d/README.md | 3 + .../charts/postgresql/templates/NOTES.txt | 50 + .../charts/postgresql/templates/_helpers.tpl | 374 +++++ .../postgresql/templates/configmap.yaml | 26 + .../templates/extended-config-configmap.yaml | 21 + .../templates/initialization-configmap.yaml | 24 + .../postgresql/templates/metrics-svc.yaml | 26 + .../postgresql/templates/networkpolicy.yaml | 34 + .../charts/postgresql/templates/secrets.yaml | 17 + .../postgresql/templates/serviceaccount.yaml | 11 + .../postgresql/templates/servicemonitor.yaml | 33 + .../templates/statefulset-slaves.yaml | 247 +++ .../postgresql/templates/statefulset.yaml | 355 +++++ .../postgresql/templates/svc-headless.yaml | 19 + .../charts/postgresql/templates/svc-read.yaml | 31 + .../charts/postgresql/templates/svc.yaml | 38 + .../charts/postgresql/values-production.yaml | 392 +++++ .../charts/postgresql/values.schema.json | 103 ++ .../charts/postgresql/values.yaml | 392 +++++ .../openshift-artifactory-ha/helminstall.sh | 23 + .../pv-examples/pv0001-large.yaml | 15 + .../pv-examples/pv0002-large.yaml | 15 + .../pv-examples/pv0003-large.yaml | 15 + .../pv-examples/pv0004-large.yaml | 15 + .../pv-examples/pv0005-large.yaml | 15 + .../requirements.lock | 6 + .../requirements.yaml | 5 + .../openshift-artifactory-ha/scc.yaml | 18 + .../templates/NOTES.txt | 113 ++ .../templates/_helpers.tpl | 103 ++ .../templates/access-bootstrap-creds.yaml | 15 + .../artifactory-binarystore-secret.yaml | 14 + .../templates/artifactory-configmaps.yaml | 13 + .../templates/artifactory-installer-info.yaml | 25 + .../templates/artifactory-license-secret.yaml | 14 + .../templates/artifactory-networkpolicy.yaml | 34 + .../templates/artifactory-nfs-pvc.yaml | 101 ++ .../templates/artifactory-node-pdb.yaml | 19 + .../artifactory-node-statefulset.yaml | 510 +++++++ .../artifactory-primary-statefulset.yaml | 587 ++++++++ .../templates/artifactory-priority-class.yaml | 9 + .../templates/artifactory-role.yaml | 14 + .../templates/artifactory-rolebinding.yaml | 19 + .../templates/artifactory-secrets.yaml | 17 + .../templates/artifactory-service.yaml | 88 ++ 
.../templates/artifactory-serviceaccount.yaml | 16 + .../templates/artifactory-storage-pvc.yaml | 27 + .../templates/artifactory-system-yaml.yaml | 14 + .../templates/catalina-logger-configmap.yaml | 53 + .../templates/filebeat-configmap.yaml | 15 + .../templates/ingress.yaml | 56 + .../templates/nginx-artifactory-conf.yaml | 14 + .../templates/nginx-certificate-secret.yaml | 14 + .../templates/nginx-conf.yaml | 14 + .../templates/nginx-deployment.yaml | 185 +++ .../templates/nginx-pvc.yaml | 26 + .../templates/nginx-service.yaml | 69 + .../values-large.yaml | 24 + .../values-medium.yaml | 24 + .../values-small.yaml | 24 + .../openshift-artifactory-ha/values.yaml | 1330 +++++++++++++++++ .../artifactory-ha-operator/watches.yaml | 5 + Openshift4/artifactoryha-helm/CHANGELOG.md | 557 +++++++ Openshift4/artifactoryha-helm/Chart.yaml | 24 + Openshift4/artifactoryha-helm/LICENSE | 201 +++ Openshift4/artifactoryha-helm/README.md | 1266 ++++++++++++++++ .../ReverseProxyConfiguration.md | 140 ++ .../artifactoryha-helm/UPGRADE_NOTES.md | 33 + .../charts/postgresql/.helmignore | 2 + .../charts/postgresql/Chart.yaml | 23 + .../charts/postgresql/README.md | 487 ++++++ .../charts/postgresql/files/README.md | 1 + .../charts/postgresql/files/conf.d/README.md | 4 + .../docker-entrypoint-initdb.d/README.md | 3 + .../charts/postgresql/templates/NOTES.txt | 50 + .../charts/postgresql/templates/_helpers.tpl | 374 +++++ .../postgresql/templates/configmap.yaml | 26 + .../templates/extended-config-configmap.yaml | 21 + .../templates/initialization-configmap.yaml | 24 + .../postgresql/templates/metrics-svc.yaml | 26 + .../postgresql/templates/networkpolicy.yaml | 34 + .../charts/postgresql/templates/secrets.yaml | 17 + .../postgresql/templates/serviceaccount.yaml | 11 + .../postgresql/templates/servicemonitor.yaml | 33 + .../templates/statefulset-slaves.yaml | 247 +++ .../postgresql/templates/statefulset.yaml | 355 +++++ .../postgresql/templates/svc-headless.yaml | 19 + .../charts/postgresql/templates/svc-read.yaml | 31 + .../charts/postgresql/templates/svc.yaml | 38 + .../charts/postgresql/values-production.yaml | 392 +++++ .../charts/postgresql/values.schema.json | 103 ++ .../charts/postgresql/values.yaml | 392 +++++ Openshift4/artifactoryha-helm/helminstall.sh | 23 + .../pv-examples/pv0001-large.yaml | 15 + .../pv-examples/pv0002-large.yaml | 15 + .../pv-examples/pv0003-large.yaml | 15 + .../pv-examples/pv0004-large.yaml | 15 + .../pv-examples/pv0005-large.yaml | 15 + .../artifactoryha-helm/requirements.lock | 6 + .../artifactoryha-helm/requirements.yaml | 5 + Openshift4/artifactoryha-helm/scc.yaml | 18 + .../artifactoryha-helm/templates/NOTES.txt | 113 ++ .../artifactoryha-helm/templates/_helpers.tpl | 103 ++ .../templates/access-bootstrap-creds.yaml | 15 + .../artifactory-binarystore-secret.yaml | 14 + .../templates/artifactory-configmaps.yaml | 13 + .../templates/artifactory-installer-info.yaml | 25 + .../templates/artifactory-license-secret.yaml | 14 + .../templates/artifactory-networkpolicy.yaml | 34 + .../templates/artifactory-nfs-pvc.yaml | 101 ++ .../templates/artifactory-node-pdb.yaml | 19 + .../artifactory-node-statefulset.yaml | 510 +++++++ .../artifactory-primary-statefulset.yaml | 593 ++++++++ .../templates/artifactory-priority-class.yaml | 9 + .../templates/artifactory-role.yaml | 14 + .../templates/artifactory-rolebinding.yaml | 19 + .../templates/artifactory-secrets.yaml | 17 + .../templates/artifactory-service.yaml | 88 ++ .../templates/artifactory-serviceaccount.yaml | 16 + 
.../templates/artifactory-storage-pvc.yaml | 27 + .../templates/artifactory-system-yaml.yaml | 14 + .../templates/catalina-logger-configmap.yaml | 53 + .../templates/filebeat-configmap.yaml | 15 + .../artifactoryha-helm/templates/ingress.yaml | 56 + .../templates/nginx-artifactory-conf.yaml | 14 + .../templates/nginx-certificate-secret.yaml | 14 + .../templates/nginx-conf.yaml | 14 + .../templates/nginx-deployment.yaml | 185 +++ .../templates/nginx-pvc.yaml | 26 + .../templates/nginx-service.yaml | 69 + .../artifactoryha-helm/values-large.yaml | 24 + .../artifactoryha-helm/values-medium.yaml | 24 + .../artifactoryha-helm/values-small.yaml | 24 + Openshift4/artifactoryha-helm/values.yaml | 1330 +++++++++++++++++ 160 files changed, 19388 insertions(+) create mode 100644 Openshift4/.gitignore create mode 100644 Openshift4/artifactory-ha-operator/build/Dockerfile create mode 100644 Openshift4/artifactory-ha-operator/deploy/crds/charts.helm.k8s.io_openshiftartifactoryhas_crd.yaml create mode 100644 Openshift4/artifactory-ha-operator/deploy/crds/charts.helm.k8s.io_v1alpha1_openshiftartifactoryha_cr.yaml create mode 100644 Openshift4/artifactory-ha-operator/deploy/imagestream.yaml create mode 100644 Openshift4/artifactory-ha-operator/deploy/namespace.yaml create mode 100644 Openshift4/artifactory-ha-operator/deploy/olm-catalog/artifactory-ha-operator/1.0.0/artifactory-ha-operator.v1.0.0.clusterserviceversion.yaml create mode 100644 Openshift4/artifactory-ha-operator/deploy/olm-catalog/artifactory-ha-operator/artifactory-ha-operator.package.yaml create mode 100644 Openshift4/artifactory-ha-operator/deploy/operator.yaml create mode 100644 Openshift4/artifactory-ha-operator/deploy/operatorgroup.yaml create mode 100644 Openshift4/artifactory-ha-operator/deploy/project.yaml create mode 100644 Openshift4/artifactory-ha-operator/deploy/role.yaml create mode 100644 Openshift4/artifactory-ha-operator/deploy/role_binding.yaml create mode 100644 Openshift4/artifactory-ha-operator/deploy/securitycontextconstraints.yaml create mode 100644 Openshift4/artifactory-ha-operator/deploy/service_account.yaml create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/CHANGELOG.md create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/Chart.yaml create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/LICENSE create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/README.md create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/ReverseProxyConfiguration.md create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/UPGRADE_NOTES.md create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/.helmignore create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/Chart.yaml create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/README.md create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/files/README.md create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/files/conf.d/README.md create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/files/docker-entrypoint-initdb.d/README.md create mode 100755 
Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/templates/NOTES.txt create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/templates/_helpers.tpl create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/templates/configmap.yaml create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/templates/extended-config-configmap.yaml create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/templates/initialization-configmap.yaml create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/templates/metrics-svc.yaml create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/templates/networkpolicy.yaml create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/templates/secrets.yaml create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/templates/serviceaccount.yaml create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/templates/servicemonitor.yaml create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/templates/statefulset-slaves.yaml create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/templates/statefulset.yaml create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/templates/svc-headless.yaml create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/templates/svc-read.yaml create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/templates/svc.yaml create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/values-production.yaml create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/values.schema.json create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/values.yaml create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/helminstall.sh create mode 100644 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/pv-examples/pv0001-large.yaml create mode 100644 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/pv-examples/pv0002-large.yaml create mode 100644 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/pv-examples/pv0003-large.yaml create mode 100644 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/pv-examples/pv0004-large.yaml create mode 100644 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/pv-examples/pv0005-large.yaml create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/requirements.lock create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/requirements.yaml create mode 100644 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/scc.yaml create mode 100755 
Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/NOTES.txt create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/_helpers.tpl create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/access-bootstrap-creds.yaml create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-binarystore-secret.yaml create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-configmaps.yaml create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-installer-info.yaml create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-license-secret.yaml create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-networkpolicy.yaml create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-nfs-pvc.yaml create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-node-pdb.yaml create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-node-statefulset.yaml create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-primary-statefulset.yaml create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-priority-class.yaml create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-role.yaml create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-rolebinding.yaml create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-secrets.yaml create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-service.yaml create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-serviceaccount.yaml create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-storage-pvc.yaml create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-system-yaml.yaml create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/catalina-logger-configmap.yaml create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/filebeat-configmap.yaml create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/ingress.yaml create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/nginx-artifactory-conf.yaml create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/nginx-certificate-secret.yaml create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/nginx-conf.yaml create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/nginx-deployment.yaml create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/nginx-pvc.yaml create mode 
100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/nginx-service.yaml create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/values-large.yaml create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/values-medium.yaml create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/values-small.yaml create mode 100755 Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/values.yaml create mode 100644 Openshift4/artifactory-ha-operator/watches.yaml create mode 100755 Openshift4/artifactoryha-helm/CHANGELOG.md create mode 100755 Openshift4/artifactoryha-helm/Chart.yaml create mode 100755 Openshift4/artifactoryha-helm/LICENSE create mode 100755 Openshift4/artifactoryha-helm/README.md create mode 100755 Openshift4/artifactoryha-helm/ReverseProxyConfiguration.md create mode 100755 Openshift4/artifactoryha-helm/UPGRADE_NOTES.md create mode 100755 Openshift4/artifactoryha-helm/charts/postgresql/.helmignore create mode 100755 Openshift4/artifactoryha-helm/charts/postgresql/Chart.yaml create mode 100755 Openshift4/artifactoryha-helm/charts/postgresql/README.md create mode 100755 Openshift4/artifactoryha-helm/charts/postgresql/files/README.md create mode 100755 Openshift4/artifactoryha-helm/charts/postgresql/files/conf.d/README.md create mode 100755 Openshift4/artifactoryha-helm/charts/postgresql/files/docker-entrypoint-initdb.d/README.md create mode 100755 Openshift4/artifactoryha-helm/charts/postgresql/templates/NOTES.txt create mode 100755 Openshift4/artifactoryha-helm/charts/postgresql/templates/_helpers.tpl create mode 100755 Openshift4/artifactoryha-helm/charts/postgresql/templates/configmap.yaml create mode 100755 Openshift4/artifactoryha-helm/charts/postgresql/templates/extended-config-configmap.yaml create mode 100755 Openshift4/artifactoryha-helm/charts/postgresql/templates/initialization-configmap.yaml create mode 100755 Openshift4/artifactoryha-helm/charts/postgresql/templates/metrics-svc.yaml create mode 100755 Openshift4/artifactoryha-helm/charts/postgresql/templates/networkpolicy.yaml create mode 100755 Openshift4/artifactoryha-helm/charts/postgresql/templates/secrets.yaml create mode 100755 Openshift4/artifactoryha-helm/charts/postgresql/templates/serviceaccount.yaml create mode 100755 Openshift4/artifactoryha-helm/charts/postgresql/templates/servicemonitor.yaml create mode 100755 Openshift4/artifactoryha-helm/charts/postgresql/templates/statefulset-slaves.yaml create mode 100755 Openshift4/artifactoryha-helm/charts/postgresql/templates/statefulset.yaml create mode 100755 Openshift4/artifactoryha-helm/charts/postgresql/templates/svc-headless.yaml create mode 100755 Openshift4/artifactoryha-helm/charts/postgresql/templates/svc-read.yaml create mode 100755 Openshift4/artifactoryha-helm/charts/postgresql/templates/svc.yaml create mode 100755 Openshift4/artifactoryha-helm/charts/postgresql/values-production.yaml create mode 100755 Openshift4/artifactoryha-helm/charts/postgresql/values.schema.json create mode 100755 Openshift4/artifactoryha-helm/charts/postgresql/values.yaml create mode 100755 Openshift4/artifactoryha-helm/helminstall.sh create mode 100644 Openshift4/artifactoryha-helm/pv-examples/pv0001-large.yaml create mode 100644 Openshift4/artifactoryha-helm/pv-examples/pv0002-large.yaml create mode 100644 Openshift4/artifactoryha-helm/pv-examples/pv0003-large.yaml create mode 100644 
Openshift4/artifactoryha-helm/pv-examples/pv0004-large.yaml create mode 100644 Openshift4/artifactoryha-helm/pv-examples/pv0005-large.yaml create mode 100755 Openshift4/artifactoryha-helm/requirements.lock create mode 100755 Openshift4/artifactoryha-helm/requirements.yaml create mode 100644 Openshift4/artifactoryha-helm/scc.yaml create mode 100755 Openshift4/artifactoryha-helm/templates/NOTES.txt create mode 100755 Openshift4/artifactoryha-helm/templates/_helpers.tpl create mode 100755 Openshift4/artifactoryha-helm/templates/access-bootstrap-creds.yaml create mode 100755 Openshift4/artifactoryha-helm/templates/artifactory-binarystore-secret.yaml create mode 100755 Openshift4/artifactoryha-helm/templates/artifactory-configmaps.yaml create mode 100755 Openshift4/artifactoryha-helm/templates/artifactory-installer-info.yaml create mode 100755 Openshift4/artifactoryha-helm/templates/artifactory-license-secret.yaml create mode 100755 Openshift4/artifactoryha-helm/templates/artifactory-networkpolicy.yaml create mode 100755 Openshift4/artifactoryha-helm/templates/artifactory-nfs-pvc.yaml create mode 100755 Openshift4/artifactoryha-helm/templates/artifactory-node-pdb.yaml create mode 100755 Openshift4/artifactoryha-helm/templates/artifactory-node-statefulset.yaml create mode 100755 Openshift4/artifactoryha-helm/templates/artifactory-primary-statefulset.yaml create mode 100755 Openshift4/artifactoryha-helm/templates/artifactory-priority-class.yaml create mode 100755 Openshift4/artifactoryha-helm/templates/artifactory-role.yaml create mode 100755 Openshift4/artifactoryha-helm/templates/artifactory-rolebinding.yaml create mode 100755 Openshift4/artifactoryha-helm/templates/artifactory-secrets.yaml create mode 100755 Openshift4/artifactoryha-helm/templates/artifactory-service.yaml create mode 100755 Openshift4/artifactoryha-helm/templates/artifactory-serviceaccount.yaml create mode 100755 Openshift4/artifactoryha-helm/templates/artifactory-storage-pvc.yaml create mode 100755 Openshift4/artifactoryha-helm/templates/artifactory-system-yaml.yaml create mode 100755 Openshift4/artifactoryha-helm/templates/catalina-logger-configmap.yaml create mode 100755 Openshift4/artifactoryha-helm/templates/filebeat-configmap.yaml create mode 100755 Openshift4/artifactoryha-helm/templates/ingress.yaml create mode 100755 Openshift4/artifactoryha-helm/templates/nginx-artifactory-conf.yaml create mode 100755 Openshift4/artifactoryha-helm/templates/nginx-certificate-secret.yaml create mode 100755 Openshift4/artifactoryha-helm/templates/nginx-conf.yaml create mode 100755 Openshift4/artifactoryha-helm/templates/nginx-deployment.yaml create mode 100755 Openshift4/artifactoryha-helm/templates/nginx-pvc.yaml create mode 100755 Openshift4/artifactoryha-helm/templates/nginx-service.yaml create mode 100755 Openshift4/artifactoryha-helm/values-large.yaml create mode 100755 Openshift4/artifactoryha-helm/values-medium.yaml create mode 100755 Openshift4/artifactoryha-helm/values-small.yaml create mode 100755 Openshift4/artifactoryha-helm/values.yaml diff --git a/Openshift4/.gitignore b/Openshift4/.gitignore new file mode 100644 index 0000000..bcacc76 --- /dev/null +++ b/Openshift4/.gitignore @@ -0,0 +1 @@ +artifactory.cluster.license diff --git a/Openshift4/artifactory-ha-operator/build/Dockerfile b/Openshift4/artifactory-ha-operator/build/Dockerfile new file mode 100644 index 0000000..6fcc547 --- /dev/null +++ b/Openshift4/artifactory-ha-operator/build/Dockerfile @@ -0,0 +1,4 @@ +FROM quay.io/operator-framework/helm-operator:v0.14.1 + 
+COPY watches.yaml ${HOME}/watches.yaml +COPY helm-charts/ ${HOME}/helm-charts/ diff --git a/Openshift4/artifactory-ha-operator/deploy/crds/charts.helm.k8s.io_openshiftartifactoryhas_crd.yaml b/Openshift4/artifactory-ha-operator/deploy/crds/charts.helm.k8s.io_openshiftartifactoryhas_crd.yaml new file mode 100644 index 0000000..a125712 --- /dev/null +++ b/Openshift4/artifactory-ha-operator/deploy/crds/charts.helm.k8s.io_openshiftartifactoryhas_crd.yaml @@ -0,0 +1,22 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: openshiftartifactoryhas.charts.helm.k8s.io +spec: + group: charts.helm.k8s.io + names: + kind: OpenshiftArtifactoryHa + listKind: OpenshiftArtifactoryHaList + plural: openshiftartifactoryhas + singular: openshiftartifactoryha + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + type: object + x-kubernetes-preserve-unknown-fields: true + versions: + - name: v1alpha1 + served: true + storage: true diff --git a/Openshift4/artifactory-ha-operator/deploy/crds/charts.helm.k8s.io_v1alpha1_openshiftartifactoryha_cr.yaml b/Openshift4/artifactory-ha-operator/deploy/crds/charts.helm.k8s.io_v1alpha1_openshiftartifactoryha_cr.yaml new file mode 100644 index 0000000..da682db --- /dev/null +++ b/Openshift4/artifactory-ha-operator/deploy/crds/charts.helm.k8s.io_v1alpha1_openshiftartifactoryha_cr.yaml @@ -0,0 +1,997 @@ +apiVersion: charts.helm.k8s.io/v1alpha1 +kind: OpenshiftArtifactoryHa +metadata: + name: artifactoryha +spec: + # Default values copied from /helm-charts/openshift-artifactory-ha/values.yaml + + access: + database: + maxOpenConnections: 80 + artifactory: + accessAdmin: + dataKey: null + ip: 127.0.0.1 + password: null + secret: null + annotations: {} + binarystore: + enabled: true + catalinaLoggers: [] + configMapName: null + configMaps: "" + copyOnEveryStartup: null + customInitContainers: "" + customInitContainersBegin: | + - name: "custom-setup" + image: "{{ .Values.initContainerImage }}" + imagePullPolicy: "{{ .Values.artifactory.image.pullPolicy }}" + command: + - 'sh' + - '-c' + - 'chown -R 1030:1030 {{ .Values.artifactory.persistence.mountPath }}' + securityContext: + runAsUser: 0 + volumeMounts: + - mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + name: volume + customPersistentPodVolumeClaim: {} + customPersistentVolumeClaim: {} + customSidecarContainers: "" + customVolumeMounts: "" + customVolumes: "" + database: + maxOpenConnections: 80 + deleteDBPropertiesOnStartup: true + externalArtifactoryPort: 8081 + externalPort: 8082 + haDataDir: + enabled: false + path: null + image: + pullPolicy: IfNotPresent + repository: earlyaccess.jfrog.io/artifactory-pro + internalArtifactoryPort: 8081 + internalPort: 8082 + javaOpts: {} + joinKey: EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE + license: + dataKey: artifactory.cluster.license + licenseKey: null + secret: artifactory-license + livenessProbe: + enabled: true + failureThreshold: 10 + initialDelaySeconds: 180 + path: /router/api/v1/system/health + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 10 + loggers: [] + masterKey: FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF + name: artifactory-ha + node: + affinity: {} + javaOpts: + corePoolSize: 16 + jmx: + accessFile: null + authenticate: false + enabled: false + host: null + passwordFile: null + port: 9010 + ssl: false + labels: {} + minAvailable: 1 + name: artifactory-ha-member + nodeSelector: {} + persistence: + existingClaim: false + podAntiAffinity: + topologyKey: 
kubernetes.io/hostname
+        type: ""
+      replicaCount: 1
+      resources: {}
+      tolerations: []
+      waitForPrimaryStartup:
+        enabled: true
+        time: 60
+    persistence:
+      accessMode: ReadWriteOnce
+      awsS3:
+        bucketName: artifactory-ha-aws
+        credential: null
+        endpoint: null
+        httpsOnly: true
+        identity: null
+        path: artifactory-ha/filestore
+        properties: {}
+        refreshCredentials: true
+        region: null
+        roleName: null
+        s3AwsVersion: AWS4-HMAC-SHA256
+        testConnection: false
+      awsS3V3:
+        bucketName: artifactory-aws
+        cloudFrontDomainName: null
+        cloudFrontKeyPairId: null
+        cloudFrontPrivateKey: null
+        credential: null
+        endpoint: null
+        identity: null
+        kmsCryptoMode: null
+        kmsKeyRegion: null
+        kmsServerSideEncryptionKeyId: null
+        path: artifactory/filestore
+        region: null
+        signatureExpirySeconds: 300
+        testConnection: false
+        useInstanceCredentials: true
+        usePresigning: false
+      azureBlob:
+        accountKey: null
+        accountName: null
+        containerName: null
+        endpoint: null
+        testConnection: false
+      binarystoreXml: |
+        {{- if eq .Values.artifactory.persistence.type "file-system" }}
+        <!-- File system replication -->
+        {{- if .Values.artifactory.persistence.fileSystem.existingSharedClaim.enabled }}
+        <!-- File Storage - Dynamic for Artifactory files, pre-created for DATA and BACKUP -->
+        <config version="4">
+          <chain>
+            <provider id="cache-fs" type="cache-fs">    <!-- This is a cached filestore -->
+              <provider id="sharding" type="sharding">  <!-- This is a sharding provider -->
+                {{- range $sharedClaimNumber, $e := until (.Values.artifactory.persistence.fileSystem.existingSharedClaim.numberOfExistingClaims|int) -}}
+                <sub-provider id="shard{{ $sharedClaimNumber }}" type="state-aware"/>
+                {{- end }}
+              </provider>
+            </provider>
+          </chain>
+
+          <provider id="cache-fs" type="cache-fs">
+            <maxCacheSize>{{ .Values.artifactory.persistence.maxCacheSize }}</maxCacheSize>
+            <cacheProviderDir>{{ .Values.artifactory.persistence.cacheProviderDir }}</cacheProviderDir>
+          </provider>
+
+          // Specify the read and write strategy and redundancy for the sharding binary provider
+          <provider id="sharding" type="sharding">
+            <readBehavior>roundRobin</readBehavior>
+            <writeBehavior>percentageFreeSpace</writeBehavior>
+            <redundancy>2</redundancy>
+          </provider>
+
+          {{- range $sharedClaimNumber, $e := until (.Values.artifactory.persistence.fileSystem.existingSharedClaim.numberOfExistingClaims|int) -}}
+          //For each sub-provider (mount), specify the filestore location
+          <provider id="shard{{ $sharedClaimNumber }}" type="state-aware">
+            <fileStoreDir>filestore{{ $sharedClaimNumber }}</fileStoreDir>
+          </provider>
+          {{- end }}
+        </config>
+        {{- else }}
+        <config version="2">
+          <chain>
+            <provider id="cache-fs" type="cache-fs">
+              <provider id="sharding-cluster" type="sharding-cluster">
+                <readBehavior>crossNetworkStrategy</readBehavior>
+                <writeBehavior>crossNetworkStrategy</writeBehavior>
+                <redundancy>{{ .Values.artifactory.persistence.redundancy }}</redundancy>
+                <lenientLimit>2</lenientLimit>
+                <minSpareUploaderExecutor>2</minSpareUploaderExecutor>
+                <sub-provider id="state-aware" type="state-aware"/>
+                <dynamic-provider id="remote" type="remote"/>
+                <property name="zones" value="local,remote"/>
+              </provider>
+            </provider>
+          </chain>
+
+          <provider id="cache-fs" type="cache-fs">
+            <maxCacheSize>{{ .Values.artifactory.persistence.maxCacheSize }}</maxCacheSize>
+            <cacheProviderDir>{{ .Values.artifactory.persistence.cacheProviderDir }}</cacheProviderDir>
+          </provider>
+
+          <!-- Shards add local file-system provider configuration -->
+          <provider id="state-aware" type="state-aware">
+            <fileStoreDir>shard-fs-1</fileStoreDir>
+            <zone>local</zone>
+          </provider>
+
+          <!-- Shards dynamic remote provider configuration -->
+          <provider id="remote" type="remote">
+            <checkPeriod>30</checkPeriod>
+            <serviceId>tester-remote1</serviceId>
+            <timeout>10000</timeout>
+            <zone>remote</zone>
+            <property name="header.remote.block" value="true"/>
+          </provider>
+        </config>
+        {{- end }}
+        {{- end }}
+        {{- if eq .Values.artifactory.persistence.type "google-storage" }}
+        <!-- Google storage -->
+        <config version="2">
+          <chain>
+            <provider id="sharding-cluster" type="sharding-cluster">
+              <readBehavior>crossNetworkStrategy</readBehavior>
+              <writeBehavior>crossNetworkStrategy</writeBehavior>
+              <redundancy>{{ .Values.artifactory.persistence.redundancy }}</redundancy>
+              <minSpareUploaderExecutor>2</minSpareUploaderExecutor>
+              <sub-provider id="eventual-cluster" type="eventual-cluster">
+                <provider id="retry" type="retry">
+                  <provider id="google-storage" type="google-storage"/>
+                </provider>
+              </sub-provider>
+              <dynamic-provider id="remote" type="remote"/>
+              <property name="zones" value="local,remote"/>
+            </provider>
+          </chain>
+
+          <!-- Set max cache-fs size -->
+          <provider id="cache-fs" type="cache-fs">
+            <maxCacheSize>{{ .Values.artifactory.persistence.maxCacheSize }}</maxCacheSize>
+            <cacheProviderDir>{{ .Values.artifactory.persistence.cacheProviderDir }}</cacheProviderDir>
+          </provider>
+
+          <provider id="eventual-cluster" type="eventual-cluster">
+            <zone>local</zone>
+          </provider>
+
+          <provider id="remote" type="remote">
+            <checkPeriod>30</checkPeriod>
+            <timeout>10000</timeout>
+            <zone>remote</zone>
+          </provider>
+
+          <provider id="file-system" type="file-system">
+            <fileStoreDir>{{ .Values.artifactory.persistence.mountPath }}/data/filestore</fileStoreDir>
+            <tempDir>/tmp</tempDir>
+          </provider>
+
+          <provider id="google-storage" type="google-storage">
+            <providerId>google-cloud-storage</providerId>
+            <endpoint>{{ .Values.artifactory.persistence.googleStorage.endpoint }}</endpoint>
+            <httpsOnly>{{ .Values.artifactory.persistence.googleStorage.httpsOnly }}</httpsOnly>
+            <bucketName>{{ .Values.artifactory.persistence.googleStorage.bucketName }}</bucketName>
+            <identity>{{ .Values.artifactory.persistence.googleStorage.identity }}</identity>
+            <credential>{{ .Values.artifactory.persistence.googleStorage.credential }}</credential>
+            <path>{{ .Values.artifactory.persistence.googleStorage.path }}</path>
+            <bucketExists>{{ .Values.artifactory.persistence.googleStorage.bucketExists }}</bucketExists>
+          </provider>
+        </config>
+        {{- end }}
+        {{- if eq .Values.artifactory.persistence.type "aws-s3-v3" }}
+        <!-- AWS S3 V3 -->
+        <config version="2">
+          <chain> <!--template="cluster-s3-storage-v3"-->
+            <provider id="cache-fs-eventual-s3" type="cache-fs">
+              <provider id="sharding-cluster-eventual-s3" type="sharding-cluster">
+                <sub-provider id="eventual-cluster-s3" type="eventual-cluster">
+                  <provider id="retry-s3" type="retry">
+                    <provider id="s3-storage-v3" type="s3-storage-v3"/>
+                  </provider>
+                </sub-provider>
+                <dynamic-provider id="remote-s3" type="remote"/>
+              </provider>
+            </provider>
+          </chain>
+
+          <provider id="sharding-cluster-eventual-s3" type="sharding-cluster">
+            <readBehavior>crossNetworkStrategy</readBehavior>
+            <writeBehavior>crossNetworkStrategy</writeBehavior>
+            <redundancy>{{ .Values.artifactory.persistence.redundancy }}</redundancy>
+            <property name="zones" value="local,remote"/>
+          </provider>
+
+          <provider id="remote-s3" type="remote">
+            <zone>remote</zone>
+          </provider>
+
+          <provider id="eventual-cluster-s3" type="eventual-cluster">
+            <zone>local</zone>
+          </provider>
+
+          <!-- Set max cache-fs size -->
+          <provider id="cache-fs-eventual-s3" type="cache-fs">
+            <maxCacheSize>{{ .Values.artifactory.persistence.maxCacheSize }}</maxCacheSize>
+            <cacheProviderDir>{{ .Values.artifactory.persistence.cacheProviderDir }}</cacheProviderDir>
+          </provider>
+
+          {{- with .Values.artifactory.persistence.awsS3V3 }}
+          <provider id="s3-storage-v3" type="s3-storage-v3">
+            <testConnection>{{ .testConnection }}</testConnection>
+            {{- if .identity }}
+            <identity>{{ .identity }}</identity>
+            {{- end }}
+            {{- if .credential }}
+            <credential>{{ .credential }}</credential>
+            {{- end }}
+            <region>{{ .region }}</region>
+            <bucketName>{{ .bucketName }}</bucketName>
+            <path>{{ .path }}</path>
+            <endpoint>{{ .endpoint }}</endpoint>
+            {{- with .kmsServerSideEncryptionKeyId }}
+            <kmsServerSideEncryptionKeyId>{{ . }}</kmsServerSideEncryptionKeyId>
+            {{- end }}
+            {{- with .kmsKeyRegion }}
+            <kmsKeyRegion>{{ . }}</kmsKeyRegion>
+            {{- end }}
+            {{- with .kmsCryptoMode }}
+            <kmsCryptoMode>{{ . }}</kmsCryptoMode>
+            {{- end }}
+            <useInstanceCredentials>true</useInstanceCredentials>
+            <usePresigning>{{ .usePresigning }}</usePresigning>
+            <signatureExpirySeconds>{{ .signatureExpirySeconds }}</signatureExpirySeconds>
+            {{- with .cloudFrontDomainName }}
+            <cloudFrontDomainName>{{ . }}</cloudFrontDomainName>
+            {{- end }}
+            {{- with .cloudFrontKeyPairId }}
+            <cloudFrontKeyPairId>{{ .cloudFrontKeyPairId }}</cloudFrontKeyPairId>
+            {{- end }}
+            {{- with .cloudFrontPrivateKey }}
+            <cloudFrontPrivateKey>{{ . }}</cloudFrontPrivateKey>
+            {{- end }}
+          </provider>
+          {{- end }}
+        </config>
+        {{- end }}
+
+        {{- if eq .Values.artifactory.persistence.type "aws-s3" }}
+        <!-- AWS S3 -->
+        <config version="2">
+          <chain> <!--template="cluster-s3"-->
+            <provider id="cache-fs" type="cache-fs">
+              <provider id="sharding-cluster" type="sharding-cluster">
+                <sub-provider id="eventual-cluster" type="eventual-cluster">
+                  <provider id="retry-s3" type="retry">
+                    <provider id="s3" type="s3"/>
+                  </provider>
+                </sub-provider>
+                <dynamic-provider id="remote" type="remote"/>
+              </provider>
+            </provider>
+          </chain>
+
+          <!-- Set max cache-fs size -->
+          <provider id="cache-fs" type="cache-fs">
+            <maxCacheSize>{{ .Values.artifactory.persistence.maxCacheSize }}</maxCacheSize>
+            <cacheProviderDir>{{ .Values.artifactory.persistence.cacheProviderDir }}</cacheProviderDir>
+          </provider>
+
+          <provider id="eventual-cluster" type="eventual-cluster">
+            <zone>local</zone>
+          </provider>
+
+          <provider id="remote" type="remote">
+            <checkPeriod>30</checkPeriod>
+            <timeout>10000</timeout>
+            <zone>remote</zone>
+          </provider>
+
+          <provider id="sharding-cluster" type="sharding-cluster">
+            <readBehavior>crossNetworkStrategy</readBehavior>
+            <writeBehavior>crossNetworkStrategy</writeBehavior>
+            <redundancy>{{ .Values.artifactory.persistence.redundancy }}</redundancy>
+            <property name="zones" value="local,remote"/>
+          </provider>
+
+          <provider id="s3" type="s3">
+            <endpoint>{{ .Values.artifactory.persistence.awsS3.endpoint }}</endpoint>
+            {{- if .Values.artifactory.persistence.awsS3.roleName }}
+            <roleName>{{ .Values.artifactory.persistence.awsS3.roleName }}</roleName>
+            <refreshCredentials>true</refreshCredentials>
+            {{- else }}
+            <refreshCredentials>{{ .Values.artifactory.persistence.awsS3.refreshCredentials }}</refreshCredentials>
+            {{- end }}
+            <s3AwsVersion>{{ .Values.artifactory.persistence.awsS3.s3AwsVersion }}</s3AwsVersion>
+            <testConnection>{{ .Values.artifactory.persistence.awsS3.testConnection }}</testConnection>
+            <httpsOnly>{{ .Values.artifactory.persistence.awsS3.httpsOnly }}</httpsOnly>
+            <region>{{ .Values.artifactory.persistence.awsS3.region }}</region>
+            <bucketName>{{ .Values.artifactory.persistence.awsS3.bucketName }}</bucketName>
+            {{- if .Values.artifactory.persistence.awsS3.identity }}
+            <identity>{{ .Values.artifactory.persistence.awsS3.identity }}</identity>
+            {{- end }}
+            {{- if .Values.artifactory.persistence.awsS3.credential }}
+            <credential>{{ .Values.artifactory.persistence.awsS3.credential }}</credential>
+            {{- end }}
+            <path>{{ .Values.artifactory.persistence.awsS3.path }}</path>
+            {{- range $key, $value := .Values.artifactory.persistence.awsS3.properties }}
+            <property name="{{ $key }}" value="{{ $value }}"/>
+            {{- end }}
+          </provider>
+        </config>
+        {{- end }}
+        {{- if eq .Values.artifactory.persistence.type "azure-blob" }}
+        <!-- Azure Blob Storage -->
+        <config version="2">
+          <chain> <!--template="cluster-azure-blob-storage"-->
+            <provider id="cache-fs" type="cache-fs">
+              <provider id="sharding-cluster" type="sharding-cluster">
+                <sub-provider id="eventual-cluster" type="eventual-cluster">
+                  <provider id="retry-azure-blob-storage" type="retry">
+                    <provider id="azure-blob-storage" type="azure-blob-storage"/>
+                  </provider>
+                </sub-provider>
+                <dynamic-provider id="remote" type="remote"/>
+              </provider>
+            </provider>
+          </chain>
+
+          <!-- Set max cache-fs size -->
+          <provider id="cache-fs" type="cache-fs">
+            <maxCacheSize>{{ .Values.artifactory.persistence.maxCacheSize }}</maxCacheSize>
+            <cacheProviderDir>{{ .Values.artifactory.persistence.cacheProviderDir }}</cacheProviderDir>
+          </provider>
+
+          <!-- Azure Blob Storage sharding-cluster configuration -->
+          <provider id="sharding-cluster" type="sharding-cluster">
+            <readBehavior>crossNetworkStrategy</readBehavior>
+            <writeBehavior>crossNetworkStrategy</writeBehavior>
+            <redundancy>2</redundancy>
+            <lenientLimit>1</lenientLimit>
+            <property name="zones" value="local,remote"/>
+          </provider>
+
+          <provider id="remote" type="remote">
+            <zone>remote</zone>
+          </provider>
+
+          <provider id="eventual-cluster" type="eventual-cluster">
+            <zone>local</zone>
+          </provider>
+
+          <!-- Azure Blob Storage connection details -->
+          <provider id="azure-blob-storage" type="azure-blob-storage">
+            <accountName>{{ .Values.artifactory.persistence.azureBlob.accountName }}</accountName>
+            <accountKey>{{ .Values.artifactory.persistence.azureBlob.accountKey }}</accountKey>
+            <endpoint>{{ .Values.artifactory.persistence.azureBlob.endpoint }}</endpoint>
+            <containerName>{{ .Values.artifactory.persistence.azureBlob.containerName }}</containerName>
+            <testConnection>{{ .Values.artifactory.persistence.azureBlob.testConnection }}</testConnection>
+          </provider>
+        </config>
+        {{- end }}
+      cacheProviderDir: cache
+      customBinarystoreXmlSecret: null
+      enabled: true
+      eventual:
+        numberOfThreads: 10
+      fileSystem:
+        existingSharedClaim:
+          backupDir: /var/opt/jfrog/artifactory-backup
+          dataDir: '{{ .Values.artifactory.persistence.mountPath }}/artifactory-data'
+          enabled: false
+          numberOfExistingClaims: 1
+      googleStorage:
+        bucketExists: false
+        bucketName: artifactory-ha-gcp
+        credential: null
+        endpoint: storage.googleapis.com
+        httpsOnly: false
+        identity: null
+        path: artifactory-ha/filestore
+      local: false
+      maxCacheSize: 50000000000
+      mountPath: /var/opt/jfrog/artifactory
+      nfs:
+        backupDir: /var/opt/jfrog/artifactory-backup
+        capacity: 200Gi
+        dataDir: /var/opt/jfrog/artifactory-ha
+        haBackupMount: /backup
+        haDataMount: /data
+        ip: null
+        mountOptions: []
+      redundancy: 3
+      size: 200Gi
+      type: file-system
+    primary:
+      affinity: {}
+      javaOpts:
+        corePoolSize: 16
+        jmx:
+          accessFile: null
+          authenticate: false
+
enabled: false + host: null + passwordFile: null + port: 9010 + ssl: false + labels: {} + name: artifactory-ha-primary + nodeSelector: {} + persistence: + existingClaim: false + podAntiAffinity: + topologyKey: kubernetes.io/hostname + type: "" + resources: {} + tolerations: [] + priorityClass: + create: false + value: 1000000000 + readinessProbe: + enabled: true + failureThreshold: 10 + initialDelaySeconds: 60 + path: /router/api/v1/system/health + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 10 + service: + annotations: {} + loadBalancerSourceRanges: [] + name: artifactory + pool: members + type: ClusterIP + systemYaml: | + shared: + extraJavaOpts: > + {{- with .Values.artifactory.primary.javaOpts }} + -Dartifactory.async.corePoolSize={{ .corePoolSize }} + {{- if .xms }} + -Xms{{ .xms }} + {{- end }} + {{- if .xmx }} + -Xmx{{ .xmx }} + {{- end }} + {{- if .jmx.enabled }} + -Dcom.sun.management.jmxremote + -Dcom.sun.management.jmxremote.port={{ .jmx.port }} + -Dcom.sun.management.jmxremote.rmi.port={{ .jmx.port }} + -Dcom.sun.management.jmxremote.ssl={{ .jmx.ssl }} + {{- if .jmx.host }} + -Djava.rmi.server.hostname={{ tpl .jmx.host $ }} + {{- else }} + -Djava.rmi.server.hostname={{ template "artifactory-ha.fullname" $ }} + {{- end }} + {{- if .jmx.authenticate }} + -Dcom.sun.management.jmxremote.authenticate=true + -Dcom.sun.management.jmxremote.access.file={{ .jmx.accessFile }} + -Dcom.sun.management.jmxremote.password.file={{ .jmx.passwordFile }} + {{- else }} + -Dcom.sun.management.jmxremote.authenticate=false + {{- end }} + {{- end }} + {{- if .other }} + {{ .other }} + {{- end }} + {{- end }} + database: + {{- if .Values.postgresql.enabled }} + type: postgresql + url: 'jdbc:postgresql://{{ .Release.Name }}-postgresql:{{ .Values.postgresql.service.port }}/{{ .Values.postgresql.postgresqlDatabase }}' + host: '' + driver: org.postgresql.Driver + username: '{{ .Values.postgresql.postgresqlUsername }}' + password: '{{ .Values.postgresql.postgresqlPassword }}' + {{ else }} + type: '{{ .Values.database.type }}' + url: '{{ .Values.database.url }}' + driver: '{{ .Values.database.driver }}' + username: '{{ .Values.database.user }}' + password: '{{ .Values.database.password }}' + {{- end }} + security: + joinKey: '{{ .Values.artifactory.joinKey }}' + masterKey: '{{ .Values.artifactory.masterKey }}' + artifactory: + {{- if .Values.artifactory.haDataDir.enabled }} + node: + haDataDir: {{ .Values.artifactory.haDataDir.path }} + {{- end }} + database: + maxOpenConnections: {{ .Values.artifactory.database.maxOpenConnections }} + access: + database: + maxOpenConnections: '{{ .Values.access.database.maxOpenConnections }}' + {{- if .Values.access.database.enabled }} + type: '{{ .Values.access.database.type }}' + url: '{{ .Values.access.database.url }}' + driver: '{{ .Values.access.database.driver }}' + username: '{{ .Values.access.database.user }}' + password: '{{ .Values.access.database.password }}' + {{- end }} + terminationGracePeriodSeconds: 30 + uid: 1030 + userPluginSecrets: null + database: + driver: null + password: null + secrets: {} + type: null + url: null + user: null + filebeat: + enabled: false + filebeatYml: | + logging.level: info + path.data: {{ .Values.artifactory.persistence.mountPath }}/log/filebeat + name: artifactory-filebeat + queue.spool: ~ + filebeat.inputs: + - type: log + enabled: true + close_eof: ${CLOSE:false} + paths: + - {{ .Values.artifactory.persistence.mountPath }}/log/*.log + fields: + service: "jfrt" + log_type: "artifactory" + output: + logstash: + 
hosts: ["{{ .Values.filebeat.logstashUrl }}"] + image: + repository: docker.elastic.co/beats/filebeat + version: 7.5.1 + livenessProbe: + exec: + command: + - sh + - -c + - | + #!/usr/bin/env bash -e + curl --fail 127.0.0.1:5066 + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 5 + logstashUrl: logstash:5044 + name: artifactory-filebeat + readinessProbe: + exec: + command: + - sh + - -c + - | + #!/usr/bin/env bash -e + filebeat test output + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 5 + resources: {} + terminationGracePeriod: 10 + imagePullSecrets: null + ingress: + additionalRules: [] + annotations: {} + artifactoryPath: /artifactory/ + defaultBackend: + enabled: true + enabled: false + hosts: [] + labels: {} + routerPath: / + tls: [] + initContainerImage: alpine:3.10 + initContainers: + resources: {} + installer: + platform: null + type: null + logger: + image: + repository: busybox + tag: "1.30" + networkpolicy: + - egress: + - {} + ingress: + - {} + name: artifactory + podSelector: + matchLabels: + app: artifactory-ha + nginx: + affinity: {} + artifactoryConf: | + ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3; + ssl_certificate {{ .Values.nginx.persistence.mountPath }}/ssl/tls.crt; + ssl_certificate_key {{ .Values.nginx.persistence.mountPath }}/ssl/tls.key; + ssl_session_cache shared:SSL:1m; + ssl_prefer_server_ciphers on; + ## server configuration + server { + {{- if .Values.nginx.internalPortHttps }} + listen {{ .Values.nginx.internalPortHttps }} ssl; + {{- else -}} + {{- if .Values.nginx.https.enabled }} + listen {{ .Values.nginx.https.internalPort }} ssl; + {{- end }} + {{- end }} + {{- if .Values.nginx.internalPortHttp }} + listen {{ .Values.nginx.internalPortHttp }}; + {{- else -}} + {{- if .Values.nginx.http.enabled }} + listen {{ .Values.nginx.http.internalPort }}; + {{- end }} + {{- end }} + server_name ~(?.+)\.{{ include "artifactory-ha.fullname" . }} {{ include "artifactory-ha.fullname" . }} + {{- range .Values.ingress.hosts -}} + {{- if contains "." . -}} + {{ "" | indent 0 }} ~(?.+)\.{{ (splitn "." 2 .)._1 }} {{ . }} + {{- end -}} + {{- end -}}; + + if ($http_x_forwarded_proto = '') { + set $http_x_forwarded_proto $scheme; + } + ## Application specific logs + ## access_log /var/log/nginx/artifactory-access.log timing; + ## error_log /var/log/nginx/artifactory-error.log; + rewrite ^/artifactory/?$ / redirect; + if ( $repo != "" ) { + rewrite ^/(v1|v2)/(.*) /artifactory/api/docker/$repo/$1/$2 break; + } + chunked_transfer_encoding on; + client_max_body_size 0; + + location / { + proxy_read_timeout 900; + proxy_pass_header Server; + proxy_cookie_path ~*^/.* /; + proxy_pass http://{{ include "artifactory-ha.fullname" . }}:{{ .Values.artifactory.externalPort }}/; + proxy_set_header X-JFrog-Override-Base-Url $http_x_forwarded_proto://$host:$server_port; + proxy_set_header X-Forwarded-Port $server_port; + proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto; + proxy_set_header Host $http_host; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + + location /artifactory/ { + if ( $request_uri ~ ^/artifactory/(.*)$ ) { + proxy_pass http://{{ include "artifactory-ha.fullname" . }}:{{ .Values.artifactory.externalArtifactoryPort }}/artifactory/$1; + } + proxy_pass http://{{ include "artifactory-ha.fullname" . 
}}:{{ .Values.artifactory.externalArtifactoryPort }}/artifactory/; + } + } + } + customArtifactoryConfigMap: null + customConfigMap: null + enabled: true + gid: 107 + http: + enabled: true + externalPort: 80 + internalPort: 80 + https: + enabled: true + externalPort: 443 + internalPort: 443 + image: + pullPolicy: IfNotPresent + repository: earlyaccess.jfrog.io/nginx-artifactory-pro + labels: {} + livenessProbe: + enabled: true + failureThreshold: 10 + initialDelaySeconds: 60 + path: /router/api/v1/system/health + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 10 + loggers: [] + mainConf: | + # Main Nginx configuration file + worker_processes 4; + error_log {{ .Values.nginx.persistence.mountPath }}/logs//error.log warn; + pid /tmp/nginx.pid; + events { + worker_connections 1024; + } + http { + include /etc/nginx/mime.types; + default_type application/octet-stream; + variables_hash_max_size 1024; + variables_hash_bucket_size 64; + server_names_hash_max_size 4096; + server_names_hash_bucket_size 128; + types_hash_max_size 2048; + types_hash_bucket_size 64; + proxy_read_timeout 2400s; + client_header_timeout 2400s; + client_body_timeout 2400s; + proxy_connect_timeout 75s; + proxy_send_timeout 2400s; + proxy_buffer_size 32k; + proxy_buffers 40 32k; + proxy_busy_buffers_size 64k; + proxy_temp_file_write_size 250m; + proxy_http_version 1.1; + client_body_buffer_size 128k; + log_format main '$remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for"'; + log_format timing 'ip = $remote_addr ' + 'user = \"$remote_user\" ' + 'local_time = \"$time_local\" ' + 'host = $host ' + 'request = \"$request\" ' + 'status = $status ' + 'bytes = $body_bytes_sent ' + 'upstream = \"$upstream_addr\" ' + 'upstream_time = $upstream_response_time ' + 'request_time = $request_time ' + 'referer = \"$http_referer\" ' + 'UA = \"$http_user_agent\"'; + access_log {{ .Values.nginx.persistence.mountPath }}/logs/access.log timing; + sendfile on; + #tcp_nopush on; + keepalive_timeout 65; + #gzip on; + include /etc/nginx/conf.d/*.conf; + } + name: nginx + nodeSelector: {} + persistence: + accessMode: ReadWriteOnce + enabled: false + mountPath: /var/opt/jfrog/nginx + size: 5Gi + readinessProbe: + enabled: true + failureThreshold: 10 + initialDelaySeconds: 10 + path: /router/api/v1/system/health + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 10 + replicaCount: 1 + resources: {} + service: + externalTrafficPolicy: Cluster + labels: {} + loadBalancerIP: null + loadBalancerSourceRanges: [] + type: LoadBalancer + tolerations: [] + uid: 104 + postgresql: + enabled: true + extraEnv: [] + global: + postgresql: {} + image: + debug: false + pullPolicy: IfNotPresent + registry: docker.bintray.io + repository: bitnami/postgresql + tag: 9.6.15-debian-9-r91 + livenessProbe: + enabled: true + failureThreshold: 6 + initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + master: + affinity: {} + annotations: {} + extraVolumeMounts: [] + extraVolumes: [] + labels: {} + nodeSelector: {} + podAnnotations: {} + podLabels: {} + tolerations: [] + metrics: + enabled: false + image: + pullPolicy: IfNotPresent + registry: docker.io + repository: bitnami/postgres-exporter + tag: 0.6.0-debian-9-r0 + livenessProbe: + enabled: true + failureThreshold: 6 + initialDelaySeconds: 5 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + readinessProbe: + enabled: true + failureThreshold: 6 + initialDelaySeconds: 5 
+ periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + securityContext: + enabled: false + runAsUser: 1001 + service: + annotations: + prometheus.io/port: "9187" + prometheus.io/scrape: "true" + loadBalancerIP: null + type: ClusterIP + serviceMonitor: + additionalLabels: {} + enabled: false + networkPolicy: + allowExternal: true + enabled: false + nodeSelector: {} + persistence: + accessModes: + - ReadWriteOnce + annotations: {} + enabled: true + mountPath: /bitnami/postgresql + size: 50Gi + subPath: "" + postgresqlConfiguration: + listenAddresses: '''*''' + maxConnections: "1500" + postgresqlDataDir: /bitnami/postgresql/data + postgresqlDatabase: artifactory + postgresqlPassword: "" + postgresqlUsername: artifactory + readinessProbe: + enabled: true + failureThreshold: 6 + initialDelaySeconds: 5 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + replication: + applicationName: my_application + enabled: false + numSynchronousReplicas: 0 + password: repl_password + slaveReplicas: 1 + synchronousCommit: "off" + user: repl_user + resources: + requests: + cpu: 250m + memory: 256Mi + securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + service: + annotations: {} + port: 5432 + type: ClusterIP + serviceAccount: + enabled: false + slave: + affinity: {} + annotations: {} + extraVolumeMounts: [] + extraVolumes: [] + labels: {} + nodeSelector: {} + podAnnotations: {} + podLabels: {} + tolerations: [] + updateStrategy: + type: RollingUpdate + volumePermissions: + enabled: true + image: + pullPolicy: Always + registry: docker.io + repository: bitnami/minideb + tag: stretch + securityContext: + runAsUser: 0 + rbac: + create: true + role: + rules: + - apiGroups: + - "" + resources: + - services + - endpoints + - pods + verbs: + - get + - watch + - list + serviceAccount: + annotations: {} + create: true + name: null + waitForDatabase: true + diff --git a/Openshift4/artifactory-ha-operator/deploy/imagestream.yaml b/Openshift4/artifactory-ha-operator/deploy/imagestream.yaml new file mode 100644 index 0000000..12d9a2f --- /dev/null +++ b/Openshift4/artifactory-ha-operator/deploy/imagestream.yaml @@ -0,0 +1,6 @@ +apiVersion: image.openshift.io/v1 +kind: ImageStream +metadata: + name: artifactory-ha + namespace: jfrog-artifactory + diff --git a/Openshift4/artifactory-ha-operator/deploy/namespace.yaml b/Openshift4/artifactory-ha-operator/deploy/namespace.yaml new file mode 100644 index 0000000..1be0be1 --- /dev/null +++ b/Openshift4/artifactory-ha-operator/deploy/namespace.yaml @@ -0,0 +1,17 @@ +kind: Namespace +apiVersion: v1 +metadata: + name: jfrog-artifactory + selfLink: /api/v1/namespaces/jfrog-artifactory + uid: 402ec7e9-3ca2-11ea-bd94-0ef0e3c74fbe + resourceVersion: '523038' + creationTimestamp: '2020-01-21T23:03:34Z' + annotations: + openshift.io/sa.scc.mcs: 's0:c23,c2' + openshift.io/sa.scc.supplemental-groups: 1000510000/10000 + openshift.io/sa.scc.uid-range: 1000510000/10000 +spec: + finalizers: + - kubernetes +status: + phase: Active diff --git a/Openshift4/artifactory-ha-operator/deploy/olm-catalog/artifactory-ha-operator/1.0.0/artifactory-ha-operator.v1.0.0.clusterserviceversion.yaml b/Openshift4/artifactory-ha-operator/deploy/olm-catalog/artifactory-ha-operator/1.0.0/artifactory-ha-operator.v1.0.0.clusterserviceversion.yaml new file mode 100644 index 0000000..0a3b20c --- /dev/null +++ b/Openshift4/artifactory-ha-operator/deploy/olm-catalog/artifactory-ha-operator/1.0.0/artifactory-ha-operator.v1.0.0.clusterserviceversion.yaml @@ -0,0 +1,745 @@ +apiVersion: 
operators.coreos.com/v1alpha1 +kind: ClusterServiceVersion +metadata: + annotations: + alm-examples: |- + [ + { + "apiVersion": "charts.helm.k8s.io/v1alpha1", + "kind": "OpenshiftArtifactoryHa", + "metadata": { + "name": "artifactoryha" + }, + "spec": { + "access": { + "database": { + "maxOpenConnections": 80 + } + }, + "artifactory": { + "accessAdmin": { + "dataKey": null, + "ip": "127.0.0.1", + "password": null, + "secret": null + }, + "annotations": {}, + "binarystore": { + "enabled": true + }, + "catalinaLoggers": [], + "configMapName": null, + "configMaps": "", + "copyOnEveryStartup": null, + "customInitContainers": "", + "customInitContainersBegin": "- name: \"custom-setup\"\n image: \"{{ .Values.initContainerImage }}\"\n imagePullPolicy: \"{{ .Values.artifactory.image.pullPolicy }}\"\n command:\n - 'sh'\n - '-c'\n - 'chown -R 1030:1030 {{ .Values.artifactory.persistence.mountPath }}'\n securityContext:\n runAsUser: 0\n volumeMounts:\n - mountPath: \"{{ .Values.artifactory.persistence.mountPath }}\"\n name: volume\n", + "customPersistentPodVolumeClaim": {}, + "customPersistentVolumeClaim": {}, + "customSidecarContainers": "", + "customVolumeMounts": "", + "customVolumes": "", + "database": { + "maxOpenConnections": 80 + }, + "deleteDBPropertiesOnStartup": true, + "externalArtifactoryPort": 8081, + "externalPort": 8082, + "haDataDir": { + "enabled": false, + "path": null + }, + "image": { + "pullPolicy": "IfNotPresent", + "repository": "earlyaccess.jfrog.io/artifactory-pro" + }, + "internalArtifactoryPort": 8081, + "internalPort": 8082, + "javaOpts": {}, + "joinKey": "EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE", + "license": { + "dataKey": "artifactory.cluster.license", + "licenseKey": null, + "secret": "artifactory-license" + }, + "livenessProbe": { + "enabled": true, + "failureThreshold": 10, + "initialDelaySeconds": 180, + "path": "/router/api/v1/system/health", + "periodSeconds": 10, + "successThreshold": 1, + "timeoutSeconds": 10 + }, + "loggers": [], + "masterKey": "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", + "name": "artifactory-ha", + "node": { + "affinity": {}, + "javaOpts": { + "corePoolSize": 16, + "jmx": { + "accessFile": null, + "authenticate": false, + "enabled": false, + "host": null, + "passwordFile": null, + "port": 9010, + "ssl": false + } + }, + "labels": {}, + "minAvailable": 1, + "name": "artifactory-ha-member", + "nodeSelector": {}, + "persistence": { + "existingClaim": false + }, + "podAntiAffinity": { + "topologyKey": "kubernetes.io/hostname", + "type": "" + }, + "replicaCount": 1, + "resources": {}, + "tolerations": [], + "waitForPrimaryStartup": { + "enabled": true, + "time": 60 + } + }, + "persistence": { + "accessMode": "ReadWriteOnce", + "awsS3": { + "bucketName": "artifactory-ha-aws", + "credential": null, + "endpoint": null, + "httpsOnly": true, + "identity": null, + "path": "artifactory-ha/filestore", + "properties": {}, + "refreshCredentials": true, + "region": null, + "roleName": null, + "s3AwsVersion": "AWS4-HMAC-SHA256", + "testConnection": false + }, + "awsS3V3": { + "bucketName": "artifactory-aws", + "cloudFrontDomainName": null, + "cloudFrontKeyPairId": null, + "cloudFrontPrivateKey": null, + "credential": null, + "endpoint": null, + "identity": null, + "kmsCryptoMode": null, + "kmsKeyRegion": null, + "kmsServerSideEncryptionKeyId": null, + "path": "artifactory/filestore", + "region": null, + "signatureExpirySeconds": 300, + "testConnection": false, + "useInstanceCredentials": true, + "usePresigning": false + }, + "azureBlob": { + 
"accountKey": null, + "accountName": null, + "containerName": null, + "endpoint": null, + "testConnection": false + }, + "binarystoreXml": "{{- if eq .Values.artifactory.persistence.type \"file-system\" }}\n\u003c!-- File system replication --\u003e\n{{- if .Values.artifactory.persistence.fileSystem.existingSharedClaim.enabled }}\n\u003c!-- File Storage - Dynamic for Artifactory files, pre-created for DATA and BACKUP --\u003e\n\u003cconfig version=\"4\"\u003e\n \u003cchain\u003e\n \u003cprovider id=\"cache-fs\" type=\"cache-fs\"\u003e \u003c!-- This is a cached filestore --\u003e\n \u003cprovider id=\"sharding\" type=\"sharding\"\u003e \u003c!-- This is a sharding provider --\u003e\n {{- range $sharedClaimNumber, $e := until (.Values.artifactory.persistence.fileSystem.existingSharedClaim.numberOfExistingClaims|int) -}}\n \u003csub-provider id=\"shard{{ $sharedClaimNumber }}\" type=\"state-aware\"/\u003e\n {{- end }}\n \u003c/provider\u003e\n \u003c/provider\u003e\n \u003c/chain\u003e\n\n \u003cprovider id=\"cache-fs\" type=\"cache-fs\"\u003e\n \u003cmaxCacheSize\u003e{{ .Values.artifactory.persistence.maxCacheSize }}\u003c/maxCacheSize\u003e\n \u003ccacheProviderDir\u003e{{ .Values.artifactory.persistence.cacheProviderDir }}\u003c/cacheProviderDir\u003e\n \u003c/provider\u003e\n\n // Specify the read and write strategy and redundancy for the sharding binary provider\n \u003cprovider id=\"sharding\" type=\"sharding\"\u003e\n \u003creadBehavior\u003eroundRobin\u003c/readBehavior\u003e\n \u003cwriteBehavior\u003epercentageFreeSpace\u003c/writeBehavior\u003e\n \u003credundancy\u003e2\u003c/redundancy\u003e\n \u003c/provider\u003e\n\n {{- range $sharedClaimNumber, $e := until (.Values.artifactory.persistence.fileSystem.existingSharedClaim.numberOfExistingClaims|int) -}}\n //For each sub-provider (mount), specify the filestore location\n \u003cprovider id=\"shard{{ $sharedClaimNumber }}\" type=\"state-aware\"\u003e\n \u003cfileStoreDir\u003efilestore{{ $sharedClaimNumber }}\u003c/fileStoreDir\u003e\n \u003c/provider\u003e\n {{- end }}\n\u003c/config\u003e\n{{- else }}\n\u003cconfig version=\"2\"\u003e\n \u003cchain\u003e\n \u003cprovider id=\"cache-fs\" type=\"cache-fs\"\u003e\n \u003cprovider id=\"sharding-cluster\" type=\"sharding-cluster\"\u003e\n \u003creadBehavior\u003ecrossNetworkStrategy\u003c/readBehavior\u003e\n \u003cwriteBehavior\u003ecrossNetworkStrategy\u003c/writeBehavior\u003e\n \u003credundancy\u003e{{ .Values.artifactory.persistence.redundancy }}\u003c/redundancy\u003e\n \u003clenientLimit\u003e2\u003c/lenientLimit\u003e\n \u003cminSpareUploaderExecutor\u003e2\u003c/minSpareUploaderExecutor\u003e\n \u003csub-provider id=\"state-aware\" type=\"state-aware\"/\u003e\n \u003cdynamic-provider id=\"remote\" type=\"remote\"/\u003e\n \u003cproperty name=\"zones\" value=\"local,remote\"/\u003e\n \u003c/provider\u003e\n \u003c/provider\u003e\n \u003c/chain\u003e\n\n \u003cprovider id=\"cache-fs\" type=\"cache-fs\"\u003e\n \u003cmaxCacheSize\u003e{{ .Values.artifactory.persistence.maxCacheSize }}\u003c/maxCacheSize\u003e\n \u003ccacheProviderDir\u003e{{ .Values.artifactory.persistence.cacheProviderDir }}\u003c/cacheProviderDir\u003e\n \u003c/provider\u003e\n\n \u003c!-- Shards add local file-system provider configuration --\u003e\n \u003cprovider id=\"state-aware\" type=\"state-aware\"\u003e\n \u003cfileStoreDir\u003eshard-fs-1\u003c/fileStoreDir\u003e\n \u003czone\u003elocal\u003c/zone\u003e\n \u003c/provider\u003e\n\n \u003c!-- Shards dynamic remote provider configuration --\u003e\n 
\u003cprovider id=\"remote\" type=\"remote\"\u003e\n \u003ccheckPeriod\u003e30\u003c/checkPeriod\u003e\n \u003cserviceId\u003etester-remote1\u003c/serviceId\u003e\n \u003ctimeout\u003e10000\u003c/timeout\u003e\n \u003czone\u003eremote\u003c/zone\u003e\n \u003cproperty name=\"header.remote.block\" value=\"true\"/\u003e\n \u003c/provider\u003e\n\u003c/config\u003e\n{{- end }}\n{{- end }}\n{{- if eq .Values.artifactory.persistence.type \"google-storage\" }}\n\u003c!-- Google storage --\u003e\n\u003cconfig version=\"2\"\u003e\n \u003cchain\u003e\n \u003cprovider id=\"sharding-cluster\" type=\"sharding-cluster\"\u003e\n \u003creadBehavior\u003ecrossNetworkStrategy\u003c/readBehavior\u003e\n \u003cwriteBehavior\u003ecrossNetworkStrategy\u003c/writeBehavior\u003e\n \u003credundancy\u003e{{ .Values.artifactory.persistence.redundancy }}\u003c/redundancy\u003e\n \u003cminSpareUploaderExecutor\u003e2\u003c/minSpareUploaderExecutor\u003e\n \u003csub-provider id=\"eventual-cluster\" type=\"eventual-cluster\"\u003e\n \u003cprovider id=\"retry\" type=\"retry\"\u003e\n \u003cprovider id=\"google-storage\" type=\"google-storage\"/\u003e\n \u003c/provider\u003e\n \u003c/sub-provider\u003e\n \u003cdynamic-provider id=\"remote\" type=\"remote\"/\u003e\n \u003cproperty name=\"zones\" value=\"local,remote\"/\u003e\n \u003c/provider\u003e\n \u003c/chain\u003e\n\n \u003c!-- Set max cache-fs size --\u003e\n \u003cprovider id=\"cache-fs\" type=\"cache-fs\"\u003e\n \u003cmaxCacheSize\u003e{{ .Values.artifactory.persistence.maxCacheSize }}\u003c/maxCacheSize\u003e\n \u003ccacheProviderDir\u003e{{ .Values.artifactory.persistence.cacheProviderDir }}\u003c/cacheProviderDir\u003e\n \u003c/provider\u003e\n\n \u003cprovider id=\"eventual-cluster\" type=\"eventual-cluster\"\u003e\n \u003czone\u003elocal\u003c/zone\u003e\n \u003c/provider\u003e\n\n \u003cprovider id=\"remote\" type=\"remote\"\u003e\n \u003ccheckPeriod\u003e30\u003c/checkPeriod\u003e\n \u003ctimeout\u003e10000\u003c/timeout\u003e\n \u003czone\u003eremote\u003c/zone\u003e\n \u003c/provider\u003e\n\n \u003cprovider id=\"file-system\" type=\"file-system\"\u003e\n \u003cfileStoreDir\u003e{{ .Values.artifactory.persistence.mountPath }}/data/filestore\u003c/fileStoreDir\u003e\n \u003ctempDir\u003e/tmp\u003c/tempDir\u003e\n \u003c/provider\u003e\n\n \u003cprovider id=\"google-storage\" type=\"google-storage\"\u003e\n \u003cproviderId\u003egoogle-cloud-storage\u003c/providerId\u003e\n \u003cendpoint\u003e{{ .Values.artifactory.persistence.googleStorage.endpoint }}\u003c/endpoint\u003e\n \u003chttpsOnly\u003e{{ .Values.artifactory.persistence.googleStorage.httpsOnly }}\u003c/httpsOnly\u003e\n \u003cbucketName\u003e{{ .Values.artifactory.persistence.googleStorage.bucketName }}\u003c/bucketName\u003e\n \u003cidentity\u003e{{ .Values.artifactory.persistence.googleStorage.identity }}\u003c/identity\u003e\n \u003ccredential\u003e{{ .Values.artifactory.persistence.googleStorage.credential }}\u003c/credential\u003e\n \u003cpath\u003e{{ .Values.artifactory.persistence.googleStorage.path }}\u003c/path\u003e\n \u003cbucketExists\u003e{{ .Values.artifactory.persistence.googleStorage.bucketExists }}\u003c/bucketExists\u003e\n \u003c/provider\u003e\n\u003c/config\u003e\n{{- end }}\n{{- if eq .Values.artifactory.persistence.type \"aws-s3-v3\" }}\n\u003c!-- AWS S3 V3 --\u003e\n\u003cconfig version=\"2\"\u003e\n \u003cchain\u003e \u003c!--template=\"cluster-s3-storage-v3\"--\u003e\n \u003cprovider id=\"cache-fs-eventual-s3\" type=\"cache-fs\"\u003e\n \u003cprovider 
id=\"sharding-cluster-eventual-s3\" type=\"sharding-cluster\"\u003e\n \u003csub-provider id=\"eventual-cluster-s3\" type=\"eventual-cluster\"\u003e\n \u003cprovider id=\"retry-s3\" type=\"retry\"\u003e\n \u003cprovider id=\"s3-storage-v3\" type=\"s3-storage-v3\"/\u003e\n \u003c/provider\u003e\n \u003c/sub-provider\u003e\n \u003cdynamic-provider id=\"remote-s3\" type=\"remote\"/\u003e\n \u003c/provider\u003e\n \u003c/provider\u003e\n \u003c/chain\u003e\n\n \u003cprovider id=\"sharding-cluster-eventual-s3\" type=\"sharding-cluster\"\u003e\n \u003creadBehavior\u003ecrossNetworkStrategy\u003c/readBehavior\u003e\n \u003cwriteBehavior\u003ecrossNetworkStrategy\u003c/writeBehavior\u003e\n \u003credundancy\u003e{{ .Values.artifactory.persistence.redundancy }}\u003c/redundancy\u003e\n \u003cproperty name=\"zones\" value=\"local,remote\"/\u003e\n \u003c/provider\u003e\n\n \u003cprovider id=\"remote-s3\" type=\"remote\"\u003e\n \u003czone\u003eremote\u003c/zone\u003e\n \u003c/provider\u003e\n\n \u003cprovider id=\"eventual-cluster-s3\" type=\"eventual-cluster\"\u003e\n \u003czone\u003elocal\u003c/zone\u003e\n \u003c/provider\u003e\n\n \u003c!-- Set max cache-fs size --\u003e\n \u003cprovider id=\"cache-fs-eventual-s3\" type=\"cache-fs\"\u003e\n \u003cmaxCacheSize\u003e{{ .Values.artifactory.persistence.maxCacheSize }}\u003c/maxCacheSize\u003e\n \u003ccacheProviderDir\u003e{{ .Values.artifactory.persistence.cacheProviderDir }}\u003c/cacheProviderDir\u003e\n \u003c/provider\u003e\n\n {{- with .Values.artifactory.persistence.awsS3V3 }}\n \u003cprovider id=\"s3-storage-v3\" type=\"s3-storage-v3\"\u003e\n \u003ctestConnection\u003e{{ .testConnection }}\u003c/testConnection\u003e\n {{- if .identity }}\n \u003cidentity\u003e{{ .identity }}\u003c/identity\u003e\n {{- end }}\n {{- if .credential }}\n \u003ccredential\u003e{{ .credential }}\u003c/credential\u003e\n {{- end }}\n \u003cregion\u003e{{ .region }}\u003c/region\u003e\n \u003cbucketName\u003e{{ .bucketName }}\u003c/bucketName\u003e\n \u003cpath\u003e{{ .path }}\u003c/path\u003e\n \u003cendpoint\u003e{{ .endpoint }}\u003c/endpoint\u003e\n {{- with .kmsServerSideEncryptionKeyId }}\n \u003ckmsServerSideEncryptionKeyId\u003e{{ . }}\u003c/kmsServerSideEncryptionKeyId\u003e\n {{- end }}\n {{- with .kmsKeyRegion }}\n \u003ckmsKeyRegion\u003e{{ . }}\u003c/kmsKeyRegion\u003e\n {{- end }}\n {{- with .kmsCryptoMode }}\n \u003ckmsCryptoMode\u003e{{ . }}\u003c/kmsCryptoMode\u003e\n {{- end }}\n \u003cuseInstanceCredentials\u003etrue\u003c/useInstanceCredentials\u003e\n \u003cusePresigning\u003e{{ .usePresigning }}\u003c/usePresigning\u003e\n \u003csignatureExpirySeconds\u003e{{ .signatureExpirySeconds }}\u003c/signatureExpirySeconds\u003e\n {{- with .cloudFrontDomainName }}\n \u003ccloudFrontDomainName\u003e{{ . }}\u003c/cloudFrontDomainName\u003e\n {{- end }}\n {{- with .cloudFrontKeyPairId }}\n \u003ccloudFrontKeyPairId\u003e{{ .cloudFrontKeyPairId }}\u003c/cloudFrontKeyPairId\u003e\n {{- end }}\n {{- with .cloudFrontPrivateKey }}\n \u003ccloudFrontPrivateKey\u003e{{ . 
}}\u003c/cloudFrontPrivateKey\u003e\n {{- end }}\n \u003c/provider\u003e\n {{- end }}\n\u003c/config\u003e\n{{- end }}\n\n{{- if eq .Values.artifactory.persistence.type \"aws-s3\" }}\n\u003c!-- AWS S3 --\u003e\n\u003cconfig version=\"2\"\u003e\n \u003cchain\u003e \u003c!--template=\"cluster-s3\"--\u003e\n \u003cprovider id=\"cache-fs\" type=\"cache-fs\"\u003e\n \u003cprovider id=\"sharding-cluster\" type=\"sharding-cluster\"\u003e\n \u003csub-provider id=\"eventual-cluster\" type=\"eventual-cluster\"\u003e\n \u003cprovider id=\"retry-s3\" type=\"retry\"\u003e\n \u003cprovider id=\"s3\" type=\"s3\"/\u003e\n \u003c/provider\u003e\n \u003c/sub-provider\u003e\n \u003cdynamic-provider id=\"remote\" type=\"remote\"/\u003e\n \u003c/provider\u003e\n \u003c/provider\u003e\n \u003c/chain\u003e\n\n \u003c!-- Set max cache-fs size --\u003e\n \u003cprovider id=\"cache-fs\" type=\"cache-fs\"\u003e\n \u003cmaxCacheSize\u003e{{ .Values.artifactory.persistence.maxCacheSize }}\u003c/maxCacheSize\u003e\n \u003ccacheProviderDir\u003e{{ .Values.artifactory.persistence.cacheProviderDir }}\u003c/cacheProviderDir\u003e\n \u003c/provider\u003e\n\n \u003cprovider id=\"eventual-cluster\" type=\"eventual-cluster\"\u003e\n \u003czone\u003elocal\u003c/zone\u003e\n \u003c/provider\u003e\n\n \u003cprovider id=\"remote\" type=\"remote\"\u003e\n \u003ccheckPeriod\u003e30\u003c/checkPeriod\u003e\n \u003ctimeout\u003e10000\u003c/timeout\u003e\n \u003czone\u003eremote\u003c/zone\u003e\n \u003c/provider\u003e\n\n \u003cprovider id=\"sharding-cluster\" type=\"sharding-cluster\"\u003e\n \u003creadBehavior\u003ecrossNetworkStrategy\u003c/readBehavior\u003e\n \u003cwriteBehavior\u003ecrossNetworkStrategy\u003c/writeBehavior\u003e\n \u003credundancy\u003e{{ .Values.artifactory.persistence.redundancy }}\u003c/redundancy\u003e\n \u003cproperty name=\"zones\" value=\"local,remote\"/\u003e\n \u003c/provider\u003e\n\n \u003cprovider id=\"s3\" type=\"s3\"\u003e\n \u003cendpoint\u003e{{ .Values.artifactory.persistence.awsS3.endpoint }}\u003c/endpoint\u003e\n {{- if .Values.artifactory.persistence.awsS3.roleName }}\n \u003croleName\u003e{{ .Values.artifactory.persistence.awsS3.roleName }}\u003c/roleName\u003e\n \u003crefreshCredentials\u003etrue\u003c/refreshCredentials\u003e\n {{- else }}\n \u003crefreshCredentials\u003e{{ .Values.artifactory.persistence.awsS3.refreshCredentials }}\u003c/refreshCredentials\u003e\n {{- end }}\n \u003cs3AwsVersion\u003e{{ .Values.artifactory.persistence.awsS3.s3AwsVersion }}\u003c/s3AwsVersion\u003e\n \u003ctestConnection\u003e{{ .Values.artifactory.persistence.awsS3.testConnection }}\u003c/testConnection\u003e\n \u003chttpsOnly\u003e{{ .Values.artifactory.persistence.awsS3.httpsOnly }}\u003c/httpsOnly\u003e\n \u003cregion\u003e{{ .Values.artifactory.persistence.awsS3.region }}\u003c/region\u003e\n \u003cbucketName\u003e{{ .Values.artifactory.persistence.awsS3.bucketName }}\u003c/bucketName\u003e\n {{- if .Values.artifactory.persistence.awsS3.identity }}\n \u003cidentity\u003e{{ .Values.artifactory.persistence.awsS3.identity }}\u003c/identity\u003e\n {{- end }}\n {{- if .Values.artifactory.persistence.awsS3.credential }}\n \u003ccredential\u003e{{ .Values.artifactory.persistence.awsS3.credential }}\u003c/credential\u003e\n {{- end }}\n \u003cpath\u003e{{ .Values.artifactory.persistence.awsS3.path }}\u003c/path\u003e\n {{- range $key, $value := .Values.artifactory.persistence.awsS3.properties }}\n \u003cproperty name=\"{{ $key }}\" value=\"{{ $value }}\"/\u003e\n {{- end }}\n 
\u003c/provider\u003e\n\u003c/config\u003e\n{{- end }}\n{{- if eq .Values.artifactory.persistence.type \"azure-blob\" }}\n\u003c!-- Azure Blob Storage --\u003e\n\u003cconfig version=\"2\"\u003e\n \u003cchain\u003e \u003c!--template=\"cluster-azure-blob-storage\"--\u003e\n \u003cprovider id=\"cache-fs\" type=\"cache-fs\"\u003e\n \u003cprovider id=\"sharding-cluster\" type=\"sharding-cluster\"\u003e\n \u003csub-provider id=\"eventual-cluster\" type=\"eventual-cluster\"\u003e\n \u003cprovider id=\"retry-azure-blob-storage\" type=\"retry\"\u003e\n \u003cprovider id=\"azure-blob-storage\" type=\"azure-blob-storage\"/\u003e\n \u003c/provider\u003e\n \u003c/sub-provider\u003e\n \u003cdynamic-provider id=\"remote\" type=\"remote\"/\u003e\n \u003c/provider\u003e\n \u003c/provider\u003e\n \u003c/chain\u003e\n\n \u003c!-- Set max cache-fs size --\u003e\n \u003cprovider id=\"cache-fs\" type=\"cache-fs\"\u003e\n \u003cmaxCacheSize\u003e{{ .Values.artifactory.persistence.maxCacheSize }}\u003c/maxCacheSize\u003e\n \u003ccacheProviderDir\u003e{{ .Values.artifactory.persistence.cacheProviderDir }}\u003c/cacheProviderDir\u003e\n \u003c/provider\u003e\n\n \u003c!-- cluster eventual Azure Blob Storage Service default chain --\u003e\n \u003cprovider id=\"sharding-cluster\" type=\"sharding-cluster\"\u003e\n \u003creadBehavior\u003ecrossNetworkStrategy\u003c/readBehavior\u003e\n \u003cwriteBehavior\u003ecrossNetworkStrategy\u003c/writeBehavior\u003e\n \u003credundancy\u003e2\u003c/redundancy\u003e\n \u003clenientLimit\u003e1\u003c/lenientLimit\u003e\n \u003cproperty name=\"zones\" value=\"local,remote\"/\u003e\n \u003c/provider\u003e\n\n \u003cprovider id=\"remote\" type=\"remote\"\u003e\n \u003czone\u003eremote\u003c/zone\u003e\n \u003c/provider\u003e\n\n \u003cprovider id=\"eventual-cluster\" type=\"eventual-cluster\"\u003e\n \u003czone\u003elocal\u003c/zone\u003e\n \u003c/provider\u003e\n\n \u003c!--cluster eventual template--\u003e\n \u003cprovider id=\"azure-blob-storage\" type=\"azure-blob-storage\"\u003e\n \u003caccountName\u003e{{ .Values.artifactory.persistence.azureBlob.accountName }}\u003c/accountName\u003e\n \u003caccountKey\u003e{{ .Values.artifactory.persistence.azureBlob.accountKey }}\u003c/accountKey\u003e\n \u003cendpoint\u003e{{ .Values.artifactory.persistence.azureBlob.endpoint }}\u003c/endpoint\u003e\n \u003ccontainerName\u003e{{ .Values.artifactory.persistence.azureBlob.containerName }}\u003c/containerName\u003e\n \u003ctestConnection\u003e{{ .Values.artifactory.persistence.azureBlob.testConnection }}\u003c/testConnection\u003e\n \u003c/provider\u003e\n\u003c/config\u003e\n{{- end }}\n", + "cacheProviderDir": "cache", + "customBinarystoreXmlSecret": null, + "enabled": true, + "eventual": { + "numberOfThreads": 10 + }, + "fileSystem": { + "existingSharedClaim": { + "backupDir": "/var/opt/jfrog/artifactory-backup", + "dataDir": "{{ .Values.artifactory.persistence.mountPath }}/artifactory-data", + "enabled": false, + "numberOfExistingClaims": 1 + } + }, + "googleStorage": { + "bucketExists": false, + "bucketName": "artifactory-ha-gcp", + "credential": null, + "endpoint": "storage.googleapis.com", + "httpsOnly": false, + "identity": null, + "path": "artifactory-ha/filestore" + }, + "local": false, + "maxCacheSize": 50000000000, + "mountPath": "/var/opt/jfrog/artifactory", + "nfs": { + "backupDir": "/var/opt/jfrog/artifactory-backup", + "capacity": "200Gi", + "dataDir": "/var/opt/jfrog/artifactory-ha", + "haBackupMount": "/backup", + "haDataMount": "/data", + "ip": null, + "mountOptions": [] + }, 
+ "redundancy": 3, + "size": "200Gi", + "type": "file-system" + }, + "primary": { + "affinity": {}, + "javaOpts": { + "corePoolSize": 16, + "jmx": { + "accessFile": null, + "authenticate": false, + "enabled": false, + "host": null, + "passwordFile": null, + "port": 9010, + "ssl": false + } + }, + "labels": {}, + "name": "artifactory-ha-primary", + "nodeSelector": {}, + "persistence": { + "existingClaim": false + }, + "podAntiAffinity": { + "topologyKey": "kubernetes.io/hostname", + "type": "" + }, + "resources": {}, + "tolerations": [] + }, + "priorityClass": { + "create": false, + "value": 1000000000 + }, + "readinessProbe": { + "enabled": true, + "failureThreshold": 10, + "initialDelaySeconds": 60, + "path": "/router/api/v1/system/health", + "periodSeconds": 10, + "successThreshold": 1, + "timeoutSeconds": 10 + }, + "service": { + "annotations": {}, + "loadBalancerSourceRanges": [], + "name": "artifactory", + "pool": "members", + "type": "ClusterIP" + }, + "systemYaml": "shared:\n extraJavaOpts: \u003e\n {{- with .Values.artifactory.primary.javaOpts }}\n -Dartifactory.async.corePoolSize={{ .corePoolSize }}\n {{- if .xms }}\n -Xms{{ .xms }}\n {{- end }}\n {{- if .xmx }}\n -Xmx{{ .xmx }}\n {{- end }}\n {{- if .jmx.enabled }}\n -Dcom.sun.management.jmxremote\n -Dcom.sun.management.jmxremote.port={{ .jmx.port }}\n -Dcom.sun.management.jmxremote.rmi.port={{ .jmx.port }}\n -Dcom.sun.management.jmxremote.ssl={{ .jmx.ssl }}\n {{- if .jmx.host }}\n -Djava.rmi.server.hostname={{ tpl .jmx.host $ }}\n {{- else }}\n -Djava.rmi.server.hostname={{ template \"artifactory-ha.fullname\" $ }}\n {{- end }}\n {{- if .jmx.authenticate }}\n -Dcom.sun.management.jmxremote.authenticate=true\n -Dcom.sun.management.jmxremote.access.file={{ .jmx.accessFile }}\n -Dcom.sun.management.jmxremote.password.file={{ .jmx.passwordFile }}\n {{- else }}\n -Dcom.sun.management.jmxremote.authenticate=false\n {{- end }}\n {{- end }}\n {{- if .other }}\n {{ .other }}\n {{- end }}\n {{- end }}\n database:\n {{- if .Values.postgresql.enabled }}\n type: postgresql\n url: 'jdbc:postgresql://{{ .Release.Name }}-postgresql:{{ .Values.postgresql.service.port }}/{{ .Values.postgresql.postgresqlDatabase }}'\n host: ''\n driver: org.postgresql.Driver\n username: '{{ .Values.postgresql.postgresqlUsername }}'\n password: '{{ .Values.postgresql.postgresqlPassword }}'\n {{ else }}\n type: '{{ .Values.database.type }}'\n url: '{{ .Values.database.url }}'\n driver: '{{ .Values.database.driver }}'\n username: '{{ .Values.database.user }}'\n password: '{{ .Values.database.password }}'\n {{- end }}\n security:\n joinKey: '{{ .Values.artifactory.joinKey }}'\n masterKey: '{{ .Values.artifactory.masterKey }}'\nartifactory:\n{{- if .Values.artifactory.haDataDir.enabled }}\n node:\n haDataDir: {{ .Values.artifactory.haDataDir.path }}\n{{- end }}\n database:\n maxOpenConnections: {{ .Values.artifactory.database.maxOpenConnections }}\naccess:\n database:\n maxOpenConnections: '{{ .Values.access.database.maxOpenConnections }}'\n {{- if .Values.access.database.enabled }}\n type: '{{ .Values.access.database.type }}'\n url: '{{ .Values.access.database.url }}'\n driver: '{{ .Values.access.database.driver }}'\n username: '{{ .Values.access.database.user }}'\n password: '{{ .Values.access.database.password }}'\n {{- end }}\n", + "terminationGracePeriodSeconds": 30, + "uid": 1030, + "userPluginSecrets": null + }, + "database": { + "driver": null, + "password": null, + "secrets": {}, + "type": null, + "url": null, + "user": null + }, + "filebeat": { + "enabled": 
false, + "filebeatYml": "logging.level: info\npath.data: {{ .Values.artifactory.persistence.mountPath }}/log/filebeat\nname: artifactory-filebeat\nqueue.spool: ~\nfilebeat.inputs:\n- type: log\n enabled: true\n close_eof: ${CLOSE:false}\n paths:\n - {{ .Values.artifactory.persistence.mountPath }}/log/*.log\n fields:\n service: \"jfrt\"\n log_type: \"artifactory\"\noutput:\n logstash:\n hosts: [\"{{ .Values.filebeat.logstashUrl }}\"]\n", + "image": { + "repository": "docker.elastic.co/beats/filebeat", + "version": "7.5.1" + }, + "livenessProbe": { + "exec": { + "command": [ + "sh", + "-c", + "#!/usr/bin/env bash -e\ncurl --fail 127.0.0.1:5066\n" + ] + }, + "failureThreshold": 3, + "initialDelaySeconds": 10, + "periodSeconds": 10, + "timeoutSeconds": 5 + }, + "logstashUrl": "logstash:5044", + "name": "artifactory-filebeat", + "readinessProbe": { + "exec": { + "command": [ + "sh", + "-c", + "#!/usr/bin/env bash -e\nfilebeat test output\n" + ] + }, + "failureThreshold": 3, + "initialDelaySeconds": 10, + "periodSeconds": 10, + "timeoutSeconds": 5 + }, + "resources": {}, + "terminationGracePeriod": 10 + }, + "imagePullSecrets": null, + "ingress": { + "additionalRules": [], + "annotations": {}, + "artifactoryPath": "/artifactory/", + "defaultBackend": { + "enabled": true + }, + "enabled": false, + "hosts": [], + "labels": {}, + "routerPath": "/", + "tls": [] + }, + "initContainerImage": "alpine:3.10", + "initContainers": { + "resources": {} + }, + "installer": { + "platform": null, + "type": null + }, + "logger": { + "image": { + "repository": "busybox", + "tag": "1.30" + } + }, + "networkpolicy": [ + { + "egress": [ + {} + ], + "ingress": [ + {} + ], + "name": "artifactory", + "podSelector": { + "matchLabels": { + "app": "artifactory-ha" + } + } + } + ], + "nginx": { + "affinity": {}, + "artifactoryConf": "ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3;\nssl_certificate {{ .Values.nginx.persistence.mountPath }}/ssl/tls.crt;\nssl_certificate_key {{ .Values.nginx.persistence.mountPath }}/ssl/tls.key;\nssl_session_cache shared:SSL:1m;\nssl_prefer_server_ciphers on;\n## server configuration\nserver {\n {{- if .Values.nginx.internalPortHttps }}\n listen {{ .Values.nginx.internalPortHttps }} ssl;\n {{- else -}}\n {{- if .Values.nginx.https.enabled }}\n listen {{ .Values.nginx.https.internalPort }} ssl;\n {{- end }}\n {{- end }}\n {{- if .Values.nginx.internalPortHttp }}\n listen {{ .Values.nginx.internalPortHttp }};\n {{- else -}}\n {{- if .Values.nginx.http.enabled }}\n listen {{ .Values.nginx.http.internalPort }};\n {{- end }}\n {{- end }}\n server_name ~(?\u003crepo\u003e.+)\\.{{ include \"artifactory-ha.fullname\" . }} {{ include \"artifactory-ha.fullname\" . }}\n {{- range .Values.ingress.hosts -}}\n {{- if contains \".\" . -}}\n {{ \"\" | indent 0 }} ~(?\u003crepo\u003e.+)\\.{{ (splitn \".\" 2 .)._1 }} {{ . }}\n {{- end -}}\n {{- end -}};\n\n if ($http_x_forwarded_proto = '') {\n set $http_x_forwarded_proto $scheme;\n }\n ## Application specific logs\n ## access_log /var/log/nginx/artifactory-access.log timing;\n ## error_log /var/log/nginx/artifactory-error.log;\n rewrite ^/artifactory/?$ / redirect;\n if ( $repo != \"\" ) {\n rewrite ^/(v1|v2)/(.*) /artifactory/api/docker/$repo/$1/$2 break;\n }\n chunked_transfer_encoding on;\n client_max_body_size 0;\n\n location / {\n proxy_read_timeout 900;\n proxy_pass_header Server;\n proxy_cookie_path ~*^/.* /;\n proxy_pass http://{{ include \"artifactory-ha.fullname\" . 
}}:{{ .Values.artifactory.externalPort }}/;\n proxy_set_header X-JFrog-Override-Base-Url $http_x_forwarded_proto://$host:$server_port;\n proxy_set_header X-Forwarded-Port $server_port;\n proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;\n proxy_set_header Host $http_host;\n proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;\n\n location /artifactory/ {\n if ( $request_uri ~ ^/artifactory/(.*)$ ) {\n proxy_pass http://{{ include \"artifactory-ha.fullname\" . }}:{{ .Values.artifactory.externalArtifactoryPort }}/artifactory/$1;\n }\n proxy_pass http://{{ include \"artifactory-ha.fullname\" . }}:{{ .Values.artifactory.externalArtifactoryPort }}/artifactory/;\n }\n }\n}\n", + "customArtifactoryConfigMap": null, + "customConfigMap": null, + "enabled": true, + "gid": 107, + "http": { + "enabled": true, + "externalPort": 80, + "internalPort": 80 + }, + "https": { + "enabled": true, + "externalPort": 443, + "internalPort": 443 + }, + "image": { + "pullPolicy": "IfNotPresent", + "repository": "earlyaccess.jfrog.io/nginx-artifactory-pro" + }, + "labels": {}, + "livenessProbe": { + "enabled": true, + "failureThreshold": 10, + "initialDelaySeconds": 60, + "path": "/router/api/v1/system/health", + "periodSeconds": 10, + "successThreshold": 1, + "timeoutSeconds": 10 + }, + "loggers": [], + "mainConf": "# Main Nginx configuration file\nworker_processes 4;\nerror_log {{ .Values.nginx.persistence.mountPath }}/logs//error.log warn;\npid /tmp/nginx.pid;\nevents {\n worker_connections 1024;\n}\nhttp {\n include /etc/nginx/mime.types;\n default_type application/octet-stream;\n variables_hash_max_size 1024;\n variables_hash_bucket_size 64;\n server_names_hash_max_size 4096;\n server_names_hash_bucket_size 128;\n types_hash_max_size 2048;\n types_hash_bucket_size 64;\n proxy_read_timeout 2400s;\n client_header_timeout 2400s;\n client_body_timeout 2400s;\n proxy_connect_timeout 75s;\n proxy_send_timeout 2400s;\n proxy_buffer_size 32k;\n proxy_buffers 40 32k;\n proxy_busy_buffers_size 64k;\n proxy_temp_file_write_size 250m;\n proxy_http_version 1.1;\n client_body_buffer_size 128k;\n log_format main '$remote_addr - $remote_user [$time_local] \"$request\" '\n '$status $body_bytes_sent \"$http_referer\" '\n '\"$http_user_agent\" \"$http_x_forwarded_for\"';\n log_format timing 'ip = $remote_addr '\n 'user = \\\"$remote_user\\\" '\n 'local_time = \\\"$time_local\\\" '\n 'host = $host '\n 'request = \\\"$request\\\" '\n 'status = $status '\n 'bytes = $body_bytes_sent '\n 'upstream = \\\"$upstream_addr\\\" '\n 'upstream_time = $upstream_response_time '\n 'request_time = $request_time '\n 'referer = \\\"$http_referer\\\" '\n 'UA = \\\"$http_user_agent\\\"';\n access_log {{ .Values.nginx.persistence.mountPath }}/logs/access.log timing;\n sendfile on;\n #tcp_nopush on;\n keepalive_timeout 65;\n #gzip on;\n include /etc/nginx/conf.d/*.conf;\n}\n", + "name": "nginx", + "nodeSelector": {}, + "persistence": { + "accessMode": "ReadWriteOnce", + "enabled": false, + "mountPath": "/var/opt/jfrog/nginx", + "size": "5Gi" + }, + "readinessProbe": { + "enabled": true, + "failureThreshold": 10, + "initialDelaySeconds": 10, + "path": "/router/api/v1/system/health", + "periodSeconds": 10, + "successThreshold": 1, + "timeoutSeconds": 10 + }, + "replicaCount": 1, + "resources": {}, + "service": { + "externalTrafficPolicy": "Cluster", + "labels": {}, + "loadBalancerIP": null, + "loadBalancerSourceRanges": [], + "type": "LoadBalancer" + }, + "tolerations": [], + "uid": 104 + }, + "postgresql": { + "enabled": true, + 
"extraEnv": [], + "global": { + "postgresql": {} + }, + "image": { + "debug": false, + "pullPolicy": "IfNotPresent", + "registry": "docker.bintray.io", + "repository": "bitnami/postgresql", + "tag": "9.6.15-debian-9-r91" + }, + "livenessProbe": { + "enabled": true, + "failureThreshold": 6, + "initialDelaySeconds": 30, + "periodSeconds": 10, + "successThreshold": 1, + "timeoutSeconds": 5 + }, + "master": { + "affinity": {}, + "annotations": {}, + "extraVolumeMounts": [], + "extraVolumes": [], + "labels": {}, + "nodeSelector": {}, + "podAnnotations": {}, + "podLabels": {}, + "tolerations": [] + }, + "metrics": { + "enabled": false, + "image": { + "pullPolicy": "IfNotPresent", + "registry": "docker.io", + "repository": "bitnami/postgres-exporter", + "tag": "0.6.0-debian-9-r0" + }, + "livenessProbe": { + "enabled": true, + "failureThreshold": 6, + "initialDelaySeconds": 5, + "periodSeconds": 10, + "successThreshold": 1, + "timeoutSeconds": 5 + }, + "readinessProbe": { + "enabled": true, + "failureThreshold": 6, + "initialDelaySeconds": 5, + "periodSeconds": 10, + "successThreshold": 1, + "timeoutSeconds": 5 + }, + "securityContext": { + "enabled": false, + "runAsUser": 1001 + }, + "service": { + "annotations": { + "prometheus.io/port": "9187", + "prometheus.io/scrape": "true" + }, + "loadBalancerIP": null, + "type": "ClusterIP" + }, + "serviceMonitor": { + "additionalLabels": {}, + "enabled": false + } + }, + "networkPolicy": { + "allowExternal": true, + "enabled": false + }, + "nodeSelector": {}, + "persistence": { + "accessModes": [ + "ReadWriteOnce" + ], + "annotations": {}, + "enabled": true, + "mountPath": "/bitnami/postgresql", + "size": "50Gi", + "subPath": "" + }, + "postgresqlConfiguration": { + "listenAddresses": "'*'", + "maxConnections": "1500" + }, + "postgresqlDataDir": "/bitnami/postgresql/data", + "postgresqlDatabase": "artifactory", + "postgresqlPassword": "", + "postgresqlUsername": "artifactory", + "readinessProbe": { + "enabled": true, + "failureThreshold": 6, + "initialDelaySeconds": 5, + "periodSeconds": 10, + "successThreshold": 1, + "timeoutSeconds": 5 + }, + "replication": { + "applicationName": "my_application", + "enabled": false, + "numSynchronousReplicas": 0, + "password": "repl_password", + "slaveReplicas": 1, + "synchronousCommit": "off", + "user": "repl_user" + }, + "resources": { + "requests": { + "cpu": "250m", + "memory": "256Mi" + } + }, + "securityContext": { + "enabled": true, + "fsGroup": 1001, + "runAsUser": 1001 + }, + "service": { + "annotations": {}, + "port": 5432, + "type": "ClusterIP" + }, + "serviceAccount": { + "enabled": false + }, + "slave": { + "affinity": {}, + "annotations": {}, + "extraVolumeMounts": [], + "extraVolumes": [], + "labels": {}, + "nodeSelector": {}, + "podAnnotations": {}, + "podLabels": {}, + "tolerations": [] + }, + "updateStrategy": { + "type": "RollingUpdate" + }, + "volumePermissions": { + "enabled": true, + "image": { + "pullPolicy": "Always", + "registry": "docker.io", + "repository": "bitnami/minideb", + "tag": "stretch" + }, + "securityContext": { + "runAsUser": 0 + } + } + }, + "rbac": { + "create": true, + "role": { + "rules": [ + { + "apiGroups": [ + "" + ], + "resources": [ + "services", + "endpoints", + "pods" + ], + "verbs": [ + "get", + "watch", + "list" + ] + } + ] + } + }, + "serviceAccount": { + "annotations": {}, + "create": true, + "name": null + }, + "waitForDatabase": true + } + } + ] + capabilities: Basic Install + name: artifactory-ha-operator.v1.0.0 + namespace: placeholder +spec: + 
apiservicedefinitions: {} + customresourcedefinitions: {} + description: Openshift 4 Operator to deploy JFrog Artifactory-HA + displayName: JFrog Artifactory-HA Operator + provider: + name: JFrog + links: + - name: JFrog + url: http://www.jfrog.com + icon: + - base64data: iVBORw0KGgoAAAANSUhEUgAAAMkAAADCCAYAAADjAebGAAAKN2lDQ1BzUkdCIElFQzYxOTY2LTIuMQAAeJydlndUU9kWh8+9N71QkhCKlNBraFICSA29SJEuKjEJEErAkAAiNkRUcERRkaYIMijggKNDkbEiioUBUbHrBBlE1HFwFBuWSWStGd+8ee/Nm98f935rn73P3Wfvfda6AJD8gwXCTFgJgAyhWBTh58WIjYtnYAcBDPAAA2wA4HCzs0IW+EYCmQJ82IxsmRP4F726DiD5+yrTP4zBAP+flLlZIjEAUJiM5/L42VwZF8k4PVecJbdPyZi2NE3OMErOIlmCMlaTc/IsW3z2mWUPOfMyhDwZy3PO4mXw5Nwn4405Er6MkWAZF+cI+LkyviZjg3RJhkDGb+SxGXxONgAoktwu5nNTZGwtY5IoMoIt43kA4EjJX/DSL1jMzxPLD8XOzFouEiSniBkmXFOGjZMTi+HPz03ni8XMMA43jSPiMdiZGVkc4XIAZs/8WRR5bRmyIjvYODk4MG0tbb4o1H9d/JuS93aWXoR/7hlEH/jD9ld+mQ0AsKZltdn6h21pFQBd6wFQu/2HzWAvAIqyvnUOfXEeunxeUsTiLGcrq9zcXEsBn2spL+jv+p8Of0NffM9Svt3v5WF485M4knQxQ143bmZ6pkTEyM7icPkM5p+H+B8H/nUeFhH8JL6IL5RFRMumTCBMlrVbyBOIBZlChkD4n5r4D8P+pNm5lona+BHQllgCpSEaQH4eACgqESAJe2Qr0O99C8ZHA/nNi9GZmJ37z4L+fVe4TP7IFiR/jmNHRDK4ElHO7Jr8WgI0IABFQAPqQBvoAxPABLbAEbgAD+ADAkEoiARxYDHgghSQAUQgFxSAtaAYlIKtYCeoBnWgETSDNnAYdIFj4DQ4By6By2AE3AFSMA6egCnwCsxAEISFyBAVUod0IEPIHLKFWJAb5AMFQxFQHJQIJUNCSAIVQOugUqgcqobqoWboW+godBq6AA1Dt6BRaBL6FXoHIzAJpsFasBFsBbNgTzgIjoQXwcnwMjgfLoK3wJVwA3wQ7oRPw5fgEVgKP4GnEYAQETqiizARFsJGQpF4JAkRIauQEqQCaUDakB6kH7mKSJGnyFsUBkVFMVBMlAvKHxWF4qKWoVahNqOqUQdQnag+1FXUKGoK9RFNRmuizdHO6AB0LDoZnYsuRlegm9Ad6LPoEfQ4+hUGg6FjjDGOGH9MHCYVswKzGbMb0445hRnGjGGmsVisOtYc64oNxXKwYmwxtgp7EHsSewU7jn2DI+J0cLY4X1w8TogrxFXgWnAncFdwE7gZvBLeEO+MD8Xz8MvxZfhGfA9+CD+OnyEoE4wJroRIQiphLaGS0EY4S7hLeEEkEvWITsRwooC4hlhJPEQ8TxwlviVRSGYkNimBJCFtIe0nnSLdIr0gk8lGZA9yPFlM3kJuJp8h3ye/UaAqWCoEKPAUVivUKHQqXFF4pohXNFT0VFysmK9YoXhEcUjxqRJeyUiJrcRRWqVUo3RU6YbStDJV2UY5VDlDebNyi/IF5UcULMWI4kPhUYoo+yhnKGNUhKpPZVO51HXURupZ6jgNQzOmBdBSaaW0b2iDtCkVioqdSrRKnkqNynEVKR2hG9ED6On0Mvph+nX6O1UtVU9Vvuom1TbVK6qv1eaoeajx1UrU2tVG1N6pM9R91NPUt6l3qd/TQGmYaYRr5Grs0Tir8XQObY7LHO6ckjmH59zWhDXNNCM0V2ju0xzQnNbS1vLTytKq0jqj9VSbru2hnaq9Q/uE9qQOVcdNR6CzQ+ekzmOGCsOTkc6oZPQxpnQ1df11Jbr1uoO6M3rGelF6hXrtevf0Cfos/ST9Hfq9+lMGOgYhBgUGrQa3DfGGLMMUw12G/YavjYyNYow2GHUZPTJWMw4wzjduNb5rQjZxN1lm0mByzRRjyjJNM91tetkMNrM3SzGrMRsyh80dzAXmu82HLdAWThZCiwaLG0wS05OZw2xljlrSLYMtCy27LJ9ZGVjFW22z6rf6aG1vnW7daH3HhmITaFNo02Pzq62ZLde2xvbaXPJc37mr53bPfW5nbse322N3055qH2K/wb7X/oODo4PIoc1h0tHAMdGx1vEGi8YKY21mnXdCO3k5rXY65vTW2cFZ7HzY+RcXpkuaS4vLo3nG8/jzGueNueq5clzrXaVuDLdEt71uUnddd457g/sDD30PnkeTx4SnqWeq50HPZ17WXiKvDq/XbGf2SvYpb8Tbz7vEe9CH4hPlU+1z31fPN9m31XfKz95vhd8pf7R/kP82/xsBWgHcgOaAqUDHwJWBfUGkoAVB1UEPgs2CRcE9IXBIYMj2kLvzDecL53eFgtCA0O2h98KMw5aFfR+OCQ8Lrwl/GGETURDRv4C6YMmClgWvIr0iyyLvRJlESaJ6oxWjE6Kbo1/HeMeUx0hjrWJXxl6K04gTxHXHY+Oj45vipxf6LNy5cDzBPqE44foi40V5iy4s1licvvj4EsUlnCVHEtGJMYktie85oZwGzvTSgKW1S6e4bO4u7hOeB28Hb5Lvyi/nTyS5JpUnPUp2Td6ePJninlKR8lTAFlQLnqf6p9alvk4LTduf9ik9Jr09A5eRmHFUSBGmCfsytTPzMoezzLOKs6TLnJftXDYlChI1ZUPZi7K7xTTZz9SAxESyXjKa45ZTk/MmNzr3SJ5ynjBvYLnZ8k3LJ/J9879egVrBXdFboFuwtmB0pefK+lXQqqWrelfrry5aPb7Gb82BtYS1aWt/KLQuLC98uS5mXU+RVtGaorH1futbixWKRcU3NrhsqNuI2ijYOLhp7qaqTR9LeCUXS61LK0rfb+ZuvviVzVeVX33akrRlsMyhbM9WzFbh1uvb3LcdKFcuzy8f2x6yvXMHY0fJjpc7l+y8UGFXUbeLsEuyS1oZXNldZVC1tep9dUr1SI1XTXutZu2m2te7ebuv7PHY01anVVda926vYO/Ner/6zgajhop9mH05+x42Rjf2f836urlJo6m06cN+4X7pgYgDfc2Ozc0tmi1lrXCrpHXyYMLBy994f9Pdxmyrb6e3lx4ChySHHn+b+O31w0GHe4+wjrR9Z/hdbQe1o6QT6lzeOdWV0iXtjusePhp4tLfHpafje8vv9x/TPVZzXOV42QnCiaITn07mn5w+lXXq6enk02O9S3rvnIk9c60vvG/wbNDZ8+d8z53p9+w/ed71/LELzheOXmRd7LrkcKlzwH6g4wf7HzoGHQY7hxyHui87Xe
4Znjd84or7ldNXva+euxZw7dLI/JHh61HXb95IuCG9ybv56Fb6ree3c27P3FlzF3235J7SvYr7mvcbfjT9sV3qID0+6j068GDBgztj3LEnP2X/9H686CH5YcWEzkTzI9tHxyZ9Jy8/Xvh4/EnWk5mnxT8r/1z7zOTZd794/DIwFTs1/lz0/NOvm1+ov9j/0u5l73TY9P1XGa9mXpe8UX9z4C3rbf+7mHcTM7nvse8rP5h+6PkY9PHup4xPn34D94Tz+49wZioAAAAJcEhZcwAACxIAAAsSAdLdfvwAACAASURBVHic7V0HfBzF1Z83u3un5iLJGGzAdoyDgWDAgIxtSdd0xZiaxEASWiDARw9gei8hQCghhN5CJ4BDMHGMdbqiU7ExpgZCb4ZgTLFsg2Wr3O18792d7JN0ZfeaTvb9f7/T3u3N7oz25j/z3swrshCCEfg1nJvqG44F4HWMCQlPrwQmWgKt3oB6laqyIorYRiHTnzBBTI6ngLGjIqeBAUSOZpNjpdXrvDLg8DyuqqoYuqYWUcTQIEwSU73jtC0EGYSJjMOjZq/jQL6AH6POU0N5bF8RGaDe49pHkthM/G27u4W6eKmt6ZuhbtNwhIwdXzJX2y+hWSMFfmWqtv8Xj3/IQ7uKyBA4+58uS3BX32cj8A6LxzWz2d740VC2azhCrhtl3QcJsqOWwiiEzZ+1ZNYdy+Ys+yHXDUsEi98+Gxi/HlszDT/2oO60Aph6p9/W1DRUbSpIcDhnwJkq4OIsPJ49FM0pFNQ8UKNUTKk6hgl2NHZo7PusmwnxzKqelZd8MOeD7njXyAByhY46RpcYR/4Sj3/LSot1YrbPsb0RpCX4dkTfOdSdDsUecbDJ63S1NLg9Q9GuAsXYOOd2zXsrCgjYR+oqplQ+xOg5xApOAOeOM0wche9+F+86uWNd72tjqpW1+L5SS0VCCAsbIpIYGduXxRAkBpxzQJGRFUmyBV/ia1S/MwAdQ9OUoYfF7/qFxOEZFtXDBwIATqh1225od/o+Hvid/PY83wazx3UEl9jf8fOYVJXhzeKNUHlBD4j/GBgLsjj/KA4M+wxBkwoWgrHX8Jns2e+cUP89VO0ZSuy70DZ61EiFZpC4BIkCFFk6AI+DSUJ/AvZGr7nRvBdXjH9DFriSVym+zKC9GaHd6vnK6nNdhoy4kQ1caRCsqJDGAAmxAIAfv/kzY6+0tIaeYbahbNXQYNRI+Qg8jE5VTgj4Pt75zcwKuAJfo8xyoNlrPxP73014qjTefdQQPJp2a7MAv63xT1av8yVUTC/Ej7/Gl4Sv7pAQFw9luwoNAVvTIqvfebpgMA9//f91bgzOV6/yBYe6XUMD2FdDoVWre1c2x/ui3/QT3Sz8a73f7paF9CiO1QfEfi8EuwFnnfa025ol+Bvcb+PhWLPPcQMwOIKB+lJLg+eVoW5XocFvdd+Dh3uGuh0FgGRiVh/OT7i6Fe9kq9XzAb/GVmcyKSehTHMkztUGlbEHA7bGRzJpabaBo+W7eLhmqNtRRKFDvJtsHxBnhvuarY1PJ/o+IcOiU/O90VcRRQxbbOgMPl5RrvyekfVIf3QIIe5oaQ1ez6yJr9cyDRVRxLDGioN939csqtmjvLxqDgh1Jyagi4F47/uO1cvfnvd2T6rFjIxJYvU5rAz4efiW9JdynLw+wEb8vadjw1/b57VvyvT+abRnlgB+C74tYaq4trnBvTDfbSgUYMcoKy+tPETt7nK3zG1ZO9TtGUqsOHjFRjw8n861GZHE4nedA8BvY/0EPpiOn6YbqiuOq3XbDo+3OZMrWNyWnUExvgR9m2gcnrd4nUciUf6RrzYUEirKKp9iAIdJpSXN+DGJQFFEMqRNkmiH/BNLrBH9zCArfvMS876BOYHv0q1HD0Ax0LJw7C4zBw4PmBvNS2mJOx9tKBRYmiw/Adl4WOQTWCwe10+Lxo3pIW2SgCLTDreSvBDbiRuMZC5yXrr16AMcFudkJZKZxK+j89OGwgBwQ33sZ8HVvfGw1ZIExWz8f/kswVinYGpTwNb0YbbunTZJenrV9wyKRPsqyW3sAU6Y/Kjt0k+P93WlW5cWYB0lEycoO8dvAvwaH+KDfluTP5dtKCgA2z/2IwcYN1RNySXqmmx7KrJ8Dwt71EY6IzCuWn3O+wKtwbOzsYGaNklI17D6nY9gk05IUXT0xJ3kI/H4WLp1aUF5+XcqY+MTkRbwId6DRNon12QtGACri/2ID2b7oWpKrmD2Og9SZOU5Ntg6hOPIeJq5XtmA7y/MtJ6MFPeeNZ1nGKrKx2KDDkpakMP5fAF/MpdejbSUZ/W7yClszwRFpk7cWbkSj5fmqg2FAtQDt+PGkr1jz4FGn6HhAvK6lCWIR5AtAHZmzaKaq6MrW2kjI5LQEi+/xna42STfgC2azxKLXtPMlfb/w+PdWu9t8bsOZUJEXIoBnmy2Ni5OdY0Q6i0A/JGEBYBdYG5qWBhweJdrbcdwBFeMh9Ch/1kxZUgakwNQTAazyX4//qCJCRJBqaKMIKv1zzOpL+N9kqjMdwF26mUQ8TMZGbcghz9Y3JZ/NTubU1oRW/zOi3HkuyEajYLwa7PP8QtUxl5Idl2zrelRi8+1K16WaLaQuSw9Nr1p+r5vON7oTNWOYQsOxww+CT/Nf0NyA7PJQdbNNRqKdvR+8eNXmdaXtR13HOmfx478DgdOexLxRJ5KkI1PT1swzRbe5UyAaQtsFWOqlasHnAa8742c84WpIrY02xovQ7GL9mZuZ3EJC7uOlseSqc2xKf6lYYmIIqtY4ny1vbnRPG64L4XXLqgtNVRVXJs6JANB3LLi5BW9mdaZVbMUWnbDUXrmKGm7hwHgyEEFgNWOqR53J747JdE9FCXcpnhLy1NNbgeJDCmXMf3Wxr+hXL6IG42otAHVNZAsx+DM147E3urs0pAgpHfF7ULcoNCK17/y26LswlBdfiYedkpVTgjxfEtr8OZsbKFm3XaLxBgc8X9l8trfQJHpejZINoaTUSRaiSP+9fGuf/0w3zqLz/kEkuy4gd+pIDQr/tENzAv2XWi7ftQI5TzsNrTKYdzcCsbuMHmd77c0uJu13rPQYfY6XZzDEYlL8BlsGJOkrqlurCKXp1p4oXiLNyNBLsuW/0xODByjItGNOFr/Bzvjk2yAVxjqDNfhd+txJL8z3vW9HZ2nGqrKO7EgyZ5l+PqOCXYLduhP9baFSIeHK1EUfAFFtpfYlgAJisThHyiemNscvnf03rfQQDvsXDamcogjr9Mr8tGeXAAJcitL5mEo2PKgys5ptTe+nE0PzJxaAdOKFHbOAzjAv0gXiPmKVPI7rH5nt9/qfmDgdVHDyNP5NbazZ83qHrlszrK1mUaPRFHwdavPcThqNy1sy/9dpcjyEovHZR3OJhv4jPfgkpH811PshcB+FHFmOAapw//xcBzk4ixIhPERU8XlAYfnuVxEGc25qTzpKabFpplSacmz+CPZY75CnsB9OKNIiXSD6HTZwbIUidhva1qGxMTZKzYmFewIEgvgjOIcbjNK/Yv1I6SKsrOx85AIU
qbhEm5g7GA8PpTjpmUV0Vny4ThfdQomru78eO1fwgp6jiJW58WfhMy0cVY40Fwv30E7oTFf0Yxyt8XvHN1sdd+Yj7Zs6AxeX1GuUHyl2NBE41DhbcWZ5sh8BLnDZyEfcADbQZFge8alEahHGCLfhIKqynsAxMZQKLRRVVmXJCk9QnQGQyEDQhoJwElp3RUl73p5RNlcfK8nbhqZbNCq3rAhSWS100ArpgNCXgkPsuKkNqtnZa7tm/PmdBWdFU7HmeNDJAbJln0KPdCeiNXnHB/o8Jyb61jD5IBj9btITzp1wFejURR7yepz/fH7jlV/SLZMrQcUEMzcZKcl8XoBrBb/2z3NJmU3/GwYXFpiUvipYFeWY9c7ylEeH1AUNK2BDgYwE4oue+EM/5/0bpA/0GBiMikUK2t6zOkeJtRLAq3e2/OV7SDvnokoWt2ORPkCf+InWKxJAcBZ5mrHJBQhjm49tPXHnDZCFQ8zDgNJQpCwE10xpnr8L3BWuQBnlZfSuX2tu7bKIJXPQVK4zF6HA0+FjQvT7NbZBu05Xc4odkEBg2JUm+rtj+IzmxtzejUOofMC9qZ2PYq5xes8Cv/pM1hYehBPBdZ4btMzGA+J+y5tPGInbMCf60XWPyDeIXJF2Ssmd8O8Fqf3v7mqHxW8V7Hzrsa3OyQo8jNs22KccV5DLfDBTZt6X1g+17c60f1IJKislPfnnFnwR3UalAraDZYLhBTxMM/scZkD9sZApjey+FyX4aS2d6Ch6ahsKc2RIO6OR/Dtb7acFe+K3p45AQ0WG7FAHfRkJMj9W87APuZqO1kfJNyrG4gh83EnJbreb6+TmdTIYh30ge0mKfIr+M9dHGjx3KVlSiVbntmzZ49pc7R9q6Vu+jGRAMvw7c9TFN0PO/p+ZaXK3diej4SA9/EckaULO0YZ6gU4Q8AuY6oVeuiSlroLBMAl9rBpsWn/dN16wyN9leM2fA7hANxmT8Nf8dCaacNoR91cZafIJbG+QS+HNnXN1dvWqPvEHwd/AyfXe1wPh5eKNWBIA0FQ6KJav73WwCQSa6bFfFWG/8gd5nrH0Ra//bxmq2dponvU+e0TsdyTSK59rV5nrb/B/YaWuoVg7+EPnIokfaCVOLIJ6x9wOl29oDAwWSoteXHfhbZDontJmmHxuHbHkf4+fLvZsUswTiuXGZEESVtpqK5YGHtfvPOyru4f5yybqz+TwYQJnILSxQ3diyqfEw+FTxIChS7FH8o0aqRMeyn9fCAoOB4wqR11GB/qEfd3MrZkRYN7PX1F5uBgNP6fwqSLsVx5pDzcjH/tgyqJD02zztYNqBs1QlmGA9EZOBD5UpXG0XemxMVpIAGJQf36Do4XWqIkJgQRTyotJYLEGGKKN9b/EJz7+mHppfro7la/KDFKcWNHo76o+fcfcpIQaCSrWVTjKi+v+ifJ9AO/x3M2VLRtFfi/oZhEBnqcG0to55wPKNhAbpwoyqUc0YCR/MxTFdv6geItDkReq8/1Pj4QIsq72OG/xffdQqhlHGBnLLQXnjfJEpuQZPlh70RfpILVbz8EJIkWcmJt7D7DBhw4cJajCDBlZaMPxV9ufEjA620OTyCRLrRsju9/Fp/jVABOLhpbVhMFe39NR+8TWttXECQhkGPM1CVTDx1vmPgMRfhIUIx+ofHJ7iOAX42HhpQVAmynu5FbM8JkYbtt+UiPSNcgsvMBi207JFvgGIjwEm+9cg2ARHGcYytbJ0LsoKX2/pYB5GhVUV5Fs80E+iwB2avZV6DIfUR4vyQOmm1ND9U12doVST5FAExFgrzTxdQ/UTYFre0sGJIQKBYrPrh5JpPyRJIcjklBsw7OJgemWr5FJXyv4a1SFB5KSpQ5eHhES9lZS2w7mU3KU6yf/hFGkAn1yGZ703uxJ8nMXzaU0G86YEUSalDk9tW6a2vane1x86+0OXy04JJ2MJKCIgmBNh2RKMfgA6Qp9Fdp3QT4zXwBdydaC5/eNL18tDy2GIcqy+AgKCLNI6nKWbzOX5YYFTJFGqRUC8HOaR5g9RBdEqYN4ERL9pMNcgXNRhn7s8dDwZGEECXKsUgU+pgOUX5mrrafjse/xvtytDRmoFlKEVkB2C1+19xErtazG22TjAblZuAwL/714vZmm/uugWfxtyTbtOSDGrBfsG2JJIQ+opjqZQMA/EL/HeA6nKIXDPTEoyVjBaRiJPocASXYp80+x+9a7d5/kEJNZjm1bsd+EmcnI0HIR6gk3nURJynP/IFUsPjtJmDSVRqqLs+89fFRsCQhEFGmLZj26zFV455PGZFlMEZxQ8lf2ADzC5lJtPuaMutREWljJAf+nNnr+M7qd63GI0VpqUp6hWDtvR2dxwzcOK5ZZBtTUabQHpiWjdpFGbQ5KQqaJAQyNJz8qG3exJ2VJfiwzDovPwJHtXkBW9OCvhNCqPcA8EHLzEVkHdtFX0lBaeo6hThoxYDg6lE9hGK1pXTVxbu0rf8heEG6DU2FgicJgQLK1Xidh1UA84cDcusAjmp31TXVtfSZrFDEFavPuTDJMvPWAlri/CdTBWUkfk8IWCe4OiJiai/2wN65O1kko3i0B0sgAuUB/u7uHw5fMWfwZqGp2k7u3QcmuZaWml/G/+/pQJtnQS4tgocFSQi00z7b5zjQyKANZxQ9MaTIL/oRlI0P6tt06gH1DAOTaHe/OgdN7cEOuEoA+wE74EbasQTayBJkih9encl1h9xErs7rf+y9LYG5yev4erHvw9QlU41j5UnTJYnNBCHqkDi0x5QHcVQ8sKp75VnxUrBZfI6jkcwXDb6EfYzP894QhF5otXo+2Xw+9a5YRhg2JCGQ26nF45oLEiNbrpTptGNACVPJG/HP9IFMYSx+1ynYeTNNyUCsexWY8KlCLBe0A9zm+V+yUS1iTqPszphEo/l0AOyYDPBzNizpRXNIZb/TEwsg2klfjr5upw2+ujp5JufsYGDhiDc/ybxd/bAORd4zm21NT8b70up11gDnA126V+GDvqy1temxfPmQxGJYkYRAvuj1HtchshQ2oUgVwS8GcAPqJwHydQ/fx9r4PBLlIeyZv9PbBiTGf5AYD27cFHxu0A5zilEtGsWFXi1958IKaqnsxDmHxAt66Z3haPa6ItDquSXTThR1jmujF86+l9Q3WmdwWT6JRczWtbgIJ8Pfe1jo/HabJ27AOEoPARKQAr75dxWC3b+mo3d+eId8iNJrDzuSEMjE2eJ1ngAcyKRa6whs5ABPT1tg26/PJGF98Nvfj5bHUiwqTXZHSA43Y6Hrm62eltSltYO8JfFAu89P1TxQo5ROrmyQOK3KhZe+RyW/WrwbCoaOa3F4X8t2J4qKpxQSdrlpselCqcR4AkqPp+kUd6mNb2Bvn58sqn+t2zbFoCikP/VFsyHHu+ObbY3/TLP5WcOwJAmhucH9jMXn3A0ArtZ+Few6plr5G46QR1IHoBhhdX77YQqTyLckSWoC8YYagnOz4aSUCtGIg0voVbOo5syK0sp5OMPQSE46VOyAsBpH2Vu+7ll5Z6LUytlE1JfjNn4Nv72+
vuFQHHB+T8mBUlz2jirU61tbvc8mm+EsfvsMJAjZZPXtqK8O9QbntDi9b2Wp+Rlh2JKE0GL3XGv2OmgW0OoXQpiH+gmZMNxAH8gwzuRucEiK3MwG6zkbsSNe2tLhuTPXvvfxEI2GTsugj6GIOR5FTMpLWRFSxWebPl27PBshPPUi2tkpJvMLZHCICv8xEBEyaXaRcED5jJE1sRDPB+ze5vBslGCGI/3HbJLPAiaRY1TfgsY3SCwzEiRrSXgyxbAmCf0A9S/WHy+PKJ2Ko9oe2q+EP1h9jjf7jCDJVRjlYRNI4eiGu0QLvSpC7DeFEo8LRcxVjJZ0+5DjFR0twDa9iYc3ExZIMHfQLrypyX6o2aTQQLV7zFedoWDwIBQdC4YghGFNEgIFjcAOPg87+Aqm3TSBM+BP4nUH9JEAj+/VeJ37lQOE3T2/7vn8vHyIMdsSwnHCRpQdj7M/xfOdOqiAKk4J61YFhmFPEgJ1cIvPcUbS3CSDUYnE+vdMn2PWy7amNXQi6vV4Rk4auQ3D7HPsCoyfIY8o+y1LlJqDsSf8De6n8tgszdgqSEII5ybxu6woHx+v47KflgL/59QlUx3FWSP7QJ1lf4mLSzjww1lyN9B1Gzp7z81Xu/RiqyEJYX3w2zNGy2MpcvruKQtvQf04w8SHUU4+JhdxZLdFULginKUvlSVyxU69Qi+YuCm6DF6QGDYkodhWVVWSBUelyp7eDf+O54VGS7o4tR+DZWj3OHn67BgAwG/MHgdtcOXEHyEfCK9+cXaqAHYYROR9yuTlU4PBW/KV/s7kddolDldyaZC3YTKs7+7+UXOawKFAQZMkvBNdLv8cu/GhY6qVGajYXRPoaLov2XIs7ahb/M5rgcF1uioDSmnn/LbZ6r4l44bnEWR7Nc448RJZAiJ4acy4TblY5nFZ/qXV73qoq/uH+cviGBJmirC1bqX9l4LDfCTIDL3XC8GeyUW7somCIwntOJftMvownA2OryhXKJ+Ggk/y3109wf0o+oWWe7S0BG80mxTyP5mpp24k1p+wQ62hTFnptD3fQLGmdrxxEtk5JRMviTcnlRhH2nCkP76lwd2WjbopoY4sl51ornL8H9YwKV3DM6HmNnV5NlAwJKFIG6Wl8pkVU6pod7kvz8ZGnD3OCTg89+jRF8j+CMWu45FotIavw74r3KEesHidG2lHX8d1eQWJntVVyvUo1tBSqtaQJpNxpA/gIHA/KslXpKMDUDifivKqg3D0P0aRyynogyEzs0zxVavTszRXKROyhSEnicVt2ZkpxsvKShValYo1I/8ACXKEv8H9djoPkfKiWH2uP+CPGDftXBJIwOFx7EwGnFEe119zbmH1Oo9E0ZOC8E1I43Ii1Kk4Qx9t9TsfVYOhJ1qX+lckMhmhWb1icuWeDEQ9A25HgtAWZlnWoswI9uJwWCwZMpLsu9A2etRI+SpQjJSvxDjg6yUoQx+Vqay64ZOOm3FmokAS01IW7g9S+h+z+pxHdfcGz1zq8n2eSTuyASTHNMbhL/jKRpSXEThpnon6yplmk2O9xe+i4ORfAvmiYJ/AXluF7yfgs6OwroacxcMHNa2o/fnGkJAER+ljR40Mj4aD0peRaXRLa+8Z6lXLMk4KSbZN9R7XKbLE2lk64RoBDjIaFCsq9DevD35381DkfqcIIwZFvgJnNwqikIvfaxRSYHbsiTyFIwsGf+xuzk9VmSGvJKHVqvIy+b6E0U8oa6q96aJsTsFkVo+ixd00cqZ5izJU6K8aLW93ktnnunrjJx2P5sOwMBp+5yJ8ncjiJvwZ3sAf+PWc56HJEvJGElSk90VZ+HkWm2YhBvjQbm22NV6YCyUu+OOmS+WKssNxiNQQVCARYEcO7AEUQS5H0t20LvjdY7mYWUxepwXrOQvJQT74wymdgy4AEwkzBcQDJUbatMmo6o2Anw3khSQo8zo5cCJIXANEJMiTLQ1NF+RqlSNsBOl3nYFixMIs3A5JDnePlsfeYPG5HmUQwrb7VmQy+9U12XaTZflXOGP9SuIw2PBvK4RQtaU96ANtHuPzvg91s7cCbcH7s5WjXQtyThKz1+niHKhzDlTOIxBs+Rdf9p6kt5PRJpYeH49ma+OLSJRn0o0xHAejIglspLPNXsdKVPIX4//SJtSeZS1LW1YmWjGihEOzZlkmy7K0PzBOtmY2RVZ0evoNfwS5qoskhE1dvVeVlSofmE3KKSZ3w/H5csrKKUnwH9lbUuTnWCKCMLa2u7f3VxQySOs9w74IHsfZdSOtzfhR10MKBjvPVuRyWsbUE0RCCyaGswoDOw24kZlNjh4UyT7HGacDRwH634KCAeo2bDv8bmc2dCF8CgWrEkWBTwaKJ4DP9Q58rpeHs6H5XNcHWnv/mOtZJWckiW48kQ96wpi7OHecp2d5ddaSWSNx1KYoG+NwFPmL3jZR7C2Lz3EOANecmyJN0LJpNCsWxPwtIor2dC/sYeq9BiZRbGDayLzGZFIaDlhsO0pPyge9yBlJKsorL2FJzCVQtmpptTc9qlUPodWeEuNIiqTxM7xYS2zYuKBQNla/i/ZODk73HkVkBvztdSntsaBwUPj7UcQbCuBBg48JRbDlKLXMzVUy2pyQJGKYqCTNBxEKMc1LvSavc7LRoFCkjfAus8rU1zNpX1d372klRoUsVVNEIikiFxBCTZl6Lvn17HXUB/ePOTUBxa8AKvUOrTkz9SAnJCkvV45hSWM0ieZWu1uT4kaGdKhH0EPdbIYBIDLKd0iGkmaf6xwObFgYMm5l+LLV7k3L1KgPkd9/kABbzTg01rpts9udvo8zaeBA5IQkwMRByaVw0OQ/QLZD5VOqSPHvt7eiqjzjzbWArfERi89pBQjvZBeRJ+As8FzGm8UCjAm613YGRV44vWn6jGzuYeVIJ4H9kny5oWfNBk1h8it2qSK9xjTwPAdBpMnY5Lu3o/NUQ3XFz/BtsvYWkT2oIQjdn+lNBBMTIeEgDHuMlre7Ed+cpeVepOumWjzKOknClqNTqpIFXG5rHxBmPx7IOhgU48XxvsMHVIuHuLFk9YDaYW40H8KVkqXkE5Hp/YpIiWdbrZ4PMrkBbQGYPY7ZyUvBaajI36tFkceZ51azz/E4ZRtIVCZXq1sJZS0hhCZdBBQDKf7xfUEAfjFtwbRzKHdJes3bAsqEhXKsw6AoFJ0xaWbfIjLC+q7u3oxziJg9DXUazIskLkvkqZkyKAgAexcYf97qc5zvtzXdFq9M1klCxn/RXOtxw4biVJlyJIkmcDk6SZHtq6t2OBaPD6XZzH4gRQ+JYo7Goo1rW1ZERiAl5DitnqXJIBg/X8ueE+qa82oW1ZwWjYKZ+H5CvI9lUTjht1r9zkq/1X3FwDK5mklotogbelTLylTtaAf5fyTNkgTAr521ZNZz2fKPJqIcsNg2s6xUfh7vPisb9ywijF5VqCeiOPNi6qLJYfa4GrjEDtVYvKy8fBS5bydfbgYRk1MTLscZZe3AGSUnJBFC/Qd24vgkEZBSH5FATNWwRz2+xDiSMrUem0YT44J2bacumWodb5x4C9Z
PQeqKG+WZ4dNgiB3dam/Sbac1EGQFbFAqdEkOoPLdWCqSMNbfJAr4zRav85PmBvdmY9ickOTrni8WYEe7iczLB34nQKT0ORcAIzT2zmOQ+W8kkiXTQTRI3Vlmr3MRB7i3qNCnBRRxxG0bOtfekErc0YLIYlAlxRzQJwoDVKQqIkJSKfR3SODA4cF6j2tFNP5ybkhCHc3ic1yGot4jg1sFSVIcRMCBrddcGTLf6nWu8ze4H9bVyBQINLgbaxfU7mGoqriAwg3hqZQPvAjK0xhObnRTtmypwntlu1Q+QTni9V5LKflSFuLquDhOq2Nkid2Lx7BolzPbrRa79zGzx37EwNTSSJzdUl6sivfDmQa1gWPZByw+V2WzrfHWNJqaENGl6mvNS8z3cKPxPGz96SxxLNttGR8IJIe6qeuhaB6TrICiwoyZEjaSTcvOTqV+lAIoLSTy3zkEpRQrJR7KGUloVxXlyOMMSvmyLRaxjOidUikOrPW8a652fMPi+MAnAP6v7BaLz7lH58a1Z2Vjiu/XnkgKt0vqX6z/o1xeeizWRmT5WTbrxdCg7wAAIABJREFUGIZAUUQ8iwroswG79+VsRz2h1HDV1QqlFt8rzVt0btq09hUN5RLGZhPAaZ8udyQhkDfZ7Eaby2gI70FEbK+A1VII/mT+zeRMZfU7cQSBc/TUh7PUiRXlVTNQnjw2mjsjq4i2mUxq7iZ3ZGTmsVjrL/HzztmuqwARQha8xgTzhFT2r/b2plc2O5Zl2aPU7HP9lkvsDpbEzUIDnk81WFI/lEeUJQzJirKMnTa1c+6ZSFv+WFEdKMbF+HFPfJVIFSUUCOLRZNd1dQdvLTEq8cINpcKeKE+SQ86NPR0bbtCyu58OoglKX+ecn1fvaZiOhDlIoNwMkZFpawjc0C3IqY28LZnaGgxubOkXf1m3hpAalibLT0Ay/JUPENHTgBrqDd6UqhBKBbQCm6x/caYYD8qLj3uzs/nLGq+zrgJ1B/x4BI74tLyalCS08YQd/Y/kWJNGlQpedwUq3cfiiH9Jq937TK6CoEXv+3r0dd30punlI/h2NZyzmcDgAOxk+2BbaFWmkJeT1+E/8S428D9hYrDQ6193f/lWvtJRhEf0irILQDaez/RF3IwPIe7S5FvC4bRURfCZHJC3aCnRBDlHWr3OU7Bxt1JwiGZrozvZNd93rLpxTPW4OWlv7gGbhFr90yav4wKs75qWhqZ/5TpiYNT6tDn6CmOmz1GNv/weKuNTeWQPiHzaJ7HIkmZlLtsTBZnvrMbe84UQ7Aus/3N8CJ9xpn6ysSv0Xi69+pIhrJhXy6ehyEOrh0k3j3Xgv993BC9NVcjit9uASRpiRYspeQ9O529w34/i10tMMfyOL+DeZMEcyDar1m8/wsAk8mRLJ6xnGDga7IuHhWav4y2cWW7p6Fj9bDbsvrQimkmrNfrqh9oFtaVihHF7hcP2IMvVQqhVAHw0/jgVqDmWYuPLcTZSAMIiXMzvJYjsNNJ34ZtuEIDyt7oBr1mngugQeBRc/U7t3fTdsjnL1hZSONHZPsf2BoDTxlQrFAtNb876ZPhOhNjP+1KQJwIF4zCbHCnFsQhg1JBEcCTxCw9XU2PZvORlyV2z3m93ykwiz8SUeywpsDcH/viY6vE3WXyuB4MQejidgATZRFRn+jz6yg0KJCC1CUVuicMpRuBHMv26Zip0qEKdE7A3pUwEa6p3kJi1f6pyUfQMacDsZLm9Y0Hm1eEHDNCII2s2wu+Mx5H5SoVJFGjOh53o8WDnpn8Ol4iCwwl1fvtERUhkrHosEiT1HllaEF/1BoNz2hy+d1KVxAF3qgySxlkk7I//zZBHldeKlgb3p7Xu2gMMSsXf8aMjS7fl4Z1czuwoF99r8TlfwsfyvNrVvTibm2LbGmiPAyRxuGAwDweiGpbYQypjUFAJ0dM9r80V+DpV2YgOpJCnq9YszVTBO8OGJARagkQ95kBzleMifOxXMx0p3zSgNBKjGH4hlZYGrT4Xhb1ZrDLV09rqfVPrrLctgsJHlZZWmjiAA9kwFyS2G8slMyIIoVZ2U+cnHVdric0cdb+g3XtdGQZUIXK7mZgLRBX9P9Z7XItlSdyPP0ZNDqqR8Rc249HMcZpBJW+N1e9qxVErEAqxtq7POt7KR9DsQoVpsakSjCUzUKOsw4+WivIq+g2yrWMkwztMqKc025qWMVvqwmFvRq/jQabfvGXNpk/XeocdSfpAO+o4OswyVTtOxhGL9lLG5rA6WoE5HOs5XJYYq5hS1YWkeRNHsleQOG+IYPCNtT98+14+V8zyhRqvc1Q5V/dmTNqXCTEd54cZONOSvdNQ7PusE0xcv2bN13dofdbhGcRrvwff/lZ3bYLdT4PhsCUJITqr3DtryaynSgwjz8efjcxYMjFl0AoKUzoTlf+Z4Z6iyGxM9fheq9/5Efadd5E8H4IQnwiufhwKSR+3O5u+LqQl2IGYtmCaoapqh0kAfAoIdYpgfBf838jebo8KDjuTh0+4YNZSXOnGJnym93Ru7L1BTxq7yY/aSswTHJST8Yg06lyv9nT9md4Ma5L0IeqdeGXNItsd5eXyOTja0RJfVZ6boVCkDjzuEe5L5BHKJEYzD071GyOxgckoEGhTbzWtmgghVjOVf43KzppQqHddryyvfa3N82M29B8yMZd2GT1SCoYqFYVXgYBqclMQnI2HsOEo7IDt2IF8fpDgtAcV8aoAXkimAT8gOR4MqezWPt8OraC9mAkTwqk+UgSNSAChXhk1bN06SNKH6Chz+fSm6TeM5tsdj+rEGdGOO9Qo6yNQ5GNEqSXXauqa5M0gS0pYqEf9R6AoR7v2P+J0H94sxMK0YdgDEV9xFfAXFNSb8TK6A55TQIRj45JJB71GokgYCcqt8L4qw6/+BCggOvTHJ/gv3r1BwENRSw1dQH11ppFzWsVKLx+NYN5Aq/fOPn1nqyJJH6KmIXejwnaP2dNgwQ51UjS71nCI5k49tyL8gv4ntxz7rxxBbIHhC9IxXkRyPIwdtDGd2ZT0D1OV42KcvSlWdHorn4J93hvq/E1s/VslSfoQ1QNop95PiUxHjFDmcRBHYY+i5JxbbRapYQTsiGIpDmLPiO6uZ/rEGy0rVgNBm4TmajuFrc0kiMd3QmVzKftA7MmtmiSxiKYRo2XABym+sCSVH8qBUco1MvoeDjPM1gKcMUQLEmNhd3fvC5mGGZq6ZKpxvGHSRTJIFO0zk99xdW+w19Hm8L038ItthiSxiI4UYcKQaftIaYyVA7hQsbWj2JIj04ltGIJ9LIB5hVCXqBu6vNky/zH7HIcjQW7O2FRJsPdFqBtnkObP4n29TZIkFlH9ZVH0xWr99h1lAVZgvB71aloZIWVbf3rrbRco4oqPRNhZC1pDEPK32bJrRGr1OVCk4jdw4OYs3O6Frp4fjk8Wv22bJ8lAkNUxHp6IvsK7y7y0tAZ/+Bk409QgcfZhEXfd4a8qZwerkBVv4vNZgY/klc7O3lf07GXogcVvnwFMuoIBz0YCpk6migsDDs89qfawiiRJgaihoz
v6CoOIw4wle3MOe+K4uTuAwNkGKKuX1sAVwxFrkAjvCQHv4vDwHvard6C3+63NynaOQO4U9fUNcwH4uUiQNFT6uHipu6f39HA0eQ1raEWSpIEocZpZjPchgXI6yvLIXTlXpwDAJGAwUUScxXaCSDBuMm8p1BmI/Ndpw44U6c+R/F8Kpq5kID5SN/V8lG+r6HDOzdLK35pN9t/3i7aTGT4QqrgoNjqjFhRJkkVE5dpXo69BoJWYathxnMHAxzImjUG5fSzOQmMgkpZuNMrwlfi+XESyhFUAHUV4xaYEqUV7jbRszWNeaswrNGDzcSMwsVGEPRbFBuxo64A8FhlbDyrrEBy+D4XYt5Lo/uaLVfxrPRmQc4moSHVCRXkV5bVMlsJDDz5F0eqmDZ+u/Vs6hqlFkuQR0cAKn7NceiEOQ9BiiUHAUahrnIgEyWY8s1eRHH8OtAWfDaexbkjvJkWSFDEkiBLjlwyAYhjMxtkvWyuIFGr1uWAI7m+1N0YCdadJjj4USVJEXkA+HRSfDHW1uSj6HWigSCXZIwaJikvw9eyGzo6F2Y7gWSRJETmDudE8jikGMwfuMnsdc/DUDlm8Pe29NKGO9VLox41NuYxPUCRJEVmD2efYlQuoE8BqgYGJG0qyEbSDEIwEzxPLVQFLhRAtFPMgS/dOibyThAK1QU9Ic5Q+Awt1J1qLr/fbd5EYmxgKSe/r9TcoIn2Q6FTXZP+JRGF5OJsuGOwLjO2HM0b1YHN8XehGfWIl3o/ysH8EqniXgXh7w8Z1b2VbhNKDvJOkhPGHwci1pvRCyJSKOm5QY0lIJwKwS8mxyepzfY6/zstCsJdDKlv+1Ve9bxbKsubWhp89aykXAkaGQPwgqew9JMo39NzZFhP/8rCfixB9OkcQdREK3LAJQFBn39C3FK2C+FZV+eqent7/rTi4+ZtC9ODcesStSEaqSUiaXxFpJk5Qei1+19v4Y71O8W3x2b8pOje9VYytlTmiERKzHrW/UILoDcTWQ5LBUKLhTUkUYBIlBRpRplr9rs9xRKMgZu8Ipr6HI9uHQ7GjXMTwwdZMknig6X8yzjaT8Xho3wqkVFrKkDzfMsE+Ekx8grPSZygKfIry8GchAV9907vyq3xFWC+i8LCtkSQZxiI5xgKD2vCnMH8gHCdkvHES+Z1/R+E08RwFcPgWiUY+KZ/6re57hrLRReQeRZJoA0lsY/EQju0Fm0PrkHk4K5JkK0eRJBlAMMjbWn0RQ4ciSTKBYJ8MdROKyD2KJMkIanEm2QZQJEkGAPJTKGKrx7ZGkjd7g73HZutmq1axj7N1ryIKF9sUSchbT0s2pCKKiMU2RZIiikgHRZIUsU2BghFGY61pRpEkRWwzoKB2o6Wxz1o8LmezvXFQONNEKJIkh4gE6ZbswPiOzbbGvyQrS342pQJcgrO9UHnaDgC4oFhXqlixZm3w36lyk8cD+X0csNiyo6LIu0kcxolwVBZRAirbKECsJ9u0zk71w1wFk9MKSvhZVSVNwH95JyHUkRx4Kf7fIRXERgDxbXe3+sXyuc1fpWtGT7laKqZUns+AU0Y0BSR2Hz4bs9b7FUmSRVDo/9rRjhpJEnMYA8eokcoMFnnGal2TrbHN4Xt/4DWRjLDydaXAT6OwQbFpFMIHDmxMtfJDvcfV0GpvjBuqaGAbTNUOF157hNnrcLJIvK8t96O/fEvyhopyiVn9zg8pJ4cA9e8tDb7WXPt0mJeYt+OK8RDBwYmtqMH/7yd9zQOIuqBwMj+NtLHEKFEipPUWv+stYKJFDUFzcN2Gpe3z2jclqydMjsmVR1TsUnUFfoyN8Vxf73H8jkXiQadEkSQZAjvveM6ZE39Tl7naQamzq+P45nFZUk7A40WxJ80+x77YQZ7Ft7ukqKa0C8RHyQpEO8TJ2IYL8eNEff8F7IpN3hWYdJrZ43jf7HPd1NrR9Hg03V7WYPHbTVjH+dxYciCLJCDSg1FY3sTILVhilxuqK7qQ3OTo1aYK8RoXsDIUCvVIMh/JBPxUANRWTKmirAFxo2ri73VTXVPdiwPTLMRDkSQZwOp3/VOWwukbUv7eAOznLIYkFq/zMJzyn2KRQHTJIVhLsoxPJGujOPFIViIdAtsNx/K/IdnOQRKfGLA1vZ7pLSl3iMz4nUgQe8bt24ISbKwFjxYOEZ9hiUe7szYX4ipZLr8Nj8ekKlgkSSYQ4rMYk+BU+Cnl8Vtqa/oGyfVr4EAJL7U9fxD/jneadA6Tx3EpiihXa76XduyNusFSq9d5tr/BfX+6N0ER6VSZSZSgs+BywOAPdzS277Fma6M7WbkiSTKAECyAFDlXa3kjY3uZvU4D10MQqicEiwaeC+seHvv9WP+JWu+TBowol9yHYs3OgQbPlXp0lUjudPttqPuck8P2ZQwQ7K7aBbV7JdNviiTJACEBK/Q8QMG4HfUXygys57IPmu2Ng/QRczXlJodcEiQGcDnqKkSQK7VegTPcjXgoaIKEAWyKUlV2HL67L1GRIkkyAIUxQtFpDYtEi08JHPUvYDoj7ohocqFY4Mh+Cd7mZD33yRjArkDR639aRC/Ut45CcfLCfDQrc4gHWlpDDyXL01gkSeagUV4TSVgaIalQwulHEpPXWSdxuFbvfbICDnegMv9qMmU+vLxrLLkrn81KE9/g61y/1f00syYvuK2RpKTWbUs7quBGWPv1QJMGHOlXYs+fmXnT4mJdW1uwrS/gczSJJmWYTed368LWvoE8/R4bXYV03Y/pV6aNqMw/he3YO1FgDG40Xsq0DxoDsQaf53/xef6ARwNElrJpxS6deHe0GjiC9U/l14n/+8sC2IL1wW8f12qesk2RhEIMGRQl6X5DMhhYNQXV+1e/k4J9lcO0PEvCKQOiQILMTyOJJiqk4rqu7h/vis0LWP9i/QhpRNnvsOm0Cz1Sx/2mjjNMOh+P1w/8osbrHFXB4RSd7aNnGFBVdlVre1PrwPzts5bYdjIalNNQVJ3Pwmsfmu/56sovew/ecUelShLdRhESG1rntn6fzkbpNkWSnADEdxkmrwpHRBdMtAshVnOAUvyBp1NKAiHUzaIWZdEqMY68QOe9NwgWcjVbPUsHfhEN0nc7ik9unB18TEcqO+ywF8/0Oe592da0JvZ8OYh5+G3qfZ9YCPHnQKvn/DA54uyiRFNYX1bvCe9JNeL7Km2NZA0TdpaOarY1Prr5XJrB74okyRAUqjPNBAI0oj3ULdTLae9k4JfTm6ZfIHfLm0e9EuOI/2P6Mz+dHo8gsUD94l3Uc45BPadJx30rShicySKz0GYAcJfO9vkDds98LaM7meRY/K4TcDjSnMoN23MVX8CfyNRyoEiSTMEhnbCpm4Qqjm1ucP8jUYFYeTmy5+A4VWcdrwYamp7QMnq2NLg9Vp+zkVEue40AgFOxA/5hQAfUp5sJmj+0iz/N1sYXrT7XcpwlDtB4yU9M1Q7S6JJuFqZCkSQZAn/hTp3CVjd2q0MCdrdX6wX1bgflk5+sqxZVPKCnAwrBHkAxSs9MsIO5qoEWTsMzEPlpjJbH7qTj+u8Cdm+bb
hEIBA4soJUkyCdBdmJFkgwlsB/2hOMMawT22jsD9kbNBCFwKSzr62pXdzCoq2OsV79bMpqP7WG0PqERgvHDWZQk5XL1GKavke+ko0QLBm/p22iC6XrrGIgiSTJHMHWRGAimyysuAnDovGBlOEe5DpB4h6LMGzpEGRS5hKXvvSSgXA9FhBBpBSgHFa/TMShhm/TMbnFRJEmG4JyWLKWc3Z+MIo3A99B1kRBpBbugFBXY/TSTBHvg7qbFpspoRH5dgwXoWc6NASpARp1PW99qWxwUSVLgUATss9lDSiuAaXZNHYAPdZYHMJQSgdtZT89aZtSxNwkwSWddYXAQu+gUPZM6ZmlBkSQFDg6wdxqXfZFWZUL8j2m2/I+Cs93xb3vr8tY1ZpODslhpHbl3P2CxbYflc32r9VUITl3FBfta3/0HY1sjySeoKt6Y7sVC8Ley2RhNADFFr9KuChE3x2TK6xj7Rq/gyKPuwbSYa/G73sGWztB6aWmJchYeL9NaF5kUGRTll7oaCOxtXeXjYJsiCcrc3wRsjZr8mgsHoF/xVHlaJJEi9k66IPrt1As/tlcrSWjSOt/scfm0rPbVumurFKXiGaZTlxGq8OgpHw/bFEmGKcbpvUBl6aygMdYTCv5o4Iqua7CjV26uNxh6TpLli5KVHwADl9hii8/5R7Wr6454KfnId798cuUvDHIFSQCTdDWOsbWdm9a+pPOaQSiSpNAhwtlsdYFztSedqiRJ1n2dEGLzyN7i8L5m9bnasb21Om5hAICrpdLSS61+1+t4v3eReD8IwUbicULFlCqamfQYYMY0jt2bjdTWRZIUOkCU6tVJKGpIOlUJAfr2fNjgpVw1FJzPZbmd6V8Xp03MmUiYsGmL3vWDOPh6gxA3ZXwXViTJMADk7TeS1G6JSTq3LwD67ZoHHN7lKD5dR7NDFpumF2pIFScmizCjB0WSFD669F4gSZJm05JY9DJu1H2hEIOcr1rsnmvNHns1EuisdNqRKZC181sa3Euydb8iSQodgm3SrZOkuZstMdB9nQAYROKoTdbZVq/zfcbhZpaFXW+NCGGLzm22uv+azZsWSVLoALJx0qmTCP2dPVyVzHVfByyxDZa/wX23yetcJHEgv5Oj8aVv6UwfvhQs9Ntmq8eX7RsXSVLwgK90XwFCr3NW9Do+Ru81qOwn3TFHsYd2/0+Y7XNcbAQ4Dmu5BD9XJrtGJ9bjbHvH9x29f0onqLgWFElS8BD/0zuTgA5X3P5VqeMY6HOzBGCrUpWx+O2zDYyfgaUPZuku5/YHrcItQ5nuqe7uH56K9d3PBYokKXCoAv6rxzKcAABj06uN76D7EqEOipTfByTHDBDSbQBSrU6eEwleEELQUraM/89GrGidYLAShcn/dKr8tWytXGlBkSQFDi7EW3o3DVAESs+HAthPdF4huno2/DfeFxa/83Jg0tV4T337JUIs7A0FL46XpmKoUCRJgWNV78r/jDdO0mNdS4P21HTqAsH21DnifxhP1KEIk8DgOp3V031O8tvcz+m8LucokqTAQUHgrD5nq54gDdjR9+HXcD4whlUyhAPfGSftp6txQgxaSarz2ycqTLomXvGkt2LsiFTR3YcKRZIMA6As/hIO8HqCNIwy19nJD+UNrReMM04yM50RHYUIx8HqB1lIxyBJ9S71bmxZ06TL7z+fGAYkgZymJhsO2NTV+0xZqXIL05OugYeTC2kmCeI3OpvVsWbt14MsbFF92l/nfQhl5mq7B/WYRhBiI4vYkPV7qSB6APhGEWKdIdG7prubrc5XrsfCJ4lIN+7e1gPy3rP4XW6cTeZqvQZ1glNqFtX8SYsVrMVt2RkU41F62oQj19Nvz3t7kCElnq9KzzYRLHidhUH8NFV9+RNBovcKU+RwpjFa4XoPa21TQ7Cktb3Jr0fE1Iq8kwSYkPSs+wtgWc3bN1wBQr2FAddMEsS4ivLKOznnv0sWuod0F7PJcTfTJ2oF8Za3JfhOt61ZBhjFwgHxYCaX2Pn4f3xp9TluWdXzxX2JAnqng/zPJAC6lgSRVNv8TELw25r8Vr9zGT6RWdqvghNMHrtx34W2M14/zLdu4LfTFkwzmCIEOVhnc55oaXB/GrdGJj7U7YeePeyMA8lfxhsnnWpqajiW/FuycdOhELd0rpsXZ5I+oOR5DjAJiaI9+jAA/GbUSGUOimuP40cfqOJrVcBILokZY6rHU5rmVJl/B2Jdt1AvTthGlXmAszN13jPb2F2S5TYUx47zWxszXlLOO0lw3pd0yqy6HYG2VjRbPa9Yfc77sOefpvNS0hN+z+gVzo9OSE9zEEI9P16A7z50frp2ccUuVZ/j7SelVUH2QOLj02afQwRsTQsyudFQzCT6lhnT9NfeWtHT0TnfUF1Rh2+n5btu/C2ebLY1PZSszIqTV/TWe1y/liVGARjK89S0RJA48MesXucH/gZ32lFThkJxH61zFMuJZedwBWWJrffbfy4zqQ0/6re1ShtixZo1QU2R7VvtjS+b3A21KPLcpdPfPRcoFRwe5NfwWemufA3BTAL6zLghrdQGWzVarZ5PcLQ+MDpap5t6TQ/+q3Z3H/T2vIDmAavF6aUYZXWzG22TDAblYBwWaTefgldTpq68zjAUC8xUZ/85vk2Y6iIZ8kqSaJ4NbZmK+pCFCHxbI3C0ftPicc1CJXlxGiniNANFrBZ106bD44X70YJo4O47Y89R1i7ZWD5OiizhVqBUVMFUUQq0qMNBwvcKjv4GEGoZDpLbYTffkUXESwqpmpZrMnA4gw0Hkhyw2EL/rE6dRF2Zo+YMe1B+9xqvc/8KgNvx42+zfHsUTcSfOj9eeyXpGdm8cdQoUrcPSDglnjLiYCTSeSwyM+mBaabPUT0whZ0W5JUkBoO0j95ruIC4pthFRBD1qzjB7HE9waVwCNd0zEL6AWcPXyjELmq1u19Nlb45n4iS6ym+gD9jrrY/QPtAOi6nODC0x7QoZckByCtJgHG9eTY2BNZ63k14PyFW4k3btNcPaaUkSAY1KNZLstDcBuyC6QWzTgEKFYri7AyT12YFJp2Epw5i+rwAyQ7qBcFCD9BSs976UUfaR5JYnZayoIp2f4Nbj11ZP1AKutoFtWcYqsudUVFMEzgLp7AoXJJMftRWMnGCoss+CNGWLCkkPuj78XB/Zi3LDNFd3fqhbEMfouYnZL7u49fYZHO9VCOA74dTA0V+3wlHiQoQzIjHH4UQPwCwL1Co+iikhl5rW+p/IxO7J1liZKaiad4JZeF50Sqfxed6Ev+HC7Vegw9HM6FikTeSTJwg0waYLt9r/Kf+naPmbPWI5n9fFn2lht45PgYk65cCN2stv4llHumdgAR5U98VoiKdevJCEloGNBoUvY443aK765mcNKiIrMJAeQlBu6nMa23BTtaQeb1CFSroSg2XnttFzkkS9Xh7Gt+O0HMdigNPBuYE0kohUER+wUFsr2eDeEa9REu/uleZBgIAdK1wARNp7bnllCRkhm2qtz/M9Ob3JnPrUM8fctGmIrIP7Ky6IjSWCCAFf2EmdU5bYKsY
U60cp+caIeB/6dSVM5JE/BTs9+Ij1OvxRm6hf2p2NH+Wi3YVkQuAPtMhDhfyBXxRskWZVBhTLd/BdOq4KkWeSQM5IUnNopoyc739MXx4+lJ3RfDamo5V12e9UUXkDqr4WE/aaCw521Rlv3/agmmnxfNuTIbIKikRRNceCaFr7drgyzqvCSPrJKn326dWlFeiDpJWkvnvRLD7CL0ProihxYZP175ZMaWKZhPNq0coop04pnrcTKvfdeP3a3r/mSpEKSUhLSuVfz5xgjKf6feBIfHk3+mGQc06SSQmkadbOgT5UQ0GDwkUxaxhBzJbwc7+T3x7rL4rgTb3HkPdosfid72PM8wHKGqTjVgXADMIIch2ayc8P6WsVNmZpesEwyiIOLs93WuzTpIeof7GCJzW5vVEA1wfUsXBLQ7v8my3p4j8INQbvFVS5GNYeh3ZgBfthce9YoNVQhbSXRGQeC+2NLh1WEX0R9ZJQl5rZp/jYA58KYtYeSaHYJ+rTD2spaHpP9luSxH5A5nGW3yuB7BfnzLUbRmAjl4InZ7JDXKiuAdsTe+aPa5fcolRXKZkgcr+tWFj74n5ip9URG6xpqN3PopO5GT1s6FuSxS9KKEc1d7g0Z2+IhY5WwImgzuzz3U6B/ZAnK87VEFWpk0PJQt3U8TwAinGFrflQFCMAaZP3M4FelShHo0SSmHncQ/YGh+0+J1TgEFfbu8elK/u7untvK7d2d5RDDu39aHZ2fxlrd9eb2ASbRbq9fnIFr4VLPTrgC07Wa9ybpbS0uK51GyyjxUCvuvu6f3rsjm+tHY9ixg+aLd6vpq6ZGrtOOPEa3CAPJdBy7cAAAAAtklEQVSl6U2YBkgqeULt7pqfTZOmnJMkan59Yq7rKaKwEI2geHG93/6AJKTzUKH/LctdglHynHwBxasbUR9+Pds3L/xYwEUMa1DQCjycYVpsuhxKSg9DHZUCeZNZfaZ5E38QgjWDEI3dIP6RLBZYpiiSpIi8IBpI4hF6UUCQek/D7sDYdGB8FwFsMp7fDgSrZkCRVISBcgGj8NSDn7tQhurAsqsEE1+ByshT9e1AW/DtqM9MzvH/uFCgxBI9EGYAAAAASUVORK5CYII= + mediatype: image/png + maintainers: + - name: JFrog, Ltd + email: support@jfrog.com + install: + spec: + deployments: + - name: artifactory-ha-operator + spec: + replicas: 1 + selector: + matchLabels: + name: artifactory-ha-operator + strategy: {} + template: + metadata: + labels: + name: artifactory-ha-operator + spec: + containers: + - env: + - name: WATCH_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.annotations['olm.targetNamespaces'] + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: OPERATOR_NAME + value: artifactory-ha-operator + image: image-registry.openshift-image-registry.svc:5000/jfrog-artifactory/artifactory-ha + imagePullPolicy: IfNotPresent + name: artifactory-ha-operator + resources: {} + serviceAccountName: artifactory-ha-operator + permissions: + - rules: + - apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - apiGroups: + - "" + resources: + - configmaps + - secrets + verbs: + - '*' + - apiGroups: + - "" + resources: + - events + verbs: + - create + - apiGroups: + - networking.k8s.io + resources: + - networkpolicies + verbs: + - '*' + - apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - '*' + - apiGroups: + - "" + resources: + - configmaps + - secrets + - serviceaccounts + - services + verbs: + - '*' + - apiGroups: + - rbac.authorization.k8s.io + resources: + - rolebindings + - roles + verbs: + - '*' + - apiGroups: + - apps + resources: + - deployments + - statefulsets + verbs: + - '*' + - apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + verbs: + - get + - create + - apiGroups: + - apps + resourceNames: + - artifactory-ha-operator + resources: + - deployments/finalizers + verbs: + - update + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - apiGroups: + - apps + resources: + - replicasets + - deployments + verbs: + - get + - apiGroups: + - charts.helm.k8s.io + resources: + - '*' + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + serviceAccountName: artifactory-ha-operator + strategy: deployment + installModes: + - supported: true + type: OwnNamespace + - supported: true + type: SingleNamespace + - supported: false + type: MultiNamespace + - supported: true + type: AllNamespaces + maturity: alpha + provider: {} + replaces: artifactory-ha-operator.v0.0.0 + version: 1.0.0 diff --git 
a/Openshift4/artifactory-ha-operator/deploy/olm-catalog/artifactory-ha-operator/artifactory-ha-operator.package.yaml b/Openshift4/artifactory-ha-operator/deploy/olm-catalog/artifactory-ha-operator/artifactory-ha-operator.package.yaml new file mode 100644 index 0000000..6cf02ab --- /dev/null +++ b/Openshift4/artifactory-ha-operator/deploy/olm-catalog/artifactory-ha-operator/artifactory-ha-operator.package.yaml @@ -0,0 +1,5 @@ +channels: +- currentCSV: artifactory-ha-operator.v1.0.0 + name: alpha +defaultChannel: alpha +packageName: artifactory-ha-operator diff --git a/Openshift4/artifactory-ha-operator/deploy/operator.yaml b/Openshift4/artifactory-ha-operator/deploy/operator.yaml new file mode 100644 index 0000000..c0d5a69 --- /dev/null +++ b/Openshift4/artifactory-ha-operator/deploy/operator.yaml @@ -0,0 +1,31 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: artifactory-ha-operator +spec: + replicas: 1 + selector: + matchLabels: + name: artifactory-ha-operator + template: + metadata: + labels: + name: artifactory-ha-operator + spec: + serviceAccountName: artifactory-ha-operator + containers: + - name: artifactory-ha-operator + image: image-registry.openshift-image-registry.svc:5000/jfrog-artifactory/artifactory-ha + #image: ubuntu + imagePullPolicy: IfNotPresent + env: + - name: WATCH_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: OPERATOR_NAME + value: "artifactory-ha-operator" diff --git a/Openshift4/artifactory-ha-operator/deploy/operatorgroup.yaml b/Openshift4/artifactory-ha-operator/deploy/operatorgroup.yaml new file mode 100644 index 0000000..8356f6c --- /dev/null +++ b/Openshift4/artifactory-ha-operator/deploy/operatorgroup.yaml @@ -0,0 +1,8 @@ +apiVersion: operators.coreos.com/v1alpha2 +kind: OperatorGroup +metadata: + name: jfrog-group + namespace: jfrog-artifactory +spec: + targetNamespaces: + - jfrog-artifactory diff --git a/Openshift4/artifactory-ha-operator/deploy/project.yaml b/Openshift4/artifactory-ha-operator/deploy/project.yaml new file mode 100644 index 0000000..49904a2 --- /dev/null +++ b/Openshift4/artifactory-ha-operator/deploy/project.yaml @@ -0,0 +1,89 @@ +apiVersion: template.openshift.io/v1 +kind: Template +metadata: + creationTimestamp: null + name: project-request +objects: +- apiVersion: project.openshift.io/v1 + kind: Project + metadata: + annotations: + openshift.io/description: JFrog Artifactory + openshift.io/display-name: jfrog-artifactory + openshift.io/requester: johnp@jfrog.com + creationTimestamp: null + name: jfrog-artifactory + spec: {} + status: {} +- apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + annotations: + openshift.io/description: Allows all pods in this namespace to pull images from + this namespace. It is auto-managed by a controller; remove subjects to disable. + creationTimestamp: null + name: system:image-pullers + namespace: jfrog-artifactory + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:image-puller + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:serviceaccounts:jfrog-artifactory +- apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + annotations: + openshift.io/description: Allows builds in this namespace to push images to + this namespace. It is auto-managed by a controller; remove subjects to disable. 
+ creationTimestamp: null + name: system:image-builders + namespace: jfrog-artifactory + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:image-builder + subjects: + - kind: ServiceAccount + name: builder + namespace: jfrog-artifactory +- apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + annotations: + openshift.io/description: Allows deploymentconfigs in this namespace to rollout + pods in this namespace. It is auto-managed by a controller; remove subjects + to disable. + creationTimestamp: null + name: system:deployers + namespace: jfrog-artifactory + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:deployer + subjects: + - kind: ServiceAccount + name: deployer + namespace: jfrog-artifactory +- apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + creationTimestamp: null + name: admin + namespace: jfrog-artifactory + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: admin + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: User + name: kubeadmin +parameters: +- name: PROJECT_NAME +- name: PROJECT_DISPLAYNAME +- name: PROJECT_DESCRIPTION +- name: PROJECT_ADMIN_USER +- name: PROJECT_REQUESTING_USER diff --git a/Openshift4/artifactory-ha-operator/deploy/role.yaml b/Openshift4/artifactory-ha-operator/deploy/role.yaml new file mode 100644 index 0000000..f881935 --- /dev/null +++ b/Openshift4/artifactory-ha-operator/deploy/role.yaml @@ -0,0 +1,100 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + creationTimestamp: null + name: artifactory-ha-operator +rules: +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get +- apiGroups: + - "" + resources: + - configmaps + - secrets + verbs: + - '*' +- apiGroups: + - "" + resources: + - events + verbs: + - create +- apiGroups: + - networking.k8s.io + resources: + - networkpolicies + verbs: + - '*' +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - '*' +- apiGroups: + - "" + resources: + - configmaps + - secrets + - serviceaccounts + - services + verbs: + - '*' +- apiGroups: + - rbac.authorization.k8s.io + resources: + - rolebindings + - roles + verbs: + - '*' +- apiGroups: + - apps + resources: + - deployments + - statefulsets + verbs: + - '*' +- apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + verbs: + - get + - create +- apiGroups: + - apps + resourceNames: + - artifactory-ha-operator + resources: + - deployments/finalizers + verbs: + - update +- apiGroups: + - "" + resources: + - pods + verbs: + - get +- apiGroups: + - apps + resources: + - replicasets + - deployments + verbs: + - get +- apiGroups: + - charts.helm.k8s.io + resources: + - '*' + verbs: + - create + - delete + - get + - list + - patch + - update + - watch diff --git a/Openshift4/artifactory-ha-operator/deploy/role_binding.yaml b/Openshift4/artifactory-ha-operator/deploy/role_binding.yaml new file mode 100644 index 0000000..5e1093e --- /dev/null +++ b/Openshift4/artifactory-ha-operator/deploy/role_binding.yaml @@ -0,0 +1,11 @@ +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: artifactory-ha-operator +subjects: +- kind: ServiceAccount + name: artifactory-ha-operator +roleRef: + kind: Role + name: artifactory-ha-operator + apiGroup: rbac.authorization.k8s.io diff --git a/Openshift4/artifactory-ha-operator/deploy/securitycontextconstraints.yaml b/Openshift4/artifactory-ha-operator/deploy/securitycontextconstraints.yaml new file mode 100644 
index 0000000..6bcf847 --- /dev/null +++ b/Openshift4/artifactory-ha-operator/deploy/securitycontextconstraints.yaml @@ -0,0 +1,15 @@ +kind: SecurityContextConstraints +apiVersion: v1 +metadata: + name: scc-admin +allowPrivilegedContainer: true +runAsUser: + type: RunAsAny +seLinuxContext: + type: RunAsAny +fsGroup: + type: RunAsAny +supplementalGroups: + type: RunAsAny +users: +- kubeadmin diff --git a/Openshift4/artifactory-ha-operator/deploy/service_account.yaml b/Openshift4/artifactory-ha-operator/deploy/service_account.yaml new file mode 100644 index 0000000..37ccebe --- /dev/null +++ b/Openshift4/artifactory-ha-operator/deploy/service_account.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: artifactory-ha-operator diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/CHANGELOG.md b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/CHANGELOG.md new file mode 100755 index 0000000..075bfa2 --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/CHANGELOG.md @@ -0,0 +1,557 @@ +# JFrog Artifactory-ha Chart Changelog +All changes to this chart will be documented in this file. + +## [1.3.7] - Jan 07, 2020 +* Add support for customizable `mountOptions` of NFS PVs + +## [1.3.6] - Dec 30, 2019 +* Fix for nginx probes failing when launched with http disabled + +## [1.3.5] - Dec 24, 2019 +* Better support for custom `artifactory.internalPort` + +## [1.3.4] - Dec 23, 2019 +* Mark empty map values with `{}` + +## [1.3.3] - Dec 16, 2019 +* Another fix for toggling nginx service ports + +## [1.3.2] - Dec 12, 2019 +* Fix for toggling nginx service ports + +## [1.3.1] - Dec 10, 2019 +* Add support for toggling nginx service ports + +## [1.3.0] - Dec 1, 2019 +* Updated Artifactory version to 6.16.0 + +## [1.2.4] - Nov 28, 2019 +* Add support for using existing PriorityClass + +## [1.2.3] - Nov 27, 2019 +* Add support for PriorityClass + +## [1.2.2] - Nov 20, 2019 +* Update Artifactory logo + +## [1.2.1] - Nov 18, 2019 +* Add the option to provide service account annotations (in order to support stuff like https://docs.aws.amazon.com/eks/latest/userguide/specify-service-account-role.html) + +## [1.2.0] - Nov 18, 2019 +* Updated Artifactory version to 6.15.0 + +## [1.1.12] - Nov 17, 2019 +* Fix `README.md` format (broken table) + +## [1.1.11] - Nov 17, 2019 +* Update comment on Artifactory master key + +## [1.1.10] - Nov 17, 2019 +* Fix creation of double slash in nginx artifactory configuration + +## [1.1.9] - Nov 14, 2019 +* Set explicit `postgresql.postgresqlPassword=""` to avoid helm v3 error + +## [1.1.8] - Nov 12, 2019 +* Updated Artifactory version to 6.14.1 + +## [1.1.7] - Nov 11, 2019 +* Additional documentation for masterKey + +## [1.1.6] - Nov 10, 2019 +* Update PostgreSQL chart version to 7.0.1 +* Use formal PostgreSQL configuration format + +## [1.1.5] - Nov 8, 2019 +* Add support `artifactory.service.loadBalancerSourceRanges` for whitelisting when setting `artifactory.service.type=LoadBalancer` + +## [1.1.4] - Nov 6, 2019 +* Add support for any type of environment variable by using `extraEnvironmentVariables` as-is + +## [1.1.3] - Nov 6, 2019 +* Add nodeselector support for Postgresql + +## [1.1.2] - Nov 5, 2019 +* Add support for the aws-s3-v3 filestore, which adds support for pod IAM roles + +## [1.1.1] - Nov 4, 2019 +* When using `copyOnEveryStartup`, make sure that the target base directories are created before copying the files + +## [1.1.0] - Nov 3, 2019 +* Updated 
Artifactory version to 6.14.0 + +## [1.0.1] - Nov 3, 2019 +* Make sure the artifactory pod exits when one of the pre-start stages fail + +## [1.0.0] - Oct 27, 2019 +**IMPORTANT - BREAKING CHANGES!**
+**DOWNTIME MIGHT BE REQUIRED FOR AN UPGRADE!** +* If this is a new deployment or you already use an external database (`postgresql.enabled=false`), these changes **do not affect you**! +* If this is an upgrade and you are using the default PostgreSQL (`postgresql.enabled=true`), must use the upgrade instructions in [UPGRADE_NOTES.md](UPGRADE_NOTES.md)! +* PostgreSQL sub chart was upgraded to version `6.5.x`. This version is **not backward compatible** with the old version (`0.9.5`)! +* Note the following **PostgreSQL** Helm chart changes + * The chart configuration has changed! See [values.yaml](values.yaml) for the new keys used + * **PostgreSQL** is deployed as a StatefulSet + * See [PostgreSQL helm chart](https://hub.helm.sh/charts/stable/postgresql) for all available configurations + +## [0.17.3] - Oct 24, 2019 +* Change the preStartCommand to support templating + +## [0.17.2] - Oct 21, 2019 +* Add support for setting `artifactory.primary.labels` +* Add support for setting `artifactory.node.labels` +* Add support for setting `nginx.labels` + +## [0.17.1] - Oct 10, 2019 +* Updated Artifactory version to 6.13.1 + +## [0.17.0] - Oct 7, 2019 +* Updated Artifactory version to 6.13.0 + +## [0.16.7] - Sep 24, 2019 +* Option to skip wait-for-db init container with '--set waitForDatabase=false' + +## [0.16.6] - Sep 24, 2019 +* Add support for setting `nginx.service.labels` + +## [0.16.5] - Sep 23, 2019 +* Add support for setting `artifactory.customInitContainersBegin` + +## [0.16.4] - Sep 20, 2019 +* Add support for setting `initContainers.resources` + +## [0.16.3] - Sep 11, 2019 +* Updated Artifactory version to 6.12.2 + +## [0.16.2] - Sep 9, 2019 +* Updated Artifactory version to 6.12.1 + +## [0.16.1] - Aug 22, 2019 +* Fix the nginx server_name directive used with ingress.hosts + +## [0.16.0] - Aug 21, 2019 +* Updated Artifactory version to 6.12.0 + +## [0.15.15] - Aug 18, 2019 +* Fix existingSharedClaim permissions issue and example + +## [0.15.14] - Aug 14, 2019 +* Updated Artifactory version to 6.11.6 + +## [0.15.13] - Aug 11, 2019 +* Fix Ingress routing and add an example + +## [0.15.12] - Aug 6, 2019 +* Do not mount `access/etc/bootstrap.creds` unless user specifies a custom password or secret (Access already generates a random password if not provided one) +* If custom `bootstrap.creds` is provided (using keys or custom secret), prepare it with an init container so the temp file does not persist + +## [0.15.11] - Aug 5, 2019 +* Improve binarystore config + 1. Convert to a secret + 2. Move config to values.yaml + 3. 
Support an external secret + +## [0.15.10] - Aug 5, 2019 +* Don't create the nginx configmaps when nginx.enabled is false + +## [0.15.9] - Aug 1, 2019 +* Fix masterkey/masterKeySecretName not specified warning render logic in NOTES.txt + +## [0.15.8] - Jul 28, 2019 +* Simplify nginx setup and shorten initial wait for probes + +## [0.15.7] - Jul 25, 2019 +* Updated README about how to apply Artifactory licenses + +## [0.15.6] - Jul 22, 2019 +* Change Ingress API to be compatible with recent kubernetes versions + +## [0.15.5] - Jul 22, 2019 +* Updated Artifactory version to 6.11.3 + +## [0.15.4] - Jul 11, 2019 +* Add `artifactory.customVolumeMounts` support to member node statefulset template + +## [0.15.3] - Jul 11, 2019 +* Add ingress.hosts to the Nginx server_name directive when ingress is enabled to help with Docker repository sub domain configuration + +## [0.15.2] - Jul 3, 2019 +* Add the option for changing nginx config using values.yaml and remove outdated reverse proxy documentation + +## [0.15.1] - Jul 1, 2019 +* Updated Artifactory version to 6.11.1 + +## [0.15.0] - Jun 27, 2019 +* Updated Artifactory version to 6.11.0 and Restart Primary node when bootstrap.creds file has been modified in artifactory-ha + +## [0.14.4] - Jun 24, 2019 +* Add the option to provide an IP for the access-admin endpoints + +## [0.14.3] - Jun 24, 2019 +* Update chart maintainers + +## [0.14.2] - Jun 24, 2019 +* Change Nginx to point to the artifactory externalPort + +## [0.14.1] - Jun 23, 2019 +* Add values files for small, medium and large installations + +## [0.14.0] - Jun 20, 2019 +* Use ConfigMaps for nginx configuration and remove nginx postStart command + +## [0.13.10] - Jun 19, 2019 +* Updated Artifactory version to 6.10.4 + +## [0.13.9] - Jun 18, 2019 +* Add the option to provide additional ingress rules + +## [0.13.8] - Jun 14, 2019 +* Updated readme with improved external database setup example + +## [0.13.7] - Jun 6, 2019 +* Updated Artifactory version to 6.10.3 +* Updated installer-info template + +## [0.13.6] - Jun 6, 2019 +* Updated Google Cloud Storage API URL and https settings + +## [0.13.5] - Jun 5, 2019 +* Delete the db.properties file on Artifactory startup + +## [0.13.4] - Jun 3, 2019 +* Updated Artifactory version to 6.10.2 + +## [0.13.3] - May 21, 2019 +* Updated Artifactory version to 6.10.1 + +## [0.13.2] - May 19, 2019 +* Fix missing logger image tag + +## [0.13.1] - May 15, 2019 +* Support `artifactory.persistence.cacheProviderDir` for on-premise cluster + +## [0.13.0] - May 7, 2019 +* Updated Artifactory version to 6.10.0 + +## [0.12.23] - May 5, 2019 +* Add support for setting `artifactory.async.corePoolSize` + +## [0.12.22] - May 2, 2019 +* Remove unused property `artifactory.releasebundle.feature.enabled` + +## [0.12.21] - Apr 30, 2019 +* Add support for JMX monitoring + +## [0.12.20] - Apr29, 2019 +* Added support for headless services + +## [0.12.19] - Apr 28, 2019 +* Added support for `cacheProviderDir` + +## [0.12.18] - Apr 18, 2019 +* Changing API StatefulSet version to `v1` and permission fix for custom `artifactory.conf` for Nginx + +## [0.12.17] - Apr 16, 2019 +* Updated documentation for Reverse Proxy Configuration + +## [0.12.16] - Apr 12, 2019 +* Added support for `customVolumeMounts` + +## [0.12.15] - Aprl 12, 2019 +* Added support for `bucketExists` flag for googleStorage + +## [0.12.14] - Apr 11, 2019 +* Replace `curl` examples with `wget` due to the new base image + +## [0.12.13] - Aprl 07, 2019 +* Add support for providing the Artifactory license as a 
parameter + +## [0.12.12] - Apr 10, 2019 +* Updated Artifactory version to 6.9.1 + +## [0.12.11] - Aprl 04, 2019 +* Add support for templated extraEnvironmentVariables + +## [0.12.10] - Aprl 07, 2019 +* Change network policy API group + +## [0.12.9] - Aprl 04, 2019 +* Apply the existing PVC for members (in addition to primary) + +## [0.12.8] - Aprl 03, 2019 +* Bugfix for userPluginSecrets + +## [0.12.7] - Apr 4, 2019 +* Add information about upgrading Artifactory with auto-generated postgres password + +## [0.12.6] - Aprl 03, 2019 +* Added installer info + +## [0.12.5] - Aprl 03, 2019 +* Allow secret names for user plugins to contain template language + +## [0.12.4] - Apr 02, 2019 +* Fix issue #253 (use existing PVC for data and backup storage) + +## [0.12.3] - Apr 02, 2019 +* Allow NetworkPolicy configurations (defaults to allow all) + +## [0.12.2] - Aprl 01, 2019 +* Add support for user plugin secret + +## [0.12.1] - Mar 26, 2019 +* Add the option to copy a list of files to ARTIFACTORY_HOME on startup + +## [0.12.0] - Mar 26, 2019 +* Updated Artifactory version to 6.9.0 + +## [0.11.18] - Mar 25, 2019 +* Add CI tests for persistence, ingress support and nginx + +## [0.11.17] - Mar 22, 2019 +* Add the option to change the default access-admin password + +## [0.11.16] - Mar 22, 2019 +* Added support for `.Probe.path` to customise the paths used for health probes + +## [0.11.15] - Mar 21, 2019 +* Added support for `artifactory.customSidecarContainers` to create custom sidecar containers +* Added support for `artifactory.customVolumes` to create custom volumes + +## [0.11.14] - Mar 21, 2019 +* Make ingress path configurable + +## [0.11.13] - Mar 19, 2019 +* Move the copy of bootstrap config from postStart to preStart for Primary + +## [0.11.12] - Mar 19, 2019 +* Fix existingClaim example + +## [0.11.11] - Mar 18, 2019 +* Disable the option to use nginx PVC with more than one replica + +## [0.11.10] - Mar 15, 2019 +* Wait for nginx configuration file before using it + +## [0.11.9] - Mar 15, 2019 +* Revert securityContext changes since they were causing issues + +## [0.11.8] - Mar 15, 2019 +* Fix issue #247 (init container failing to run) + +## [0.11.7] - Mar 14, 2019 +* Updated Artifactory version to 6.8.7 + +## [0.11.6] - Mar 13, 2019 +* Move securityContext to container level + +## [0.11.5] - Mar 11, 2019 +* Add the option to use existing volume claims for Artifactory storage + +## [0.11.4] - Mar 11, 2019 +* Updated Artifactory version to 6.8.6 + +## [0.11.3] - Mar 5, 2019 +* Updated Artifactory version to 6.8.4 + +## [0.11.2] - Mar 4, 2019 +* Add support for catalina logs sidecars + +## [0.11.1] - Feb 27, 2019 +* Updated Artifactory version to 6.8.3 + +## [0.11.0] - Feb 25, 2019 +* Add nginx support for tail sidecars + +## [0.10.3] - Feb 21, 2019 +* Add s3AwsVersion option to awsS3 configuration for use with IAM roles + +## [0.10.2] - Feb 19, 2019 +* Updated Artifactory version to 6.8.2 + +## [0.10.1] - Feb 17, 2019 +* Updated Artifactory version to 6.8.1 +* Add example of `SERVER_XML_EXTRA_CONNECTOR` usage + +## [0.10.0] - Feb 15, 2019 +* Updated Artifactory version to 6.8.0 + +## [0.9.7] - Feb 13, 2019 +* Updated Artifactory version to 6.7.3 + +## [0.9.6] - Feb 7, 2019 +* Add support for tail sidecars to view logs from k8s api + +## [0.9.5] - Feb 6, 2019 +* Fix support for customizing statefulset `terminationGracePeriodSeconds` + +## [0.9.4] - Feb 5, 2019 +* Add support for customizing statefulset `terminationGracePeriodSeconds` + +## [0.9.3] - Feb 5, 2019 +* Remove the inactive server 
remove plugin + +## [0.9.2] - Feb 3, 2019 +* Updated Artifactory version to 6.7.2 + +## [0.9.1] - Jan 27, 2019 +* Fix support for Azure Blob Storage Binary provider + +## [0.9.0] - Jan 23, 2019 +* Updated Artifactory version to 6.7.0 + +## [0.8.10] - Jan 22, 2019 +* Added support for `artifactory.customInitContainers` to create custom init containers + +## [0.8.9] - Jan 18, 2019 +* Added support of values ingress.labels + +## [0.8.8] - Jan 16, 2019 +* Mount replicator.yaml (config) directly to /replicator_extra_conf + +## [0.8.7] - Jan 15, 2018 +* Add support for Azure Blob Storage Binary provider + +## [0.8.6] - Jan 13, 2019 +* Fix documentation about nginx group id + +## [0.8.5] - Jan 13, 2019 +* Updated Artifactory version to 6.6.5 + +## [0.8.4] - Jan 8, 2019 +* Make artifactory.replicator.publicUrl required when the replicator is enabled + +## [0.8.3] - Jan 1, 2019 +* Updated Artifactory version to 6.6.3 +* Add support for `artifactory.extraEnvironmentVariables` to pass more environment variables to Artifactory + +## [0.8.2] - Dec 28, 2018 +* Fix location `replicator.yaml` is copied to + +## [0.8.1] - Dec 27, 2018 +* Updated Artifactory version to 6.6.1 + +## [0.8.0] - Dec 20, 2018 +* Updated Artifactory version to 6.6.0 + +## [0.7.17] - Dec 17, 2018 +* Updated Artifactory version to 6.5.13 + +## [0.7.16] - Dec 12, 2018 +* Fix documentation about Artifactory license setup using secret + +## [0.7.15] - Dec 9, 2018 +* AWS S3 add `roleName` for using IAM role + +## [0.7.14] - Dec 6, 2018 +* AWS S3 `identity` and `credential` are now added only if have a value to allow using IAM role + +## [0.7.13] - Dec 5, 2018 +* Remove Distribution certificates creation. + +## [0.7.12] - Dec 2, 2018 +* Remove Java option "-Dartifactory.locking.provider.type=db". This is already the default setting. 
+ +## [0.7.11] - Nov 30, 2018 +* Updated Artifactory version to 6.5.9 + +## [0.7.10] - Nov 29, 2018 +* Fixed the volumeMount for the replicator.yaml + +## [0.7.9] - Nov 29, 2018 +* Optionally include primary node into poddisruptionbudget + +## [0.7.8] - Nov 29, 2018 +* Updated postgresql version to 9.6.11 + +## [0.7.7] - Nov 27, 2018 +* Updated Artifactory version to 6.5.8 + +## [0.7.6] - Nov 18, 2018 +* Added support for configMap to use custom Reverse Proxy Configuration with Nginx + +## [0.7.5] - Nov 14, 2018 +* Updated Artifactory version to 6.5.3 + +## [0.7.4] - Nov 13, 2018 +* Allow pod anti-affinity settings to include primary node + +## [0.7.3] - Nov 12, 2018 +* Support artifactory.preStartCommand for running command before entrypoint starts + +## [0.7.2] - Nov 7, 2018 +* Support database.url parameter (DB_URL) + +## [0.7.1] - Oct 29, 2018 +* Change probes port to 8040 (so they will not be blocked when all tomcat threads on 8081 are exhausted) + +## [0.7.0] - Oct 28, 2018 +* Update postgresql chart to version 0.9.5 to be able and use `postgresConfig` options + +## [0.6.9] - Oct 23, 2018 +* Fix providing external secret for database credentials + +## [0.6.8] - Oct 22, 2018 +* Allow user to configure externalTrafficPolicy for Loadbalancer + +## [0.6.7] - Oct 22, 2018 +* Updated ingress annotation support (with examples) to support docker registry v2 + +## [0.6.6] - Oct 21, 2018 +* Updated Artifactory version to 6.5.2 + +## [0.6.5] - Oct 19, 2018 +* Allow providing pre-existing secret containing master key +* Allow arbitrary annotations on primary and member node pods +* Enforce size limits when using local storage with `emptyDir` +* Allow `soft` or `hard` specification of member node anti-affinity +* Allow providing pre-existing secrets containing external database credentials +* Fix `s3` binary store provider to properly use the `cache-fs` provider +* Allow arbitrary properties when using the `s3` binary store provider + +## [0.6.4] - Oct 18, 2018 +* Updated Artifactory version to 6.5.1 + +## [0.6.3] - Oct 17, 2018 +* Add Apache 2.0 license + +## [0.6.2] - Oct 14, 2018 +* Make S3 endpoint configurable (was hardcoded with `s3.amazonaws.com`) + +## [0.6.1] - Oct 11, 2018 +* Allows ingress default `backend` to be enabled or disabled (defaults to enabled) + +## [0.6.0] - Oct 11, 2018 +* Updated Artifactory version to 6.5.0 + +## [0.5.3] - Oct 9, 2018 +* Quote ingress hosts to support wildcard names + +## [0.5.2] - Oct 2, 2018 +* Add `helm repo add jfrog https://charts.jfrog.io` to README + +## [0.5.1] - Oct 2, 2018 +* Set Artifactory to 6.4.1 + +## [0.5.0] - Sep 27, 2018 +* Set Artifactory to 6.4.0 + +## [0.4.7] - Sep 26, 2018 +* Add ci/test-values.yaml + +## [0.4.6] - Sep 25, 2018 +* Add PodDisruptionBudget for member nodes, defaulting to minAvailable of 1 + +## [0.4.4] - Sep 2, 2018 +* Updated Artifactory version to 6.3.2 + +## [0.4.0] - Aug 22, 2018 +* Added support to run as non root +* Updated Artifactory version to 6.2.0 + +## [0.3.0] - Aug 22, 2018 +* Enabled RBAC Support +* Added support for PostStartCommand (To download Database JDBC connector) +* Increased postgresql max_connections +* Added support for `nginx.conf` ConfigMap +* Updated Artifactory version to 6.1.0 diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/Chart.yaml b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/Chart.yaml new file mode 100755 index 0000000..0e1989f --- /dev/null +++ 
b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +appVersion: 7.0.2 +description: Universal Repository Manager supporting all major packaging formats, + build tools and CI servers. +home: https://www.jfrog.com/artifactory/ +icon: https://raw.githubusercontent.com/jfrog/charts/master/stable/artifactory-ha/logo/artifactory-logo.png +keywords: +- artifactory +- jfrog +- devops +maintainers: +- email: amithk@jfrog.com + name: amithins +- email: daniele@jfrog.com + name: danielezer +- email: eldada@jfrog.com + name: eldada +- email: rimasm@jfrog.com + name: rimusz +name: openshift-artifactory-ha +sources: +- https://bintray.com/jfrog/product/JFrog-Artifactory-Pro/view +- https://github.com/jfrog/charts +version: 2.0.4 diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/LICENSE b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/LICENSE new file mode 100755 index 0000000..8dada3e --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/README.md b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/README.md new file mode 100755 index 0000000..76bd6af --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/README.md @@ -0,0 +1,1266 @@ +# JFrog Artifactory High Availability Helm Chart + +## Prerequisites Details + +* Kubernetes 1.8+ +* Artifactory HA license + +## Chart Details +This chart will do the following: + +* Deploy Artifactory highly available cluster. 1 primary node and 2 member nodes. +* Deploy a PostgreSQL database +* Deploy an Nginx server + +## Artifactory HA architecture +The Artifactory HA cluster in this chart is made up of +- A single primary node +- Two member nodes, which can be resized at will + +Load balancing is done to the member nodes only. +This leaves the primary node free to handle jobs and tasks and not be interrupted by inbound traffic. 
+> This can be controlled by the parameter `artifactory.service.pool`. + +## Installing the Chart + +### Add JFrog Helm repository +Before installing JFrog helm charts, you need to add the [JFrog helm repository](https://charts.jfrog.io/) to your helm client +```bash +helm repo add jfrog https://charts.jfrog.io +``` + +### Install Chart +To install the chart with the release name `artifactory-ha`: +```bash +helm install --name artifactory-ha --set postgresql.postgresqlPassword= jfrog/artifactory-ha +``` + +### System Configuration +Artifactory uses a common system configuration file - `system.yaml`. See [official documentation](https://www.jfrog.com/confluence/display/JFROG/System+YAML+Configuration+File) on its usage. +In order to override the default `system.yaml` configuration, do the following: +```bash +artifactory: + systemYaml: | + +``` + +### Accessing Artifactory +**NOTE:** It might take a few minutes for Artifactory's public IP to become available, and the nodes to complete initial setup. +Follow the instructions outputted by the install command to get the Artifactory IP and URL to access it. + +### Updating Artifactory +Once you have a new chart version, you can update your deployment with +```bash +helm upgrade artifactory-ha jfrog/artifactory-ha +``` + +If artifactory was installed without providing a value to postgresql.postgresqlPassword (a password was autogenerated), follow these instructions: +1. Get the current password by running: +```bash +POSTGRES_PASSWORD=$(kubectl get secret -n -postgresql -o jsonpath="{.data.postgresql-password}" | base64 --decode) +``` +2. Upgrade the release by passing the previously auto-generated secret: +```bash +helm upgrade jfrog/artifactory-ha --set postgresql.postgresqlPassword=${POSTGRES_PASSWORD} +``` + +This will apply any configuration changes on your existing deployment. + +### Artifactory memory and CPU resources +The Artifactory HA Helm chart comes with support for configured resource requests and limits to all pods. By default, these settings are commented out. +It is **highly** recommended to set these so you have full control of the allocated resources and limits. + +See more information on [setting resources for your Artifactory based on planned usage](https://www.jfrog.com/confluence/display/RTF/System+Requirements#SystemRequirements-RecommendedHardware). 
+ +```bash +# Example of setting resource requests and limits to all pods (including passing java memory settings to Artifactory) +helm install --name artifactory-ha \ + --set artifactory.primary.resources.requests.cpu="500m" \ + --set artifactory.primary.resources.limits.cpu="2" \ + --set artifactory.primary.resources.requests.memory="1Gi" \ + --set artifactory.primary.resources.limits.memory="4Gi" \ + --set artifactory.primary.javaOpts.xms="1g" \ + --set artifactory.primary.javaOpts.xmx="4g" \ + --set artifactory.node.resources.requests.cpu="500m" \ + --set artifactory.node.resources.limits.cpu="2" \ + --set artifactory.node.resources.requests.memory="1Gi" \ + --set artifactory.node.resources.limits.memory="4Gi" \ + --set artifactory.node.javaOpts.xms="1g" \ + --set artifactory.node.javaOpts.xmx="4g" \ + --set initContainers.resources.requests.cpu="10m" \ + --set initContainers.resources.limits.cpu="250m" \ + --set initContainers.resources.requests.memory="64Mi" \ + --set initContainers.resources.limits.memory="128Mi" \ + --set postgresql.resources.requests.cpu="200m" \ + --set postgresql.resources.limits.cpu="1" \ + --set postgresql.resources.requests.memory="500Mi" \ + --set postgresql.resources.limits.memory="1Gi" \ + --set nginx.resources.requests.cpu="100m" \ + --set nginx.resources.limits.cpu="250m" \ + --set nginx.resources.requests.memory="250Mi" \ + --set nginx.resources.limits.memory="500Mi" \ + jfrog/artifactory-ha +``` +> Artifactory java memory parameters can (and should) also be set to match the allocated resources with `artifactory.[primary|node].javaOpts.xms` and `artifactory.[primary|node].javaOpts.xmx`. + +Get more details on configuring Artifactory in the [official documentation](https://www.jfrog.com/confluence/). + +Although it is possible to set resources limits and requests this way, it is recommended to use the pre-built values files +for small, medium and large installation and change them according to your needs (if necessary), as described [here](#Deploying-Artifactory-for-small/medium/large-installations) + +### Deploying Artifactory for small/medium/large installations +In the chart directory, we have added three values files, one for each installation type - small/medium/large. These values files are recommendations for setting resources requests and limits for your installation. The values are derived from the following [documentation](https://www.jfrog.com/confluence/display/EP/Installing+on+Kubernetes#InstallingonKubernetes-Systemrequirements). You can find them in the corresponding chart directory - values-small.yaml, values-medium.yaml and values-large.yaml + +### Artifactory storage +Artifactory HA support a wide range of storage back ends. You can see more details on [Artifactory HA storage options](https://www.jfrog.com/confluence/display/RTF/HA+Installation+and+Setup#HAInstallationandSetup-SettingUpYourStorageConfiguration) + +In this chart, you set the type of storage you want with `artifactory.persistence.type` and pass the required configuration settings. +The default storage in this chart is the `file-system` replication, where the data is replicated to all nodes. + +> **IMPORTANT:** All storage configurations (except NFS) come with a default `artifactory.persistence.redundancy` parameter. +This is used to set how many replicas of a binary should be stored in the cluster's nodes. +Once this value is set on initial deployment, you can not update it using helm. 
+It is recommended to set this to a number greater than half of your cluster's size, and never scale your cluster down to a size smaller than this number. + +#### Existing volume claim + +###### Primary node +In order to use an existing volume claim for the Artifactory primary storage, you need to: +- Create a persistent volume claim by the name `volume--artifactory-ha-primary-0` e.g `volume-myrelease-artifactory-ha-primary-0` +- Pass a parameter to `helm install` and `helm upgrade` +```bash +... +--set artifactory.primary.persistence.existingClaim=true +``` + +###### Member nodes +In order to use an existing volume claim for the Artifactory member nodes storage, you need to: +- Create persistent volume claims according to the number of replicas defined at `artifactory.node.replicaCount` by the names `volume--artifactory-ha-member-`, e.g `volume-myrelease-artifactory-ha-member-0` and `volume-myrelease-artifactory-ha-primary-1`. +- Pass a parameter to `helm install` and `helm upgrade` +```bash +... +--set artifactory.node.persistence.existingClaim=true +``` + +#### Existing shared volume claim + +In order to use an existing claim (for data and backup) that is to be shared across all nodes, you need to: + +- Create PVCs with ReadWriteMany that match the naming conventions: +``` + {{ template "artifactory-ha.fullname" . }}-data-pvc- + {{ template "artifactory-ha.fullname" . }}-backup-pvc- +``` +An example that shows 2 existing claims to be used: +``` + myexample-artifactory-ha-data-pvc-0 + myexample-artifactory-ha-backup-pvc-0 + myexample-artifactory-ha-data-pvc-1 + myexample-artifactory-ha-backup-pvc-1 +``` +- Set the artifactory.persistence.fileSystem.existingSharedClaim.enabled in values.yaml to true: +``` +-- set artifactory.persistence.fileSystem.existingSharedClaim.enabled=true +-- set artifactory.persistence.fileSystem.existingSharedClaim.numberOfExistingClaims=2 +``` + +#### NFS +To use an NFS server as your cluster's storage, you need to +- Setup an NFS server. Get its IP as `NFS_IP` +- Create a `data` and `backup` directories on the NFS exported directory with write permissions to all +- Pass NFS parameters to `helm install` and `helm upgrade` +```bash +... +--set artifactory.persistence.type=nfs \ +--set artifactory.persistence.nfs.ip=${NFS_IP} \ +... +``` + +#### Google Storage +To use a Google Storage bucket as the cluster's filestore. See [Google Storage Binary Provider](https://www.jfrog.com/confluence/display/RTF/Configuring+the+Filestore#ConfiguringtheFilestore-GoogleStorageBinaryProvider) +- Pass Google Storage parameters to `helm install` and `helm upgrade` +```bash +... +--set artifactory.persistence.type=google-storage \ +--set artifactory.persistence.googleStorage.identity=${GCP_ID} \ +--set artifactory.persistence.googleStorage.credential=${GCP_KEY} \ +... +``` + +#### AWS S3 +**NOTE** Keep in mind that when using the `aws-s3` persistence type, you will not be able to provide an IAM on the pod level. +In order to grant permissions to Artifactory using an IAM role, you will have to attach the IAM role to the machine(s) on which Artifactory is running. +This is due to the fact that the `aws-s3` template uses the `JetS3t` library to interact with AWS. If you want to grant an IAM role at the pod level, see the `AWS S3 Vs` section. + +To use an AWS S3 bucket as the cluster's filestore. 
See [S3 Binary Provider](https://www.jfrog.com/confluence/display/RTF/Configuring+the+Filestore#ConfiguringtheFilestore-S3BinaryProvider) +- Pass AWS S3 parameters to `helm install` and `helm upgrade` +```bash +... +# With explicit credentials: +--set artifactory.persistence.type=aws-s3 \ +--set artifactory.persistence.awsS3.endpoint=${AWS_S3_ENDPOINT} \ +--set artifactory.persistence.awsS3.region=${AWS_REGION} \ +--set artifactory.persistence.awsS3.identity=${AWS_ACCESS_KEY_ID} \ +--set artifactory.persistence.awsS3.credential=${AWS_SECRET_ACCESS_KEY} \ +... + +... +# With using existing IAM role +--set artifactory.persistence.type=aws-s3 \ +--set artifactory.persistence.awsS3.endpoint=${AWS_S3_ENDPOINT} \ +--set artifactory.persistence.awsS3.region=${AWS_REGION} \ +--set artifactory.persistence.awsS3.roleName=${AWS_ROLE_NAME} \ +... +``` +**NOTE:** Make sure S3 `endpoint` and `region` match. See [AWS documentation on endpoint](https://docs.aws.amazon.com/general/latest/gr/rande.html) + +#### AWS S3 V3 +To use an AWS S3 bucket as the cluster's filestore and access it with the official AWS SDK, See [S3 Official SDK Binary Provider](https://www.jfrog.com/confluence/display/RTF/Configuring+the+Filestore#ConfiguringtheFilestore-AmazonS3OfficialSDKTemplate). +This filestore template uses the official AWS SDK, unlike the `aws-s3` implementation that uses the `JetS3t` library. +Use this template if you want to attach an IAM role to the Artifactory pod directly (as opposed to attaching it to the machine/s that Artifactory will run on). + +**NOTE** This will have to be combined with a k8s mechanism for attaching IAM roles to pods, like [kube2iam](https://github.com/helm/charts/tree/master/stable/kube2iam) or anything similar. + +- Pass AWS S3 V3 parameters and the annotation pointing to the IAM role (when using an IAM role. this is kube2iam specific and may vary depending on the implementation) to `helm install` and `helm upgrade` + +```bash +# With explicit credentials: +--set artifactory.persistence.type=aws-s3-v3 \ +--set artifactory.persistence.awsS3V3.region=${AWS_REGION} \ +--set artifactory.persistence.awsS3V3.bucketName=${AWS_S3_BUCKET_NAME} \ +--set artifactory.persistence.awsS3V3.identity=${AWS_ACCESS_KEY_ID} \ +--set artifactory.persistence.awsS3V3.credential=${AWS_SECRET_ACCESS_KEY} \ +... +``` + +```bash +# With using existing IAM role +--set artifactory.persistence.type=aws-s3-v3 \ +--set artifactory.persistence.awsS3V3.region=${AWS_REGION} \ +--set artifactory.persistence.awsS3V3.bucketName=${AWS_S3_BUCKET_NAME} \ +--set artifactory.annotations.'iam\.amazonaws\.com/role'=${AWS_IAM_ROLE_ARN} +... +``` + +#### Microsoft Azure Blob Storage +To use Azure Blob Storage as the cluster's filestore. See [Azure Blob Storage Binary Provider](https://www.jfrog.com/confluence/display/RTF/Configuring+the+Filestore#ConfiguringtheFilestore-AzureBlobStorageClusterBinaryProvider) +- Pass Azure Blob Storage parameters to `helm install` and `helm upgrade` +```bash +... +--set artifactory.persistence.type=azure-blob \ +--set artifactory.persistence.azureBlob.accountName=${AZURE_ACCOUNT_NAME} \ +--set artifactory.persistence.azureBlob.accountKey=${AZURE_ACCOUNT_KEY} \ +--set artifactory.persistence.azureBlob.endpoint=${AZURE_ENDPOINT} \ +--set artifactory.persistence.azureBlob.containerName=${AZURE_CONTAINER_NAME} \ +... +``` + +#### Custom binarystore.xml +You have an option to provide a custom [binarystore.xml](https://www.jfrog.com/confluence/display/RTF/Configuring+the+Filestore).
+There are two options for this + +1. Editing directly in [values.yaml](values.yaml) +```yaml +artifactory: + persistence: + binarystoreXml: | + + + + + +``` + +2. Create your own [Secret](https://kubernetes.io/docs/concepts/configuration/secret/) and pass it to your `helm install` command +```yaml +# Prepare your custom Secret file (custom-binarystore.yaml) +kind: Secret +apiVersion: v1 +metadata: + name: custom-binarystore + labels: + app: artifactory + chart: artifactory +stringData: + binarystore.xml: |- + + + + +``` + +```bash +# Create a secret from the file +kubectl apply -n artifactory -f ./custom-binarystore.yaml + +# Pass it to your helm install command: +helm install --name artifactory-ha --set artifactory.persistence.customBinarystoreXmlSecret=custom-binarystore jfrog/artifactory-ha +``` + +### Create a unique Master Key +Artifactory HA cluster requires a unique master key. By default the chart has one set in values.yaml (`artifactory.masterKey`). + +**This key is for demo purpose and should not be used in a production environment!** + +You should generate a unique one and pass it to the template at install/upgrade time. +```bash +# Create a key +export MASTER_KEY=$(openssl rand -hex 32) +echo ${MASTER_KEY} + +# Pass the created master key to helm +helm install --name artifactory-ha --set artifactory.masterKey=${MASTER_KEY} jfrog/artifactory-ha +``` + +Alternatively, you can create a secret containing the master key manually and pass it to the template at install/upgrade time. +```bash +# Create a key +export MASTER_KEY=$(openssl rand -hex 32) +echo ${MASTER_KEY} + +# Create a secret containing the key. The key in the secret must be named master-key +kubectl create secret generic my-secret --from-literal=master-key=${MASTER_KEY} + +# Pass the created secret to helm +helm install --name artifactory-ha --set artifactory.masterKeySecretName=my-secret jfrog/artifactory-ha +``` +**NOTE:** In either case, make sure to pass the same master key on all future calls to `helm install` and `helm upgrade`! In the first case, this means always passing `--set artifactory.masterKey=${MASTER_KEY}`. In the second, this means always passing `--set artifactory.masterKeySecretName=my-secret` and ensuring the contents of the secret remain unchanged. + +### Create a unique Join Key +Artifactory requires a unique join key. By default the chart has one set in values.yaml (`artifactory.joinKey`). + +**This key is for demo purpose and should not be used in a production environment!** + +You should generate a unique key and pass it to the template at install/upgrade time. +```bash +# Create a key +export JOIN_KEY=$(openssl rand -hex 16) +echo ${JOIN_KEY} + +# Pass the created master key to helm +helm install --name artifactory --set artifactory.joinKey=${JOIN_KEY} jfrog/artifactory +``` + +**NOTE:** In either case, make sure to pass the same join key on all future calls to `helm install` and `helm upgrade`! This means always passing `--set artifactory.joinKey=${JOIN_KEY}`. + +### Install Artifactory HA license +For activating Artifactory HA, you must install an appropriate license. There are three ways to manage the license. **Artifactory UI**, **REST API**, or a **Kubernetes Secret**. + +The easier and recommended way is the **Artifactory UI**. Using the **Kubernetes Secret** or **REST API** is for advanced users and is better suited for automation. + +**IMPORTANT:** You should use only one of the following methods. 
Switching between them while a cluster is running might disable your Artifactory HA cluster! + +##### Artifactory UI +Once primary cluster is running, open Artifactory UI and insert the license(s) in the UI. See [HA installation and setup](https://www.jfrog.com/confluence/display/RTF/HA+Installation+and+Setup) for more details. **Note that you should enter all licenses at once, with each license is separated by a newline.** If you add the licenses one at a time, you may get redirected to a node without a license and the UI won't load for that node. + +##### REST API +You can add licenses via REST API (https://www.jfrog.com/confluence/display/RTF/Artifactory+REST+API#ArtifactoryRESTAPI-InstallHAClusterLicenses). Note that the REST API expects "\n" for the newlines in the licenses. + +##### Kubernetes Secret +You can deploy the Artifactory license(s) as a [Kubernetes secret](https://kubernetes.io/docs/concepts/configuration/secret/). +Prepare a text file with the license(s) written in it. If writing multiple licenses (must be in the same file), it's important to put **two new lines between each license block**! +```bash +# Create the Kubernetes secret (assuming the local license file is 'art.lic') +kubectl create secret generic artifactory-cluster-license --from-file=./art.lic + +# Pass the license to helm +helm install --name artifactory-ha --set artifactory.license.secret=artifactory-cluster-license,artifactory.license.dataKey=art.lic jfrog/artifactory-ha +``` +**NOTE:** This method is relevant for initial deployment only! Once Artifactory is deployed, you should not keep passing these parameters as the license is already persisted into Artifactory's storage (they will be ignored). +Updating the license should be done via Artifactory UI or REST API. + +##### Create the secret as part of the helm release +values.yaml +```yaml +artifactory: + license: + licenseKey: |- + + + + + + + +``` + +```bash +helm install --name artifactory-ha -f values.yaml jfrog/artifactory-ha +``` +**NOTE:** This method is relevant for initial deployment only! Once Artifactory is deployed, you should not keep passing these parameters as the license is already persisted into Artifactory's storage (they will be ignored). +Updating the license should be done via Artifactory UI or REST API. +If you want to keep managing the artifactory license using the same method, you can use the copyOnEveryStartup example shown in the values.yaml file + + +### copyOnEveryStartup feature +Files stored in the `/artifactory-extra-conf` directory are only copied to the `ARTIFACTORY_HOME/etc` directory upon the first startup. +In some cases, you want your configuration files to be copied to the `ARTIFACTORY_HOME/etc` directory on every startup. +Two examples for that would be: + +1. the binarstore.xml file. If you use the default behaviour, your binarystore.xml configuration will only be copied on the first startup, +which means that changes you make over time to the `binaryStoreXml` configuration will not be applied. In order to make sure your changes are applied on every startup, do the following: +Create a values file with the following values: +```yaml +artifactory: + copyOnEveryStartup: + - source: /artifactory_extra_conf/binarystore.xml + target: etc/ +``` + +Install the helm chart with the values file you created: +```bash +helm upgrade --install artifactory-ha jfrog/artifactory-ha -f values.yaml +``` + +2. 
Any custom configuration file you have to configure artifactory, such as `logabck.xml`: +Create a config map with your `logback.xml` configuration. + +Create a values file with the following values: +```yaml +artifactory: + ## Create a volume pointing to the config map with your configuration file + customVolumes: | + - name: logback-xml-configmap + configMap: + name: logback-xml-configmap + customVolumeMounts: | + - name: logback-xml-configmap + mountPath: /tmp/artifactory-logback/ + copyOnEveryStartup: + - source: /tmp/artifactory-logback/* + target: etc/ +``` + +Install the helm chart with the values file you created: +```bash +helm upgrade --install artifactory-ha jfrog/artifactory-ha -f values.yaml +``` + +### Configure NetworkPolicy + +NetworkPolicy specifies what ingress and egress is allowed in this namespace. It is encouraged to be more specific whenever possible to increase security of the system. + +In the `networkpolicy` section of values.yaml you can specify a list of NetworkPolicy objects. + +For podSelector, ingress and egress, if nothing is provided then a default `- {}` is applied which is to allow everything. + +A full (but very wide open) example that results in 2 NetworkPolicy objects being created: +```yaml +networkpolicy: + # Allows all ingress and egress to/from artifactory primary and member pods. + - name: artifactory + podSelector: + matchLabels: + app: artifactory-ha + egress: + - {} + ingress: + - {} + # Allows connectivity from artifactory-ha pods to postgresql pods, but no traffic leaving postgresql pod. + - name: postgresql + podSelector: + matchLabels: + app: postgresql + ingress: + - from: + - podSelector: + matchLabels: + app: artifactory-ha +``` + +### Artifactory JMX Configuration +** You can see some information about the exposed MBeans here - https://www.jfrog.com/confluence/display/RTF/Artifactory+JMX+MBeans + +Enable JMX in your deployment: +```bash +helm install --name artifactory \ + --set artifactory.primary.javaOpts.jmx.enabled=true \ + --set artifactory.node.javaOpts.jmx.enabled=true \ + jfrog/artifactory-ha +``` +This will enable access to Artifactory with JMX on the default port (9010). +** You have the option to change the port by setting ```artifactory.primary.javaOpts.jmx.port``` and ```artifactory.node.javaOpts.jmx.port``` +to your choice of port + +In order to connect to Artifactory using JMX with jconsole (or any similar tool) installed on your computer, follow the following steps: +1. Enable JMX as described above and Change the Artifactory service to be of type LoadBalancer: +```bash +helm install --name artifactory \ + --set artifactory.primary.javaOpts.jmx.enabled=true \ + --set artifactory.node.javaOpts.jmx.enabled=true \ + --set artifactory.service.type=LoadBalancer \ + jfrog/artifactory-ha +``` +2. The default setting for java.rmi.server.hostname is the service name (this is also configurable with +```artifactory.primary.javaOpts.jmx.host``` and ```artifactory.node.javaOpts.jmx.host```), So in order to connect to Artifactory +with jconsole you should map the Artifactory kuberentes service IP to the service name using your hosts file as such: +``` + artifactory-ha--primary + +``` +3. Launch jconsole with the service address and port: +```bash +jconsole artifactory-ha--primary: +jconsole : +``` + +### Access creds. bootstraping +**IMPORTANT:** Bootsrapping access creds. will allow access for the user access-admin from certain IP's. 
+ +* User guide to [bootstrap Artifactory Access credentials](https://www.jfrog.com/confluence/display/ACC/Configuring+Access) + +1. Create `access-creds-values.yaml` and provide the IP (By default 127.0.0.1) and password: +```yaml +artifactory: + accessAdmin: + ip: "" #Example: "*" + password: "" +``` + +2. Apply the `access-creds-values.yaml` file: +```bash +helm upgrade --install artifactory-ha jfrog/artifactory-ha -f access-creds-values.yaml +``` + +### Bootstrapping Artifactory +**IMPORTANT:** Bootstrapping Artifactory needs license. Pass license as shown in above section. + +* User guide to [bootstrap Artifactory Global Configuration](https://www.jfrog.com/confluence/display/RTF/Configuration+Files#ConfigurationFiles-BootstrappingtheGlobalConfiguration) +* User guide to [bootstrap Artifactory Security Configuration](https://www.jfrog.com/confluence/display/RTF/Configuration+Files#ConfigurationFiles-BootstrappingtheSecurityConfiguration) + +1. Create `bootstrap-config.yaml` with artifactory.config.import.xml and security.import.xml as shown below: +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: my-release-bootstrap-config +data: + artifactory.config.import.xml: | + + security.import.xml: | + +``` + +2. Create configMap in Kubernetes: +```bash +kubectl apply -f bootstrap-config.yaml +``` +3. Pass the configMap to helm +```bash +helm install --name artifactory-ha --set artifactory.license.secret=artifactory-cluster-license,artifactory.license.dataKey=art.lic,artifactory.configMapName=my-release-bootstrap-config jfrog/artifactory-ha +``` + +### Use custom nginx.conf with Nginx + +Steps to create configMap with nginx.conf +* Create `nginx.conf` file. +```bash +kubectl create configmap nginx-config --from-file=nginx.conf +``` +* Pass configMap to helm install +```bash +helm install --name artifactory-ha --set nginx.customConfigMap=nginx-config jfrog/artifactory-ha +``` + +### Scaling your Artifactory cluster +A key feature in Artifactory HA is the ability to set an initial cluster size with `--set artifactory.node.replicaCount=${CLUSTER_SIZE}` and if needed, resize it. + +##### Before scaling +**IMPORTANT:** When scaling, you need to explicitly pass the database password if it's an auto generated one (this is the default with the enclosed PostgreSQL helm chart). + +Get the current database password +```bash +export DB_PASSWORD=$(kubectl get $(kubectl get secret -o name | grep postgresql) -o jsonpath="{.data.postgresql-password}" | base64 --decode) +``` +Use `--set postgresql.postgresqlPassword=${DB_PASSWORD}` with every scale action to prevent a miss configured cluster! + +##### Scale up +Let's assume you have a cluster with **2** member nodes, and you want to scale up to **3** member nodes (a total of 4 nodes). +```bash +# Scale to 4 nodes (1 primary and 3 member nodes) +helm upgrade --install artifactory-ha --set artifactory.node.replicaCount=3 --set postgresql.postgresqlPassword=${DB_PASSWORD} jfrog/artifactory-ha +``` + +##### Scale down +Let's assume you have a cluster with **3** member nodes, and you want to scale down to **2** member node. + +```bash +# Scale down to 2 member nodes +helm upgrade --install artifactory-ha --set artifactory.node.replicaCount=2 --set postgresql.postgresqlPassword=${DB_PASSWORD} jfrog/artifactory-ha +``` +- **NOTE:** Since Artifactory is running as a Kubernetes Stateful Set, the removal of the node will **not** remove the persistent volume. 
+##### Before scaling
+**IMPORTANT:** When scaling, you need to explicitly pass the database password if it's an auto-generated one (this is the default with the enclosed PostgreSQL helm chart).
+
+Get the current database password:
+```bash
+export DB_PASSWORD=$(kubectl get $(kubectl get secret -o name | grep postgresql) -o jsonpath="{.data.postgresql-password}" | base64 --decode)
+```
+Use `--set postgresql.postgresqlPassword=${DB_PASSWORD}` with every scale action to prevent a misconfigured cluster!
+
+##### Scale up
+Let's assume you have a cluster with **2** member nodes, and you want to scale up to **3** member nodes (a total of 4 nodes).
+```bash
+# Scale to 4 nodes (1 primary and 3 member nodes)
+helm upgrade --install artifactory-ha --set artifactory.node.replicaCount=3 --set postgresql.postgresqlPassword=${DB_PASSWORD} jfrog/artifactory-ha
+```
+
+##### Scale down
+Let's assume you have a cluster with **3** member nodes, and you want to scale down to **2** member nodes.
+
+```bash
+# Scale down to 2 member nodes
+helm upgrade --install artifactory-ha --set artifactory.node.replicaCount=2 --set postgresql.postgresqlPassword=${DB_PASSWORD} jfrog/artifactory-ha
+```
+- **NOTE:** Since Artifactory is running as a Kubernetes Stateful Set, the removal of the node will **not** remove the persistent volume. You need to explicitly remove it
+```bash
+# List PVCs
+kubectl get pvc
+
+# Remove the PVC with the highest ordinal!
+# In this example, the highest node ordinal was 2, so we need to remove its storage.
+kubectl delete pvc volume-artifactory-node-2
+```
+
+### Use an external Database
+
+#### PostgreSQL
+There are cases where you will want to use an external PostgreSQL with a different database name, e.g. `my-artifactory-db`. In that case you need to set a custom PostgreSQL connection URL, where `my-artifactory-db` is the database name.
+
+This can be done with the following parameters:
+```bash
+...
+--set postgresql.enabled=false \
+--set database.type=postgresql \
+--set database.driver=org.postgresql.Driver \
+--set database.url='jdbc:postgresql://${DB_HOST}:${DB_PORT}/my-artifactory-db' \
+--set database.user=${DB_USER} \
+--set database.password=${DB_PASSWORD} \
+...
+```
+**NOTE:** You must set `postgresql.enabled=false` in order for the chart to use the `database.*` parameters. Without it, they will be ignored!
+
+#### Other DB type
+There are cases where you will want to use a different database and not the enclosed **PostgreSQL**.
+See more details on [configuring the database](https://www.jfrog.com/confluence/display/RTF/Configuring+the+Database)
+> The official Artifactory Docker images include the PostgreSQL database driver.
+> For other database types, you will have to add the relevant database driver to Artifactory's tomcat/lib
+
+This can be done with the following parameters:
+```bash
+# Make sure your Artifactory Docker image has the MySQL database driver in it
+...
+--set postgresql.enabled=false \
+--set artifactory.preStartCommand="wget -O /opt/jfrog/artifactory/tomcat/lib/mysql-connector-java-5.1.41.jar https://jcenter.bintray.com/mysql/mysql-connector-java/5.1.41/mysql-connector-java-5.1.41.jar" \
+--set database.type=mysql \
+--set database.driver=com.mysql.jdbc.Driver \
+--set database.url=${DB_URL} \
+--set database.user=${DB_USER} \
+--set database.password=${DB_PASSWORD} \
+...
+```
+**NOTE:** You must set `postgresql.enabled=false` in order for the chart to use the `database.*` parameters. Without it, they will be ignored!
+
+#### Using pre-existing Kubernetes Secret
+If you store your database credentials in a pre-existing Kubernetes `Secret`, you can specify them via `database.secrets` instead of `database.user` and `database.password`:
+```bash
+# Create a secret containing the database credentials
+kubectl create secret generic my-secret --from-literal=user=${DB_USER} --from-literal=password=${DB_PASSWORD}
+...
+--set postgresql.enabled=false \
+--set database.secrets.user.name=my-secret \
+--set database.secrets.user.key=user \
+--set database.secrets.password.name=my-secret \
+--set database.secrets.password.key=password \
+...
+```
+
+### Deleting Artifactory
+To delete the Artifactory HA cluster:
+```bash
+helm delete --purge artifactory-ha
+```
+This will completely delete your Artifactory HA cluster.
+**NOTE:** Since Artifactory is running as Kubernetes Stateful Sets, the removal of the helm release will **not** remove the persistent volumes. You need to explicitly remove them
+```bash
+kubectl delete pvc -l release=artifactory-ha
+```
+See more details in the official [Kubernetes Stateful Set removal page](https://kubernetes.io/docs/tasks/run-application/delete-stateful-set/)
+
+### Custom Docker registry for your images
+If you need to pull your Docker images from a private registry (for example, when you have a custom image with a MySQL database driver), you need to create a
+[Kubernetes Docker registry secret](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/) and pass it to helm
+```bash
+# Create a Docker registry secret called 'regsecret'
+kubectl create secret docker-registry regsecret --docker-server=${DOCKER_REGISTRY} --docker-username=${DOCKER_USER} --docker-password=${DOCKER_PASS} --docker-email=${DOCKER_EMAIL}
+```
+Once created, you pass it to `helm`:
+```bash
+helm install --name artifactory-ha --set imagePullSecrets=regsecret jfrog/artifactory-ha
+```
+
+### Logger sidecars
+This chart provides the option to add sidecars to tail various logs from Artifactory. See the available values in [values.yaml](values.yaml)
+
+Get the list of containers in the pod:
+```bash
+kubectl get pods -n <NAMESPACE> <POD_NAME> -o jsonpath='{.spec.containers[*].name}' | tr ' ' '\n'
+```
+
+View a specific log:
+```bash
+kubectl logs -n <NAMESPACE> <POD_NAME> -c <LOG_CONTAINER_NAME>
+```
+
+
+### Custom init containers
+There are cases where a special, unsupported init process is needed, like checking something on the file system or testing something before spinning up the main container.
+
+For this, there is a section for writing custom init containers before and after the predefined init containers in the [values.yaml](values.yaml). By default it's commented out
+```yaml
+artifactory:
+  ## Add custom init containers executed before predefined init containers
+  customInitContainersBegin: |
+    ## Init containers template goes here ##
+  ## Add custom init containers executed after predefined init containers
+  customInitContainers: |
+    ## Init containers template goes here ##
+```
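+
+For example, a minimal sketch of a custom init container that blocks startup until an assumed external service is reachable (the service name `my-external-db`, the port and the busybox image are illustrative, not chart defaults):
+```yaml
+artifactory:
+  ## Run before the predefined init containers
+  customInitContainersBegin: |
+    - name: wait-for-external-db
+      image: busybox:1.30
+      command:
+        - 'sh'
+        - '-c'
+        - 'until nc -z my-external-db 5432; do echo "waiting for my-external-db"; sleep 2; done'
+```
+Any valid Kubernetes container spec can be used in this block.
+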
+### Custom sidecar containers
+There are cases where an extra sidecar container is needed, for example monitoring agents or log collection.
+
+For this, there is a section for writing a custom sidecar container in the [values.yaml](values.yaml). By default it's commented out
+```yaml
+artifactory:
+  ## Add custom sidecar containers
+  customSidecarContainers: |
+    ## Sidecar containers template goes here ##
+```
+
+You can configure the sidecar to run as a custom user if needed by setting the following in the container template
+```yaml
+  # Example of running container as root (id 0)
+  securityContext:
+    runAsUser: 0
+    fsGroup: 0
+```
+
+### Custom volumes
+If you need to use a custom volume in a custom init or sidecar container, you can use this option.
+
+For this, there is a section for defining custom volumes in the [values.yaml](values.yaml). By default it's commented out
+```yaml
+artifactory:
+  ## Add custom volumes
+  customVolumes: |
+    ## Custom volume comes here ##
+```
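+
+As an illustration, a minimal sketch of a scratch volume that a custom init or sidecar container could mount (the names and size are arbitrary examples):
+```yaml
+artifactory:
+  customVolumes: |
+    - name: custom-scratch
+      emptyDir:
+        sizeLimit: 1Gi
+  ## Reference the volume from your custom containers, or mount it into the
+  ## Artifactory container itself with customVolumeMounts
+  customVolumeMounts: |
+    - name: custom-scratch
+      mountPath: /tmp/custom-scratch
+```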
+
+### Add Artifactory User Plugin during installation
+If you need to add an [Artifactory User Plugin](https://github.com/jfrog/artifactory-user-plugins), you can use this option.
+
+Create a secret with the [Artifactory User Plugin](https://github.com/jfrog/artifactory-user-plugins) using the following command:
+```bash
+# Secret with a single user plugin
+kubectl create secret generic archive-old-artifacts --from-file=archiveOldArtifacts.groovy --namespace=artifactory-ha
+
+# Secret with a single user plugin and its configuration file
+kubectl create secret generic webhook --from-file=webhook.groovy --from-file=webhook.config.json.sample --namespace=artifactory-ha
+```
+
+Add the plugin secret names to `plugins.yaml` as follows:
+```yaml
+artifactory:
+  userPluginSecrets:
+    - archive-old-artifacts
+    - webhook
+```
+
+You can now pass the created `plugins.yaml` file to the helm install command to deploy Artifactory with user plugins as follows:
+```bash
+helm install --name artifactory-ha -f plugins.yaml jfrog/artifactory-ha
+```
+
+Alternatively, you may be in a situation in which you would like to create a secret in a Helm chart that depends on this chart. In this scenario, the name of the secret is likely dynamically generated via template functions, so passing a statically named secret isn't possible. In this case, the chart supports evaluating strings as templates via the [`tpl`](https://helm.sh/docs/charts_tips_and_tricks/#using-the-tpl-function) function - simply pass the raw string containing the templating language used to name your secret as a value instead by adding the following to your chart's `values.yaml` file:
+```yaml
+artifactory-ha: # Name of the artifactory-ha dependency
+  artifactory:
+    userPluginSecrets:
+      - '{{ template "my-chart.fullname" . }}'
+```
+NOTE: By defining userPluginSecrets, this overrides any pre-defined plugins from the container image that are stored in /tmp/plugins. At this time [artifactory-pro:6.9.0](https://bintray.com/jfrog/artifactory-pro) is distributed with the `internalUser.groovy` plugin. If you need this plugin in addition to your user plugins, you should include these additional plugins as part of your userPluginSecrets.
+
+### Provide custom configMaps to Artifactory
+If you want to mount a custom file to Artifactory, either an init shell script or a custom configuration file (such as `logback.xml`), you can use this option.
+
+Create a `configmaps.yaml` file with the following content:
+```yaml
+artifactory:
+  configMaps: |
+    logback.xml: |
+      <configuration>
+          <appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
+              <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
+                  <pattern>%date [%-5level] \(%-20c{3}:%L\) %message%n</pattern>
+              </encoder>
+          </appender>
+
+          <root level="INFO">
+              <appender-ref ref="CONSOLE"/>
+          </root>
+      </configuration>
+
+    my-custom-post-start-hook.sh: |
+      echo "This is my custom post start hook"
+
+  customVolumeMounts: |
+    - name: artifactory-configmaps
+      mountPath: /tmp/my-config-map
+
+  postStartCommand: |
+    chmod +x /tmp/my-config-map/my-custom-post-start-hook.sh;
+    /tmp/my-config-map/my-custom-post-start-hook.sh;
+
+  copyOnEveryStartup:
+    - source: /tmp/my-config-map/logback.xml
+      target: etc/
+
+```
+
+and use it with your helm install/upgrade:
+```bash
+helm install --name artifactory-ha -f configmaps.yaml jfrog/artifactory-ha
+```
+
+This will, in turn:
+* Create a configMap with the files you specified above
+* Create a volume pointing to the configMap with the name `artifactory-configmaps`
+* Mount said configMap onto `/tmp/my-config-map` using `customVolumeMounts`
+* Set the shell script we mounted as the `postStartCommand`
+* Copy the `logback.xml` file to its proper location in the `$ARTIFACTORY_HOME/etc` directory.
+
+
+### Artifactory filebeat
+If you want to collect logs from your Artifactory installation and send them to a central log collection solution like ELK, you can use this option.
+ +Create a `filebeat.yaml` values file with the following content: +```yaml +filebeat: + enabled: true + logstashUrl: + resources: + requests: + memory: "100Mi" + cpu: "100m" + limits: + memory: "100Mi" + cpu: "100m" +``` + +You can optionally customize the `filebeat.yaml` to send output to a different location like so: +```yaml +filebeat: + enabled: true + filebeatYml: | + +``` + +and use it with you helm install/upgrade: +```bash +helm install --name artifactory -f filebeat.yaml jfrog/artifactory +``` + +This will start sending your Artifactory logs to the log aggregator of your choice, based on your configuration in the `filebeatYml` + +## Configuration +The following table lists the configurable parameters of the artifactory chart and their default values. + +| Parameter | Description | Default | +|------------------------------|-----------------------------------|-------------------------------------------------------| +| `imagePullSecrets` | Docker registry pull secret | | +| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` | +| `serviceAccount.name` | The name of the ServiceAccount to create | Generated using the fullname template | +| `serviceAccount.annotations` | Artifactory service account annotations | `` | +| `rbac.create` | Specifies whether RBAC resources should be created | `true` | +| `rbac.role.rules` | Rules to create | `[]` | +| `logger.image.repository` | repository for logger image | `busybox` | +| `logger.image.tag` | tag for logger image | `1.30` | +| `artifactory.name` | Artifactory name | `artifactory` | +| `artifactory.image.pullPolicy` | Container pull policy | `IfNotPresent` | +| `artifactory.image.repository` | Container image | `docker.bintray.io/jfrog/artifactory-pro` | +| `artifactory.image.version` | Container image tag | `.Chart.AppVersion` | +| `artifactory.priorityClass.create` | Create a PriorityClass object | `false` | +| `artifactory.priorityClass.value` | Priority Class value | `1000000000` | +| `artifactory.priorityClass.name` | Priority Class name | `{{ template "artifactory-ha.fullname" . }}` | +| `artifactory.priorityClass.existingPriorityClass` | Use existing priority class | `` | +| `artifactory.loggers` | Artifactory loggers (see values.yaml for possible values) | `[]` | +| `artifactory.catalinaLoggers` | Artifactory Tomcat loggers (see values.yaml for possible values) | `[]` | +| `artifactory.customInitContainersBegin`| Custom init containers to run before existing init containers | | +| `artifactory.customInitContainers`| Custom init containers to run after existing init containers | | +| `artifactory.customSidecarContainers`| Custom sidecar containers | | +| `artifactory.customVolumes` | Custom volumes | | +| `artifactory.customVolumeMounts` | Custom Artifactory volumeMounts | | +| `artifactory.customPersistentPodVolumeClaim` | Custom PVC spec to create and attach a unique PVC for each pod on startup with the volumeClaimTemplates feature in StatefulSet | | +| `artifactory.customPersistentVolumeClaim` | Custom PVC spec to be mounted to the all artifactory containers using a volume | | +| `artifactory.userPluginSecrets` | Array of secret names for Artifactory user plugins | | +| `artifactory.masterKey` | Artifactory master key. A 128-Bit key size (hexadecimal encoded) string (32 hex characters). Can be generated with `openssl rand -hex 16`. 
NOTE: This key can be generated only once and cannot be updated once created |`FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF`| +| `artifactory.masterKeySecretName` | Artifactory Master Key secret name | | +| `artifactory.joinKey` | Join Key to connect other services to Artifactory. Can be generated with `openssl rand -hex 16` | `EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE` | +| `artifactory.accessAdmin.ip` | Artifactory access-admin ip to be set upon startup, can use (*) for 0.0.0.0| 127.0.0.1 | +| `artifactory.accessAdmin.password` | Artifactory access-admin password to be set upon startup| | +| `artifactory.accessAdmin.secret` | Artifactory access-admin secret name | | +| `artifactory.accessAdmin.dataKey` | Artifactory access-admin secret data key | | +| `artifactory.preStartCommand` | Command to run before entrypoint starts | | +| `artifactory.postStartCommand` | Command to run after container starts | | +| `artifactory.license.licenseKey` | Artifactory license key. Providing the license key as a parameter will cause a secret containing the license key to be created as part of the release. Use either this setting or the license.secret and license.dataKey. If you use both, the latter will be used. | | +| `artifactory.configMaps` | configMaps to be created as volume by the name `artifactory-configmaps`. In order to use these configMaps, you will need to add `customVolumeMounts` to point to the created volume and mount it onto a container | | +| `artifactory.license.secret` | Artifactory license secret name | | +| `artifactory.license.dataKey`| Artifactory license secret data key | | +| `artifactory.service.name` | Artifactory service name to be set in Nginx configuration | `artifactory` | +| `artifactory.service.type` | Artifactory service type | `ClusterIP` | +| `artifactory.service.clusterIP`| Specific cluster IP or `None` for headless services | `nil` | +| `artifactory.service.loadBalancerSourceRanges`| Artifactory service array of IP CIDR ranges to whitelist (only when service type is LoadBalancer) | | +| `artifactory.service.annotations` | Artifactory service annotations | `{}` | +| `artifactory.service.pool` | Artifactory instances to be in the load balancing pool. `members` or `all` | `members` | +| `artifactory.externalPort` | Artifactory service external port | `8082` | +| `artifactory.internalPort` | Artifactory service internal port (**DO NOT** use port lower than 1024) | `8082` | +| `artifactory.internalArtifactoryPort` | Artifactory service internal port (**DO NOT** use port lower than 1024) | `8081` | +| `artifactory.externalArtifactoryPort` | Artifactory service external port | `8081` | +| `artifactory.extraEnvironmentVariables` | Extra environment variables to pass to Artifactory. Supports evaluating strings as templates via the [`tpl`](https://helm.sh/docs/charts_tips_and_tricks/#using-the-tpl-function) function. 
See [documentation](https://www.jfrog.com/confluence/display/RTF/Installing+with+Docker#InstallingwithDocker-SupportedEnvironmentVariables) | | +| `artifactory.livenessProbe.enabled` | Enable liveness probe | `true` | +| `artifactory.livenessProbe.path` | liveness probe HTTP Get path | `/router/api/v1/system/health` | +| `artifactory.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 180 | +| `artifactory.livenessProbe.periodSeconds` | How often to perform the probe | 10 | +| `artifactory.livenessProbe.timeoutSeconds` | When the probe times out | 10 | +| `artifactory.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | 1 | +| `artifactory.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 10 | +| `artifactory.readinessProbe.enabled` | would you like a readinessProbe to be enabled | `true` | +| `artifactory.readinessProbe.path` | readiness probe HTTP Get path | `/router/api/v1/system/health` | +| `artifactory.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | 60 | +| `artifactory.readinessProbe.periodSeconds` | How often to perform the probe | 10 | +| `artifactory.readinessProbe.timeoutSeconds` | When the probe times out | 10 | +| `artifactory.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | 1 | +| `artifactory.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 10 | +| `artifactory.copyOnEveryStartup` | List of files to copy on startup from source (which is absolute) to target (which is relative to ARTIFACTORY_HOME | | +| `artifactory.deleteDBPropertiesOnStartup` | Whether to delete the ARTIFACTORY_HOME/etc/db.properties file on startup. Disabling this will remove the ability for the db.properties to be updated with any DB-related environment variables change (e.g. DB_HOST, DB_URL) | `true` | +| `artifactory.database.maxOpenConnections` | Maximum amount of open connections from Artifactory to the DB | `80` | +| `artifactory.haDataDir.enabled` | Enable haDataDir for eventual storage in the HA cluster | `false` | +| `artifactory.haDataDir.path` | Path to the directory intended for use with NFS eventual configuration for HA | | +| `artifactory.persistence.mountPath` | Artifactory persistence volume mount path | `"/var/opt/jfrog/artifactory"` | +| `artifactory.persistence.enabled` | Artifactory persistence volume enabled | `true` | +| `artifactory.persistence.accessMode` | Artifactory persistence volume access mode | `ReadWriteOnce` | +| `artifactory.persistence.size` | Artifactory persistence or local volume size | `200Gi` | +| `artifactory.persistence.binarystore.enabled` | whether you want to mount the binarystore.xml file from a secret created by the chart. If `false` you will need need to get the binarystore.xml file into the file-system from either an `initContainer` or using a `preStartCommand` | `true` | +| `artifactory.persistence.binarystoreXml` | Artifactory binarystore.xml template | See `values.yaml` | +| `artifactory.persistence.customBinarystoreXmlSecret` | A custom Secret for binarystore.xml | `` | +| `artifactory.persistence.maxCacheSize` | Artifactory cache-fs provider maxCacheSize in bytes | `50000000000` | +| `artifactory.persistence.cacheProviderDir` | the root folder of binaries for the filestore cache. 
If the value specified starts with a forward slash ("/") it is considered the fully qualified path to the filestore folder. Otherwise, it is considered relative to the *baseDataDir*. | `cache` | +| `artifactory.persistence.type` | Artifactory HA storage type | `file-system` | +| `artifactory.persistence.redundancy` | Artifactory HA storage redundancy | `3` | +| `artifactory.persistence.nfs.ip` | NFS server IP | | +| `artifactory.persistence.nfs.haDataMount` | NFS data directory | `/data` | +| `artifactory.persistence.nfs.haBackupMount` | NFS backup directory | `/backup` | +| `artifactory.persistence.nfs.dataDir` | HA data directory | `/var/opt/jfrog/artifactory-ha` | +| `artifactory.persistence.nfs.backupDir` | HA backup directory | `/var/opt/jfrog/artifactory-backup` | +| `artifactory.persistence.nfs.capacity` | NFS PVC size | `200Gi` | +| `artifactory.persistence.nfs.mountOptions` | NFS mount options | `[]` | +| `artifactory.persistence.eventual.numberOfThreads` | Eventual number of threads | `10` | +| `artifactory.persistence.googleStorage.endpoint` | Google Storage API endpoint| `storage.googleapis.com` | +| `artifactory.persistence.googleStorage.httpsOnly` | Google Storage API has to be consumed https only| `false` | +| `artifactory.persistence.googleStorage.bucketName` | Google Storage bucket name | `artifactory-ha` | +| `artifactory.persistence.googleStorage.identity` | Google Storage service account id | | +| `artifactory.persistence.googleStorage.credential` | Google Storage service account key | | +| `artifactory.persistence.googleStorage.path` | Google Storage path in bucket | `artifactory-ha/filestore` | +| `artifactory.persistence.googleStorage.bucketExists`| Google Storage bucket exists therefore does not need to be created.| `false` | +| `artifactory.persistence.awsS3.bucketName` | AWS S3 bucket name | `artifactory-ha` | +| `artifactory.persistence.awsS3.endpoint` | AWS S3 bucket endpoint | See https://docs.aws.amazon.com/general/latest/gr/rande.html | +| `artifactory.persistence.awsS3.region` | AWS S3 bucket region | | +| `artifactory.persistence.awsS3.roleName` | AWS S3 IAM role name | | +| `artifactory.persistence.awsS3.identity` | AWS S3 AWS_ACCESS_KEY_ID | | +| `artifactory.persistence.awsS3.credential` | AWS S3 AWS_SECRET_ACCESS_KEY | | +| `artifactory.persistence.awsS3.properties` | AWS S3 additional properties | | +| `artifactory.persistence.awsS3.path` | AWS S3 path in bucket | `artifactory-ha/filestore` | +| `artifactory.persistence.awsS3.refreshCredentials` | AWS S3 renew credentials on expiration | `true` (When roleName is used, this parameter will be set to true) | +| `artifactory.persistence.awsS3.httpsOnly` | AWS S3 https access to the bucket only | `true` | +| `artifactory.persistence.awsS3.testConnection` | AWS S3 test connection on start up | `false` | +| `artifactory.persistence.awsS3.s3AwsVersion` | AWS S3 signature version | `AWS4-HMAC-SHA256` | +| `artifactory.persistence.awsS3V3.testConnection` | AWS S3 test connection on start up | `false` | +| `artifactory.persistence.awsS3V3.identity` | AWS S3 AWS_ACCESS_KEY_ID | | +| `artifactory.persistence.awsS3V3.credential` | AWS S3 AWS_SECRET_ACCESS_KEY | | +| `artifactory.persistence.awsS3V3.region` | AWS S3 bucket region | | +| `artifactory.persistence.awsS3V3.bucketName` | AWS S3 bucket name | `artifactory-aws` | +| `artifactory.persistence.awsS3V3.path` | AWS S3 path in bucket | `artifactory/filestore` | +| `artifactory.persistence.awsS3V3.endpoint` | AWS S3 bucket endpoint | See 
https://docs.aws.amazon.com/general/latest/gr/rande.html | +| `artifactory.persistence.awsS3V3.kmsServerSideEncryptionKeyId` | AWS S3 encryption key ID or alias | | +| `artifactory.persistence.awsS3V3.kmsKeyRegion` | AWS S3 KMS Key region | | +| `artifactory.persistence.awsS3V3.kmsCryptoMode` | AWS S3 KMS encryption mode | See https://www.jfrog.com/confluence/display/RTF/Configuring+the+Filestore#ConfiguringtheFilestore-AmazonS3OfficialSDKTemplate | +| `artifactory.persistence.awsS3V3.useInstanceCredentials` | AWS S3 Use default authentication mechanism | See https://www.jfrog.com/confluence/display/RTF/Configuring+the+Filestore#ConfiguringtheFilestore-authentication | +| `artifactory.persistence.awsS3V3.usePresigning` | AWS S3 Use URL signing | `false` | +| `artifactory.persistence.awsS3V3.signatureExpirySeconds` | AWS S3 Validity period in seconds for signed URLs | `300` | +| `artifactory.persistence.awsS3V3.cloudFrontDomainName` | AWS CloudFront domain name | See https://www.jfrog.com/confluence/display/RTF/Direct+Cloud+Storage+Download#DirectCloudStorageDownload-UsingCloudFront(Optional)| +| `artifactory.persistence.awsS3V3.cloudFrontKeyPairId` | AWS CloudFront key pair ID | See https://www.jfrog.com/confluence/display/RTF/Direct+Cloud+Storage+Download#DirectCloudStorageDownload-UsingCloudFront(Optional)| +| `artifactory.persistence.awsS3V3.cloudFrontPrivateKey` | AWS CloudFront private key | See https://www.jfrog.com/confluence/display/RTF/Direct+Cloud+Storage+Download#DirectCloudStorageDownload-UsingCloudFront(Optional)| +| `artifactory.persistence.azureBlob.accountName` | Azure Blob Storage account name | `` | +| `artifactory.persistence.azureBlob.accountKey` | Azure Blob Storage account key | `` | +| `artifactory.persistence.azureBlob.endpoint` | Azure Blob Storage endpoint | `` | +| `artifactory.persistence.azureBlob.containerName` | Azure Blob Storage container name | `` | +| `artifactory.persistence.azureBlob.testConnection` | Azure Blob Storage test connection | `false` | +| `artifactory.persistence.fileSystem.existingSharedClaim` | Enable using an existing shared pvc | `false` | +| `artifactory.persistence.fileStorage.dataDir` | HA data directory | `/var/opt/jfrog/artifactory/artifactory-data` | +| `artifactory.persistence.fileStorage.backupDir` | HA backup directory | `/var/opt/jfrog/artifactory-backup` | +| `artifactory.javaOpts.other` | Artifactory additional java options (for all nodes) | | +| `artifactory.primary.labels` | Artifactory primary node labels | `{}` | +| `artifactory.primary.resources.requests.memory` | Artifactory primary node initial memory request | | +| `artifactory.primary.resources.requests.cpu` | Artifactory primary node initial cpu request | | +| `artifactory.primary.resources.limits.memory` | Artifactory primary node memory limit | | +| `artifactory.primary.resources.limits.cpu` | Artifactory primary node cpu limit | | +| `artifactory.primary.javaOpts.xms` | Artifactory primary node java Xms size | | +| `artifactory.primary.javaOpts.xmx` | Artifactory primary node java Xms size | | +| `artifactory.primary.javaOpts.corePoolSize` | The number of async processes that can run in parallel in the primary node - https://jfrog.com/knowledge-base/how-do-i-tune-artifactory-for-heavy-loads/ | `16` | +| `artifactory.primary.javaOpts.jmx.enabled` | Enable JMX monitoring | `false` | +| `artifactory.primary.javaOpts.jmx.port` | JMX Port number | `9010` | +| `artifactory.primary.javaOpts.jmx.host` | JMX hostname (parsed as a helm template) | `{{ template 
"artifactory-ha.primary.name" $ }}` | +| `artifactory.primary.javaOpts.jmx.ssl` | Enable SSL | `false` | +| `artifactory.primary.javaOpts.jmx.authenticate` | Enable JMX authentication | `false` | +| `artifactory.primary.javaOpts.jmx.accessFile` | The path to the JMX access file, when JMX authentication is enabled | | +| `artifactory.primary.javaOpts.jmx.passwordFile` | The path to the JMX password file, when JMX authentication is enabled | | +| `artifactory.primary.javaOpts.other` | Artifactory primary node additional java options | | +| `artifactory.primary.persistence.existingClaim` | Whether to use an existing pvc for the primary node | `false` | +| `artifactory.node.labels` | Artifactory member node labels | `{}` | +| `artifactory.node.replicaCount` | Artifactory member node replica count | `2` | +| `artifactory.node.minAvailable` | Artifactory member node min available count | `1` | +| `artifactory.node.resources.requests.memory` | Artifactory member node initial memory request | | +| `artifactory.node.resources.requests.cpu` | Artifactory member node initial cpu request | | +| `artifactory.node.resources.limits.memory` | Artifactory member node memory limit | | +| `artifactory.node.resources.limits.cpu` | Artifactory member node cpu limit | | +| `artifactory.node.javaOpts.xms` | Artifactory member node java Xms size | | +| `artifactory.node.javaOpts.xmx` | Artifactory member node java Xms size | | +| `artifactory.node.javaOpts.corePoolSize` | The number of async processes that can run in parallel in the member nodes - https://jfrog.com/knowledge-base/how-do-i-tune-artifactory-for-heavy-loads/ | `16` | +| `artifactory.node.javaOpts.jmx.enabled` | Enable JMX monitoring | `false` | +| `artifactory.node.javaOpts.jmx.port` | JMX Port number | `9010` | +| `artifactory.node.javaOpts.jmx.host` | JMX hostname (parsed as a helm template) | `{{ template "artifactory-ha.fullname" $ }}` | +| `artifactory.node.javaOpts.jmx.ssl` | Enable SSL | `false` | +| `artifactory.node.javaOpts.jmx.authenticate` | Enable JMX authentication | `false` | +| `artifactory.node.javaOpts.jmx.accessFile` | The path to the JMX access file, when JMX authentication is enabled | | +| `artifactory.node.javaOpts.jmx.passwordFile` | The path to the JMX password file, when JMX authentication is enabled | | +| `artifactory.node.javaOpts.other` | Artifactory member node additional java options | | +| `artifactory.node.persistence.existingClaim` | Whether to use existing PVCs for the member nodes | `false` | +| `artifactory.node.waitForPrimaryStartup.enabled` | Whether to wait for the primary node to start before starting up the member nodes | `false` | +| `artifactory.node.waitForPrimaryStartup.time` | The amount of time to wait for the primary node to start before starting up the member nodes | `60` | +| `artifactory.systemYaml` | Artifactory system configuration (`system.yaml`) as described here - https://www.jfrog.com/confluence/display/JFROG/Artifactory+System+YAML | `see values.yaml` | +| `access.database.maxOpenConnections` | Maximum amount of open connections from Access to the DB | `80` | +| `initContainers.resources.requests.memory` | Init containers initial memory request | | +| `initContainers.resources.requests.cpu` | Init containers initial cpu request | | +| `initContainers.resources.limits.memory` | Init containers memory limit | | +| `initContainers.resources.limits.cpu` | Init containers cpu limit | | +| `ingress.enabled` | If true, Artifactory Ingress will be created | `false` | +| `ingress.annotations` | 
Artifactory Ingress annotations | `{}` | +| `ingress.labels` | Artifactory Ingress labels | `{}` | +| `ingress.hosts` | Artifactory Ingress hostnames | `[]` | +| `ingress.routerPath` | Router Ingress path | `/` | +| `ingress.artifactoryPath` | Artifactory Ingress path | `/artifactory` | +| `ingress.tls` | Artifactory Ingress TLS configuration (YAML) | `[]` | +| `ingress.defaultBackend.enabled` | If true, the default `backend` will be added using serviceName and servicePort | `true` | +| `ingress.annotations` | Ingress annotations, which are written out if annotations section exists in values. Everything inside of the annotations section will appear verbatim inside the resulting manifest. See `Ingress annotations` section below for examples of how to leverage the annotations, specifically for how to enable docker authentication. | | +| `ingress.additionalRules` | Ingress additional rules to be added to the Artifactory ingress. | `[]` | +| `nginx.enabled` | Deploy nginx server | `true` | +| `nginx.name` | Nginx name | `nginx` | +| `nginx.replicaCount` | Nginx replica count | `1` | +| `nginx.uid` | Nginx User Id | `104` | +| `nginx.gid` | Nginx Group Id | `107` | +| `nginx.image.repository` | Container image | `docker.bintray.io/jfrog/nginx-artifactory-pro` | +| `nginx.image.version` | Container version | `.Chart.AppVersion` | +| `nginx.image.pullPolicy` | Container pull policy | `IfNotPresent` | +| `nginx.labels` | Nginx deployment labels | `{}` | +| `nginx.loggers` | Artifactory loggers (see values.yaml for possible values) | `[]` | +| `nginx.mainConf` | Content of the Artifactory nginx main nginx.conf config file | `see values.yaml` | +| `nginx.artifactoryConf` | Content of Artifactory nginx artifactory.conf config file | `see values.yaml` | +| `nginx.service.type` | Nginx service type | `LoadBalancer` | +| `nginx.service.clusterIP` | Specific cluster IP or `None` for headless services | `nil` | +| `nginx.service.loadBalancerSourceRanges`| Nginx service array of IP CIDR ranges to whitelist (only when service type is LoadBalancer) | | +| `nginx.service.labels` | Nginx service labels | `{}` | +| `nginx.service.annotations` | Nginx service annotations | `{}` | +| `nginx.service.externalTrafficPolicy`| Nginx service desires to route external traffic to node-local or cluster-wide endpoints. 
| `Cluster` | +| `nginx.loadBalancerIP`| Provide Static IP to configure with Nginx | | +| `nginx.http.enabled` | Nginx http service enabled/disabled | true | +| `nginx.http.externalPort` | Nginx service external port | `80` | +| `nginx.http.internalPort` | Nginx service internal port | `80` | +| `nginx.https.enabled` | Nginx http service enabled/disabled | true | +| `nginx.https.externalPort` | Nginx service external port | `443` | +| `nginx.https.internalPort` | Nginx service internal port | `443` | +| `nginx.externalPortHttp` | DEPRECATED: Nginx service external port | `80` | +| `nginx.internalPortHttp` | DEPRECATED: Nginx service internal port | `80` | +| `nginx.externalPortHttps` | DEPRECATED: Nginx service external port | `443` | +| `nginx.internalPortHttps` | DEPRECATED: Nginx service internal port | `443` | +| `nginx.livenessProbe.enabled` | would you like a liveness Probe to be enabled | `true` | +| `nginx.livenessProbe.path` | liveness probe HTTP Get path | `/router/api/v1/system/health` | +| `nginx.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 100 | +| `nginx.livenessProbe.periodSeconds` | How often to perform the probe | 10 | +| `nginx.livenessProbe.timeoutSeconds` | When the probe times out | 10 | +| `nginx.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | 1 | +| `nginx.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 10 | +| `nginx.readinessProbe.enabled` | would you like a readinessProbe to be enabled | `true` | +| `nginx.readinessProbe.path` | Readiness probe HTTP Get path | `/router/api/v1/system/health` | +| `nginx.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | 60 | +| `nginx.readinessProbe.periodSeconds` | How often to perform the probe | 10 | +| `nginx.readinessProbe.timeoutSeconds` | When the probe times out | 10 | +| `nginx.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | 1 | +| `nginx.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 10 | +| `nginx.tlsSecretName` | SSL secret that will be used by the Nginx pod | | +| `nginx.customConfigMap` | Nginx CustomeConfigMap name for `nginx.conf` | ` ` | +| `nginx.customArtifactoryConfigMap`| Nginx CustomeConfigMap name for `artifactory-ha.conf` | ` ` | +| `nginx.resources.requests.memory` | Nginx initial memory request | `250Mi` | +| `nginx.resources.requests.cpu` | Nginx initial cpu request | `100m` | +| `nginx.resources.limits.memory` | Nginx memory limit | `250Mi` | +| `nginx.resources.limits.cpu` | Nginx cpu limit | `500m` | +| `nginx.persistence.mountPath` | Nginx persistence volume mount path | `"/var/opt/jfrog/nginx"` | +| `nginx.persistence.enabled` | Nginx persistence volume enabled. 
This is only available when the nginx.replicaCount is set to 1 | `false` | +| `nginx.persistence.accessMode` | Nginx persistence volume access mode | `ReadWriteOnce` | +| `nginx.persistence.size` | Nginx persistence volume size | `5Gi` | +| `waitForDatabase` | Wait for database (using wait-for-db init container) | `true` | +| `postgresql.enabled` | Use enclosed PostgreSQL as database | `true` | +| `postgresql.imageTag` | PostgreSQL version | `9.6.11` | +| `postgresql.postgresqlDatabase` | PostgreSQL database name | `artifactory` | +| `postgresql.postgresqlUsername` | PostgreSQL database user | `artifactory` | +| `postgresql.postgresqlPassword` | PostgreSQL database password | | +| `postgresql.persistence.enabled` | PostgreSQL use persistent storage | `true` | +| `postgresql.persistence.size` | PostgreSQL persistent storage size | `50Gi` | +| `postgresql.service.port` | PostgreSQL database port | `5432` | +| `postgresql.resources.requests.memory` | PostgreSQL initial memory request | | +| `postgresql.resources.requests.cpu` | PostgreSQL initial cpu request | | +| `postgresql.resources.limits.memory` | PostgreSQL memory limit | | +| `postgresql.resources.limits.cpu` | PostgreSQL cpu limit | | +| `database.type` | External database type (`postgresql`, `mysql`, `oracle` or `mssql`) | | +| `database.driver` | External database driver e.g. `org.postgresql.Driver` | | +| `database.url` | External database connection URL | | +| `database.user` | External database username | | +| `database.password` | External database password | | +| `database.secrets.user.name` | External database username `Secret` name | | +| `database.secrets.user.key` | External database username `Secret` key | | +| `database.secrets.password.name` | External database password `Secret` name | | +| `database.secrets.password.key` | External database password `Secret` key | | +| `database.secrets.url.name ` | External database url `Secret` name | | +| `database.secrets.url.key` | External database url `Secret` key | | +| `networkpolicy.name` | Becomes part of the NetworkPolicy object name | `artifactory` | +| `networkpolicy.podselector` | Contains the YAML that specifies how to match pods. Usually using matchLabels. | | +| `networkpolicy.ingress` | YAML snippet containing to & from rules applied to incoming traffic | `- {}` (open to all inbound traffic) | +| `networkpolicy.egress` | YAML snippet containing to & from rules applied to outgoing traffic | `- {}` (open to all outbound traffic) | +| `filebeat.enabled` | Enable a filebeat container to send your logs to a log management solution like ELK | `false` | +| `filebeat.name` | filebeat container name | `artifactory-filebeat` | +| `filebeat.image.repository` | filebeat Docker image repository | `docker.elastic.co/beats/filebeat` | +| `filebeat.image.version` | filebeat Docker image version | `7.5.1` | +| `filebeat.logstashUrl` | The URL to the central Logstash service, if you have one | `logstash:5044` | +| `filebeat.livenessProbe.exec.command` | liveness probe exec command | see [values.yaml](stable/artifactory-ha/values.yaml) | +| `filebeat.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| 10 | +| `filebeat.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 180 | +| `filebeat.livenessProbe.periodSeconds` | How often to perform the probe | 10 | +| `filebeat.readinessProbe.exec.command` | readiness probe exec command | see [values.yaml](stable/artifactory-ha/values.yaml) | +| `filebeat.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 10 | +| `filebeat.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | 180 | +| `filebeat.readinessProbe.periodSeconds` | How often to perform the probe | 10 | +| `filebeat.resources.requests.memory` | Filebeat initial memory request | | +| `filebeat.resources.requests.cpu` | Filebeat initial cpu request | | +| `filebeat.resources.limits.memory` | Filebeat memory limit | | +| `filebeat.resources.limits.cpu` | Filebeat cpu limit | | +| `filebeat.filebeatYml` | Filebeat yaml configuration file | see [values.yaml](stable/artifactory-ha/values.yaml) | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. + +### Ingress and TLS +To get Helm to create an ingress object with a hostname, add these two lines to your Helm command: +```bash +helm install --name artifactory-ha \ + --set ingress.enabled=true \ + --set ingress.hosts[0]="artifactory.company.com" \ + --set artifactory.service.type=NodePort \ + --set nginx.enabled=false \ + jfrog/artifactory-ha +``` + +If your cluster allows automatic creation/retrieval of TLS certificates (e.g. [cert-manager](https://github.com/jetstack/cert-manager)), please refer to the documentation for that mechanism. + +To manually configure TLS, first create/retrieve a key & certificate pair for the address(es) you wish to protect. Then create a TLS secret in the namespace: + +```bash +kubectl create secret tls artifactory-tls --cert=path/to/tls.cert --key=path/to/tls.key +``` + +Include the secret's name, along with the desired hostnames, in the Artifactory Ingress TLS section of your custom `values.yaml` file: + +```yaml + ingress: + ## If true, Artifactory Ingress will be created + ## + enabled: true + + ## Artifactory Ingress hostnames + ## Must be provided if Ingress is enabled + ## + hosts: + - artifactory.domain.com + annotations: + kubernetes.io/tls-acme: "true" + ## Artifactory Ingress TLS configuration + ## Secrets must be manually created in the namespace + ## + tls: + - secretName: artifactory-tls + hosts: + - artifactory.domain.com +``` + +### Ingress annotations + +This example specifically enables Artifactory to work as a Docker Registry using the Repository Path method. See [Artifactory as Docker Registry](https://www.jfrog.com/confluence/display/RTF/Getting+Started+with+Artifactory+as+a+Docker+Registry) documentation for more information about this setup. 
+
+```yaml
+ingress:
+  enabled: true
+  defaultBackend:
+    enabled: false
+  hosts:
+    - myhost.example.com
+  annotations:
+    ingress.kubernetes.io/force-ssl-redirect: "true"
+    ingress.kubernetes.io/proxy-body-size: "0"
+    ingress.kubernetes.io/proxy-read-timeout: "600"
+    ingress.kubernetes.io/proxy-send-timeout: "600"
+    kubernetes.io/ingress.class: nginx
+    nginx.ingress.kubernetes.io/configuration-snippet: |
+      rewrite ^/(v2)/token /artifactory/api/docker/null/v2/token;
+      rewrite ^/(v2)/([^\/]*)/(.*) /artifactory/api/docker/$2/$1/$3;
+    nginx.ingress.kubernetes.io/proxy-body-size: "0"
+  tls:
+    - hosts:
+        - "myhost.example.com"
+```
+
+### Ingress additional rules
+
+You have the option to add additional ingress rules to the Artifactory ingress. An example use case is routing the `/xray` path to Xray.
+In order to do that, simply add the following to an `artifactory-ha-values.yaml` file:
+```yaml
+ingress:
+  enabled: true
+
+  defaultBackend:
+    enabled: false
+
+  annotations:
+    kubernetes.io/ingress.class: nginx
+    nginx.ingress.kubernetes.io/configuration-snippet: |
+      rewrite "(?i)/xray(/|$)(.*)" /$2 break;
+
+  additionalRules: |
+    - host: <HOST_NAME>
+      http:
+        paths:
+          - path: /
+            backend:
+              serviceName: <XRAY_SERVICE_NAME>
+              servicePort: <XRAY_SERVICE_PORT>
+          - path: /xray
+            backend:
+              serviceName: <XRAY_SERVICE_NAME>
+              servicePort: <XRAY_SERVICE_PORT>
+          - path: /artifactory
+            backend:
+              serviceName: {{ template "artifactory.nginx.fullname" . }}
+              servicePort: {{ .Values.nginx.externalPortHttp }}
+```
+
+and running:
+```bash
+helm upgrade --install artifactory-ha jfrog/artifactory-ha -f artifactory-ha-values.yaml
+```
+
+
+
+## Useful links
+- https://www.jfrog.com/confluence/display/EP/Getting+Started
+- https://www.jfrog.com/confluence/display/RTF/Installing+Artifactory
+- https://www.jfrog.com/confluence/
diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/ReverseProxyConfiguration.md b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/ReverseProxyConfiguration.md
new file mode 100755
index 0000000..8515932
--- /dev/null
+++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/ReverseProxyConfiguration.md
@@ -0,0 +1,140 @@
+# JFrog Artifactory Reverse Proxy Settings using Nginx
+
+#### Reverse Proxy
+* To use Artifactory as a Docker registry it's mandatory to use a reverse proxy.
+* Artifactory provides a Reverse Proxy Configuration Generator screen in which you can fill in a set of fields to generate
+the required configuration snippet, which you can then download and install directly in the corresponding directory of your reverse proxy server.
+* To learn about configuring NGINX or Apache as a reverse proxy, refer to the documentation provided on the [JFrog wiki](https://www.jfrog.com/confluence/display/RTF/Configuring+a+Reverse+Proxy)
+* By default the Artifactory helm chart uses Nginx for reverse proxy and load balancing.
+
+**Note**: The Nginx image distributed with the Artifactory helm chart is a custom image managed and maintained by JFrog.
+
+#### Features of Artifactory Nginx
+* Provides a default configuration with a self-signed SSL certificate generated on each helm install/upgrade.
+* Persists the configuration and SSL certificate in the `/var/opt/jfrog/nginx` directory
+
+#### Changing the default Artifactory nginx conf
+Use a values.yaml file to change the value of `nginx.mainConf` or `nginx.artifactoryConf`.
+These configurations will be mounted into the nginx container using a configMap.
+For example:
+1. Create a values file `nginx-values.yaml` with the following values:
+```yaml
+nginx:
+  artifactoryConf: |
+    ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3;
+    ssl_certificate {{ .Values.nginx.persistence.mountPath }}/ssl/tls.crt;
+    ssl_certificate_key {{ .Values.nginx.persistence.mountPath }}/ssl/tls.key;
+    ssl_session_cache shared:SSL:1m;
+    ssl_prefer_server_ciphers on;
+    ## server configuration
+    server {
+      listen {{ .Values.nginx.internalPortHttps }} ssl;
+      listen {{ .Values.nginx.internalPortHttp }};
+      ## Change to the DNS name you use to access Artifactory
+      server_name ~(?<repo>.+)\.{{ include "artifactory-ha.fullname" . }} {{ include "artifactory-ha.fullname" . }};
+
+      if ($http_x_forwarded_proto = '') {
+        set $http_x_forwarded_proto  $scheme;
+      }
+      ## Application specific logs
+      ## access_log /var/log/nginx/artifactory-access.log timing;
+      ## error_log /var/log/nginx/artifactory-error.log;
+      rewrite ^/$ /artifactory/webapp/ redirect;
+      rewrite ^/artifactory/?(/webapp)?$ /artifactory/webapp/ redirect;
+      if ( $repo != "" ) {
+        rewrite ^/(v1|v2)/(.*) /artifactory/api/docker/$repo/$1/$2 break;
+      }
+      rewrite ^/(v1|v2)/([^/]+)(.*)$ /artifactory/api/docker/$2/$1/$3;
+      rewrite ^/(v1|v2)/ /artifactory/api/docker/$1/;
+      chunked_transfer_encoding on;
+      client_max_body_size 0;
+      location /artifactory/ {
+        proxy_read_timeout  900;
+        proxy_pass_header   Server;
+        proxy_cookie_path   ~*^/.* /;
+        if ( $request_uri ~ ^/artifactory/(.*)$ ) {
+          proxy_pass          http://{{ include "artifactory-ha.fullname" . }}:{{ .Values.artifactory.externalPort }}/artifactory/$1;
+        }
+        proxy_pass          http://{{ include "artifactory-ha.fullname" . }}:{{ .Values.artifactory.externalPort }}/artifactory/;
+        proxy_set_header    X-Artifactory-Override-Base-Url $http_x_forwarded_proto://$host:$server_port/artifactory;
+        proxy_set_header    X-Forwarded-Port  $server_port;
+        proxy_set_header    X-Forwarded-Proto $http_x_forwarded_proto;
+        proxy_set_header    Host              $http_host;
+        proxy_set_header    X-Forwarded-For   $proxy_add_x_forwarded_for;
+      }
+    }
+```
+
+2. Install/upgrade artifactory:
+```bash
+helm upgrade --install artifactory-ha jfrog/artifactory-ha -f nginx-values.yaml
+```
+
+
+#### Steps to use a static configuration for reverse proxy in nginx
+1. Get the Artifactory service name using this command: `kubectl get svc -n $NAMESPACE`
+
+2. Create an `artifactory.conf` file with the nginx configuration. More [nginx configuration examples](https://github.com/jfrog/artifactory-docker-examples/tree/master/files/nginx/conf.d)
+
+   Following is an example `artifactory.conf`
+
+   **Note**:
+   * Create the file with the name `artifactory.conf`, as it's fixed in the configMap key.
+   * Replace `artifactory-artifactory` with the service name taken from step 1.
+
+   ```
+   ## add ssl entries when https has been set in config
+   ssl_certificate      /var/opt/jfrog/nginx/ssl/tls.crt;
+   ssl_certificate_key  /var/opt/jfrog/nginx/ssl/tls.key;
+   ssl_session_cache shared:SSL:1m;
+   ssl_prefer_server_ciphers   on;
+   ## server configuration
+   server {
+     listen 443 ssl;
+     listen 80;
+     ## Change to the DNS name you use to access Artifactory
+     server_name ~(?<repo>.+)\.artifactory-artifactory artifactory-artifactory;
+
+     if ($http_x_forwarded_proto = '') {
+       set $http_x_forwarded_proto  $scheme;
+     }
+     ## Application specific logs
+     ## access_log /var/log/nginx/artifactory-access.log timing;
+     ## error_log /var/log/nginx/artifactory-error.log;
+     rewrite ^/$ /artifactory/webapp/ redirect;
+     rewrite ^/artifactory/?(/webapp)?$ /artifactory/webapp/ redirect;
+     if ( $repo != "" ) {
+       rewrite ^/(v1|v2)/(.*) /artifactory/api/docker/$repo/$1/$2 break;
+     }
+     rewrite ^/(v1|v2)/([^/]+)(.*)$ /artifactory/api/docker/$2/$1/$3;
+     rewrite ^/(v1|v2)/ /artifactory/api/docker/$1/;
+     chunked_transfer_encoding on;
+     client_max_body_size 0;
+     location /artifactory/ {
+       proxy_read_timeout  900;
+       proxy_pass_header   Server;
+       proxy_cookie_path   ~*^/.* /;
+       if ( $request_uri ~ ^/artifactory/(.*)$ ) {
+         proxy_pass          http://artifactory-artifactory:8081/artifactory/$1 break;
+       }
+       proxy_pass          http://artifactory-artifactory:8081/artifactory/;
+       proxy_set_header    X-Artifactory-Override-Base-Url $http_x_forwarded_proto://$host:$server_port/artifactory;
+       proxy_set_header    X-Forwarded-Port  $server_port;
+       proxy_set_header    X-Forwarded-Proto $http_x_forwarded_proto;
+       proxy_set_header    Host              $http_host;
+       proxy_set_header    X-Forwarded-For   $proxy_add_x_forwarded_for;
+     }
+   }
+   ```
+
+3. Create a configMap from the `artifactory.conf` created in the step above.
+   ```bash
+   kubectl create configmap art-nginx-conf --from-file=artifactory.conf
+   ```
+4. Deploy Artifactory using the helm chart.
+   You can achieve this by providing the name of the configMap created above to `nginx.customArtifactoryConfigMap` in [values.yaml](values.yaml)
+
+   Following is the command to set the value at runtime:
+   ```bash
+   helm install --name artifactory-ha --set nginx.customArtifactoryConfigMap=art-nginx-conf jfrog/artifactory-ha
+   ```
\ No newline at end of file
diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/UPGRADE_NOTES.md b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/UPGRADE_NOTES.md
new file mode 100755
index 0000000..640474f
--- /dev/null
+++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/UPGRADE_NOTES.md
@@ -0,0 +1,33 @@
+# JFrog Artifactory Chart Upgrade Notes
+This file describes special upgrade notes needed at specific versions
+
+## Upgrade from 0.X to 1.X
+**DOWNTIME IS REQUIRED FOR AN UPGRADE!**
+* If this is a new deployment or you already use an external database (`postgresql.enabled=false`), these changes **do not affect you!**
+* PostgreSQL sub chart was upgraded to version `6.5.x`. This version is not backward compatible with the old version (`0.9.5`)!
+* Note the following **PostgreSQL** Helm chart changes
+  * The chart configuration has changed!
See [values.yaml](values.yaml) for the new keys used + * **PostgreSQL** is deployed as a StatefulSet + * See [PostgreSQL helm chart](https://hub.helm.sh/charts/stable/postgresql) for all available configurations +* Upgrade + * Due to breaking changes in the **PostgreSQL** Helm chart, a migration of the database is needed from the old to the new database + * The recommended migration process is the [full system export and import](https://www.jfrog.com/confluence/display/RTF/Importing+and+Exporting) + * **NOTE:** To save time, export only metadata and configuration (check `Exclude Content` in the `System Import & Export`) since the Artifactory filestore is persisted + * Upgrade steps: + 1. Block user access to Artifactory (do not shutdown) + a. Scale down the cluster to primary node only (`node.replicaCount=0`) so the exported db and configuration will be kept on one known node (the primary) + b. If your Artifactory HA K8s service is set to member nodes only (`service.pool=members`) you will need to access the primary node directly (use `kubectl port-forward`) + 2. Perform `Export System` from the `Admin` -> `Import & Export` -> `System` -> `Export System` + a. Check `Exclude Content` to save export size (as Artifactory filestore will persist across upgrade) + b. Choose to save the export on the persisted Artifactory volume (`/var/opt/jfrog/artifactory/`) + c. Click `Export` (this can take some time) + 3. Run the `helm upgrade` with the new version. Old PostgreSQL will be removed and new one deployed + a. You must pass explicit "ready for upgrade flag" with `--set databaseUpgradeReady=yes`. Failing to provide this will block the upgrade! + 4. Once ready, open Artifactory UI (you might need to re-enter a valid license). Skip all onboarding wizard steps + a. **NOTE:** Don't worry you can't see the old config and files. It will all restore with the system import in the next step + 5. Perform `Import System` from the `Admin` -> `Import & Export` -> `System` -> `Import System` + a. Browse to where the export was saved Artifactory volume (`/var/opt/jfrog/artifactory/`) + b. Click `Import` (this can take some time) + 6. Restore access to Artifactory + a. Scale the cluster member nodes back to the original size + * Artifactory should now be ready to get back to normal operation diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/.helmignore b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/.helmignore new file mode 100755 index 0000000..a1c17ae --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/.helmignore @@ -0,0 +1,2 @@ +.git +OWNERS \ No newline at end of file diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/Chart.yaml b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/Chart.yaml new file mode 100755 index 0000000..4687eb3 --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/Chart.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +appVersion: 11.5.0 +description: Chart for PostgreSQL, an object-relational database management system + (ORDBMS) with an emphasis on extensibility and on standards-compliance. 
+engine: gotpl +home: https://www.postgresql.org/ +icon: https://bitnami.com/assets/stacks/postgresql/img/postgresql-stack-110x117.png +keywords: +- postgresql +- postgres +- database +- sql +- replication +- cluster +maintainers: +- email: containers@bitnami.com + name: Bitnami +- email: cedric@desaintmartin.fr + name: desaintmartin +name: postgresql +sources: +- https://github.com/bitnami/bitnami-docker-postgresql +version: 7.0.1 diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/README.md b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/README.md new file mode 100755 index 0000000..e6621b4 --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/README.md @@ -0,0 +1,487 @@ +# PostgreSQL + +[PostgreSQL](https://www.postgresql.org/) is an object-relational database management system (ORDBMS) with an emphasis on extensibility and on standards-compliance. + +## TL;DR; + +```console +$ helm install stable/postgresql +``` + +## Introduction + +This chart bootstraps a [PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This chart has been tested to work with NGINX Ingress, cert-manager, fluentd and Prometheus on top of the [BKPR](https://kubeprod.io/). + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 2.11+ or Helm 3.0-beta3+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart +To install the chart with the release name `my-release`: + +```console +$ helm install --name my-release stable/postgresql +``` + +The command deploys PostgreSQL on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters + +The following tables lists the configurable parameters of the PostgreSQL chart and their default values. 
+ +| Parameter | Description | Default | +| --------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------- | +| `global.imageRegistry` | Global Docker Image registry | `nil` | +| `global.postgresql.postgresqlDatabase` | PostgreSQL database (overrides `postgresqlDatabase`) | `nil` | +| `global.postgresql.postgresqlUsername` | PostgreSQL username (overrides `postgresqlUsername`) | `nil` | +| `global.postgresql.existingSecret` | Name of existing secret to use for PostgreSQL passwords (overrides `existingSecret`) | `nil` | +| `global.postgresql.postgresqlPassword` | PostgreSQL admin password (overrides `postgresqlPassword`) | `nil` | +| `global.postgresql.servicePort` | PostgreSQL port (overrides `service.port`) | `nil` | +| `global.postgresql.replicationPassword` | Replication user password (overrides `replication.password`) | `nil` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `global.storageClass` | Global storage class for dynamic provisioning | `nil` | +| `image.registry` | PostgreSQL Image registry | `docker.io` | +| `image.repository` | PostgreSQL Image name | `bitnami/postgresql` | +| `image.tag` | PostgreSQL Image tag | `{TAG_NAME}` | +| `image.pullPolicy` | PostgreSQL Image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify Image pull secrets | `nil` (does not add image pull secrets to deployed pods) | +| `image.debug` | Specify if debug values should be set | `false` | +| `nameOverride` | String to partially override postgresql.fullname template with a string (will prepend the release name) | `nil` | +| `fullnameOverride` | String to fully override postgresql.fullname template with a string | `nil` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag | `stretch` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` | +| `volumePermissions.securityContext.runAsUser` | User ID for the init container | `0` | +| `usePasswordFile` | Have the secrets mounted as a file instead of env vars | `false` | +| `replication.enabled` | Enable replication | `false` | +| `replication.user` | Replication user | `repl_user` | +| `replication.password` | Replication user password | `repl_password` | +| `replication.slaveReplicas` | Number of slaves replicas | `1` | +| `replication.synchronousCommit` | Set synchronous commit mode. Allowed values: `on`, `remote_apply`, `remote_write`, `local` and `off` | `off` | +| `replication.numSynchronousReplicas` | Number of replicas that will have synchronous replication. Note: Cannot be greater than `replication.slaveReplicas`. | `0` | +| `replication.applicationName` | Cluster application name. 
Useful for advanced replication settings | `my_application` | +| `existingSecret` | Name of existing secret to use for PostgreSQL passwords | `nil` | +| `postgresqlUsername` | PostgreSQL admin user | `postgres` | +| `postgresqlPassword` | PostgreSQL admin password | _random 10 character alphanumeric string_ | +| `postgresqlDatabase` | PostgreSQL database | `nil` | +| `postgresqlDataDir` | PostgreSQL data dir folder | `/bitnami/postgresql` (same value as persistence.mountPath) | +| `postgresqlInitdbArgs` | PostgreSQL initdb extra arguments | `nil` | +| `postgresqlInitdbWalDir` | PostgreSQL location for transaction log | `nil` | +| `postgresqlConfiguration` | Runtime Config Parameters | `nil` | +| `postgresqlExtendedConf` | Extended Runtime Config Parameters (appended to main or default configuration) | `nil` | +| `pgHbaConfiguration` | Content of pg\_hba.conf | `nil (do not create pg_hba.conf)` | +| `configurationConfigMap` | ConfigMap with the PostgreSQL configuration files (Note: Overrides `postgresqlConfiguration` and `pgHbaConfiguration`). The value is evaluated as a template. | `nil` | +| `extendedConfConfigMap` | ConfigMap with the extended PostgreSQL configuration files. The value is evaluated as a template. | `nil` | +| `initdbScripts` | Dictionary of initdb scripts | `nil` | +| `initdbUsername` | PostgreSQL user to execute the .sql and sql.gz scripts | `nil` | +| `initdbPassword` | Password for the user specified in `initdbUsername` | `nil` | +| `initdbScriptsConfigMap` | ConfigMap with the initdb scripts (Note: Overrides `initdbScripts`). The value is evaluated as a template. | `nil` | +| `initdbScriptsSecret` | Secret with initdb scripts that contain sensitive information (Note: can be used with `initdbScriptsConfigMap` or `initdbScripts`). The value is evaluated as a template. | `nil` | +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.port` | PostgreSQL port | `5432` | +| `service.nodePort` | Kubernetes Service nodePort | `nil` | +| `service.annotations` | Annotations for PostgreSQL service, the value is evaluated as a template. | {} | +| `service.loadBalancerIP` | loadBalancerIP if service type is `LoadBalancer` | `nil` | +| `service.loadBalancerSourceRanges` | Address that are allowed when svc is LoadBalancer | [] | +| `schedulerName` | Name of the k8s scheduler (other than default) | `nil` | +| `persistence.enabled` | Enable persistence using PVC | `true` | +| `persistence.existingClaim` | Provide an existing `PersistentVolumeClaim`, the value is evaluated as a template. 
| `nil` | +| `persistence.mountPath` | Path to mount the volume at | `/bitnami/postgresql` | +| `persistence.subPath` | Subdirectory of the volume to mount at | `""` | +| `persistence.storageClass` | PVC Storage Class for PostgreSQL volume | `nil` | +| `persistence.accessModes` | PVC Access Mode for PostgreSQL volume | `[ReadWriteOnce]` | +| `persistence.size` | PVC Storage Request for PostgreSQL volume | `8Gi` | +| `persistence.annotations` | Annotations for the PVC | `{}` | +| `master.nodeSelector` | Node labels for pod assignment (postgresql master) | `{}` | +| `master.affinity` | Affinity labels for pod assignment (postgresql master) | `{}` | +| `master.tolerations` | Toleration labels for pod assignment (postgresql master) | `[]` | +| `master.anotations` | Map of annotations to add to the statefulset (postgresql master) | `{}` | +| `master.labels` | Map of labels to add to the statefulset (postgresql master) | `{}` | +| `master.podAnnotations` | Map of annotations to add to the pods (postgresql master) | `{}` | +| `master.podLabels` | Map of labels to add to the pods (postgresql master) | `{}` | +| `master.extraVolumeMounts` | Additional volume mounts to add to the pods (postgresql master) | `[]` | +| `master.extraVolumes` | Additional volumes to add to the pods (postgresql master) | `[]` | +| `slave.nodeSelector` | Node labels for pod assignment (postgresql slave) | `{}` | +| `slave.affinity` | Affinity labels for pod assignment (postgresql slave) | `{}` | +| `slave.tolerations` | Toleration labels for pod assignment (postgresql slave) | `[]` | +| `slave.anotations` | Map of annotations to add to the statefulsets (postgresql slave) | `{}` | +| `slave.labels` | Map of labels to add to the statefulsets (postgresql slave) | `{}` | +| `slave.podAnnotations` | Map of annotations to add to the pods (postgresql slave) | `{}` | +| `slave.podLabels` | Map of labels to add to the pods (postgresql slave) | `{}` | +| `slave.extraVolumeMounts` | Additional volume mounts to add to the pods (postgresql slave) | `[]` | +| `slave.extraVolumes` | Additional volumes to add to the pods (postgresql slave) | `[]` | +| `terminationGracePeriodSeconds` | Seconds the pod needs to terminate gracefully | `nil` | +| `resources` | CPU/Memory resource requests/limits | Memory: `256Mi`, CPU: `250m` | +| `securityContext.enabled` | Enable security context | `true` | +| `securityContext.fsGroup` | Group ID for the container | `1001` | +| `securityContext.runAsUser` | User ID for the container | `1001` | +| `serviceAccount.enabled` | Enable service account (Note: Service Account will only be automatically created if `serviceAccount.name` is not set) | `false` | +| `serviceAcccount.name` | Name of existing service account | `nil` | +| `livenessProbe.enabled` | Would you like a livenessProbe to be enabled | `true` | +| `networkPolicy.enabled` | Enable NetworkPolicy | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 30 | +| `livenessProbe.periodSeconds` | How often to perform the probe | 10 | +| `livenessProbe.timeoutSeconds` | When the probe times out | 5 | +| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| 6 | +| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `readinessProbe.enabled` | would you like a readinessProbe to be enabled | `true` | +| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | 5 | +| `readinessProbe.periodSeconds` | How often to perform the probe | 10 | +| `readinessProbe.timeoutSeconds` | When the probe times out | 5 | +| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 | +| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `metrics.enabled` | Start a prometheus exporter | `false` | +| `metrics.service.type` | Kubernetes Service type | `ClusterIP` | +| `service.clusterIP` | Static clusterIP or None for headless services | `nil` | +| `metrics.service.annotations` | Additional annotations for metrics exporter pod | `{ prometheus.io/scrape: "true", prometheus.io/port: "9187"}` | +| `metrics.service.loadBalancerIP` | loadBalancerIP if redis metrics service type is `LoadBalancer` | `nil` | +| `metrics.serviceMonitor.enabled` | Set this to `true` to create ServiceMonitor for Prometheus operator | `false` | +| `metrics.serviceMonitor.additionalLabels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` | +| `metrics.serviceMonitor.namespace` | Optional namespace in which to create ServiceMonitor | `nil` | +| `metrics.serviceMonitor.interval` | Scrape interval. If not set, the Prometheus default scrape interval is used | `nil` | +| `metrics.serviceMonitor.scrapeTimeout` | Scrape timeout. If not set, the Prometheus default scrape timeout is used | `nil` | +| `metrics.image.registry` | PostgreSQL Image registry | `docker.io` | +| `metrics.image.repository` | PostgreSQL Image name | `bitnami/postgres-exporter` | +| `metrics.image.tag` | PostgreSQL Image tag | `{TAG_NAME}` | +| `metrics.image.pullPolicy` | PostgreSQL Image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Specify Image pull secrets | `nil` (does not add image pull secrets to deployed pods) | +| `metrics.securityContext.enabled` | Enable security context for metrics | `false` | +| `metrics.securityContext.runAsUser` | User ID for the container for metrics | `1001` | +| `metrics.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 30 | +| `metrics.livenessProbe.periodSeconds` | How often to perform the probe | 10 | +| `metrics.livenessProbe.timeoutSeconds` | When the probe times out | 5 | +| `metrics.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 | +| `metrics.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `metrics.readinessProbe.enabled` | would you like a readinessProbe to be enabled | `true` | +| `metrics.readinessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 5 | +| `metrics.readinessProbe.periodSeconds` | How often to perform the probe | 10 | +| `metrics.readinessProbe.timeoutSeconds` | When the probe times out | 5 | +| `metrics.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| 6 | +| `metrics.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `extraEnv` | Any extra environment variables you would like to pass on to the pod. The value is evaluated as a template. | `{}` | +| `updateStrategy` | Update strategy policy | `{type: "RollingUpdate"}` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```console +$ helm install --name my-release \ + --set postgresqlPassword=secretpassword,postgresqlDatabase=my-database \ + stable/postgresql +``` + +The above command sets the PostgreSQL `postgres` account password to `secretpassword`. Additionally it creates a database named `my-database`. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```console +$ helm install --name my-release -f values.yaml stable/postgresql +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Configuration and installation details + +### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Production configuration and horizontal scaling + +This chart includes a `values-production.yaml` file where you can find some parameters oriented to production configuration in comparison to the regular `values.yaml`. You can use this file instead of the default one. + +- Enable replication: +```diff +- replication.enabled: false ++ replication.enabled: true +``` + +- Number of slaves replicas: +```diff +- replication.slaveReplicas: 1 ++ replication.slaveReplicas: 2 +``` + +- Set synchronous commit mode: +```diff +- replication.synchronousCommit: "off" ++ replication.synchronousCommit: "on" +``` + +- Number of replicas that will have synchronous replication: +```diff +- replication.numSynchronousReplicas: 0 ++ replication.numSynchronousReplicas: 1 +``` + +- Start a prometheus exporter: +```diff +- metrics.enabled: false ++ metrics.enabled: true +``` + +To horizontally scale this chart, you can use the `--replicas` flag to modify the number of nodes in your PostgreSQL deployment. Also you can use the `values-production.yaml` file or modify the parameters shown above. + +### Change PostgreSQL version + +To modify the PostgreSQL version used in this chart you can specify a [valid image tag](https://hub.docker.com/r/bitnami/postgresql/tags/) using the `image.tag` parameter. For example, `image.tag=12.0.0-debian-9-r0` + +### postgresql.conf / pg_hba.conf files as configMap + +This helm chart also supports to customize the whole configuration file. + +Add your custom file to "files/postgresql.conf" in your working directory. This file will be mounted as configMap to the containers and it will be used for configuring the PostgreSQL server. + +Alternatively, you can specify PostgreSQL configuration parameters using the `postgresqlConfiguration` parameter as a dict, using camelCase, e.g. {"sharedBuffers": "500MB"}. + +In addition to these options, you can also set an external ConfigMap with all the configuration files. 
This is done by setting the `configurationConfigMap` parameter. Note that this will override the two previous options. + +### Allow settings to be loaded from files other than the default `postgresql.conf` + +If you don't want to provide the whole PostgreSQL configuration file and only specify certain parameters, you can add your extended `.conf` files to "files/conf.d/" in your working directory. +Those files will be mounted as configMap to the containers adding/overwriting the default configuration using the `include_dir` directive that allows settings to be loaded from files other than the default `postgresql.conf`. + +Alternatively, you can also set an external ConfigMap with all the extra configuration files. This is done by setting the `extendedConfConfigMap` parameter. Note that this will override the previous option. + +### Initialize a fresh instance + +The [Bitnami PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) image allows you to use your custom scripts to initialize a fresh instance. In order to execute the scripts, they must be located inside the chart folder `files/docker-entrypoint-initdb.d` so they can be consumed as a ConfigMap. + +Alternatively, you can specify custom scripts using the `initdbScripts` parameter as dict. + +In addition to these options, you can also set an external ConfigMap with all the initialization scripts. This is done by setting the `initdbScriptsConfigMap` parameter. Note that this will override the two previous options. If your initialization scripts contain sensitive information such as credentials or passwords, you can use the `initdbScriptsSecret` parameter. + +The allowed extensions are `.sh`, `.sql` and `.sql.gz`. + +### Metrics + +The chart optionally can start a metrics exporter for [prometheus](https://prometheus.io). The metrics endpoint (port 9187) is not exposed and it is expected that the metrics are collected from inside the k8s cluster using something similar as the described in the [example Prometheus scrape configuration](https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus-kubernetes.yml). + +The exporter allows to create custom metrics from additional SQL queries. See the Chart's `values.yaml` for an example and consult the [exporters documentation](https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file) for more details. + +### Use of global variables + +In more complex scenarios, we may have the following tree of dependencies + +``` + +--------------+ + | | + +------------+ Chart 1 +-----------+ + | | | | + | --------+------+ | + | | | + | | | + | | | + | | | + v v v ++-------+------+ +--------+------+ +--------+------+ +| | | | | | +| PostgreSQL | | Sub-chart 1 | | Sub-chart 2 | +| | | | | | ++--------------+ +---------------+ +---------------+ +``` + +The three charts below depend on the parent chart Chart 1. However, subcharts 1 and 2 may need to connect to PostgreSQL as well. In order to do so, subcharts 1 and 2 need to know the PostgreSQL credentials, so one option for deploying could be deploy Chart 1 with the following parameters: + +``` +postgresql.postgresqlPassword=testtest +subchart1.postgresql.postgresqlPassword=testtest +subchart2.postgresql.postgresqlPassword=testtest +postgresql.postgresqlDatabase=db1 +subchart1.postgresql.postgresqlDatabase=db1 +subchart2.postgresql.postgresqlDatabase=db1 +``` + +If the number of dependent sub-charts increases, installing the chart with parameters can become increasingly difficult. 
An alternative would be to set the credentials using global variables as follows:
+
+```
+global.postgresql.postgresqlPassword=testtest
+global.postgresql.postgresqlDatabase=db1
+```
+
+This way, the credentials will be available in all of the subcharts.
+
+## Persistence
+
+The [Bitnami PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) image stores the PostgreSQL data and configurations at the `/bitnami/postgresql` path of the container.
+
+Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube.
+See the [Parameters](#parameters) section to configure the PVC or to disable persistence.
+
+If the persistent volume already contains data, replication to the standby nodes will fail for all commits; see the [code](https://github.com/bitnami/bitnami-docker-postgresql/blob/8725fe1d7d30ebe8d9a16e9175d05f7ad9260c93/9.6/debian-9/rootfs/libpostgresql.sh#L518-L556) for details. If you need that data, convert it to SQL and import it after `helm install` has finished.
+
+## NetworkPolicy
+
+To enable network policy for PostgreSQL, install [a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), and set `networkPolicy.enabled` to `true`.
+
+For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting the DefaultDeny namespace annotation. Note: this will enforce policy for _all_ pods in the namespace:
+
+```console
+$ kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}"
+```
+
+With NetworkPolicy enabled, traffic will be limited to just port 5432.
+
+For more precise policy, set `networkPolicy.allowExternal=false`. This will only allow pods with the generated client label to connect to PostgreSQL.
+This label will be displayed in the output of a successful install.
+
+## Differences between Bitnami PostgreSQL image and [Docker Official](https://hub.docker.com/_/postgres) image
+
+- The Docker Official PostgreSQL image does not support replication. If you pass any replication environment variable, it will be ignored. The only environment variables supported by the Docker Official image are POSTGRES_USER, POSTGRES_DB, POSTGRES_PASSWORD, POSTGRES_INITDB_ARGS, POSTGRES_INITDB_WALDIR and PGDATA. All the remaining environment variables are specific to the Bitnami PostgreSQL image.
+- The Bitnami PostgreSQL image is non-root by default. This requires that you run the pod with `securityContext` and updates the permissions of the volume with an `initContainer`. A key benefit of this configuration is that the pod follows security best practices and is prepared to run on Kubernetes distributions with hard security constraints like OpenShift.
+
+### Deploy chart using Docker Official PostgreSQL Image
+
+From chart version 4.0.0, it is possible to use this chart with the Docker Official PostgreSQL image.
+Besides specifying the new Docker repository and tag, it is important to modify the PostgreSQL data directory and volume mount point: the PostgreSQL data directory cannot be the mount point itself, it must be a subdirectory.
+
+```
+helm install --name postgres \
+  --set image.repository=postgres \
+  --set image.tag=10.6 \
+  --set postgresqlDataDir=/data/pgdata \
+  --set persistence.mountPath=/data/ \
+  stable/postgresql
+```
+
+## Upgrade
+
+It's necessary to specify the existing passwords while performing an upgrade to ensure the secrets are not updated with invalid randomly generated passwords. Remember to specify the existing values of the `postgresqlPassword` and `replication.password` parameters when upgrading the chart:
+
+```bash
+$ helm upgrade my-release stable/postgresql \
+  --set postgresqlPassword=[POSTGRESQL_PASSWORD] \
+  --set replication.password=[REPLICATION_PASSWORD]
+```
+
+> Note: you need to substitute the placeholders _[POSTGRESQL_PASSWORD]_ and _[REPLICATION_PASSWORD]_ with the values obtained from the instructions in the installation notes.
+
+## 7.0.0
+
+Helm performs a lookup for the object based on its group (apps), version (v1), and kind (Deployment), also known as its GroupVersionKind, or GVK. Changing the GVK is considered a compatibility breaker from Kubernetes' point of view, so you cannot "upgrade" those objects to the new GVK in-place. Earlier versions of Helm 3 did not perform the lookup correctly; this has since been fixed to match the spec.
+
+In https://github.com/helm/charts/pull/17281 the `apiVersion` of the StatefulSet resources was updated to `apps/v1` in line with the API deprecations, resulting in a compatibility break.
+
+This major version bump signifies this change.
+
+## 5.0.0
+
+In this version, the **chart is using PostgreSQL 11 instead of PostgreSQL 10**. You can find the main differences and notable changes in the following links: [https://www.postgresql.org/about/news/1894/](https://www.postgresql.org/about/news/1894/) and [https://www.postgresql.org/about/featurematrix/](https://www.postgresql.org/about/featurematrix/).
+
+For major releases of PostgreSQL, the internal data storage format is subject to change, which complicates upgrades. You may see errors like the following in the logs:
+
+```bash
+Welcome to the Bitnami postgresql container
+Subscribe to project updates by watching https://github.com/bitnami/bitnami-docker-postgresql
+Submit issues and feature requests at https://github.com/bitnami/bitnami-docker-postgresql/issues
+Send us your feedback at containers@bitnami.com
+
+INFO ==> ** Starting PostgreSQL setup **
+INFO ==> Validating settings in POSTGRESQL_* env vars..
+INFO ==> Initializing PostgreSQL database...
+INFO ==> postgresql.conf file not detected. Generating it...
+INFO ==> pg_hba.conf file not detected. Generating it...
+INFO ==> Deploying PostgreSQL with persisted data...
+INFO ==> Configuring replication parameters
+INFO ==> Loading custom scripts...
+INFO ==> Enabling remote connections
+INFO ==> Stopping PostgreSQL...
+INFO ==> ** PostgreSQL setup finished! **
+
+INFO ==> ** Starting PostgreSQL **
+ [1] FATAL: database files are incompatible with server
+ [1] DETAIL: The data directory was initialized by PostgreSQL version 10, which is not compatible with this version 11.3.
+```
+In this case, you should migrate the data from the old chart to the new one following an approach similar to that described in [this section](https://www.postgresql.org/docs/current/upgrading.html#UPGRADING-VIA-PGDUMPALL) of the official documentation. Basically, create a database dump in the old chart, then move and restore it in the new one.
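+
+As a rough sketch of that approach (not part of the upstream chart documentation), assuming the old PostgreSQL 10 release is still reachable through the placeholder service name `OLD_SERVICE_NAME` and the new PostgreSQL 11 release through `NEW_SERVICE_NAME`, the dump and restore could look like this; you will be prompted for the corresponding passwords (or set `PGPASSWORD`):
+
+```console
+$ # dump all databases from the old (PostgreSQL 10) release
+$ pg_dumpall -h OLD_SERVICE_NAME -U postgres > /tmp/all-databases.sql
+$ # restore the dump into the new (PostgreSQL 11) release
+$ psql -h NEW_SERVICE_NAME -U postgres -f /tmp/all-databases.sql
+```
+
+As with the `2.0.0` upgrade steps further down, this can take some time for large databases.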
+
+### 4.0.0
+
+This chart will use the Bitnami PostgreSQL container starting from version `10.7.0-r68` by default. This version moves the initialization logic from node.js to bash. This new version of the chart requires setting the `POSTGRES_PASSWORD` in the slaves as well, in order to properly configure the `pg_hba.conf` file. Users of previous versions of the chart are advised to upgrade immediately.
+
+IMPORTANT: If you do not want to upgrade the chart version, make sure you use the `10.7.0-r68` version of the container. Otherwise, you will get this error:
+
+```
+The POSTGRESQL_PASSWORD environment variable is empty or not set. Set the environment variable ALLOW_EMPTY_PASSWORD=yes to allow the container to be started with blank passwords. This is recommended only for development
+```
+
+### 3.0.0
+
+This release makes it possible to specify different nodeSelector, affinity and tolerations for master and slave pods.
+It also fixes an issue with the `postgresql.master.fullname` helper template not obeying `fullnameOverride`.
+
+#### Breaking changes
+
+- `affinty` has been renamed to `master.affinity` and `slave.affinity`.
+- `tolerations` has been renamed to `master.tolerations` and `slave.tolerations`.
+- `nodeSelector` has been renamed to `master.nodeSelector` and `slave.nodeSelector`.
+
+### 2.0.0
+
+In order to upgrade from the `0.X.X` branch to `1.X.X`, you should follow the steps below:
+
+ - Obtain the service name (`SERVICE_NAME`) and password (`OLD_PASSWORD`) of the existing postgresql chart. You can find the instructions to obtain the password in the NOTES.txt; the service name can be obtained by running
+
+ ```console
+$ kubectl get svc
+ ```
+
+- Install (not upgrade) the new version
+
+```console
+$ helm repo update
+$ helm install --name my-release stable/postgresql
+```
+
+- Connect to the new pod (you can obtain the name by running `kubectl get pods`):
+
+```console
+$ kubectl exec -it NAME bash
+```
+
+- Once logged in, create a dump file from the previous database using `pg_dump`; for that, connect to the previous postgresql chart:
+
+```console
+$ pg_dump -h SERVICE_NAME -U postgres DATABASE_NAME > /tmp/backup.sql
+```
+
+After running the above command you will be prompted for a password; this is the password of the previous chart (`OLD_PASSWORD`).
+This operation could take some time depending on the database size.
+
+- Once you have the backup file, you can restore it with a command like the one below:
+
+```console
+$ psql -U postgres DATABASE_NAME < /tmp/backup.sql
+```
+
+In this case, you are accessing the local PostgreSQL instance, so the password should be the new one (you can find it in NOTES.txt).
+
+If you want to restore the database and the database schema does not exist, it is necessary to first follow the steps described below.
+ +```console +$ psql -U postgres +postgres=# drop database DATABASE_NAME; +postgres=# create database DATABASE_NAME; +postgres=# create user USER_NAME; +postgres=# alter role USER_NAME with password 'BITNAMI_USER_PASSWORD'; +postgres=# grant all privileges on database DATABASE_NAME to USER_NAME; +postgres=# alter database DATABASE_NAME owner to USER_NAME; +``` diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/files/README.md b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/files/README.md new file mode 100755 index 0000000..1813a2f --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/files/README.md @@ -0,0 +1 @@ +Copy here your postgresql.conf and/or pg_hba.conf files to use it as a config map. diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/files/conf.d/README.md b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/files/conf.d/README.md new file mode 100755 index 0000000..184c187 --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/files/conf.d/README.md @@ -0,0 +1,4 @@ +If you don't want to provide the whole configuration file and only specify certain parameters, you can copy here your extended `.conf` files. +These files will be injected as a config maps and add/overwrite the default configuration using the `include_dir` directive that allows settings to be loaded from files other than the default `postgresql.conf`. + +More info in the [bitnami-docker-postgresql README](https://github.com/bitnami/bitnami-docker-postgresql#configuration-file). diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/files/docker-entrypoint-initdb.d/README.md b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/files/docker-entrypoint-initdb.d/README.md new file mode 100755 index 0000000..cba3809 --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/files/docker-entrypoint-initdb.d/README.md @@ -0,0 +1,3 @@ +You can copy here your custom `.sh`, `.sql` or `.sql.gz` file so they are executed during the first boot of the image. + +More info in the [bitnami-docker-postgresql](https://github.com/bitnami/bitnami-docker-postgresql#initializing-a-new-instance) repository. \ No newline at end of file diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/templates/NOTES.txt b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/templates/NOTES.txt new file mode 100755 index 0000000..798fa10 --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/templates/NOTES.txt @@ -0,0 +1,50 @@ +** Please be patient while the chart is being deployed ** + +PostgreSQL can be accessed via port {{ template "postgresql.port" . }} on the following DNS name from within your cluster: + + {{ template "postgresql.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local - Read/Write connection +{{- if .Values.replication.enabled }} + {{ template "postgresql.fullname" . }}-read.{{ .Release.Namespace }}.svc.cluster.local - Read only connection +{{- end }} +To get the password for "{{ template "postgresql.username" . 
}}" run: + + export POSTGRES_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "postgresql.secretName" . }} -o jsonpath="{.data.postgresql-password}" | base64 --decode) + +To connect to your database run the following command: + + kubectl run {{ template "postgresql.fullname" . }}-client --rm --tty -i --restart='Never' --namespace {{ .Release.Namespace }} --image {{ template "postgresql.image" . }} --env="PGPASSWORD=$POSTGRES_PASSWORD" {{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} + --labels="{{ template "postgresql.fullname" . }}-client=true" {{- end }} --command -- psql --host {{ template "postgresql.fullname" . }} -U {{ .Values.postgresqlUsername }}{{- if .Values.postgresqlDatabase }} -d {{ .Values.postgresqlDatabase }}{{- end }} -p {{ template "postgresql.port" . }} + +{{ if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} +Note: Since NetworkPolicy is enabled, only pods with label {{ template "postgresql.fullname" . }}-client=true" will be able to connect to this PostgreSQL cluster. +{{- end }} + +To connect to your database from outside the cluster execute the following commands: + +{{- if contains "NodePort" .Values.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "postgresql.fullname" . }}) + {{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host $NODE_IP --port $NODE_PORT -U {{ .Values.postgresqlUsername }}{{- if .Values.postgresqlDatabase }} -d {{ .Values.postgresqlDatabase }}{{- end }} + +{{- else if contains "LoadBalancer" .Values.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "postgresql.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "postgresql.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + {{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host $SERVICE_IP --port {{ template "postgresql.port" . }} -U {{ .Values.postgresqlUsername }}{{- if .Values.postgresqlDatabase }} -d {{ .Values.postgresqlDatabase }}{{- end }} + +{{- else if contains "ClusterIP" .Values.service.type }} + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "postgresql.fullname" . }} {{ template "postgresql.port" . }}:{{ template "postgresql.port" . }} & + {{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host 127.0.0.1 -U {{ .Values.postgresqlUsername }}{{- if .Values.postgresqlDatabase }} -d {{ .Values.postgresqlDatabase }}{{- end }} -p {{ template "postgresql.port" . }} + +{{- end }} + +{{- if and (contains "bitnami/" .Values.image.repository) (not (.Values.image.tag | toString | regexFind "-r\\d+$|sha256:")) }} + +WARNING: Rolling tag detected ({{ .Values.image.repository }}:{{ .Values.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. 
++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ + +{{- end }} diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/templates/_helpers.tpl b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/templates/_helpers.tpl new file mode 100755 index 0000000..b379f29 --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/templates/_helpers.tpl @@ -0,0 +1,374 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "postgresql.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "postgresql.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "postgresql.master.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- $fullname := default (printf "%s-%s" .Release.Name $name) .Values.fullnameOverride -}} +{{- if .Values.replication.enabled -}} +{{- printf "%s-%s" $fullname "master" | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s" $fullname | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "postgresql.networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +"extensions/v1beta1" +{{- else if semverCompare "^1.7-0" .Capabilities.KubeVersion.GitVersion -}} +"networking.k8s.io/v1" +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "postgresql.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper PostgreSQL image name +*/}} +{{- define "postgresql.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL password +*/}} +{{- define "postgresql.password" -}} +{{- if .Values.global.postgresql.postgresqlPassword }} + {{- .Values.global.postgresql.postgresqlPassword -}} +{{- else if .Values.postgresqlPassword -}} + {{- .Values.postgresqlPassword -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL replication password +*/}} +{{- define "postgresql.replication.password" -}} +{{- if .Values.global.postgresql.replicationPassword }} + {{- .Values.global.postgresql.replicationPassword -}} +{{- else if .Values.replication.password -}} + {{- .Values.replication.password -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL username +*/}} +{{- define "postgresql.username" -}} +{{- if .Values.global.postgresql.postgresqlUsername }} + {{- .Values.global.postgresql.postgresqlUsername -}} +{{- else -}} + {{- .Values.postgresqlUsername -}} +{{- end -}} +{{- end -}} + + +{{/* +Return PostgreSQL replication username +*/}} +{{- define "postgresql.replication.username" -}} +{{- if .Values.global.postgresql.replicationUser }} + {{- .Values.global.postgresql.replicationUser -}} +{{- else -}} + {{- .Values.replication.user -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL port +*/}} +{{- define "postgresql.port" -}} +{{- if .Values.global.postgresql.servicePort }} + {{- .Values.global.postgresql.servicePort -}} +{{- else -}} + {{- .Values.service.port -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL created database +*/}} +{{- define "postgresql.database" -}} +{{- if .Values.global.postgresql.postgresqlDatabase }} + {{- .Values.global.postgresql.postgresqlDatabase -}} +{{- else if .Values.postgresqlDatabase -}} + {{- .Values.postgresqlDatabase -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name to change the volume permissions +*/}} +{{- define "postgresql.volumePermissions.image" -}} +{{- $registryName := .Values.volumePermissions.image.registry -}} +{{- $repositoryName := .Values.volumePermissions.image.repository -}} +{{- $tag := .Values.volumePermissions.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper PostgreSQL metrics image name +*/}} +{{- define "postgresql.metrics.image" -}} +{{- $registryName := default "docker.io" .Values.metrics.image.registry -}} +{{- $repositoryName := .Values.metrics.image.repository -}} +{{- $tag := default "latest" .Values.metrics.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Get the password secret. +*/}} +{{- define "postgresql.secretName" -}} +{{- if .Values.global.postgresql.existingSecret }} + {{- printf "%s" .Values.global.postgresql.existingSecret -}} +{{- else if .Values.existingSecret -}} + {{- printf "%s" .Values.existingSecret -}} +{{- else -}} + {{- printf "%s" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a secret object should be created +*/}} +{{- define "postgresql.createSecret" -}} +{{- if .Values.global.postgresql.existingSecret }} +{{- else if .Values.existingSecret -}} +{{- else -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Get the configuration ConfigMap name. +*/}} +{{- define "postgresql.configurationCM" -}} +{{- if .Values.configurationConfigMap -}} +{{- printf "%s" (tpl .Values.configurationConfigMap $) -}} +{{- else -}} +{{- printf "%s-configuration" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the extended configuration ConfigMap name. +*/}} +{{- define "postgresql.extendedConfigurationCM" -}} +{{- if .Values.extendedConfConfigMap -}} +{{- printf "%s" (tpl .Values.extendedConfConfigMap $) -}} +{{- else -}} +{{- printf "%s-extended-configuration" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the initialization scripts ConfigMap name. +*/}} +{{- define "postgresql.initdbScriptsCM" -}} +{{- if .Values.initdbScriptsConfigMap -}} +{{- printf "%s" (tpl .Values.initdbScriptsConfigMap $) -}} +{{- else -}} +{{- printf "%s-init-scripts" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the initialization scripts Secret name. +*/}} +{{- define "postgresql.initdbScriptsSecret" -}} +{{- printf "%s" (tpl .Values.initdbScriptsSecret $) -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "postgresql.imagePullSecrets" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. 
+Also, we can not use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.global.imagePullSecrets }} + - name: {{ . }} +{{- end }} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- end -}} + +{{/* +Get the readiness probe command +*/}} +{{- define "postgresql.readinessProbeCommand" -}} +- | +{{- if (include "postgresql.database" .) }} + pg_isready -U {{ include "postgresql.username" . | quote }} -d {{ (include "postgresql.database" .) | quote }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} +{{- else }} + pg_isready -U {{ include "postgresql.username" . | quote }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} +{{- end }} +{{- if contains "bitnami/" .Values.image.repository }} + [ -f /opt/bitnami/postgresql/tmp/.initialized ] +{{- end -}} +{{- end -}} + +{{/* +Return the proper Storage Class +*/}} +{{- define "postgresql.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.persistence.storageClass -}} + {{- if (eq "-" .Values.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.persistence.storageClass -}} + {{- if (eq "-" .Values.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Renders a value that contains template. +Usage: +{{ include "postgresql.tplValue" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "postgresql.tplValue" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. 
+*/}} +{{- define "postgresql.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "apps/v1beta2" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/templates/configmap.yaml b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/templates/configmap.yaml new file mode 100755 index 0000000..d2178c0 --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/templates/configmap.yaml @@ -0,0 +1,26 @@ +{{ if and (or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration) (not .Values.configurationConfigMap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.fullname" . }}-configuration + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +data: +{{- if (.Files.Glob "files/postgresql.conf") }} +{{ (.Files.Glob "files/postgresql.conf").AsConfig | indent 2 }} +{{- else if .Values.postgresqlConfiguration }} + postgresql.conf: | +{{- range $key, $value := default dict .Values.postgresqlConfiguration }} + {{ $key | snakecase }}={{ $value }} +{{- end }} +{{- end }} +{{- if (.Files.Glob "files/pg_hba.conf") }} +{{ (.Files.Glob "files/pg_hba.conf").AsConfig | indent 2 }} +{{- else if .Values.pgHbaConfiguration }} + pg_hba.conf: | +{{ .Values.pgHbaConfiguration | indent 4 }} +{{- end }} +{{ end }} diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/templates/extended-config-configmap.yaml b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/templates/extended-config-configmap.yaml new file mode 100755 index 0000000..8a41195 --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/templates/extended-config-configmap.yaml @@ -0,0 +1,21 @@ +{{- if and (or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf) (not .Values.extendedConfConfigMap)}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.fullname" . }}-extended-configuration + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +data: +{{- with .Files.Glob "files/conf.d/*.conf" }} +{{ .AsConfig | indent 2 }} +{{- end }} +{{ with .Values.postgresqlExtendedConf }} + override.conf: | +{{- range $key, $value := . 
}} + {{ $key | snakecase }}={{ $value }} +{{- end }} +{{- end }} +{{- end }} diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/templates/initialization-configmap.yaml b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/templates/initialization-configmap.yaml new file mode 100755 index 0000000..8eb5e05 --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/templates/initialization-configmap.yaml @@ -0,0 +1,24 @@ +{{- if and (or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScripts) (not .Values.initdbScriptsConfigMap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.fullname" . }}-init-scripts + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +{{- with .Files.Glob "files/docker-entrypoint-initdb.d/*.sql.gz" }} +binaryData: +{{- range $path, $bytes := . }} + {{ base $path }}: {{ $.Files.Get $path | b64enc | quote }} +{{- end }} +{{- end }} +data: +{{- with .Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql}" }} +{{ .AsConfig | indent 2 }} +{{- end }} +{{- with .Values.initdbScripts }} +{{ toYaml . | indent 2 }} +{{- end }} +{{- end }} diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/templates/metrics-svc.yaml b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/templates/metrics-svc.yaml new file mode 100755 index 0000000..f370041 --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/templates/metrics-svc.yaml @@ -0,0 +1,26 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }}-metrics + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + annotations: +{{ toYaml .Values.metrics.service.annotations | indent 4 }} +spec: + type: {{ .Values.metrics.service.type }} + {{- if and (eq .Values.metrics.service.type "LoadBalancer") .Values.metrics.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.metrics.service.loadBalancerIP }} + {{- end }} + ports: + - name: metrics + port: 9187 + targetPort: metrics + selector: + app: {{ template "postgresql.name" . }} + release: {{ .Release.Name }} + role: master +{{- end }} diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/templates/networkpolicy.yaml b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/templates/networkpolicy.yaml new file mode 100755 index 0000000..8fb23f2 --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/templates/networkpolicy.yaml @@ -0,0 +1,34 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "postgresql.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + podSelector: + matchLabels: + app: {{ template "postgresql.name" . 
}} + release: {{ .Release.Name | quote }} + ingress: + # Allow inbound connections + - ports: + - port: {{ template "postgresql.port" . }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "postgresql.fullname" . }}-client: "true" + - podSelector: + matchLabels: + app: {{ template "postgresql.name" . }} + release: {{ .Release.Name | quote }} + role: slave + {{- end }} + # Allow prometheus scrapes + - ports: + - port: 9187 +{{- end }} diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/templates/secrets.yaml b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/templates/secrets.yaml new file mode 100755 index 0000000..e0bc3b2 --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/templates/secrets.yaml @@ -0,0 +1,17 @@ +{{- if (include "postgresql.createSecret" .) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +type: Opaque +data: + postgresql-password: {{ include "postgresql.password" . | b64enc | quote }} + {{- if .Values.replication.enabled }} + postgresql-replication-password: {{ include "postgresql.replication.password" . | b64enc | quote }} + {{- end }} +{{- end -}} diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/templates/serviceaccount.yaml b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/templates/serviceaccount.yaml new file mode 100755 index 0000000..27e5b51 --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/templates/serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- if and (.Values.serviceAccount.enabled) (not .Values.serviceAccount.name) }} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + name: {{ template "postgresql.fullname" . }} +{{- end }} \ No newline at end of file diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/templates/servicemonitor.yaml b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/templates/servicemonitor.yaml new file mode 100755 index 0000000..9211d51 --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/templates/servicemonitor.yaml @@ -0,0 +1,33 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "postgresql.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- end }} + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . 
}} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if .Values.metrics.serviceMonitor.additionalLabels }} +{{ toYaml .Values.metrics.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} +spec: + endpoints: + - port: metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + selector: + matchLabels: + app: {{ template "postgresql.name" . }} + release: {{ .Release.Name }} +{{- end }} diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/templates/statefulset-slaves.yaml b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/templates/statefulset-slaves.yaml new file mode 100755 index 0000000..74a81d0 --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/templates/statefulset-slaves.yaml @@ -0,0 +1,247 @@ +{{- if .Values.replication.enabled }} +apiVersion: {{ template "postgresql.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: "{{ template "postgresql.fullname" . }}-slave" + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +{{- with .Values.slave.labels }} +{{ toYaml . | indent 4 }} +{{- end }} +{{- with .Values.slave.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: + serviceName: {{ template "postgresql.fullname" . }}-headless + replicas: {{ .Values.replication.slaveReplicas }} + selector: + matchLabels: + app: {{ template "postgresql.name" . }} + release: {{ .Release.Name | quote }} + role: slave + template: + metadata: + name: {{ template "postgresql.fullname" . }} + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + role: slave +{{- with .Values.slave.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.slave.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + {{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" + {{- end }} +{{- include "postgresql.imagePullSecrets" . | indent 6 }} + {{- if .Values.slave.nodeSelector }} + nodeSelector: +{{ toYaml .Values.slave.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.slave.affinity }} + affinity: +{{ toYaml .Values.slave.affinity | indent 8 }} + {{- end }} + {{- if .Values.slave.tolerations }} + tolerations: +{{ toYaml .Values.slave.tolerations | indent 8 }} + {{- end }} + {{- if .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ default (include "postgresql.fullname" . ) .Values.serviceAccount.name}} + {{- end }} + {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }} + initContainers: + - name: init-chmod-data + image: {{ template "postgresql.volumePermissions.image" . 
}} + imagePullPolicy: "{{ .Values.volumePermissions.image.pullPolicy }}" + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -c + - | + mkdir -p {{ .Values.persistence.mountPath }}/data + chmod 700 {{ .Values.persistence.mountPath }}/data + find {{ .Values.persistence.mountPath }} -mindepth 0 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | \ + xargs chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} + securityContext: + runAsUser: {{ .Values.volumePermissions.securityContext.runAsUser }} + volumeMounts: + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + containers: + - name: {{ template "postgresql.fullname" . }} + image: {{ template "postgresql.image" . }} + imagePullPolicy: "{{ .Values.image.pullPolicy }}" + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" .Values.image.debug | quote }} + - name: POSTGRESQL_VOLUME_DIR + value: "{{ .Values.persistence.mountPath }}" + - name: POSTGRESQL_PORT_NUMBER + value: "{{ template "postgresql.port" . }}" + {{- if .Values.persistence.mountPath }} + - name: PGDATA + value: {{ .Values.postgresqlDataDir | quote }} + {{- end }} + - name: POSTGRES_REPLICATION_MODE + value: "slave" + - name: POSTGRES_REPLICATION_USER + value: {{ include "postgresql.replication.username" . | quote }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_REPLICATION_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-replication-password" + {{- else }} + - name: POSTGRES_REPLICATION_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-replication-password + {{- end }} + - name: POSTGRES_CLUSTER_APP_NAME + value: {{ .Values.replication.applicationName }} + - name: POSTGRES_MASTER_HOST + value: {{ template "postgresql.fullname" . }} + - name: POSTGRES_MASTER_PORT_NUMBER + value: {{ include "postgresql.port" . | quote }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-password" + {{- else }} + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-password + {{- end }} + ports: + - name: postgresql + containerPort: {{ template "postgresql.port" . }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - /bin/sh + - -c + {{- if (include "postgresql.database" .) }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} -d {{ (include "postgresql.database" .) | quote }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} + {{- else }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} -h 127.0.0.1 -p {{ template "postgresql.port" . 
}} + {{- end }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - /bin/sh + - -c + - -e + {{- include "postgresql.readinessProbeCommand" . | nindent 16 }} + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{ end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + mountPath: /bitnami/postgresql/conf/conf.d/ + {{- end }} + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }} + - name: postgresql-config + mountPath: /bitnami/postgresql/conf + {{- end }} + {{- if .Values.slave.extraVolumeMounts }} + {{- toYaml .Values.slave.extraVolumeMounts | nindent 12 }} + {{- end }} + volumes: + {{- if .Values.usePasswordFile }} + - name: postgresql-password + secret: + secretName: {{ template "postgresql.secretName" . }} + {{- end }} + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap}} + - name: postgresql-config + configMap: + name: {{ template "postgresql.configurationCM" . }} + {{- end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + configMap: + name: {{ template "postgresql.extendedConfigurationCM" . }} + {{- end }} + {{- if not .Values.persistence.enabled }} + - name: data + emptyDir: {} + {{- end }} + {{- if .Values.slave.extraVolumes }} + {{- toYaml .Values.slave.extraVolumes | nindent 8 }} + {{- end }} + updateStrategy: + type: {{ .Values.updateStrategy.type }} + {{- if (eq "Recreate" .Values.updateStrategy.type) }} + rollingUpdate: null + {{- end }} +{{- if .Values.persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: data + {{- with .Values.persistence.annotations }} + annotations: + {{- range $key, $value := . }} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "postgresql.storageClass" . 
}} +{{- end }} +{{- end }} diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/templates/statefulset.yaml b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/templates/statefulset.yaml new file mode 100755 index 0000000..64c297f --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/templates/statefulset.yaml @@ -0,0 +1,355 @@ +apiVersion: {{ template "postgresql.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ template "postgresql.master.fullname" . }} + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +{{- with .Values.master.labels }} +{{ toYaml . | indent 4 }} +{{- end }} +{{- with .Values.master.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: + serviceName: {{ template "postgresql.fullname" . }}-headless + replicas: 1 + updateStrategy: + type: {{ .Values.updateStrategy.type }} + {{- if (eq "Recreate" .Values.updateStrategy.type) }} + rollingUpdate: null + {{- end }} + selector: + matchLabels: + app: {{ template "postgresql.name" . }} + release: {{ .Release.Name | quote }} + role: master + template: + metadata: + name: {{ template "postgresql.fullname" . }} + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + role: master +{{- with .Values.master.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.master.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + {{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" + {{- end }} +{{- include "postgresql.imagePullSecrets" . | indent 6 }} + {{- if .Values.master.nodeSelector }} + nodeSelector: +{{ toYaml .Values.master.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.master.affinity }} + affinity: +{{ toYaml .Values.master.affinity | indent 8 }} + {{- end }} + {{- if .Values.master.tolerations }} + tolerations: +{{ toYaml .Values.master.tolerations | indent 8 }} + {{- end }} + {{- if .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ default (include "postgresql.fullname" . ) .Values.serviceAccount.name }} + {{- end }} + {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }} + initContainers: + - name: init-chmod-data + image: {{ template "postgresql.volumePermissions.image" . 
}} + imagePullPolicy: "{{ .Values.volumePermissions.image.pullPolicy }}" + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -c + - | + mkdir -p {{ .Values.persistence.mountPath }}/data + chmod 700 {{ .Values.persistence.mountPath }}/data + find {{ .Values.persistence.mountPath }} -mindepth 0 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | \ + xargs chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} + securityContext: + runAsUser: {{ .Values.volumePermissions.securityContext.runAsUser }} + volumeMounts: + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + containers: + - name: {{ template "postgresql.fullname" . }} + image: {{ template "postgresql.image" . }} + imagePullPolicy: "{{ .Values.image.pullPolicy }}" + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" .Values.image.debug | quote }} + - name: POSTGRESQL_PORT_NUMBER + value: "{{ template "postgresql.port" . }}" + - name: POSTGRESQL_VOLUME_DIR + value: "{{ .Values.persistence.mountPath }}" + {{- if .Values.postgresqlInitdbArgs }} + - name: POSTGRES_INITDB_ARGS + value: {{ .Values.postgresqlInitdbArgs | quote }} + {{- end }} + {{- if .Values.postgresqlInitdbWalDir }} + - name: POSTGRES_INITDB_WALDIR + value: {{ .Values.postgresqlInitdbWalDir | quote }} + {{- end }} + {{- if .Values.initdbUser }} + - name: POSTGRESQL_INITSCRIPTS_USERNAME + value: {{ .Values.initdbUser }} + {{- end }} + {{- if .Values.initdbPassword }} + - name: POSTGRESQL_INITSCRIPTS_PASSWORD + value: .Values.initdbPassword + {{- end }} + {{- if .Values.persistence.mountPath }} + - name: PGDATA + value: {{ .Values.postgresqlDataDir | quote }} + {{- end }} + {{- if .Values.replication.enabled }} + - name: POSTGRES_REPLICATION_MODE + value: "master" + - name: POSTGRES_REPLICATION_USER + value: {{ include "postgresql.replication.username" . | quote }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_REPLICATION_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-replication-password" + {{- else }} + - name: POSTGRES_REPLICATION_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-replication-password + {{- end }} + {{- if not (eq .Values.replication.synchronousCommit "off")}} + - name: POSTGRES_SYNCHRONOUS_COMMIT_MODE + value: {{ .Values.replication.synchronousCommit | quote }} + - name: POSTGRES_NUM_SYNCHRONOUS_REPLICAS + value: {{ .Values.replication.numSynchronousReplicas | quote }} + {{- end }} + - name: POSTGRES_CLUSTER_APP_NAME + value: {{ .Values.replication.applicationName }} + {{- end }} + - name: POSTGRES_USER + value: {{ include "postgresql.username" . | quote }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-password" + {{- else }} + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-password + {{- end }} + {{- if (include "postgresql.database" .) }} + - name: POSTGRES_DB + value: {{ (include "postgresql.database" .) 
| quote }} + {{- end }} + {{- if .Values.extraEnv }} + {{- include "postgresql.tplValue" (dict "value" .Values.extraEnv "context" $) | nindent 12 }} + {{- end }} + ports: + - name: postgresql + containerPort: {{ template "postgresql.port" . }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - /bin/sh + - -c + {{- if (include "postgresql.database" .) }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} -d {{ (include "postgresql.database" .) | quote }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} + {{- else }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} + {{- end }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - /bin/sh + - -c + - -e + {{- include "postgresql.readinessProbeCommand" . | nindent 16 }} + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + {{- if or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d/ + {{- end }} + {{- if .Values.initdbScriptsSecret }} + - name: custom-init-scripts-secret + mountPath: /docker-entrypoint-initdb.d/secret + {{- end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + mountPath: /bitnami/postgresql/conf/conf.d/ + {{- end }} + {{- if .Values.usePasswordFile }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }} + - name: postgresql-config + mountPath: /bitnami/postgresql/conf + {{- end }} + {{- if .Values.master.extraVolumeMounts }} + {{- toYaml .Values.master.extraVolumeMounts | nindent 12 }} + {{- end }} +{{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "postgresql.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.metrics.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.metrics.securityContext.runAsUser }} + {{- end }} + env: + {{- $database := required "In order to enable metrics you need to specify a database (.Values.postgresqlDatabase or .Values.global.postgresql.postgresqlDatabase)" (include "postgresql.database" .) 
}} + - name: DATA_SOURCE_URI + value: {{ printf "127.0.0.1:%d/%s?sslmode=disable" (int (include "postgresql.port" .)) $database | quote }} + {{- if .Values.usePasswordFile }} + - name: DATA_SOURCE_PASS_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-password" + {{- else }} + - name: DATA_SOURCE_PASS + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-password + {{- end }} + - name: DATA_SOURCE_USER + value: {{ template "postgresql.username" . }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: / + port: metrics + initialDelaySeconds: {{ .Values.metrics.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.metrics.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.metrics.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: / + port: metrics + initialDelaySeconds: {{ .Values.metrics.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.metrics.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.metrics.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + ports: + - name: metrics + containerPort: 9187 + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} +{{- end }} + volumes: + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap}} + - name: postgresql-config + configMap: + name: {{ template "postgresql.configurationCM" . }} + {{- end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + configMap: + name: {{ template "postgresql.extendedConfigurationCM" . }} + {{- end }} + {{- if .Values.usePasswordFile }} + - name: postgresql-password + secret: + secretName: {{ template "postgresql.secretName" . }} + {{- end }} + {{- if or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + configMap: + name: {{ template "postgresql.initdbScriptsCM" . }} + {{- end }} + {{- if .Values.initdbScriptsSecret }} + - name: custom-init-scripts-secret + secret: + secretName: {{ template "postgresql.initdbScriptsSecret" . }} + {{- end }} + {{- if .Values.master.extraVolumes }} + {{- toYaml .Values.master.extraVolumes | nindent 8 }} + {{- end }} +{{- if and .Values.persistence.enabled .Values.persistence.existingClaim }} + - name: data + persistentVolumeClaim: +{{- with .Values.persistence.existingClaim }} + claimName: {{ tpl . $ }} +{{- end }} +{{- else if not .Values.persistence.enabled }} + - name: data + emptyDir: {} +{{- else if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }} + volumeClaimTemplates: + - metadata: + name: data + {{- with .Values.persistence.annotations }} + annotations: + {{- range $key, $value := . 
}} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "postgresql.storageClass" . }} +{{- end }} diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/templates/svc-headless.yaml b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/templates/svc-headless.yaml new file mode 100755 index 0000000..0bc60be --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/templates/svc-headless.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }}-headless + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + type: ClusterIP + clusterIP: None + ports: + - name: postgresql + port: {{ template "postgresql.port" . }} + targetPort: postgresql + selector: + app: {{ template "postgresql.name" . }} + release: {{ .Release.Name | quote }} diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/templates/svc-read.yaml b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/templates/svc-read.yaml new file mode 100755 index 0000000..17bda04 --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/templates/svc-read.yaml @@ -0,0 +1,31 @@ +{{- if .Values.replication.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }}-read + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +{{- with .Values.service.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: + type: {{ .Values.service.type }} + {{- if and .Values.service.loadBalancerIP (eq .Values.service.type "LoadBalancer") }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + ports: + - name: postgresql + port: {{ template "postgresql.port" . }} + targetPort: postgresql + {{- if .Values.service.nodePort }} + nodePort: {{ .Values.service.nodePort }} + {{- end }} + selector: + app: {{ template "postgresql.name" . }} + release: {{ .Release.Name | quote }} + role: slave +{{- end }} diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/templates/svc.yaml b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/templates/svc.yaml new file mode 100755 index 0000000..3b880b7 --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/templates/svc.yaml @@ -0,0 +1,38 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +{{- with .Values.service.annotations }} + annotations: +{{ tpl (toYaml .) 
$ | indent 4 }} +{{- end }} +spec: + type: {{ .Values.service.type }} + {{- if and .Values.service.loadBalancerIP (eq .Values.service.type "LoadBalancer") }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.service.type "LoadBalancer") .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{ with .Values.service.loadBalancerSourceRanges }} +{{ toYaml . | indent 4 }} +{{- end }} + {{- end }} + {{- if and (eq .Values.service.type "ClusterIP") .Values.service.clusterIP }} + clusterIP: {{ .Values.service.clusterIP }} + {{- end }} + ports: + - name: postgresql + port: {{ template "postgresql.port" . }} + targetPort: postgresql + {{- if .Values.service.nodePort }} + nodePort: {{ .Values.service.nodePort }} + {{- end }} + selector: + app: {{ template "postgresql.name" . }} + release: {{ .Release.Name | quote }} + role: master diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/values-production.yaml b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/values-production.yaml new file mode 100755 index 0000000..353848a --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/values-production.yaml @@ -0,0 +1,392 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: + postgresql: {} +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Bitnami PostgreSQL image version +## ref: https://hub.docker.com/r/bitnami/postgresql/tags/ +## +image: + registry: docker.io + repository: bitnami/postgresql + tag: 11.5.0-debian-9-r84 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Set to true if you would like to see extra information on logs + ## It turns BASH and NAMI debugging in minideb + ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging + debug: false + +## String to partially override postgresql.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override postgresql.fullname template +## +# fullnameOverride: + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: true + image: + registry: docker.io + repository: bitnami/minideb + tag: stretch + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
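+ ## For illustration only (not upstream chart text; registry and credentials are
+ ## placeholders to substitute, and the jfrog-artifactory namespace matches helminstall.sh):
+ ## a pull secret such as the "myRegistryKeySecretName" example below could be created with
+ ##   oc create secret docker-registry myRegistryKeySecretName \
+ ##     --docker-server=docker.io --docker-username=<user> \
+ ##     --docker-password=<password> --docker-email=<email> -n jfrog-artifactory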
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Init container Security Context + securityContext: + runAsUser: 0 + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Pod Service Account +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +serviceAccount: + enabled: false + ## Name of an already existing service account. Setting this value disables the automatic service account creation. + # name: + +replication: + enabled: true + user: repl_user + password: repl_password + slaveReplicas: 2 + ## Set synchronous commit mode: on, off, remote_apply, remote_write and local + ## ref: https://www.postgresql.org/docs/9.6/runtime-config-wal.html#GUC-WAL-LEVEL + synchronousCommit: "on" + ## From the number of `slaveReplicas` defined above, set the number of those that will have synchronous replication + ## NOTE: It cannot be > slaveReplicas + numSynchronousReplicas: 1 + ## Replication Cluster application name. Useful for defining multiple replication policies + applicationName: my_application + +## PostgreSQL admin user +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +postgresqlUsername: postgres + +## PostgreSQL password +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +## +# postgresqlPassword: + +## PostgreSQL password using existing secret +## existingSecret: secret + +## Mount PostgreSQL secret as a file instead of passing environment variable +# usePasswordFile: false + +## Create a database +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run +## +# postgresqlDatabase: + +## PostgreSQL data dir +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +postgresqlDataDir: /bitnami/postgresql/data + +## Specify extra initdb args +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbArgs: + +## Specify a custom location for the PostgreSQL transaction log +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbWalDir: + +## PostgreSQL configuration +## Specify runtime configuration parameters as a dict, using camelCase, e.g. 
+## {"sharedBuffers": "500MB"} +## Alternatively, you can put your postgresql.conf under the files/ directory +## ref: https://www.postgresql.org/docs/current/static/runtime-config.html +## +# postgresqlConfiguration: + +## PostgreSQL extended configuration +## As above, but _appended_ to the main configuration +## Alternatively, you can put your *.conf under the files/conf.d/ directory +## https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf +## +# postgresqlExtendedConf: + +## PostgreSQL client authentication configuration +## Specify content for pg_hba.conf +## Default: do not create pg_hba.conf +## Alternatively, you can put your pg_hba.conf under the files/ directory +# pgHbaConfiguration: |- +# local all all trust +# host all all localhost trust +# host mydatabase mysuser 192.168.0.0/24 md5 + +## ConfigMap with PostgreSQL configuration +## NOTE: This will override postgresqlConfiguration and pgHbaConfiguration +# configurationConfigMap: + +## ConfigMap with PostgreSQL extended configuration +# extendedConfConfigMap: + +## initdb scripts +## Specify dictionary of scripts to be run at first boot +## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory +## +# initdbScripts: +# my_init_script.sh: | +# #!/bin/sh +# echo "Do something." + +## Specify the PostgreSQL username and password to execute the initdb scripts +# initdbUser: +# initdbPassword: + +## ConfigMap with scripts to be run at first boot +## NOTE: This will override initdbScripts +# initdbScriptsConfigMap: + +## Secret with scripts to be run at first boot (in case it contains sensitive information) +## NOTE: This can work along initdbScripts or initdbScriptsConfigMap +# initdbScriptsSecret: + +## Optional duration in seconds the pod needs to terminate gracefully. +## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods +## +# terminationGracePeriodSeconds: 30 + +## PostgreSQL service configuration +service: + ## PosgresSQL service type + type: ClusterIP + # clusterIP: None + port: 5432 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + annotations: {} + ## Set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + + ## Load Balancer sources + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + # loadBalancerSourceRanges: + # - 10.10.10.0/24 + +## PostgreSQL data Persistent Volume Storage Class +## If defined, storageClassName: +## If set to "-", storageClassName: "", which disables dynamic provisioning +## If undefined (the default) or set to null, no storageClassName spec is +## set, choosing the default provisioner. 
(gp2 on AWS, standard on +## GKE, AWS & OpenStack) +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template, so, for example, the name can depend on .Release or .Chart + ## + # existingClaim: + + ## The path the volume will be mounted at, useful when using different + ## PostgreSQL images. + ## + mountPath: /bitnami/postgresql + + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + ## + subPath: "" + + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + annotations: {} + +## updateStrategy for PostgreSQL StatefulSet and its slaves StatefulSets +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +updateStrategy: + type: RollingUpdate + +## +## PostgreSQL Master parameters +## +master: + ## Node, affinity and tolerations labels for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + ## Additional PostgreSQL Master Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Master Volumes + ## + extraVolumes: [] + +## +## PostgreSQL Slave parameters +## +slave: + ## Node, affinity and tolerations labels for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + ## Additional PostgreSQL Slave Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Slave Volumes + ## + extraVolumes: [] + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + requests: + memory: 256Mi + cpu: 250m + +networkPolicy: + ## Enable creation of NetworkPolicy resources. + ## + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port PostgreSQL is listening + ## on. When true, PostgreSQL will accept connections from any source + ## (with the correct destination port). 
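+ ## For illustration only (assumes the label key rendered by the bundled networkpolicy.yaml
+ ## template, normally "<release-name>-postgresql-client"): when allowExternal is false, an
+ ## existing client pod can be granted access by labelling it, e.g.
+ ##   oc label pod <client-pod> "<release-name>-postgresql-client=true"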
+ ## + allowExternal: true + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## Configure metrics exporter +## +metrics: + enabled: true + # resources: {} + service: + type: ClusterIP + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9187" + loadBalancerIP: + serviceMonitor: + enabled: false + additionalLabels: {} + # namespace: monitoring + # interval: 30s + # scrapeTimeout: 10s + image: + registry: docker.io + repository: bitnami/postgres-exporter + tag: 0.6.0-debian-9-r0 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## + securityContext: + enabled: false + runAsUser: 1001 + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## Configure extra options for liveness and readiness probes + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +# Define custom environment variables to pass to the image here +extraEnv: [] diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/values.schema.json b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/values.schema.json new file mode 100755 index 0000000..ac2de6e --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/values.schema.json @@ -0,0 +1,103 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "postgresqlUsername": { + "type": "string", + "title": "Admin user", + "form": true + }, + "postgresqlPassword": { + "type": "string", + "title": "Password", + "form": true + }, + "persistence": { + "type": "object", + "properties": { + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi" + } + } + }, + "resources": { + "type": "object", + "title": "Required Resources", + "description": "Configure resource requests", + "form": true, + "properties": { + "requests": { + "type": "object", + "properties": { + "memory": { + "type": "string", + "form": true, + "render": "slider", + "title": "Memory Request", + "sliderMin": 10, + "sliderMax": 2048, + "sliderUnit": "Mi" + }, + "cpu": { + "type": "string", + "form": true, + "render": "slider", + "title": "CPU Request", + "sliderMin": 10, + "sliderMax": 2000, + "sliderUnit": "m" + } + } + } + } + }, + "replication": { + "type": "object", + "form": true, + "title": "Replication Details", + "properties": { + "enabled": { + "type": "boolean", + 
"title": "Enable Replication", + "form": true + }, + "slaveReplicas": { + "type": "integer", + "title": "Slave Replicas", + "form": true, + "hidden": { + "condition": false, + "value": "replication.enabled" + } + } + } + }, + "volumePermissions": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable Init Containers", + "description": "Change the owner of the persist volume mountpoint to RunAsUser:fsGroup" + } + } + }, + "metrics": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "title": "Configure metrics exporter", + "form": true + } + } + } + } +} diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/values.yaml b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/values.yaml new file mode 100755 index 0000000..e13d0a7 --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/charts/postgresql/values.yaml @@ -0,0 +1,392 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: + postgresql: {} +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Bitnami PostgreSQL image version +## ref: https://hub.docker.com/r/bitnami/postgresql/tags/ +## +image: + registry: docker.io + repository: bitnami/postgresql + tag: 11.5.0-debian-9-r84 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Set to true if you would like to see extra information on logs + ## It turns BASH and NAMI debugging in minideb + ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging + debug: false + +## String to partially override postgresql.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override postgresql.fullname template +## +# fullnameOverride: + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: true + image: + registry: docker.io + repository: bitnami/minideb + tag: stretch + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Init container Security Context + securityContext: + runAsUser: 0 + +## Use an alternate scheduler, e.g. "stork". 
+## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Pod Service Account +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +serviceAccount: + enabled: false + ## Name of an already existing service account. Setting this value disables the automatic service account creation. + # name: + +replication: + enabled: false + user: repl_user + password: repl_password + slaveReplicas: 1 + ## Set synchronous commit mode: on, off, remote_apply, remote_write and local + ## ref: https://www.postgresql.org/docs/9.6/runtime-config-wal.html#GUC-WAL-LEVEL + synchronousCommit: "off" + ## From the number of `slaveReplicas` defined above, set the number of those that will have synchronous replication + ## NOTE: It cannot be > slaveReplicas + numSynchronousReplicas: 0 + ## Replication Cluster application name. Useful for defining multiple replication policies + applicationName: my_application + +## PostgreSQL admin user +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +postgresqlUsername: postgres + +## PostgreSQL password +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +## +# postgresqlPassword: + +## PostgreSQL password using existing secret +## existingSecret: secret + +## Mount PostgreSQL secret as a file instead of passing environment variable +# usePasswordFile: false + +## Create a database +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run +## +# postgresqlDatabase: + +## PostgreSQL data dir +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +postgresqlDataDir: /bitnami/postgresql/data + +## Specify extra initdb args +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbArgs: + +## Specify a custom location for the PostgreSQL transaction log +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbWalDir: + +## PostgreSQL configuration +## Specify runtime configuration parameters as a dict, using camelCase, e.g. 
+## {"sharedBuffers": "500MB"} +## Alternatively, you can put your postgresql.conf under the files/ directory +## ref: https://www.postgresql.org/docs/current/static/runtime-config.html +## +# postgresqlConfiguration: + +## PostgreSQL extended configuration +## As above, but _appended_ to the main configuration +## Alternatively, you can put your *.conf under the files/conf.d/ directory +## https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf +## +# postgresqlExtendedConf: + +## PostgreSQL client authentication configuration +## Specify content for pg_hba.conf +## Default: do not create pg_hba.conf +## Alternatively, you can put your pg_hba.conf under the files/ directory +# pgHbaConfiguration: |- +# local all all trust +# host all all localhost trust +# host mydatabase mysuser 192.168.0.0/24 md5 + +## ConfigMap with PostgreSQL configuration +## NOTE: This will override postgresqlConfiguration and pgHbaConfiguration +# configurationConfigMap: + +## ConfigMap with PostgreSQL extended configuration +# extendedConfConfigMap: + +## initdb scripts +## Specify dictionary of scripts to be run at first boot +## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory +## +# initdbScripts: +# my_init_script.sh: | +# #!/bin/sh +# echo "Do something." + +## ConfigMap with scripts to be run at first boot +## NOTE: This will override initdbScripts +# initdbScriptsConfigMap: + +## Secret with scripts to be run at first boot (in case it contains sensitive information) +## NOTE: This can work along initdbScripts or initdbScriptsConfigMap +# initdbScriptsSecret: + +## Specify the PostgreSQL username and password to execute the initdb scripts +# initdbUser: +# initdbPassword: + +## Optional duration in seconds the pod needs to terminate gracefully. +## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods +## +# terminationGracePeriodSeconds: 30 + +## PostgreSQL service configuration +service: + ## PosgresSQL service type + type: ClusterIP + # clusterIP: None + port: 5432 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + annotations: {} + ## Set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + + ## Load Balancer sources + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + # loadBalancerSourceRanges: + # - 10.10.10.0/24 + +## PostgreSQL data Persistent Volume Storage Class +## If defined, storageClassName: +## If set to "-", storageClassName: "", which disables dynamic provisioning +## If undefined (the default) or set to null, no storageClassName spec is +## set, choosing the default provisioner. 
(gp2 on AWS, standard on +## GKE, AWS & OpenStack) +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template, so, for example, the name can depend on .Release or .Chart + ## + # existingClaim: + + ## The path the volume will be mounted at, useful when using different + ## PostgreSQL images. + ## + mountPath: /bitnami/postgresql + + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + ## + subPath: "" + + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + annotations: {} + +## updateStrategy for PostgreSQL StatefulSet and its slaves StatefulSets +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +updateStrategy: + type: RollingUpdate + +## +## PostgreSQL Master parameters +## +master: + ## Node, affinity and tolerations labels for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + ## Additional PostgreSQL Master Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Master Volumes + ## + extraVolumes: [] + +## +## PostgreSQL Slave parameters +## +slave: + ## Node, affinity and tolerations labels for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + ## Additional PostgreSQL Slave Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Slave Volumes + ## + extraVolumes: [] + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + requests: + memory: 256Mi + cpu: 250m + +networkPolicy: + ## Enable creation of NetworkPolicy resources. + ## + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port PostgreSQL is listening + ## on. When true, PostgreSQL will accept connections from any source + ## (with the correct destination port). 
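+ ## For illustration only (key prefix assumes the "postgresql" dependency name declared in
+ ## requirements.yaml): both switches can be overridden from the parent chart at install
+ ## time, e.g.
+ ##   helm install . --generate-name \
+ ##     --set postgresql.networkPolicy.enabled=true,postgresql.networkPolicy.allowExternal=false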
+ ## + allowExternal: true + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## Configure metrics exporter +## +metrics: + enabled: false + # resources: {} + service: + type: ClusterIP + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9187" + loadBalancerIP: + serviceMonitor: + enabled: false + additionalLabels: {} + # namespace: monitoring + # interval: 30s + # scrapeTimeout: 10s + image: + registry: docker.io + repository: bitnami/postgres-exporter + tag: 0.6.0-debian-9-r0 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## + securityContext: + enabled: false + runAsUser: 1001 + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## Configure extra options for liveness and readiness probes + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +# Define custom environment variables to pass to the image here +extraEnv: [] diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/helminstall.sh b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/helminstall.sh new file mode 100755 index 0000000..6f0132d --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/helminstall.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash + +if [ -z "$1" ]; then echo "Skipping creation of persistent volume examples. Ensure there is available PVs 200Gi per node for HA."; else oc create -f pv-examples/; fi + +oc new-project jfrog-artifactory +oc create serviceaccount svcaccount -n jfrog-artifactory +oc adm policy add-scc-to-user privileged system:serviceaccount:jfrog-artifactory:svcaccount +oc adm policy add-scc-to-user anyuid system:serviceaccount:jfrog-artifactory:svcaccount +oc adm policy add-scc-to-group anyuid system:authenticated + +# enables hostPath plugin for openshift system wide +oc create -f scc.yaml -n jfrog-artifactory +oc patch securitycontextconstraints.security.openshift.io/hostpath --type=merge --patch='{"allowHostDirVolumePlugin": true}' +oc adm policy add-scc-to-user hostpath system:serviceaccount:jfrog-artifactory:svcaccount + +# create the license secret +oc create secret generic artifactory-license --from-file=./artifactory.cluster.license + +# install via helm +helm install . 
--generate-name \ + --set artifactory.node.replicaCount=1 \ + --set nginx.service.type=NodePort \ + --set artifactory.license.secret=artifactory-license,artifactory.license.dataKey=artifactory.cluster.license diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/pv-examples/pv0001-large.yaml b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/pv-examples/pv0001-large.yaml new file mode 100644 index 0000000..8a36385 --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/pv-examples/pv0001-large.yaml @@ -0,0 +1,15 @@ +kind: PersistentVolume +apiVersion: v1 +metadata: + name: pv0001-large +spec: + capacity: + storage: 200Gi + hostPath: + path: /mnt/pv-data/pv0001-large + accessModes: + - ReadWriteOnce + - ReadWriteMany + - ReadOnlyMany + persistentVolumeReclaimPolicy: Recycle + volumeMode: Filesystem diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/pv-examples/pv0002-large.yaml b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/pv-examples/pv0002-large.yaml new file mode 100644 index 0000000..b96fa47 --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/pv-examples/pv0002-large.yaml @@ -0,0 +1,15 @@ +kind: PersistentVolume +apiVersion: v1 +metadata: + name: pv0002-large +spec: + capacity: + storage: 200Gi + hostPath: + path: /mnt/pv-data/pv0002-large + accessModes: + - ReadWriteOnce + - ReadWriteMany + - ReadOnlyMany + persistentVolumeReclaimPolicy: Recycle + volumeMode: Filesystem diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/pv-examples/pv0003-large.yaml b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/pv-examples/pv0003-large.yaml new file mode 100644 index 0000000..476ad41 --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/pv-examples/pv0003-large.yaml @@ -0,0 +1,15 @@ +kind: PersistentVolume +apiVersion: v1 +metadata: + name: pv0003-large +spec: + capacity: + storage: 200Gi + hostPath: + path: /mnt/pv-data/pv0003-large + accessModes: + - ReadWriteOnce + - ReadWriteMany + - ReadOnlyMany + persistentVolumeReclaimPolicy: Recycle + volumeMode: Filesystem diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/pv-examples/pv0004-large.yaml b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/pv-examples/pv0004-large.yaml new file mode 100644 index 0000000..ae2fbda --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/pv-examples/pv0004-large.yaml @@ -0,0 +1,15 @@ +kind: PersistentVolume +apiVersion: v1 +metadata: + name: pv0004-large +spec: + capacity: + storage: 200Gi + hostPath: + path: /mnt/pv-data/pv0004-large + accessModes: + - ReadWriteOnce + - ReadWriteMany + - ReadOnlyMany + persistentVolumeReclaimPolicy: Recycle + volumeMode: Filesystem diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/pv-examples/pv0005-large.yaml b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/pv-examples/pv0005-large.yaml new file mode 100644 index 0000000..9488514 --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/pv-examples/pv0005-large.yaml @@ -0,0 +1,15 @@ +kind: PersistentVolume +apiVersion: v1 +metadata: + name: pv0005-large +spec: + capacity: + storage: 200Gi + hostPath: + path: /mnt/pv-data/pv0005-large + accessModes: + - 
ReadWriteOnce + - ReadWriteMany + - ReadOnlyMany + persistentVolumeReclaimPolicy: Recycle + volumeMode: Filesystem diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/requirements.lock b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/requirements.lock new file mode 100755 index 0000000..7037cff --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/requirements.lock @@ -0,0 +1,6 @@ +dependencies: +- name: postgresql + repository: https://kubernetes-charts.storage.googleapis.com/ + version: 7.0.1 +digest: sha256:dcdafe9ab91ccf0e5883e2b5dd9ba13e82190b5e16e6dee6d39fd16a04123ce8 +generated: 2019-11-10T13:12:29.836238+02:00 diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/requirements.yaml b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/requirements.yaml new file mode 100755 index 0000000..756a19c --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/requirements.yaml @@ -0,0 +1,5 @@ +dependencies: + - name: postgresql + version: 7.0.1 + repository: https://kubernetes-charts.storage.googleapis.com/ + condition: postgresql.enabled diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/scc.yaml b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/scc.yaml new file mode 100644 index 0000000..13eef79 --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/scc.yaml @@ -0,0 +1,18 @@ +kind: SecurityContextConstraints +apiVersion: v1 +metadata: + name: hostpath +allowPrivilegedContainer: false +runAsUser: + type: RunAsAny +seLinuxContext: + type: RunAsAny +fsGroup: + type: RunAsAny +supplementalGroups: + type: RunAsAny +users: +- artifactory +groups: +- artifactory +- jfrog-artifactory diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/NOTES.txt b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/NOTES.txt new file mode 100755 index 0000000..d08fa88 --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/NOTES.txt @@ -0,0 +1,113 @@ +Congratulations. You have just deployed JFrog Artifactory HA! 
+
+{{- if and (not .Values.artifactory.masterKeySecretName) (eq .Values.artifactory.masterKey "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF") }}
+
+
+***************************************** WARNING ******************************************
+* Your Artifactory master key is still set to the provided example: *
+* artifactory.masterKey=FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF *
+* *
+* You should change this to your own generated key: *
+* $ export MASTER_KEY=$(openssl rand -hex 32) *
+* $ echo ${MASTER_KEY} *
+* *
+* Pass the created master key to helm with '--set artifactory.masterKey=${MASTER_KEY}' *
+* *
+* Alternatively, you can use a pre-existing secret with a key called master-key with *
+* '--set artifactory.masterKeySecretName=${SECRET_NAME}' *
+********************************************************************************************
+{{- end }}
+
+{{ if eq .Values.artifactory.joinKey "EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE" }}
+
+
+***************************************** WARNING ******************************************
+* Your Artifactory join key is still set to the provided example: *
+* artifactory.joinKey=EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE *
+* *
+* You should change this to your own generated key: *
+* $ export JOIN_KEY=$(openssl rand -hex 16) *
+* $ echo ${JOIN_KEY} *
+* *
+* Pass the created join key to helm with '--set artifactory.joinKey=${JOIN_KEY}' *
+* *
+********************************************************************************************
+{{- end }}
+
+{{- if .Values.postgresql.enabled }}
+
+DATABASE:
+To extract the database password, run the following:
+export DB_PASSWORD=$(kubectl get --namespace {{ .Release.Namespace }} $(kubectl get secret --namespace {{ .Release.Namespace }} -o name | grep postgresql) -o jsonpath="{.data.postgresql-password}" | base64 --decode)
+echo ${DB_PASSWORD}
+{{- end }}
+
+SETUP:
+1. Get the Artifactory IP and URL
+
+   {{- if contains "NodePort" .Values.nginx.service.type }}
+   export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "artifactory-ha.nginx.fullname" . }})
+   export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+   echo http://$NODE_IP:$NODE_PORT/
+
+   {{- else if contains "LoadBalancer" .Values.nginx.service.type }}
+   NOTE: It may take a few minutes for the LoadBalancer public IP to be available!
+
+   You can watch the status of the service by running 'kubectl get svc -w {{ template "artifactory-ha.nginx.fullname" . }}'
+   export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "artifactory-ha.nginx.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+   echo http://$SERVICE_IP/
+
+   {{- else if contains "ClusterIP" .Values.nginx.service.type }}
+   export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "component={{ .Values.nginx.name }}" -o jsonpath="{.items[0].metadata.name}")
+   kubectl port-forward --namespace {{ .Release.Namespace }} $POD_NAME 8080:80
+   echo http://127.0.0.1:8080
+
+   {{- end }}
+
+2. Open Artifactory in your browser
+   Default credentials for Artifactory:
+   user: admin
+   password: password
+
+  {{- if .Values.artifactory.license.secret }}
+
+3. Manage Artifactory license through the {{ .Values.artifactory.license.secret }} secret ONLY!
+   Since the Artifactory license(s) are managed with a secret ({{ .Values.artifactory.license.secret }}), any change made through the Artifactory UI might not be saved!
+
+  {{- else }}
+
+3. Add HA licenses to activate Artifactory HA through the Artifactory UI
+   NOTE: Each Artifactory node requires a valid license. See https://www.jfrog.com/confluence/display/RTF/HA+Installation+and+Setup for more details.
+
+  {{- end }}
+
+{{ if or .Values.artifactory.primary.javaOpts.jmx.enabled .Values.artifactory.node.javaOpts.jmx.enabled }}
+JMX configuration:
+{{- if not (contains "LoadBalancer" .Values.artifactory.service.type) }}
+If you want to access JMX from your computer with jconsole, you should set "artifactory.service.type=LoadBalancer" !!!
+{{ end }}
+
+1. Get the Artifactory service IP:
+{{- if .Values.artifactory.primary.javaOpts.jmx.enabled }}
+export PRIMARY_SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "artifactory-ha.primary.name" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+{{- end }}
+{{- if .Values.artifactory.node.javaOpts.jmx.enabled }}
+export MEMBER_SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "artifactory-ha.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+{{- end }}
+
+2. Map the service name to the service IP in /etc/hosts:
+{{- if .Values.artifactory.primary.javaOpts.jmx.enabled }}
+sudo sh -c "echo \"${PRIMARY_SERVICE_IP} {{ template "artifactory-ha.primary.name" . }}\" >> /etc/hosts"
+{{- end }}
+{{- if .Values.artifactory.node.javaOpts.jmx.enabled }}
+sudo sh -c "echo \"${MEMBER_SERVICE_IP} {{ template "artifactory-ha.fullname" . }}\" >> /etc/hosts"
+{{- end }}
+
+3. Launch jconsole:
+{{- if .Values.artifactory.primary.javaOpts.jmx.enabled }}
+jconsole {{ template "artifactory-ha.primary.name" . }}:{{ .Values.artifactory.primary.javaOpts.jmx.port }}
+{{- end }}
+{{- if .Values.artifactory.node.javaOpts.jmx.enabled }}
+jconsole {{ template "artifactory-ha.fullname" . }}:{{ .Values.artifactory.node.javaOpts.jmx.port }}
+{{- end }}
+{{- end }}
diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/_helpers.tpl b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/_helpers.tpl
new file mode 100755
index 0000000..32171c2
--- /dev/null
+++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/_helpers.tpl
@@ -0,0 +1,103 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "artifactory-ha.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+The primary node name
+*/}}
+{{- define "artifactory-ha.primary.name" -}}
+{{- if .Values.nameOverride -}}
+{{- printf "%s-primary" .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := .Release.Name | trunc 29 -}}
+{{- printf "%s-%s-primary" $name .Chart.Name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+The member node name
+*/}}
+{{- define "artifactory-ha.node.name" -}}
+{{- if .Values.nameOverride -}}
+{{- printf "%s-member" .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := .Release.Name | trunc 29 -}}
+{{- printf "%s-%s-member" $name .Chart.Name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Expand the name of the nginx service.
+*/}} +{{- define "artifactory-ha.nginx.name" -}} +{{- default .Values.nginx.name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "artifactory-ha.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "artifactory-ha.nginx.fullname" -}} +{{- if .Values.nginx.fullnameOverride -}} +{{- .Values.nginx.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nginx.name -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "artifactory-ha.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} +{{ default (include "artifactory-ha.fullname" .) .Values.serviceAccount.name }} +{{- else -}} +{{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "artifactory-ha.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Generate SSL certificates +*/}} +{{- define "artifactory-ha.gen-certs" -}} +{{- $altNames := list ( printf "%s.%s" (include "artifactory-ha.name" .) .Release.Namespace ) ( printf "%s.%s.svc" (include "artifactory-ha.name" .) .Release.Namespace ) -}} +{{- $ca := genCA "artifactory-ca" 365 -}} +{{- $cert := genSignedCert ( include "artifactory-ha.name" . ) nil $altNames 365 $ca -}} +tls.crt: {{ $cert.Cert | b64enc }} +tls.key: {{ $cert.Key | b64enc }} +{{- end -}} diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/access-bootstrap-creds.yaml b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/access-bootstrap-creds.yaml new file mode 100755 index 0000000..f4e34a2 --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/access-bootstrap-creds.yaml @@ -0,0 +1,15 @@ +{{- if not (and .Values.artifactory.accessAdmin.secret .Values.artifactory.accessAdmin.dataKey) }} +{{- if .Values.artifactory.accessAdmin.password }} +kind: Secret +apiVersion: v1 +metadata: + name: {{ template "artifactory-ha.fullname" . }}-bootstrap-creds + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + bootstrap.creds: {{ (printf "access-admin@%s=%s" .Values.artifactory.accessAdmin.ip .Values.artifactory.accessAdmin.password) | b64enc }} +{{- end }} +{{- end }} diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-binarystore-secret.yaml b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-binarystore-secret.yaml new file mode 100755 index 0000000..2e7cac7 --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-binarystore-secret.yaml @@ -0,0 +1,14 @@ +{{- if not .Values.artifactory.persistence.customBinarystoreXmlSecret }} +kind: Secret +apiVersion: v1 +metadata: + name: {{ template "artifactory-ha.fullname" . }}-binarystore + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +stringData: + binarystore.xml: |- +{{ tpl .Values.artifactory.persistence.binarystoreXml . | indent 4 }} +{{- end }} diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-configmaps.yaml b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-configmaps.yaml new file mode 100755 index 0000000..1385bc5 --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-configmaps.yaml @@ -0,0 +1,13 @@ +{{ if .Values.artifactory.configMaps }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "artifactory-ha.fullname" . }}-configmaps + labels: + app: {{ template "artifactory-ha.fullname" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: +{{ tpl .Values.artifactory.configMaps . | indent 2 }} +{{ end -}} \ No newline at end of file diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-installer-info.yaml b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-installer-info.yaml new file mode 100755 index 0000000..0c9a4f4 --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-installer-info.yaml @@ -0,0 +1,25 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: {{ template "artifactory-ha.fullname" . }}-installer-info + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + installer-info.json: | + { + "productId": "Helm_artifactory-ha/{{ .Chart.Version }}", + "features": [ + { + "featureId": "ArtifactoryVersion/{{ default .Chart.AppVersion .Values.artifactory.image.version }}" + }, + { + "featureId": "{{ if .Values.postgresql.enabled }}postgresql{{ else }}{{ default "derby" .Values.database.type }}{{ end }}/0.0.0" + }, + { + "featureId": "Platform/{{ default "kubernetes" .Values.installer.platform }}" + } + ] + } diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-license-secret.yaml b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-license-secret.yaml new file mode 100755 index 0000000..3f629c6 --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-license-secret.yaml @@ -0,0 +1,14 @@ +{{- with .Values.artifactory.license.licenseKey }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "artifactory-ha.fullname" $ }}-license + labels: + app: {{ template "artifactory-ha.name" $ }} + chart: {{ template "artifactory-ha.chart" $ }} + heritage: {{ $.Release.Service }} + release: {{ $.Release.Name }} +type: Opaque +data: + artifactory.lic: {{ . | b64enc | quote }} +{{- end }} \ No newline at end of file diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-networkpolicy.yaml b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-networkpolicy.yaml new file mode 100755 index 0000000..371dc9a --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-networkpolicy.yaml @@ -0,0 +1,34 @@ +{{- range .Values.networkpolicy }} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ template "artifactory-ha.fullname" $ }}-{{ .name }}-networkpolicy + labels: + app: {{ template "artifactory-ha.name" $ }} + chart: {{ template "artifactory-ha.chart" $ }} + release: {{ $.Release.Name }} + heritage: {{ $.Release.Service }} +spec: +{{- if .podSelector }} + podSelector: +{{ .podSelector | toYaml | trimSuffix "\n" | indent 4 -}} +{{ else }} + podSelector: {} +{{- end }} + policyTypes: + {{- if .ingress }} + - Ingress + {{- end }} + {{- if .egress }} + - Egress + {{- end }} +{{- if .ingress }} + ingress: +{{ .ingress | toYaml | trimSuffix "\n" | indent 2 -}} +{{- end }} +{{- if .egress }} + egress: +{{ .egress | toYaml | trimSuffix "\n" | indent 2 -}} +{{- end }} +--- +{{- end -}} \ No newline at end of file diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-nfs-pvc.yaml b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-nfs-pvc.yaml new file mode 100755 index 0000000..6ed7d82 --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-nfs-pvc.yaml @@ -0,0 +1,101 @@ +{{- if eq .Values.artifactory.persistence.type "nfs" }} +### Artifactory HA data +apiVersion: v1 +kind: PersistentVolume +metadata: + name: {{ template "artifactory-ha.fullname" . }}-data-pv + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + id: {{ template "artifactory-ha.name" . 
}}-data-pv + type: nfs-volume +spec: + {{- if .Values.artifactory.persistence.nfs.mountOptions }} + mountOptions: +{{ toYaml .Values.artifactory.persistence.nfs.mountOptions | indent 4 }} + {{- end }} + capacity: + storage: {{ .Values.artifactory.persistence.nfs.capacity }} + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + nfs: + server: {{ .Values.artifactory.persistence.nfs.ip }} + path: "{{ .Values.artifactory.persistence.nfs.haDataMount }}" + readOnly: false +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ template "artifactory-ha.fullname" . }}-data-pvc + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + type: nfs-volume +spec: + accessModes: + - ReadWriteOnce + storageClassName: "" + resources: + requests: + storage: {{ .Values.artifactory.persistence.nfs.capacity }} + selector: + matchLabels: + id: {{ template "artifactory-ha.name" . }}-data-pv + app: {{ template "artifactory-ha.name" . }} + release: {{ .Release.Name }} +--- +### Artifactory HA backup +apiVersion: v1 +kind: PersistentVolume +metadata: + name: {{ template "artifactory-ha.fullname" . }}-backup-pv + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + id: {{ template "artifactory-ha.name" . }}-backup-pv + type: nfs-volume +spec: + {{- if .Values.artifactory.persistence.nfs.mountOptions }} + mountOptions: +{{ toYaml .Values.artifactory.persistence.nfs.mountOptions | indent 4 }} + {{- end }} + capacity: + storage: {{ .Values.artifactory.persistence.nfs.capacity }} + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + nfs: + server: {{ .Values.artifactory.persistence.nfs.ip }} + path: "{{ .Values.artifactory.persistence.nfs.haBackupMount }}" + readOnly: false +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ template "artifactory-ha.fullname" . }}-backup-pvc + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + type: nfs-volume +spec: + accessModes: + - ReadWriteOnce + storageClassName: "" + resources: + requests: + storage: {{ .Values.artifactory.persistence.nfs.capacity }} + selector: + matchLabels: + id: {{ template "artifactory-ha.name" . }}-backup-pv + app: {{ template "artifactory-ha.name" . }} + release: {{ .Release.Name }} +{{- end }} diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-node-pdb.yaml b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-node-pdb.yaml new file mode 100755 index 0000000..871bb21 --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-node-pdb.yaml @@ -0,0 +1,19 @@ +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "artifactory-ha.fullname" . }}-node + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + component: {{ .Values.artifactory.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + selector: + matchLabels: + app: {{ template "artifactory-ha.name" . }} + {{- if eq .Values.artifactory.service.pool "members" }} + role: {{ template "artifactory-ha.node.name" . 
}} + {{- end }} + release: {{ .Release.Name }} + minAvailable: {{ .Values.artifactory.node.minAvailable }} diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-node-statefulset.yaml b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-node-statefulset.yaml new file mode 100755 index 0000000..67e1ab3 --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-node-statefulset.yaml @@ -0,0 +1,510 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "artifactory-ha.node.name" . }} + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + component: {{ .Values.artifactory.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + force-update: "{{ randAlpha 63 | lower }}" +{{- if .Values.artifactory.node.labels }} +{{ toYaml .Values.artifactory.node.labels | indent 4 }} +{{- end }} +spec: + serviceName: {{ template "artifactory-ha.node.name" . }} + replicas: {{ .Values.artifactory.node.replicaCount }} + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + app: {{ template "artifactory-ha.name" . }} + role: {{ template "artifactory-ha.node.name" . }} + release: {{ .Release.Name }} + template: + metadata: + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + role: {{ template "artifactory-ha.node.name" . }} + heritage: {{ .Release.Service }} + component: {{ .Values.artifactory.name }} + release: {{ .Release.Name }} + annotations: + checksum/binarystore: {{ include (print $.Template.BasePath "/artifactory-binarystore-secret.yaml") . | sha256sum }} + checksum/systemyaml: {{ include (print $.Template.BasePath "/artifactory-system-yaml.yaml") . | sha256sum }} + {{- range $key, $value := .Values.artifactory.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + spec: + {{- if .Values.artifactory.priorityClass.existingPriorityClass }} + priorityClassName: {{ .Values.artifactory.priorityClass.existingPriorityClass }} + {{- else -}} + {{- if .Values.artifactory.priorityClass.create }} + priorityClassName: {{ default (include "artifactory-ha.fullname" .) .Values.artifactory.priorityClass.name }} + {{- end }} + {{- end }} + serviceAccountName: {{ template "artifactory-ha.serviceAccountName" . }} + terminationGracePeriodSeconds: {{ .Values.artifactory.terminationGracePeriodSeconds }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + - name: {{ .Values.imagePullSecrets }} + {{- end }} + securityContext: + runAsUser: {{ .Values.artifactory.uid }} + fsGroup: {{ .Values.artifactory.uid }} + initContainers: + {{- if .Values.artifactory.customInitContainersBegin }} +{{ tpl .Values.artifactory.customInitContainersBegin . | indent 6 }} + {{- end }} + {{- if .Values.artifactory.persistence.enabled }} + {{- if eq .Values.artifactory.persistence.type "file-system" }} + {{- if .Values.artifactory.persistence.fileSystem.existingSharedClaim.enabled }} + - name: "create-artifactory-data-dir" + image: "{{ .Values.initContainerImage }}" + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + command: + - 'sh' + - '-c' + - > + mkdir -p {{ tpl .Values.artifactory.persistence.fileSystem.existingSharedClaim.dataDir . 
}}; + volumeMounts: + - mountPath: {{ .Values.artifactory.persistence.mountPath | quote }} + name: volume + {{- end }} + {{- end }} + {{- if .Values.artifactory.deleteDBPropertiesOnStartup }} + - name: "delete-db-properties" + image: "{{ .Values.initContainerImage }}" + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + command: + - 'sh' + - '-c' + - 'rm -fv {{ .Values.artifactory.persistence.mountPath }}/etc/db.properties' + volumeMounts: + - mountPath: {{ .Values.artifactory.persistence.mountPath | quote }} + name: volume + {{- end }} + - name: "remove-lost-found" + image: "{{ .Values.initContainerImage }}" + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + command: + - 'sh' + - '-c' + - > + rm -rfv {{ .Values.artifactory.persistence.mountPath }}/lost+found + volumeMounts: + - mountPath: {{ .Values.artifactory.persistence.mountPath | quote }} + name: volume + {{- end }} + - name: 'copy-system-yaml' + image: '{{ .Values.initContainerImage }}' + command: + - '/bin/sh' + - '-c' + - > + {{- if .Values.artifactory.node.waitForPrimaryStartup.enabled }} + echo "Sleeping to allow time for primary node to come up"; + sleep {{ .Values.artifactory.node.waitForPrimaryStartup.seconds }}; + {{- end }} + echo "Copy system.yaml to {{ .Values.artifactory.persistence.mountPath }}/etc"; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/etc; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/etc/access/keys/trusted; + cp -fv /tmp/etc/system.yaml {{ .Values.artifactory.persistence.mountPath }}/etc/system.yaml; + echo "Remove {{ .Values.artifactory.persistence.mountPath }}/lost+found folder if exists"; + rm -rfv {{ .Values.artifactory.persistence.mountPath }}/lost+found; + volumeMounts: + - name: volume + mountPath: {{ .Values.artifactory.persistence.mountPath | quote }} + - name: systemyaml + mountPath: "/tmp/etc/system.yaml" + subPath: system.yaml + {{- if .Values.artifactory.customPersistentPodVolumeClaim }} + - name: "prepare-custom-persistent-volume" + image: "{{ .Values.initContainerImage }}" + command: + - 'sh' + - '-c' + - > + chown -Rv {{ .Values.artifactory.uid }}:{{ .Values.artifactory.uid }} {{ .Values.artifactory.customPersistentPodVolumeClaim.mountPath }} + securityContext: + runAsUser: 0 + volumeMounts: + - name: {{ .Values.artifactory.customPersistentPodVolumeClaim.name }} + mountPath: {{ .Values.artifactory.customPersistentPodVolumeClaim.mountPath }} + {{- end }} + {{- if .Values.waitForDatabase }} + {{- if or .Values.postgresql.enabled }} + - name: "wait-for-db" + image: "{{ .Values.initContainerImage }}" + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + command: + - 'sh' + - '-c' + - > + until nc -z -w 2 {{ .Release.Name }}-postgresql {{ .Values.postgresql.service.port }} && echo database ok; do + sleep 2; + done; + {{- end }} + {{- end }} + {{- if .Values.artifactory.customInitContainers }} +{{ tpl .Values.artifactory.customInitContainers . 
| indent 6 }} + {{- end }} + containers: + - name: {{ .Values.artifactory.name }} + image: '{{ .Values.artifactory.image.repository }}:{{ default .Chart.AppVersion .Values.artifactory.image.version }}' + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + securityContext: + allowPrivilegeEscalation: false + command: + - '/bin/sh' + - '-c' + - > + {{- if .Values.artifactory.userPluginSecrets }} + echo "Copying plugins"; + cp -Lrf /tmp/plugin/*/* /tmp/plugins; + {{- end }} + {{- if .Values.artifactory.preStartCommand }} + echo "Running custom preStartCommand command"; + {{ tpl .Values.artifactory.preStartCommand . }}; + {{- end }} + /entrypoint-artifactory.sh + lifecycle: + postStart: + exec: + command: + - '/bin/sh' + - '-c' + - > + echo; + {{- if .Values.artifactory.postStartCommand }} + {{ .Values.artifactory.postStartCommand }} + {{- end }} + env: + {{- if .Values.database.secrets.user }} + - name: JF_SHARED_DATABSE_USERNAME + valueFrom: + secretKeyRef: + name: {{ .Values.database.secrets.user.name }} + key: {{ .Values.database.secrets.user.key }} + {{- end }} + {{- if .Values.database.secrets.password }} + - name: JF_SHARED_DATABASE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .Values.database.secrets.password.name }} + key: {{ .Values.database.secrets.password.key }} + {{- end }} + {{- if .Values.database.secrets.url }} + - name: JF_SHARED_DATABSE_URL + valueFrom: + secretKeyRef: + name: {{ .Values.database.secrets.url.name }} + key: {{ .Values.database.secrets.url.key }} + {{- end }} + - name: JF_SHARED_NODE_PRIMARY + value: "false" + - name: JF_SHARED_NODE_HAENABLED + value: "true" + - name: JF_SHARED_DATABSE_USERNAME + value: "artifactory" + - name: JF_SHARED_DATABASE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .Release.Name }}-postgresql + key: postgresql-password + ports: + - containerPort: {{ .Values.artifactory.internalPort }} + - containerPort: {{ .Values.artifactory.internalArtifactoryPort }} + {{- if .Values.artifactory.node.javaOpts.jmx.enabled }} + - containerPort: {{ .Values.artifactory.node.javaOpts.jmx.port }} + {{- end }} + volumeMounts: + {{- if .Values.artifactory.customPersistentVolumeClaim }} + - name: {{ .Values.artifactory.customPersistentVolumeClaim.name }} + mountPath: {{ .Values.artifactory.customPersistentVolumeClaim.mountPath }} + {{- end }} + {{- if .Values.artifactory.customPersistentPodVolumeClaim }} + - name: {{ .Values.artifactory.customPersistentPodVolumeClaim.name }} + mountPath: {{ .Values.artifactory.customPersistentPodVolumeClaim.mountPath }} + {{- end }} + {{- if .Values.artifactory.userPluginSecrets }} + - name: tmp-plugins + mountPath: "/tmp/plugins/" + {{- range .Values.artifactory.userPluginSecrets }} + - name: {{ tpl . $ }} + mountPath: "/tmp/plugin/{{ tpl . 
$ }}" + {{- end }} + {{- end }} + - name: volume + mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + {{- if eq .Values.artifactory.persistence.type "file-system" }} + {{- if .Values.artifactory.persistence.fileSystem.existingSharedClaim.enabled }} + {{- range $sharedClaimNumber, $e := until (.Values.artifactory.persistence.fileSystem.existingSharedClaim.numberOfExistingClaims|int) }} + - name: artifactory-ha-data-{{ $sharedClaimNumber }} + mountPath: "{{ tpl $.Values.artifactory.persistence.fileSystem.existingSharedClaim.dataDir $ }}/filestore{{ $sharedClaimNumber }}" + {{- end }} + - name: artifactory-ha-backup + mountPath: "{{ $.Values.artifactory.persistence.fileSystem.existingSharedClaim.backupDir }}" + {{- end }} + {{- end }} + {{- if eq .Values.artifactory.persistence.type "nfs" }} + - name: artifactory-ha-data + mountPath: "{{ .Values.artifactory.persistence.nfs.dataDir }}" + - name: artifactory-ha-backup + mountPath: "{{ .Values.artifactory.persistence.nfs.backupDir }}" + {{- else }} + {{- if .Values.artifactory.binarystore.enabled }} + - name: binarystore-xml + mountPath: "/artifactory_extra_conf/binarystore.xml" + subPath: binarystore.xml + {{- end }} + {{- end }} + - name: installer-info + mountPath: "/artifactory_extra_conf/info/installer-info.json" + subPath: installer-info.json + {{- if .Values.artifactory.customVolumeMounts }} +{{ tpl .Values.artifactory.customVolumeMounts . | indent 8 }} + {{- end }} + resources: +{{ toYaml .Values.artifactory.node.resources | indent 10 }} + {{- if .Values.artifactory.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: {{ .Values.artifactory.readinessProbe.path }} + port: {{ .Values.artifactory.internalPort }} + initialDelaySeconds: {{ .Values.artifactory.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.artifactory.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.artifactory.readinessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.artifactory.readinessProbe.failureThreshold }} + successThreshold: {{ .Values.artifactory.readinessProbe.successThreshold }} + {{- end }} + {{- if .Values.artifactory.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: {{ .Values.artifactory.livenessProbe.path }} + port: {{ .Values.artifactory.internalPort }} + initialDelaySeconds: {{ .Values.artifactory.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.artifactory.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.artifactory.livenessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.artifactory.livenessProbe.failureThreshold }} + successThreshold: {{ .Values.artifactory.livenessProbe.successThreshold }} + {{- end }} + {{- $image := .Values.logger.image.repository }} + {{- $tag := .Values.logger.image.tag }} + {{- $mountPath := .Values.artifactory.persistence.mountPath }} + {{- range .Values.artifactory.loggers }} + - name: {{ . | replace "_" "-" | replace "." "-" }} + image: '{{ $image }}:{{ $tag }}' + command: + - tail + args: + - '-F' + - '{{ $mountPath }}/logs/{{ . }}' + volumeMounts: + - name: volume + mountPath: {{ $mountPath }} + {{- end }} + {{ if .Values.artifactory.catalinaLoggers }} + {{- range .Values.artifactory.catalinaLoggers }} + - name: {{ . | replace "_" "-" | replace "." "-" }} + image: '{{ $image }}:{{ $tag }}' + command: + - 'sh' + - '-c' + - 'sh /scripts/tail-log.sh {{ $mountPath }}/logs/catalina {{ . 
}}' + volumeMounts: + - name: volume + mountPath: {{ $mountPath }} + - name: catalina-logger + mountPath: /scripts/tail-log.sh + subPath: tail-log.sh + {{- end }} + {{- end }} + {{- if .Values.filebeat.enabled }} + - name: {{ .Values.filebeat.name }} + image: "{{ .Values.filebeat.image.repository }}:{{ .Values.filebeat.image.version }}" + imagePullPolicy: {{ .Values.filebeat.image.pullPolicy }} + args: + - "-e" + - "-E" + - "http.enabled=true" + securityContext: + runAsUser: 0 + volumeMounts: + - name: filebeat-config + mountPath: /usr/share/filebeat/filebeat.yml + readOnly: true + subPath: filebeat.yml + - name: volume + mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + livenessProbe: +{{ toYaml .Values.filebeat.livenessProbe | indent 10 }} + readinessProbe: +{{ toYaml .Values.filebeat.readinessProbe | indent 10 }} + resources: +{{ toYaml .Values.filebeat.resources | indent 10 }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriod }} + {{- end }} + {{- if .Values.artifactory.customSidecarContainers }} +{{ tpl .Values.artifactory.customSidecarContainers . | indent 6 }} + {{- end }} + {{- with .Values.artifactory.node.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- if .Values.artifactory.node.affinity }} + {{- with .Values.artifactory.node.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- else if eq .Values.artifactory.node.podAntiAffinity.type "soft" }} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + topologyKey: {{ .Values.artifactory.node.podAntiAffinity.topologyKey }} + labelSelector: + matchLabels: + app: {{ template "artifactory-ha.name" . }} + release: {{ .Release.Name }} + {{- if eq .Values.artifactory.service.pool "members" }} + role: {{ template "artifactory-ha.node.name" . }} + {{- end }} + {{- else if eq .Values.artifactory.node.podAntiAffinity.type "hard" }} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: {{ .Values.artifactory.node.podAntiAffinity.topologyKey }} + labelSelector: + matchLabels: + app: {{ template "artifactory-ha.name" . }} + release: {{ .Release.Name }} + {{- if eq .Values.artifactory.service.pool "members" }} + role: {{ template "artifactory-ha.node.name" . }} + {{- end }} + {{- end }} + {{- with .Values.artifactory.node.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} + {{- end }} + volumes: + {{- if .Values.artifactory.binarystore.enabled }} + - name: binarystore-xml + secret: + {{- if .Values.artifactory.persistence.customBinarystoreXmlSecret }} + secretName: {{ .Values.artifactory.persistence.customBinarystoreXmlSecret }} + {{- else }} + secretName: {{ template "artifactory-ha.fullname" . }}-binarystore + {{- end }} + {{- end }} + - name: installer-info + configMap: + name: {{ template "artifactory-ha.fullname" . }}-installer-info + {{- if .Values.artifactory.userPluginSecrets }} + - name: tmp-plugins + emptyDir: {} + {{- range .Values.artifactory.userPluginSecrets }} + - name: {{ tpl . $ }} + secret: + secretName: {{ tpl . $ }} + {{- end }} + {{- end }} + {{- if .Values.artifactory.catalinaLoggers }} + - name: catalina-logger + configMap: + name: {{ template "artifactory-ha.fullname" . }}-catalina-logger + {{- end }} + {{- if .Values.artifactory.configMaps }} + - name: artifactory-configmaps + configMap: + name: {{ template "artifactory-ha.fullname" . 
}}-configmaps + {{- end }} + {{- if eq .Values.artifactory.persistence.type "file-system" }} + {{- if .Values.artifactory.persistence.fileSystem.existingSharedClaim.enabled }} + {{- range $sharedClaimNumber, $e := until (.Values.artifactory.persistence.fileSystem.existingSharedClaim.numberOfExistingClaims|int) }} + - name: artifactory-ha-data-{{ $sharedClaimNumber }} + persistentVolumeClaim: + claimName: {{ template "artifactory-ha.fullname" $ }}-data-pvc-{{ $sharedClaimNumber }} + {{- end }} + - name: artifactory-ha-backup + persistentVolumeClaim: + claimName: {{ template "artifactory-ha.fullname" $ }}-backup-pvc + {{- end }} + {{- end }} + {{- if eq .Values.artifactory.persistence.type "nfs" }} + - name: artifactory-ha-data + persistentVolumeClaim: + claimName: {{ template "artifactory-ha.fullname" . }}-data-pvc + - name: artifactory-ha-backup + persistentVolumeClaim: + claimName: {{ template "artifactory-ha.fullname" . }}-backup-pvc + {{- end }} + - name: systemyaml + secret: + secretName: {{ template "artifactory-ha.primary.name" . }}-system-yaml + {{- if .Values.artifactory.customPersistentVolumeClaim }} + - name: {{ .Values.artifactory.customPersistentVolumeClaim.name }} + persistentVolumeClaim: + claimName: {{ .Values.artifactory.customPersistentVolumeClaim.name }} + {{- end }} + {{- if .Values.filebeat.enabled }} + - name: filebeat-config + configMap: + name: {{ template "artifactory-ha.fullname" . }}-filebeat-config + {{- end }} + {{- if .Values.artifactory.customVolumes }} +{{ tpl .Values.artifactory.customVolumes . | indent 6 }} + {{- end }} + {{- if not .Values.artifactory.persistence.enabled }} + - name: volume + emptyDir: + sizeLimit: {{ .Values.artifactory.persistence.size }} + {{- end }} + volumeClaimTemplates: + {{- if .Values.artifactory.persistence.enabled }} + - metadata: + name: volume + {{- if not .Values.artifactory.node.persistence.existingClaim }} + spec: + {{- if .Values.artifactory.persistence.storageClassName }} + {{- if (eq "-" .Values.artifactory.persistence.storageClassName) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.artifactory.persistence.storageClassName }}" + {{- end }} + {{- end }} + accessModes: [ "{{ .Values.artifactory.persistence.accessMode }}" ] + resources: + requests: + storage: {{ .Values.artifactory.persistence.size }} + {{- end }} + {{- end }} + {{- if .Values.artifactory.customPersistentPodVolumeClaim }} + - metadata: + name: {{ .Values.artifactory.customPersistentPodVolumeClaim.name }} + spec: + {{- if .Values.artifactory.customPersistentPodVolumeClaim.storageClassName }} + {{- if (eq "-" .Values.artifactory.customPersistentPodVolumeClaim.storageClassName) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.artifactory.customPersistentPodVolumeClaim.storageClassName }}" + {{- end }} + {{- end }} + accessModes: + {{- range .Values.artifactory.customPersistentPodVolumeClaim.accessModes }} + - {{ . 
| quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.artifactory.customPersistentPodVolumeClaim.size }} + {{- end }} diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-primary-statefulset.yaml b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-primary-statefulset.yaml new file mode 100755 index 0000000..8b53315 --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-primary-statefulset.yaml @@ -0,0 +1,587 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "artifactory-ha.primary.name" . }} + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + component: {{ .Values.artifactory.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + force-update: "{{ randAlpha 63 | lower }}" +{{- if .Values.artifactory.primary.labels }} +{{ toYaml .Values.artifactory.primary.labels | indent 4 }} +{{- end }} +spec: + serviceName: {{ template "artifactory-ha.primary.name" . }} + replicas: 1 + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + app: {{ template "artifactory-ha.name" . }} + role: {{ template "artifactory-ha.primary.name" . }} + release: {{ .Release.Name }} + template: + metadata: + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + role: {{ template "artifactory-ha.primary.name" . }} + component: {{ .Values.artifactory.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + annotations: + checksum/binarystore: {{ include (print $.Template.BasePath "/artifactory-binarystore-secret.yaml") . | sha256sum }} + checksum/systemyaml: {{ include (print $.Template.BasePath "/artifactory-system-yaml.yaml") . | sha256sum }} + {{- if not (and .Values.artifactory.accessAdmin.secret .Values.artifactory.accessAdmin.dataKey) }} + checksum/access-creds: {{ include (print $.Template.BasePath "/access-bootstrap-creds.yaml") . | sha256sum }} + {{- end }} + {{- range $key, $value := .Values.artifactory.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + spec: + {{- if .Values.artifactory.priorityClass.existingPriorityClass }} + priorityClassName: {{ .Values.artifactory.priorityClass.existingPriorityClass }} + {{- else -}} + {{- if .Values.artifactory.priorityClass.create }} + priorityClassName: {{ default (include "artifactory-ha.fullname" .) .Values.artifactory.priorityClass.name }} + {{- end }} + {{- end }} + serviceAccountName: {{ template "artifactory-ha.serviceAccountName" . }} + terminationGracePeriodSeconds: {{ .Values.artifactory.terminationGracePeriodSeconds }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + - name: {{ .Values.imagePullSecrets }} + {{- end }} + securityContext: + runAsUser: {{ .Values.artifactory.uid }} + fsGroup: {{ .Values.artifactory.uid }} + initContainers: + {{- if .Values.artifactory.customInitContainersBegin }} +{{ tpl .Values.artifactory.customInitContainersBegin . 
| indent 6 }} + {{- end }} + {{- if .Values.artifactory.persistence.enabled }} + {{- if eq .Values.artifactory.persistence.type "file-system" }} + {{- if .Values.artifactory.persistence.fileSystem.existingSharedClaim.enabled }} + - name: "create-artifactory-data-dir" + image: "{{ .Values.initContainerImage }}" + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + command: + - 'sh' + - '-c' + - > + mkdir -p {{ tpl .Values.artifactory.persistence.fileSystem.existingSharedClaim.dataDir . }}; + volumeMounts: + - mountPath: {{ .Values.artifactory.persistence.mountPath | quote }} + name: volume + {{- end }} + {{- end }} + {{- if .Values.artifactory.deleteDBPropertiesOnStartup }} + - name: "delete-db-properties" + image: "{{ .Values.initContainerImage }}" + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + command: + - 'sh' + - '-c' + - 'rm -fv {{ .Values.artifactory.persistence.mountPath }}/etc/db.properties' + volumeMounts: + - mountPath: {{ .Values.artifactory.persistence.mountPath | quote }} + name: volume + {{- end }} + - name: "remove-lost-found" + image: "{{ .Values.initContainerImage }}" + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + command: + - 'sh' + - '-c' + - > + rm -rfv {{ .Values.artifactory.persistence.mountPath }}/lost+found; + rm -rfv {{ tpl .Values.artifactory.persistence.fileSystem.existingSharedClaim.dataDir . }}/lost+found; + rm -rfv {{ .Values.artifactory.persistence.fileSystem.existingSharedClaim.backupDir }}/lost+found; + volumeMounts: + - name: volume + mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + {{- if eq .Values.artifactory.persistence.type "file-system" }} + {{- if .Values.artifactory.persistence.fileSystem.existingSharedClaim.enabled }} + {{- range $sharedClaimNumber, $e := until (.Values.artifactory.persistence.fileSystem.existingSharedClaim.numberOfExistingClaims|int) }} + - name: artifactory-ha-data-{{ $sharedClaimNumber }} + mountPath: "{{ tpl $.Values.artifactory.persistence.fileSystem.existingSharedClaim.dataDir $ }}/filestore{{ $sharedClaimNumber }}" + {{- end }} + - name: artifactory-ha-backup + mountPath: "{{ $.Values.artifactory.persistence.fileSystem.existingSharedClaim.backupDir }}" + {{- end }} + {{- end }} + {{- if or (and .Values.artifactory.accessAdmin.secret .Values.artifactory.accessAdmin.dataKey) .Values.artifactory.accessAdmin.password }} + - name: "access-bootstrap-creds" + image: "{{ .Values.initContainerImage }}" + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + command: + - 'sh' + - '-c' + - > + echo "Preparing custom Access bootstrap.creds"; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/access/etc; + cp -Lrf /tmp/access/bootstrap.creds {{ .Values.artifactory.persistence.mountPath }}/access/etc/bootstrap.creds; + chmod 600 {{ .Values.artifactory.persistence.mountPath }}/access/etc/bootstrap.creds; + volumeMounts: + - name: volume + mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + - name: access-bootstrap-creds + mountPath: "/tmp/access/bootstrap.creds" + {{- if and .Values.artifactory.accessAdmin.secret .Values.artifactory.accessAdmin.dataKey }} + subPath: {{ .Values.artifactory.accessAdmin.dataKey }} + {{- else }} + subPath: bootstrap.creds + {{- end }} + {{- end 
}} + {{- end }} + - name: 'copy-system-yaml' + image: '{{ .Values.initContainerImage }}' + command: + - '/bin/sh' + - '-c' + - > + echo "Copy system.yaml to {{ .Values.artifactory.persistence.mountPath }}/etc"; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/etc; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/etc/access/keys/trusted; + cp -fv /tmp/etc/system.yaml {{ .Values.artifactory.persistence.mountPath }}/etc/system.yaml; + echo "Remove {{ .Values.artifactory.persistence.mountPath }}/lost+found folder if exists"; + rm -rfv {{ .Values.artifactory.persistence.mountPath }}/lost+found; + volumeMounts: + - name: volume + mountPath: {{ .Values.artifactory.persistence.mountPath | quote }} + - name: systemyaml + mountPath: "/tmp/etc/system.yaml" + subPath: system.yaml + {{- if .Values.artifactory.customPersistentPodVolumeClaim }} + - name: "prepare-custom-persistent-volume" + image: "{{ .Values.initContainerImage }}" + command: + - 'sh' + - '-c' + - > + chown -Rv {{ .Values.artifactory.uid }}:{{ .Values.artifactory.uid }} {{ .Values.artifactory.customPersistentPodVolumeClaim.mountPath }} + securityContext: + runAsUser: 0 + volumeMounts: + - name: {{ .Values.artifactory.customPersistentPodVolumeClaim.name }} + mountPath: {{ .Values.artifactory.customPersistentPodVolumeClaim.mountPath }} + {{- end }} + {{- if .Values.waitForDatabase }} + {{- if or .Values.postgresql.enabled }} + - name: "wait-for-db" + image: "{{ .Values.initContainerImage }}" + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + command: + - 'sh' + - '-c' + - > + until nc -z -w 2 {{ .Release.Name }}-postgresql {{ .Values.postgresql.service.port }} && echo database ok; do + sleep 2; + done; + {{- end }} + {{- end }} + {{- if .Values.artifactory.customInitContainers }} +{{ tpl .Values.artifactory.customInitContainers . | indent 6 }} + {{- end }} + containers: + - name: {{ .Values.artifactory.name }} + image: '{{ .Values.artifactory.image.repository }}:{{ default .Chart.AppVersion .Values.artifactory.image.version }}' + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + securityContext: + allowPrivilegeEscalation: false + command: + - '/bin/sh' + - '-c' + - > + set -e; + {{- if .Values.artifactory.configMapName }} + echo "Copying bootstrap configs"; + cp -Lrf /bootstrap/* /artifactory_extra_conf/; + {{- end }} + {{- if .Values.artifactory.userPluginSecrets }} + echo "Copying plugins"; + cp -Lrf /tmp/plugin/*/* /tmp/plugins; + {{- end }} + {{- range .Values.artifactory.copyOnEveryStartup }} + {{- $targetPath := printf "%s/%s" $.Values.artifactory.persistence.mountPath .target }} + {{- $baseDirectory := regexFind ".*/" $targetPath }} + mkdir -p {{ $baseDirectory }}; + cp -Lrf {{ .source }} {{ $.Values.artifactory.persistence.mountPath }}/{{ .target }}; + {{- end }} + {{- if .Values.artifactory.preStartCommand }} + echo "Running custom preStartCommand command"; + {{ tpl .Values.artifactory.preStartCommand . 
}}; + {{- end }} + /entrypoint-artifactory.sh + lifecycle: + postStart: + exec: + command: + - '/bin/sh' + - '-c' + - > + echo; + {{- if .Values.artifactory.postStartCommand }} + {{ .Values.artifactory.postStartCommand }} + {{- end }} + env: + {{- if .Values.database.secrets.user }} + - name: JF_SHARED_DATABSE_USERNAME + valueFrom: + secretKeyRef: + name: {{ .Values.database.secrets.user.name }} + key: {{ .Values.database.secrets.user.key }} + {{- end }} + {{- if .Values.database.secrets.password }} + - name: JF_SHARED_DATABASE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .Values.database.secrets.password.name }} + key: {{ .Values.database.secrets.password.key }} + {{- end }} + {{- if .Values.database.secrets.url }} + - name: JF_SHARED_DATABSE_URL + valueFrom: + secretKeyRef: + name: {{ .Values.database.secrets.url.name }} + key: {{ .Values.database.secrets.url.key }} + {{- end }} + - name: JF_SHARED_NODE_PRIMARY + value: "true" + - name: JF_SHARED_NODE_HAENABLED + value: "true" + - name: JF_SHARED_DATABSE_USERNAME + value: "artifactory" + - name: JF_SHARED_DATABASE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .Release.Name }}-postgresql + key: postgresql-password + ports: + - containerPort: {{ .Values.artifactory.internalPort }} + - containerPort: {{ .Values.artifactory.internalArtifactoryPort }} + {{- if .Values.artifactory.primary.javaOpts.jmx.enabled }} + - containerPort: {{ .Values.artifactory.primary.javaOpts.jmx.port }} + {{- end }} + volumeMounts: + {{- if .Values.artifactory.customPersistentVolumeClaim }} + - name: {{ .Values.artifactory.customPersistentVolumeClaim.name }} + mountPath: {{ .Values.artifactory.customPersistentVolumeClaim.mountPath }} + {{- end }} + {{- if .Values.artifactory.customPersistentPodVolumeClaim }} + - name: {{ .Values.artifactory.customPersistentPodVolumeClaim.name }} + mountPath: {{ .Values.artifactory.customPersistentPodVolumeClaim.mountPath }} + {{- end }} + {{- if .Values.artifactory.userPluginSecrets }} + - name: tmp-plugins + mountPath: "/tmp/plugins/" + {{- range .Values.artifactory.userPluginSecrets }} + - name: {{ tpl . $ }} + mountPath: "/tmp/plugin/{{ tpl . 
$ }}" + {{- end }} + {{- end }} + - name: volume + mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + {{- if eq .Values.artifactory.persistence.type "file-system" }} + {{- if .Values.artifactory.persistence.fileSystem.existingSharedClaim.enabled }} + {{- range $sharedClaimNumber, $e := until (.Values.artifactory.persistence.fileSystem.existingSharedClaim.numberOfExistingClaims|int) }} + - name: artifactory-ha-data-{{ $sharedClaimNumber }} + mountPath: "{{ tpl $.Values.artifactory.persistence.fileSystem.existingSharedClaim.dataDir $ }}/filestore{{ $sharedClaimNumber }}" + {{- end }} + - name: artifactory-ha-backup + mountPath: "{{ $.Values.artifactory.persistence.fileSystem.existingSharedClaim.backupDir }}" + {{- end }} + {{- end }} + {{- if eq .Values.artifactory.persistence.type "nfs" }} + - name: artifactory-ha-data + mountPath: "{{ .Values.artifactory.persistence.nfs.dataDir }}" + - name: artifactory-ha-backup + mountPath: "{{ .Values.artifactory.persistence.nfs.backupDir }}" + {{- else }} + {{- if .Values.artifactory.binarystore.enabled }} + - name: binarystore-xml + mountPath: "/artifactory_extra_conf/binarystore.xml" + subPath: binarystore.xml + {{- end }} + {{- end }} + {{- if .Values.artifactory.configMapName }} + - name: bootstrap-config + mountPath: "/bootstrap/" + {{- end }} + {{- if or .Values.artifactory.license.secret .Values.artifactory.license.licenseKey }} + - name: artifactory-license + mountPath: "/artifactory_extra_conf/artifactory.cluster.license" + {{- if .Values.artifactory.license.secret }} + subPath: {{ .Values.artifactory.license.dataKey }} + {{- else if .Values.artifactory.license.licenseKey }} + subPath: artifactory.lic + {{- end }} + {{- end }} + - name: installer-info + mountPath: "/artifactory_extra_conf/info/installer-info.json" + subPath: installer-info.json + {{- if .Values.artifactory.customVolumeMounts }} +{{ tpl .Values.artifactory.customVolumeMounts . | indent 8 }} + {{- end }} + resources: +{{ toYaml .Values.artifactory.primary.resources | indent 10 }} + {{- if .Values.artifactory.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: {{ .Values.artifactory.readinessProbe.path }} + port: {{ .Values.artifactory.internalPort }} + initialDelaySeconds: {{ .Values.artifactory.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.artifactory.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.artifactory.readinessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.artifactory.readinessProbe.failureThreshold }} + successThreshold: {{ .Values.artifactory.readinessProbe.successThreshold }} + {{- end }} + {{- if .Values.artifactory.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: {{ .Values.artifactory.livenessProbe.path }} + port: {{ .Values.artifactory.internalPort }} + initialDelaySeconds: {{ .Values.artifactory.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.artifactory.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.artifactory.livenessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.artifactory.livenessProbe.failureThreshold }} + successThreshold: {{ .Values.artifactory.livenessProbe.successThreshold }} + {{- end }} + {{- $image := .Values.logger.image.repository }} + {{- $tag := .Values.logger.image.tag }} + {{- $mountPath := .Values.artifactory.persistence.mountPath }} + {{- range .Values.artifactory.loggers }} + - name: {{ . | replace "_" "-" | replace "." 
"-" }} + image: '{{ $image }}:{{ $tag }}' + command: + - tail + args: + - '-F' + - '{{ $mountPath }}/logs/{{ . }}' + volumeMounts: + - name: volume + mountPath: {{ $mountPath }} + {{- end }} + {{ if .Values.artifactory.catalinaLoggers }} + {{- range .Values.artifactory.catalinaLoggers }} + - name: {{ . | replace "_" "-" | replace "." "-" }} + image: '{{ $image }}:{{ $tag }}' + command: + - 'sh' + - '-c' + - 'sh /scripts/tail-log.sh {{ $mountPath }}/logs/catalina {{ . }}' + volumeMounts: + - name: volume + mountPath: {{ $mountPath }} + - name: catalina-logger + mountPath: /scripts/tail-log.sh + subPath: tail-log.sh + {{- end }} + {{- end }} + {{- if .Values.filebeat.enabled }} + - name: {{ .Values.filebeat.name }} + image: "{{ .Values.filebeat.image.repository }}:{{ .Values.filebeat.image.version }}" + imagePullPolicy: {{ .Values.filebeat.image.pullPolicy }} + args: + - "-e" + - "-E" + - "http.enabled=true" + securityContext: + runAsUser: 0 + volumeMounts: + - name: filebeat-config + mountPath: /usr/share/filebeat/filebeat.yml + readOnly: true + subPath: filebeat.yml + - name: volume + mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + livenessProbe: +{{ toYaml .Values.filebeat.livenessProbe | indent 10 }} + readinessProbe: +{{ toYaml .Values.filebeat.readinessProbe | indent 10 }} + resources: +{{ toYaml .Values.filebeat.resources | indent 10 }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriod }} + {{- end }} + {{- if .Values.artifactory.customSidecarContainers }} +{{ tpl .Values.artifactory.customSidecarContainers . | indent 6 }} + {{- end }} + {{- with .Values.artifactory.primary.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- if .Values.artifactory.primary.affinity }} + {{- with .Values.artifactory.primary.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- else if eq .Values.artifactory.primary.podAntiAffinity.type "soft" }} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + topologyKey: {{ .Values.artifactory.primary.podAntiAffinity.topologyKey }} + labelSelector: + matchLabels: + app: {{ template "artifactory-ha.name" . }} + release: {{ .Release.Name }} + {{- else if eq .Values.artifactory.primary.podAntiAffinity.type "hard" }} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: {{ .Values.artifactory.primary.podAntiAffinity.topologyKey }} + labelSelector: + matchLabels: + app: {{ template "artifactory-ha.name" . }} + release: {{ .Release.Name }} + {{- end }} + {{- with .Values.artifactory.primary.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} + {{- end }} + volumes: + {{- if .Values.artifactory.binarystore.enabled }} + - name: binarystore-xml + secret: + {{- if .Values.artifactory.persistence.customBinarystoreXmlSecret }} + secretName: {{ .Values.artifactory.persistence.customBinarystoreXmlSecret }} + {{- else }} + secretName: {{ template "artifactory-ha.fullname" . }}-binarystore + {{- end }} + {{- end }} + - name: installer-info + configMap: + name: {{ template "artifactory-ha.fullname" . }}-installer-info + {{- if .Values.artifactory.userPluginSecrets }} + - name: tmp-plugins + emptyDir: {} + {{- range .Values.artifactory.userPluginSecrets }} + - name: {{ tpl . $ }} + secret: + secretName: {{ tpl . 
$ }} + {{- end }} + {{- end }} + {{- if .Values.artifactory.configMapName }} + - name: bootstrap-config + configMap: + name: {{ .Values.artifactory.configMapName }} + {{- end}} + {{- if .Values.artifactory.catalinaLoggers }} + - name: catalina-logger + configMap: + name: {{ template "artifactory-ha.fullname" . }}-catalina-logger + {{- end }} + {{- if .Values.artifactory.configMaps }} + - name: artifactory-configmaps + configMap: + name: {{ template "artifactory-ha.fullname" . }}-configmaps + {{- end }} + {{- if or .Values.artifactory.license.secret .Values.artifactory.license.licenseKey }} + - name: artifactory-license + secret: + {{- if .Values.artifactory.license.secret }} + secretName: {{ .Values.artifactory.license.secret }} + {{- else if .Values.artifactory.license.licenseKey }} + secretName: {{ template "artifactory-ha.fullname" . }}-license + {{- end }} + {{- end }} + {{- if or (and .Values.artifactory.accessAdmin.secret .Values.artifactory.accessAdmin.dataKey) .Values.artifactory.accessAdmin.password }} + - name: access-bootstrap-creds + secret: + {{- if and .Values.artifactory.accessAdmin.secret .Values.artifactory.accessAdmin.dataKey }} + secretName: {{ .Values.artifactory.accessAdmin.secret }} + {{- else }} + secretName: {{ template "artifactory-ha.fullname" . }}-bootstrap-creds + {{- end }} + {{- end }} + {{- if eq .Values.artifactory.persistence.type "file-system" }} + {{- if .Values.artifactory.persistence.fileSystem.existingSharedClaim.enabled }} + {{- range $sharedClaimNumber, $e := until (.Values.artifactory.persistence.fileSystem.existingSharedClaim.numberOfExistingClaims|int) }} + - name: artifactory-ha-data-{{ $sharedClaimNumber }} + persistentVolumeClaim: + claimName: {{ template "artifactory-ha.fullname" $ }}-data-pvc-{{ $sharedClaimNumber }} + {{- end }} + - name: artifactory-ha-backup + persistentVolumeClaim: + claimName: {{ template "artifactory-ha.fullname" $ }}-backup-pvc + {{- end }} + {{- end }} + {{- if eq .Values.artifactory.persistence.type "nfs" }} + - name: artifactory-ha-data + persistentVolumeClaim: + claimName: {{ template "artifactory-ha.fullname" . }}-data-pvc + - name: artifactory-ha-backup + persistentVolumeClaim: + claimName: {{ template "artifactory-ha.fullname" . }}-backup-pvc + {{- end }} + - name: systemyaml + secret: + secretName: {{ template "artifactory-ha.primary.name" . }}-system-yaml + {{- if .Values.artifactory.customPersistentVolumeClaim }} + - name: {{ .Values.artifactory.customPersistentVolumeClaim.name }} + persistentVolumeClaim: + claimName: {{ .Values.artifactory.customPersistentVolumeClaim.name }} + {{- end }} + {{- if .Values.filebeat.enabled }} + - name: filebeat-config + configMap: + name: {{ template "artifactory-ha.fullname" . }}-filebeat-config + {{- end }} + {{- if .Values.artifactory.customVolumes }} +{{ tpl .Values.artifactory.customVolumes . 
| indent 6 }} + {{- end }} + {{- if not .Values.artifactory.persistence.enabled }} + - name: volume + emptyDir: + sizeLimit: {{ .Values.artifactory.persistence.size }} + {{- end }} + volumeClaimTemplates: + {{- if .Values.artifactory.persistence.enabled }} + - metadata: + name: volume + {{- if not .Values.artifactory.primary.persistence.existingClaim }} + spec: + {{- if .Values.artifactory.persistence.storageClassName }} + {{- if (eq "-" .Values.artifactory.persistence.storageClassName) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.artifactory.persistence.storageClassName }}" + {{- end }} + {{- end }} + accessModes: [ "{{ .Values.artifactory.persistence.accessMode }}" ] + resources: + requests: + storage: {{ .Values.artifactory.persistence.size }} + {{- end }} + {{- end }} + {{- if .Values.artifactory.customPersistentPodVolumeClaim }} + - metadata: + name: {{ .Values.artifactory.customPersistentPodVolumeClaim.name }} + spec: + {{- if .Values.artifactory.customPersistentPodVolumeClaim.storageClassName }} + {{- if (eq "-" .Values.artifactory.customPersistentPodVolumeClaim.storageClassName) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.artifactory.customPersistentPodVolumeClaim.storageClassName }}" + {{- end }} + {{- end }} + accessModes: + {{- range .Values.artifactory.customPersistentPodVolumeClaim.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.artifactory.customPersistentPodVolumeClaim.size }} + {{- end }} diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-priority-class.yaml b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-priority-class.yaml new file mode 100755 index 0000000..417ec5c --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-priority-class.yaml @@ -0,0 +1,9 @@ +{{- if .Values.artifactory.priorityClass.create }} +apiVersion: scheduling.k8s.io/v1 +kind: PriorityClass +metadata: + name: {{ default (include "artifactory-ha.fullname" .) .Values.artifactory.priorityClass.name }} +value: {{ .Values.artifactory.priorityClass.value }} +globalDefault: false +description: "Artifactory priority class" +{{- end }} diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-role.yaml b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-role.yaml new file mode 100755 index 0000000..c86bffd --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-role.yaml @@ -0,0 +1,14 @@ +{{- if .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + component: {{ .Values.artifactory.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "artifactory-ha.fullname" . 
}} +rules: +{{ toYaml .Values.rbac.role.rules }} +{{- end }} diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-rolebinding.yaml b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-rolebinding.yaml new file mode 100755 index 0000000..4412870 --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-rolebinding.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + component: {{ .Values.artifactory.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "artifactory-ha.fullname" . }} +subjects: +- kind: ServiceAccount + name: {{ template "artifactory-ha.serviceAccountName" . }} +roleRef: + kind: Role + apiGroup: rbac.authorization.k8s.io + name: {{ template "artifactory-ha.fullname" . }} +{{- end }} diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-secrets.yaml b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-secrets.yaml new file mode 100755 index 0000000..2665d32 --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-secrets.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "artifactory-ha.fullname" . }} + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +type: Opaque +data: +{{- if not .Values.artifactory.masterKeySecretName }} + master-key: {{ .Values.artifactory.masterKey | b64enc | quote }} +{{- end }} +{{- if .Values.database.password }} + db-password: {{ .Values.database.password | b64enc | quote }} +{{- end }} diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-service.yaml b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-service.yaml new file mode 100755 index 0000000..38d3355 --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-service.yaml @@ -0,0 +1,88 @@ +# Service for all Artifactory cluster nodes. +apiVersion: v1 +kind: Service +metadata: + name: {{ template "artifactory-ha.fullname" . }} + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . 
}} + component: {{ .Values.artifactory.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +{{- if .Values.artifactory.service.annotations }} + annotations: +{{ toYaml .Values.artifactory.service.annotations | indent 4 }} +{{- end }} +spec: + type: {{ .Values.artifactory.service.type }} + {{- if and (eq .Values.artifactory.service.type "ClusterIP") .Values.artifactory.service.clusterIP }} + clusterIP: {{ .Values.artifactory.service.clusterIP }} + {{- end }} + {{- if .Values.artifactory.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: +{{ toYaml .Values.artifactory.service.loadBalancerSourceRanges | indent 4 }} + {{- end }} + ports: + - port: {{ .Values.artifactory.externalPort }} + targetPort: {{ .Values.artifactory.internalPort }} + protocol: TCP + name: {{ .Release.Name }}-router + - port: {{ .Values.artifactory.externalArtifactoryPort }} + targetPort: {{ .Values.artifactory.internalArtifactoryPort }} + protocol: TCP + name: {{ .Release.Name }}-artifactory + {{- with .Values.artifactory.node.javaOpts.jmx }} + {{- if .enabled }} + - port: {{ .port }} + targetPort: {{ .port }} + protocol: TCP + name: jmx + {{- end }} + {{- end }} + selector: +{{- if eq .Values.artifactory.service.pool "members" }} + role: {{ template "artifactory-ha.node.name" . }} +{{- end }} + app: {{ template "artifactory-ha.name" . }} + component: "{{ .Values.artifactory.name }}" + release: {{ .Release.Name }} +--- +# Internal service for Artifactory primary node only! +# Used by member nodes to check readiness of primary node before starting up +apiVersion: v1 +kind: Service +metadata: + name: {{ template "artifactory-ha.primary.name" . }} + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + component: {{ .Values.artifactory.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + type: {{ .Values.artifactory.service.type }} + {{- if and (eq .Values.artifactory.service.type "ClusterIP") .Values.artifactory.service.clusterIP }} + clusterIP: {{ .Values.artifactory.service.clusterIP }} + {{- end }} + ports: + - port: {{ .Values.artifactory.externalPort }} + targetPort: {{ .Values.artifactory.internalPort }} + protocol: TCP + name: {{ .Release.Name }}-router + - port: {{ .Values.artifactory.externalArtifactoryPort }} + targetPort: {{ .Values.artifactory.internalArtifactoryPort }} + protocol: TCP + name: {{ .Release.Name }}-artifactory + {{- with .Values.artifactory.primary.javaOpts.jmx }} + {{- if .enabled }} + - port: {{ .port }} + targetPort: {{ .port }} + protocol: TCP + name: jmx + {{- end }} + {{- end }} + selector: + role: {{ template "artifactory-ha.primary.name" . }} + app: {{ template "artifactory-ha.name" . }} + component: "{{ .Values.artifactory.name }}" + release: {{ .Release.Name }} diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-serviceaccount.yaml b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-serviceaccount.yaml new file mode 100755 index 0000000..6ea0b10 --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-serviceaccount.yaml @@ -0,0 +1,16 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: +{{- if .Values.serviceAccount.annotations }} + annotations: +{{ tpl (toYaml .) $ | indent 4 }} +{{- end}} + labels: + app: {{ template "artifactory-ha.name" . 
}} + chart: {{ template "artifactory-ha.chart" . }} + component: {{ .Values.artifactory.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "artifactory-ha.serviceAccountName" . }} +{{- end }} diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-storage-pvc.yaml b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-storage-pvc.yaml new file mode 100755 index 0000000..e0bfa6b --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-storage-pvc.yaml @@ -0,0 +1,27 @@ +{{ if .Values.artifactory.customPersistentVolumeClaim }} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ .Values.artifactory.customPersistentVolumeClaim.name }} + labels: + app: {{ template "artifactory-ha.name" . }} + version: "{{ .Values.artifactory.version }}" + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + accessModes: + {{- range .Values.artifactory.customPersistentVolumeClaim.accessModes }} + - {{ . | quote }} + {{- end }} + {{- if .Values.artifactory.customPersistentVolumeClaim.storageClassName }} + {{- if (eq "-" .Values.artifactory.customPersistentVolumeClaim.storageClassName) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.artifactory.customPersistentVolumeClaim.storageClassName }}" + {{- end }} + {{- end }} + resources: + requests: + storage: {{ .Values.artifactory.customPersistentVolumeClaim.size | quote }} +{{ end -}} \ No newline at end of file diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-system-yaml.yaml b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-system-yaml.yaml new file mode 100755 index 0000000..cf8ffba --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/artifactory-system-yaml.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "artifactory-ha.primary.name" . }}-system-yaml + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + component: {{ .Values.artifactory.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +type: Opaque +stringData: + system.yaml: | +{{ tpl .Values.artifactory.systemYaml . | indent 4 }} diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/catalina-logger-configmap.yaml b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/catalina-logger-configmap.yaml new file mode 100755 index 0000000..807fe72 --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/catalina-logger-configmap.yaml @@ -0,0 +1,53 @@ +{{- if .Values.artifactory.catalinaLoggers }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "artifactory-ha.fullname" . }}-catalina-logger + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + tail-log.sh: | + #!/bin/sh + + LOG_DIR=$1 + LOG_NAME=$2 + PID= + + # Wait for log dir to appear + while [ ! -d ${LOG_DIR} ]; do + sleep 1 + done + sleep 5 + + cd ${LOG_DIR} + + LOG_PREFIX=$(echo ${LOG_NAME} | awk -F\. 
'{print $1}') + + # Find the log to tail + LOG_FILE=$(ls -1t ./${LOG_PREFIX}.*.log | head -1) + + # echo "Tailing ${LOG_FILE}" + tail -F ${LOG_FILE} & + PID=$! + + # Loop forever to see if a new log was created + while true; do + # Find the latest log + NEW_LOG_FILE=$(ls -1t ./${LOG_PREFIX}.*.log | head -1) + + # If a new log file is found, kill old tail and switch to tailing it + if [ "${LOG_FILE}" != "${NEW_LOG_FILE}" ]; then + kill -9 ${PID} + wait $! 2>/dev/null + LOG_FILE=${NEW_LOG_FILE} + + # echo "Tailing ${LOG_FILE}" + tail -F ${LOG_FILE} & + PID=$! + fi + sleep 2 + done +{{- end }} diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/filebeat-configmap.yaml b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/filebeat-configmap.yaml new file mode 100755 index 0000000..d2db2a0 --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/filebeat-configmap.yaml @@ -0,0 +1,15 @@ +{{- if .Values.filebeat.enabled }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "artifactory-ha.name" . }}-filebeat-config + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} +data: + filebeat.yml: | +{{ tpl .Values.filebeat.filebeatYml . | indent 4 }} +{{- end -}} \ No newline at end of file diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/ingress.yaml b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/ingress.yaml new file mode 100755 index 0000000..e8e2fd2 --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/ingress.yaml @@ -0,0 +1,56 @@ +{{- if .Values.ingress.enabled -}} +{{- $serviceName := include "artifactory-ha.fullname" . -}} +{{- $servicePort := .Values.artifactory.externalPort -}} +{{- $artifactoryServicePort := .Values.artifactory.externalArtifactoryPort -}} +{{- if semverCompare ">=v1.14.0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: networking.k8s.io/v1beta1 +{{- else }} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ template "artifactory-ha.fullname" . }} + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.ingress.labels }} +{{ .Values.ingress.labels | toYaml | trimSuffix "\n"| indent 4 -}} +{{- end}} +{{- if .Values.ingress.annotations }} + annotations: + force-update: "{{ randAlpha 63 | lower }}" +{{ .Values.ingress.annotations | toYaml | trimSuffix "\n" | indent 4 -}} +{{- end }} +spec: + {{- if .Values.ingress.defaultBackend.enabled }} + backend: + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + {{- end }} + rules: +{{- if .Values.ingress.hosts }} + {{- range $host := .Values.ingress.hosts }} + - host: {{ $host | quote }} + http: + paths: + - path: {{ $.Values.ingress.routerPath }} + backend: + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + - path: {{ $.Values.ingress.artifactoryPath }} + backend: + serviceName: {{ $serviceName }} + servicePort: {{ $artifactoryServicePort }} + {{- end -}} +{{- end -}} + {{- with .Values.ingress.additionalRules }} +{{ tpl . 
$ | indent 2 }} + {{- end }} + + {{- if .Values.ingress.tls }} + tls: +{{ toYaml .Values.ingress.tls | indent 4 }} + {{- end -}} +{{- end -}} diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/nginx-artifactory-conf.yaml b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/nginx-artifactory-conf.yaml new file mode 100755 index 0000000..eb1f0e6 --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/nginx-artifactory-conf.yaml @@ -0,0 +1,14 @@ +{{- if and (not .Values.nginx.customArtifactoryConfigMap) .Values.nginx.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "artifactory-ha.fullname" . }}-nginx-artifactory-conf + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + artifactory.conf: | +{{ tpl .Values.nginx.artifactoryConf . | indent 4 }} +{{- end }} \ No newline at end of file diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/nginx-certificate-secret.yaml b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/nginx-certificate-secret.yaml new file mode 100755 index 0000000..2c1430a --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/nginx-certificate-secret.yaml @@ -0,0 +1,14 @@ +{{- if and (not .Values.nginx.tlsSecretName) .Values.nginx.enabled }} +apiVersion: v1 +kind: Secret +type: kubernetes.io/tls +metadata: + name: {{ template "artifactory-ha.fullname" . }}-nginx-certificate + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: +{{ ( include "artifactory-ha.gen-certs" . ) | indent 2 }} +{{- end }} diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/nginx-conf.yaml b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/nginx-conf.yaml new file mode 100755 index 0000000..5f424d5 --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/nginx-conf.yaml @@ -0,0 +1,14 @@ +{{- if and (not .Values.nginx.customConfigMap) .Values.nginx.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "artifactory-ha.fullname" . }}-nginx-conf + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + nginx.conf: | +{{ tpl .Values.nginx.mainConf . | indent 4 }} +{{- end }} diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/nginx-deployment.yaml b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/nginx-deployment.yaml new file mode 100755 index 0000000..4bc3f79 --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/nginx-deployment.yaml @@ -0,0 +1,185 @@ +{{- if .Values.nginx.enabled -}} +{{- $serviceName := include "artifactory-ha.fullname" . -}} +{{- $servicePort := .Values.artifactory.externalPort -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "artifactory-ha.nginx.fullname" . }} + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: {{ .Values.nginx.name }} +{{- if .Values.nginx.labels }} +{{ toYaml .Values.nginx.labels | indent 4 }} +{{- end }} +spec: + replicas: {{ .Values.nginx.replicaCount }} + selector: + matchLabels: + app: {{ template "artifactory-ha.name" . }} + release: {{ .Release.Name }} + component: {{ .Values.nginx.name }} + template: + metadata: + annotations: + checksum/nginx-conf: {{ include (print $.Template.BasePath "/nginx-conf.yaml") . | sha256sum }} + checksum/nginx-artifactory-conf: {{ include (print $.Template.BasePath "/nginx-artifactory-conf.yaml") . | sha256sum }} + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + component: {{ .Values.nginx.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + spec: + serviceAccountName: {{ template "artifactory-ha.serviceAccountName" . }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + - name: {{ .Values.imagePullSecrets }} + {{- end }} + initContainers: + - name: "setup" + image: "{{ .Values.initContainerImage }}" + imagePullPolicy: {{ .Values.nginx.image.pullPolicy }} + command: + - '/bin/sh' + - '-c' + - > + rm -rfv {{ .Values.nginx.persistence.mountPath }}/lost+found; + mkdir -p {{ .Values.nginx.persistence.mountPath }}/logs; + volumeMounts: + - mountPath: {{ .Values.nginx.persistence.mountPath | quote }} + name: nginx-volume + securityContext: + runAsUser: {{ .Values.nginx.uid }} + fsGroup: {{ .Values.nginx.gid }} + containers: + - name: {{ .Values.nginx.name }} + image: '{{ .Values.nginx.image.repository }}:{{ default .Chart.AppVersion .Values.nginx.image.version }}' + imagePullPolicy: {{ .Values.nginx.image.pullPolicy }} + command: + - 'nginx' + - '-g' + - 'daemon off;' + ports: + # DEPRECATION NOTE: The following is to maintain support for values pre 1.3.1 and + # will be cleaned up in a later version + {{- if .Values.nginx.http }} + {{- if .Values.nginx.http.enabled }} + - containerPort: {{ .Values.nginx.http.internalPort }} + {{- end }} + {{- else }} # DEPRECATED + - containerPort: {{ .Values.nginx.internalPortHttp }} + {{- end }} + {{- if .Values.nginx.https }} + {{- if .Values.nginx.https.enabled }} + - containerPort: {{ .Values.nginx.https.internalPort }} + {{- end }} + {{- else }} # DEPRECATED + - containerPort: {{ .Values.nginx.internalPortHttps }} + {{- end }} + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/nginx.conf + subPath: nginx.conf + - name: nginx-artifactory-conf + mountPath: "{{ .Values.nginx.persistence.mountPath }}/conf.d/" + - name: nginx-volume + mountPath: {{ .Values.nginx.persistence.mountPath | quote }} + - name: ssl-certificates + mountPath: "{{ .Values.nginx.persistence.mountPath }}/ssl" + resources: +{{ toYaml .Values.nginx.resources | indent 10 }} + {{- if .Values.nginx.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: {{ .Values.nginx.readinessProbe.path }} + {{- if .Values.nginx.http.enabled }} + port: {{ .Values.nginx.http.internalPort }} + scheme: HTTP + {{- else }} + port: {{ .Values.nginx.https.internalPort }} + scheme: HTTPS + {{- end }} + initialDelaySeconds: {{ .Values.nginx.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.nginx.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.nginx.readinessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.nginx.readinessProbe.failureThreshold }} + successThreshold: {{ .Values.nginx.readinessProbe.successThreshold }} + {{- end }} + {{- if 
.Values.nginx.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: {{ .Values.nginx.livenessProbe.path }} + {{- if .Values.nginx.http.enabled }} + port: {{ .Values.nginx.http.internalPort }} + scheme: HTTP + {{- else }} + port: {{ .Values.nginx.https.internalPort }} + scheme: HTTPS + {{- end }} + initialDelaySeconds: {{ .Values.nginx.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.nginx.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.nginx.livenessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.nginx.livenessProbe.failureThreshold }} + successThreshold: {{ .Values.nginx.livenessProbe.successThreshold }} + {{- end }} + {{- $image := .Values.logger.image.repository }} + {{- $tag := .Values.logger.image.tag }} + {{- $mountPath := .Values.nginx.persistence.mountPath }} + {{- range .Values.nginx.loggers }} + - name: {{ . | replace "_" "-" | replace "." "-" }} + image: '{{ $image }}:{{ $tag }}' + command: + - tail + args: + - '-F' + - '{{ $mountPath }}/logs/{{ . }}' + volumeMounts: + - name: nginx-volume + mountPath: {{ $mountPath }} + {{- end }} + {{- with .Values.nginx.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.nginx.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.nginx.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} + {{- end }} + volumes: + - name: nginx-conf + configMap: + {{- if .Values.nginx.customConfigMap }} + name: {{ .Values.nginx.customConfigMap }} + {{- else }} + name: {{ template "artifactory-ha.fullname" . }}-nginx-conf + {{- end }} + - name: nginx-artifactory-conf + configMap: + {{- if .Values.nginx.customArtifactoryConfigMap }} + name: {{ .Values.nginx.customArtifactoryConfigMap }} + {{- else }} + name: {{ template "artifactory-ha.fullname" . }}-nginx-artifactory-conf + {{- end }} + + - name: nginx-volume + {{- if .Values.nginx.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ .Values.nginx.persistence.existingClaim | default (include "artifactory-ha.nginx.fullname" .) }} + {{- else }} + emptyDir: {} + {{- end }} + - name: ssl-certificates + secret: + {{- if .Values.nginx.tlsSecretName }} + secretName: {{ .Values.nginx.tlsSecretName }} + {{- else }} + secretName: {{ template "artifactory-ha.fullname" . }}-nginx-certificate + {{- end }} +{{- end }} diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/nginx-pvc.yaml b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/nginx-pvc.yaml new file mode 100755 index 0000000..68a89ce --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/nginx-pvc.yaml @@ -0,0 +1,26 @@ +{{- if and .Values.nginx.persistence.enabled (.Values.nginx.enabled) (eq (int .Values.nginx.replicaCount) 1) }} +{{- if (not .Values.nginx.persistence.existingClaim) }} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ template "artifactory-ha.nginx.fullname" . }} + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + accessModes: + - {{ .Values.nginx.persistence.accessMode | quote }} + resources: + requests: + storage: {{ .Values.nginx.persistence.size | quote }} +{{- if .Values.nginx.persistence.storageClass }} +{{- if (eq "-" .Values.nginx.persistence.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.nginx.persistence.storageClass }}" +{{- end }} +{{- end }} +{{- end }} +{{- end }} diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/nginx-service.yaml b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/nginx-service.yaml new file mode 100755 index 0000000..7a212e0 --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/templates/nginx-service.yaml @@ -0,0 +1,69 @@ +{{- if .Values.nginx.enabled -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "artifactory-ha.nginx.fullname" . }} + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: {{ .Values.nginx.name }} + {{- if .Values.nginx.service.labels }} +{{ toYaml .Values.nginx.service.labels | indent 4 }} + {{- end }} +{{- if .Values.nginx.service.annotations }} + annotations: +{{ toYaml .Values.nginx.service.annotations | indent 4 }} +{{- end }} +spec: + type: {{ .Values.nginx.service.type }} + {{- if and (eq .Values.nginx.service.type "ClusterIP") .Values.nginx.service.clusterIP }} + clusterIP: {{ .Values.nginx.service.clusterIP }} + {{- end }} +{{- if eq .Values.nginx.service.type "LoadBalancer" }} + {{ if .Values.nginx.service.loadBalancerIP -}} + loadBalancerIP: {{ .Values.nginx.service.loadBalancerIP }} + {{ end -}} + {{- if .Values.nginx.service.externalTrafficPolicy }} + externalTrafficPolicy: {{ .Values.nginx.service.externalTrafficPolicy }} + {{- end }} +{{- end }} +{{- if .Values.nginx.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: +{{ toYaml .Values.nginx.service.loadBalancerSourceRanges | indent 4 }} +{{- end }} + ports: + # DEPRECATION NOTE: The following is to maintain support for values pre 1.3.0 and + # will be cleaned up in a later verion + {{- if .Values.nginx.http }} + {{- if .Values.nginx.http.enabled }} + - port: {{ .Values.nginx.http.externalPort }} + targetPort: {{ .Values.nginx.http.internalPort }} + protocol: TCP + name: http + {{- end }} + {{- else }} # DEPRECATED + - port: {{ .Values.nginx.externalPortHttp }} + targetPort: {{ .Values.nginx.internalPortHttp }} + protocol: TCP + name: http + {{- end }} + {{- if .Values.nginx.https }} + {{- if .Values.nginx.https.enabled }} + - port: {{ .Values.nginx.https.externalPort }} + targetPort: {{ .Values.nginx.https.internalPort }} + protocol: TCP + name: https + {{- end }} + {{- else }} # DEPRECATED + - port: {{ .Values.nginx.externalPortHttps }} + targetPort: {{ .Values.nginx.internalPortHttps }} + protocol: TCP + name: https + {{- end }} + selector: + app: {{ template "artifactory-ha.name" . 
}} + component: {{ .Values.nginx.name }} + release: {{ .Release.Name }} +{{- end }} diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/values-large.yaml b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/values-large.yaml new file mode 100755 index 0000000..ec05d2a --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/values-large.yaml @@ -0,0 +1,24 @@ +artifactory: + primary: + resources: + requests: + memory: "6Gi" + cpu: "4" + limits: + memory: "10Gi" + cpu: "8" + javaOpts: + xms: "6g" + xmx: "8g" + node: + replicaCount: 3 + resources: + requests: + memory: "6Gi" + cpu: "4" + limits: + memory: "10Gi" + cpu: "8" + javaOpts: + xms: "6g" + xmx: "8g" diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/values-medium.yaml b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/values-medium.yaml new file mode 100755 index 0000000..33879c0 --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/values-medium.yaml @@ -0,0 +1,24 @@ +artifactory: + primary: + resources: + requests: + memory: "4Gi" + cpu: "2" + limits: + memory: "8Gi" + cpu: "6" + javaOpts: + xms: "4g" + xmx: "6g" + node: + replicaCount: 2 + resources: + requests: + memory: "4Gi" + cpu: "2" + limits: + memory: "8Gi" + cpu: "6" + javaOpts: + xms: "4g" + xmx: "6g" diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/values-small.yaml b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/values-small.yaml new file mode 100755 index 0000000..4babf97 --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/values-small.yaml @@ -0,0 +1,24 @@ +artifactory: + primary: + resources: + requests: + memory: "4Gi" + cpu: "2" + limits: + memory: "6Gi" + cpu: "4" + javaOpts: + xms: "4g" + xmx: "4g" + node: + replicaCount: 1 + resources: + requests: + memory: "4Gi" + cpu: "2" + limits: + memory: "6Gi" + cpu: "4" + javaOpts: + xms: "4g" + xmx: "4g" diff --git a/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/values.yaml b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/values.yaml new file mode 100755 index 0000000..64af84e --- /dev/null +++ b/Openshift4/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/values.yaml @@ -0,0 +1,1330 @@ +# Default values for artifactory-ha. +# This is a YAML-formatted file. +# Beware when changing values here. You should know what you are doing! +# Access the values with {{ .Values.key.subkey }} + +# Common +initContainerImage: "alpine:3.10" + +installer: + type: + platform: + +# For supporting pulling from private registries +imagePullSecrets: + +## Role Based Access Control +## Ref: https://kubernetes.io/docs/admin/authorization/rbac/ +rbac: + create: true + role: + ## Rules to create. It follows the role specification + rules: + - apiGroups: + - '' + resources: + - services + - endpoints + - pods + verbs: + - get + - watch + - list + +## Service Account +## Ref: https://kubernetes.io/docs/admin/service-accounts-admin/ +## +serviceAccount: + create: true + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + name: + annotations: {} + +ingress: + enabled: false + defaultBackend: + enabled: true + # Used to create an Ingress record. 
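+  # A minimal sketch of enabling this Ingress from the Helm CLI; the hostname
+  # and TLS secret name below are placeholders, not values shipped with this chart:
+  #   helm upgrade --install artifactory-ha . \
+  #     --set ingress.enabled=true \
+  #     --set ingress.hosts[0]=artifactory.example.com \
+  #     --set ingress.tls[0].secretName=artifactory-tls \
+  #     --set ingress.tls[0].hosts[0]=artifactory.example.com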
+ hosts: [] + routerPath: / + artifactoryPath: /artifactory/ + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + labels: {} + # traffic-type: external + # traffic-type: internal + tls: [] + # Secrets must be manually created in the namespace. + # - secretName: chart-example-tls + # hosts: + # - artifactory.domain.example + + # Additional ingress rules + additionalRules: [] + + +networkpolicy: + # Allows all ingress and egress + - name: artifactory + podSelector: + matchLabels: + app: artifactory-ha + egress: + - {} + ingress: + - {} + # Uncomment to allow only artifactory pods to communicate with postgresql (if postgresql.enabled is true) + # - name: postgresql + # podSelector: + # matchLabels: + # app: postgresql + # ingress: + # - from: + # - podSelector: + # matchLabels: + # app: artifactory-ha + + +## Database configurations +## Use the wait-for-db init container. Set to false to skip +waitForDatabase: true + +## Configuration values for the postgresql dependency +## ref: https://github.com/kubernetes/charts/blob/master/stable/postgresql/README.md +## +postgresql: + enabled: true + image: + registry: docker.bintray.io + repository: bitnami/postgresql + tag: 9.6.15-debian-9-r91 + postgresqlUsername: artifactory + postgresqlPassword: "" + postgresqlDatabase: artifactory + postgresqlConfiguration: + listenAddresses: "'*'" + maxConnections: "1500" + persistence: + enabled: true + size: 50Gi + service: + port: 5432 + resources: {} + # requests: + # memory: "512Mi" + # cpu: "100m" + # limits: + # memory: "1Gi" + # cpu: "500m" + nodeSelector: {} + +## If NOT using the PostgreSQL in this chart (postgresql.enabled=false), +## you MUST specify custom database details here or Artifactory will NOT start +database: + type: + driver: + ## If you set the url, leave host and port empty + url: + ## If you would like this chart to create the secret containing the db + ## password, use these values + user: + password: + ## If you have existing Kubernetes secrets containing db credentials, use + ## these values + secrets: {} + # user: + # name: "rds-artifactory" + # key: "db-user" + # password: + # name: "{{ .Release.Name}}}}-postgresql" + # key: "postgresql-password" + # url: + # name: "rds-artifactory" + # key: "db-url" + +logger: + image: + repository: 'busybox' + tag: '1.30' + +# Artifactory +artifactory: + name: artifactory-ha + image: + # repository: "docker.bintray.io/jfrog/artifactory-pro" + repository: "earlyaccess.jfrog.io/artifactory-pro" + # Note that by default we use appVersion to get image tag + # version: + pullPolicy: IfNotPresent + + # Create a priority class for the Artifactory pods or use an existing one + # NOTE - Maximum allowed value of a user defined priority is 1000000000 + priorityClass: + create: false + value: 1000000000 + ## Override default name + # name: + ## Use an existing priority class + # existingPriorityClass: + + # Delete the db.properties file in ARTIFACTORY_HOME/etc/db.properties + deleteDBPropertiesOnStartup: true + database: + maxOpenConnections: 80 + + # This directory is intended for use with NFS eventual configuration for HA + haDataDir: + enabled: false + path: + + # Files to copy to ARTIFACTORY_HOME/ on each Artifactory startup + copyOnEveryStartup: + # # Absolute path + # - source: /artifactory_extra_conf/binarystore.xml + # # Relative to ARTIFACTORY_HOME/ + # target: etc/ + # # Absolute path + # - source: /artifactory_extra_conf/artifactory.lic + # # Relative to ARTIFACTORY_HOME/ + # target: etc/ + + # Sidecar 
containers for tailing Artifactory logs + loggers: [] + # - request.log + # - event.log + # - binarystore.log + # - request_trace.log + # - access.log + # - artifactory.log + # - build_info_migration.log + + # Sidecar containers for tailing Tomcat (catalina) logs + catalinaLoggers: [] + # - catalina.log + # - host-manager.log + # - localhost.log + # - manager.log + + ## Add custom init containers execution before predefined init containers + customInitContainersBegin: | + - name: "custom-setup" + image: "{{ .Values.initContainerImage }}" + imagePullPolicy: "{{ .Values.artifactory.image.pullPolicy }}" + command: + - 'sh' + - '-c' + - 'chown -R 1030:1030 {{ .Values.artifactory.persistence.mountPath }}' + securityContext: + runAsUser: 0 + volumeMounts: + - mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + name: volume + ## Add custom init containers + + ## Add custom init containers execution after predefined init containers + customInitContainers: | + # - name: "custom-setup" + # image: "{{ .Values.initContainerImage }}" + # imagePullPolicy: "{{ .Values.artifactory.image.pullPolicy }}" + # command: + # - 'sh' + # - '-c' + # - 'touch {{ .Values.artifactory.persistence.mountPath }}/example-custom-setup' + # volumeMounts: + # - mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + # name: volume + + ## Add custom sidecar containers + # - The provided example uses a custom volume (customVolumes) + # - The provided example shows running container as root (id 0) + customSidecarContainers: | + # - name: "sidecar-list-etc" + # image: "{{ .Values.initContainerImage }}" + # imagePullPolicy: "{{ .Values.artifactory.image.pullPolicy }}" + # securityContext: + # runAsUser: 0 + # fsGroup: 0 + # command: + # - 'sh' + # - '-c' + # - 'sh /scripts/script.sh' + # volumeMounts: + # - mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + # name: volume + # - mountPath: "/scripts/script.sh" + # name: custom-script + # subPath: script.sh + # resources: + # requests: + # memory: "32Mi" + # cpu: "50m" + # limits: + # memory: "128Mi" + # cpu: "100m" + + ## Add custom volumes + customVolumes: | + # - name: custom-script + # configMap: + # name: custom-script + + ## Add custom volumesMounts + customVolumeMounts: | + # - name: custom-script + # mountPath: "/scripts/script.sh" + # subPath: script.sh + # - name: posthook-start + # mountPath: "/scripts/posthoook-start.sh" + # subPath: posthoook-start.sh + # - name: prehook-start + # mountPath: "/scripts/prehook-start.sh" + # subPath: prehook-start.sh + + # Add custom persistent volume mounts - Available for the pod + customPersistentPodVolumeClaim: {} + # name: + # mountPath: + # accessModes: + # - "-" + # size: + # storageClassName: + + # Add custom persistent volume mounts - Available to the entire namespace + customPersistentVolumeClaim: {} + # name: + # mountPath: + # accessModes: + # - "-" + # size: + # storageClassName: + + ## Artifactory HA requires a unique master key. Each Artifactory node must have the same master key! + ## You can generate one with the command: 'openssl rand -hex 16' + ## Pass it to helm with '--set artifactory.masterKey=${MASTER_KEY}' + ## Alternatively, you can use a pre-existing secret with a key called master-key by specifying masterKeySecretName + ## IMPORTANT: You should NOT use the example masterKey for a production deployment! 
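+  ## A minimal sketch of generating unique keys and passing them at install time,
+  ## as suggested above (the chart path is a placeholder; the 64-character example
+  ## key below matches the output size of 'openssl rand -hex 32'):
+  ##   MASTER_KEY=$(openssl rand -hex 32)
+  ##   JOIN_KEY=$(openssl rand -hex 16)
+  ##   helm upgrade --install artifactory-ha . \
+  ##     --set artifactory.masterKey=${MASTER_KEY} \
+  ##     --set artifactory.joinKey=${JOIN_KEY}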
+ masterKey: FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF + # masterKeySecretName: + + ## Join Key to connect to other services to Artifactory + ## IMPORTANT: You should NOT use the example joinKey for a production deployment! + joinKey: EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE + + binarystore: + enabled: true + + accessAdmin: + ip: "127.0.0.1" + password: + secret: + dataKey: + + ## Artifactory license. + license: + ## licenseKey is the license key in plain text. Use either this or the license.secret setting + licenseKey: + ## If artifactory.license.secret is passed, it will be mounted as + ## ARTIFACTORY_HOME/etc/artifactory.lic and loaded at run time. + secret: + ## The dataKey should be the name of the secret data key created. + dataKey: + + ## Create configMap with artifactory.config.import.xml and security.import.xml and pass name of configMap in following parameter + configMapName: + + # Add any list of configmaps to Artifactory + configMaps: | + # posthook-start.sh: |- + # echo "This is a post start script" + # posthook-end.sh: |- + # echo "This is a post end script" + + ## List of secrets for Artifactory user plugins. + ## One Secret per plugin's files. + userPluginSecrets: + # - archive-old-artifacts + # - build-cleanup + # - webhook + # - '{{ template "my-chart.fullname" . }}' + + ## Extra pre-start command to install JDBC driver for MySql/MariaDb/Oracle + # preStartCommand: "wget -O /opt/jfrog/artifactory/tomcat/lib/mysql-connector-java-5.1.41.jar https://jcenter.bintray.com/mysql/mysql-connector-java/5.1.41/mysql-connector-java-5.1.41.jar" + ## Extra post-start command to run extra commands after container starts + # postStartCommand: + + ## Extra environment variables that can be used to tune Artifactory to your needs. + ## Uncomment and set value as needed + #extraEnvironmentVariables: | + # - name: JF_SHARED_DATABSE_USERNAME + # value: "artifactory" + # - name: JF_SHARED_DATABASE_PASSWORD + # valueFrom: + # secretKeyRef: + # name: {{ .Release.Name }}-postgresql + # key: postgresql-password + # - name: POSTGRES_DB + # value: "artifactory" + + # TODO: Fix javaOpts for member nodes (currently uses primary settings for all nodes) + systemYaml: | + shared: + extraJavaOpts: > + {{- with .Values.artifactory.primary.javaOpts }} + -Dartifactory.async.corePoolSize={{ .corePoolSize }} + {{- if .xms }} + -Xms{{ .xms }} + {{- end }} + {{- if .xmx }} + -Xmx{{ .xmx }} + {{- end }} + {{- if .jmx.enabled }} + -Dcom.sun.management.jmxremote + -Dcom.sun.management.jmxremote.port={{ .jmx.port }} + -Dcom.sun.management.jmxremote.rmi.port={{ .jmx.port }} + -Dcom.sun.management.jmxremote.ssl={{ .jmx.ssl }} + {{- if .jmx.host }} + -Djava.rmi.server.hostname={{ tpl .jmx.host $ }} + {{- else }} + -Djava.rmi.server.hostname={{ template "artifactory-ha.fullname" $ }} + {{- end }} + {{- if .jmx.authenticate }} + -Dcom.sun.management.jmxremote.authenticate=true + -Dcom.sun.management.jmxremote.access.file={{ .jmx.accessFile }} + -Dcom.sun.management.jmxremote.password.file={{ .jmx.passwordFile }} + {{- else }} + -Dcom.sun.management.jmxremote.authenticate=false + {{- end }} + {{- end }} + {{- if .other }} + {{ .other }} + {{- end }} + {{- end }} + database: + {{- if .Values.postgresql.enabled }} + type: postgresql + url: 'jdbc:postgresql://{{ .Release.Name }}-postgresql:{{ .Values.postgresql.service.port }}/{{ .Values.postgresql.postgresqlDatabase }}' + host: '' + driver: org.postgresql.Driver + username: '{{ .Values.postgresql.postgresqlUsername }}' + password: '{{ 
.Values.postgresql.postgresqlPassword }}' + {{ else }} + type: '{{ .Values.database.type }}' + url: '{{ .Values.database.url }}' + driver: '{{ .Values.database.driver }}' + username: '{{ .Values.database.user }}' + password: '{{ .Values.database.password }}' + {{- end }} + security: + joinKey: '{{ .Values.artifactory.joinKey }}' + masterKey: '{{ .Values.artifactory.masterKey }}' + artifactory: + {{- if .Values.artifactory.haDataDir.enabled }} + node: + haDataDir: {{ .Values.artifactory.haDataDir.path }} + {{- end }} + database: + maxOpenConnections: {{ .Values.artifactory.database.maxOpenConnections }} + access: + database: + maxOpenConnections: '{{ .Values.access.database.maxOpenConnections }}' + {{- if .Values.access.database.enabled }} + type: '{{ .Values.access.database.type }}' + url: '{{ .Values.access.database.url }}' + driver: '{{ .Values.access.database.driver }}' + username: '{{ .Values.access.database.user }}' + password: '{{ .Values.access.database.password }}' + {{- end }} + + ## IMPORTANT: If overriding artifactory.internalPort: + ## DO NOT use port lower than 1024 as Artifactory runs as non-root and cannot bind to ports lower than 1024! + externalPort: 8082 + internalPort: 8082 + externalArtifactoryPort: 8081 + internalArtifactoryPort: 8081 + uid: 1030 + terminationGracePeriodSeconds: 30 + ## The following settings are to configure the frequency of the liveness and readiness probes + livenessProbe: + enabled: true + path: /router/api/v1/system/health + initialDelaySeconds: 180 + failureThreshold: 10 + timeoutSeconds: 10 + periodSeconds: 10 + successThreshold: 1 + + readinessProbe: + enabled: true + path: /router/api/v1/system/health + initialDelaySeconds: 60 + failureThreshold: 10 + timeoutSeconds: 10 + periodSeconds: 10 + successThreshold: 1 + + persistence: + enabled: true + local: false + redundancy: 3 + mountPath: "/var/opt/jfrog/artifactory" + accessMode: ReadWriteOnce + size: 200Gi + + ## Use a custom Secret to be mounted as your binarystore.xml + ## NOTE: This will ignore all settings below that make up binarystore.xml + customBinarystoreXmlSecret: + + maxCacheSize: 50000000000 + cacheProviderDir: cache + eventual: + numberOfThreads: 10 + ## artifactory data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClassName: "-" + + ## Set the persistence storage type. This will apply the matching binarystore.xml to Artifactory config + ## Supported types are: + ## file-system (default) + ## nfs + ## google-storage + ## aws-s3 + ## azure-blob + type: file-system + + ## Use binarystoreXml to provide a custom binarystore.xml + ## This can be a template or hardcoded. 
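+    ## A minimal sketch of mounting a pre-created binarystore.xml through
+    ## customBinarystoreXmlSecret instead of the template below (the secret name
+    ## and local file path are placeholders; the secret must carry a
+    ## 'binarystore.xml' key, matching the subPath mounted by the statefulsets):
+    ##   kubectl create secret generic my-binarystore \
+    ##     --from-file=binarystore.xml=./binarystore.xml
+    ##   helm upgrade --install artifactory-ha . \
+    ##     --set artifactory.persistence.customBinarystoreXmlSecret=my-binarystore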
+ binarystoreXml: | + {{- if eq .Values.artifactory.persistence.type "file-system" }} + + {{- if .Values.artifactory.persistence.fileSystem.existingSharedClaim.enabled }} + + + + + + {{- range $sharedClaimNumber, $e := until (.Values.artifactory.persistence.fileSystem.existingSharedClaim.numberOfExistingClaims|int) -}} + + {{- end }} + + + + + + {{ .Values.artifactory.persistence.maxCacheSize }} + {{ .Values.artifactory.persistence.cacheProviderDir }} + + + // Specify the read and write strategy and redundancy for the sharding binary provider + + roundRobin + percentageFreeSpace + 2 + + + {{- range $sharedClaimNumber, $e := until (.Values.artifactory.persistence.fileSystem.existingSharedClaim.numberOfExistingClaims|int) -}} + //For each sub-provider (mount), specify the filestore location + + filestore{{ $sharedClaimNumber }} + + {{- end }} + + {{- else }} + + + + + crossNetworkStrategy + crossNetworkStrategy + {{ .Values.artifactory.persistence.redundancy }} + 2 + 2 + + + + + + + + + {{ .Values.artifactory.persistence.maxCacheSize }} + {{ .Values.artifactory.persistence.cacheProviderDir }} + + + + + shard-fs-1 + local + + + + + 30 + tester-remote1 + 10000 + remote + + + + {{- end }} + {{- end }} + {{- if eq .Values.artifactory.persistence.type "google-storage" }} + + + + + crossNetworkStrategy + crossNetworkStrategy + {{ .Values.artifactory.persistence.redundancy }} + 2 + + + + + + + + + + + + + {{ .Values.artifactory.persistence.maxCacheSize }} + {{ .Values.artifactory.persistence.cacheProviderDir }} + + + + local + + + + 30 + 10000 + remote + + + + {{ .Values.artifactory.persistence.mountPath }}/data/filestore + /tmp + + + + google-cloud-storage + {{ .Values.artifactory.persistence.googleStorage.endpoint }} + {{ .Values.artifactory.persistence.googleStorage.httpsOnly }} + {{ .Values.artifactory.persistence.googleStorage.bucketName }} + {{ .Values.artifactory.persistence.googleStorage.identity }} + {{ .Values.artifactory.persistence.googleStorage.credential }} + {{ .Values.artifactory.persistence.googleStorage.path }} + {{ .Values.artifactory.persistence.googleStorage.bucketExists }} + + + {{- end }} + {{- if eq .Values.artifactory.persistence.type "aws-s3-v3" }} + + + + + + + + + + + + + + + + + crossNetworkStrategy + crossNetworkStrategy + {{ .Values.artifactory.persistence.redundancy }} + + + + + remote + + + + local + + + + + {{ .Values.artifactory.persistence.maxCacheSize }} + {{ .Values.artifactory.persistence.cacheProviderDir }} + + + {{- with .Values.artifactory.persistence.awsS3V3 }} + + {{ .testConnection }} + {{- if .identity }} + {{ .identity }} + {{- end }} + {{- if .credential }} + {{ .credential }} + {{- end }} + {{ .region }} + {{ .bucketName }} + {{ .path }} + {{ .endpoint }} + {{- with .kmsServerSideEncryptionKeyId }} + {{ . }} + {{- end }} + {{- with .kmsKeyRegion }} + {{ . }} + {{- end }} + {{- with .kmsCryptoMode }} + {{ . }} + {{- end }} + true + {{ .usePresigning }} + {{ .signatureExpirySeconds }} + {{- with .cloudFrontDomainName }} + {{ . }} + {{- end }} + {{- with .cloudFrontKeyPairId }} + {{ .cloudFrontKeyPairId }} + {{- end }} + {{- with .cloudFrontPrivateKey }} + {{ . 
}} + {{- end }} + + {{- end }} + + {{- end }} + + {{- if eq .Values.artifactory.persistence.type "aws-s3" }} + + + + + + + + + + + + + + + + + + {{ .Values.artifactory.persistence.maxCacheSize }} + {{ .Values.artifactory.persistence.cacheProviderDir }} + + + + local + + + + 30 + 10000 + remote + + + + crossNetworkStrategy + crossNetworkStrategy + {{ .Values.artifactory.persistence.redundancy }} + + + + + {{ .Values.artifactory.persistence.awsS3.endpoint }} + {{- if .Values.artifactory.persistence.awsS3.roleName }} + {{ .Values.artifactory.persistence.awsS3.roleName }} + true + {{- else }} + {{ .Values.artifactory.persistence.awsS3.refreshCredentials }} + {{- end }} + {{ .Values.artifactory.persistence.awsS3.s3AwsVersion }} + {{ .Values.artifactory.persistence.awsS3.testConnection }} + {{ .Values.artifactory.persistence.awsS3.httpsOnly }} + {{ .Values.artifactory.persistence.awsS3.region }} + {{ .Values.artifactory.persistence.awsS3.bucketName }} + {{- if .Values.artifactory.persistence.awsS3.identity }} + {{ .Values.artifactory.persistence.awsS3.identity }} + {{- end }} + {{- if .Values.artifactory.persistence.awsS3.credential }} + {{ .Values.artifactory.persistence.awsS3.credential }} + {{- end }} + {{ .Values.artifactory.persistence.awsS3.path }} + {{- range $key, $value := .Values.artifactory.persistence.awsS3.properties }} + + {{- end }} + + + {{- end }} + {{- if eq .Values.artifactory.persistence.type "azure-blob" }} + + + + + + + + + + + + + + + + + + {{ .Values.artifactory.persistence.maxCacheSize }} + {{ .Values.artifactory.persistence.cacheProviderDir }} + + + + + crossNetworkStrategy + crossNetworkStrategy + 2 + 1 + + + + + remote + + + + local + + + + + {{ .Values.artifactory.persistence.azureBlob.accountName }} + {{ .Values.artifactory.persistence.azureBlob.accountKey }} + {{ .Values.artifactory.persistence.azureBlob.endpoint }} + {{ .Values.artifactory.persistence.azureBlob.containerName }} + {{ .Values.artifactory.persistence.azureBlob.testConnection }} + + + {{- end }} + + ## For artifactory.persistence.type file-system + fileSystem: + ## You may also use existing shared claims for the data and backup storage. This allows storage (NAS for example) to be used for Data and Backup dirs which are safe to share across multiple artifactory nodes. + ## You may specify numberOfExistingClaims to indicate how many of these existing shared claims to mount. (Default = 1) + ## Create PVCs with ReadWriteMany that match the naming convetions: + ## {{ template "artifactory-ha.fullname" . }}-data-pvc- + ## {{ template "artifactory-ha.fullname" . }}-backup-pvc- + ## Example (using numberOfExistingClaims: 2) + ## myexample-artifactory-ha-data-pvc-0 + ## myexample-artifactory-ha-backup-pvc-0 + ## myexample-artifactory-ha-data-pvc-1 + ## myexample-artifactory-ha-backup-pvc-1 + ## Note: While you need two PVC fronting two PVs, multiple PVs can be attached to the same storage in many cases allowing you to share an underlying drive. + + ## Need to have the following set + existingSharedClaim: + enabled: false + numberOfExistingClaims: 1 + ## Should be a child directory of {{ .Values.artifactory.persistence.mountPath }} + dataDir: "{{ .Values.artifactory.persistence.mountPath }}/artifactory-data" + backupDir: "/var/opt/jfrog/artifactory-backup" + + + ## For artifactory.persistence.type nfs + ## If using NFS as the shared storage, you must have a running NFS server that is accessible by your Kubernetes + ## cluster nodes. 
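+    ## A minimal sketch of pointing the chart at an existing NFS export from the
+    ## Helm CLI (the server IP and export paths below are placeholders for your
+    ## own NFS server):
+    ##   helm upgrade --install artifactory-ha . \
+    ##     --set artifactory.persistence.type=nfs \
+    ##     --set artifactory.persistence.nfs.ip=10.0.0.240 \
+    ##     --set artifactory.persistence.nfs.haDataMount=/data \
+    ##     --set artifactory.persistence.nfs.haBackupMount=/backup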
+ ## Need to have the following set + nfs: + # Must pass actual IP of NFS server with '--set For artifactory.persistence.nfs.ip=${NFS_IP}' + ip: + haDataMount: "/data" + haBackupMount: "/backup" + dataDir: "/var/opt/jfrog/artifactory-ha" + backupDir: "/var/opt/jfrog/artifactory-backup" + capacity: 200Gi + mountOptions: [] + ## For artifactory.persistence.type google-storage + googleStorage: + endpoint: storage.googleapis.com + httpsOnly: false + # Set a unique bucket name + bucketName: "artifactory-ha-gcp" + identity: + credential: + path: "artifactory-ha/filestore" + bucketExists: false + + ## For artifactory.persistence.type aws-s3-v3 + awsS3V3: + testConnection: false + identity: + credential: + region: + bucketName: artifactory-aws + path: artifactory/filestore + endpoint: + kmsServerSideEncryptionKeyId: + kmsKeyRegion: + kmsCryptoMode: + useInstanceCredentials: true + usePresigning: false + signatureExpirySeconds: 300 + cloudFrontDomainName: + cloudFrontKeyPairId: + cloudFrontPrivateKey: + + ## For artifactory.persistence.type aws-s3 + ## IMPORTANT: Make sure S3 `endpoint` and `region` match! See https://docs.aws.amazon.com/general/latest/gr/rande.html + awsS3: + # Set a unique bucket name + bucketName: "artifactory-ha-aws" + endpoint: + region: + roleName: + identity: + credential: + path: "artifactory-ha/filestore" + refreshCredentials: true + httpsOnly: true + testConnection: false + s3AwsVersion: "AWS4-HMAC-SHA256" + + ## Additional properties to set on the s3 provider + properties: {} + # httpclient.max-connections: 100 + ## For artifactory.persistence.type azure-blob + azureBlob: + accountName: + accountKey: + endpoint: + containerName: + testConnection: false + service: + name: artifactory + type: ClusterIP + ## For supporting whitelist on the Artifactory service (useful if setting service.type=LoadBalancer) + ## Set this to a list of IP CIDR ranges + ## Example: loadBalancerSourceRanges: ['10.10.10.5/32', '10.11.10.5/32'] + ## or pass from helm command line + ## Example: helm install ... --set nginx.service.loadBalancerSourceRanges='{10.10.10.5/32,10.11.10.5/32}' + loadBalancerSourceRanges: [] + annotations: {} + ## Which nodes in the cluster should be in the external load balancer pool (have external traffic routed to them) + ## Supported pool values + ## members + ## all + pool: members + + ## The following Java options are passed to the java process running Artifactory. + ## This will be passed to all cluster members. Primary and member nodes. + javaOpts: {} + # other: "" + annotations: {} + + ## Type specific configurations. + ## There is a difference between the primary and the member nodes. + ## Customising their resources and java parameters is done here. + primary: + name: artifactory-ha-primary + labels: {} + persistence: + ## Set existingClaim to true or false + ## If true, you must prepare a PVC with the name e.g `volume-myrelease-artifactory-ha-primary-0` + existingClaim: false + ## Resources for the primary node + resources: {} + # requests: + # memory: "1Gi" + # cpu: "500m" + # limits: + # memory: "2Gi" + # cpu: "1" + ## The following Java options are passed to the java process running Artifactory primary node. 
+ ## You should set them according to the resources set above + javaOpts: + # xms: "1g" + # xmx: "2g" + corePoolSize: 16 + jmx: + enabled: false + port: 9010 + host: + ssl: false + # When authenticate is true, accessFile and passwordFile are required + authenticate: false + accessFile: + passwordFile: + # other: "" + nodeSelector: {} + + tolerations: [] + + affinity: {} + ## Only used if "affinity" is empty + podAntiAffinity: + ## Valid values are "soft" or "hard"; any other value indicates no anti-affinity + type: "" + topologyKey: "kubernetes.io/hostname" + + node: + name: artifactory-ha-member + labels: {} + persistence: + ## Set existingClaim to true or false + ## If true, you must prepare a PVC with the name e.g `volume-myrelease-artifactory-ha-member-0` + existingClaim: false + replicaCount: 2 + minAvailable: 1 + ## Resources for the member nodes + resources: {} + # requests: + # memory: "1Gi" + # cpu: "500m" + # limits: + # memory: "2Gi" + # cpu: "1" + ## The following Java options are passed to the java process running Artifactory member nodes. + ## You should set them according to the resources set above + javaOpts: + # xms: "1g" + # xmx: "2g" + corePoolSize: 16 + jmx: + enabled: false + port: 9010 + host: + ssl: false + # When authenticate is true, accessFile and passwordFile are required + authenticate: false + accessFile: + passwordFile: + # other: "" + # xms: "1g" + # xmx: "2g" + # other: "" + nodeSelector: {} + waitForPrimaryStartup: + enabled: true + time: 60 + + tolerations: [] + + ## Complete specification of the "affinity" of the member nodes; if this is non-empty, + ## "podAntiAffinity" values are not used. + affinity: {} + + ## Only used if "affinity" is empty + podAntiAffinity: + ## Valid values are "soft" or "hard"; any other value indicates no anti-affinity + type: "" + topologyKey: "kubernetes.io/hostname" + +access: + database: + maxOpenConnections: 80 + +# Init containers +initContainers: + resources: {} +# requests: +# memory: "64Mi" +# cpu: "10m" +# limits: +# memory: "128Mi" +# cpu: "250m" + + +# Nginx +nginx: + enabled: true + name: nginx + labels: {} + replicaCount: 1 + uid: 104 + gid: 107 + image: + # repository: "docker.bintray.io/jfrog/nginx-artifactory-pro" + repository: "earlyaccess.jfrog.io/nginx-artifactory-pro" + # Note that by default we use appVersion to get image tag + # version: + pullPolicy: IfNotPresent + + + # Sidecar containers for tailing Nginx logs + loggers: [] + # - access.log + # - error.log + + mainConf: | + # Main Nginx configuration file + worker_processes 4; + error_log {{ .Values.nginx.persistence.mountPath }}/logs//error.log warn; + pid /tmp/nginx.pid; + events { + worker_connections 1024; + } + http { + include /etc/nginx/mime.types; + default_type application/octet-stream; + variables_hash_max_size 1024; + variables_hash_bucket_size 64; + server_names_hash_max_size 4096; + server_names_hash_bucket_size 128; + types_hash_max_size 2048; + types_hash_bucket_size 64; + proxy_read_timeout 2400s; + client_header_timeout 2400s; + client_body_timeout 2400s; + proxy_connect_timeout 75s; + proxy_send_timeout 2400s; + proxy_buffer_size 32k; + proxy_buffers 40 32k; + proxy_busy_buffers_size 64k; + proxy_temp_file_write_size 250m; + proxy_http_version 1.1; + client_body_buffer_size 128k; + log_format main '$remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for"'; + log_format timing 'ip = $remote_addr ' + 'user = \"$remote_user\" ' + 'local_time = 
\"$time_local\" ' + 'host = $host ' + 'request = \"$request\" ' + 'status = $status ' + 'bytes = $body_bytes_sent ' + 'upstream = \"$upstream_addr\" ' + 'upstream_time = $upstream_response_time ' + 'request_time = $request_time ' + 'referer = \"$http_referer\" ' + 'UA = \"$http_user_agent\"'; + access_log {{ .Values.nginx.persistence.mountPath }}/logs/access.log timing; + sendfile on; + #tcp_nopush on; + keepalive_timeout 65; + #gzip on; + include /etc/nginx/conf.d/*.conf; + } + + artifactoryConf: | + ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3; + ssl_certificate {{ .Values.nginx.persistence.mountPath }}/ssl/tls.crt; + ssl_certificate_key {{ .Values.nginx.persistence.mountPath }}/ssl/tls.key; + ssl_session_cache shared:SSL:1m; + ssl_prefer_server_ciphers on; + ## server configuration + server { + {{- if .Values.nginx.internalPortHttps }} + listen {{ .Values.nginx.internalPortHttps }} ssl; + {{- else -}} + {{- if .Values.nginx.https.enabled }} + listen {{ .Values.nginx.https.internalPort }} ssl; + {{- end }} + {{- end }} + {{- if .Values.nginx.internalPortHttp }} + listen {{ .Values.nginx.internalPortHttp }}; + {{- else -}} + {{- if .Values.nginx.http.enabled }} + listen {{ .Values.nginx.http.internalPort }}; + {{- end }} + {{- end }} + server_name ~(?.+)\.{{ include "artifactory-ha.fullname" . }} {{ include "artifactory-ha.fullname" . }} + {{- range .Values.ingress.hosts -}} + {{- if contains "." . -}} + {{ "" | indent 0 }} ~(?.+)\.{{ (splitn "." 2 .)._1 }} {{ . }} + {{- end -}} + {{- end -}}; + + if ($http_x_forwarded_proto = '') { + set $http_x_forwarded_proto $scheme; + } + ## Application specific logs + ## access_log /var/log/nginx/artifactory-access.log timing; + ## error_log /var/log/nginx/artifactory-error.log; + rewrite ^/artifactory/?$ / redirect; + if ( $repo != "" ) { + rewrite ^/(v1|v2)/(.*) /artifactory/api/docker/$repo/$1/$2 break; + } + chunked_transfer_encoding on; + client_max_body_size 0; + + location / { + proxy_read_timeout 900; + proxy_pass_header Server; + proxy_cookie_path ~*^/.* /; + proxy_pass http://{{ include "artifactory-ha.fullname" . }}:{{ .Values.artifactory.externalPort }}/; + proxy_set_header X-JFrog-Override-Base-Url $http_x_forwarded_proto://$host:$server_port; + proxy_set_header X-Forwarded-Port $server_port; + proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto; + proxy_set_header Host $http_host; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + + location /artifactory/ { + if ( $request_uri ~ ^/artifactory/(.*)$ ) { + proxy_pass http://{{ include "artifactory-ha.fullname" . }}:{{ .Values.artifactory.externalArtifactoryPort }}/artifactory/$1; + } + proxy_pass http://{{ include "artifactory-ha.fullname" . }}:{{ .Values.artifactory.externalArtifactoryPort }}/artifactory/; + } + } + } + + service: + ## For minikube, set this to NodePort, elsewhere use LoadBalancer + #type: NodePort + type: LoadBalancer + #type: ClusterIP + ## For supporting whitelist on the Nginx LoadBalancer service + ## Set this to a list of IP CIDR ranges + ## Example: loadBalancerSourceRanges: ['10.10.10.5/32', '10.11.10.5/32'] + ## or pass from helm command line + ## Example: helm install ... --set nginx.service.loadBalancerSourceRanges='{10.10.10.5/32,10.11.10.5/32}' + loadBalancerSourceRanges: [] + ## Provide static ip address + loadBalancerIP: + ## There are two available options: “Cluster” (default) and “Local”. 
+ externalTrafficPolicy: Cluster + labels: {} + # label-key: label-value + http: + enabled: true + externalPort: 80 + internalPort: 80 + https: + enabled: true + externalPort: 443 + internalPort: 443 + # DEPRECATED: The following will be replaced by L1065-L1076 in a future release + # externalPortHttp: 80 + # internalPortHttp: 80 + # externalPortHttps: 443 + # internalPortHttps: 443 + + ## The following settings are to configure the frequency of the liveness and readiness probes + livenessProbe: + enabled: true + path: /router/api/v1/system/health + initialDelaySeconds: 60 + failureThreshold: 10 + timeoutSeconds: 10 + periodSeconds: 10 + successThreshold: 1 + + readinessProbe: + enabled: true + path: /router/api/v1/system/health + initialDelaySeconds: 10 + failureThreshold: 10 + timeoutSeconds: 10 + periodSeconds: 10 + successThreshold: 1 + ## The SSL secret that will be used by the Nginx pod + # tlsSecretName: chart-example-tls + ## Custom ConfigMap for nginx.conf + customConfigMap: + ## Custom ConfigMap for artifactory.conf + customArtifactoryConfigMap: + persistence: + mountPath: "/var/opt/jfrog/nginx" + enabled: false + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + + accessMode: ReadWriteOnce + size: 5Gi + ## nginx data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClassName: "-" + resources: {} + # requests: + # memory: "250Mi" + # cpu: "100m" + # limits: + # memory: "250Mi" + # cpu: "500m" + + nodeSelector: {} + + tolerations: [] + + affinity: {} + +# Filebeat Sidecar container +## The provided filebeat configuration is for Artifactory logs. It assumes you have a logstash installed and configured properly. 
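+## A minimal sketch of enabling the Filebeat sidecar from the Helm CLI (the Logstash URL is a placeholder for your own endpoint):
+##   helm upgrade --install artifactory-ha jfrog/artifactory-ha \
+##     --set filebeat.enabled=true \
+##     --set filebeat.logstashUrl=logstash.logging.svc.cluster.local:5044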
+filebeat: + enabled: false + name: artifactory-filebeat + image: + repository: "docker.elastic.co/beats/filebeat" + version: 7.5.1 + logstashUrl: "logstash:5044" + + terminationGracePeriod: 10 + + livenessProbe: + exec: + command: + - sh + - -c + - | + #!/usr/bin/env bash -e + curl --fail 127.0.0.1:5066 + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 5 + + readinessProbe: + exec: + command: + - sh + - -c + - | + #!/usr/bin/env bash -e + filebeat test output + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 5 + + resources: {} +# requests: +# memory: "100Mi" +# cpu: "100m" +# limits: +# memory: "100Mi" +# cpu: "100m" + + filebeatYml: | + logging.level: info + path.data: {{ .Values.artifactory.persistence.mountPath }}/log/filebeat + name: artifactory-filebeat + queue.spool: ~ + filebeat.inputs: + - type: log + enabled: true + close_eof: ${CLOSE:false} + paths: + - {{ .Values.artifactory.persistence.mountPath }}/log/*.log + fields: + service: "jfrt" + log_type: "artifactory" + output: + logstash: + hosts: ["{{ .Values.filebeat.logstashUrl }}"] diff --git a/Openshift4/artifactory-ha-operator/watches.yaml b/Openshift4/artifactory-ha-operator/watches.yaml new file mode 100644 index 0000000..42941f3 --- /dev/null +++ b/Openshift4/artifactory-ha-operator/watches.yaml @@ -0,0 +1,5 @@ +--- +- version: v1alpha1 + group: charts.helm.k8s.io + kind: OpenshiftArtifactoryHa + chart: helm-charts/openshift-artifactory-ha diff --git a/Openshift4/artifactoryha-helm/CHANGELOG.md b/Openshift4/artifactoryha-helm/CHANGELOG.md new file mode 100755 index 0000000..075bfa2 --- /dev/null +++ b/Openshift4/artifactoryha-helm/CHANGELOG.md @@ -0,0 +1,557 @@ +# JFrog Artifactory-ha Chart Changelog +All changes to this chart will be documented in this file. 
+ +## [1.3.7] - Jan 07, 2020 +* Add support for customizable `mountOptions` of NFS PVs + +## [1.3.6] - Dec 30, 2019 +* Fix for nginx probes failing when launched with http disabled + +## [1.3.5] - Dec 24, 2019 +* Better support for custom `artifactory.internalPort` + +## [1.3.4] - Dec 23, 2019 +* Mark empty map values with `{}` + +## [1.3.3] - Dec 16, 2019 +* Another fix for toggling nginx service ports + +## [1.3.2] - Dec 12, 2019 +* Fix for toggling nginx service ports + +## [1.3.1] - Dec 10, 2019 +* Add support for toggling nginx service ports + +## [1.3.0] - Dec 1, 2019 +* Updated Artifactory version to 6.16.0 + +## [1.2.4] - Nov 28, 2019 +* Add support for using existing PriorityClass + +## [1.2.3] - Nov 27, 2019 +* Add support for PriorityClass + +## [1.2.2] - Nov 20, 2019 +* Update Artifactory logo + +## [1.2.1] - Nov 18, 2019 +* Add the option to provide service account annotations (in order to support stuff like https://docs.aws.amazon.com/eks/latest/userguide/specify-service-account-role.html) + +## [1.2.0] - Nov 18, 2019 +* Updated Artifactory version to 6.15.0 + +## [1.1.12] - Nov 17, 2019 +* Fix `README.md` format (broken table) + +## [1.1.11] - Nov 17, 2019 +* Update comment on Artifactory master key + +## [1.1.10] - Nov 17, 2019 +* Fix creation of double slash in nginx artifactory configuration + +## [1.1.9] - Nov 14, 2019 +* Set explicit `postgresql.postgresqlPassword=""` to avoid helm v3 error + +## [1.1.8] - Nov 12, 2019 +* Updated Artifactory version to 6.14.1 + +## [1.1.7] - Nov 11, 2019 +* Additional documentation for masterKey + +## [1.1.6] - Nov 10, 2019 +* Update PostgreSQL chart version to 7.0.1 +* Use formal PostgreSQL configuration format + +## [1.1.5] - Nov 8, 2019 +* Add support `artifactory.service.loadBalancerSourceRanges` for whitelisting when setting `artifactory.service.type=LoadBalancer` + +## [1.1.4] - Nov 6, 2019 +* Add support for any type of environment variable by using `extraEnvironmentVariables` as-is + +## [1.1.3] - Nov 6, 2019 +* Add nodeselector support for Postgresql + +## [1.1.2] - Nov 5, 2019 +* Add support for the aws-s3-v3 filestore, which adds support for pod IAM roles + +## [1.1.1] - Nov 4, 2019 +* When using `copyOnEveryStartup`, make sure that the target base directories are created before copying the files + +## [1.1.0] - Nov 3, 2019 +* Updated Artifactory version to 6.14.0 + +## [1.0.1] - Nov 3, 2019 +* Make sure the artifactory pod exits when one of the pre-start stages fail + +## [1.0.0] - Oct 27, 2019 +**IMPORTANT - BREAKING CHANGES!**
+**DOWNTIME MIGHT BE REQUIRED FOR AN UPGRADE!** +* If this is a new deployment or you already use an external database (`postgresql.enabled=false`), these changes **do not affect you**! +* If this is an upgrade and you are using the default PostgreSQL (`postgresql.enabled=true`), must use the upgrade instructions in [UPGRADE_NOTES.md](UPGRADE_NOTES.md)! +* PostgreSQL sub chart was upgraded to version `6.5.x`. This version is **not backward compatible** with the old version (`0.9.5`)! +* Note the following **PostgreSQL** Helm chart changes + * The chart configuration has changed! See [values.yaml](values.yaml) for the new keys used + * **PostgreSQL** is deployed as a StatefulSet + * See [PostgreSQL helm chart](https://hub.helm.sh/charts/stable/postgresql) for all available configurations + +## [0.17.3] - Oct 24, 2019 +* Change the preStartCommand to support templating + +## [0.17.2] - Oct 21, 2019 +* Add support for setting `artifactory.primary.labels` +* Add support for setting `artifactory.node.labels` +* Add support for setting `nginx.labels` + +## [0.17.1] - Oct 10, 2019 +* Updated Artifactory version to 6.13.1 + +## [0.17.0] - Oct 7, 2019 +* Updated Artifactory version to 6.13.0 + +## [0.16.7] - Sep 24, 2019 +* Option to skip wait-for-db init container with '--set waitForDatabase=false' + +## [0.16.6] - Sep 24, 2019 +* Add support for setting `nginx.service.labels` + +## [0.16.5] - Sep 23, 2019 +* Add support for setting `artifactory.customInitContainersBegin` + +## [0.16.4] - Sep 20, 2019 +* Add support for setting `initContainers.resources` + +## [0.16.3] - Sep 11, 2019 +* Updated Artifactory version to 6.12.2 + +## [0.16.2] - Sep 9, 2019 +* Updated Artifactory version to 6.12.1 + +## [0.16.1] - Aug 22, 2019 +* Fix the nginx server_name directive used with ingress.hosts + +## [0.16.0] - Aug 21, 2019 +* Updated Artifactory version to 6.12.0 + +## [0.15.15] - Aug 18, 2019 +* Fix existingSharedClaim permissions issue and example + +## [0.15.14] - Aug 14, 2019 +* Updated Artifactory version to 6.11.6 + +## [0.15.13] - Aug 11, 2019 +* Fix Ingress routing and add an example + +## [0.15.12] - Aug 6, 2019 +* Do not mount `access/etc/bootstrap.creds` unless user specifies a custom password or secret (Access already generates a random password if not provided one) +* If custom `bootstrap.creds` is provided (using keys or custom secret), prepare it with an init container so the temp file does not persist + +## [0.15.11] - Aug 5, 2019 +* Improve binarystore config + 1. Convert to a secret + 2. Move config to values.yaml + 3. 
Support an external secret + +## [0.15.10] - Aug 5, 2019 +* Don't create the nginx configmaps when nginx.enabled is false + +## [0.15.9] - Aug 1, 2019 +* Fix masterkey/masterKeySecretName not specified warning render logic in NOTES.txt + +## [0.15.8] - Jul 28, 2019 +* Simplify nginx setup and shorten initial wait for probes + +## [0.15.7] - Jul 25, 2019 +* Updated README about how to apply Artifactory licenses + +## [0.15.6] - Jul 22, 2019 +* Change Ingress API to be compatible with recent kubernetes versions + +## [0.15.5] - Jul 22, 2019 +* Updated Artifactory version to 6.11.3 + +## [0.15.4] - Jul 11, 2019 +* Add `artifactory.customVolumeMounts` support to member node statefulset template + +## [0.15.3] - Jul 11, 2019 +* Add ingress.hosts to the Nginx server_name directive when ingress is enabled to help with Docker repository sub domain configuration + +## [0.15.2] - Jul 3, 2019 +* Add the option for changing nginx config using values.yaml and remove outdated reverse proxy documentation + +## [0.15.1] - Jul 1, 2019 +* Updated Artifactory version to 6.11.1 + +## [0.15.0] - Jun 27, 2019 +* Updated Artifactory version to 6.11.0 and Restart Primary node when bootstrap.creds file has been modified in artifactory-ha + +## [0.14.4] - Jun 24, 2019 +* Add the option to provide an IP for the access-admin endpoints + +## [0.14.3] - Jun 24, 2019 +* Update chart maintainers + +## [0.14.2] - Jun 24, 2019 +* Change Nginx to point to the artifactory externalPort + +## [0.14.1] - Jun 23, 2019 +* Add values files for small, medium and large installations + +## [0.14.0] - Jun 20, 2019 +* Use ConfigMaps for nginx configuration and remove nginx postStart command + +## [0.13.10] - Jun 19, 2019 +* Updated Artifactory version to 6.10.4 + +## [0.13.9] - Jun 18, 2019 +* Add the option to provide additional ingress rules + +## [0.13.8] - Jun 14, 2019 +* Updated readme with improved external database setup example + +## [0.13.7] - Jun 6, 2019 +* Updated Artifactory version to 6.10.3 +* Updated installer-info template + +## [0.13.6] - Jun 6, 2019 +* Updated Google Cloud Storage API URL and https settings + +## [0.13.5] - Jun 5, 2019 +* Delete the db.properties file on Artifactory startup + +## [0.13.4] - Jun 3, 2019 +* Updated Artifactory version to 6.10.2 + +## [0.13.3] - May 21, 2019 +* Updated Artifactory version to 6.10.1 + +## [0.13.2] - May 19, 2019 +* Fix missing logger image tag + +## [0.13.1] - May 15, 2019 +* Support `artifactory.persistence.cacheProviderDir` for on-premise cluster + +## [0.13.0] - May 7, 2019 +* Updated Artifactory version to 6.10.0 + +## [0.12.23] - May 5, 2019 +* Add support for setting `artifactory.async.corePoolSize` + +## [0.12.22] - May 2, 2019 +* Remove unused property `artifactory.releasebundle.feature.enabled` + +## [0.12.21] - Apr 30, 2019 +* Add support for JMX monitoring + +## [0.12.20] - Apr29, 2019 +* Added support for headless services + +## [0.12.19] - Apr 28, 2019 +* Added support for `cacheProviderDir` + +## [0.12.18] - Apr 18, 2019 +* Changing API StatefulSet version to `v1` and permission fix for custom `artifactory.conf` for Nginx + +## [0.12.17] - Apr 16, 2019 +* Updated documentation for Reverse Proxy Configuration + +## [0.12.16] - Apr 12, 2019 +* Added support for `customVolumeMounts` + +## [0.12.15] - Aprl 12, 2019 +* Added support for `bucketExists` flag for googleStorage + +## [0.12.14] - Apr 11, 2019 +* Replace `curl` examples with `wget` due to the new base image + +## [0.12.13] - Aprl 07, 2019 +* Add support for providing the Artifactory license as a 
parameter + +## [0.12.12] - Apr 10, 2019 +* Updated Artifactory version to 6.9.1 + +## [0.12.11] - Aprl 04, 2019 +* Add support for templated extraEnvironmentVariables + +## [0.12.10] - Aprl 07, 2019 +* Change network policy API group + +## [0.12.9] - Aprl 04, 2019 +* Apply the existing PVC for members (in addition to primary) + +## [0.12.8] - Aprl 03, 2019 +* Bugfix for userPluginSecrets + +## [0.12.7] - Apr 4, 2019 +* Add information about upgrading Artifactory with auto-generated postgres password + +## [0.12.6] - Aprl 03, 2019 +* Added installer info + +## [0.12.5] - Aprl 03, 2019 +* Allow secret names for user plugins to contain template language + +## [0.12.4] - Apr 02, 2019 +* Fix issue #253 (use existing PVC for data and backup storage) + +## [0.12.3] - Apr 02, 2019 +* Allow NetworkPolicy configurations (defaults to allow all) + +## [0.12.2] - Aprl 01, 2019 +* Add support for user plugin secret + +## [0.12.1] - Mar 26, 2019 +* Add the option to copy a list of files to ARTIFACTORY_HOME on startup + +## [0.12.0] - Mar 26, 2019 +* Updated Artifactory version to 6.9.0 + +## [0.11.18] - Mar 25, 2019 +* Add CI tests for persistence, ingress support and nginx + +## [0.11.17] - Mar 22, 2019 +* Add the option to change the default access-admin password + +## [0.11.16] - Mar 22, 2019 +* Added support for `.Probe.path` to customise the paths used for health probes + +## [0.11.15] - Mar 21, 2019 +* Added support for `artifactory.customSidecarContainers` to create custom sidecar containers +* Added support for `artifactory.customVolumes` to create custom volumes + +## [0.11.14] - Mar 21, 2019 +* Make ingress path configurable + +## [0.11.13] - Mar 19, 2019 +* Move the copy of bootstrap config from postStart to preStart for Primary + +## [0.11.12] - Mar 19, 2019 +* Fix existingClaim example + +## [0.11.11] - Mar 18, 2019 +* Disable the option to use nginx PVC with more than one replica + +## [0.11.10] - Mar 15, 2019 +* Wait for nginx configuration file before using it + +## [0.11.9] - Mar 15, 2019 +* Revert securityContext changes since they were causing issues + +## [0.11.8] - Mar 15, 2019 +* Fix issue #247 (init container failing to run) + +## [0.11.7] - Mar 14, 2019 +* Updated Artifactory version to 6.8.7 + +## [0.11.6] - Mar 13, 2019 +* Move securityContext to container level + +## [0.11.5] - Mar 11, 2019 +* Add the option to use existing volume claims for Artifactory storage + +## [0.11.4] - Mar 11, 2019 +* Updated Artifactory version to 6.8.6 + +## [0.11.3] - Mar 5, 2019 +* Updated Artifactory version to 6.8.4 + +## [0.11.2] - Mar 4, 2019 +* Add support for catalina logs sidecars + +## [0.11.1] - Feb 27, 2019 +* Updated Artifactory version to 6.8.3 + +## [0.11.0] - Feb 25, 2019 +* Add nginx support for tail sidecars + +## [0.10.3] - Feb 21, 2019 +* Add s3AwsVersion option to awsS3 configuration for use with IAM roles + +## [0.10.2] - Feb 19, 2019 +* Updated Artifactory version to 6.8.2 + +## [0.10.1] - Feb 17, 2019 +* Updated Artifactory version to 6.8.1 +* Add example of `SERVER_XML_EXTRA_CONNECTOR` usage + +## [0.10.0] - Feb 15, 2019 +* Updated Artifactory version to 6.8.0 + +## [0.9.7] - Feb 13, 2019 +* Updated Artifactory version to 6.7.3 + +## [0.9.6] - Feb 7, 2019 +* Add support for tail sidecars to view logs from k8s api + +## [0.9.5] - Feb 6, 2019 +* Fix support for customizing statefulset `terminationGracePeriodSeconds` + +## [0.9.4] - Feb 5, 2019 +* Add support for customizing statefulset `terminationGracePeriodSeconds` + +## [0.9.3] - Feb 5, 2019 +* Remove the inactive server 
remove plugin + +## [0.9.2] - Feb 3, 2019 +* Updated Artifactory version to 6.7.2 + +## [0.9.1] - Jan 27, 2019 +* Fix support for Azure Blob Storage Binary provider + +## [0.9.0] - Jan 23, 2019 +* Updated Artifactory version to 6.7.0 + +## [0.8.10] - Jan 22, 2019 +* Added support for `artifactory.customInitContainers` to create custom init containers + +## [0.8.9] - Jan 18, 2019 +* Added support of values ingress.labels + +## [0.8.8] - Jan 16, 2019 +* Mount replicator.yaml (config) directly to /replicator_extra_conf + +## [0.8.7] - Jan 15, 2018 +* Add support for Azure Blob Storage Binary provider + +## [0.8.6] - Jan 13, 2019 +* Fix documentation about nginx group id + +## [0.8.5] - Jan 13, 2019 +* Updated Artifactory version to 6.6.5 + +## [0.8.4] - Jan 8, 2019 +* Make artifactory.replicator.publicUrl required when the replicator is enabled + +## [0.8.3] - Jan 1, 2019 +* Updated Artifactory version to 6.6.3 +* Add support for `artifactory.extraEnvironmentVariables` to pass more environment variables to Artifactory + +## [0.8.2] - Dec 28, 2018 +* Fix location `replicator.yaml` is copied to + +## [0.8.1] - Dec 27, 2018 +* Updated Artifactory version to 6.6.1 + +## [0.8.0] - Dec 20, 2018 +* Updated Artifactory version to 6.6.0 + +## [0.7.17] - Dec 17, 2018 +* Updated Artifactory version to 6.5.13 + +## [0.7.16] - Dec 12, 2018 +* Fix documentation about Artifactory license setup using secret + +## [0.7.15] - Dec 9, 2018 +* AWS S3 add `roleName` for using IAM role + +## [0.7.14] - Dec 6, 2018 +* AWS S3 `identity` and `credential` are now added only if have a value to allow using IAM role + +## [0.7.13] - Dec 5, 2018 +* Remove Distribution certificates creation. + +## [0.7.12] - Dec 2, 2018 +* Remove Java option "-Dartifactory.locking.provider.type=db". This is already the default setting. 
+ +## [0.7.11] - Nov 30, 2018 +* Updated Artifactory version to 6.5.9 + +## [0.7.10] - Nov 29, 2018 +* Fixed the volumeMount for the replicator.yaml + +## [0.7.9] - Nov 29, 2018 +* Optionally include primary node into poddisruptionbudget + +## [0.7.8] - Nov 29, 2018 +* Updated postgresql version to 9.6.11 + +## [0.7.7] - Nov 27, 2018 +* Updated Artifactory version to 6.5.8 + +## [0.7.6] - Nov 18, 2018 +* Added support for configMap to use custom Reverse Proxy Configuration with Nginx + +## [0.7.5] - Nov 14, 2018 +* Updated Artifactory version to 6.5.3 + +## [0.7.4] - Nov 13, 2018 +* Allow pod anti-affinity settings to include primary node + +## [0.7.3] - Nov 12, 2018 +* Support artifactory.preStartCommand for running command before entrypoint starts + +## [0.7.2] - Nov 7, 2018 +* Support database.url parameter (DB_URL) + +## [0.7.1] - Oct 29, 2018 +* Change probes port to 8040 (so they will not be blocked when all tomcat threads on 8081 are exhausted) + +## [0.7.0] - Oct 28, 2018 +* Update postgresql chart to version 0.9.5 to be able and use `postgresConfig` options + +## [0.6.9] - Oct 23, 2018 +* Fix providing external secret for database credentials + +## [0.6.8] - Oct 22, 2018 +* Allow user to configure externalTrafficPolicy for Loadbalancer + +## [0.6.7] - Oct 22, 2018 +* Updated ingress annotation support (with examples) to support docker registry v2 + +## [0.6.6] - Oct 21, 2018 +* Updated Artifactory version to 6.5.2 + +## [0.6.5] - Oct 19, 2018 +* Allow providing pre-existing secret containing master key +* Allow arbitrary annotations on primary and member node pods +* Enforce size limits when using local storage with `emptyDir` +* Allow `soft` or `hard` specification of member node anti-affinity +* Allow providing pre-existing secrets containing external database credentials +* Fix `s3` binary store provider to properly use the `cache-fs` provider +* Allow arbitrary properties when using the `s3` binary store provider + +## [0.6.4] - Oct 18, 2018 +* Updated Artifactory version to 6.5.1 + +## [0.6.3] - Oct 17, 2018 +* Add Apache 2.0 license + +## [0.6.2] - Oct 14, 2018 +* Make S3 endpoint configurable (was hardcoded with `s3.amazonaws.com`) + +## [0.6.1] - Oct 11, 2018 +* Allows ingress default `backend` to be enabled or disabled (defaults to enabled) + +## [0.6.0] - Oct 11, 2018 +* Updated Artifactory version to 6.5.0 + +## [0.5.3] - Oct 9, 2018 +* Quote ingress hosts to support wildcard names + +## [0.5.2] - Oct 2, 2018 +* Add `helm repo add jfrog https://charts.jfrog.io` to README + +## [0.5.1] - Oct 2, 2018 +* Set Artifactory to 6.4.1 + +## [0.5.0] - Sep 27, 2018 +* Set Artifactory to 6.4.0 + +## [0.4.7] - Sep 26, 2018 +* Add ci/test-values.yaml + +## [0.4.6] - Sep 25, 2018 +* Add PodDisruptionBudget for member nodes, defaulting to minAvailable of 1 + +## [0.4.4] - Sep 2, 2018 +* Updated Artifactory version to 6.3.2 + +## [0.4.0] - Aug 22, 2018 +* Added support to run as non root +* Updated Artifactory version to 6.2.0 + +## [0.3.0] - Aug 22, 2018 +* Enabled RBAC Support +* Added support for PostStartCommand (To download Database JDBC connector) +* Increased postgresql max_connections +* Added support for `nginx.conf` ConfigMap +* Updated Artifactory version to 6.1.0 diff --git a/Openshift4/artifactoryha-helm/Chart.yaml b/Openshift4/artifactoryha-helm/Chart.yaml new file mode 100755 index 0000000..0e1989f --- /dev/null +++ b/Openshift4/artifactoryha-helm/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +appVersion: 7.0.2 +description: Universal Repository Manager supporting all major 
packaging formats, + build tools and CI servers. +home: https://www.jfrog.com/artifactory/ +icon: https://raw.githubusercontent.com/jfrog/charts/master/stable/artifactory-ha/logo/artifactory-logo.png +keywords: +- artifactory +- jfrog +- devops +maintainers: +- email: amithk@jfrog.com + name: amithins +- email: daniele@jfrog.com + name: danielezer +- email: eldada@jfrog.com + name: eldada +- email: rimasm@jfrog.com + name: rimusz +name: openshift-artifactory-ha +sources: +- https://bintray.com/jfrog/product/JFrog-Artifactory-Pro/view +- https://github.com/jfrog/charts +version: 2.0.4 diff --git a/Openshift4/artifactoryha-helm/LICENSE b/Openshift4/artifactoryha-helm/LICENSE new file mode 100755 index 0000000..8dada3e --- /dev/null +++ b/Openshift4/artifactoryha-helm/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/Openshift4/artifactoryha-helm/README.md b/Openshift4/artifactoryha-helm/README.md new file mode 100755 index 0000000..76bd6af --- /dev/null +++ b/Openshift4/artifactoryha-helm/README.md @@ -0,0 +1,1266 @@ +# JFrog Artifactory High Availability Helm Chart + +## Prerequisites Details + +* Kubernetes 1.8+ +* Artifactory HA license + +## Chart Details +This chart will do the following: + +* Deploy Artifactory highly available cluster. 1 primary node and 2 member nodes. +* Deploy a PostgreSQL database +* Deploy an Nginx server + +## Artifactory HA architecture +The Artifactory HA cluster in this chart is made up of +- A single primary node +- Two member nodes, which can be resized at will + +Load balancing is done to the member nodes only. +This leaves the primary node free to handle jobs and tasks and not be interrupted by inbound traffic. +> This can be controlled by the parameter `artifactory.service.pool`. + +## Installing the Chart + +### Add JFrog Helm repository +Before installing JFrog helm charts, you need to add the [JFrog helm repository](https://charts.jfrog.io/) to your helm client +```bash +helm repo add jfrog https://charts.jfrog.io +``` + +### Install Chart +To install the chart with the release name `artifactory-ha`: +```bash +helm install --name artifactory-ha --set postgresql.postgresqlPassword= jfrog/artifactory-ha +``` + +### System Configuration +Artifactory uses a common system configuration file - `system.yaml`. See [official documentation](https://www.jfrog.com/confluence/display/JFROG/System+YAML+Configuration+File) on its usage. +In order to override the default `system.yaml` configuration, do the following: +```bash +artifactory: + systemYaml: | + +``` + +### Accessing Artifactory +**NOTE:** It might take a few minutes for Artifactory's public IP to become available, and the nodes to complete initial setup. +Follow the instructions outputted by the install command to get the Artifactory IP and URL to access it. + +### Updating Artifactory +Once you have a new chart version, you can update your deployment with +```bash +helm upgrade artifactory-ha jfrog/artifactory-ha +``` + +If artifactory was installed without providing a value to postgresql.postgresqlPassword (a password was autogenerated), follow these instructions: +1. 
Get the current password by running: +```bash +POSTGRES_PASSWORD=$(kubectl get secret -n -postgresql -o jsonpath="{.data.postgresql-password}" | base64 --decode) +``` +2. Upgrade the release by passing the previously auto-generated secret: +```bash +helm upgrade jfrog/artifactory-ha --set postgresql.postgresqlPassword=${POSTGRES_PASSWORD} +``` + +This will apply any configuration changes on your existing deployment. + +### Artifactory memory and CPU resources +The Artifactory HA Helm chart comes with support for configured resource requests and limits to all pods. By default, these settings are commented out. +It is **highly** recommended to set these so you have full control of the allocated resources and limits. + +See more information on [setting resources for your Artifactory based on planned usage](https://www.jfrog.com/confluence/display/RTF/System+Requirements#SystemRequirements-RecommendedHardware). + +```bash +# Example of setting resource requests and limits to all pods (including passing java memory settings to Artifactory) +helm install --name artifactory-ha \ + --set artifactory.primary.resources.requests.cpu="500m" \ + --set artifactory.primary.resources.limits.cpu="2" \ + --set artifactory.primary.resources.requests.memory="1Gi" \ + --set artifactory.primary.resources.limits.memory="4Gi" \ + --set artifactory.primary.javaOpts.xms="1g" \ + --set artifactory.primary.javaOpts.xmx="4g" \ + --set artifactory.node.resources.requests.cpu="500m" \ + --set artifactory.node.resources.limits.cpu="2" \ + --set artifactory.node.resources.requests.memory="1Gi" \ + --set artifactory.node.resources.limits.memory="4Gi" \ + --set artifactory.node.javaOpts.xms="1g" \ + --set artifactory.node.javaOpts.xmx="4g" \ + --set initContainers.resources.requests.cpu="10m" \ + --set initContainers.resources.limits.cpu="250m" \ + --set initContainers.resources.requests.memory="64Mi" \ + --set initContainers.resources.limits.memory="128Mi" \ + --set postgresql.resources.requests.cpu="200m" \ + --set postgresql.resources.limits.cpu="1" \ + --set postgresql.resources.requests.memory="500Mi" \ + --set postgresql.resources.limits.memory="1Gi" \ + --set nginx.resources.requests.cpu="100m" \ + --set nginx.resources.limits.cpu="250m" \ + --set nginx.resources.requests.memory="250Mi" \ + --set nginx.resources.limits.memory="500Mi" \ + jfrog/artifactory-ha +``` +> Artifactory java memory parameters can (and should) also be set to match the allocated resources with `artifactory.[primary|node].javaOpts.xms` and `artifactory.[primary|node].javaOpts.xmx`. + +Get more details on configuring Artifactory in the [official documentation](https://www.jfrog.com/confluence/). + +Although it is possible to set resources limits and requests this way, it is recommended to use the pre-built values files +for small, medium and large installation and change them according to your needs (if necessary), as described [here](#Deploying-Artifactory-for-small/medium/large-installations) + +### Deploying Artifactory for small/medium/large installations +In the chart directory, we have added three values files, one for each installation type - small/medium/large. These values files are recommendations for setting resources requests and limits for your installation. The values are derived from the following [documentation](https://www.jfrog.com/confluence/display/EP/Installing+on+Kubernetes#InstallingonKubernetes-Systemrequirements). 
You can find them in the corresponding chart directory - values-small.yaml, values-medium.yaml and values-large.yaml.
+
+### Artifactory storage
+Artifactory HA supports a wide range of storage back ends. You can see more details on [Artifactory HA storage options](https://www.jfrog.com/confluence/display/RTF/HA+Installation+and+Setup#HAInstallationandSetup-SettingUpYourStorageConfiguration)
+
+In this chart, you set the type of storage you want with `artifactory.persistence.type` and pass the required configuration settings.
+The default storage in this chart is the `file-system` replication, where the data is replicated to all nodes.
+
+> **IMPORTANT:** All storage configurations (except NFS) come with a default `artifactory.persistence.redundancy` parameter.
+This is used to set how many replicas of a binary should be stored in the cluster's nodes.
+Once this value is set on initial deployment, you cannot update it using helm.
+It is recommended to set this to a number greater than half of your cluster's size, and never scale your cluster down to a size smaller than this number.
+
+#### Existing volume claim
+
+###### Primary node
+In order to use an existing volume claim for the Artifactory primary storage, you need to:
+- Create a persistent volume claim by the name `volume-<release_name>-artifactory-ha-primary-0`, e.g. `volume-myrelease-artifactory-ha-primary-0`
+- Pass a parameter to `helm install` and `helm upgrade`
+```bash
+...
+--set artifactory.primary.persistence.existingClaim=true
+```
+
+###### Member nodes
+In order to use an existing volume claim for the Artifactory member nodes storage, you need to:
+- Create persistent volume claims according to the number of replicas defined at `artifactory.node.replicaCount`, by the names `volume-<release_name>-artifactory-ha-member-<ordinal>`, e.g. `volume-myrelease-artifactory-ha-member-0` and `volume-myrelease-artifactory-ha-member-1`.
+- Pass a parameter to `helm install` and `helm upgrade`
+```bash
+...
+--set artifactory.node.persistence.existingClaim=true
+```
+
+#### Existing shared volume claim
+
+In order to use an existing claim (for data and backup) that is to be shared across all nodes, you need to:
+
+- Create PVCs with ReadWriteMany that match the naming conventions:
+```
+  {{ template "artifactory-ha.fullname" . }}-data-pvc-
+  {{ template "artifactory-ha.fullname" . }}-backup-pvc-
+```
+An example that shows 2 existing claims to be used:
+```
+  myexample-artifactory-ha-data-pvc-0
+  myexample-artifactory-ha-backup-pvc-0
+  myexample-artifactory-ha-data-pvc-1
+  myexample-artifactory-ha-backup-pvc-1
+```
+- Set `artifactory.persistence.fileSystem.existingSharedClaim.enabled` to true, either in values.yaml or with `--set`:
+```
+--set artifactory.persistence.fileSystem.existingSharedClaim.enabled=true
+--set artifactory.persistence.fileSystem.existingSharedClaim.numberOfExistingClaims=2
+```
+
+#### NFS
+To use an NFS server as your cluster's storage, you need to:
+- Set up an NFS server. Get its IP as `NFS_IP`
+- Create `data` and `backup` directories on the NFS exported directory, with write permissions for all
+- Pass NFS parameters to `helm install` and `helm upgrade`
+```bash
+...
+--set artifactory.persistence.type=nfs \
+--set artifactory.persistence.nfs.ip=${NFS_IP} \
+...
+```
+
+#### Google Storage
+To use a Google Storage bucket as the cluster's filestore. See [Google Storage Binary Provider](https://www.jfrog.com/confluence/display/RTF/Configuring+the+Filestore#ConfiguringtheFilestore-GoogleStorageBinaryProvider)
+- Pass Google Storage parameters to `helm install` and `helm upgrade`
+```bash
+...
+--set artifactory.persistence.type=google-storage \
+--set artifactory.persistence.googleStorage.identity=${GCP_ID} \
+--set artifactory.persistence.googleStorage.credential=${GCP_KEY} \
+...
+```
+
+#### AWS S3
+**NOTE** Keep in mind that when using the `aws-s3` persistence type, you will not be able to provide an IAM role on the pod level.
+In order to grant permissions to Artifactory using an IAM role, you will have to attach the IAM role to the machine(s) on which Artifactory is running.
+This is due to the fact that the `aws-s3` template uses the `JetS3t` library to interact with AWS. If you want to grant an IAM role at the pod level, see the `AWS S3 V3` section.
+
+To use an AWS S3 bucket as the cluster's filestore. See [S3 Binary Provider](https://www.jfrog.com/confluence/display/RTF/Configuring+the+Filestore#ConfiguringtheFilestore-S3BinaryProvider)
+- Pass AWS S3 parameters to `helm install` and `helm upgrade`
+```bash
+...
+# With explicit credentials:
+--set artifactory.persistence.type=aws-s3 \
+--set artifactory.persistence.awsS3.endpoint=${AWS_S3_ENDPOINT} \
+--set artifactory.persistence.awsS3.region=${AWS_REGION} \
+--set artifactory.persistence.awsS3.identity=${AWS_ACCESS_KEY_ID} \
+--set artifactory.persistence.awsS3.credential=${AWS_SECRET_ACCESS_KEY} \
+...
+
+...
+# With using existing IAM role
+--set artifactory.persistence.type=aws-s3 \
+--set artifactory.persistence.awsS3.endpoint=${AWS_S3_ENDPOINT} \
+--set artifactory.persistence.awsS3.region=${AWS_REGION} \
+--set artifactory.persistence.awsS3.roleName=${AWS_ROLE_NAME} \
+...
+```
+**NOTE:** Make sure S3 `endpoint` and `region` match. See [AWS documentation on endpoint](https://docs.aws.amazon.com/general/latest/gr/rande.html)
+
+#### AWS S3 V3
+To use an AWS S3 bucket as the cluster's filestore and access it with the official AWS SDK, see [S3 Official SDK Binary Provider](https://www.jfrog.com/confluence/display/RTF/Configuring+the+Filestore#ConfiguringtheFilestore-AmazonS3OfficialSDKTemplate).
+This filestore template uses the official AWS SDK, unlike the `aws-s3` implementation that uses the `JetS3t` library.
+Use this template if you want to attach an IAM role to the Artifactory pod directly (as opposed to attaching it to the machine(s) that Artifactory will run on).
+
+**NOTE** This will have to be combined with a k8s mechanism for attaching IAM roles to pods, like [kube2iam](https://github.com/helm/charts/tree/master/stable/kube2iam) or anything similar.
+
+- Pass AWS S3 V3 parameters and the annotation pointing to the IAM role (when using an IAM role; this is kube2iam-specific and may vary depending on the implementation) to `helm install` and `helm upgrade`
+
+```bash
+# With explicit credentials:
+--set artifactory.persistence.type=aws-s3-v3 \
+--set artifactory.persistence.awsS3V3.region=${AWS_REGION} \
+--set artifactory.persistence.awsS3V3.bucketName=${AWS_S3_BUCKET_NAME} \
+--set artifactory.persistence.awsS3V3.identity=${AWS_ACCESS_KEY_ID} \
+--set artifactory.persistence.awsS3V3.credential=${AWS_SECRET_ACCESS_KEY} \
+...
+``` + +```bash +# With using existing IAM role +--set artifactory.persistence.type=aws-s3-v3 \ +--set artifactory.persistence.awsS3V3.region=${AWS_REGION} \ +--set artifactory.persistence.awsS3V3.bucketName=${AWS_S3_BUCKET_NAME} \ +--set artifactory.annotations.'iam\.amazonaws\.com/role'=${AWS_IAM_ROLE_ARN} +... +``` + +#### Microsoft Azure Blob Storage +To use Azure Blob Storage as the cluster's filestore. See [Azure Blob Storage Binary Provider](https://www.jfrog.com/confluence/display/RTF/Configuring+the+Filestore#ConfiguringtheFilestore-AzureBlobStorageClusterBinaryProvider) +- Pass Azure Blob Storage parameters to `helm install` and `helm upgrade` +```bash +... +--set artifactory.persistence.type=azure-blob \ +--set artifactory.persistence.azureBlob.accountName=${AZURE_ACCOUNT_NAME} \ +--set artifactory.persistence.azureBlob.accountKey=${AZURE_ACCOUNT_KEY} \ +--set artifactory.persistence.azureBlob.endpoint=${AZURE_ENDPOINT} \ +--set artifactory.persistence.azureBlob.containerName=${AZURE_CONTAINER_NAME} \ +... +``` + +#### Custom binarystore.xml +You have an option to provide a custom [binarystore.xml](https://www.jfrog.com/confluence/display/RTF/Configuring+the+Filestore).
+There are two options for this + +1. Editing directly in [values.yaml](values.yaml) +```yaml +artifactory: + persistence: + binarystoreXml: | + + + + + +``` + +2. Create your own [Secret](https://kubernetes.io/docs/concepts/configuration/secret/) and pass it to your `helm install` command +```yaml +# Prepare your custom Secret file (custom-binarystore.yaml) +kind: Secret +apiVersion: v1 +metadata: + name: custom-binarystore + labels: + app: artifactory + chart: artifactory +stringData: + binarystore.xml: |- + + + + +``` + +```bash +# Create a secret from the file +kubectl apply -n artifactory -f ./custom-binarystore.yaml + +# Pass it to your helm install command: +helm install --name artifactory-ha --set artifactory.persistence.customBinarystoreXmlSecret=custom-binarystore jfrog/artifactory-ha +``` + +### Create a unique Master Key +Artifactory HA cluster requires a unique master key. By default the chart has one set in values.yaml (`artifactory.masterKey`). + +**This key is for demo purpose and should not be used in a production environment!** + +You should generate a unique one and pass it to the template at install/upgrade time. +```bash +# Create a key +export MASTER_KEY=$(openssl rand -hex 32) +echo ${MASTER_KEY} + +# Pass the created master key to helm +helm install --name artifactory-ha --set artifactory.masterKey=${MASTER_KEY} jfrog/artifactory-ha +``` + +Alternatively, you can create a secret containing the master key manually and pass it to the template at install/upgrade time. +```bash +# Create a key +export MASTER_KEY=$(openssl rand -hex 32) +echo ${MASTER_KEY} + +# Create a secret containing the key. The key in the secret must be named master-key +kubectl create secret generic my-secret --from-literal=master-key=${MASTER_KEY} + +# Pass the created secret to helm +helm install --name artifactory-ha --set artifactory.masterKeySecretName=my-secret jfrog/artifactory-ha +``` +**NOTE:** In either case, make sure to pass the same master key on all future calls to `helm install` and `helm upgrade`! In the first case, this means always passing `--set artifactory.masterKey=${MASTER_KEY}`. In the second, this means always passing `--set artifactory.masterKeySecretName=my-secret` and ensuring the contents of the secret remain unchanged. + +### Create a unique Join Key +Artifactory requires a unique join key. By default the chart has one set in values.yaml (`artifactory.joinKey`). + +**This key is for demo purpose and should not be used in a production environment!** + +You should generate a unique key and pass it to the template at install/upgrade time. +```bash +# Create a key +export JOIN_KEY=$(openssl rand -hex 16) +echo ${JOIN_KEY} + +# Pass the created master key to helm +helm install --name artifactory --set artifactory.joinKey=${JOIN_KEY} jfrog/artifactory +``` + +**NOTE:** In either case, make sure to pass the same join key on all future calls to `helm install` and `helm upgrade`! This means always passing `--set artifactory.joinKey=${JOIN_KEY}`. + +### Install Artifactory HA license +For activating Artifactory HA, you must install an appropriate license. There are three ways to manage the license. **Artifactory UI**, **REST API**, or a **Kubernetes Secret**. + +The easier and recommended way is the **Artifactory UI**. Using the **Kubernetes Secret** or **REST API** is for advanced users and is better suited for automation. + +**IMPORTANT:** You should use only one of the following methods. 
Switching between them while a cluster is running might disable your Artifactory HA cluster!
+
+##### Artifactory UI
+Once the primary cluster is running, open the Artifactory UI and insert the license(s) in the UI. See [HA installation and setup](https://www.jfrog.com/confluence/display/RTF/HA+Installation+and+Setup) for more details. **Note that you should enter all licenses at once, with each license separated by a newline.** If you add the licenses one at a time, you may get redirected to a node without a license and the UI won't load for that node.
+
+##### REST API
+You can add licenses via the REST API (https://www.jfrog.com/confluence/display/RTF/Artifactory+REST+API#ArtifactoryRESTAPI-InstallHAClusterLicenses). Note that the REST API expects "\n" for the newlines in the licenses.
+
+##### Kubernetes Secret
+You can deploy the Artifactory license(s) as a [Kubernetes secret](https://kubernetes.io/docs/concepts/configuration/secret/).
+Prepare a text file with the license(s) written in it. If writing multiple licenses (must be in the same file), it's important to put **two new lines between each license block**!
+```bash
+# Create the Kubernetes secret (assuming the local license file is 'art.lic')
+kubectl create secret generic artifactory-cluster-license --from-file=./art.lic
+
+# Pass the license to helm
+helm install --name artifactory-ha --set artifactory.license.secret=artifactory-cluster-license,artifactory.license.dataKey=art.lic jfrog/artifactory-ha
+```
+**NOTE:** This method is relevant for initial deployment only! Once Artifactory is deployed, you should not keep passing these parameters, as the license is already persisted into Artifactory's storage (they will be ignored).
+Updating the license should be done via the Artifactory UI or REST API.
+
+##### Create the secret as part of the helm release
+values.yaml
+```yaml
+artifactory:
+  license:
+    licenseKey: |-
+
+
+
+
+
+
+```
+
+```bash
+helm install --name artifactory-ha -f values.yaml jfrog/artifactory-ha
+```
+**NOTE:** This method is relevant for initial deployment only! Once Artifactory is deployed, you should not keep passing these parameters, as the license is already persisted into Artifactory's storage (they will be ignored).
+Updating the license should be done via the Artifactory UI or REST API.
+If you want to keep managing the Artifactory license using the same method, you can use the copyOnEveryStartup example shown in the values.yaml file.
+
+
+### copyOnEveryStartup feature
+Files stored in the `/artifactory-extra-conf` directory are only copied to the `ARTIFACTORY_HOME/etc` directory upon the first startup.
+In some cases, you want your configuration files to be copied to the `ARTIFACTORY_HOME/etc` directory on every startup.
+Two examples for that would be:
+
+1. The binarystore.xml file. If you use the default behaviour, your binarystore.xml configuration will only be copied on the first startup,
+which means that changes you make over time to the `binarystoreXml` configuration will not be applied. In order to make sure your changes are applied on every startup, do the following:
+Create a values file with the following values:
+```yaml
+artifactory:
+  copyOnEveryStartup:
+    - source: /artifactory_extra_conf/binarystore.xml
+      target: etc/
+```
+
+Install the helm chart with the values file you created:
+```bash
+helm upgrade --install artifactory-ha jfrog/artifactory-ha -f values.yaml
+```
+
+2. 
Any custom configuration file you use to configure Artifactory, such as `logback.xml`:
+Create a config map with your `logback.xml` configuration.
+
+Create a values file with the following values:
+```yaml
+artifactory:
+  ## Create a volume pointing to the config map with your configuration file
+  customVolumes: |
+    - name: logback-xml-configmap
+      configMap:
+        name: logback-xml-configmap
+  customVolumeMounts: |
+    - name: logback-xml-configmap
+      mountPath: /tmp/artifactory-logback/
+  copyOnEveryStartup:
+    - source: /tmp/artifactory-logback/*
+      target: etc/
+```
+
+Install the helm chart with the values file you created:
+```bash
+helm upgrade --install artifactory-ha jfrog/artifactory-ha -f values.yaml
+```
+
+### Configure NetworkPolicy
+
+A NetworkPolicy specifies what ingress and egress is allowed in this namespace. You are encouraged to be as specific as possible to increase the security of the system.
+
+In the `networkpolicy` section of values.yaml you can specify a list of NetworkPolicy objects.
+
+For podSelector, ingress and egress, if nothing is provided then a default `- {}` is applied, which allows everything.
+
+A full (but very wide open) example that results in 2 NetworkPolicy objects being created:
+```yaml
+networkpolicy:
+  # Allows all ingress and egress to/from artifactory primary and member pods.
+  - name: artifactory
+    podSelector:
+      matchLabels:
+        app: artifactory-ha
+    egress:
+      - {}
+    ingress:
+      - {}
+  # Allows connectivity from artifactory-ha pods to postgresql pods, but no traffic leaving the postgresql pod.
+  - name: postgresql
+    podSelector:
+      matchLabels:
+        app: postgresql
+    ingress:
+      - from:
+        - podSelector:
+            matchLabels:
+              app: artifactory-ha
+```
+
+### Artifactory JMX Configuration
+You can see information about the exposed MBeans here: https://www.jfrog.com/confluence/display/RTF/Artifactory+JMX+MBeans
+
+Enable JMX in your deployment:
+```bash
+helm install --name artifactory \
+       --set artifactory.primary.javaOpts.jmx.enabled=true \
+       --set artifactory.node.javaOpts.jmx.enabled=true \
+       jfrog/artifactory-ha
+```
+This will enable access to Artifactory with JMX on the default port (9010).
+You have the option to change the port by setting `artifactory.primary.javaOpts.jmx.port` and `artifactory.node.javaOpts.jmx.port`
+to the port of your choice.
+
+In order to connect to Artifactory using JMX with jconsole (or any similar tool) installed on your computer, follow these steps:
+1. Enable JMX as described above and change the Artifactory service to be of type LoadBalancer:
+```bash
+helm install --name artifactory \
+       --set artifactory.primary.javaOpts.jmx.enabled=true \
+       --set artifactory.node.javaOpts.jmx.enabled=true \
+       --set artifactory.service.type=LoadBalancer \
+       jfrog/artifactory-ha
+```
+2. The default setting for java.rmi.server.hostname is the service name (this is also configurable with
+`artifactory.primary.javaOpts.jmx.host` and `artifactory.node.javaOpts.jmx.host`), so in order to connect to Artifactory
+with jconsole you should map the Artifactory Kubernetes service IP to the service name in your hosts file, as such:
+```
+ artifactory-ha--primary
+
+```
+3. Launch jconsole with the service address and port:
+```bash
+jconsole artifactory-ha--primary:
+jconsole :
+```
+
+### Access credentials bootstrapping
+**IMPORTANT:** Bootstrapping access credentials will allow access for the access-admin user from certain IPs.
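+
+The password you provide in the steps below can be any sufficiently strong string. If you need to generate one, a minimal sketch (the variable name is only for illustration):
+```bash
+# Generate a random password to use for the access-admin bootstrap
+export ACCESS_ADMIN_PASSWORD=$(openssl rand -base64 24)
+echo ${ACCESS_ADMIN_PASSWORD}
+```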
+
+* User guide to [bootstrap Artifactory Access credentials](https://www.jfrog.com/confluence/display/ACC/Configuring+Access)
+
+1. Create `access-creds-values.yaml` and provide the IP (by default 127.0.0.1) and password:
+```yaml
+artifactory:
+  accessAdmin:
+    ip: "" # Example: "*"
+    password: ""
+```
+
+2. Apply the `access-creds-values.yaml` file:
+```bash
+helm upgrade --install artifactory-ha jfrog/artifactory-ha -f access-creds-values.yaml
+```
+
+### Bootstrapping Artifactory
+**IMPORTANT:** Bootstrapping Artifactory requires a license. Pass the license as shown in the section above.
+
+* User guide to [bootstrap Artifactory Global Configuration](https://www.jfrog.com/confluence/display/RTF/Configuration+Files#ConfigurationFiles-BootstrappingtheGlobalConfiguration)
+* User guide to [bootstrap Artifactory Security Configuration](https://www.jfrog.com/confluence/display/RTF/Configuration+Files#ConfigurationFiles-BootstrappingtheSecurityConfiguration)
+
+1. Create `bootstrap-config.yaml` with artifactory.config.import.xml and security.import.xml as shown below:
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: my-release-bootstrap-config
+data:
+  artifactory.config.import.xml: |
+
+  security.import.xml: |
+
+```
+
+2. Create the configMap in Kubernetes:
+```bash
+kubectl apply -f bootstrap-config.yaml
+```
+3. Pass the configMap to helm:
+```bash
+helm install --name artifactory-ha --set artifactory.license.secret=artifactory-cluster-license,artifactory.license.dataKey=art.lic,artifactory.configMapName=my-release-bootstrap-config jfrog/artifactory-ha
+```
+
+### Use custom nginx.conf with Nginx
+
+Steps to create a configMap with nginx.conf:
+* Create an `nginx.conf` file and create a configMap from it
+```bash
+kubectl create configmap nginx-config --from-file=nginx.conf
+```
+* Pass the configMap to helm install
+```bash
+helm install --name artifactory-ha --set nginx.customConfigMap=nginx-config jfrog/artifactory-ha
+```
+
+### Scaling your Artifactory cluster
+A key feature in Artifactory HA is the ability to set an initial cluster size with `--set artifactory.node.replicaCount=${CLUSTER_SIZE}` and, if needed, resize it.
+
+##### Before scaling
+**IMPORTANT:** When scaling, you need to explicitly pass the database password if it's an auto-generated one (this is the default with the enclosed PostgreSQL helm chart).
+
+Get the current database password:
+```bash
+export DB_PASSWORD=$(kubectl get $(kubectl get secret -o name | grep postgresql) -o jsonpath="{.data.postgresql-password}" | base64 --decode)
+```
+Use `--set postgresql.postgresqlPassword=${DB_PASSWORD}` with every scale action to prevent a misconfigured cluster!
+
+##### Scale up
+Let's assume you have a cluster with **2** member nodes, and you want to scale up to **3** member nodes (a total of 4 nodes).
+```bash
+# Scale to 4 nodes (1 primary and 3 member nodes)
+helm upgrade --install artifactory-ha --set artifactory.node.replicaCount=3 --set postgresql.postgresqlPassword=${DB_PASSWORD} jfrog/artifactory-ha
+```
+
+##### Scale down
+Let's assume you have a cluster with **3** member nodes, and you want to scale down to **2** member nodes.
+
+```bash
+# Scale down to 2 member nodes
+helm upgrade --install artifactory-ha --set artifactory.node.replicaCount=2 --set postgresql.postgresqlPassword=${DB_PASSWORD} jfrog/artifactory-ha
+```
+- **NOTE:** Since Artifactory is running as a Kubernetes Stateful Set, the removal of the node will **not** remove the persistent volume. 
You need to explicitly remove it:
+```bash
+# List PVCs
+kubectl get pvc
+
+# Remove the PVC with the highest ordinal!
+# In this example, the highest node ordinal was 2, so we need to remove its storage.
+kubectl delete pvc volume-artifactory-node-2
+```
+
+### Use an external Database
+
+#### PostgreSQL
+There are cases where you will want to use an external PostgreSQL instance with a different database name, e.g. `my-artifactory-db`. In that case you need to set a custom PostgreSQL connection URL, where `my-artifactory-db` is the database name.
+
+This can be done with the following parameters:
+```bash
+...
+--set postgresql.enabled=false \
+--set database.type=postgresql \
+--set database.driver=org.postgresql.Driver \
+--set database.url='jdbc:postgresql://${DB_HOST}:${DB_PORT}/my-artifactory-db' \
+--set database.user=${DB_USER} \
+--set database.password=${DB_PASSWORD} \
+...
+```
+**NOTE:** You must set `postgresql.enabled=false` in order for the chart to use the `database.*` parameters. Without it, they will be ignored!
+
+#### Other DB type
+There are cases where you will want to use a different database and not the enclosed **PostgreSQL**.
+See more details on [configuring the database](https://www.jfrog.com/confluence/display/RTF/Configuring+the+Database)
+> The official Artifactory Docker images include the PostgreSQL database driver.
+> For other database types, you will have to add the relevant database driver to Artifactory's tomcat/lib
+
+This can be done with the following parameters:
+```bash
+# Make sure your Artifactory Docker image has the MySQL database driver in it
+...
+--set postgresql.enabled=false \
+--set artifactory.preStartCommand="wget -O /opt/jfrog/artifactory/tomcat/lib/mysql-connector-java-5.1.41.jar https://jcenter.bintray.com/mysql/mysql-connector-java/5.1.41/mysql-connector-java-5.1.41.jar" \
+--set database.type=mysql \
+--set database.driver=com.mysql.jdbc.Driver \
+--set database.url=${DB_URL} \
+--set database.user=${DB_USER} \
+--set database.password=${DB_PASSWORD} \
+...
+```
+**NOTE:** You must set `postgresql.enabled=false` in order for the chart to use the `database.*` parameters. Without it, they will be ignored!
+
+#### Using pre-existing Kubernetes Secret
+If you store your database credentials in a pre-existing Kubernetes `Secret`, you can specify them via `database.secrets` instead of `database.user` and `database.password`:
+```bash
+# Create a secret containing the database credentials
+kubectl create secret generic my-secret --from-literal=user=${DB_USER} --from-literal=password=${DB_PASSWORD}
+...
+--set postgresql.enabled=false \
+--set database.secrets.user.name=my-secret \
+--set database.secrets.user.key=user \
+--set database.secrets.password.name=my-secret \
+--set database.secrets.password.key=password \
+...
+```
+
+### Deleting Artifactory
+To delete the Artifactory HA cluster:
+```bash
+helm delete --purge artifactory-ha
+```
+This will completely delete your Artifactory HA cluster.
+**NOTE:** Since Artifactory is running as Kubernetes Stateful Sets, the removal of the helm release will **not** remove the persistent volumes. 
You need to explicitly remove them:
+```bash
+kubectl delete pvc -l release=artifactory-ha
+```
+See more details in the official [Kubernetes Stateful Set removal page](https://kubernetes.io/docs/tasks/run-application/delete-stateful-set/).
+
+### Custom Docker registry for your images
+If you need to pull your Docker images from a private registry (for example, when you have a custom image with a MySQL database driver), you need to create a
+[Kubernetes Docker registry secret](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/) and pass it to helm
+```bash
+# Create a Docker registry secret called 'regsecret'
+kubectl create secret docker-registry regsecret --docker-server=${DOCKER_REGISTRY} --docker-username=${DOCKER_USER} --docker-password=${DOCKER_PASS} --docker-email=${DOCKER_EMAIL}
+```
+Once created, you pass it to `helm`:
+```bash
+helm install --name artifactory-ha --set imagePullSecrets=regsecret jfrog/artifactory-ha
+```
+
+### Logger sidecars
+This chart provides the option to add sidecars to tail various logs from Artifactory. See the available values in [values.yaml](values.yaml).
+
+Get the list of containers in the pod (the namespace and pod name are placeholders):
+```bash
+kubectl get pods -n <namespace> <pod-name> -o jsonpath='{.spec.containers[*].name}' | tr ' ' '\n'
+```
+
+View a specific log:
+```bash
+kubectl logs -n <namespace> <pod-name> -c <log-container-name>
+```
+
+
+### Custom init containers
+There are cases where a special, unsupported init process is needed, like checking something on the file system or testing something before spinning up the main container.
+
+For this, there is a section for writing custom init containers before and after the predefined init containers in the [values.yaml](values.yaml). By default it's commented out:
+```yaml
+artifactory:
+  ## Add custom init containers executed before predefined init containers
+  customInitContainersBegin: |
+    ## Init containers template goes here ##
+  ## Add custom init containers executed after predefined init containers
+  customInitContainers: |
+    ## Init containers template goes here ##
+```
+
+### Custom sidecar containers
+There are cases where an extra sidecar container is needed. For example, monitoring agents or log collection.
+
+For this, there is a section for writing a custom sidecar container in the [values.yaml](values.yaml). By default it's commented out:
+```yaml
+artifactory:
+  ## Add custom sidecar containers
+  customSidecarContainers: |
+    ## Sidecar containers template goes here ##
+```
+
+You can configure the sidecar to run as a custom user if needed by setting the following in the container template:
+```yaml
+  # Example of running container as root (id 0)
+  securityContext:
+    runAsUser: 0
+    fsGroup: 0
+```
+
+### Custom volumes
+If you need to use a custom volume in a custom init or sidecar container, you can use this option.
+
+For this, there is a section for defining custom volumes in the [values.yaml](values.yaml). By default it's commented out:
+```yaml
+artifactory:
+  ## Add custom volumes
+  customVolumes: |
+    ## Custom volume comes here ##
+```
+
+### Add Artifactory User Plugin during installation
+If you need to add an [Artifactory User Plugin](https://github.com/jfrog/artifactory-user-plugins), you can use this option. 
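+
+If you do not already have the plugin files locally, you can fetch them from the user plugins repository before creating the secrets shown below; a minimal sketch (the path inside the repository is an assumption and may differ per plugin):
+```bash
+# Download a user plugin from the public repository (adjust the path to the plugin you need)
+curl -L -O https://raw.githubusercontent.com/jfrog/artifactory-user-plugins/master/archive/archiveOldArtifacts/archiveOldArtifacts.groovy
+```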
+
+Create a secret with the [Artifactory User Plugin](https://github.com/jfrog/artifactory-user-plugins) using the following commands:
+```bash
+# Secret with a single user plugin
+kubectl create secret generic archive-old-artifacts --from-file=archiveOldArtifacts.groovy --namespace=artifactory-ha
+
+# Secret with a single user plugin with a configuration file
+kubectl create secret generic webhook --from-file=webhook.groovy --from-file=webhook.config.json.sample --namespace=artifactory-ha
+```
+
+Add the plugin secret names to `plugins.yaml` as follows:
+```yaml
+artifactory:
+  userPluginSecrets:
+    - archive-old-artifacts
+    - webhook
+```
+
+You can now pass the created `plugins.yaml` file to the helm install command to deploy Artifactory with user plugins as follows:
+```bash
+helm install --name artifactory-ha -f plugins.yaml jfrog/artifactory-ha
+```
+
+Alternatively, you may be in a situation in which you would like to create a secret in a Helm chart that depends on this chart. In this scenario, the name of the secret is likely dynamically generated via template functions, so passing a statically named secret isn't possible. In this case, the chart supports evaluating strings as templates via the [`tpl`](https://helm.sh/docs/charts_tips_and_tricks/#using-the-tpl-function) function - simply pass the raw string containing the templating language used to name your secret as a value instead by adding the following to your chart's `values.yaml` file:
+```yaml
+artifactory-ha: # Name of the artifactory-ha dependency
+  artifactory:
+    userPluginSecrets:
+      - '{{ template "my-chart.fullname" . }}'
+```
+NOTE: Defining userPluginSecrets overrides any pre-defined plugins from the container image that are stored in /tmp/plugins. At this time [artifactory-pro:6.9.0](https://bintray.com/jfrog/artifactory-pro) is distributed with the `internalUser.groovy` plugin. If you need this plugin in addition to your user plugins, you should include these additional plugins as part of your userPluginSecrets.
+
+### Provide custom configMaps to Artifactory
+If you want to mount a custom file to Artifactory, either an init shell script or a custom configuration file (such as `logback.xml`), you can use this option.
+
+Create a `configmaps.yaml` file with the following content:
+```yaml
+artifactory:
+  configMaps: |
+    logback.xml: |
+
+
+
+          %date [%-5level] \(%-20c{3}:%L\) %message%n
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+    my-custom-post-start-hook.sh: |
+      echo "This is my custom post start hook"
+
+  customVolumeMounts: |
+    - name: artifactory-configmaps
+      mountPath: /tmp/my-config-map
+
+  postStartCommand: |
+    chmod +x /tmp/my-config-map/my-custom-post-start-hook.sh;
+    /tmp/my-config-map/my-custom-post-start-hook.sh;
+
+  copyOnEveryStartup:
+    - source: /tmp/my-config-map/logback.xml
+      target: etc/
+
+```
+
+and use it with your helm install/upgrade:
+```bash
+helm install --name artifactory-ha -f configmaps.yaml jfrog/artifactory-ha
+```
+
+This will, in turn:
+* Create a configMap with the files you specified above
+* Create a volume pointing to the configMap with the name `artifactory-configmaps`
+* Mount said configMap onto `/tmp/my-config-map` using `customVolumeMounts`
+* Set the shell script we mounted as the `postStartCommand`
+* Copy the `logback.xml` file to its proper location in the `$ARTIFACTORY_HOME/etc` directory
+
+
+### Artifactory filebeat
+If you want to collect logs from your Artifactory installation and send them to a central log collection solution like ELK, you can use this option. 
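+
+Once filebeat is enabled with the values file shown below, you can tail the sidecar's own output to confirm it is shipping logs; a minimal sketch (the namespace and pod name are placeholders, and the container name assumes the chart's `filebeat.name` default of `artifactory-filebeat`):
+```bash
+# Tail the filebeat sidecar logs of an Artifactory pod
+kubectl logs -n <namespace> <artifactory-pod-name> -c artifactory-filebeat --tail=50
+```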
+ +Create a `filebeat.yaml` values file with the following content: +```yaml +filebeat: + enabled: true + logstashUrl: + resources: + requests: + memory: "100Mi" + cpu: "100m" + limits: + memory: "100Mi" + cpu: "100m" +``` + +You can optionally customize the `filebeat.yaml` to send output to a different location like so: +```yaml +filebeat: + enabled: true + filebeatYml: | + +``` + +and use it with you helm install/upgrade: +```bash +helm install --name artifactory -f filebeat.yaml jfrog/artifactory +``` + +This will start sending your Artifactory logs to the log aggregator of your choice, based on your configuration in the `filebeatYml` + +## Configuration +The following table lists the configurable parameters of the artifactory chart and their default values. + +| Parameter | Description | Default | +|------------------------------|-----------------------------------|-------------------------------------------------------| +| `imagePullSecrets` | Docker registry pull secret | | +| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` | +| `serviceAccount.name` | The name of the ServiceAccount to create | Generated using the fullname template | +| `serviceAccount.annotations` | Artifactory service account annotations | `` | +| `rbac.create` | Specifies whether RBAC resources should be created | `true` | +| `rbac.role.rules` | Rules to create | `[]` | +| `logger.image.repository` | repository for logger image | `busybox` | +| `logger.image.tag` | tag for logger image | `1.30` | +| `artifactory.name` | Artifactory name | `artifactory` | +| `artifactory.image.pullPolicy` | Container pull policy | `IfNotPresent` | +| `artifactory.image.repository` | Container image | `docker.bintray.io/jfrog/artifactory-pro` | +| `artifactory.image.version` | Container image tag | `.Chart.AppVersion` | +| `artifactory.priorityClass.create` | Create a PriorityClass object | `false` | +| `artifactory.priorityClass.value` | Priority Class value | `1000000000` | +| `artifactory.priorityClass.name` | Priority Class name | `{{ template "artifactory-ha.fullname" . }}` | +| `artifactory.priorityClass.existingPriorityClass` | Use existing priority class | `` | +| `artifactory.loggers` | Artifactory loggers (see values.yaml for possible values) | `[]` | +| `artifactory.catalinaLoggers` | Artifactory Tomcat loggers (see values.yaml for possible values) | `[]` | +| `artifactory.customInitContainersBegin`| Custom init containers to run before existing init containers | | +| `artifactory.customInitContainers`| Custom init containers to run after existing init containers | | +| `artifactory.customSidecarContainers`| Custom sidecar containers | | +| `artifactory.customVolumes` | Custom volumes | | +| `artifactory.customVolumeMounts` | Custom Artifactory volumeMounts | | +| `artifactory.customPersistentPodVolumeClaim` | Custom PVC spec to create and attach a unique PVC for each pod on startup with the volumeClaimTemplates feature in StatefulSet | | +| `artifactory.customPersistentVolumeClaim` | Custom PVC spec to be mounted to the all artifactory containers using a volume | | +| `artifactory.userPluginSecrets` | Array of secret names for Artifactory user plugins | | +| `artifactory.masterKey` | Artifactory master key. A 128-Bit key size (hexadecimal encoded) string (32 hex characters). Can be generated with `openssl rand -hex 16`. 
NOTE: This key can be generated only once and cannot be updated once created |`FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF`| +| `artifactory.masterKeySecretName` | Artifactory Master Key secret name | | +| `artifactory.joinKey` | Join Key to connect other services to Artifactory. Can be generated with `openssl rand -hex 16` | `EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE` | +| `artifactory.accessAdmin.ip` | Artifactory access-admin ip to be set upon startup, can use (*) for 0.0.0.0| 127.0.0.1 | +| `artifactory.accessAdmin.password` | Artifactory access-admin password to be set upon startup| | +| `artifactory.accessAdmin.secret` | Artifactory access-admin secret name | | +| `artifactory.accessAdmin.dataKey` | Artifactory access-admin secret data key | | +| `artifactory.preStartCommand` | Command to run before entrypoint starts | | +| `artifactory.postStartCommand` | Command to run after container starts | | +| `artifactory.license.licenseKey` | Artifactory license key. Providing the license key as a parameter will cause a secret containing the license key to be created as part of the release. Use either this setting or the license.secret and license.dataKey. If you use both, the latter will be used. | | +| `artifactory.configMaps` | configMaps to be created as volume by the name `artifactory-configmaps`. In order to use these configMaps, you will need to add `customVolumeMounts` to point to the created volume and mount it onto a container | | +| `artifactory.license.secret` | Artifactory license secret name | | +| `artifactory.license.dataKey`| Artifactory license secret data key | | +| `artifactory.service.name` | Artifactory service name to be set in Nginx configuration | `artifactory` | +| `artifactory.service.type` | Artifactory service type | `ClusterIP` | +| `artifactory.service.clusterIP`| Specific cluster IP or `None` for headless services | `nil` | +| `artifactory.service.loadBalancerSourceRanges`| Artifactory service array of IP CIDR ranges to whitelist (only when service type is LoadBalancer) | | +| `artifactory.service.annotations` | Artifactory service annotations | `{}` | +| `artifactory.service.pool` | Artifactory instances to be in the load balancing pool. `members` or `all` | `members` | +| `artifactory.externalPort` | Artifactory service external port | `8082` | +| `artifactory.internalPort` | Artifactory service internal port (**DO NOT** use port lower than 1024) | `8082` | +| `artifactory.internalArtifactoryPort` | Artifactory service internal port (**DO NOT** use port lower than 1024) | `8081` | +| `artifactory.externalArtifactoryPort` | Artifactory service external port | `8081` | +| `artifactory.extraEnvironmentVariables` | Extra environment variables to pass to Artifactory. Supports evaluating strings as templates via the [`tpl`](https://helm.sh/docs/charts_tips_and_tricks/#using-the-tpl-function) function. 
See [documentation](https://www.jfrog.com/confluence/display/RTF/Installing+with+Docker#InstallingwithDocker-SupportedEnvironmentVariables) | | +| `artifactory.livenessProbe.enabled` | Enable liveness probe | `true` | +| `artifactory.livenessProbe.path` | liveness probe HTTP Get path | `/router/api/v1/system/health` | +| `artifactory.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 180 | +| `artifactory.livenessProbe.periodSeconds` | How often to perform the probe | 10 | +| `artifactory.livenessProbe.timeoutSeconds` | When the probe times out | 10 | +| `artifactory.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | 1 | +| `artifactory.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 10 | +| `artifactory.readinessProbe.enabled` | would you like a readinessProbe to be enabled | `true` | +| `artifactory.readinessProbe.path` | readiness probe HTTP Get path | `/router/api/v1/system/health` | +| `artifactory.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | 60 | +| `artifactory.readinessProbe.periodSeconds` | How often to perform the probe | 10 | +| `artifactory.readinessProbe.timeoutSeconds` | When the probe times out | 10 | +| `artifactory.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | 1 | +| `artifactory.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 10 | +| `artifactory.copyOnEveryStartup` | List of files to copy on startup from source (which is absolute) to target (which is relative to ARTIFACTORY_HOME | | +| `artifactory.deleteDBPropertiesOnStartup` | Whether to delete the ARTIFACTORY_HOME/etc/db.properties file on startup. Disabling this will remove the ability for the db.properties to be updated with any DB-related environment variables change (e.g. DB_HOST, DB_URL) | `true` | +| `artifactory.database.maxOpenConnections` | Maximum amount of open connections from Artifactory to the DB | `80` | +| `artifactory.haDataDir.enabled` | Enable haDataDir for eventual storage in the HA cluster | `false` | +| `artifactory.haDataDir.path` | Path to the directory intended for use with NFS eventual configuration for HA | | +| `artifactory.persistence.mountPath` | Artifactory persistence volume mount path | `"/var/opt/jfrog/artifactory"` | +| `artifactory.persistence.enabled` | Artifactory persistence volume enabled | `true` | +| `artifactory.persistence.accessMode` | Artifactory persistence volume access mode | `ReadWriteOnce` | +| `artifactory.persistence.size` | Artifactory persistence or local volume size | `200Gi` | +| `artifactory.persistence.binarystore.enabled` | whether you want to mount the binarystore.xml file from a secret created by the chart. If `false` you will need need to get the binarystore.xml file into the file-system from either an `initContainer` or using a `preStartCommand` | `true` | +| `artifactory.persistence.binarystoreXml` | Artifactory binarystore.xml template | See `values.yaml` | +| `artifactory.persistence.customBinarystoreXmlSecret` | A custom Secret for binarystore.xml | `` | +| `artifactory.persistence.maxCacheSize` | Artifactory cache-fs provider maxCacheSize in bytes | `50000000000` | +| `artifactory.persistence.cacheProviderDir` | the root folder of binaries for the filestore cache. 
If the value specified starts with a forward slash ("/") it is considered the fully qualified path to the filestore folder. Otherwise, it is considered relative to the *baseDataDir*. | `cache` | +| `artifactory.persistence.type` | Artifactory HA storage type | `file-system` | +| `artifactory.persistence.redundancy` | Artifactory HA storage redundancy | `3` | +| `artifactory.persistence.nfs.ip` | NFS server IP | | +| `artifactory.persistence.nfs.haDataMount` | NFS data directory | `/data` | +| `artifactory.persistence.nfs.haBackupMount` | NFS backup directory | `/backup` | +| `artifactory.persistence.nfs.dataDir` | HA data directory | `/var/opt/jfrog/artifactory-ha` | +| `artifactory.persistence.nfs.backupDir` | HA backup directory | `/var/opt/jfrog/artifactory-backup` | +| `artifactory.persistence.nfs.capacity` | NFS PVC size | `200Gi` | +| `artifactory.persistence.nfs.mountOptions` | NFS mount options | `[]` | +| `artifactory.persistence.eventual.numberOfThreads` | Eventual number of threads | `10` | +| `artifactory.persistence.googleStorage.endpoint` | Google Storage API endpoint| `storage.googleapis.com` | +| `artifactory.persistence.googleStorage.httpsOnly` | Google Storage API has to be consumed https only| `false` | +| `artifactory.persistence.googleStorage.bucketName` | Google Storage bucket name | `artifactory-ha` | +| `artifactory.persistence.googleStorage.identity` | Google Storage service account id | | +| `artifactory.persistence.googleStorage.credential` | Google Storage service account key | | +| `artifactory.persistence.googleStorage.path` | Google Storage path in bucket | `artifactory-ha/filestore` | +| `artifactory.persistence.googleStorage.bucketExists`| Google Storage bucket exists therefore does not need to be created.| `false` | +| `artifactory.persistence.awsS3.bucketName` | AWS S3 bucket name | `artifactory-ha` | +| `artifactory.persistence.awsS3.endpoint` | AWS S3 bucket endpoint | See https://docs.aws.amazon.com/general/latest/gr/rande.html | +| `artifactory.persistence.awsS3.region` | AWS S3 bucket region | | +| `artifactory.persistence.awsS3.roleName` | AWS S3 IAM role name | | +| `artifactory.persistence.awsS3.identity` | AWS S3 AWS_ACCESS_KEY_ID | | +| `artifactory.persistence.awsS3.credential` | AWS S3 AWS_SECRET_ACCESS_KEY | | +| `artifactory.persistence.awsS3.properties` | AWS S3 additional properties | | +| `artifactory.persistence.awsS3.path` | AWS S3 path in bucket | `artifactory-ha/filestore` | +| `artifactory.persistence.awsS3.refreshCredentials` | AWS S3 renew credentials on expiration | `true` (When roleName is used, this parameter will be set to true) | +| `artifactory.persistence.awsS3.httpsOnly` | AWS S3 https access to the bucket only | `true` | +| `artifactory.persistence.awsS3.testConnection` | AWS S3 test connection on start up | `false` | +| `artifactory.persistence.awsS3.s3AwsVersion` | AWS S3 signature version | `AWS4-HMAC-SHA256` | +| `artifactory.persistence.awsS3V3.testConnection` | AWS S3 test connection on start up | `false` | +| `artifactory.persistence.awsS3V3.identity` | AWS S3 AWS_ACCESS_KEY_ID | | +| `artifactory.persistence.awsS3V3.credential` | AWS S3 AWS_SECRET_ACCESS_KEY | | +| `artifactory.persistence.awsS3V3.region` | AWS S3 bucket region | | +| `artifactory.persistence.awsS3V3.bucketName` | AWS S3 bucket name | `artifactory-aws` | +| `artifactory.persistence.awsS3V3.path` | AWS S3 path in bucket | `artifactory/filestore` | +| `artifactory.persistence.awsS3V3.endpoint` | AWS S3 bucket endpoint | See 
https://docs.aws.amazon.com/general/latest/gr/rande.html | +| `artifactory.persistence.awsS3V3.kmsServerSideEncryptionKeyId` | AWS S3 encryption key ID or alias | | +| `artifactory.persistence.awsS3V3.kmsKeyRegion` | AWS S3 KMS Key region | | +| `artifactory.persistence.awsS3V3.kmsCryptoMode` | AWS S3 KMS encryption mode | See https://www.jfrog.com/confluence/display/RTF/Configuring+the+Filestore#ConfiguringtheFilestore-AmazonS3OfficialSDKTemplate | +| `artifactory.persistence.awsS3V3.useInstanceCredentials` | AWS S3 Use default authentication mechanism | See https://www.jfrog.com/confluence/display/RTF/Configuring+the+Filestore#ConfiguringtheFilestore-authentication | +| `artifactory.persistence.awsS3V3.usePresigning` | AWS S3 Use URL signing | `false` | +| `artifactory.persistence.awsS3V3.signatureExpirySeconds` | AWS S3 Validity period in seconds for signed URLs | `300` | +| `artifactory.persistence.awsS3V3.cloudFrontDomainName` | AWS CloudFront domain name | See https://www.jfrog.com/confluence/display/RTF/Direct+Cloud+Storage+Download#DirectCloudStorageDownload-UsingCloudFront(Optional)| +| `artifactory.persistence.awsS3V3.cloudFrontKeyPairId` | AWS CloudFront key pair ID | See https://www.jfrog.com/confluence/display/RTF/Direct+Cloud+Storage+Download#DirectCloudStorageDownload-UsingCloudFront(Optional)| +| `artifactory.persistence.awsS3V3.cloudFrontPrivateKey` | AWS CloudFront private key | See https://www.jfrog.com/confluence/display/RTF/Direct+Cloud+Storage+Download#DirectCloudStorageDownload-UsingCloudFront(Optional)| +| `artifactory.persistence.azureBlob.accountName` | Azure Blob Storage account name | `` | +| `artifactory.persistence.azureBlob.accountKey` | Azure Blob Storage account key | `` | +| `artifactory.persistence.azureBlob.endpoint` | Azure Blob Storage endpoint | `` | +| `artifactory.persistence.azureBlob.containerName` | Azure Blob Storage container name | `` | +| `artifactory.persistence.azureBlob.testConnection` | Azure Blob Storage test connection | `false` | +| `artifactory.persistence.fileSystem.existingSharedClaim` | Enable using an existing shared pvc | `false` | +| `artifactory.persistence.fileStorage.dataDir` | HA data directory | `/var/opt/jfrog/artifactory/artifactory-data` | +| `artifactory.persistence.fileStorage.backupDir` | HA backup directory | `/var/opt/jfrog/artifactory-backup` | +| `artifactory.javaOpts.other` | Artifactory additional java options (for all nodes) | | +| `artifactory.primary.labels` | Artifactory primary node labels | `{}` | +| `artifactory.primary.resources.requests.memory` | Artifactory primary node initial memory request | | +| `artifactory.primary.resources.requests.cpu` | Artifactory primary node initial cpu request | | +| `artifactory.primary.resources.limits.memory` | Artifactory primary node memory limit | | +| `artifactory.primary.resources.limits.cpu` | Artifactory primary node cpu limit | | +| `artifactory.primary.javaOpts.xms` | Artifactory primary node java Xms size | | +| `artifactory.primary.javaOpts.xmx` | Artifactory primary node java Xms size | | +| `artifactory.primary.javaOpts.corePoolSize` | The number of async processes that can run in parallel in the primary node - https://jfrog.com/knowledge-base/how-do-i-tune-artifactory-for-heavy-loads/ | `16` | +| `artifactory.primary.javaOpts.jmx.enabled` | Enable JMX monitoring | `false` | +| `artifactory.primary.javaOpts.jmx.port` | JMX Port number | `9010` | +| `artifactory.primary.javaOpts.jmx.host` | JMX hostname (parsed as a helm template) | `{{ template 
"artifactory-ha.primary.name" $ }}` | +| `artifactory.primary.javaOpts.jmx.ssl` | Enable SSL | `false` | +| `artifactory.primary.javaOpts.jmx.authenticate` | Enable JMX authentication | `false` | +| `artifactory.primary.javaOpts.jmx.accessFile` | The path to the JMX access file, when JMX authentication is enabled | | +| `artifactory.primary.javaOpts.jmx.passwordFile` | The path to the JMX password file, when JMX authentication is enabled | | +| `artifactory.primary.javaOpts.other` | Artifactory primary node additional java options | | +| `artifactory.primary.persistence.existingClaim` | Whether to use an existing pvc for the primary node | `false` | +| `artifactory.node.labels` | Artifactory member node labels | `{}` | +| `artifactory.node.replicaCount` | Artifactory member node replica count | `2` | +| `artifactory.node.minAvailable` | Artifactory member node min available count | `1` | +| `artifactory.node.resources.requests.memory` | Artifactory member node initial memory request | | +| `artifactory.node.resources.requests.cpu` | Artifactory member node initial cpu request | | +| `artifactory.node.resources.limits.memory` | Artifactory member node memory limit | | +| `artifactory.node.resources.limits.cpu` | Artifactory member node cpu limit | | +| `artifactory.node.javaOpts.xms` | Artifactory member node java Xms size | | +| `artifactory.node.javaOpts.xmx` | Artifactory member node java Xms size | | +| `artifactory.node.javaOpts.corePoolSize` | The number of async processes that can run in parallel in the member nodes - https://jfrog.com/knowledge-base/how-do-i-tune-artifactory-for-heavy-loads/ | `16` | +| `artifactory.node.javaOpts.jmx.enabled` | Enable JMX monitoring | `false` | +| `artifactory.node.javaOpts.jmx.port` | JMX Port number | `9010` | +| `artifactory.node.javaOpts.jmx.host` | JMX hostname (parsed as a helm template) | `{{ template "artifactory-ha.fullname" $ }}` | +| `artifactory.node.javaOpts.jmx.ssl` | Enable SSL | `false` | +| `artifactory.node.javaOpts.jmx.authenticate` | Enable JMX authentication | `false` | +| `artifactory.node.javaOpts.jmx.accessFile` | The path to the JMX access file, when JMX authentication is enabled | | +| `artifactory.node.javaOpts.jmx.passwordFile` | The path to the JMX password file, when JMX authentication is enabled | | +| `artifactory.node.javaOpts.other` | Artifactory member node additional java options | | +| `artifactory.node.persistence.existingClaim` | Whether to use existing PVCs for the member nodes | `false` | +| `artifactory.node.waitForPrimaryStartup.enabled` | Whether to wait for the primary node to start before starting up the member nodes | `false` | +| `artifactory.node.waitForPrimaryStartup.time` | The amount of time to wait for the primary node to start before starting up the member nodes | `60` | +| `artifactory.systemYaml` | Artifactory system configuration (`system.yaml`) as described here - https://www.jfrog.com/confluence/display/JFROG/Artifactory+System+YAML | `see values.yaml` | +| `access.database.maxOpenConnections` | Maximum amount of open connections from Access to the DB | `80` | +| `initContainers.resources.requests.memory` | Init containers initial memory request | | +| `initContainers.resources.requests.cpu` | Init containers initial cpu request | | +| `initContainers.resources.limits.memory` | Init containers memory limit | | +| `initContainers.resources.limits.cpu` | Init containers cpu limit | | +| `ingress.enabled` | If true, Artifactory Ingress will be created | `false` | +| `ingress.annotations` | 
Artifactory Ingress annotations | `{}` | +| `ingress.labels` | Artifactory Ingress labels | `{}` | +| `ingress.hosts` | Artifactory Ingress hostnames | `[]` | +| `ingress.routerPath` | Router Ingress path | `/` | +| `ingress.artifactoryPath` | Artifactory Ingress path | `/artifactory` | +| `ingress.tls` | Artifactory Ingress TLS configuration (YAML) | `[]` | +| `ingress.defaultBackend.enabled` | If true, the default `backend` will be added using serviceName and servicePort | `true` | +| `ingress.annotations` | Ingress annotations, which are written out if annotations section exists in values. Everything inside of the annotations section will appear verbatim inside the resulting manifest. See `Ingress annotations` section below for examples of how to leverage the annotations, specifically for how to enable docker authentication. | | +| `ingress.additionalRules` | Ingress additional rules to be added to the Artifactory ingress. | `[]` | +| `nginx.enabled` | Deploy nginx server | `true` | +| `nginx.name` | Nginx name | `nginx` | +| `nginx.replicaCount` | Nginx replica count | `1` | +| `nginx.uid` | Nginx User Id | `104` | +| `nginx.gid` | Nginx Group Id | `107` | +| `nginx.image.repository` | Container image | `docker.bintray.io/jfrog/nginx-artifactory-pro` | +| `nginx.image.version` | Container version | `.Chart.AppVersion` | +| `nginx.image.pullPolicy` | Container pull policy | `IfNotPresent` | +| `nginx.labels` | Nginx deployment labels | `{}` | +| `nginx.loggers` | Artifactory loggers (see values.yaml for possible values) | `[]` | +| `nginx.mainConf` | Content of the Artifactory nginx main nginx.conf config file | `see values.yaml` | +| `nginx.artifactoryConf` | Content of Artifactory nginx artifactory.conf config file | `see values.yaml` | +| `nginx.service.type` | Nginx service type | `LoadBalancer` | +| `nginx.service.clusterIP` | Specific cluster IP or `None` for headless services | `nil` | +| `nginx.service.loadBalancerSourceRanges`| Nginx service array of IP CIDR ranges to whitelist (only when service type is LoadBalancer) | | +| `nginx.service.labels` | Nginx service labels | `{}` | +| `nginx.service.annotations` | Nginx service annotations | `{}` | +| `nginx.service.externalTrafficPolicy`| Nginx service desires to route external traffic to node-local or cluster-wide endpoints. 
| `Cluster` | +| `nginx.loadBalancerIP`| Provide Static IP to configure with Nginx | | +| `nginx.http.enabled` | Nginx http service enabled/disabled | true | +| `nginx.http.externalPort` | Nginx service external port | `80` | +| `nginx.http.internalPort` | Nginx service internal port | `80` | +| `nginx.https.enabled` | Nginx http service enabled/disabled | true | +| `nginx.https.externalPort` | Nginx service external port | `443` | +| `nginx.https.internalPort` | Nginx service internal port | `443` | +| `nginx.externalPortHttp` | DEPRECATED: Nginx service external port | `80` | +| `nginx.internalPortHttp` | DEPRECATED: Nginx service internal port | `80` | +| `nginx.externalPortHttps` | DEPRECATED: Nginx service external port | `443` | +| `nginx.internalPortHttps` | DEPRECATED: Nginx service internal port | `443` | +| `nginx.livenessProbe.enabled` | would you like a liveness Probe to be enabled | `true` | +| `nginx.livenessProbe.path` | liveness probe HTTP Get path | `/router/api/v1/system/health` | +| `nginx.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 100 | +| `nginx.livenessProbe.periodSeconds` | How often to perform the probe | 10 | +| `nginx.livenessProbe.timeoutSeconds` | When the probe times out | 10 | +| `nginx.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | 1 | +| `nginx.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 10 | +| `nginx.readinessProbe.enabled` | would you like a readinessProbe to be enabled | `true` | +| `nginx.readinessProbe.path` | Readiness probe HTTP Get path | `/router/api/v1/system/health` | +| `nginx.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | 60 | +| `nginx.readinessProbe.periodSeconds` | How often to perform the probe | 10 | +| `nginx.readinessProbe.timeoutSeconds` | When the probe times out | 10 | +| `nginx.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | 1 | +| `nginx.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 10 | +| `nginx.tlsSecretName` | SSL secret that will be used by the Nginx pod | | +| `nginx.customConfigMap` | Nginx CustomeConfigMap name for `nginx.conf` | ` ` | +| `nginx.customArtifactoryConfigMap`| Nginx CustomeConfigMap name for `artifactory-ha.conf` | ` ` | +| `nginx.resources.requests.memory` | Nginx initial memory request | `250Mi` | +| `nginx.resources.requests.cpu` | Nginx initial cpu request | `100m` | +| `nginx.resources.limits.memory` | Nginx memory limit | `250Mi` | +| `nginx.resources.limits.cpu` | Nginx cpu limit | `500m` | +| `nginx.persistence.mountPath` | Nginx persistence volume mount path | `"/var/opt/jfrog/nginx"` | +| `nginx.persistence.enabled` | Nginx persistence volume enabled. 
This is only available when the nginx.replicaCount is set to 1 | `false` | +| `nginx.persistence.accessMode` | Nginx persistence volume access mode | `ReadWriteOnce` | +| `nginx.persistence.size` | Nginx persistence volume size | `5Gi` | +| `waitForDatabase` | Wait for database (using wait-for-db init container) | `true` | +| `postgresql.enabled` | Use enclosed PostgreSQL as database | `true` | +| `postgresql.imageTag` | PostgreSQL version | `9.6.11` | +| `postgresql.postgresqlDatabase` | PostgreSQL database name | `artifactory` | +| `postgresql.postgresqlUsername` | PostgreSQL database user | `artifactory` | +| `postgresql.postgresqlPassword` | PostgreSQL database password | | +| `postgresql.persistence.enabled` | PostgreSQL use persistent storage | `true` | +| `postgresql.persistence.size` | PostgreSQL persistent storage size | `50Gi` | +| `postgresql.service.port` | PostgreSQL database port | `5432` | +| `postgresql.resources.requests.memory` | PostgreSQL initial memory request | | +| `postgresql.resources.requests.cpu` | PostgreSQL initial cpu request | | +| `postgresql.resources.limits.memory` | PostgreSQL memory limit | | +| `postgresql.resources.limits.cpu` | PostgreSQL cpu limit | | +| `database.type` | External database type (`postgresql`, `mysql`, `oracle` or `mssql`) | | +| `database.driver` | External database driver e.g. `org.postgresql.Driver` | | +| `database.url` | External database connection URL | | +| `database.user` | External database username | | +| `database.password` | External database password | | +| `database.secrets.user.name` | External database username `Secret` name | | +| `database.secrets.user.key` | External database username `Secret` key | | +| `database.secrets.password.name` | External database password `Secret` name | | +| `database.secrets.password.key` | External database password `Secret` key | | +| `database.secrets.url.name ` | External database url `Secret` name | | +| `database.secrets.url.key` | External database url `Secret` key | | +| `networkpolicy.name` | Becomes part of the NetworkPolicy object name | `artifactory` | +| `networkpolicy.podselector` | Contains the YAML that specifies how to match pods. Usually using matchLabels. | | +| `networkpolicy.ingress` | YAML snippet containing to & from rules applied to incoming traffic | `- {}` (open to all inbound traffic) | +| `networkpolicy.egress` | YAML snippet containing to & from rules applied to outgoing traffic | `- {}` (open to all outbound traffic) | +| `filebeat.enabled` | Enable a filebeat container to send your logs to a log management solution like ELK | `false` | +| `filebeat.name` | filebeat container name | `artifactory-filebeat` | +| `filebeat.image.repository` | filebeat Docker image repository | `docker.elastic.co/beats/filebeat` | +| `filebeat.image.version` | filebeat Docker image version | `7.5.1` | +| `filebeat.logstashUrl` | The URL to the central Logstash service, if you have one | `logstash:5044` | +| `filebeat.livenessProbe.exec.command` | liveness probe exec command | see [values.yaml](stable/artifactory-ha/values.yaml) | +| `filebeat.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| 10 | +| `filebeat.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 180 | +| `filebeat.livenessProbe.periodSeconds` | How often to perform the probe | 10 | +| `filebeat.readinessProbe.exec.command` | readiness probe exec command | see [values.yaml](stable/artifactory-ha/values.yaml) | +| `filebeat.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 10 | +| `filebeat.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | 180 | +| `filebeat.readinessProbe.periodSeconds` | How often to perform the probe | 10 | +| `filebeat.resources.requests.memory` | Filebeat initial memory request | | +| `filebeat.resources.requests.cpu` | Filebeat initial cpu request | | +| `filebeat.resources.limits.memory` | Filebeat memory limit | | +| `filebeat.resources.limits.cpu` | Filebeat cpu limit | | +| `filebeat.filebeatYml` | Filebeat yaml configuration file | see [values.yaml](stable/artifactory-ha/values.yaml) | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. + +### Ingress and TLS +To get Helm to create an ingress object with a hostname, add these two lines to your Helm command: +```bash +helm install --name artifactory-ha \ + --set ingress.enabled=true \ + --set ingress.hosts[0]="artifactory.company.com" \ + --set artifactory.service.type=NodePort \ + --set nginx.enabled=false \ + jfrog/artifactory-ha +``` + +If your cluster allows automatic creation/retrieval of TLS certificates (e.g. [cert-manager](https://github.com/jetstack/cert-manager)), please refer to the documentation for that mechanism. + +To manually configure TLS, first create/retrieve a key & certificate pair for the address(es) you wish to protect. Then create a TLS secret in the namespace: + +```bash +kubectl create secret tls artifactory-tls --cert=path/to/tls.cert --key=path/to/tls.key +``` + +Include the secret's name, along with the desired hostnames, in the Artifactory Ingress TLS section of your custom `values.yaml` file: + +```yaml + ingress: + ## If true, Artifactory Ingress will be created + ## + enabled: true + + ## Artifactory Ingress hostnames + ## Must be provided if Ingress is enabled + ## + hosts: + - artifactory.domain.com + annotations: + kubernetes.io/tls-acme: "true" + ## Artifactory Ingress TLS configuration + ## Secrets must be manually created in the namespace + ## + tls: + - secretName: artifactory-tls + hosts: + - artifactory.domain.com +``` + +### Ingress annotations + +This example specifically enables Artifactory to work as a Docker Registry using the Repository Path method. See [Artifactory as Docker Registry](https://www.jfrog.com/confluence/display/RTF/Getting+Started+with+Artifactory+as+a+Docker+Registry) documentation for more information about this setup. 
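+
+Once the ingress below is applied, you can sanity-check the Docker endpoint from a workstation; a minimal sketch (the hostname and repository key are illustrative):
+```bash
+# Log in and pull an image through the ingress using the Repository Path method
+docker login myhost.example.com
+docker pull myhost.example.com/docker-local/alpine:latest
+```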
+
+```yaml
+ingress:
+  enabled: true
+  defaultBackend:
+    enabled: false
+  hosts:
+    - myhost.example.com
+  annotations:
+    ingress.kubernetes.io/force-ssl-redirect: "true"
+    ingress.kubernetes.io/proxy-body-size: "0"
+    ingress.kubernetes.io/proxy-read-timeout: "600"
+    ingress.kubernetes.io/proxy-send-timeout: "600"
+    kubernetes.io/ingress.class: nginx
+    nginx.ingress.kubernetes.io/configuration-snippet: |
+      rewrite ^/(v2)/token /artifactory/api/docker/null/v2/token;
+      rewrite ^/(v2)/([^\/]*)/(.*) /artifactory/api/docker/$2/$1/$3;
+    nginx.ingress.kubernetes.io/proxy-body-size: "0"
+  tls:
+    - hosts:
+      - "myhost.example.com"
+```
+
+### Ingress additional rules
+
+You have the option to add additional ingress rules to the Artifactory ingress. An example of this use case is routing the /xray path to Xray.
+In order to do that, simply add the following to an `artifactory-ha-values.yaml` file:
+```yaml
+ingress:
+  enabled: true
+
+  defaultBackend:
+    enabled: false
+
+  annotations:
+    kubernetes.io/ingress.class: nginx
+    nginx.ingress.kubernetes.io/configuration-snippet: |
+      rewrite "(?i)/xray(/|$)(.*)" /$2 break;
+
+  additionalRules: |
+    - host:
+      http:
+        paths:
+          - path: /
+            backend:
+              serviceName:
+              servicePort:
+          - path: /xray
+            backend:
+              serviceName:
+              servicePort:
+          - path: /artifactory
+            backend:
+              serviceName: {{ template "artifactory.nginx.fullname" . }}
+              servicePort: {{ .Values.nginx.externalPortHttp }}
+```
+
+and running:
+```bash
+helm upgrade --install artifactory-ha jfrog/artifactory-ha -f artifactory-ha-values.yaml
+```
+
+
+
+## Useful links
+- https://www.jfrog.com/confluence/display/EP/Getting+Started
+- https://www.jfrog.com/confluence/display/RTF/Installing+Artifactory
+- https://www.jfrog.com/confluence/
diff --git a/Openshift4/artifactoryha-helm/ReverseProxyConfiguration.md b/Openshift4/artifactoryha-helm/ReverseProxyConfiguration.md
new file mode 100755
index 0000000..8515932
--- /dev/null
+++ b/Openshift4/artifactoryha-helm/ReverseProxyConfiguration.md
@@ -0,0 +1,140 @@
+# JFrog Artifactory Reverse Proxy Settings using Nginx
+
+#### Reverse Proxy
+* To use Artifactory as a Docker registry, using a reverse proxy is mandatory.
+* Artifactory provides a Reverse Proxy Configuration Generator screen in which you can fill in a set of fields to generate
+the required configuration snippet, which you can then download and install directly in the corresponding directory of your reverse proxy server.
+* To learn about configuring NGINX or Apache as a reverse proxy, refer to the documentation on the [JFrog wiki](https://www.jfrog.com/confluence/display/RTF/Configuring+a+Reverse+Proxy)
+* By default, the Artifactory helm chart uses Nginx for reverse proxy and load balancing.
+
+**Note**: The Nginx image distributed with the Artifactory helm chart is a custom image managed and maintained by JFrog.
+
+#### Features of Artifactory Nginx
+* Provides a default configuration with a self-signed SSL certificate generated on each helm install/upgrade.
+* Persists configuration and the SSL certificate in the `/var/opt/jfrog/nginx` directory
+
+#### Changing the default Artifactory nginx conf
+Use a values.yaml file to change the value of `nginx.mainConf` or `nginx.artifactoryConf`.
+These configurations will be mounted into the nginx container using a configMap.
+For example:
+1. 
Create a values file `nginx-values.yaml` with the following values:
+```yaml
+nginx:
+  artifactoryConf: |
+    ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3;
+    ssl_certificate {{ .Values.nginx.persistence.mountPath }}/ssl/tls.crt;
+    ssl_certificate_key {{ .Values.nginx.persistence.mountPath }}/ssl/tls.key;
+    ssl_session_cache shared:SSL:1m;
+    ssl_prefer_server_ciphers on;
+    ## server configuration
+    server {
+      listen {{ .Values.nginx.internalPortHttps }} ssl;
+      listen {{ .Values.nginx.internalPortHttp }} ;
+      ## Change to the DNS name you use to access Artifactory
+      server_name ~(?<repo>.+)\.{{ include "artifactory-ha.fullname" . }} {{ include "artifactory-ha.fullname" . }};
+
+      if ($http_x_forwarded_proto = '') {
+        set $http_x_forwarded_proto  $scheme;
+      }
+      ## Application specific logs
+      ## access_log /var/log/nginx/artifactory-access.log timing;
+      ## error_log /var/log/nginx/artifactory-error.log;
+      rewrite ^/$ /artifactory/webapp/ redirect;
+      rewrite ^/artifactory/?(/webapp)?$ /artifactory/webapp/ redirect;
+      if ( $repo != "" ) {
+        rewrite ^/(v1|v2)/(.*) /artifactory/api/docker/$repo/$1/$2 break;
+      }
+      rewrite ^/(v1|v2)/([^/]+)(.*)$ /artifactory/api/docker/$2/$1/$3;
+      rewrite ^/(v1|v2)/ /artifactory/api/docker/$1/;
+      chunked_transfer_encoding on;
+      client_max_body_size 0;
+      location /artifactory/ {
+        proxy_read_timeout  900;
+        proxy_pass_header   Server;
+        proxy_cookie_path   ~*^/.* /;
+        if ( $request_uri ~ ^/artifactory/(.*)$ ) {
+          proxy_pass          http://{{ include "artifactory-ha.fullname" . }}:{{ .Values.artifactory.externalPort }}/artifactory/$1;
+        }
+        proxy_pass          http://{{ include "artifactory-ha.fullname" . }}:{{ .Values.artifactory.externalPort }}/artifactory/;
+        proxy_set_header    X-Artifactory-Override-Base-Url $http_x_forwarded_proto://$host:$server_port/artifactory;
+        proxy_set_header    X-Forwarded-Port  $server_port;
+        proxy_set_header    X-Forwarded-Proto $http_x_forwarded_proto;
+        proxy_set_header    Host              $http_host;
+        proxy_set_header    X-Forwarded-For   $proxy_add_x_forwarded_for;
+      }
+    }
+```
+
+2. Install/upgrade Artifactory:
+```bash
+helm upgrade --install artifactory-ha jfrog/artifactory-ha -f nginx-values.yaml
+```
+
+
+#### Steps to use a static configuration for the reverse proxy in nginx
+1. Get the Artifactory service name using this command: `kubectl get svc -n $NAMESPACE`
+
+2. Create an `artifactory.conf` file with the nginx configuration. More [nginx configuration examples](https://github.com/jfrog/artifactory-docker-examples/tree/master/files/nginx/conf.d)
+
+   Following is an example `artifactory.conf`
+
+   **Note**:
+   * Create the file with the name `artifactory.conf`, as this configMap key is fixed.
+   * Replace `artifactory-artifactory` with the service name taken from step 1. 
+
+   ```bash
+   ## add ssl entries when https has been set in config
+   ssl_certificate /var/opt/jfrog/nginx/ssl/tls.crt;
+   ssl_certificate_key /var/opt/jfrog/nginx/ssl/tls.key;
+   ssl_session_cache shared:SSL:1m;
+   ssl_prefer_server_ciphers on;
+   ## server configuration
+   server {
+     listen 443 ssl;
+     listen 80;
+     ## Change to your DNS name you use to access Artifactory
+     server_name ~(?<repo>.+)\.artifactory-artifactory artifactory-artifactory;
+
+     if ($http_x_forwarded_proto = '') {
+       set $http_x_forwarded_proto $scheme;
+     }
+     ## Application specific logs
+     ## access_log /var/log/nginx/artifactory-access.log timing;
+     ## error_log /var/log/nginx/artifactory-error.log;
+     rewrite ^/$ /artifactory/webapp/ redirect;
+     rewrite ^/artifactory/?(/webapp)?$ /artifactory/webapp/ redirect;
+     if ( $repo != "" ) {
+       rewrite ^/(v1|v2)/(.*) /artifactory/api/docker/$repo/$1/$2 break;
+     }
+     rewrite ^/(v1|v2)/([^/]+)(.*)$ /artifactory/api/docker/$2/$1/$3;
+     rewrite ^/(v1|v2)/ /artifactory/api/docker/$1/;
+     chunked_transfer_encoding on;
+     client_max_body_size 0;
+     location /artifactory/ {
+       proxy_read_timeout 900;
+       proxy_pass_header Server;
+       proxy_cookie_path ~*^/.* /;
+       if ( $request_uri ~ ^/artifactory/(.*)$ ) {
+         proxy_pass http://artifactory-artifactory:8081/artifactory/$1 break;
+       }
+       proxy_pass http://artifactory-artifactory:8081/artifactory/;
+       proxy_set_header X-Artifactory-Override-Base-Url $http_x_forwarded_proto://$host:$server_port/artifactory;
+       proxy_set_header X-Forwarded-Port $server_port;
+       proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
+       proxy_set_header Host $http_host;
+       proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+     }
+   }
+   ```
+
+3. Create a configMap from the `artifactory.conf` file created in the step above.
+   ```bash
+   kubectl create configmap art-nginx-conf --from-file=artifactory.conf
+   ```
+4. Deploy Artifactory using the helm chart.
+   You can achieve this by providing the name of the configMap created above in `nginx.customArtifactoryConfigMap` in [values.yaml](values.yaml)
+
+   The following command sets the value at runtime:
+   ```bash
+   helm install --name artifactory-ha --set nginx.customArtifactoryConfigMap=art-nginx-conf jfrog/artifactory-ha
+   ```
\ No newline at end of file
diff --git a/Openshift4/artifactoryha-helm/UPGRADE_NOTES.md b/Openshift4/artifactoryha-helm/UPGRADE_NOTES.md
new file mode 100755
index 0000000..640474f
--- /dev/null
+++ b/Openshift4/artifactoryha-helm/UPGRADE_NOTES.md
@@ -0,0 +1,33 @@
+# JFrog Artifactory Chart Upgrade Notes
+This file describes special upgrade notes needed at specific versions
+
+## Upgrade from 0.X to 1.X
+**DOWNTIME IS REQUIRED FOR AN UPGRADE!**
+* If this is a new deployment or you already use an external database (`postgresql.enabled=false`), these changes **do not affect you!**
+* The PostgreSQL sub-chart was upgraded to version `6.5.x`. This version is not backward compatible with the old version (`0.9.5`)!
+* Note the following **PostgreSQL** Helm chart changes:
+  * The chart configuration has changed! See [values.yaml](values.yaml) for the new keys used.
+  * **PostgreSQL** is deployed as a StatefulSet
+  * See the [PostgreSQL helm chart](https://hub.helm.sh/charts/stable/postgresql) for all available configurations
+* Upgrade
+  * Due to breaking changes in the **PostgreSQL** Helm chart, the database must be migrated from the old deployment to the new one
+  * The recommended migration process is the [full system export and import](https://www.jfrog.com/confluence/display/RTF/Importing+and+Exporting)
+  * **NOTE:** To save time, export only metadata and configuration (check `Exclude Content` in the `System Import & Export`), since the Artifactory filestore is persisted
+  * Upgrade steps:
+    1. Block user access to Artifactory (do not shut it down)
+       a. Scale down the cluster to the primary node only (`node.replicaCount=0`) so the exported db and configuration will be kept on one known node (the primary)
+       b. If your Artifactory HA K8s service is set to member nodes only (`service.pool=members`), you will need to access the primary node directly (use `kubectl port-forward`)
+    2. Perform `Export System` from the `Admin` -> `Import & Export` -> `System` -> `Export System`
+       a. Check `Exclude Content` to reduce the export size (the Artifactory filestore will persist across the upgrade)
+       b. Choose to save the export on the persisted Artifactory volume (`/var/opt/jfrog/artifactory/`)
+       c. Click `Export` (this can take some time)
+    3. Run `helm upgrade` with the new version. The old PostgreSQL will be removed and a new one deployed
+       a. You must pass an explicit "ready for upgrade" flag with `--set databaseUpgradeReady=yes`. Failing to provide this will block the upgrade!
+    4. Once ready, open the Artifactory UI (you might need to re-enter a valid license). Skip all onboarding wizard steps
+       a. **NOTE:** Don't worry if you can't see the old config and files; they will all be restored by the system import in the next step
+    5. Perform `Import System` from the `Admin` -> `Import & Export` -> `System` -> `Import System`
+       a. Browse to where the export was saved on the Artifactory volume (`/var/opt/jfrog/artifactory/`)
+       b. Click `Import` (this can take some time)
+    6. Restore access to Artifactory
+       a. Scale the cluster member nodes back to the original size
+  * Artifactory should now be ready to get back to normal operation
diff --git a/Openshift4/artifactoryha-helm/charts/postgresql/.helmignore b/Openshift4/artifactoryha-helm/charts/postgresql/.helmignore
new file mode 100755
index 0000000..a1c17ae
--- /dev/null
+++ b/Openshift4/artifactoryha-helm/charts/postgresql/.helmignore
@@ -0,0 +1,2 @@
+.git
+OWNERS
\ No newline at end of file
diff --git a/Openshift4/artifactoryha-helm/charts/postgresql/Chart.yaml b/Openshift4/artifactoryha-helm/charts/postgresql/Chart.yaml
new file mode 100755
index 0000000..4687eb3
--- /dev/null
+++ b/Openshift4/artifactoryha-helm/charts/postgresql/Chart.yaml
@@ -0,0 +1,23 @@
+apiVersion: v1
+appVersion: 11.5.0
+description: Chart for PostgreSQL, an object-relational database management system
+  (ORDBMS) with an emphasis on extensibility and on standards-compliance.
+engine: gotpl +home: https://www.postgresql.org/ +icon: https://bitnami.com/assets/stacks/postgresql/img/postgresql-stack-110x117.png +keywords: +- postgresql +- postgres +- database +- sql +- replication +- cluster +maintainers: +- email: containers@bitnami.com + name: Bitnami +- email: cedric@desaintmartin.fr + name: desaintmartin +name: postgresql +sources: +- https://github.com/bitnami/bitnami-docker-postgresql +version: 7.0.1 diff --git a/Openshift4/artifactoryha-helm/charts/postgresql/README.md b/Openshift4/artifactoryha-helm/charts/postgresql/README.md new file mode 100755 index 0000000..e6621b4 --- /dev/null +++ b/Openshift4/artifactoryha-helm/charts/postgresql/README.md @@ -0,0 +1,487 @@ +# PostgreSQL + +[PostgreSQL](https://www.postgresql.org/) is an object-relational database management system (ORDBMS) with an emphasis on extensibility and on standards-compliance. + +## TL;DR; + +```console +$ helm install stable/postgresql +``` + +## Introduction + +This chart bootstraps a [PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This chart has been tested to work with NGINX Ingress, cert-manager, fluentd and Prometheus on top of the [BKPR](https://kubeprod.io/). + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 2.11+ or Helm 3.0-beta3+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart +To install the chart with the release name `my-release`: + +```console +$ helm install --name my-release stable/postgresql +``` + +The command deploys PostgreSQL on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters + +The following tables lists the configurable parameters of the PostgreSQL chart and their default values. 
+ +| Parameter | Description | Default | +| --------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------- | +| `global.imageRegistry` | Global Docker Image registry | `nil` | +| `global.postgresql.postgresqlDatabase` | PostgreSQL database (overrides `postgresqlDatabase`) | `nil` | +| `global.postgresql.postgresqlUsername` | PostgreSQL username (overrides `postgresqlUsername`) | `nil` | +| `global.postgresql.existingSecret` | Name of existing secret to use for PostgreSQL passwords (overrides `existingSecret`) | `nil` | +| `global.postgresql.postgresqlPassword` | PostgreSQL admin password (overrides `postgresqlPassword`) | `nil` | +| `global.postgresql.servicePort` | PostgreSQL port (overrides `service.port`) | `nil` | +| `global.postgresql.replicationPassword` | Replication user password (overrides `replication.password`) | `nil` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `global.storageClass` | Global storage class for dynamic provisioning | `nil` | +| `image.registry` | PostgreSQL Image registry | `docker.io` | +| `image.repository` | PostgreSQL Image name | `bitnami/postgresql` | +| `image.tag` | PostgreSQL Image tag | `{TAG_NAME}` | +| `image.pullPolicy` | PostgreSQL Image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify Image pull secrets | `nil` (does not add image pull secrets to deployed pods) | +| `image.debug` | Specify if debug values should be set | `false` | +| `nameOverride` | String to partially override postgresql.fullname template with a string (will prepend the release name) | `nil` | +| `fullnameOverride` | String to fully override postgresql.fullname template with a string | `nil` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag | `stretch` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` | +| `volumePermissions.securityContext.runAsUser` | User ID for the init container | `0` | +| `usePasswordFile` | Have the secrets mounted as a file instead of env vars | `false` | +| `replication.enabled` | Enable replication | `false` | +| `replication.user` | Replication user | `repl_user` | +| `replication.password` | Replication user password | `repl_password` | +| `replication.slaveReplicas` | Number of slaves replicas | `1` | +| `replication.synchronousCommit` | Set synchronous commit mode. Allowed values: `on`, `remote_apply`, `remote_write`, `local` and `off` | `off` | +| `replication.numSynchronousReplicas` | Number of replicas that will have synchronous replication. Note: Cannot be greater than `replication.slaveReplicas`. | `0` | +| `replication.applicationName` | Cluster application name. 
Useful for advanced replication settings | `my_application` | +| `existingSecret` | Name of existing secret to use for PostgreSQL passwords | `nil` | +| `postgresqlUsername` | PostgreSQL admin user | `postgres` | +| `postgresqlPassword` | PostgreSQL admin password | _random 10 character alphanumeric string_ | +| `postgresqlDatabase` | PostgreSQL database | `nil` | +| `postgresqlDataDir` | PostgreSQL data dir folder | `/bitnami/postgresql` (same value as persistence.mountPath) | +| `postgresqlInitdbArgs` | PostgreSQL initdb extra arguments | `nil` | +| `postgresqlInitdbWalDir` | PostgreSQL location for transaction log | `nil` | +| `postgresqlConfiguration` | Runtime Config Parameters | `nil` | +| `postgresqlExtendedConf` | Extended Runtime Config Parameters (appended to main or default configuration) | `nil` | +| `pgHbaConfiguration` | Content of pg\_hba.conf | `nil (do not create pg_hba.conf)` | +| `configurationConfigMap` | ConfigMap with the PostgreSQL configuration files (Note: Overrides `postgresqlConfiguration` and `pgHbaConfiguration`). The value is evaluated as a template. | `nil` | +| `extendedConfConfigMap` | ConfigMap with the extended PostgreSQL configuration files. The value is evaluated as a template. | `nil` | +| `initdbScripts` | Dictionary of initdb scripts | `nil` | +| `initdbUsername` | PostgreSQL user to execute the .sql and sql.gz scripts | `nil` | +| `initdbPassword` | Password for the user specified in `initdbUsername` | `nil` | +| `initdbScriptsConfigMap` | ConfigMap with the initdb scripts (Note: Overrides `initdbScripts`). The value is evaluated as a template. | `nil` | +| `initdbScriptsSecret` | Secret with initdb scripts that contain sensitive information (Note: can be used with `initdbScriptsConfigMap` or `initdbScripts`). The value is evaluated as a template. | `nil` | +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.port` | PostgreSQL port | `5432` | +| `service.nodePort` | Kubernetes Service nodePort | `nil` | +| `service.annotations` | Annotations for PostgreSQL service, the value is evaluated as a template. | {} | +| `service.loadBalancerIP` | loadBalancerIP if service type is `LoadBalancer` | `nil` | +| `service.loadBalancerSourceRanges` | Address that are allowed when svc is LoadBalancer | [] | +| `schedulerName` | Name of the k8s scheduler (other than default) | `nil` | +| `persistence.enabled` | Enable persistence using PVC | `true` | +| `persistence.existingClaim` | Provide an existing `PersistentVolumeClaim`, the value is evaluated as a template. 
| `nil` | +| `persistence.mountPath` | Path to mount the volume at | `/bitnami/postgresql` | +| `persistence.subPath` | Subdirectory of the volume to mount at | `""` | +| `persistence.storageClass` | PVC Storage Class for PostgreSQL volume | `nil` | +| `persistence.accessModes` | PVC Access Mode for PostgreSQL volume | `[ReadWriteOnce]` | +| `persistence.size` | PVC Storage Request for PostgreSQL volume | `8Gi` | +| `persistence.annotations` | Annotations for the PVC | `{}` | +| `master.nodeSelector` | Node labels for pod assignment (postgresql master) | `{}` | +| `master.affinity` | Affinity labels for pod assignment (postgresql master) | `{}` | +| `master.tolerations` | Toleration labels for pod assignment (postgresql master) | `[]` | +| `master.anotations` | Map of annotations to add to the statefulset (postgresql master) | `{}` | +| `master.labels` | Map of labels to add to the statefulset (postgresql master) | `{}` | +| `master.podAnnotations` | Map of annotations to add to the pods (postgresql master) | `{}` | +| `master.podLabels` | Map of labels to add to the pods (postgresql master) | `{}` | +| `master.extraVolumeMounts` | Additional volume mounts to add to the pods (postgresql master) | `[]` | +| `master.extraVolumes` | Additional volumes to add to the pods (postgresql master) | `[]` | +| `slave.nodeSelector` | Node labels for pod assignment (postgresql slave) | `{}` | +| `slave.affinity` | Affinity labels for pod assignment (postgresql slave) | `{}` | +| `slave.tolerations` | Toleration labels for pod assignment (postgresql slave) | `[]` | +| `slave.anotations` | Map of annotations to add to the statefulsets (postgresql slave) | `{}` | +| `slave.labels` | Map of labels to add to the statefulsets (postgresql slave) | `{}` | +| `slave.podAnnotations` | Map of annotations to add to the pods (postgresql slave) | `{}` | +| `slave.podLabels` | Map of labels to add to the pods (postgresql slave) | `{}` | +| `slave.extraVolumeMounts` | Additional volume mounts to add to the pods (postgresql slave) | `[]` | +| `slave.extraVolumes` | Additional volumes to add to the pods (postgresql slave) | `[]` | +| `terminationGracePeriodSeconds` | Seconds the pod needs to terminate gracefully | `nil` | +| `resources` | CPU/Memory resource requests/limits | Memory: `256Mi`, CPU: `250m` | +| `securityContext.enabled` | Enable security context | `true` | +| `securityContext.fsGroup` | Group ID for the container | `1001` | +| `securityContext.runAsUser` | User ID for the container | `1001` | +| `serviceAccount.enabled` | Enable service account (Note: Service Account will only be automatically created if `serviceAccount.name` is not set) | `false` | +| `serviceAcccount.name` | Name of existing service account | `nil` | +| `livenessProbe.enabled` | Would you like a livenessProbe to be enabled | `true` | +| `networkPolicy.enabled` | Enable NetworkPolicy | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 30 | +| `livenessProbe.periodSeconds` | How often to perform the probe | 10 | +| `livenessProbe.timeoutSeconds` | When the probe times out | 5 | +| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| 6 | +| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `readinessProbe.enabled` | would you like a readinessProbe to be enabled | `true` | +| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | 5 | +| `readinessProbe.periodSeconds` | How often to perform the probe | 10 | +| `readinessProbe.timeoutSeconds` | When the probe times out | 5 | +| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 | +| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `metrics.enabled` | Start a prometheus exporter | `false` | +| `metrics.service.type` | Kubernetes Service type | `ClusterIP` | +| `service.clusterIP` | Static clusterIP or None for headless services | `nil` | +| `metrics.service.annotations` | Additional annotations for metrics exporter pod | `{ prometheus.io/scrape: "true", prometheus.io/port: "9187"}` | +| `metrics.service.loadBalancerIP` | loadBalancerIP if redis metrics service type is `LoadBalancer` | `nil` | +| `metrics.serviceMonitor.enabled` | Set this to `true` to create ServiceMonitor for Prometheus operator | `false` | +| `metrics.serviceMonitor.additionalLabels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` | +| `metrics.serviceMonitor.namespace` | Optional namespace in which to create ServiceMonitor | `nil` | +| `metrics.serviceMonitor.interval` | Scrape interval. If not set, the Prometheus default scrape interval is used | `nil` | +| `metrics.serviceMonitor.scrapeTimeout` | Scrape timeout. If not set, the Prometheus default scrape timeout is used | `nil` | +| `metrics.image.registry` | PostgreSQL Image registry | `docker.io` | +| `metrics.image.repository` | PostgreSQL Image name | `bitnami/postgres-exporter` | +| `metrics.image.tag` | PostgreSQL Image tag | `{TAG_NAME}` | +| `metrics.image.pullPolicy` | PostgreSQL Image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Specify Image pull secrets | `nil` (does not add image pull secrets to deployed pods) | +| `metrics.securityContext.enabled` | Enable security context for metrics | `false` | +| `metrics.securityContext.runAsUser` | User ID for the container for metrics | `1001` | +| `metrics.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 30 | +| `metrics.livenessProbe.periodSeconds` | How often to perform the probe | 10 | +| `metrics.livenessProbe.timeoutSeconds` | When the probe times out | 5 | +| `metrics.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 | +| `metrics.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `metrics.readinessProbe.enabled` | would you like a readinessProbe to be enabled | `true` | +| `metrics.readinessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 5 | +| `metrics.readinessProbe.periodSeconds` | How often to perform the probe | 10 | +| `metrics.readinessProbe.timeoutSeconds` | When the probe times out | 5 | +| `metrics.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| 6 | +| `metrics.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `extraEnv` | Any extra environment variables you would like to pass on to the pod. The value is evaluated as a template. | `{}` | +| `updateStrategy` | Update strategy policy | `{type: "RollingUpdate"}` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```console +$ helm install --name my-release \ + --set postgresqlPassword=secretpassword,postgresqlDatabase=my-database \ + stable/postgresql +``` + +The above command sets the PostgreSQL `postgres` account password to `secretpassword`. Additionally it creates a database named `my-database`. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```console +$ helm install --name my-release -f values.yaml stable/postgresql +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Configuration and installation details + +### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Production configuration and horizontal scaling + +This chart includes a `values-production.yaml` file where you can find some parameters oriented to production configuration in comparison to the regular `values.yaml`. You can use this file instead of the default one. + +- Enable replication: +```diff +- replication.enabled: false ++ replication.enabled: true +``` + +- Number of slaves replicas: +```diff +- replication.slaveReplicas: 1 ++ replication.slaveReplicas: 2 +``` + +- Set synchronous commit mode: +```diff +- replication.synchronousCommit: "off" ++ replication.synchronousCommit: "on" +``` + +- Number of replicas that will have synchronous replication: +```diff +- replication.numSynchronousReplicas: 0 ++ replication.numSynchronousReplicas: 1 +``` + +- Start a prometheus exporter: +```diff +- metrics.enabled: false ++ metrics.enabled: true +``` + +To horizontally scale this chart, you can use the `--replicas` flag to modify the number of nodes in your PostgreSQL deployment. Also you can use the `values-production.yaml` file or modify the parameters shown above. + +### Change PostgreSQL version + +To modify the PostgreSQL version used in this chart you can specify a [valid image tag](https://hub.docker.com/r/bitnami/postgresql/tags/) using the `image.tag` parameter. For example, `image.tag=12.0.0-debian-9-r0` + +### postgresql.conf / pg_hba.conf files as configMap + +This helm chart also supports to customize the whole configuration file. + +Add your custom file to "files/postgresql.conf" in your working directory. This file will be mounted as configMap to the containers and it will be used for configuring the PostgreSQL server. + +Alternatively, you can specify PostgreSQL configuration parameters using the `postgresqlConfiguration` parameter as a dict, using camelCase, e.g. {"sharedBuffers": "500MB"}. + +In addition to these options, you can also set an external ConfigMap with all the configuration files. 
This is done by setting the `configurationConfigMap` parameter. Note that this will override the two previous options. + +### Allow settings to be loaded from files other than the default `postgresql.conf` + +If you don't want to provide the whole PostgreSQL configuration file and only specify certain parameters, you can add your extended `.conf` files to "files/conf.d/" in your working directory. +Those files will be mounted as configMap to the containers adding/overwriting the default configuration using the `include_dir` directive that allows settings to be loaded from files other than the default `postgresql.conf`. + +Alternatively, you can also set an external ConfigMap with all the extra configuration files. This is done by setting the `extendedConfConfigMap` parameter. Note that this will override the previous option. + +### Initialize a fresh instance + +The [Bitnami PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) image allows you to use your custom scripts to initialize a fresh instance. In order to execute the scripts, they must be located inside the chart folder `files/docker-entrypoint-initdb.d` so they can be consumed as a ConfigMap. + +Alternatively, you can specify custom scripts using the `initdbScripts` parameter as dict. + +In addition to these options, you can also set an external ConfigMap with all the initialization scripts. This is done by setting the `initdbScriptsConfigMap` parameter. Note that this will override the two previous options. If your initialization scripts contain sensitive information such as credentials or passwords, you can use the `initdbScriptsSecret` parameter. + +The allowed extensions are `.sh`, `.sql` and `.sql.gz`. + +### Metrics + +The chart optionally can start a metrics exporter for [prometheus](https://prometheus.io). The metrics endpoint (port 9187) is not exposed and it is expected that the metrics are collected from inside the k8s cluster using something similar as the described in the [example Prometheus scrape configuration](https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus-kubernetes.yml). + +The exporter allows to create custom metrics from additional SQL queries. See the Chart's `values.yaml` for an example and consult the [exporters documentation](https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file) for more details. + +### Use of global variables + +In more complex scenarios, we may have the following tree of dependencies + +``` + +--------------+ + | | + +------------+ Chart 1 +-----------+ + | | | | + | --------+------+ | + | | | + | | | + | | | + | | | + v v v ++-------+------+ +--------+------+ +--------+------+ +| | | | | | +| PostgreSQL | | Sub-chart 1 | | Sub-chart 2 | +| | | | | | ++--------------+ +---------------+ +---------------+ +``` + +The three charts below depend on the parent chart Chart 1. However, subcharts 1 and 2 may need to connect to PostgreSQL as well. In order to do so, subcharts 1 and 2 need to know the PostgreSQL credentials, so one option for deploying could be deploy Chart 1 with the following parameters: + +``` +postgresql.postgresqlPassword=testtest +subchart1.postgresql.postgresqlPassword=testtest +subchart2.postgresql.postgresqlPassword=testtest +postgresql.postgresqlDatabase=db1 +subchart1.postgresql.postgresqlDatabase=db1 +subchart2.postgresql.postgresqlDatabase=db1 +``` + +If the number of dependent sub-charts increases, installing the chart with parameters can become increasingly difficult. 
An alternative would be to set the credentials using global variables as follows: + +``` +global.postgresql.postgresqlPassword=testtest +global.postgresql.postgresqlDatabase=db1 +``` + +This way, the credentials will be available in all of the subcharts. + +## Persistence + +The [Bitnami PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) image stores the PostgreSQL data and configurations at the `/bitnami/postgresql` path of the container. + +Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube. +See the [Parameters](#parameters) section to configure the PVC or to disable persistence. + +If you already have data in it, you will fail to sync to standby nodes for all commits, details can refer to [code](https://github.com/bitnami/bitnami-docker-postgresql/blob/8725fe1d7d30ebe8d9a16e9175d05f7ad9260c93/9.6/debian-9/rootfs/libpostgresql.sh#L518-L556). If you need to use those data, please covert them to sql and import after `helm install` finished. + +## NetworkPolicy + +To enable network policy for PostgreSQL, install [a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), and set `networkPolicy.enabled` to `true`. + +For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting the DefaultDeny namespace annotation. Note: this will enforce policy for _all_ pods in the namespace: + +```console +$ kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}" +``` + +With NetworkPolicy enabled, traffic will be limited to just port 5432. + +For more precise policy, set `networkPolicy.allowExternal=false`. This will only allow pods with the generated client label to connect to PostgreSQL. +This label will be displayed in the output of a successful install. + +## Differences between Bitnami PostgreSQL image and [Docker Official](https://hub.docker.com/_/postgres) image + +- The Docker Official PostgreSQL image does not support replication. If you pass any replication environment variable, this would be ignored. The only environment variables supported by the Docker Official image are POSTGRES_USER, POSTGRES_DB, POSTGRES_PASSWORD, POSTGRES_INITDB_ARGS, POSTGRES_INITDB_WALDIR and PGDATA. All the remaining environment variables are specific to the Bitnami PostgreSQL image. +- The Bitnami PostgreSQL image is non-root by default. This requires that you run the pod with `securityContext` and updates the permissions of the volume with an `initContainer`. A key benefit of this configuration is that the pod follows security best practices and is prepared to run on Kubernetes distributions with hard security constraints like OpenShift. + +### Deploy chart using Docker Official PostgreSQL Image + +From chart version 4.0.0, it is possible to use this chart with the Docker Official PostgreSQL image. +Besides specifying the new Docker repository and tag, it is important to modify the PostgreSQL data directory and volume mount point. Basically, the PostgreSQL data dir cannot be the mount point directly, it has to be a subdirectory. 
+
+```
+helm install --name postgres \
+    --set image.repository=postgres \
+    --set image.tag=10.6 \
+    --set postgresqlDataDir=/data/pgdata \
+    --set persistence.mountPath=/data/ \
+    stable/postgresql
+```
+
+## Upgrade
+
+It's necessary to specify the existing passwords while performing an upgrade to ensure the secrets are not updated with invalid randomly generated passwords. Remember to specify the existing values of the `postgresqlPassword` and `replication.password` parameters when upgrading the chart:
+
+```bash
+$ helm upgrade my-release stable/postgresql \
+    --set postgresqlPassword=[POSTGRESQL_PASSWORD] \
+    --set replication.password=[REPLICATION_PASSWORD]
+```
+
+> Note: you need to substitute the placeholders _[POSTGRESQL_PASSWORD]_ and _[REPLICATION_PASSWORD]_ with the values obtained from the instructions in the installation notes.
+
+## 7.0.0
+
+Helm performs a lookup for the object based on its group (apps), version (v1), and kind (Deployment), also known as its GroupVersionKind, or GVK. Changing the GVK is considered a compatibility breaker from Kubernetes' point of view, so you cannot "upgrade" those objects to the new GVK in-place. Earlier versions of Helm 3 did not perform the lookup correctly, which has since been fixed to match the spec.
+
+In https://github.com/helm/charts/pull/17281 the `apiVersion` of the statefulset resources was updated to `apps/v1` in line with the deprecation of the older API versions, resulting in compatibility breakage.
+
+This major version bump signifies this change.
+
+## 5.0.0
+
+In this version, the **chart is using PostgreSQL 11 instead of PostgreSQL 10**. You can find the main differences and notable changes in the following links: [https://www.postgresql.org/about/news/1894/](https://www.postgresql.org/about/news/1894/) and [https://www.postgresql.org/about/featurematrix/](https://www.postgresql.org/about/featurematrix/).
+
+For major releases of PostgreSQL, the internal data storage format is subject to change, thus complicating upgrades; you may see errors like the following in the logs:
+
+```bash
+Welcome to the Bitnami postgresql container
+Subscribe to project updates by watching https://github.com/bitnami/bitnami-docker-postgresql
+Submit issues and feature requests at https://github.com/bitnami/bitnami-docker-postgresql/issues
+Send us your feedback at containers@bitnami.com
+
+INFO ==> ** Starting PostgreSQL setup **
+INFO ==> Validating settings in POSTGRESQL_* env vars..
+INFO ==> Initializing PostgreSQL database...
+INFO ==> postgresql.conf file not detected. Generating it...
+INFO ==> pg_hba.conf file not detected. Generating it...
+INFO ==> Deploying PostgreSQL with persisted data...
+INFO ==> Configuring replication parameters
+INFO ==> Loading custom scripts...
+INFO ==> Enabling remote connections
+INFO ==> Stopping PostgreSQL...
+INFO ==> ** PostgreSQL setup finished! **
+
+INFO ==> ** Starting PostgreSQL **
+  [1] FATAL: database files are incompatible with server
+  [1] DETAIL: The data directory was initialized by PostgreSQL version 10, which is not compatible with this version 11.3.
+```
+In this case, you should migrate the data from the old chart to the new one following an approach similar to that described in [this section](https://www.postgresql.org/docs/current/upgrading.html#UPGRADING-VIA-PGDUMPALL) from the official documentation. Basically, create a database dump in the old chart, then move and restore it in the new one.
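+
+The following is a minimal sketch of that dump-and-restore flow using `pg_dumpall` and `psql`. The service names below are placeholders (not values defined by this chart), and it assumes both the old PostgreSQL 10 service and the new PostgreSQL 11 service are reachable from where you run the commands:
+
+```console
+$ # Dump all databases from the old release (you will be prompted for the old password)
+$ pg_dumpall -h OLD_POSTGRESQL_SERVICE -U postgres > /tmp/all-databases.sql
+$ # Restore the dump into the new release (use the new password)
+$ psql -h NEW_POSTGRESQL_SERVICE -U postgres -f /tmp/all-databases.sql
+```
+
+As with the `pg_dump` example in the 2.0.0 notes below, this can take some time depending on the database size.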
+
+### 4.0.0
+
+By default, this chart uses the Bitnami PostgreSQL container starting from version `10.7.0-r68`. This version moves the initialization logic from node.js to bash. This new version of the chart requires setting the `POSTGRES_PASSWORD` in the slaves as well, in order to properly configure the `pg_hba.conf` file. Users of previous versions of the chart are advised to upgrade immediately.
+
+IMPORTANT: If you do not want to upgrade the chart version, then make sure you use the `10.7.0-r68` version of the container. Otherwise, you will get this error:
+
+```
+The POSTGRESQL_PASSWORD environment variable is empty or not set. Set the environment variable ALLOW_EMPTY_PASSWORD=yes to allow the container to be started with blank passwords. This is recommended only for development
+```
+
+### 3.0.0
+
+This release makes it possible to specify different nodeSelector, affinity and tolerations for master and slave pods.
+It also fixes an issue with the `postgresql.master.fullname` helper template not obeying `fullnameOverride`.
+
+#### Breaking changes
+
+- `affinity` has been renamed to `master.affinity` and `slave.affinity`.
+- `tolerations` has been renamed to `master.tolerations` and `slave.tolerations`.
+- `nodeSelector` has been renamed to `master.nodeSelector` and `slave.nodeSelector`.
+
+### 2.0.0
+
+In order to upgrade from the `0.X.X` branch to `1.X.X`, you should follow the steps below:
+
+- Obtain the service name (`SERVICE_NAME`) and password (`OLD_PASSWORD`) of the existing postgresql chart. You can find the instructions to obtain the password in the NOTES.txt; the service name can be obtained by running:
+
+```console
+$ kubectl get svc
+```
+
+- Install (not upgrade) the new version
+
+```console
+$ helm repo update
+$ helm install --name my-release stable/postgresql
+```
+
+- Connect to the new pod (you can obtain the name by running `kubectl get pods`):
+
+```console
+$ kubectl exec -it NAME bash
+```
+
+- Once logged in, create a dump file from the previous database using `pg_dump`; for that, we should connect to the previous postgresql chart:
+
+```console
+$ pg_dump -h SERVICE_NAME -U postgres DATABASE_NAME > /tmp/backup.sql
+```
+
+After running the above command you will be prompted for a password; this is the previous chart's password (`OLD_PASSWORD`).
+This operation could take some time depending on the database size.
+
+- Once you have the backup file, you can restore it with a command like the one below:
+
+```console
+$ psql -U postgres DATABASE_NAME < /tmp/backup.sql
+```
+
+In this case, you are accessing the local postgresql, so the password should be the new one (you can find it in NOTES.txt).
+
+If you want to restore the database and the database schema does not exist, it is necessary to first follow the steps described below.
+
+```console
+$ psql -U postgres
+postgres=# drop database DATABASE_NAME;
+postgres=# create database DATABASE_NAME;
+postgres=# create user USER_NAME;
+postgres=# alter role USER_NAME with password 'BITNAMI_USER_PASSWORD';
+postgres=# grant all privileges on database DATABASE_NAME to USER_NAME;
+postgres=# alter database DATABASE_NAME owner to USER_NAME;
+```
diff --git a/Openshift4/artifactoryha-helm/charts/postgresql/files/README.md b/Openshift4/artifactoryha-helm/charts/postgresql/files/README.md
new file mode 100755
index 0000000..1813a2f
--- /dev/null
+++ b/Openshift4/artifactoryha-helm/charts/postgresql/files/README.md
@@ -0,0 +1 @@
+Copy here your postgresql.conf and/or pg_hba.conf files to use them as a config map.
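+
+A minimal sketch of how these files get picked up, assuming the chart is installed from a local directory (`CHART_DIR` is a placeholder for the path of this PostgreSQL chart, not a defined value):
+
+```console
+$ # Place the configuration next to this README so the chart templates can glob it
+$ cp postgresql.conf CHART_DIR/files/postgresql.conf
+$ # Install from the local chart directory; the file is mounted via a ConfigMap
+$ helm install --name my-release CHART_DIR
+```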
diff --git a/Openshift4/artifactoryha-helm/charts/postgresql/files/conf.d/README.md b/Openshift4/artifactoryha-helm/charts/postgresql/files/conf.d/README.md new file mode 100755 index 0000000..184c187 --- /dev/null +++ b/Openshift4/artifactoryha-helm/charts/postgresql/files/conf.d/README.md @@ -0,0 +1,4 @@ +If you don't want to provide the whole configuration file and only specify certain parameters, you can copy here your extended `.conf` files. +These files will be injected as a config maps and add/overwrite the default configuration using the `include_dir` directive that allows settings to be loaded from files other than the default `postgresql.conf`. + +More info in the [bitnami-docker-postgresql README](https://github.com/bitnami/bitnami-docker-postgresql#configuration-file). diff --git a/Openshift4/artifactoryha-helm/charts/postgresql/files/docker-entrypoint-initdb.d/README.md b/Openshift4/artifactoryha-helm/charts/postgresql/files/docker-entrypoint-initdb.d/README.md new file mode 100755 index 0000000..cba3809 --- /dev/null +++ b/Openshift4/artifactoryha-helm/charts/postgresql/files/docker-entrypoint-initdb.d/README.md @@ -0,0 +1,3 @@ +You can copy here your custom `.sh`, `.sql` or `.sql.gz` file so they are executed during the first boot of the image. + +More info in the [bitnami-docker-postgresql](https://github.com/bitnami/bitnami-docker-postgresql#initializing-a-new-instance) repository. \ No newline at end of file diff --git a/Openshift4/artifactoryha-helm/charts/postgresql/templates/NOTES.txt b/Openshift4/artifactoryha-helm/charts/postgresql/templates/NOTES.txt new file mode 100755 index 0000000..798fa10 --- /dev/null +++ b/Openshift4/artifactoryha-helm/charts/postgresql/templates/NOTES.txt @@ -0,0 +1,50 @@ +** Please be patient while the chart is being deployed ** + +PostgreSQL can be accessed via port {{ template "postgresql.port" . }} on the following DNS name from within your cluster: + + {{ template "postgresql.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local - Read/Write connection +{{- if .Values.replication.enabled }} + {{ template "postgresql.fullname" . }}-read.{{ .Release.Namespace }}.svc.cluster.local - Read only connection +{{- end }} +To get the password for "{{ template "postgresql.username" . }}" run: + + export POSTGRES_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "postgresql.secretName" . }} -o jsonpath="{.data.postgresql-password}" | base64 --decode) + +To connect to your database run the following command: + + kubectl run {{ template "postgresql.fullname" . }}-client --rm --tty -i --restart='Never' --namespace {{ .Release.Namespace }} --image {{ template "postgresql.image" . }} --env="PGPASSWORD=$POSTGRES_PASSWORD" {{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} + --labels="{{ template "postgresql.fullname" . }}-client=true" {{- end }} --command -- psql --host {{ template "postgresql.fullname" . }} -U {{ .Values.postgresqlUsername }}{{- if .Values.postgresqlDatabase }} -d {{ .Values.postgresqlDatabase }}{{- end }} -p {{ template "postgresql.port" . }} + +{{ if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} +Note: Since NetworkPolicy is enabled, only pods with label {{ template "postgresql.fullname" . }}-client=true" will be able to connect to this PostgreSQL cluster. 
+{{- end }} + +To connect to your database from outside the cluster execute the following commands: + +{{- if contains "NodePort" .Values.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "postgresql.fullname" . }}) + {{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host $NODE_IP --port $NODE_PORT -U {{ .Values.postgresqlUsername }}{{- if .Values.postgresqlDatabase }} -d {{ .Values.postgresqlDatabase }}{{- end }} + +{{- else if contains "LoadBalancer" .Values.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "postgresql.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "postgresql.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + {{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host $SERVICE_IP --port {{ template "postgresql.port" . }} -U {{ .Values.postgresqlUsername }}{{- if .Values.postgresqlDatabase }} -d {{ .Values.postgresqlDatabase }}{{- end }} + +{{- else if contains "ClusterIP" .Values.service.type }} + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "postgresql.fullname" . }} {{ template "postgresql.port" . }}:{{ template "postgresql.port" . }} & + {{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host 127.0.0.1 -U {{ .Values.postgresqlUsername }}{{- if .Values.postgresqlDatabase }} -d {{ .Values.postgresqlDatabase }}{{- end }} -p {{ template "postgresql.port" . }} + +{{- end }} + +{{- if and (contains "bitnami/" .Values.image.repository) (not (.Values.image.tag | toString | regexFind "-r\\d+$|sha256:")) }} + +WARNING: Rolling tag detected ({{ .Values.image.repository }}:{{ .Values.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ + +{{- end }} diff --git a/Openshift4/artifactoryha-helm/charts/postgresql/templates/_helpers.tpl b/Openshift4/artifactoryha-helm/charts/postgresql/templates/_helpers.tpl new file mode 100755 index 0000000..b379f29 --- /dev/null +++ b/Openshift4/artifactoryha-helm/charts/postgresql/templates/_helpers.tpl @@ -0,0 +1,374 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "postgresql.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "postgresql.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{/* +Create a default fully qualified app name. 
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "postgresql.master.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- $fullname := default (printf "%s-%s" .Release.Name $name) .Values.fullnameOverride -}} +{{- if .Values.replication.enabled -}} +{{- printf "%s-%s" $fullname "master" | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s" $fullname | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "postgresql.networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +"extensions/v1beta1" +{{- else if semverCompare "^1.7-0" .Capabilities.KubeVersion.GitVersion -}} +"networking.k8s.io/v1" +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "postgresql.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper PostgreSQL image name +*/}} +{{- define "postgresql.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL password +*/}} +{{- define "postgresql.password" -}} +{{- if .Values.global.postgresql.postgresqlPassword }} + {{- .Values.global.postgresql.postgresqlPassword -}} +{{- else if .Values.postgresqlPassword -}} + {{- .Values.postgresqlPassword -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL replication password +*/}} +{{- define "postgresql.replication.password" -}} +{{- if .Values.global.postgresql.replicationPassword }} + {{- .Values.global.postgresql.replicationPassword -}} +{{- else if .Values.replication.password -}} + {{- .Values.replication.password -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL username +*/}} +{{- define "postgresql.username" -}} +{{- if .Values.global.postgresql.postgresqlUsername }} + {{- .Values.global.postgresql.postgresqlUsername -}} +{{- else -}} + {{- .Values.postgresqlUsername -}} +{{- end -}} +{{- end -}} + + +{{/* +Return PostgreSQL replication username +*/}} +{{- define "postgresql.replication.username" -}} +{{- if .Values.global.postgresql.replicationUser }} + {{- .Values.global.postgresql.replicationUser -}} +{{- else -}} + {{- .Values.replication.user -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL port +*/}} +{{- define "postgresql.port" -}} +{{- if .Values.global.postgresql.servicePort }} + {{- .Values.global.postgresql.servicePort -}} +{{- else -}} + {{- .Values.service.port -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL created database +*/}} +{{- define "postgresql.database" -}} +{{- if 
.Values.global.postgresql.postgresqlDatabase }} + {{- .Values.global.postgresql.postgresqlDatabase -}} +{{- else if .Values.postgresqlDatabase -}} + {{- .Values.postgresqlDatabase -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name to change the volume permissions +*/}} +{{- define "postgresql.volumePermissions.image" -}} +{{- $registryName := .Values.volumePermissions.image.registry -}} +{{- $repositoryName := .Values.volumePermissions.image.repository -}} +{{- $tag := .Values.volumePermissions.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper PostgreSQL metrics image name +*/}} +{{- define "postgresql.metrics.image" -}} +{{- $registryName := default "docker.io" .Values.metrics.image.registry -}} +{{- $repositoryName := .Values.metrics.image.repository -}} +{{- $tag := default "latest" .Values.metrics.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Get the password secret. +*/}} +{{- define "postgresql.secretName" -}} +{{- if .Values.global.postgresql.existingSecret }} + {{- printf "%s" .Values.global.postgresql.existingSecret -}} +{{- else if .Values.existingSecret -}} + {{- printf "%s" .Values.existingSecret -}} +{{- else -}} + {{- printf "%s" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a secret object should be created +*/}} +{{- define "postgresql.createSecret" -}} +{{- if .Values.global.postgresql.existingSecret }} +{{- else if .Values.existingSecret -}} +{{- else -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Get the configuration ConfigMap name. +*/}} +{{- define "postgresql.configurationCM" -}} +{{- if .Values.configurationConfigMap -}} +{{- printf "%s" (tpl .Values.configurationConfigMap $) -}} +{{- else -}} +{{- printf "%s-configuration" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the extended configuration ConfigMap name. +*/}} +{{- define "postgresql.extendedConfigurationCM" -}} +{{- if .Values.extendedConfConfigMap -}} +{{- printf "%s" (tpl .Values.extendedConfConfigMap $) -}} +{{- else -}} +{{- printf "%s-extended-configuration" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the initialization scripts ConfigMap name. 
+*/}} +{{- define "postgresql.initdbScriptsCM" -}} +{{- if .Values.initdbScriptsConfigMap -}} +{{- printf "%s" (tpl .Values.initdbScriptsConfigMap $) -}} +{{- else -}} +{{- printf "%s-init-scripts" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the initialization scripts Secret name. +*/}} +{{- define "postgresql.initdbScriptsSecret" -}} +{{- printf "%s" (tpl .Values.initdbScriptsSecret $) -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "postgresql.imagePullSecrets" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +Also, we can not use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.global.imagePullSecrets }} + - name: {{ . }} +{{- end }} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- end -}} + +{{/* +Get the readiness probe command +*/}} +{{- define "postgresql.readinessProbeCommand" -}} +- | +{{- if (include "postgresql.database" .) }} + pg_isready -U {{ include "postgresql.username" . | quote }} -d {{ (include "postgresql.database" .) | quote }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} +{{- else }} + pg_isready -U {{ include "postgresql.username" . | quote }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} +{{- end }} +{{- if contains "bitnami/" .Values.image.repository }} + [ -f /opt/bitnami/postgresql/tmp/.initialized ] +{{- end -}} +{{- end -}} + +{{/* +Return the proper Storage Class +*/}} +{{- define "postgresql.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.persistence.storageClass -}} + {{- if (eq "-" .Values.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.persistence.storageClass -}} + {{- if (eq "-" .Values.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Renders a value that contains template. 
+Usage: +{{ include "postgresql.tplValue" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "postgresql.tplValue" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "postgresql.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "apps/v1beta2" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} diff --git a/Openshift4/artifactoryha-helm/charts/postgresql/templates/configmap.yaml b/Openshift4/artifactoryha-helm/charts/postgresql/templates/configmap.yaml new file mode 100755 index 0000000..d2178c0 --- /dev/null +++ b/Openshift4/artifactoryha-helm/charts/postgresql/templates/configmap.yaml @@ -0,0 +1,26 @@ +{{ if and (or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration) (not .Values.configurationConfigMap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.fullname" . }}-configuration + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +data: +{{- if (.Files.Glob "files/postgresql.conf") }} +{{ (.Files.Glob "files/postgresql.conf").AsConfig | indent 2 }} +{{- else if .Values.postgresqlConfiguration }} + postgresql.conf: | +{{- range $key, $value := default dict .Values.postgresqlConfiguration }} + {{ $key | snakecase }}={{ $value }} +{{- end }} +{{- end }} +{{- if (.Files.Glob "files/pg_hba.conf") }} +{{ (.Files.Glob "files/pg_hba.conf").AsConfig | indent 2 }} +{{- else if .Values.pgHbaConfiguration }} + pg_hba.conf: | +{{ .Values.pgHbaConfiguration | indent 4 }} +{{- end }} +{{ end }} diff --git a/Openshift4/artifactoryha-helm/charts/postgresql/templates/extended-config-configmap.yaml b/Openshift4/artifactoryha-helm/charts/postgresql/templates/extended-config-configmap.yaml new file mode 100755 index 0000000..8a41195 --- /dev/null +++ b/Openshift4/artifactoryha-helm/charts/postgresql/templates/extended-config-configmap.yaml @@ -0,0 +1,21 @@ +{{- if and (or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf) (not .Values.extendedConfConfigMap)}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.fullname" . }}-extended-configuration + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +data: +{{- with .Files.Glob "files/conf.d/*.conf" }} +{{ .AsConfig | indent 2 }} +{{- end }} +{{ with .Values.postgresqlExtendedConf }} + override.conf: | +{{- range $key, $value := . }} + {{ $key | snakecase }}={{ $value }} +{{- end }} +{{- end }} +{{- end }} diff --git a/Openshift4/artifactoryha-helm/charts/postgresql/templates/initialization-configmap.yaml b/Openshift4/artifactoryha-helm/charts/postgresql/templates/initialization-configmap.yaml new file mode 100755 index 0000000..8eb5e05 --- /dev/null +++ b/Openshift4/artifactoryha-helm/charts/postgresql/templates/initialization-configmap.yaml @@ -0,0 +1,24 @@ +{{- if and (or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScripts) (not .Values.initdbScriptsConfigMap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.fullname" . 
}}-init-scripts + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +{{- with .Files.Glob "files/docker-entrypoint-initdb.d/*.sql.gz" }} +binaryData: +{{- range $path, $bytes := . }} + {{ base $path }}: {{ $.Files.Get $path | b64enc | quote }} +{{- end }} +{{- end }} +data: +{{- with .Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql}" }} +{{ .AsConfig | indent 2 }} +{{- end }} +{{- with .Values.initdbScripts }} +{{ toYaml . | indent 2 }} +{{- end }} +{{- end }} diff --git a/Openshift4/artifactoryha-helm/charts/postgresql/templates/metrics-svc.yaml b/Openshift4/artifactoryha-helm/charts/postgresql/templates/metrics-svc.yaml new file mode 100755 index 0000000..f370041 --- /dev/null +++ b/Openshift4/artifactoryha-helm/charts/postgresql/templates/metrics-svc.yaml @@ -0,0 +1,26 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }}-metrics + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + annotations: +{{ toYaml .Values.metrics.service.annotations | indent 4 }} +spec: + type: {{ .Values.metrics.service.type }} + {{- if and (eq .Values.metrics.service.type "LoadBalancer") .Values.metrics.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.metrics.service.loadBalancerIP }} + {{- end }} + ports: + - name: metrics + port: 9187 + targetPort: metrics + selector: + app: {{ template "postgresql.name" . }} + release: {{ .Release.Name }} + role: master +{{- end }} diff --git a/Openshift4/artifactoryha-helm/charts/postgresql/templates/networkpolicy.yaml b/Openshift4/artifactoryha-helm/charts/postgresql/templates/networkpolicy.yaml new file mode 100755 index 0000000..8fb23f2 --- /dev/null +++ b/Openshift4/artifactoryha-helm/charts/postgresql/templates/networkpolicy.yaml @@ -0,0 +1,34 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "postgresql.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + podSelector: + matchLabels: + app: {{ template "postgresql.name" . }} + release: {{ .Release.Name | quote }} + ingress: + # Allow inbound connections + - ports: + - port: {{ template "postgresql.port" . }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "postgresql.fullname" . }}-client: "true" + - podSelector: + matchLabels: + app: {{ template "postgresql.name" . }} + release: {{ .Release.Name | quote }} + role: slave + {{- end }} + # Allow prometheus scrapes + - ports: + - port: 9187 +{{- end }} diff --git a/Openshift4/artifactoryha-helm/charts/postgresql/templates/secrets.yaml b/Openshift4/artifactoryha-helm/charts/postgresql/templates/secrets.yaml new file mode 100755 index 0000000..e0bc3b2 --- /dev/null +++ b/Openshift4/artifactoryha-helm/charts/postgresql/templates/secrets.yaml @@ -0,0 +1,17 @@ +{{- if (include "postgresql.createSecret" .) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . 
}} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +type: Opaque +data: + postgresql-password: {{ include "postgresql.password" . | b64enc | quote }} + {{- if .Values.replication.enabled }} + postgresql-replication-password: {{ include "postgresql.replication.password" . | b64enc | quote }} + {{- end }} +{{- end -}} diff --git a/Openshift4/artifactoryha-helm/charts/postgresql/templates/serviceaccount.yaml b/Openshift4/artifactoryha-helm/charts/postgresql/templates/serviceaccount.yaml new file mode 100755 index 0000000..27e5b51 --- /dev/null +++ b/Openshift4/artifactoryha-helm/charts/postgresql/templates/serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- if and (.Values.serviceAccount.enabled) (not .Values.serviceAccount.name) }} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + name: {{ template "postgresql.fullname" . }} +{{- end }} \ No newline at end of file diff --git a/Openshift4/artifactoryha-helm/charts/postgresql/templates/servicemonitor.yaml b/Openshift4/artifactoryha-helm/charts/postgresql/templates/servicemonitor.yaml new file mode 100755 index 0000000..9211d51 --- /dev/null +++ b/Openshift4/artifactoryha-helm/charts/postgresql/templates/servicemonitor.yaml @@ -0,0 +1,33 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "postgresql.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- end }} + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if .Values.metrics.serviceMonitor.additionalLabels }} +{{ toYaml .Values.metrics.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} +spec: + endpoints: + - port: metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + selector: + matchLabels: + app: {{ template "postgresql.name" . }} + release: {{ .Release.Name }} +{{- end }} diff --git a/Openshift4/artifactoryha-helm/charts/postgresql/templates/statefulset-slaves.yaml b/Openshift4/artifactoryha-helm/charts/postgresql/templates/statefulset-slaves.yaml new file mode 100755 index 0000000..74a81d0 --- /dev/null +++ b/Openshift4/artifactoryha-helm/charts/postgresql/templates/statefulset-slaves.yaml @@ -0,0 +1,247 @@ +{{- if .Values.replication.enabled }} +apiVersion: {{ template "postgresql.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: "{{ template "postgresql.fullname" . }}-slave" + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +{{- with .Values.slave.labels }} +{{ toYaml . | indent 4 }} +{{- end }} +{{- with .Values.slave.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: + serviceName: {{ template "postgresql.fullname" . 
}}-headless + replicas: {{ .Values.replication.slaveReplicas }} + selector: + matchLabels: + app: {{ template "postgresql.name" . }} + release: {{ .Release.Name | quote }} + role: slave + template: + metadata: + name: {{ template "postgresql.fullname" . }} + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + role: slave +{{- with .Values.slave.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.slave.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + {{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" + {{- end }} +{{- include "postgresql.imagePullSecrets" . | indent 6 }} + {{- if .Values.slave.nodeSelector }} + nodeSelector: +{{ toYaml .Values.slave.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.slave.affinity }} + affinity: +{{ toYaml .Values.slave.affinity | indent 8 }} + {{- end }} + {{- if .Values.slave.tolerations }} + tolerations: +{{ toYaml .Values.slave.tolerations | indent 8 }} + {{- end }} + {{- if .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ default (include "postgresql.fullname" . ) .Values.serviceAccount.name}} + {{- end }} + {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }} + initContainers: + - name: init-chmod-data + image: {{ template "postgresql.volumePermissions.image" . }} + imagePullPolicy: "{{ .Values.volumePermissions.image.pullPolicy }}" + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -c + - | + mkdir -p {{ .Values.persistence.mountPath }}/data + chmod 700 {{ .Values.persistence.mountPath }}/data + find {{ .Values.persistence.mountPath }} -mindepth 0 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | \ + xargs chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} + securityContext: + runAsUser: {{ .Values.volumePermissions.securityContext.runAsUser }} + volumeMounts: + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + containers: + - name: {{ template "postgresql.fullname" . }} + image: {{ template "postgresql.image" . }} + imagePullPolicy: "{{ .Values.image.pullPolicy }}" + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" .Values.image.debug | quote }} + - name: POSTGRESQL_VOLUME_DIR + value: "{{ .Values.persistence.mountPath }}" + - name: POSTGRESQL_PORT_NUMBER + value: "{{ template "postgresql.port" . }}" + {{- if .Values.persistence.mountPath }} + - name: PGDATA + value: {{ .Values.postgresqlDataDir | quote }} + {{- end }} + - name: POSTGRES_REPLICATION_MODE + value: "slave" + - name: POSTGRES_REPLICATION_USER + value: {{ include "postgresql.replication.username" . 
| quote }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_REPLICATION_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-replication-password" + {{- else }} + - name: POSTGRES_REPLICATION_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-replication-password + {{- end }} + - name: POSTGRES_CLUSTER_APP_NAME + value: {{ .Values.replication.applicationName }} + - name: POSTGRES_MASTER_HOST + value: {{ template "postgresql.fullname" . }} + - name: POSTGRES_MASTER_PORT_NUMBER + value: {{ include "postgresql.port" . | quote }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-password" + {{- else }} + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-password + {{- end }} + ports: + - name: postgresql + containerPort: {{ template "postgresql.port" . }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - /bin/sh + - -c + {{- if (include "postgresql.database" .) }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} -d {{ (include "postgresql.database" .) | quote }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} + {{- else }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} + {{- end }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - /bin/sh + - -c + - -e + {{- include "postgresql.readinessProbeCommand" . | nindent 16 }} + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{ end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + mountPath: /bitnami/postgresql/conf/conf.d/ + {{- end }} + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }} + - name: postgresql-config + mountPath: /bitnami/postgresql/conf + {{- end }} + {{- if .Values.slave.extraVolumeMounts }} + {{- toYaml .Values.slave.extraVolumeMounts | nindent 12 }} + {{- end }} + volumes: + {{- if .Values.usePasswordFile }} + - name: postgresql-password + secret: + secretName: {{ template "postgresql.secretName" . 
}} + {{- end }} + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap}} + - name: postgresql-config + configMap: + name: {{ template "postgresql.configurationCM" . }} + {{- end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + configMap: + name: {{ template "postgresql.extendedConfigurationCM" . }} + {{- end }} + {{- if not .Values.persistence.enabled }} + - name: data + emptyDir: {} + {{- end }} + {{- if .Values.slave.extraVolumes }} + {{- toYaml .Values.slave.extraVolumes | nindent 8 }} + {{- end }} + updateStrategy: + type: {{ .Values.updateStrategy.type }} + {{- if (eq "Recreate" .Values.updateStrategy.type) }} + rollingUpdate: null + {{- end }} +{{- if .Values.persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: data + {{- with .Values.persistence.annotations }} + annotations: + {{- range $key, $value := . }} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "postgresql.storageClass" . }} +{{- end }} +{{- end }} diff --git a/Openshift4/artifactoryha-helm/charts/postgresql/templates/statefulset.yaml b/Openshift4/artifactoryha-helm/charts/postgresql/templates/statefulset.yaml new file mode 100755 index 0000000..64c297f --- /dev/null +++ b/Openshift4/artifactoryha-helm/charts/postgresql/templates/statefulset.yaml @@ -0,0 +1,355 @@ +apiVersion: {{ template "postgresql.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ template "postgresql.master.fullname" . }} + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +{{- with .Values.master.labels }} +{{ toYaml . | indent 4 }} +{{- end }} +{{- with .Values.master.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: + serviceName: {{ template "postgresql.fullname" . }}-headless + replicas: 1 + updateStrategy: + type: {{ .Values.updateStrategy.type }} + {{- if (eq "Recreate" .Values.updateStrategy.type) }} + rollingUpdate: null + {{- end }} + selector: + matchLabels: + app: {{ template "postgresql.name" . }} + release: {{ .Release.Name | quote }} + role: master + template: + metadata: + name: {{ template "postgresql.fullname" . }} + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + role: master +{{- with .Values.master.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.master.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + {{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" + {{- end }} +{{- include "postgresql.imagePullSecrets" . 
| indent 6 }}
+      {{- if .Values.master.nodeSelector }}
+      nodeSelector:
+{{ toYaml .Values.master.nodeSelector | indent 8 }}
+      {{- end }}
+      {{- if .Values.master.affinity }}
+      affinity:
+{{ toYaml .Values.master.affinity | indent 8 }}
+      {{- end }}
+      {{- if .Values.master.tolerations }}
+      tolerations:
+{{ toYaml .Values.master.tolerations | indent 8 }}
+      {{- end }}
+      {{- if .Values.terminationGracePeriodSeconds }}
+      terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }}
+      {{- end }}
+      {{- if .Values.securityContext.enabled }}
+      securityContext:
+        fsGroup: {{ .Values.securityContext.fsGroup }}
+      {{- end }}
+      {{- if .Values.serviceAccount.enabled }}
+      serviceAccountName: {{ default (include "postgresql.fullname" . ) .Values.serviceAccount.name }}
+      {{- end }}
+      {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }}
+      initContainers:
+        - name: init-chmod-data
+          image: {{ template "postgresql.volumePermissions.image" . }}
+          imagePullPolicy: "{{ .Values.volumePermissions.image.pullPolicy }}"
+          {{- if .Values.resources }}
+          resources: {{- toYaml .Values.resources | nindent 12 }}
+          {{- end }}
+          command:
+            - /bin/sh
+            - -c
+            - |
+              mkdir -p {{ .Values.persistence.mountPath }}/data
+              chmod 700 {{ .Values.persistence.mountPath }}/data
+              find {{ .Values.persistence.mountPath }} -mindepth 0 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | \
+                xargs chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }}
+          securityContext:
+            runAsUser: {{ .Values.volumePermissions.securityContext.runAsUser }}
+          volumeMounts:
+            - name: data
+              mountPath: {{ .Values.persistence.mountPath }}
+              subPath: {{ .Values.persistence.subPath }}
+      {{- end }}
+      containers:
+        - name: {{ template "postgresql.fullname" . }}
+          image: {{ template "postgresql.image" . }}
+          imagePullPolicy: "{{ .Values.image.pullPolicy }}"
+          {{- if .Values.resources }}
+          resources: {{- toYaml .Values.resources | nindent 12 }}
+          {{- end }}
+          {{- if .Values.securityContext.enabled }}
+          securityContext:
+            runAsUser: {{ .Values.securityContext.runAsUser }}
+          {{- end }}
+          env:
+            - name: BITNAMI_DEBUG
+              value: {{ ternary "true" "false" .Values.image.debug | quote }}
+            - name: POSTGRESQL_PORT_NUMBER
+              value: "{{ template "postgresql.port" . }}"
+            - name: POSTGRESQL_VOLUME_DIR
+              value: "{{ .Values.persistence.mountPath }}"
+            {{- if .Values.postgresqlInitdbArgs }}
+            - name: POSTGRES_INITDB_ARGS
+              value: {{ .Values.postgresqlInitdbArgs | quote }}
+            {{- end }}
+            {{- if .Values.postgresqlInitdbWalDir }}
+            - name: POSTGRES_INITDB_WALDIR
+              value: {{ .Values.postgresqlInitdbWalDir | quote }}
+            {{- end }}
+            {{- if .Values.initdbUser }}
+            - name: POSTGRESQL_INITSCRIPTS_USERNAME
+              value: {{ .Values.initdbUser }}
+            {{- end }}
+            {{- if .Values.initdbPassword }}
+            - name: POSTGRESQL_INITSCRIPTS_PASSWORD
+              value: {{ .Values.initdbPassword | quote }}
+            {{- end }}
+            {{- if .Values.persistence.mountPath }}
+            - name: PGDATA
+              value: {{ .Values.postgresqlDataDir | quote }}
+            {{- end }}
+            {{- if .Values.replication.enabled }}
+            - name: POSTGRES_REPLICATION_MODE
+              value: "master"
+            - name: POSTGRES_REPLICATION_USER
+              value: {{ include "postgresql.replication.username" . | quote }}
+            {{- if .Values.usePasswordFile }}
+            - name: POSTGRES_REPLICATION_PASSWORD_FILE
+              value: "/opt/bitnami/postgresql/secrets/postgresql-replication-password"
+            {{- else }}
+            - name: POSTGRES_REPLICATION_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ template "postgresql.secretName" .
}} + key: postgresql-replication-password + {{- end }} + {{- if not (eq .Values.replication.synchronousCommit "off")}} + - name: POSTGRES_SYNCHRONOUS_COMMIT_MODE + value: {{ .Values.replication.synchronousCommit | quote }} + - name: POSTGRES_NUM_SYNCHRONOUS_REPLICAS + value: {{ .Values.replication.numSynchronousReplicas | quote }} + {{- end }} + - name: POSTGRES_CLUSTER_APP_NAME + value: {{ .Values.replication.applicationName }} + {{- end }} + - name: POSTGRES_USER + value: {{ include "postgresql.username" . | quote }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-password" + {{- else }} + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-password + {{- end }} + {{- if (include "postgresql.database" .) }} + - name: POSTGRES_DB + value: {{ (include "postgresql.database" .) | quote }} + {{- end }} + {{- if .Values.extraEnv }} + {{- include "postgresql.tplValue" (dict "value" .Values.extraEnv "context" $) | nindent 12 }} + {{- end }} + ports: + - name: postgresql + containerPort: {{ template "postgresql.port" . }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - /bin/sh + - -c + {{- if (include "postgresql.database" .) }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} -d {{ (include "postgresql.database" .) | quote }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} + {{- else }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} + {{- end }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - /bin/sh + - -c + - -e + {{- include "postgresql.readinessProbeCommand" . 
| nindent 16 }} + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + {{- if or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d/ + {{- end }} + {{- if .Values.initdbScriptsSecret }} + - name: custom-init-scripts-secret + mountPath: /docker-entrypoint-initdb.d/secret + {{- end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + mountPath: /bitnami/postgresql/conf/conf.d/ + {{- end }} + {{- if .Values.usePasswordFile }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }} + - name: postgresql-config + mountPath: /bitnami/postgresql/conf + {{- end }} + {{- if .Values.master.extraVolumeMounts }} + {{- toYaml .Values.master.extraVolumeMounts | nindent 12 }} + {{- end }} +{{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "postgresql.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.metrics.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.metrics.securityContext.runAsUser }} + {{- end }} + env: + {{- $database := required "In order to enable metrics you need to specify a database (.Values.postgresqlDatabase or .Values.global.postgresql.postgresqlDatabase)" (include "postgresql.database" .) }} + - name: DATA_SOURCE_URI + value: {{ printf "127.0.0.1:%d/%s?sslmode=disable" (int (include "postgresql.port" .)) $database | quote }} + {{- if .Values.usePasswordFile }} + - name: DATA_SOURCE_PASS_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-password" + {{- else }} + - name: DATA_SOURCE_PASS + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-password + {{- end }} + - name: DATA_SOURCE_USER + value: {{ template "postgresql.username" . 
}} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: / + port: metrics + initialDelaySeconds: {{ .Values.metrics.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.metrics.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.metrics.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: / + port: metrics + initialDelaySeconds: {{ .Values.metrics.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.metrics.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.metrics.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + ports: + - name: metrics + containerPort: 9187 + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} +{{- end }} + volumes: + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap}} + - name: postgresql-config + configMap: + name: {{ template "postgresql.configurationCM" . }} + {{- end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + configMap: + name: {{ template "postgresql.extendedConfigurationCM" . }} + {{- end }} + {{- if .Values.usePasswordFile }} + - name: postgresql-password + secret: + secretName: {{ template "postgresql.secretName" . }} + {{- end }} + {{- if or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + configMap: + name: {{ template "postgresql.initdbScriptsCM" . }} + {{- end }} + {{- if .Values.initdbScriptsSecret }} + - name: custom-init-scripts-secret + secret: + secretName: {{ template "postgresql.initdbScriptsSecret" . }} + {{- end }} + {{- if .Values.master.extraVolumes }} + {{- toYaml .Values.master.extraVolumes | nindent 8 }} + {{- end }} +{{- if and .Values.persistence.enabled .Values.persistence.existingClaim }} + - name: data + persistentVolumeClaim: +{{- with .Values.persistence.existingClaim }} + claimName: {{ tpl . $ }} +{{- end }} +{{- else if not .Values.persistence.enabled }} + - name: data + emptyDir: {} +{{- else if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }} + volumeClaimTemplates: + - metadata: + name: data + {{- with .Values.persistence.annotations }} + annotations: + {{- range $key, $value := . }} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "postgresql.storageClass" . 
}} +{{- end }} diff --git a/Openshift4/artifactoryha-helm/charts/postgresql/templates/svc-headless.yaml b/Openshift4/artifactoryha-helm/charts/postgresql/templates/svc-headless.yaml new file mode 100755 index 0000000..0bc60be --- /dev/null +++ b/Openshift4/artifactoryha-helm/charts/postgresql/templates/svc-headless.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }}-headless + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + type: ClusterIP + clusterIP: None + ports: + - name: postgresql + port: {{ template "postgresql.port" . }} + targetPort: postgresql + selector: + app: {{ template "postgresql.name" . }} + release: {{ .Release.Name | quote }} diff --git a/Openshift4/artifactoryha-helm/charts/postgresql/templates/svc-read.yaml b/Openshift4/artifactoryha-helm/charts/postgresql/templates/svc-read.yaml new file mode 100755 index 0000000..17bda04 --- /dev/null +++ b/Openshift4/artifactoryha-helm/charts/postgresql/templates/svc-read.yaml @@ -0,0 +1,31 @@ +{{- if .Values.replication.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }}-read + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +{{- with .Values.service.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: + type: {{ .Values.service.type }} + {{- if and .Values.service.loadBalancerIP (eq .Values.service.type "LoadBalancer") }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + ports: + - name: postgresql + port: {{ template "postgresql.port" . }} + targetPort: postgresql + {{- if .Values.service.nodePort }} + nodePort: {{ .Values.service.nodePort }} + {{- end }} + selector: + app: {{ template "postgresql.name" . }} + release: {{ .Release.Name | quote }} + role: slave +{{- end }} diff --git a/Openshift4/artifactoryha-helm/charts/postgresql/templates/svc.yaml b/Openshift4/artifactoryha-helm/charts/postgresql/templates/svc.yaml new file mode 100755 index 0000000..3b880b7 --- /dev/null +++ b/Openshift4/artifactoryha-helm/charts/postgresql/templates/svc.yaml @@ -0,0 +1,38 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +{{- with .Values.service.annotations }} + annotations: +{{ tpl (toYaml .) $ | indent 4 }} +{{- end }} +spec: + type: {{ .Values.service.type }} + {{- if and .Values.service.loadBalancerIP (eq .Values.service.type "LoadBalancer") }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.service.type "LoadBalancer") .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{ with .Values.service.loadBalancerSourceRanges }} +{{ toYaml . | indent 4 }} +{{- end }} + {{- end }} + {{- if and (eq .Values.service.type "ClusterIP") .Values.service.clusterIP }} + clusterIP: {{ .Values.service.clusterIP }} + {{- end }} + ports: + - name: postgresql + port: {{ template "postgresql.port" . }} + targetPort: postgresql + {{- if .Values.service.nodePort }} + nodePort: {{ .Values.service.nodePort }} + {{- end }} + selector: + app: {{ template "postgresql.name" . 
}} + release: {{ .Release.Name | quote }} + role: master diff --git a/Openshift4/artifactoryha-helm/charts/postgresql/values-production.yaml b/Openshift4/artifactoryha-helm/charts/postgresql/values-production.yaml new file mode 100755 index 0000000..353848a --- /dev/null +++ b/Openshift4/artifactoryha-helm/charts/postgresql/values-production.yaml @@ -0,0 +1,392 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: + postgresql: {} +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Bitnami PostgreSQL image version +## ref: https://hub.docker.com/r/bitnami/postgresql/tags/ +## +image: + registry: docker.io + repository: bitnami/postgresql + tag: 11.5.0-debian-9-r84 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Set to true if you would like to see extra information on logs + ## It turns BASH and NAMI debugging in minideb + ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging + debug: false + +## String to partially override postgresql.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override postgresql.fullname template +## +# fullnameOverride: + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: true + image: + registry: docker.io + repository: bitnami/minideb + tag: stretch + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Init container Security Context + securityContext: + runAsUser: 0 + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Pod Service Account +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +serviceAccount: + enabled: false + ## Name of an already existing service account. Setting this value disables the automatic service account creation. 
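+## Illustrative sketch only (an assumption, not an upstream default): on OpenShift the
+## chart is typically pointed at a pre-created service account that already carries the
+## required SCCs, for example the "svcaccount" created by helminstall.sh later in this
+## patch. Uncomment and adjust the name to match your cluster.
+# serviceAccount:
+#   enabled: true
+#   name: svcaccount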
+ # name: + +replication: + enabled: true + user: repl_user + password: repl_password + slaveReplicas: 2 + ## Set synchronous commit mode: on, off, remote_apply, remote_write and local + ## ref: https://www.postgresql.org/docs/9.6/runtime-config-wal.html#GUC-WAL-LEVEL + synchronousCommit: "on" + ## From the number of `slaveReplicas` defined above, set the number of those that will have synchronous replication + ## NOTE: It cannot be > slaveReplicas + numSynchronousReplicas: 1 + ## Replication Cluster application name. Useful for defining multiple replication policies + applicationName: my_application + +## PostgreSQL admin user +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +postgresqlUsername: postgres + +## PostgreSQL password +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +## +# postgresqlPassword: + +## PostgreSQL password using existing secret +## existingSecret: secret + +## Mount PostgreSQL secret as a file instead of passing environment variable +# usePasswordFile: false + +## Create a database +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run +## +# postgresqlDatabase: + +## PostgreSQL data dir +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +postgresqlDataDir: /bitnami/postgresql/data + +## Specify extra initdb args +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbArgs: + +## Specify a custom location for the PostgreSQL transaction log +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbWalDir: + +## PostgreSQL configuration +## Specify runtime configuration parameters as a dict, using camelCase, e.g. +## {"sharedBuffers": "500MB"} +## Alternatively, you can put your postgresql.conf under the files/ directory +## ref: https://www.postgresql.org/docs/current/static/runtime-config.html +## +# postgresqlConfiguration: + +## PostgreSQL extended configuration +## As above, but _appended_ to the main configuration +## Alternatively, you can put your *.conf under the files/conf.d/ directory +## https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf +## +# postgresqlExtendedConf: + +## PostgreSQL client authentication configuration +## Specify content for pg_hba.conf +## Default: do not create pg_hba.conf +## Alternatively, you can put your pg_hba.conf under the files/ directory +# pgHbaConfiguration: |- +# local all all trust +# host all all localhost trust +# host mydatabase mysuser 192.168.0.0/24 md5 + +## ConfigMap with PostgreSQL configuration +## NOTE: This will override postgresqlConfiguration and pgHbaConfiguration +# configurationConfigMap: + +## ConfigMap with PostgreSQL extended configuration +# extendedConfConfigMap: + +## initdb scripts +## Specify dictionary of scripts to be run at first boot +## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory +## +# initdbScripts: +# my_init_script.sh: | +# #!/bin/sh +# echo "Do something." 
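+## Illustrative sketch only (not an upstream default): the initdbScripts map above could
+## be used to prepare a database for Artifactory on first boot. The database name, user
+## and password below are placeholders -- adjust them before use.
+# initdbScripts:
+#   create_artifactory_db.sql: |
+#     CREATE USER artifactory WITH ENCRYPTED PASSWORD 'changeme';
+#     CREATE DATABASE artifactory OWNER artifactory;
+#     GRANT ALL PRIVILEGES ON DATABASE artifactory TO artifactory;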
+ +## Specify the PostgreSQL username and password to execute the initdb scripts +# initdbUser: +# initdbPassword: + +## ConfigMap with scripts to be run at first boot +## NOTE: This will override initdbScripts +# initdbScriptsConfigMap: + +## Secret with scripts to be run at first boot (in case it contains sensitive information) +## NOTE: This can work along initdbScripts or initdbScriptsConfigMap +# initdbScriptsSecret: + +## Optional duration in seconds the pod needs to terminate gracefully. +## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods +## +# terminationGracePeriodSeconds: 30 + +## PostgreSQL service configuration +service: + ## PosgresSQL service type + type: ClusterIP + # clusterIP: None + port: 5432 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + annotations: {} + ## Set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + + ## Load Balancer sources + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + # loadBalancerSourceRanges: + # - 10.10.10.0/24 + +## PostgreSQL data Persistent Volume Storage Class +## If defined, storageClassName: +## If set to "-", storageClassName: "", which disables dynamic provisioning +## If undefined (the default) or set to null, no storageClassName spec is +## set, choosing the default provisioner. (gp2 on AWS, standard on +## GKE, AWS & OpenStack) +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template, so, for example, the name can depend on .Release or .Chart + ## + # existingClaim: + + ## The path the volume will be mounted at, useful when using different + ## PostgreSQL images. + ## + mountPath: /bitnami/postgresql + + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. 
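+## Illustrative sketch only (the claim name below is a placeholder): to reuse a manually
+## created PVC instead of letting the chart request one, keep persistence enabled and set
+## existingClaim.
+# persistence:
+#   enabled: true
+#   existingClaim: postgresql-data-claim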
+ ## + subPath: "" + + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + annotations: {} + +## updateStrategy for PostgreSQL StatefulSet and its slaves StatefulSets +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +updateStrategy: + type: RollingUpdate + +## +## PostgreSQL Master parameters +## +master: + ## Node, affinity and tolerations labels for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + ## Additional PostgreSQL Master Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Master Volumes + ## + extraVolumes: [] + +## +## PostgreSQL Slave parameters +## +slave: + ## Node, affinity and tolerations labels for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + ## Additional PostgreSQL Slave Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Slave Volumes + ## + extraVolumes: [] + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + requests: + memory: 256Mi + cpu: 250m + +networkPolicy: + ## Enable creation of NetworkPolicy resources. + ## + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port PostgreSQL is listening + ## on. When true, PostgreSQL will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## Configure metrics exporter +## +metrics: + enabled: true + # resources: {} + service: + type: ClusterIP + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9187" + loadBalancerIP: + serviceMonitor: + enabled: false + additionalLabels: {} + # namespace: monitoring + # interval: 30s + # scrapeTimeout: 10s + image: + registry: docker.io + repository: bitnami/postgres-exporter + tag: 0.6.0-debian-9-r0 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
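+## Illustrative sketch only (an assumption, not an upstream default): with the production
+## profile running two slave replicas, pod anti-affinity can spread them across nodes. The
+## label selector below relies on the chart's standard "app" label; verify it against your
+## rendered manifests before enabling.
+# slave:
+#   affinity:
+#     podAntiAffinity:
+#       preferredDuringSchedulingIgnoredDuringExecution:
+#         - weight: 100
+#           podAffinityTerm:
+#             topologyKey: kubernetes.io/hostname
+#             labelSelector:
+#               matchLabels:
+#                 app: postgresql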
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## + securityContext: + enabled: false + runAsUser: 1001 + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## Configure extra options for liveness and readiness probes + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +# Define custom environment variables to pass to the image here +extraEnv: [] diff --git a/Openshift4/artifactoryha-helm/charts/postgresql/values.schema.json b/Openshift4/artifactoryha-helm/charts/postgresql/values.schema.json new file mode 100755 index 0000000..ac2de6e --- /dev/null +++ b/Openshift4/artifactoryha-helm/charts/postgresql/values.schema.json @@ -0,0 +1,103 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "postgresqlUsername": { + "type": "string", + "title": "Admin user", + "form": true + }, + "postgresqlPassword": { + "type": "string", + "title": "Password", + "form": true + }, + "persistence": { + "type": "object", + "properties": { + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi" + } + } + }, + "resources": { + "type": "object", + "title": "Required Resources", + "description": "Configure resource requests", + "form": true, + "properties": { + "requests": { + "type": "object", + "properties": { + "memory": { + "type": "string", + "form": true, + "render": "slider", + "title": "Memory Request", + "sliderMin": 10, + "sliderMax": 2048, + "sliderUnit": "Mi" + }, + "cpu": { + "type": "string", + "form": true, + "render": "slider", + "title": "CPU Request", + "sliderMin": 10, + "sliderMax": 2000, + "sliderUnit": "m" + } + } + } + } + }, + "replication": { + "type": "object", + "form": true, + "title": "Replication Details", + "properties": { + "enabled": { + "type": "boolean", + "title": "Enable Replication", + "form": true + }, + "slaveReplicas": { + "type": "integer", + "title": "Slave Replicas", + "form": true, + "hidden": { + "condition": false, + "value": "replication.enabled" + } + } + } + }, + "volumePermissions": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable Init Containers", + "description": "Change the owner of the persist volume mountpoint to RunAsUser:fsGroup" + } + } + }, + "metrics": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "title": "Configure metrics exporter", + "form": true + } + } + } + } +} diff --git a/Openshift4/artifactoryha-helm/charts/postgresql/values.yaml b/Openshift4/artifactoryha-helm/charts/postgresql/values.yaml new file mode 100755 index 0000000..e13d0a7 --- /dev/null +++ b/Openshift4/artifactoryha-helm/charts/postgresql/values.yaml @@ -0,0 +1,392 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: + 
postgresql: {} +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Bitnami PostgreSQL image version +## ref: https://hub.docker.com/r/bitnami/postgresql/tags/ +## +image: + registry: docker.io + repository: bitnami/postgresql + tag: 11.5.0-debian-9-r84 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Set to true if you would like to see extra information on logs + ## It turns BASH and NAMI debugging in minideb + ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging + debug: false + +## String to partially override postgresql.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override postgresql.fullname template +## +# fullnameOverride: + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: true + image: + registry: docker.io + repository: bitnami/minideb + tag: stretch + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Init container Security Context + securityContext: + runAsUser: 0 + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Pod Service Account +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +serviceAccount: + enabled: false + ## Name of an already existing service account. Setting this value disables the automatic service account creation. + # name: + +replication: + enabled: false + user: repl_user + password: repl_password + slaveReplicas: 1 + ## Set synchronous commit mode: on, off, remote_apply, remote_write and local + ## ref: https://www.postgresql.org/docs/9.6/runtime-config-wal.html#GUC-WAL-LEVEL + synchronousCommit: "off" + ## From the number of `slaveReplicas` defined above, set the number of those that will have synchronous replication + ## NOTE: It cannot be > slaveReplicas + numSynchronousReplicas: 0 + ## Replication Cluster application name. 
Useful for defining multiple replication policies + applicationName: my_application + +## PostgreSQL admin user +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +postgresqlUsername: postgres + +## PostgreSQL password +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +## +# postgresqlPassword: + +## PostgreSQL password using existing secret +## existingSecret: secret + +## Mount PostgreSQL secret as a file instead of passing environment variable +# usePasswordFile: false + +## Create a database +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run +## +# postgresqlDatabase: + +## PostgreSQL data dir +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +postgresqlDataDir: /bitnami/postgresql/data + +## Specify extra initdb args +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbArgs: + +## Specify a custom location for the PostgreSQL transaction log +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbWalDir: + +## PostgreSQL configuration +## Specify runtime configuration parameters as a dict, using camelCase, e.g. +## {"sharedBuffers": "500MB"} +## Alternatively, you can put your postgresql.conf under the files/ directory +## ref: https://www.postgresql.org/docs/current/static/runtime-config.html +## +# postgresqlConfiguration: + +## PostgreSQL extended configuration +## As above, but _appended_ to the main configuration +## Alternatively, you can put your *.conf under the files/conf.d/ directory +## https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf +## +# postgresqlExtendedConf: + +## PostgreSQL client authentication configuration +## Specify content for pg_hba.conf +## Default: do not create pg_hba.conf +## Alternatively, you can put your pg_hba.conf under the files/ directory +# pgHbaConfiguration: |- +# local all all trust +# host all all localhost trust +# host mydatabase mysuser 192.168.0.0/24 md5 + +## ConfigMap with PostgreSQL configuration +## NOTE: This will override postgresqlConfiguration and pgHbaConfiguration +# configurationConfigMap: + +## ConfigMap with PostgreSQL extended configuration +# extendedConfConfigMap: + +## initdb scripts +## Specify dictionary of scripts to be run at first boot +## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory +## +# initdbScripts: +# my_init_script.sh: | +# #!/bin/sh +# echo "Do something." + +## ConfigMap with scripts to be run at first boot +## NOTE: This will override initdbScripts +# initdbScriptsConfigMap: + +## Secret with scripts to be run at first boot (in case it contains sensitive information) +## NOTE: This can work along initdbScripts or initdbScriptsConfigMap +# initdbScriptsSecret: + +## Specify the PostgreSQL username and password to execute the initdb scripts +# initdbUser: +# initdbPassword: + +## Optional duration in seconds the pod needs to terminate gracefully. 
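+## Illustrative sketch only (the numbers are placeholders): server settings can be
+## overridden through postgresqlConfiguration. Keys are written in camelCase and the
+## chart's configmap template renders them in snake_case, so the entries below become
+## max_connections and shared_buffers in the generated postgresql.conf.
+# postgresqlConfiguration:
+#   maxConnections: "200"
+#   sharedBuffers: "256MB"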
+## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods +## +# terminationGracePeriodSeconds: 30 + +## PostgreSQL service configuration +service: + ## PosgresSQL service type + type: ClusterIP + # clusterIP: None + port: 5432 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + annotations: {} + ## Set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + + ## Load Balancer sources + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + # loadBalancerSourceRanges: + # - 10.10.10.0/24 + +## PostgreSQL data Persistent Volume Storage Class +## If defined, storageClassName: +## If set to "-", storageClassName: "", which disables dynamic provisioning +## If undefined (the default) or set to null, no storageClassName spec is +## set, choosing the default provisioner. (gp2 on AWS, standard on +## GKE, AWS & OpenStack) +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template, so, for example, the name can depend on .Release or .Chart + ## + # existingClaim: + + ## The path the volume will be mounted at, useful when using different + ## PostgreSQL images. + ## + mountPath: /bitnami/postgresql + + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. 
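+## Illustrative sketch only (an assumption about your cluster): to bind against statically
+## provisioned volumes such as the hostPath PVs under pv-examples/, dynamic provisioning
+## can be disabled by setting storageClass to "-", which renders an empty storageClassName
+## on the PVC.
+# persistence:
+#   storageClass: "-"
+#   size: 8Gi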
+ ## + subPath: "" + + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + annotations: {} + +## updateStrategy for PostgreSQL StatefulSet and its slaves StatefulSets +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +updateStrategy: + type: RollingUpdate + +## +## PostgreSQL Master parameters +## +master: + ## Node, affinity and tolerations labels for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + ## Additional PostgreSQL Master Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Master Volumes + ## + extraVolumes: [] + +## +## PostgreSQL Slave parameters +## +slave: + ## Node, affinity and tolerations labels for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + ## Additional PostgreSQL Slave Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Slave Volumes + ## + extraVolumes: [] + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + requests: + memory: 256Mi + cpu: 250m + +networkPolicy: + ## Enable creation of NetworkPolicy resources. + ## + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port PostgreSQL is listening + ## on. When true, PostgreSQL will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## Configure metrics exporter +## +metrics: + enabled: false + # resources: {} + service: + type: ClusterIP + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9187" + loadBalancerIP: + serviceMonitor: + enabled: false + additionalLabels: {} + # namespace: monitoring + # interval: 30s + # scrapeTimeout: 10s + image: + registry: docker.io + repository: bitnami/postgres-exporter + tag: 0.6.0-debian-9-r0 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
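+## Illustrative sketch only (assumes the Prometheus Operator is installed; the namespace
+## and interval are placeholders): enabling the exporter together with a ServiceMonitor so
+## the metrics Service above is scraped automatically.
+# metrics:
+#   enabled: true
+#   serviceMonitor:
+#     enabled: true
+#     namespace: monitoring
+#     interval: 30s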
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## + securityContext: + enabled: false + runAsUser: 1001 + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## Configure extra options for liveness and readiness probes + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +# Define custom environment variables to pass to the image here +extraEnv: [] diff --git a/Openshift4/artifactoryha-helm/helminstall.sh b/Openshift4/artifactoryha-helm/helminstall.sh new file mode 100755 index 0000000..6f0132d --- /dev/null +++ b/Openshift4/artifactoryha-helm/helminstall.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash + +if [ -z "$1" ]; then echo "Skipping creation of persistent volume examples. Ensure there are 200Gi PVs available per node for HA."; else oc create -f pv-examples/; fi + +oc new-project jfrog-artifactory +oc create serviceaccount svcaccount -n jfrog-artifactory +oc adm policy add-scc-to-user privileged system:serviceaccount:jfrog-artifactory:svcaccount +oc adm policy add-scc-to-user anyuid system:serviceaccount:jfrog-artifactory:svcaccount +oc adm policy add-scc-to-group anyuid system:authenticated + +# enables the hostPath plugin for OpenShift system-wide +oc create -f scc.yaml -n jfrog-artifactory +oc patch securitycontextconstraints.security.openshift.io/hostpath --type=merge --patch='{"allowHostDirVolumePlugin": true}' +oc adm policy add-scc-to-user hostpath system:serviceaccount:jfrog-artifactory:svcaccount + +# create the license secret +oc create secret generic artifactory-license --from-file=./artifactory.cluster.license + +# install via helm +helm install .
--generate-name \ + --set artifactory.node.replicaCount=1 \ + --set nginx.service.type=NodePort \ + --set artifactory.license.secret=artifactory-license,artifactory.license.dataKey=artifactory.cluster.license diff --git a/Openshift4/artifactoryha-helm/pv-examples/pv0001-large.yaml b/Openshift4/artifactoryha-helm/pv-examples/pv0001-large.yaml new file mode 100644 index 0000000..8a36385 --- /dev/null +++ b/Openshift4/artifactoryha-helm/pv-examples/pv0001-large.yaml @@ -0,0 +1,15 @@ +kind: PersistentVolume +apiVersion: v1 +metadata: + name: pv0001-large +spec: + capacity: + storage: 200Gi + hostPath: + path: /mnt/pv-data/pv0001-large + accessModes: + - ReadWriteOnce + - ReadWriteMany + - ReadOnlyMany + persistentVolumeReclaimPolicy: Recycle + volumeMode: Filesystem diff --git a/Openshift4/artifactoryha-helm/pv-examples/pv0002-large.yaml b/Openshift4/artifactoryha-helm/pv-examples/pv0002-large.yaml new file mode 100644 index 0000000..b96fa47 --- /dev/null +++ b/Openshift4/artifactoryha-helm/pv-examples/pv0002-large.yaml @@ -0,0 +1,15 @@ +kind: PersistentVolume +apiVersion: v1 +metadata: + name: pv0002-large +spec: + capacity: + storage: 200Gi + hostPath: + path: /mnt/pv-data/pv0002-large + accessModes: + - ReadWriteOnce + - ReadWriteMany + - ReadOnlyMany + persistentVolumeReclaimPolicy: Recycle + volumeMode: Filesystem diff --git a/Openshift4/artifactoryha-helm/pv-examples/pv0003-large.yaml b/Openshift4/artifactoryha-helm/pv-examples/pv0003-large.yaml new file mode 100644 index 0000000..476ad41 --- /dev/null +++ b/Openshift4/artifactoryha-helm/pv-examples/pv0003-large.yaml @@ -0,0 +1,15 @@ +kind: PersistentVolume +apiVersion: v1 +metadata: + name: pv0003-large +spec: + capacity: + storage: 200Gi + hostPath: + path: /mnt/pv-data/pv0003-large + accessModes: + - ReadWriteOnce + - ReadWriteMany + - ReadOnlyMany + persistentVolumeReclaimPolicy: Recycle + volumeMode: Filesystem diff --git a/Openshift4/artifactoryha-helm/pv-examples/pv0004-large.yaml b/Openshift4/artifactoryha-helm/pv-examples/pv0004-large.yaml new file mode 100644 index 0000000..ae2fbda --- /dev/null +++ b/Openshift4/artifactoryha-helm/pv-examples/pv0004-large.yaml @@ -0,0 +1,15 @@ +kind: PersistentVolume +apiVersion: v1 +metadata: + name: pv0004-large +spec: + capacity: + storage: 200Gi + hostPath: + path: /mnt/pv-data/pv0004-large + accessModes: + - ReadWriteOnce + - ReadWriteMany + - ReadOnlyMany + persistentVolumeReclaimPolicy: Recycle + volumeMode: Filesystem diff --git a/Openshift4/artifactoryha-helm/pv-examples/pv0005-large.yaml b/Openshift4/artifactoryha-helm/pv-examples/pv0005-large.yaml new file mode 100644 index 0000000..9488514 --- /dev/null +++ b/Openshift4/artifactoryha-helm/pv-examples/pv0005-large.yaml @@ -0,0 +1,15 @@ +kind: PersistentVolume +apiVersion: v1 +metadata: + name: pv0005-large +spec: + capacity: + storage: 200Gi + hostPath: + path: /mnt/pv-data/pv0005-large + accessModes: + - ReadWriteOnce + - ReadWriteMany + - ReadOnlyMany + persistentVolumeReclaimPolicy: Recycle + volumeMode: Filesystem diff --git a/Openshift4/artifactoryha-helm/requirements.lock b/Openshift4/artifactoryha-helm/requirements.lock new file mode 100755 index 0000000..7037cff --- /dev/null +++ b/Openshift4/artifactoryha-helm/requirements.lock @@ -0,0 +1,6 @@ +dependencies: +- name: postgresql + repository: https://kubernetes-charts.storage.googleapis.com/ + version: 7.0.1 +digest: sha256:dcdafe9ab91ccf0e5883e2b5dd9ba13e82190b5e16e6dee6d39fd16a04123ce8 +generated: 2019-11-10T13:12:29.836238+02:00 diff --git 
a/Openshift4/artifactoryha-helm/requirements.yaml b/Openshift4/artifactoryha-helm/requirements.yaml new file mode 100755 index 0000000..756a19c --- /dev/null +++ b/Openshift4/artifactoryha-helm/requirements.yaml @@ -0,0 +1,5 @@ +dependencies: + - name: postgresql + version: 7.0.1 + repository: https://kubernetes-charts.storage.googleapis.com/ + condition: postgresql.enabled diff --git a/Openshift4/artifactoryha-helm/scc.yaml b/Openshift4/artifactoryha-helm/scc.yaml new file mode 100644 index 0000000..13eef79 --- /dev/null +++ b/Openshift4/artifactoryha-helm/scc.yaml @@ -0,0 +1,18 @@ +kind: SecurityContextConstraints +apiVersion: v1 +metadata: + name: hostpath +allowPrivilegedContainer: false +runAsUser: + type: RunAsAny +seLinuxContext: + type: RunAsAny +fsGroup: + type: RunAsAny +supplementalGroups: + type: RunAsAny +users: +- artifactory +groups: +- artifactory +- jfrog-artifactory diff --git a/Openshift4/artifactoryha-helm/templates/NOTES.txt b/Openshift4/artifactoryha-helm/templates/NOTES.txt new file mode 100755 index 0000000..d08fa88 --- /dev/null +++ b/Openshift4/artifactoryha-helm/templates/NOTES.txt @@ -0,0 +1,113 @@ +Congratulations. You have just deployed JFrog Artifactory HA! + +{{- if and (not .Values.artifactory.masterKeySecretName) (eq .Values.artifactory.masterKey "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF") }} + + +***************************************** WARNING ****************************************** +* Your Artifactory master key is still set to the provided example: * +* artifactory.masterKey=FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF * +* * +* You should change this to your own generated key: * +* $ export MASTER_KEY=$(openssl rand -hex 32) * +* $ echo ${MASTER_KEY} * +* * +* Pass the created master key to helm with '--set artifactory.masterKey=${MASTER_KEY}' * +* * +* Alternatively, you can use a pre-existing secret with a key called master-key with * +* '--set artifactory.masterKeySecretName=${SECRET_NAME}' * +******************************************************************************************** +{{- end }} + +{{ if eq .Values.artifactory.joinKey "EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE" }} + + +***************************************** WARNING ****************************************** +* Your Artifactory join key is still set to the provided example: * +* artifactory.joinKey=EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE * +* * +* You should change this to your own generated key: * +* $ export JOIN_KEY=$(openssl rand -hex 16) * +* $ echo ${JOIN_KEY} * +* * +* Pass the created master key to helm with '--set artifactory.joinKey=${JOIN_KEY}' * +* * +******************************************************************************************** +{{- end }} + +{{- if .Values.postgresql.enabled }} + +DATABASE: +To extract the database password, run the following +export DB_PASSWORD=$(kubectl get --namespace {{ .Release.Namespace }} $(kubectl get secret --namespace {{ .Release.Namespace }} -o name | grep postgresql) -o jsonpath="{.data.postgresql-password}" | base64 --decode) +echo ${DB_PASSWORD} +{{- end }} + +SETUP: +1. Get the Artifactory IP and URL + + {{- if contains "NodePort" .Values.nginx.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "artifactory-ha.nginx.fullname" . 
}}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT/ + + {{- else if contains "LoadBalancer" .Values.nginx.service.type }} + NOTE: It may take a few minutes for the LoadBalancer public IP to be available! + + You can watch the status of the service by running 'kubectl get svc -w {{ template "artifactory-ha.nginx.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "artifactory-ha.nginx.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP/ + + {{- else if contains "ClusterIP" .Values.nginx.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "component={{ .Values.nginx.name }}" -o jsonpath="{.items[0].metadata.name}") + kubectl port-forward --namespace {{ .Release.Namespace }} $POD_NAME 8080:80 + echo http://127.0.0.1:8080 + + {{- end }} + +2. Open Artifactory in your browser + Default credentials for Artifactory: + user: admin + password: password + + {{- if .Values.artifactory.license.secret }} + +3. Manage Artifactory license through the {{ .Values.artifactory.license.secret }} secret ONLY! + Since the Artifactory license(s) are managed with a secret ({{ .Values.artifactory.license.secret }}), any change through the Artifactory UI might not be saved! + + {{- else }} + +3. Add HA licenses to activate Artifactory HA through the Artifactory UI + NOTE: Each Artifactory node requires a valid license. See https://www.jfrog.com/confluence/display/RTF/HA+Installation+and+Setup for more details. + + {{- end }} + +{{ if or .Values.artifactory.primary.javaOpts.jmx.enabled .Values.artifactory.node.javaOpts.jmx.enabled }} +JMX configuration: +{{- if not (contains "LoadBalancer" .Values.artifactory.service.type) }} +If you want to access JMX from your computer with jconsole, you should set ".Values.artifactory.service.type=LoadBalancer" !!! +{{ end }} + +1. Get the Artifactory service IP: +{{- if .Values.artifactory.primary.javaOpts.jmx.enabled }} +export PRIMARY_SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "artifactory-ha.primary.name" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') +{{- end }} +{{- if .Values.artifactory.node.javaOpts.jmx.enabled }} +export MEMBER_SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "artifactory-ha.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') +{{- end }} + +2. Map the service name to the service IP in /etc/hosts: +{{- if .Values.artifactory.primary.javaOpts.jmx.enabled }} +sudo sh -c "echo \"${PRIMARY_SERVICE_IP} {{ template "artifactory-ha.primary.name" . }}\" >> /etc/hosts" +{{- end }} +{{- if .Values.artifactory.node.javaOpts.jmx.enabled }} +sudo sh -c "echo \"${MEMBER_SERVICE_IP} {{ template "artifactory-ha.fullname" . }}\" >> /etc/hosts" +{{- end }} + +3. Launch jconsole: +{{- if .Values.artifactory.primary.javaOpts.jmx.enabled }} +jconsole {{ template "artifactory-ha.primary.name" . }}:{{ .Values.artifactory.primary.javaOpts.jmx.port }} +{{- end }} +{{- if .Values.artifactory.node.javaOpts.jmx.enabled }} +jconsole {{ template "artifactory-ha.fullname" .
}}:{{ .Values.artifactory.node.javaOpts.jmx.port }} +{{- end }} +{{- end }} diff --git a/Openshift4/artifactoryha-helm/templates/_helpers.tpl b/Openshift4/artifactoryha-helm/templates/_helpers.tpl new file mode 100755 index 0000000..32171c2 --- /dev/null +++ b/Openshift4/artifactoryha-helm/templates/_helpers.tpl @@ -0,0 +1,103 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "artifactory-ha.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +The primary node name +*/}} +{{- define "artifactory-ha.primary.name" -}} +{{- if .Values.nameOverride -}} +{{- printf "%s-primary" .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := .Release.Name | trunc 29 -}} +{{- printf "%s-%s-primary" $name .Chart.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +The member node name +*/}} +{{- define "artifactory-ha.node.name" -}} +{{- if .Values.nameOverride -}} +{{- printf "%s-member" .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := .Release.Name | trunc 29 -}} +{{- printf "%s-%s-member" $name .Chart.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Expand the name nginx service. +*/}} +{{- define "artifactory-ha.nginx.name" -}} +{{- default .Values.nginx.name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "artifactory-ha.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "artifactory-ha.nginx.fullname" -}} +{{- if .Values.nginx.fullnameOverride -}} +{{- .Values.nginx.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nginx.name -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "artifactory-ha.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} +{{ default (include "artifactory-ha.fullname" .) .Values.serviceAccount.name }} +{{- else -}} +{{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "artifactory-ha.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Generate SSL certificates +*/}} +{{- define "artifactory-ha.gen-certs" -}} +{{- $altNames := list ( printf "%s.%s" (include "artifactory-ha.name" .) 
.Release.Namespace ) ( printf "%s.%s.svc" (include "artifactory-ha.name" .) .Release.Namespace ) -}} +{{- $ca := genCA "artifactory-ca" 365 -}} +{{- $cert := genSignedCert ( include "artifactory-ha.name" . ) nil $altNames 365 $ca -}} +tls.crt: {{ $cert.Cert | b64enc }} +tls.key: {{ $cert.Key | b64enc }} +{{- end -}} diff --git a/Openshift4/artifactoryha-helm/templates/access-bootstrap-creds.yaml b/Openshift4/artifactoryha-helm/templates/access-bootstrap-creds.yaml new file mode 100755 index 0000000..f4e34a2 --- /dev/null +++ b/Openshift4/artifactoryha-helm/templates/access-bootstrap-creds.yaml @@ -0,0 +1,15 @@ +{{- if not (and .Values.artifactory.accessAdmin.secret .Values.artifactory.accessAdmin.dataKey) }} +{{- if .Values.artifactory.accessAdmin.password }} +kind: Secret +apiVersion: v1 +metadata: + name: {{ template "artifactory-ha.fullname" . }}-bootstrap-creds + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + bootstrap.creds: {{ (printf "access-admin@%s=%s" .Values.artifactory.accessAdmin.ip .Values.artifactory.accessAdmin.password) | b64enc }} +{{- end }} +{{- end }} diff --git a/Openshift4/artifactoryha-helm/templates/artifactory-binarystore-secret.yaml b/Openshift4/artifactoryha-helm/templates/artifactory-binarystore-secret.yaml new file mode 100755 index 0000000..2e7cac7 --- /dev/null +++ b/Openshift4/artifactoryha-helm/templates/artifactory-binarystore-secret.yaml @@ -0,0 +1,14 @@ +{{- if not .Values.artifactory.persistence.customBinarystoreXmlSecret }} +kind: Secret +apiVersion: v1 +metadata: + name: {{ template "artifactory-ha.fullname" . }}-binarystore + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +stringData: + binarystore.xml: |- +{{ tpl .Values.artifactory.persistence.binarystoreXml . | indent 4 }} +{{- end }} diff --git a/Openshift4/artifactoryha-helm/templates/artifactory-configmaps.yaml b/Openshift4/artifactoryha-helm/templates/artifactory-configmaps.yaml new file mode 100755 index 0000000..1385bc5 --- /dev/null +++ b/Openshift4/artifactoryha-helm/templates/artifactory-configmaps.yaml @@ -0,0 +1,13 @@ +{{ if .Values.artifactory.configMaps }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "artifactory-ha.fullname" . }}-configmaps + labels: + app: {{ template "artifactory-ha.fullname" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: +{{ tpl .Values.artifactory.configMaps . | indent 2 }} +{{ end -}} \ No newline at end of file diff --git a/Openshift4/artifactoryha-helm/templates/artifactory-installer-info.yaml b/Openshift4/artifactoryha-helm/templates/artifactory-installer-info.yaml new file mode 100755 index 0000000..0c9a4f4 --- /dev/null +++ b/Openshift4/artifactoryha-helm/templates/artifactory-installer-info.yaml @@ -0,0 +1,25 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: {{ template "artifactory-ha.fullname" . }}-installer-info + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + installer-info.json: | + { + "productId": "Helm_artifactory-ha/{{ .Chart.Version }}", + "features": [ + { + "featureId": "ArtifactoryVersion/{{ default .Chart.AppVersion .Values.artifactory.image.version }}" + }, + { + "featureId": "{{ if .Values.postgresql.enabled }}postgresql{{ else }}{{ default "derby" .Values.database.type }}{{ end }}/0.0.0" + }, + { + "featureId": "Platform/{{ default "kubernetes" .Values.installer.platform }}" + } + ] + } diff --git a/Openshift4/artifactoryha-helm/templates/artifactory-license-secret.yaml b/Openshift4/artifactoryha-helm/templates/artifactory-license-secret.yaml new file mode 100755 index 0000000..3f629c6 --- /dev/null +++ b/Openshift4/artifactoryha-helm/templates/artifactory-license-secret.yaml @@ -0,0 +1,14 @@ +{{- with .Values.artifactory.license.licenseKey }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "artifactory-ha.fullname" $ }}-license + labels: + app: {{ template "artifactory-ha.name" $ }} + chart: {{ template "artifactory-ha.chart" $ }} + heritage: {{ $.Release.Service }} + release: {{ $.Release.Name }} +type: Opaque +data: + artifactory.lic: {{ . | b64enc | quote }} +{{- end }} \ No newline at end of file diff --git a/Openshift4/artifactoryha-helm/templates/artifactory-networkpolicy.yaml b/Openshift4/artifactoryha-helm/templates/artifactory-networkpolicy.yaml new file mode 100755 index 0000000..371dc9a --- /dev/null +++ b/Openshift4/artifactoryha-helm/templates/artifactory-networkpolicy.yaml @@ -0,0 +1,34 @@ +{{- range .Values.networkpolicy }} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ template "artifactory-ha.fullname" $ }}-{{ .name }}-networkpolicy + labels: + app: {{ template "artifactory-ha.name" $ }} + chart: {{ template "artifactory-ha.chart" $ }} + release: {{ $.Release.Name }} + heritage: {{ $.Release.Service }} +spec: +{{- if .podSelector }} + podSelector: +{{ .podSelector | toYaml | trimSuffix "\n" | indent 4 -}} +{{ else }} + podSelector: {} +{{- end }} + policyTypes: + {{- if .ingress }} + - Ingress + {{- end }} + {{- if .egress }} + - Egress + {{- end }} +{{- if .ingress }} + ingress: +{{ .ingress | toYaml | trimSuffix "\n" | indent 2 -}} +{{- end }} +{{- if .egress }} + egress: +{{ .egress | toYaml | trimSuffix "\n" | indent 2 -}} +{{- end }} +--- +{{- end -}} \ No newline at end of file diff --git a/Openshift4/artifactoryha-helm/templates/artifactory-nfs-pvc.yaml b/Openshift4/artifactoryha-helm/templates/artifactory-nfs-pvc.yaml new file mode 100755 index 0000000..6ed7d82 --- /dev/null +++ b/Openshift4/artifactoryha-helm/templates/artifactory-nfs-pvc.yaml @@ -0,0 +1,101 @@ +{{- if eq .Values.artifactory.persistence.type "nfs" }} +### Artifactory HA data +apiVersion: v1 +kind: PersistentVolume +metadata: + name: {{ template "artifactory-ha.fullname" . }}-data-pv + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + id: {{ template "artifactory-ha.name" . 
}}-data-pv + type: nfs-volume +spec: + {{- if .Values.artifactory.persistence.nfs.mountOptions }} + mountOptions: +{{ toYaml .Values.artifactory.persistence.nfs.mountOptions | indent 4 }} + {{- end }} + capacity: + storage: {{ .Values.artifactory.persistence.nfs.capacity }} + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + nfs: + server: {{ .Values.artifactory.persistence.nfs.ip }} + path: "{{ .Values.artifactory.persistence.nfs.haDataMount }}" + readOnly: false +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ template "artifactory-ha.fullname" . }}-data-pvc + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + type: nfs-volume +spec: + accessModes: + - ReadWriteOnce + storageClassName: "" + resources: + requests: + storage: {{ .Values.artifactory.persistence.nfs.capacity }} + selector: + matchLabels: + id: {{ template "artifactory-ha.name" . }}-data-pv + app: {{ template "artifactory-ha.name" . }} + release: {{ .Release.Name }} +--- +### Artifactory HA backup +apiVersion: v1 +kind: PersistentVolume +metadata: + name: {{ template "artifactory-ha.fullname" . }}-backup-pv + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + id: {{ template "artifactory-ha.name" . }}-backup-pv + type: nfs-volume +spec: + {{- if .Values.artifactory.persistence.nfs.mountOptions }} + mountOptions: +{{ toYaml .Values.artifactory.persistence.nfs.mountOptions | indent 4 }} + {{- end }} + capacity: + storage: {{ .Values.artifactory.persistence.nfs.capacity }} + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + nfs: + server: {{ .Values.artifactory.persistence.nfs.ip }} + path: "{{ .Values.artifactory.persistence.nfs.haBackupMount }}" + readOnly: false +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ template "artifactory-ha.fullname" . }}-backup-pvc + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + type: nfs-volume +spec: + accessModes: + - ReadWriteOnce + storageClassName: "" + resources: + requests: + storage: {{ .Values.artifactory.persistence.nfs.capacity }} + selector: + matchLabels: + id: {{ template "artifactory-ha.name" . }}-backup-pv + app: {{ template "artifactory-ha.name" . }} + release: {{ .Release.Name }} +{{- end }} diff --git a/Openshift4/artifactoryha-helm/templates/artifactory-node-pdb.yaml b/Openshift4/artifactoryha-helm/templates/artifactory-node-pdb.yaml new file mode 100755 index 0000000..871bb21 --- /dev/null +++ b/Openshift4/artifactoryha-helm/templates/artifactory-node-pdb.yaml @@ -0,0 +1,19 @@ +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "artifactory-ha.fullname" . }}-node + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + component: {{ .Values.artifactory.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + selector: + matchLabels: + app: {{ template "artifactory-ha.name" . }} + {{- if eq .Values.artifactory.service.pool "members" }} + role: {{ template "artifactory-ha.node.name" . 
}} + {{- end }} + release: {{ .Release.Name }} + minAvailable: {{ .Values.artifactory.node.minAvailable }} diff --git a/Openshift4/artifactoryha-helm/templates/artifactory-node-statefulset.yaml b/Openshift4/artifactoryha-helm/templates/artifactory-node-statefulset.yaml new file mode 100755 index 0000000..67e1ab3 --- /dev/null +++ b/Openshift4/artifactoryha-helm/templates/artifactory-node-statefulset.yaml @@ -0,0 +1,510 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "artifactory-ha.node.name" . }} + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + component: {{ .Values.artifactory.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + force-update: "{{ randAlpha 63 | lower }}" +{{- if .Values.artifactory.node.labels }} +{{ toYaml .Values.artifactory.node.labels | indent 4 }} +{{- end }} +spec: + serviceName: {{ template "artifactory-ha.node.name" . }} + replicas: {{ .Values.artifactory.node.replicaCount }} + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + app: {{ template "artifactory-ha.name" . }} + role: {{ template "artifactory-ha.node.name" . }} + release: {{ .Release.Name }} + template: + metadata: + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + role: {{ template "artifactory-ha.node.name" . }} + heritage: {{ .Release.Service }} + component: {{ .Values.artifactory.name }} + release: {{ .Release.Name }} + annotations: + checksum/binarystore: {{ include (print $.Template.BasePath "/artifactory-binarystore-secret.yaml") . | sha256sum }} + checksum/systemyaml: {{ include (print $.Template.BasePath "/artifactory-system-yaml.yaml") . | sha256sum }} + {{- range $key, $value := .Values.artifactory.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + spec: + {{- if .Values.artifactory.priorityClass.existingPriorityClass }} + priorityClassName: {{ .Values.artifactory.priorityClass.existingPriorityClass }} + {{- else -}} + {{- if .Values.artifactory.priorityClass.create }} + priorityClassName: {{ default (include "artifactory-ha.fullname" .) .Values.artifactory.priorityClass.name }} + {{- end }} + {{- end }} + serviceAccountName: {{ template "artifactory-ha.serviceAccountName" . }} + terminationGracePeriodSeconds: {{ .Values.artifactory.terminationGracePeriodSeconds }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + - name: {{ .Values.imagePullSecrets }} + {{- end }} + securityContext: + runAsUser: {{ .Values.artifactory.uid }} + fsGroup: {{ .Values.artifactory.uid }} + initContainers: + {{- if .Values.artifactory.customInitContainersBegin }} +{{ tpl .Values.artifactory.customInitContainersBegin . | indent 6 }} + {{- end }} + {{- if .Values.artifactory.persistence.enabled }} + {{- if eq .Values.artifactory.persistence.type "file-system" }} + {{- if .Values.artifactory.persistence.fileSystem.existingSharedClaim.enabled }} + - name: "create-artifactory-data-dir" + image: "{{ .Values.initContainerImage }}" + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + command: + - 'sh' + - '-c' + - > + mkdir -p {{ tpl .Values.artifactory.persistence.fileSystem.existingSharedClaim.dataDir . 
}}; + volumeMounts: + - mountPath: {{ .Values.artifactory.persistence.mountPath | quote }} + name: volume + {{- end }} + {{- end }} + {{- if .Values.artifactory.deleteDBPropertiesOnStartup }} + - name: "delete-db-properties" + image: "{{ .Values.initContainerImage }}" + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + command: + - 'sh' + - '-c' + - 'rm -fv {{ .Values.artifactory.persistence.mountPath }}/etc/db.properties' + volumeMounts: + - mountPath: {{ .Values.artifactory.persistence.mountPath | quote }} + name: volume + {{- end }} + - name: "remove-lost-found" + image: "{{ .Values.initContainerImage }}" + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + command: + - 'sh' + - '-c' + - > + rm -rfv {{ .Values.artifactory.persistence.mountPath }}/lost+found + volumeMounts: + - mountPath: {{ .Values.artifactory.persistence.mountPath | quote }} + name: volume + {{- end }} + - name: 'copy-system-yaml' + image: '{{ .Values.initContainerImage }}' + command: + - '/bin/sh' + - '-c' + - > + {{- if .Values.artifactory.node.waitForPrimaryStartup.enabled }} + echo "Sleeping to allow time for primary node to come up"; + sleep {{ .Values.artifactory.node.waitForPrimaryStartup.seconds }}; + {{- end }} + echo "Copy system.yaml to {{ .Values.artifactory.persistence.mountPath }}/etc"; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/etc; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/etc/access/keys/trusted; + cp -fv /tmp/etc/system.yaml {{ .Values.artifactory.persistence.mountPath }}/etc/system.yaml; + echo "Remove {{ .Values.artifactory.persistence.mountPath }}/lost+found folder if exists"; + rm -rfv {{ .Values.artifactory.persistence.mountPath }}/lost+found; + volumeMounts: + - name: volume + mountPath: {{ .Values.artifactory.persistence.mountPath | quote }} + - name: systemyaml + mountPath: "/tmp/etc/system.yaml" + subPath: system.yaml + {{- if .Values.artifactory.customPersistentPodVolumeClaim }} + - name: "prepare-custom-persistent-volume" + image: "{{ .Values.initContainerImage }}" + command: + - 'sh' + - '-c' + - > + chown -Rv {{ .Values.artifactory.uid }}:{{ .Values.artifactory.uid }} {{ .Values.artifactory.customPersistentPodVolumeClaim.mountPath }} + securityContext: + runAsUser: 0 + volumeMounts: + - name: {{ .Values.artifactory.customPersistentPodVolumeClaim.name }} + mountPath: {{ .Values.artifactory.customPersistentPodVolumeClaim.mountPath }} + {{- end }} + {{- if .Values.waitForDatabase }} + {{- if or .Values.postgresql.enabled }} + - name: "wait-for-db" + image: "{{ .Values.initContainerImage }}" + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + command: + - 'sh' + - '-c' + - > + until nc -z -w 2 {{ .Release.Name }}-postgresql {{ .Values.postgresql.service.port }} && echo database ok; do + sleep 2; + done; + {{- end }} + {{- end }} + {{- if .Values.artifactory.customInitContainers }} +{{ tpl .Values.artifactory.customInitContainers . 
| indent 6 }} + {{- end }} + containers: + - name: {{ .Values.artifactory.name }} + image: '{{ .Values.artifactory.image.repository }}:{{ default .Chart.AppVersion .Values.artifactory.image.version }}' + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + securityContext: + allowPrivilegeEscalation: false + command: + - '/bin/sh' + - '-c' + - > + {{- if .Values.artifactory.userPluginSecrets }} + echo "Copying plugins"; + cp -Lrf /tmp/plugin/*/* /tmp/plugins; + {{- end }} + {{- if .Values.artifactory.preStartCommand }} + echo "Running custom preStartCommand command"; + {{ tpl .Values.artifactory.preStartCommand . }}; + {{- end }} + /entrypoint-artifactory.sh + lifecycle: + postStart: + exec: + command: + - '/bin/sh' + - '-c' + - > + echo; + {{- if .Values.artifactory.postStartCommand }} + {{ .Values.artifactory.postStartCommand }} + {{- end }} + env: + {{- if .Values.database.secrets.user }} + - name: JF_SHARED_DATABASE_USERNAME + valueFrom: + secretKeyRef: + name: {{ .Values.database.secrets.user.name }} + key: {{ .Values.database.secrets.user.key }} + {{- end }} + {{- if .Values.database.secrets.password }} + - name: JF_SHARED_DATABASE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .Values.database.secrets.password.name }} + key: {{ .Values.database.secrets.password.key }} + {{- end }} + {{- if .Values.database.secrets.url }} + - name: JF_SHARED_DATABASE_URL + valueFrom: + secretKeyRef: + name: {{ .Values.database.secrets.url.name }} + key: {{ .Values.database.secrets.url.key }} + {{- end }} + - name: JF_SHARED_NODE_PRIMARY + value: "false" + - name: JF_SHARED_NODE_HAENABLED + value: "true" + - name: JF_SHARED_DATABASE_USERNAME + value: "artifactory" + - name: JF_SHARED_DATABASE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .Release.Name }}-postgresql + key: postgresql-password + ports: + - containerPort: {{ .Values.artifactory.internalPort }} + - containerPort: {{ .Values.artifactory.internalArtifactoryPort }} + {{- if .Values.artifactory.node.javaOpts.jmx.enabled }} + - containerPort: {{ .Values.artifactory.node.javaOpts.jmx.port }} + {{- end }} + volumeMounts: + {{- if .Values.artifactory.customPersistentVolumeClaim }} + - name: {{ .Values.artifactory.customPersistentVolumeClaim.name }} + mountPath: {{ .Values.artifactory.customPersistentVolumeClaim.mountPath }} + {{- end }} + {{- if .Values.artifactory.customPersistentPodVolumeClaim }} + - name: {{ .Values.artifactory.customPersistentPodVolumeClaim.name }} + mountPath: {{ .Values.artifactory.customPersistentPodVolumeClaim.mountPath }} + {{- end }} + {{- if .Values.artifactory.userPluginSecrets }} + - name: tmp-plugins + mountPath: "/tmp/plugins/" + {{- range .Values.artifactory.userPluginSecrets }} + - name: {{ tpl . $ }} + mountPath: "/tmp/plugin/{{ tpl .
$ }}" + {{- end }} + {{- end }} + - name: volume + mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + {{- if eq .Values.artifactory.persistence.type "file-system" }} + {{- if .Values.artifactory.persistence.fileSystem.existingSharedClaim.enabled }} + {{- range $sharedClaimNumber, $e := until (.Values.artifactory.persistence.fileSystem.existingSharedClaim.numberOfExistingClaims|int) }} + - name: artifactory-ha-data-{{ $sharedClaimNumber }} + mountPath: "{{ tpl $.Values.artifactory.persistence.fileSystem.existingSharedClaim.dataDir $ }}/filestore{{ $sharedClaimNumber }}" + {{- end }} + - name: artifactory-ha-backup + mountPath: "{{ $.Values.artifactory.persistence.fileSystem.existingSharedClaim.backupDir }}" + {{- end }} + {{- end }} + {{- if eq .Values.artifactory.persistence.type "nfs" }} + - name: artifactory-ha-data + mountPath: "{{ .Values.artifactory.persistence.nfs.dataDir }}" + - name: artifactory-ha-backup + mountPath: "{{ .Values.artifactory.persistence.nfs.backupDir }}" + {{- else }} + {{- if .Values.artifactory.binarystore.enabled }} + - name: binarystore-xml + mountPath: "/artifactory_extra_conf/binarystore.xml" + subPath: binarystore.xml + {{- end }} + {{- end }} + - name: installer-info + mountPath: "/artifactory_extra_conf/info/installer-info.json" + subPath: installer-info.json + {{- if .Values.artifactory.customVolumeMounts }} +{{ tpl .Values.artifactory.customVolumeMounts . | indent 8 }} + {{- end }} + resources: +{{ toYaml .Values.artifactory.node.resources | indent 10 }} + {{- if .Values.artifactory.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: {{ .Values.artifactory.readinessProbe.path }} + port: {{ .Values.artifactory.internalPort }} + initialDelaySeconds: {{ .Values.artifactory.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.artifactory.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.artifactory.readinessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.artifactory.readinessProbe.failureThreshold }} + successThreshold: {{ .Values.artifactory.readinessProbe.successThreshold }} + {{- end }} + {{- if .Values.artifactory.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: {{ .Values.artifactory.livenessProbe.path }} + port: {{ .Values.artifactory.internalPort }} + initialDelaySeconds: {{ .Values.artifactory.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.artifactory.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.artifactory.livenessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.artifactory.livenessProbe.failureThreshold }} + successThreshold: {{ .Values.artifactory.livenessProbe.successThreshold }} + {{- end }} + {{- $image := .Values.logger.image.repository }} + {{- $tag := .Values.logger.image.tag }} + {{- $mountPath := .Values.artifactory.persistence.mountPath }} + {{- range .Values.artifactory.loggers }} + - name: {{ . | replace "_" "-" | replace "." "-" }} + image: '{{ $image }}:{{ $tag }}' + command: + - tail + args: + - '-F' + - '{{ $mountPath }}/logs/{{ . }}' + volumeMounts: + - name: volume + mountPath: {{ $mountPath }} + {{- end }} + {{ if .Values.artifactory.catalinaLoggers }} + {{- range .Values.artifactory.catalinaLoggers }} + - name: {{ . | replace "_" "-" | replace "." "-" }} + image: '{{ $image }}:{{ $tag }}' + command: + - 'sh' + - '-c' + - 'sh /scripts/tail-log.sh {{ $mountPath }}/logs/catalina {{ . 
}}' + volumeMounts: + - name: volume + mountPath: {{ $mountPath }} + - name: catalina-logger + mountPath: /scripts/tail-log.sh + subPath: tail-log.sh + {{- end }} + {{- end }} + {{- if .Values.filebeat.enabled }} + - name: {{ .Values.filebeat.name }} + image: "{{ .Values.filebeat.image.repository }}:{{ .Values.filebeat.image.version }}" + imagePullPolicy: {{ .Values.filebeat.image.pullPolicy }} + args: + - "-e" + - "-E" + - "http.enabled=true" + securityContext: + runAsUser: 0 + volumeMounts: + - name: filebeat-config + mountPath: /usr/share/filebeat/filebeat.yml + readOnly: true + subPath: filebeat.yml + - name: volume + mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + livenessProbe: +{{ toYaml .Values.filebeat.livenessProbe | indent 10 }} + readinessProbe: +{{ toYaml .Values.filebeat.readinessProbe | indent 10 }} + resources: +{{ toYaml .Values.filebeat.resources | indent 10 }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriod }} + {{- end }} + {{- if .Values.artifactory.customSidecarContainers }} +{{ tpl .Values.artifactory.customSidecarContainers . | indent 6 }} + {{- end }} + {{- with .Values.artifactory.node.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- if .Values.artifactory.node.affinity }} + {{- with .Values.artifactory.node.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- else if eq .Values.artifactory.node.podAntiAffinity.type "soft" }} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + topologyKey: {{ .Values.artifactory.node.podAntiAffinity.topologyKey }} + labelSelector: + matchLabels: + app: {{ template "artifactory-ha.name" . }} + release: {{ .Release.Name }} + {{- if eq .Values.artifactory.service.pool "members" }} + role: {{ template "artifactory-ha.node.name" . }} + {{- end }} + {{- else if eq .Values.artifactory.node.podAntiAffinity.type "hard" }} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: {{ .Values.artifactory.node.podAntiAffinity.topologyKey }} + labelSelector: + matchLabels: + app: {{ template "artifactory-ha.name" . }} + release: {{ .Release.Name }} + {{- if eq .Values.artifactory.service.pool "members" }} + role: {{ template "artifactory-ha.node.name" . }} + {{- end }} + {{- end }} + {{- with .Values.artifactory.node.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} + {{- end }} + volumes: + {{- if .Values.artifactory.binarystore.enabled }} + - name: binarystore-xml + secret: + {{- if .Values.artifactory.persistence.customBinarystoreXmlSecret }} + secretName: {{ .Values.artifactory.persistence.customBinarystoreXmlSecret }} + {{- else }} + secretName: {{ template "artifactory-ha.fullname" . }}-binarystore + {{- end }} + {{- end }} + - name: installer-info + configMap: + name: {{ template "artifactory-ha.fullname" . }}-installer-info + {{- if .Values.artifactory.userPluginSecrets }} + - name: tmp-plugins + emptyDir: {} + {{- range .Values.artifactory.userPluginSecrets }} + - name: {{ tpl . $ }} + secret: + secretName: {{ tpl . $ }} + {{- end }} + {{- end }} + {{- if .Values.artifactory.catalinaLoggers }} + - name: catalina-logger + configMap: + name: {{ template "artifactory-ha.fullname" . }}-catalina-logger + {{- end }} + {{- if .Values.artifactory.configMaps }} + - name: artifactory-configmaps + configMap: + name: {{ template "artifactory-ha.fullname" . 
}}-configmaps + {{- end }} + {{- if eq .Values.artifactory.persistence.type "file-system" }} + {{- if .Values.artifactory.persistence.fileSystem.existingSharedClaim.enabled }} + {{- range $sharedClaimNumber, $e := until (.Values.artifactory.persistence.fileSystem.existingSharedClaim.numberOfExistingClaims|int) }} + - name: artifactory-ha-data-{{ $sharedClaimNumber }} + persistentVolumeClaim: + claimName: {{ template "artifactory-ha.fullname" $ }}-data-pvc-{{ $sharedClaimNumber }} + {{- end }} + - name: artifactory-ha-backup + persistentVolumeClaim: + claimName: {{ template "artifactory-ha.fullname" $ }}-backup-pvc + {{- end }} + {{- end }} + {{- if eq .Values.artifactory.persistence.type "nfs" }} + - name: artifactory-ha-data + persistentVolumeClaim: + claimName: {{ template "artifactory-ha.fullname" . }}-data-pvc + - name: artifactory-ha-backup + persistentVolumeClaim: + claimName: {{ template "artifactory-ha.fullname" . }}-backup-pvc + {{- end }} + - name: systemyaml + secret: + secretName: {{ template "artifactory-ha.primary.name" . }}-system-yaml + {{- if .Values.artifactory.customPersistentVolumeClaim }} + - name: {{ .Values.artifactory.customPersistentVolumeClaim.name }} + persistentVolumeClaim: + claimName: {{ .Values.artifactory.customPersistentVolumeClaim.name }} + {{- end }} + {{- if .Values.filebeat.enabled }} + - name: filebeat-config + configMap: + name: {{ template "artifactory-ha.fullname" . }}-filebeat-config + {{- end }} + {{- if .Values.artifactory.customVolumes }} +{{ tpl .Values.artifactory.customVolumes . | indent 6 }} + {{- end }} + {{- if not .Values.artifactory.persistence.enabled }} + - name: volume + emptyDir: + sizeLimit: {{ .Values.artifactory.persistence.size }} + {{- end }} + volumeClaimTemplates: + {{- if .Values.artifactory.persistence.enabled }} + - metadata: + name: volume + {{- if not .Values.artifactory.node.persistence.existingClaim }} + spec: + {{- if .Values.artifactory.persistence.storageClassName }} + {{- if (eq "-" .Values.artifactory.persistence.storageClassName) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.artifactory.persistence.storageClassName }}" + {{- end }} + {{- end }} + accessModes: [ "{{ .Values.artifactory.persistence.accessMode }}" ] + resources: + requests: + storage: {{ .Values.artifactory.persistence.size }} + {{- end }} + {{- end }} + {{- if .Values.artifactory.customPersistentPodVolumeClaim }} + - metadata: + name: {{ .Values.artifactory.customPersistentPodVolumeClaim.name }} + spec: + {{- if .Values.artifactory.customPersistentPodVolumeClaim.storageClassName }} + {{- if (eq "-" .Values.artifactory.customPersistentPodVolumeClaim.storageClassName) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.artifactory.customPersistentPodVolumeClaim.storageClassName }}" + {{- end }} + {{- end }} + accessModes: + {{- range .Values.artifactory.customPersistentPodVolumeClaim.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.artifactory.customPersistentPodVolumeClaim.size }} + {{- end }} diff --git a/Openshift4/artifactoryha-helm/templates/artifactory-primary-statefulset.yaml b/Openshift4/artifactoryha-helm/templates/artifactory-primary-statefulset.yaml new file mode 100755 index 0000000..0e03476 --- /dev/null +++ b/Openshift4/artifactoryha-helm/templates/artifactory-primary-statefulset.yaml @@ -0,0 +1,593 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "artifactory-ha.primary.name" . 
}} + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + component: {{ .Values.artifactory.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + force-update: "{{ randAlpha 63 | lower }}" +{{- if .Release.IsUpgrade }} + unifiedUpgradeAllowed: {{ required "\n\n**************************************\nSTOP! UPGRADE from Artifactory 6.x currently not supported!\nIf this is an upgrade over an existing Artifactory 7.x, explicitly pass 'unifiedUpgradeAllowed=true' to upgrade.\n**************************************\n" .Values.unifiedUpgradeAllowed | quote }} +{{- end }} +{{- if and .Release.IsUpgrade .Values.postgresql.enabled }} + databaseUpgradeReady: {{ required "\n\n*********\nIMPORTANT: UPGRADE FAILED to prevent data loss!\nReview CHANGELOG.md (https://github.com/jfrog/charts/blob/master/stable/artifactory-ha/CHANGELOG.md) and prepare PostgreSQL DB migration before upgrading.\nOnce ready, explicitly pass 'databaseUpgradeReady=yes' to upgrade and complete migration after server starts!\n" .Values.databaseUpgradeReady | quote }} +{{- end }} +{{- if .Values.artifactory.primary.labels }} +{{ toYaml .Values.artifactory.primary.labels | indent 4 }} +{{- end }} +spec: + serviceName: {{ template "artifactory-ha.primary.name" . }} + replicas: 1 + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + app: {{ template "artifactory-ha.name" . }} + role: {{ template "artifactory-ha.primary.name" . }} + release: {{ .Release.Name }} + template: + metadata: + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + role: {{ template "artifactory-ha.primary.name" . }} + component: {{ .Values.artifactory.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + annotations: + checksum/binarystore: {{ include (print $.Template.BasePath "/artifactory-binarystore-secret.yaml") . | sha256sum }} + checksum/systemyaml: {{ include (print $.Template.BasePath "/artifactory-system-yaml.yaml") . | sha256sum }} + {{- if not (and .Values.artifactory.accessAdmin.secret .Values.artifactory.accessAdmin.dataKey) }} + checksum/access-creds: {{ include (print $.Template.BasePath "/access-bootstrap-creds.yaml") . | sha256sum }} + {{- end }} + {{- range $key, $value := .Values.artifactory.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + spec: + {{- if .Values.artifactory.priorityClass.existingPriorityClass }} + priorityClassName: {{ .Values.artifactory.priorityClass.existingPriorityClass }} + {{- else -}} + {{- if .Values.artifactory.priorityClass.create }} + priorityClassName: {{ default (include "artifactory-ha.fullname" .) .Values.artifactory.priorityClass.name }} + {{- end }} + {{- end }} + serviceAccountName: {{ template "artifactory-ha.serviceAccountName" . }} + terminationGracePeriodSeconds: {{ .Values.artifactory.terminationGracePeriodSeconds }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + - name: {{ .Values.imagePullSecrets }} + {{- end }} + securityContext: + runAsUser: {{ .Values.artifactory.uid }} + fsGroup: {{ .Values.artifactory.uid }} + initContainers: + {{- if .Values.artifactory.customInitContainersBegin }} +{{ tpl .Values.artifactory.customInitContainersBegin . 
| indent 6 }} + {{- end }} + {{- if .Values.artifactory.persistence.enabled }} + {{- if eq .Values.artifactory.persistence.type "file-system" }} + {{- if .Values.artifactory.persistence.fileSystem.existingSharedClaim.enabled }} + - name: "create-artifactory-data-dir" + image: "{{ .Values.initContainerImage }}" + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + command: + - 'sh' + - '-c' + - > + mkdir -p {{ tpl .Values.artifactory.persistence.fileSystem.existingSharedClaim.dataDir . }}; + volumeMounts: + - mountPath: {{ .Values.artifactory.persistence.mountPath | quote }} + name: volume + {{- end }} + {{- end }} + {{- if .Values.artifactory.deleteDBPropertiesOnStartup }} + - name: "delete-db-properties" + image: "{{ .Values.initContainerImage }}" + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + command: + - 'sh' + - '-c' + - 'rm -fv {{ .Values.artifactory.persistence.mountPath }}/etc/db.properties' + volumeMounts: + - mountPath: {{ .Values.artifactory.persistence.mountPath | quote }} + name: volume + {{- end }} + - name: "remove-lost-found" + image: "{{ .Values.initContainerImage }}" + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + command: + - 'sh' + - '-c' + - > + rm -rfv {{ .Values.artifactory.persistence.mountPath }}/lost+found; + rm -rfv {{ tpl .Values.artifactory.persistence.fileSystem.existingSharedClaim.dataDir . }}/lost+found; + rm -rfv {{ .Values.artifactory.persistence.fileSystem.existingSharedClaim.backupDir }}/lost+found; + volumeMounts: + - name: volume + mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + {{- if eq .Values.artifactory.persistence.type "file-system" }} + {{- if .Values.artifactory.persistence.fileSystem.existingSharedClaim.enabled }} + {{- range $sharedClaimNumber, $e := until (.Values.artifactory.persistence.fileSystem.existingSharedClaim.numberOfExistingClaims|int) }} + - name: artifactory-ha-data-{{ $sharedClaimNumber }} + mountPath: "{{ tpl $.Values.artifactory.persistence.fileSystem.existingSharedClaim.dataDir $ }}/filestore{{ $sharedClaimNumber }}" + {{- end }} + - name: artifactory-ha-backup + mountPath: "{{ $.Values.artifactory.persistence.fileSystem.existingSharedClaim.backupDir }}" + {{- end }} + {{- end }} + {{- if or (and .Values.artifactory.accessAdmin.secret .Values.artifactory.accessAdmin.dataKey) .Values.artifactory.accessAdmin.password }} + - name: "access-bootstrap-creds" + image: "{{ .Values.initContainerImage }}" + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + command: + - 'sh' + - '-c' + - > + echo "Preparing custom Access bootstrap.creds"; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/access/etc; + cp -Lrf /tmp/access/bootstrap.creds {{ .Values.artifactory.persistence.mountPath }}/access/etc/bootstrap.creds; + chmod 600 {{ .Values.artifactory.persistence.mountPath }}/access/etc/bootstrap.creds; + volumeMounts: + - name: volume + mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + - name: access-bootstrap-creds + mountPath: "/tmp/access/bootstrap.creds" + {{- if and .Values.artifactory.accessAdmin.secret .Values.artifactory.accessAdmin.dataKey }} + subPath: {{ .Values.artifactory.accessAdmin.dataKey }} + {{- else }} + subPath: bootstrap.creds + {{- end }} + {{- end 
}} + {{- end }} + - name: 'copy-system-yaml' + image: '{{ .Values.initContainerImage }}' + command: + - '/bin/sh' + - '-c' + - > + echo "Copy system.yaml to {{ .Values.artifactory.persistence.mountPath }}/etc"; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/etc; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/etc/access/keys/trusted; + cp -fv /tmp/etc/system.yaml {{ .Values.artifactory.persistence.mountPath }}/etc/system.yaml; + echo "Remove {{ .Values.artifactory.persistence.mountPath }}/lost+found folder if exists"; + rm -rfv {{ .Values.artifactory.persistence.mountPath }}/lost+found; + volumeMounts: + - name: volume + mountPath: {{ .Values.artifactory.persistence.mountPath | quote }} + - name: systemyaml + mountPath: "/tmp/etc/system.yaml" + subPath: system.yaml + {{- if .Values.artifactory.customPersistentPodVolumeClaim }} + - name: "prepare-custom-persistent-volume" + image: "{{ .Values.initContainerImage }}" + command: + - 'sh' + - '-c' + - > + chown -Rv {{ .Values.artifactory.uid }}:{{ .Values.artifactory.uid }} {{ .Values.artifactory.customPersistentPodVolumeClaim.mountPath }} + securityContext: + runAsUser: 0 + volumeMounts: + - name: {{ .Values.artifactory.customPersistentPodVolumeClaim.name }} + mountPath: {{ .Values.artifactory.customPersistentPodVolumeClaim.mountPath }} + {{- end }} + {{- if .Values.waitForDatabase }} + {{- if or .Values.postgresql.enabled }} + - name: "wait-for-db" + image: "{{ .Values.initContainerImage }}" + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + command: + - 'sh' + - '-c' + - > + until nc -z -w 2 {{ .Release.Name }}-postgresql {{ .Values.postgresql.service.port }} && echo database ok; do + sleep 2; + done; + {{- end }} + {{- end }} + {{- if .Values.artifactory.customInitContainers }} +{{ tpl .Values.artifactory.customInitContainers . | indent 6 }} + {{- end }} + containers: + - name: {{ .Values.artifactory.name }} + image: '{{ .Values.artifactory.image.repository }}:{{ default .Chart.AppVersion .Values.artifactory.image.version }}' + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + securityContext: + allowPrivilegeEscalation: false + command: + - '/bin/sh' + - '-c' + - > + set -e; + {{- if .Values.artifactory.configMapName }} + echo "Copying bootstrap configs"; + cp -Lrf /bootstrap/* /artifactory_extra_conf/; + {{- end }} + {{- if .Values.artifactory.userPluginSecrets }} + echo "Copying plugins"; + cp -Lrf /tmp/plugin/*/* /tmp/plugins; + {{- end }} + {{- range .Values.artifactory.copyOnEveryStartup }} + {{- $targetPath := printf "%s/%s" $.Values.artifactory.persistence.mountPath .target }} + {{- $baseDirectory := regexFind ".*/" $targetPath }} + mkdir -p {{ $baseDirectory }}; + cp -Lrf {{ .source }} {{ $.Values.artifactory.persistence.mountPath }}/{{ .target }}; + {{- end }} + {{- if .Values.artifactory.preStartCommand }} + echo "Running custom preStartCommand command"; + {{ tpl .Values.artifactory.preStartCommand . 
}}; + {{- end }} + /entrypoint-artifactory.sh + lifecycle: + postStart: + exec: + command: + - '/bin/sh' + - '-c' + - > + echo; + {{- if .Values.artifactory.postStartCommand }} + {{ .Values.artifactory.postStartCommand }} + {{- end }} + env: + {{- if .Values.database.secrets.user }} + - name: JF_SHARED_DATABASE_USERNAME + valueFrom: + secretKeyRef: + name: {{ .Values.database.secrets.user.name }} + key: {{ .Values.database.secrets.user.key }} + {{- end }} + {{- if .Values.database.secrets.password }} + - name: JF_SHARED_DATABASE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .Values.database.secrets.password.name }} + key: {{ .Values.database.secrets.password.key }} + {{- end }} + {{- if .Values.database.secrets.url }} + - name: JF_SHARED_DATABASE_URL + valueFrom: + secretKeyRef: + name: {{ .Values.database.secrets.url.name }} + key: {{ .Values.database.secrets.url.key }} + {{- end }} + - name: JF_SHARED_NODE_PRIMARY + value: "true" + - name: JF_SHARED_NODE_HAENABLED + value: "true" + - name: JF_SHARED_DATABASE_USERNAME + value: "artifactory" + - name: JF_SHARED_DATABASE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .Release.Name }}-postgresql + key: postgresql-password + ports: + - containerPort: {{ .Values.artifactory.internalPort }} + - containerPort: {{ .Values.artifactory.internalArtifactoryPort }} + {{- if .Values.artifactory.primary.javaOpts.jmx.enabled }} + - containerPort: {{ .Values.artifactory.primary.javaOpts.jmx.port }} + {{- end }} + volumeMounts: + {{- if .Values.artifactory.customPersistentVolumeClaim }} + - name: {{ .Values.artifactory.customPersistentVolumeClaim.name }} + mountPath: {{ .Values.artifactory.customPersistentVolumeClaim.mountPath }} + {{- end }} + {{- if .Values.artifactory.customPersistentPodVolumeClaim }} + - name: {{ .Values.artifactory.customPersistentPodVolumeClaim.name }} + mountPath: {{ .Values.artifactory.customPersistentPodVolumeClaim.mountPath }} + {{- end }} + {{- if .Values.artifactory.userPluginSecrets }} + - name: tmp-plugins + mountPath: "/tmp/plugins/" + {{- range .Values.artifactory.userPluginSecrets }} + - name: {{ tpl . $ }} + mountPath: "/tmp/plugin/{{ tpl .
$ }}" + {{- end }} + {{- end }} + - name: volume + mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + {{- if eq .Values.artifactory.persistence.type "file-system" }} + {{- if .Values.artifactory.persistence.fileSystem.existingSharedClaim.enabled }} + {{- range $sharedClaimNumber, $e := until (.Values.artifactory.persistence.fileSystem.existingSharedClaim.numberOfExistingClaims|int) }} + - name: artifactory-ha-data-{{ $sharedClaimNumber }} + mountPath: "{{ tpl $.Values.artifactory.persistence.fileSystem.existingSharedClaim.dataDir $ }}/filestore{{ $sharedClaimNumber }}" + {{- end }} + - name: artifactory-ha-backup + mountPath: "{{ $.Values.artifactory.persistence.fileSystem.existingSharedClaim.backupDir }}" + {{- end }} + {{- end }} + {{- if eq .Values.artifactory.persistence.type "nfs" }} + - name: artifactory-ha-data + mountPath: "{{ .Values.artifactory.persistence.nfs.dataDir }}" + - name: artifactory-ha-backup + mountPath: "{{ .Values.artifactory.persistence.nfs.backupDir }}" + {{- else }} + {{- if .Values.artifactory.binarystore.enabled }} + - name: binarystore-xml + mountPath: "/artifactory_extra_conf/binarystore.xml" + subPath: binarystore.xml + {{- end }} + {{- end }} + {{- if .Values.artifactory.configMapName }} + - name: bootstrap-config + mountPath: "/bootstrap/" + {{- end }} + {{- if or .Values.artifactory.license.secret .Values.artifactory.license.licenseKey }} + - name: artifactory-license + mountPath: "/artifactory_extra_conf/artifactory.cluster.license" + {{- if .Values.artifactory.license.secret }} + subPath: {{ .Values.artifactory.license.dataKey }} + {{- else if .Values.artifactory.license.licenseKey }} + subPath: artifactory.lic + {{- end }} + {{- end }} + - name: installer-info + mountPath: "/artifactory_extra_conf/info/installer-info.json" + subPath: installer-info.json + {{- if .Values.artifactory.customVolumeMounts }} +{{ tpl .Values.artifactory.customVolumeMounts . | indent 8 }} + {{- end }} + resources: +{{ toYaml .Values.artifactory.primary.resources | indent 10 }} + {{- if .Values.artifactory.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: {{ .Values.artifactory.readinessProbe.path }} + port: {{ .Values.artifactory.internalPort }} + initialDelaySeconds: {{ .Values.artifactory.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.artifactory.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.artifactory.readinessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.artifactory.readinessProbe.failureThreshold }} + successThreshold: {{ .Values.artifactory.readinessProbe.successThreshold }} + {{- end }} + {{- if .Values.artifactory.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: {{ .Values.artifactory.livenessProbe.path }} + port: {{ .Values.artifactory.internalPort }} + initialDelaySeconds: {{ .Values.artifactory.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.artifactory.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.artifactory.livenessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.artifactory.livenessProbe.failureThreshold }} + successThreshold: {{ .Values.artifactory.livenessProbe.successThreshold }} + {{- end }} + {{- $image := .Values.logger.image.repository }} + {{- $tag := .Values.logger.image.tag }} + {{- $mountPath := .Values.artifactory.persistence.mountPath }} + {{- range .Values.artifactory.loggers }} + - name: {{ . | replace "_" "-" | replace "." 
"-" }} + image: '{{ $image }}:{{ $tag }}' + command: + - tail + args: + - '-F' + - '{{ $mountPath }}/logs/{{ . }}' + volumeMounts: + - name: volume + mountPath: {{ $mountPath }} + {{- end }} + {{ if .Values.artifactory.catalinaLoggers }} + {{- range .Values.artifactory.catalinaLoggers }} + - name: {{ . | replace "_" "-" | replace "." "-" }} + image: '{{ $image }}:{{ $tag }}' + command: + - 'sh' + - '-c' + - 'sh /scripts/tail-log.sh {{ $mountPath }}/logs/catalina {{ . }}' + volumeMounts: + - name: volume + mountPath: {{ $mountPath }} + - name: catalina-logger + mountPath: /scripts/tail-log.sh + subPath: tail-log.sh + {{- end }} + {{- end }} + {{- if .Values.filebeat.enabled }} + - name: {{ .Values.filebeat.name }} + image: "{{ .Values.filebeat.image.repository }}:{{ .Values.filebeat.image.version }}" + imagePullPolicy: {{ .Values.filebeat.image.pullPolicy }} + args: + - "-e" + - "-E" + - "http.enabled=true" + securityContext: + runAsUser: 0 + volumeMounts: + - name: filebeat-config + mountPath: /usr/share/filebeat/filebeat.yml + readOnly: true + subPath: filebeat.yml + - name: volume + mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + livenessProbe: +{{ toYaml .Values.filebeat.livenessProbe | indent 10 }} + readinessProbe: +{{ toYaml .Values.filebeat.readinessProbe | indent 10 }} + resources: +{{ toYaml .Values.filebeat.resources | indent 10 }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriod }} + {{- end }} + {{- if .Values.artifactory.customSidecarContainers }} +{{ tpl .Values.artifactory.customSidecarContainers . | indent 6 }} + {{- end }} + {{- with .Values.artifactory.primary.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- if .Values.artifactory.primary.affinity }} + {{- with .Values.artifactory.primary.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- else if eq .Values.artifactory.primary.podAntiAffinity.type "soft" }} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + topologyKey: {{ .Values.artifactory.primary.podAntiAffinity.topologyKey }} + labelSelector: + matchLabels: + app: {{ template "artifactory-ha.name" . }} + release: {{ .Release.Name }} + {{- else if eq .Values.artifactory.primary.podAntiAffinity.type "hard" }} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: {{ .Values.artifactory.primary.podAntiAffinity.topologyKey }} + labelSelector: + matchLabels: + app: {{ template "artifactory-ha.name" . }} + release: {{ .Release.Name }} + {{- end }} + {{- with .Values.artifactory.primary.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} + {{- end }} + volumes: + {{- if .Values.artifactory.binarystore.enabled }} + - name: binarystore-xml + secret: + {{- if .Values.artifactory.persistence.customBinarystoreXmlSecret }} + secretName: {{ .Values.artifactory.persistence.customBinarystoreXmlSecret }} + {{- else }} + secretName: {{ template "artifactory-ha.fullname" . }}-binarystore + {{- end }} + {{- end }} + - name: installer-info + configMap: + name: {{ template "artifactory-ha.fullname" . }}-installer-info + {{- if .Values.artifactory.userPluginSecrets }} + - name: tmp-plugins + emptyDir: {} + {{- range .Values.artifactory.userPluginSecrets }} + - name: {{ tpl . $ }} + secret: + secretName: {{ tpl . 
$ }} + {{- end }} + {{- end }} + {{- if .Values.artifactory.configMapName }} + - name: bootstrap-config + configMap: + name: {{ .Values.artifactory.configMapName }} + {{- end}} + {{- if .Values.artifactory.catalinaLoggers }} + - name: catalina-logger + configMap: + name: {{ template "artifactory-ha.fullname" . }}-catalina-logger + {{- end }} + {{- if .Values.artifactory.configMaps }} + - name: artifactory-configmaps + configMap: + name: {{ template "artifactory-ha.fullname" . }}-configmaps + {{- end }} + {{- if or .Values.artifactory.license.secret .Values.artifactory.license.licenseKey }} + - name: artifactory-license + secret: + {{- if .Values.artifactory.license.secret }} + secretName: {{ .Values.artifactory.license.secret }} + {{- else if .Values.artifactory.license.licenseKey }} + secretName: {{ template "artifactory-ha.fullname" . }}-license + {{- end }} + {{- end }} + {{- if or (and .Values.artifactory.accessAdmin.secret .Values.artifactory.accessAdmin.dataKey) .Values.artifactory.accessAdmin.password }} + - name: access-bootstrap-creds + secret: + {{- if and .Values.artifactory.accessAdmin.secret .Values.artifactory.accessAdmin.dataKey }} + secretName: {{ .Values.artifactory.accessAdmin.secret }} + {{- else }} + secretName: {{ template "artifactory-ha.fullname" . }}-bootstrap-creds + {{- end }} + {{- end }} + {{- if eq .Values.artifactory.persistence.type "file-system" }} + {{- if .Values.artifactory.persistence.fileSystem.existingSharedClaim.enabled }} + {{- range $sharedClaimNumber, $e := until (.Values.artifactory.persistence.fileSystem.existingSharedClaim.numberOfExistingClaims|int) }} + - name: artifactory-ha-data-{{ $sharedClaimNumber }} + persistentVolumeClaim: + claimName: {{ template "artifactory-ha.fullname" $ }}-data-pvc-{{ $sharedClaimNumber }} + {{- end }} + - name: artifactory-ha-backup + persistentVolumeClaim: + claimName: {{ template "artifactory-ha.fullname" $ }}-backup-pvc + {{- end }} + {{- end }} + {{- if eq .Values.artifactory.persistence.type "nfs" }} + - name: artifactory-ha-data + persistentVolumeClaim: + claimName: {{ template "artifactory-ha.fullname" . }}-data-pvc + - name: artifactory-ha-backup + persistentVolumeClaim: + claimName: {{ template "artifactory-ha.fullname" . }}-backup-pvc + {{- end }} + - name: systemyaml + secret: + secretName: {{ template "artifactory-ha.primary.name" . }}-system-yaml + {{- if .Values.artifactory.customPersistentVolumeClaim }} + - name: {{ .Values.artifactory.customPersistentVolumeClaim.name }} + persistentVolumeClaim: + claimName: {{ .Values.artifactory.customPersistentVolumeClaim.name }} + {{- end }} + {{- if .Values.filebeat.enabled }} + - name: filebeat-config + configMap: + name: {{ template "artifactory-ha.fullname" . }}-filebeat-config + {{- end }} + {{- if .Values.artifactory.customVolumes }} +{{ tpl .Values.artifactory.customVolumes . 
| indent 6 }} + {{- end }} + {{- if not .Values.artifactory.persistence.enabled }} + - name: volume + emptyDir: + sizeLimit: {{ .Values.artifactory.persistence.size }} + {{- end }} + volumeClaimTemplates: + {{- if .Values.artifactory.persistence.enabled }} + - metadata: + name: volume + {{- if not .Values.artifactory.primary.persistence.existingClaim }} + spec: + {{- if .Values.artifactory.persistence.storageClassName }} + {{- if (eq "-" .Values.artifactory.persistence.storageClassName) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.artifactory.persistence.storageClassName }}" + {{- end }} + {{- end }} + accessModes: [ "{{ .Values.artifactory.persistence.accessMode }}" ] + resources: + requests: + storage: {{ .Values.artifactory.persistence.size }} + {{- end }} + {{- end }} + {{- if .Values.artifactory.customPersistentPodVolumeClaim }} + - metadata: + name: {{ .Values.artifactory.customPersistentPodVolumeClaim.name }} + spec: + {{- if .Values.artifactory.customPersistentPodVolumeClaim.storageClassName }} + {{- if (eq "-" .Values.artifactory.customPersistentPodVolumeClaim.storageClassName) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.artifactory.customPersistentPodVolumeClaim.storageClassName }}" + {{- end }} + {{- end }} + accessModes: + {{- range .Values.artifactory.customPersistentPodVolumeClaim.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.artifactory.customPersistentPodVolumeClaim.size }} + {{- end }} diff --git a/Openshift4/artifactoryha-helm/templates/artifactory-priority-class.yaml b/Openshift4/artifactoryha-helm/templates/artifactory-priority-class.yaml new file mode 100755 index 0000000..417ec5c --- /dev/null +++ b/Openshift4/artifactoryha-helm/templates/artifactory-priority-class.yaml @@ -0,0 +1,9 @@ +{{- if .Values.artifactory.priorityClass.create }} +apiVersion: scheduling.k8s.io/v1 +kind: PriorityClass +metadata: + name: {{ default (include "artifactory-ha.fullname" .) .Values.artifactory.priorityClass.name }} +value: {{ .Values.artifactory.priorityClass.value }} +globalDefault: false +description: "Artifactory priority class" +{{- end }} diff --git a/Openshift4/artifactoryha-helm/templates/artifactory-role.yaml b/Openshift4/artifactoryha-helm/templates/artifactory-role.yaml new file mode 100755 index 0000000..c86bffd --- /dev/null +++ b/Openshift4/artifactoryha-helm/templates/artifactory-role.yaml @@ -0,0 +1,14 @@ +{{- if .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + component: {{ .Values.artifactory.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "artifactory-ha.fullname" . }} +rules: +{{ toYaml .Values.rbac.role.rules }} +{{- end }} diff --git a/Openshift4/artifactoryha-helm/templates/artifactory-rolebinding.yaml b/Openshift4/artifactoryha-helm/templates/artifactory-rolebinding.yaml new file mode 100755 index 0000000..4412870 --- /dev/null +++ b/Openshift4/artifactoryha-helm/templates/artifactory-rolebinding.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . 
}} + component: {{ .Values.artifactory.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "artifactory-ha.fullname" . }} +subjects: +- kind: ServiceAccount + name: {{ template "artifactory-ha.serviceAccountName" . }} +roleRef: + kind: Role + apiGroup: rbac.authorization.k8s.io + name: {{ template "artifactory-ha.fullname" . }} +{{- end }} diff --git a/Openshift4/artifactoryha-helm/templates/artifactory-secrets.yaml b/Openshift4/artifactoryha-helm/templates/artifactory-secrets.yaml new file mode 100755 index 0000000..2665d32 --- /dev/null +++ b/Openshift4/artifactoryha-helm/templates/artifactory-secrets.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "artifactory-ha.fullname" . }} + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +type: Opaque +data: +{{- if not .Values.artifactory.masterKeySecretName }} + master-key: {{ .Values.artifactory.masterKey | b64enc | quote }} +{{- end }} +{{- if .Values.database.password }} + db-password: {{ .Values.database.password | b64enc | quote }} +{{- end }} diff --git a/Openshift4/artifactoryha-helm/templates/artifactory-service.yaml b/Openshift4/artifactoryha-helm/templates/artifactory-service.yaml new file mode 100755 index 0000000..38d3355 --- /dev/null +++ b/Openshift4/artifactoryha-helm/templates/artifactory-service.yaml @@ -0,0 +1,88 @@ +# Service for all Artifactory cluster nodes. +apiVersion: v1 +kind: Service +metadata: + name: {{ template "artifactory-ha.fullname" . }} + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + component: {{ .Values.artifactory.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +{{- if .Values.artifactory.service.annotations }} + annotations: +{{ toYaml .Values.artifactory.service.annotations | indent 4 }} +{{- end }} +spec: + type: {{ .Values.artifactory.service.type }} + {{- if and (eq .Values.artifactory.service.type "ClusterIP") .Values.artifactory.service.clusterIP }} + clusterIP: {{ .Values.artifactory.service.clusterIP }} + {{- end }} + {{- if .Values.artifactory.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: +{{ toYaml .Values.artifactory.service.loadBalancerSourceRanges | indent 4 }} + {{- end }} + ports: + - port: {{ .Values.artifactory.externalPort }} + targetPort: {{ .Values.artifactory.internalPort }} + protocol: TCP + name: {{ .Release.Name }}-router + - port: {{ .Values.artifactory.externalArtifactoryPort }} + targetPort: {{ .Values.artifactory.internalArtifactoryPort }} + protocol: TCP + name: {{ .Release.Name }}-artifactory + {{- with .Values.artifactory.node.javaOpts.jmx }} + {{- if .enabled }} + - port: {{ .port }} + targetPort: {{ .port }} + protocol: TCP + name: jmx + {{- end }} + {{- end }} + selector: +{{- if eq .Values.artifactory.service.pool "members" }} + role: {{ template "artifactory-ha.node.name" . }} +{{- end }} + app: {{ template "artifactory-ha.name" . }} + component: "{{ .Values.artifactory.name }}" + release: {{ .Release.Name }} +--- +# Internal service for Artifactory primary node only! +# Used by member nodes to check readiness of primary node before starting up +apiVersion: v1 +kind: Service +metadata: + name: {{ template "artifactory-ha.primary.name" . }} + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . 
}} + component: {{ .Values.artifactory.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + type: {{ .Values.artifactory.service.type }} + {{- if and (eq .Values.artifactory.service.type "ClusterIP") .Values.artifactory.service.clusterIP }} + clusterIP: {{ .Values.artifactory.service.clusterIP }} + {{- end }} + ports: + - port: {{ .Values.artifactory.externalPort }} + targetPort: {{ .Values.artifactory.internalPort }} + protocol: TCP + name: {{ .Release.Name }}-router + - port: {{ .Values.artifactory.externalArtifactoryPort }} + targetPort: {{ .Values.artifactory.internalArtifactoryPort }} + protocol: TCP + name: {{ .Release.Name }}-artifactory + {{- with .Values.artifactory.primary.javaOpts.jmx }} + {{- if .enabled }} + - port: {{ .port }} + targetPort: {{ .port }} + protocol: TCP + name: jmx + {{- end }} + {{- end }} + selector: + role: {{ template "artifactory-ha.primary.name" . }} + app: {{ template "artifactory-ha.name" . }} + component: "{{ .Values.artifactory.name }}" + release: {{ .Release.Name }} diff --git a/Openshift4/artifactoryha-helm/templates/artifactory-serviceaccount.yaml b/Openshift4/artifactoryha-helm/templates/artifactory-serviceaccount.yaml new file mode 100755 index 0000000..6ea0b10 --- /dev/null +++ b/Openshift4/artifactoryha-helm/templates/artifactory-serviceaccount.yaml @@ -0,0 +1,16 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: +{{- with .Values.serviceAccount.annotations }} + annotations: +{{ tpl (toYaml .) $ | indent 4 }} +{{- end}} + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + component: {{ .Values.artifactory.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "artifactory-ha.serviceAccountName" . }} +{{- end }} diff --git a/Openshift4/artifactoryha-helm/templates/artifactory-storage-pvc.yaml b/Openshift4/artifactoryha-helm/templates/artifactory-storage-pvc.yaml new file mode 100755 index 0000000..e0bfa6b --- /dev/null +++ b/Openshift4/artifactoryha-helm/templates/artifactory-storage-pvc.yaml @@ -0,0 +1,27 @@ +{{ if .Values.artifactory.customPersistentVolumeClaim }} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ .Values.artifactory.customPersistentVolumeClaim.name }} + labels: + app: {{ template "artifactory-ha.name" . }} + version: "{{ .Values.artifactory.version }}" + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + accessModes: + {{- range .Values.artifactory.customPersistentVolumeClaim.accessModes }} + - {{ . | quote }} + {{- end }} + {{- if .Values.artifactory.customPersistentVolumeClaim.storageClassName }} + {{- if (eq "-" .Values.artifactory.customPersistentVolumeClaim.storageClassName) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.artifactory.customPersistentVolumeClaim.storageClassName }}" + {{- end }} + {{- end }} + resources: + requests: + storage: {{ .Values.artifactory.customPersistentVolumeClaim.size | quote }} +{{ end -}} \ No newline at end of file diff --git a/Openshift4/artifactoryha-helm/templates/artifactory-system-yaml.yaml b/Openshift4/artifactoryha-helm/templates/artifactory-system-yaml.yaml new file mode 100755 index 0000000..cf8ffba --- /dev/null +++ b/Openshift4/artifactoryha-helm/templates/artifactory-system-yaml.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "artifactory-ha.primary.name" . 
}}-system-yaml + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + component: {{ .Values.artifactory.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +type: Opaque +stringData: + system.yaml: | +{{ tpl .Values.artifactory.systemYaml . | indent 4 }} diff --git a/Openshift4/artifactoryha-helm/templates/catalina-logger-configmap.yaml b/Openshift4/artifactoryha-helm/templates/catalina-logger-configmap.yaml new file mode 100755 index 0000000..807fe72 --- /dev/null +++ b/Openshift4/artifactoryha-helm/templates/catalina-logger-configmap.yaml @@ -0,0 +1,53 @@ +{{- if .Values.artifactory.catalinaLoggers }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "artifactory-ha.fullname" . }}-catalina-logger + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + tail-log.sh: | + #!/bin/sh + + LOG_DIR=$1 + LOG_NAME=$2 + PID= + + # Wait for log dir to appear + while [ ! -d ${LOG_DIR} ]; do + sleep 1 + done + sleep 5 + + cd ${LOG_DIR} + + LOG_PREFIX=$(echo ${LOG_NAME} | awk -F\. '{print $1}') + + # Find the log to tail + LOG_FILE=$(ls -1t ./${LOG_PREFIX}.*.log | head -1) + + # echo "Tailing ${LOG_FILE}" + tail -F ${LOG_FILE} & + PID=$! + + # Loop forever to see if a new log was created + while true; do + # Find the latest log + NEW_LOG_FILE=$(ls -1t ./${LOG_PREFIX}.*.log | head -1) + + # If a new log file is found, kill old tail and switch to tailing it + if [ "${LOG_FILE}" != "${NEW_LOG_FILE}" ]; then + kill -9 ${PID} + wait $! 2>/dev/null + LOG_FILE=${NEW_LOG_FILE} + + # echo "Tailing ${LOG_FILE}" + tail -F ${LOG_FILE} & + PID=$! + fi + sleep 2 + done +{{- end }} diff --git a/Openshift4/artifactoryha-helm/templates/filebeat-configmap.yaml b/Openshift4/artifactoryha-helm/templates/filebeat-configmap.yaml new file mode 100755 index 0000000..d2db2a0 --- /dev/null +++ b/Openshift4/artifactoryha-helm/templates/filebeat-configmap.yaml @@ -0,0 +1,15 @@ +{{- if .Values.filebeat.enabled }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "artifactory-ha.fullname" . }}-filebeat-config + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} +data: + filebeat.yml: | +{{ tpl .Values.filebeat.filebeatYml . | indent 4 }} +{{- end -}} \ No newline at end of file diff --git a/Openshift4/artifactoryha-helm/templates/ingress.yaml b/Openshift4/artifactoryha-helm/templates/ingress.yaml new file mode 100755 index 0000000..e8e2fd2 --- /dev/null +++ b/Openshift4/artifactoryha-helm/templates/ingress.yaml @@ -0,0 +1,56 @@ +{{- if .Values.ingress.enabled -}} +{{- $serviceName := include "artifactory-ha.fullname" . -}} +{{- $servicePort := .Values.artifactory.externalPort -}} +{{- $artifactoryServicePort := .Values.artifactory.externalArtifactoryPort -}} +{{- if semverCompare ">=v1.14.0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: networking.k8s.io/v1beta1 +{{- else }} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ template "artifactory-ha.fullname" . }} + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.ingress.labels }} +{{ .Values.ingress.labels | toYaml | trimSuffix "\n"| indent 4 -}} +{{- end}} +{{- if .Values.ingress.annotations }} + annotations: + force-update: "{{ randAlpha 63 | lower }}" +{{ .Values.ingress.annotations | toYaml | trimSuffix "\n" | indent 4 -}} +{{- end }} +spec: + {{- if .Values.ingress.defaultBackend.enabled }} + backend: + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + {{- end }} + rules: +{{- if .Values.ingress.hosts }} + {{- range $host := .Values.ingress.hosts }} + - host: {{ $host | quote }} + http: + paths: + - path: {{ $.Values.ingress.routerPath }} + backend: + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + - path: {{ $.Values.ingress.artifactoryPath }} + backend: + serviceName: {{ $serviceName }} + servicePort: {{ $artifactoryServicePort }} + {{- end -}} +{{- end -}} + {{- with .Values.ingress.additionalRules }} +{{ tpl . $ | indent 2 }} + {{- end }} + + {{- if .Values.ingress.tls }} + tls: +{{ toYaml .Values.ingress.tls | indent 4 }} + {{- end -}} +{{- end -}} diff --git a/Openshift4/artifactoryha-helm/templates/nginx-artifactory-conf.yaml b/Openshift4/artifactoryha-helm/templates/nginx-artifactory-conf.yaml new file mode 100755 index 0000000..eb1f0e6 --- /dev/null +++ b/Openshift4/artifactoryha-helm/templates/nginx-artifactory-conf.yaml @@ -0,0 +1,14 @@ +{{- if and (not .Values.nginx.customArtifactoryConfigMap) .Values.nginx.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "artifactory-ha.fullname" . }}-nginx-artifactory-conf + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + artifactory.conf: | +{{ tpl .Values.nginx.artifactoryConf . | indent 4 }} +{{- end }} \ No newline at end of file diff --git a/Openshift4/artifactoryha-helm/templates/nginx-certificate-secret.yaml b/Openshift4/artifactoryha-helm/templates/nginx-certificate-secret.yaml new file mode 100755 index 0000000..2c1430a --- /dev/null +++ b/Openshift4/artifactoryha-helm/templates/nginx-certificate-secret.yaml @@ -0,0 +1,14 @@ +{{- if and (not .Values.nginx.tlsSecretName) .Values.nginx.enabled }} +apiVersion: v1 +kind: Secret +type: kubernetes.io/tls +metadata: + name: {{ template "artifactory-ha.fullname" . }}-nginx-certificate + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: +{{ ( include "artifactory-ha.gen-certs" . ) | indent 2 }} +{{- end }} diff --git a/Openshift4/artifactoryha-helm/templates/nginx-conf.yaml b/Openshift4/artifactoryha-helm/templates/nginx-conf.yaml new file mode 100755 index 0000000..5f424d5 --- /dev/null +++ b/Openshift4/artifactoryha-helm/templates/nginx-conf.yaml @@ -0,0 +1,14 @@ +{{- if and (not .Values.nginx.customConfigMap) .Values.nginx.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "artifactory-ha.fullname" . }}-nginx-conf + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + nginx.conf: | +{{ tpl .Values.nginx.mainConf . 
| indent 4 }} +{{- end }} diff --git a/Openshift4/artifactoryha-helm/templates/nginx-deployment.yaml b/Openshift4/artifactoryha-helm/templates/nginx-deployment.yaml new file mode 100755 index 0000000..4bc3f79 --- /dev/null +++ b/Openshift4/artifactoryha-helm/templates/nginx-deployment.yaml @@ -0,0 +1,185 @@ +{{- if .Values.nginx.enabled -}} +{{- $serviceName := include "artifactory-ha.fullname" . -}} +{{- $servicePort := .Values.artifactory.externalPort -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "artifactory-ha.nginx.fullname" . }} + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: {{ .Values.nginx.name }} +{{- if .Values.nginx.labels }} +{{ toYaml .Values.nginx.labels | indent 4 }} +{{- end }} +spec: + replicas: {{ .Values.nginx.replicaCount }} + selector: + matchLabels: + app: {{ template "artifactory-ha.name" . }} + release: {{ .Release.Name }} + component: {{ .Values.nginx.name }} + template: + metadata: + annotations: + checksum/nginx-conf: {{ include (print $.Template.BasePath "/nginx-conf.yaml") . | sha256sum }} + checksum/nginx-artifactory-conf: {{ include (print $.Template.BasePath "/nginx-artifactory-conf.yaml") . | sha256sum }} + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + component: {{ .Values.nginx.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + spec: + serviceAccountName: {{ template "artifactory-ha.serviceAccountName" . }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + - name: {{ .Values.imagePullSecrets }} + {{- end }} + initContainers: + - name: "setup" + image: "{{ .Values.initContainerImage }}" + imagePullPolicy: {{ .Values.nginx.image.pullPolicy }} + command: + - '/bin/sh' + - '-c' + - > + rm -rfv {{ .Values.nginx.persistence.mountPath }}/lost+found; + mkdir -p {{ .Values.nginx.persistence.mountPath }}/logs; + volumeMounts: + - mountPath: {{ .Values.nginx.persistence.mountPath | quote }} + name: nginx-volume + securityContext: + runAsUser: {{ .Values.nginx.uid }} + fsGroup: {{ .Values.nginx.gid }} + containers: + - name: {{ .Values.nginx.name }} + image: '{{ .Values.nginx.image.repository }}:{{ default .Chart.AppVersion .Values.nginx.image.version }}' + imagePullPolicy: {{ .Values.nginx.image.pullPolicy }} + command: + - 'nginx' + - '-g' + - 'daemon off;' + ports: + # DEPRECATION NOTE: The following is to maintain support for values pre 1.3.1 and + # will be cleaned up in a later version + {{- if .Values.nginx.http }} + {{- if .Values.nginx.http.enabled }} + - containerPort: {{ .Values.nginx.http.internalPort }} + {{- end }} + {{- else }} # DEPRECATED + - containerPort: {{ .Values.nginx.internalPortHttp }} + {{- end }} + {{- if .Values.nginx.https }} + {{- if .Values.nginx.https.enabled }} + - containerPort: {{ .Values.nginx.https.internalPort }} + {{- end }} + {{- else }} # DEPRECATED + - containerPort: {{ .Values.nginx.internalPortHttps }} + {{- end }} + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/nginx.conf + subPath: nginx.conf + - name: nginx-artifactory-conf + mountPath: "{{ .Values.nginx.persistence.mountPath }}/conf.d/" + - name: nginx-volume + mountPath: {{ .Values.nginx.persistence.mountPath | quote }} + - name: ssl-certificates + mountPath: "{{ .Values.nginx.persistence.mountPath }}/ssl" + resources: +{{ toYaml .Values.nginx.resources | indent 10 }} + {{- if 
.Values.nginx.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: {{ .Values.nginx.readinessProbe.path }} + {{- if .Values.nginx.http.enabled }} + port: {{ .Values.nginx.http.internalPort }} + scheme: HTTP + {{- else }} + port: {{ .Values.nginx.https.internalPort }} + scheme: HTTPS + {{- end }} + initialDelaySeconds: {{ .Values.nginx.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.nginx.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.nginx.readinessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.nginx.readinessProbe.failureThreshold }} + successThreshold: {{ .Values.nginx.readinessProbe.successThreshold }} + {{- end }} + {{- if .Values.nginx.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: {{ .Values.nginx.livenessProbe.path }} + {{- if .Values.nginx.http.enabled }} + port: {{ .Values.nginx.http.internalPort }} + scheme: HTTP + {{- else }} + port: {{ .Values.nginx.https.internalPort }} + scheme: HTTPS + {{- end }} + initialDelaySeconds: {{ .Values.nginx.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.nginx.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.nginx.livenessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.nginx.livenessProbe.failureThreshold }} + successThreshold: {{ .Values.nginx.livenessProbe.successThreshold }} + {{- end }} + {{- $image := .Values.logger.image.repository }} + {{- $tag := .Values.logger.image.tag }} + {{- $mountPath := .Values.nginx.persistence.mountPath }} + {{- range .Values.nginx.loggers }} + - name: {{ . | replace "_" "-" | replace "." "-" }} + image: '{{ $image }}:{{ $tag }}' + command: + - tail + args: + - '-F' + - '{{ $mountPath }}/logs/{{ . }}' + volumeMounts: + - name: nginx-volume + mountPath: {{ $mountPath }} + {{- end }} + {{- with .Values.nginx.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.nginx.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.nginx.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} + {{- end }} + volumes: + - name: nginx-conf + configMap: + {{- if .Values.nginx.customConfigMap }} + name: {{ .Values.nginx.customConfigMap }} + {{- else }} + name: {{ template "artifactory-ha.fullname" . }}-nginx-conf + {{- end }} + - name: nginx-artifactory-conf + configMap: + {{- if .Values.nginx.customArtifactoryConfigMap }} + name: {{ .Values.nginx.customArtifactoryConfigMap }} + {{- else }} + name: {{ template "artifactory-ha.fullname" . }}-nginx-artifactory-conf + {{- end }} + + - name: nginx-volume + {{- if .Values.nginx.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ .Values.nginx.persistence.existingClaim | default (include "artifactory-ha.nginx.fullname" .) }} + {{- else }} + emptyDir: {} + {{- end }} + - name: ssl-certificates + secret: + {{- if .Values.nginx.tlsSecretName }} + secretName: {{ .Values.nginx.tlsSecretName }} + {{- else }} + secretName: {{ template "artifactory-ha.fullname" . 
}}-nginx-certificate + {{- end }} +{{- end }} diff --git a/Openshift4/artifactoryha-helm/templates/nginx-pvc.yaml b/Openshift4/artifactoryha-helm/templates/nginx-pvc.yaml new file mode 100755 index 0000000..68a89ce --- /dev/null +++ b/Openshift4/artifactoryha-helm/templates/nginx-pvc.yaml @@ -0,0 +1,26 @@ +{{- if and .Values.nginx.persistence.enabled (.Values.nginx.enabled) (eq (int .Values.nginx.replicaCount) 1) }} +{{- if (not .Values.nginx.persistence.existingClaim) }} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ template "artifactory-ha.nginx.fullname" . }} + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + accessModes: + - {{ .Values.nginx.persistence.accessMode | quote }} + resources: + requests: + storage: {{ .Values.nginx.persistence.size | quote }} +{{- if .Values.nginx.persistence.storageClass }} +{{- if (eq "-" .Values.nginx.persistence.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.nginx.persistence.storageClass }}" +{{- end }} +{{- end }} +{{- end }} +{{- end }} diff --git a/Openshift4/artifactoryha-helm/templates/nginx-service.yaml b/Openshift4/artifactoryha-helm/templates/nginx-service.yaml new file mode 100755 index 0000000..7a212e0 --- /dev/null +++ b/Openshift4/artifactoryha-helm/templates/nginx-service.yaml @@ -0,0 +1,69 @@ +{{- if .Values.nginx.enabled -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "artifactory-ha.nginx.fullname" . }} + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: {{ .Values.nginx.name }} + {{- if .Values.nginx.service.labels }} +{{ toYaml .Values.nginx.service.labels | indent 4 }} + {{- end }} +{{- if .Values.nginx.service.annotations }} + annotations: +{{ toYaml .Values.nginx.service.annotations | indent 4 }} +{{- end }} +spec: + type: {{ .Values.nginx.service.type }} + {{- if and (eq .Values.nginx.service.type "ClusterIP") .Values.nginx.service.clusterIP }} + clusterIP: {{ .Values.nginx.service.clusterIP }} + {{- end }} +{{- if eq .Values.nginx.service.type "LoadBalancer" }} + {{ if .Values.nginx.service.loadBalancerIP -}} + loadBalancerIP: {{ .Values.nginx.service.loadBalancerIP }} + {{ end -}} + {{- if .Values.nginx.service.externalTrafficPolicy }} + externalTrafficPolicy: {{ .Values.nginx.service.externalTrafficPolicy }} + {{- end }} +{{- end }} +{{- if .Values.nginx.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: +{{ toYaml .Values.nginx.service.loadBalancerSourceRanges | indent 4 }} +{{- end }} + ports: + # DEPRECATION NOTE: The following is to maintain support for values pre 1.3.0 and + # will be cleaned up in a later verion + {{- if .Values.nginx.http }} + {{- if .Values.nginx.http.enabled }} + - port: {{ .Values.nginx.http.externalPort }} + targetPort: {{ .Values.nginx.http.internalPort }} + protocol: TCP + name: http + {{- end }} + {{- else }} # DEPRECATED + - port: {{ .Values.nginx.externalPortHttp }} + targetPort: {{ .Values.nginx.internalPortHttp }} + protocol: TCP + name: http + {{- end }} + {{- if .Values.nginx.https }} + {{- if .Values.nginx.https.enabled }} + - port: {{ .Values.nginx.https.externalPort }} + targetPort: {{ .Values.nginx.https.internalPort }} + protocol: TCP + name: https + {{- end }} + {{- else }} # DEPRECATED + - port: {{ .Values.nginx.externalPortHttps }} + 
targetPort: {{ .Values.nginx.internalPortHttps }} + protocol: TCP + name: https + {{- end }} + selector: + app: {{ template "artifactory-ha.name" . }} + component: {{ .Values.nginx.name }} + release: {{ .Release.Name }} +{{- end }} diff --git a/Openshift4/artifactoryha-helm/values-large.yaml b/Openshift4/artifactoryha-helm/values-large.yaml new file mode 100755 index 0000000..ec05d2a --- /dev/null +++ b/Openshift4/artifactoryha-helm/values-large.yaml @@ -0,0 +1,24 @@ +artifactory: + primary: + resources: + requests: + memory: "6Gi" + cpu: "4" + limits: + memory: "10Gi" + cpu: "8" + javaOpts: + xms: "6g" + xmx: "8g" + node: + replicaCount: 3 + resources: + requests: + memory: "6Gi" + cpu: "4" + limits: + memory: "10Gi" + cpu: "8" + javaOpts: + xms: "6g" + xmx: "8g" diff --git a/Openshift4/artifactoryha-helm/values-medium.yaml b/Openshift4/artifactoryha-helm/values-medium.yaml new file mode 100755 index 0000000..33879c0 --- /dev/null +++ b/Openshift4/artifactoryha-helm/values-medium.yaml @@ -0,0 +1,24 @@ +artifactory: + primary: + resources: + requests: + memory: "4Gi" + cpu: "2" + limits: + memory: "8Gi" + cpu: "6" + javaOpts: + xms: "4g" + xmx: "6g" + node: + replicaCount: 2 + resources: + requests: + memory: "4Gi" + cpu: "2" + limits: + memory: "8Gi" + cpu: "6" + javaOpts: + xms: "4g" + xmx: "6g" diff --git a/Openshift4/artifactoryha-helm/values-small.yaml b/Openshift4/artifactoryha-helm/values-small.yaml new file mode 100755 index 0000000..4babf97 --- /dev/null +++ b/Openshift4/artifactoryha-helm/values-small.yaml @@ -0,0 +1,24 @@ +artifactory: + primary: + resources: + requests: + memory: "4Gi" + cpu: "2" + limits: + memory: "6Gi" + cpu: "4" + javaOpts: + xms: "4g" + xmx: "4g" + node: + replicaCount: 1 + resources: + requests: + memory: "4Gi" + cpu: "2" + limits: + memory: "6Gi" + cpu: "4" + javaOpts: + xms: "4g" + xmx: "4g" diff --git a/Openshift4/artifactoryha-helm/values.yaml b/Openshift4/artifactoryha-helm/values.yaml new file mode 100755 index 0000000..ce07ccb --- /dev/null +++ b/Openshift4/artifactoryha-helm/values.yaml @@ -0,0 +1,1330 @@ +# Default values for artifactory-ha. +# This is a YAML-formatted file. +# Beware when changing values here. You should know what you are doing! +# Access the values with {{ .Values.key.subkey }} + +# Common +initContainerImage: "alpine:3.10" + +installer: + type: + platform: + +# For supporting pulling from private registries +imagePullSecrets: + +## Role Based Access Control +## Ref: https://kubernetes.io/docs/admin/authorization/rbac/ +rbac: + create: true + role: + ## Rules to create. It follows the role specification + rules: + - apiGroups: + - '' + resources: + - services + - endpoints + - pods + verbs: + - get + - watch + - list + +## Service Account +## Ref: https://kubernetes.io/docs/admin/service-accounts-admin/ +## +serviceAccount: + create: true + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + name: + annotations: {} + +ingress: + enabled: false + defaultBackend: + enabled: true + # Used to create an Ingress record. + hosts: [] + routerPath: / + artifactoryPath: /artifactory/ + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + labels: {} + # traffic-type: external + # traffic-type: internal + tls: [] + # Secrets must be manually created in the namespace. 
+ # - secretName: chart-example-tls + # hosts: + # - artifactory.domain.example + + # Additional ingress rules + additionalRules: [] + + +networkpolicy: + # Allows all ingress and egress + - name: artifactory + podSelector: + matchLabels: + app: artifactory-ha + egress: + - {} + ingress: + - {} + # Uncomment to allow only artifactory pods to communicate with postgresql (if postgresql.enabled is true) + # - name: postgresql + # podSelector: + # matchLabels: + # app: postgresql + # ingress: + # - from: + # - podSelector: + # matchLabels: + # app: artifactory-ha + + +## Database configurations +## Use the wait-for-db init container. Set to false to skip +waitForDatabase: true + +## Configuration values for the postgresql dependency +## ref: https://github.com/kubernetes/charts/blob/master/stable/postgresql/README.md +## +postgresql: + enabled: true + image: + registry: docker.bintray.io + repository: bitnami/postgresql + tag: 9.6.15-debian-9-r91 + postgresqlUsername: artifactory + postgresqlPassword: "" + postgresqlDatabase: artifactory + postgresqlConfiguration: + listenAddresses: "'*'" + maxConnections: "1500" + persistence: + enabled: true + size: 50Gi + service: + port: 5432 + resources: {} + # requests: + # memory: "512Mi" + # cpu: "100m" + # limits: + # memory: "1Gi" + # cpu: "500m" + nodeSelector: {} + +## If NOT using the PostgreSQL in this chart (postgresql.enabled=false), +## you MUST specify custom database details here or Artifactory will NOT start +database: + type: + driver: + ## If you set the url, leave host and port empty + url: + ## If you would like this chart to create the secret containing the db + ## password, use these values + user: + password: + ## If you have existing Kubernetes secrets containing db credentials, use + ## these values + secrets: {} + # user: + # name: "rds-artifactory" + # key: "db-user" + # password: + # name: "{{ .Release.Name}}}}-postgresql" + # key: "postgresql-password" + # url: + # name: "rds-artifactory" + # key: "db-url" + +logger: + image: + repository: 'busybox' + tag: '1.30' + +# Artifactory +artifactory: + name: artifactory-ha + image: + # repository: "docker.bintray.io/jfrog/artifactory-pro" + repository: "peters95/artifactory-pro" + # Note that by default we use appVersion to get image tag + # version: + pullPolicy: IfNotPresent + + # Create a priority class for the Artifactory pods or use an existing one + # NOTE - Maximum allowed value of a user defined priority is 1000000000 + priorityClass: + create: false + value: 1000000000 + ## Override default name + # name: + ## Use an existing priority class + # existingPriorityClass: + + # Delete the db.properties file in ARTIFACTORY_HOME/etc/db.properties + deleteDBPropertiesOnStartup: true + database: + maxOpenConnections: 80 + + # This directory is intended for use with NFS eventual configuration for HA + haDataDir: + enabled: false + path: + + # Files to copy to ARTIFACTORY_HOME/ on each Artifactory startup + copyOnEveryStartup: + # # Absolute path + # - source: /artifactory_extra_conf/binarystore.xml + # # Relative to ARTIFACTORY_HOME/ + # target: etc/ + # # Absolute path + # - source: /artifactory_extra_conf/artifactory.lic + # # Relative to ARTIFACTORY_HOME/ + # target: etc/ + + # Sidecar containers for tailing Artifactory logs + loggers: [] + # - request.log + # - event.log + # - binarystore.log + # - request_trace.log + # - access.log + # - artifactory.log + # - build_info_migration.log + + # Sidecar containers for tailing Tomcat (catalina) logs + catalinaLoggers: [] + # - 
catalina.log + # - host-manager.log + # - localhost.log + # - manager.log + + ## Add custom init containers execution before predefined init containers + customInitContainersBegin: | + - name: "custom-setup" + image: "{{ .Values.initContainerImage }}" + imagePullPolicy: "{{ .Values.artifactory.image.pullPolicy }}" + command: + - 'sh' + - '-c' + - 'chown -R 1030:1030 {{ .Values.artifactory.persistence.mountPath }}' + securityContext: + runAsUser: 0 + volumeMounts: + - mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + name: volume + ## Add custom init containers + + ## Add custom init containers execution after predefined init containers + customInitContainers: | + # - name: "custom-setup" + # image: "{{ .Values.initContainerImage }}" + # imagePullPolicy: "{{ .Values.artifactory.image.pullPolicy }}" + # command: + # - 'sh' + # - '-c' + # - 'touch {{ .Values.artifactory.persistence.mountPath }}/example-custom-setup' + # volumeMounts: + # - mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + # name: volume + + ## Add custom sidecar containers + # - The provided example uses a custom volume (customVolumes) + # - The provided example shows running container as root (id 0) + customSidecarContainers: | + # - name: "sidecar-list-etc" + # image: "{{ .Values.initContainerImage }}" + # imagePullPolicy: "{{ .Values.artifactory.image.pullPolicy }}" + # securityContext: + # runAsUser: 0 + # fsGroup: 0 + # command: + # - 'sh' + # - '-c' + # - 'sh /scripts/script.sh' + # volumeMounts: + # - mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + # name: volume + # - mountPath: "/scripts/script.sh" + # name: custom-script + # subPath: script.sh + # resources: + # requests: + # memory: "32Mi" + # cpu: "50m" + # limits: + # memory: "128Mi" + # cpu: "100m" + + ## Add custom volumes + customVolumes: | + # - name: custom-script + # configMap: + # name: custom-script + + ## Add custom volumesMounts + customVolumeMounts: | + # - name: custom-script + # mountPath: "/scripts/script.sh" + # subPath: script.sh + # - name: posthook-start + # mountPath: "/scripts/posthoook-start.sh" + # subPath: posthoook-start.sh + # - name: prehook-start + # mountPath: "/scripts/prehook-start.sh" + # subPath: prehook-start.sh + + # Add custom persistent volume mounts - Available for the pod + customPersistentPodVolumeClaim: {} + # name: + # mountPath: + # accessModes: + # - "-" + # size: + # storageClassName: + + # Add custom persistent volume mounts - Available to the entire namespace + customPersistentVolumeClaim: {} + # name: + # mountPath: + # accessModes: + # - "-" + # size: + # storageClassName: + + ## Artifactory HA requires a unique master key. Each Artifactory node must have the same master key! + ## You can generate one with the command: 'openssl rand -hex 16' + ## Pass it to helm with '--set artifactory.masterKey=${MASTER_KEY}' + ## Alternatively, you can use a pre-existing secret with a key called master-key by specifying masterKeySecretName + ## IMPORTANT: You should NOT use the example masterKey for a production deployment! + masterKey: FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF + # masterKeySecretName: + + ## Join Key to connect to other services to Artifactory + ## IMPORTANT: You should NOT use the example joinKey for a production deployment! + joinKey: EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE + + binarystore: + enabled: true + + accessAdmin: + ip: "127.0.0.1" + password: + secret: + dataKey: + + ## Artifactory license. 
+ license: + ## licenseKey is the license key in plain text. Use either this or the license.secret setting + licenseKey: + ## If artifactory.license.secret is passed, it will be mounted as + ## ARTIFACTORY_HOME/etc/artifactory.lic and loaded at run time. + secret: + ## The dataKey should be the name of the secret data key created. + dataKey: + + ## Create configMap with artifactory.config.import.xml and security.import.xml and pass name of configMap in following parameter + configMapName: + + # Add any list of configmaps to Artifactory + configMaps: | + # posthook-start.sh: |- + # echo "This is a post start script" + # posthook-end.sh: |- + # echo "This is a post end script" + + ## List of secrets for Artifactory user plugins. + ## One Secret per plugin's files. + userPluginSecrets: + # - archive-old-artifacts + # - build-cleanup + # - webhook + # - '{{ template "my-chart.fullname" . }}' + + ## Extra pre-start command to install JDBC driver for MySql/MariaDb/Oracle + # preStartCommand: "wget -O /opt/jfrog/artifactory/tomcat/lib/mysql-connector-java-5.1.41.jar https://jcenter.bintray.com/mysql/mysql-connector-java/5.1.41/mysql-connector-java-5.1.41.jar" + ## Extra post-start command to run extra commands after container starts + # postStartCommand: + + ## Extra environment variables that can be used to tune Artifactory to your needs. + ## Uncomment and set value as needed + #extraEnvironmentVariables: | + # - name: JF_SHARED_DATABSE_USERNAME + # value: "artifactory" + # - name: JF_SHARED_DATABASE_PASSWORD + # valueFrom: + # secretKeyRef: + # name: {{ .Release.Name }}-postgresql + # key: postgresql-password + # - name: POSTGRES_DB + # value: "artifactory" + + # TODO: Fix javaOpts for member nodes (currently uses primary settings for all nodes) + systemYaml: | + shared: + extraJavaOpts: > + {{- with .Values.artifactory.primary.javaOpts }} + -Dartifactory.async.corePoolSize={{ .corePoolSize }} + {{- if .xms }} + -Xms{{ .xms }} + {{- end }} + {{- if .xmx }} + -Xmx{{ .xmx }} + {{- end }} + {{- if .jmx.enabled }} + -Dcom.sun.management.jmxremote + -Dcom.sun.management.jmxremote.port={{ .jmx.port }} + -Dcom.sun.management.jmxremote.rmi.port={{ .jmx.port }} + -Dcom.sun.management.jmxremote.ssl={{ .jmx.ssl }} + {{- if .jmx.host }} + -Djava.rmi.server.hostname={{ tpl .jmx.host $ }} + {{- else }} + -Djava.rmi.server.hostname={{ template "artifactory-ha.fullname" $ }} + {{- end }} + {{- if .jmx.authenticate }} + -Dcom.sun.management.jmxremote.authenticate=true + -Dcom.sun.management.jmxremote.access.file={{ .jmx.accessFile }} + -Dcom.sun.management.jmxremote.password.file={{ .jmx.passwordFile }} + {{- else }} + -Dcom.sun.management.jmxremote.authenticate=false + {{- end }} + {{- end }} + {{- if .other }} + {{ .other }} + {{- end }} + {{- end }} + database: + {{- if .Values.postgresql.enabled }} + type: postgresql + url: 'jdbc:postgresql://{{ .Release.Name }}-postgresql:{{ .Values.postgresql.service.port }}/{{ .Values.postgresql.postgresqlDatabase }}' + host: '' + driver: org.postgresql.Driver + username: '{{ .Values.postgresql.postgresqlUsername }}' + password: '{{ .Values.postgresql.postgresqlPassword }}' + {{ else }} + type: '{{ .Values.database.type }}' + url: '{{ .Values.database.url }}' + driver: '{{ .Values.database.driver }}' + username: '{{ .Values.database.user }}' + password: '{{ .Values.database.password }}' + {{- end }} + security: + joinKey: '{{ .Values.artifactory.joinKey }}' + masterKey: '{{ .Values.artifactory.masterKey }}' + artifactory: + {{- if .Values.artifactory.haDataDir.enabled 
}} + node: + haDataDir: {{ .Values.artifactory.haDataDir.path }} + {{- end }} + database: + maxOpenConnections: {{ .Values.artifactory.database.maxOpenConnections }} + access: + database: + maxOpenConnections: '{{ .Values.access.database.maxOpenConnections }}' + {{- if .Values.access.database.enabled }} + type: '{{ .Values.access.database.type }}' + url: '{{ .Values.access.database.url }}' + driver: '{{ .Values.access.database.driver }}' + username: '{{ .Values.access.database.user }}' + password: '{{ .Values.access.database.password }}' + {{- end }} + + ## IMPORTANT: If overriding artifactory.internalPort: + ## DO NOT use port lower than 1024 as Artifactory runs as non-root and cannot bind to ports lower than 1024! + externalPort: 8082 + internalPort: 8082 + externalArtifactoryPort: 8081 + internalArtifactoryPort: 8081 + uid: 1030 + terminationGracePeriodSeconds: 30 + ## The following settings are to configure the frequency of the liveness and readiness probes + livenessProbe: + enabled: true + path: /router/api/v1/system/health + initialDelaySeconds: 180 + failureThreshold: 10 + timeoutSeconds: 10 + periodSeconds: 10 + successThreshold: 1 + + readinessProbe: + enabled: true + path: /router/api/v1/system/health + initialDelaySeconds: 60 + failureThreshold: 10 + timeoutSeconds: 10 + periodSeconds: 10 + successThreshold: 1 + + persistence: + enabled: true + local: false + redundancy: 3 + mountPath: "/var/opt/jfrog/artifactory" + accessMode: ReadWriteOnce + size: 200Gi + + ## Use a custom Secret to be mounted as your binarystore.xml + ## NOTE: This will ignore all settings below that make up binarystore.xml + customBinarystoreXmlSecret: + + maxCacheSize: 50000000000 + cacheProviderDir: cache + eventual: + numberOfThreads: 10 + ## artifactory data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClassName: "-" + + ## Set the persistence storage type. This will apply the matching binarystore.xml to Artifactory config + ## Supported types are: + ## file-system (default) + ## nfs + ## google-storage + ## aws-s3 + ## azure-blob + type: file-system + + ## Use binarystoreXml to provide a custom binarystore.xml + ## This can be a template or hardcoded. 
+ binarystoreXml: | + {{- if eq .Values.artifactory.persistence.type "file-system" }} + + {{- if .Values.artifactory.persistence.fileSystem.existingSharedClaim.enabled }} + + + + + + {{- range $sharedClaimNumber, $e := until (.Values.artifactory.persistence.fileSystem.existingSharedClaim.numberOfExistingClaims|int) -}} + + {{- end }} + + + + + + {{ .Values.artifactory.persistence.maxCacheSize }} + {{ .Values.artifactory.persistence.cacheProviderDir }} + + + // Specify the read and write strategy and redundancy for the sharding binary provider + + roundRobin + percentageFreeSpace + 2 + + + {{- range $sharedClaimNumber, $e := until (.Values.artifactory.persistence.fileSystem.existingSharedClaim.numberOfExistingClaims|int) -}} + //For each sub-provider (mount), specify the filestore location + + filestore{{ $sharedClaimNumber }} + + {{- end }} + + {{- else }} + + + + + crossNetworkStrategy + crossNetworkStrategy + {{ .Values.artifactory.persistence.redundancy }} + 2 + 2 + + + + + + + + + {{ .Values.artifactory.persistence.maxCacheSize }} + {{ .Values.artifactory.persistence.cacheProviderDir }} + + + + + shard-fs-1 + local + + + + + 30 + tester-remote1 + 10000 + remote + + + + {{- end }} + {{- end }} + {{- if eq .Values.artifactory.persistence.type "google-storage" }} + + + + + crossNetworkStrategy + crossNetworkStrategy + {{ .Values.artifactory.persistence.redundancy }} + 2 + + + + + + + + + + + + + {{ .Values.artifactory.persistence.maxCacheSize }} + {{ .Values.artifactory.persistence.cacheProviderDir }} + + + + local + + + + 30 + 10000 + remote + + + + {{ .Values.artifactory.persistence.mountPath }}/data/filestore + /tmp + + + + google-cloud-storage + {{ .Values.artifactory.persistence.googleStorage.endpoint }} + {{ .Values.artifactory.persistence.googleStorage.httpsOnly }} + {{ .Values.artifactory.persistence.googleStorage.bucketName }} + {{ .Values.artifactory.persistence.googleStorage.identity }} + {{ .Values.artifactory.persistence.googleStorage.credential }} + {{ .Values.artifactory.persistence.googleStorage.path }} + {{ .Values.artifactory.persistence.googleStorage.bucketExists }} + + + {{- end }} + {{- if eq .Values.artifactory.persistence.type "aws-s3-v3" }} + + + + + + + + + + + + + + + + + crossNetworkStrategy + crossNetworkStrategy + {{ .Values.artifactory.persistence.redundancy }} + + + + + remote + + + + local + + + + + {{ .Values.artifactory.persistence.maxCacheSize }} + {{ .Values.artifactory.persistence.cacheProviderDir }} + + + {{- with .Values.artifactory.persistence.awsS3V3 }} + + {{ .testConnection }} + {{- if .identity }} + {{ .identity }} + {{- end }} + {{- if .credential }} + {{ .credential }} + {{- end }} + {{ .region }} + {{ .bucketName }} + {{ .path }} + {{ .endpoint }} + {{- with .kmsServerSideEncryptionKeyId }} + {{ . }} + {{- end }} + {{- with .kmsKeyRegion }} + {{ . }} + {{- end }} + {{- with .kmsCryptoMode }} + {{ . }} + {{- end }} + true + {{ .usePresigning }} + {{ .signatureExpirySeconds }} + {{- with .cloudFrontDomainName }} + {{ . }} + {{- end }} + {{- with .cloudFrontKeyPairId }} + {{ .cloudFrontKeyPairId }} + {{- end }} + {{- with .cloudFrontPrivateKey }} + {{ . 
}} + {{- end }} + + {{- end }} + + {{- end }} + + {{- if eq .Values.artifactory.persistence.type "aws-s3" }} + + + + + + + + + + + + + + + + + + {{ .Values.artifactory.persistence.maxCacheSize }} + {{ .Values.artifactory.persistence.cacheProviderDir }} + + + + local + + + + 30 + 10000 + remote + + + + crossNetworkStrategy + crossNetworkStrategy + {{ .Values.artifactory.persistence.redundancy }} + + + + + {{ .Values.artifactory.persistence.awsS3.endpoint }} + {{- if .Values.artifactory.persistence.awsS3.roleName }} + {{ .Values.artifactory.persistence.awsS3.roleName }} + true + {{- else }} + {{ .Values.artifactory.persistence.awsS3.refreshCredentials }} + {{- end }} + {{ .Values.artifactory.persistence.awsS3.s3AwsVersion }} + {{ .Values.artifactory.persistence.awsS3.testConnection }} + {{ .Values.artifactory.persistence.awsS3.httpsOnly }} + {{ .Values.artifactory.persistence.awsS3.region }} + {{ .Values.artifactory.persistence.awsS3.bucketName }} + {{- if .Values.artifactory.persistence.awsS3.identity }} + {{ .Values.artifactory.persistence.awsS3.identity }} + {{- end }} + {{- if .Values.artifactory.persistence.awsS3.credential }} + {{ .Values.artifactory.persistence.awsS3.credential }} + {{- end }} + {{ .Values.artifactory.persistence.awsS3.path }} + {{- range $key, $value := .Values.artifactory.persistence.awsS3.properties }} + + {{- end }} + + + {{- end }} + {{- if eq .Values.artifactory.persistence.type "azure-blob" }} + + + + + + + + + + + + + + + + + + {{ .Values.artifactory.persistence.maxCacheSize }} + {{ .Values.artifactory.persistence.cacheProviderDir }} + + + + + crossNetworkStrategy + crossNetworkStrategy + 2 + 1 + + + + + remote + + + + local + + + + + {{ .Values.artifactory.persistence.azureBlob.accountName }} + {{ .Values.artifactory.persistence.azureBlob.accountKey }} + {{ .Values.artifactory.persistence.azureBlob.endpoint }} + {{ .Values.artifactory.persistence.azureBlob.containerName }} + {{ .Values.artifactory.persistence.azureBlob.testConnection }} + + + {{- end }} + + ## For artifactory.persistence.type file-system + fileSystem: + ## You may also use existing shared claims for the data and backup storage. This allows storage (NAS for example) to be used for Data and Backup dirs which are safe to share across multiple artifactory nodes. + ## You may specify numberOfExistingClaims to indicate how many of these existing shared claims to mount. (Default = 1) + ## Create PVCs with ReadWriteMany that match the naming convetions: + ## {{ template "artifactory-ha.fullname" . }}-data-pvc- + ## {{ template "artifactory-ha.fullname" . }}-backup-pvc- + ## Example (using numberOfExistingClaims: 2) + ## myexample-artifactory-ha-data-pvc-0 + ## myexample-artifactory-ha-backup-pvc-0 + ## myexample-artifactory-ha-data-pvc-1 + ## myexample-artifactory-ha-backup-pvc-1 + ## Note: While you need two PVC fronting two PVs, multiple PVs can be attached to the same storage in many cases allowing you to share an underlying drive. + + ## Need to have the following set + existingSharedClaim: + enabled: false + numberOfExistingClaims: 1 + ## Should be a child directory of {{ .Values.artifactory.persistence.mountPath }} + dataDir: "{{ .Values.artifactory.persistence.mountPath }}/artifactory-data" + backupDir: "/var/opt/jfrog/artifactory-backup" + + + ## For artifactory.persistence.type nfs + ## If using NFS as the shared storage, you must have a running NFS server that is accessible by your Kubernetes + ## cluster nodes. 
+    ## Need to have the following set
+    nfs:
+      # Must pass actual IP of NFS server with '--set artifactory.persistence.nfs.ip=${NFS_IP}'
+      ip:
+      haDataMount: "/data"
+      haBackupMount: "/backup"
+      dataDir: "/var/opt/jfrog/artifactory-ha"
+      backupDir: "/var/opt/jfrog/artifactory-backup"
+      capacity: 200Gi
+      mountOptions: []
+    ## For artifactory.persistence.type google-storage
+    googleStorage:
+      endpoint: storage.googleapis.com
+      httpsOnly: false
+      # Set a unique bucket name
+      bucketName: "artifactory-ha-gcp"
+      identity:
+      credential:
+      path: "artifactory-ha/filestore"
+      bucketExists: false
+
+    ## For artifactory.persistence.type aws-s3-v3
+    awsS3V3:
+      testConnection: false
+      identity:
+      credential:
+      region:
+      bucketName: artifactory-aws
+      path: artifactory/filestore
+      endpoint:
+      kmsServerSideEncryptionKeyId:
+      kmsKeyRegion:
+      kmsCryptoMode:
+      useInstanceCredentials: true
+      usePresigning: false
+      signatureExpirySeconds: 300
+      cloudFrontDomainName:
+      cloudFrontKeyPairId:
+      cloudFrontPrivateKey:
+
+    ## For artifactory.persistence.type aws-s3
+    ## IMPORTANT: Make sure S3 `endpoint` and `region` match! See https://docs.aws.amazon.com/general/latest/gr/rande.html
+    awsS3:
+      # Set a unique bucket name
+      bucketName: "artifactory-ha-aws"
+      endpoint:
+      region:
+      roleName:
+      identity:
+      credential:
+      path: "artifactory-ha/filestore"
+      refreshCredentials: true
+      httpsOnly: true
+      testConnection: false
+      s3AwsVersion: "AWS4-HMAC-SHA256"
+
+      ## Additional properties to set on the s3 provider
+      properties: {}
+      # httpclient.max-connections: 100
+    ## For artifactory.persistence.type azure-blob
+    azureBlob:
+      accountName:
+      accountKey:
+      endpoint:
+      containerName:
+      testConnection: false
+  service:
+    name: artifactory
+    type: ClusterIP
+    ## For supporting whitelist on the Artifactory service (useful if setting service.type=LoadBalancer)
+    ## Set this to a list of IP CIDR ranges
+    ## Example: loadBalancerSourceRanges: ['10.10.10.5/32', '10.11.10.5/32']
+    ## or pass from helm command line
+    ## Example: helm install ... --set artifactory.service.loadBalancerSourceRanges='{10.10.10.5/32,10.11.10.5/32}'
+    loadBalancerSourceRanges: []
+    annotations: {}
+    ## Which nodes in the cluster should be in the external load balancer pool (have external traffic routed to them)
+    ## Supported pool values
+    ## members
+    ## all
+    pool: members
+
+  ## The following Java options are passed to the java process running Artifactory.
+  ## These are passed to all cluster members, primary and member nodes alike.
+  javaOpts: {}
+  # other: ""
+  annotations: {}
+
+  ## Type specific configurations.
+  ## There is a difference between the primary and the member nodes.
+  ## Customising their resources and java parameters is done here.
+  primary:
+    name: artifactory-ha-primary
+    labels: {}
+    persistence:
+      ## Set existingClaim to true or false
+      ## If true, you must prepare a PVC with a name like `volume-myrelease-artifactory-ha-primary-0`
+      existingClaim: false
+    ## Resources for the primary node
+    resources: {}
+    # requests:
+    #   memory: "1Gi"
+    #   cpu: "500m"
+    # limits:
+    #   memory: "2Gi"
+    #   cpu: "1"
+    ## The following Java options are passed to the java process running the Artifactory primary node.
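+    ## For example (assumed sizing, not a chart default): a primary pod with a 6Gi memory
+    ## request might set xms: "4g" and xmx: "4g", leaving headroom for non-heap usage.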
+    ## You should set them according to the resources set above
+    javaOpts:
+      # xms: "1g"
+      # xmx: "2g"
+      corePoolSize: 16
+      jmx:
+        enabled: false
+        port: 9010
+        host:
+        ssl: false
+        # When authenticate is true, accessFile and passwordFile are required
+        authenticate: false
+        accessFile:
+        passwordFile:
+      # other: ""
+    nodeSelector: {}
+
+    tolerations: []
+
+    affinity: {}
+    ## Only used if "affinity" is empty
+    podAntiAffinity:
+      ## Valid values are "soft" or "hard"; any other value indicates no anti-affinity
+      type: ""
+      topologyKey: "kubernetes.io/hostname"
+
+  node:
+    name: artifactory-ha-member
+    labels: {}
+    persistence:
+      ## Set existingClaim to true or false
+      ## If true, you must prepare a PVC with a name like `volume-myrelease-artifactory-ha-member-0`
+      existingClaim: false
+    replicaCount: 2
+    minAvailable: 1
+    ## Resources for the member nodes
+    resources: {}
+    # requests:
+    #   memory: "1Gi"
+    #   cpu: "500m"
+    # limits:
+    #   memory: "2Gi"
+    #   cpu: "1"
+    ## The following Java options are passed to the java process running Artifactory member nodes.
+    ## You should set them according to the resources set above
+    javaOpts:
+      # xms: "1g"
+      # xmx: "2g"
+      corePoolSize: 16
+      jmx:
+        enabled: false
+        port: 9010
+        host:
+        ssl: false
+        # When authenticate is true, accessFile and passwordFile are required
+        authenticate: false
+        accessFile:
+        passwordFile:
+      # other: ""
+    nodeSelector: {}
+    waitForPrimaryStartup:
+      enabled: true
+      time: 60
+
+    tolerations: []
+
+    ## Complete specification of the "affinity" of the member nodes; if this is non-empty,
+    ## "podAntiAffinity" values are not used.
+    affinity: {}
+
+    ## Only used if "affinity" is empty
+    podAntiAffinity:
+      ## Valid values are "soft" or "hard"; any other value indicates no anti-affinity
+      type: ""
+      topologyKey: "kubernetes.io/hostname"
+
+access:
+  database:
+    maxOpenConnections: 80
+
+# Init containers
+initContainers:
+  resources: {}
+  # requests:
+  #   memory: "64Mi"
+  #   cpu: "10m"
+  # limits:
+  #   memory: "128Mi"
+  #   cpu: "250m"
+
+
+# Nginx
+nginx:
+  enabled: true
+  name: nginx
+  labels: {}
+  replicaCount: 1
+  uid: 104
+  gid: 107
+  image:
+    # repository: "docker.bintray.io/jfrog/nginx-artifactory-pro"
+    repository: "peters95/nginx-artifactory-pro"
+    # Note that by default we use appVersion to get image tag
+    # version:
+    pullPolicy: IfNotPresent
+
+
+  # Sidecar containers for tailing Nginx logs
+  loggers: []
+  # - access.log
+  # - error.log
+
+  mainConf: |
+    # Main Nginx configuration file
+    worker_processes  4;
+    error_log  {{ .Values.nginx.persistence.mountPath }}/logs/error.log warn;
+    pid        /tmp/nginx.pid;
+    events {
+      worker_connections  1024;
+    }
+    http {
+      include       /etc/nginx/mime.types;
+      default_type  application/octet-stream;
+      variables_hash_max_size 1024;
+      variables_hash_bucket_size 64;
+      server_names_hash_max_size 4096;
+      server_names_hash_bucket_size 128;
+      types_hash_max_size 2048;
+      types_hash_bucket_size 64;
+      proxy_read_timeout 2400s;
+      client_header_timeout 2400s;
+      client_body_timeout 2400s;
+      proxy_connect_timeout 75s;
+      proxy_send_timeout 2400s;
+      proxy_buffer_size 32k;
+      proxy_buffers 40 32k;
+      proxy_busy_buffers_size 64k;
+      proxy_temp_file_write_size 250m;
+      proxy_http_version 1.1;
+      client_body_buffer_size 128k;
+      log_format main '$remote_addr - $remote_user [$time_local] "$request" '
+                      '$status $body_bytes_sent "$http_referer" '
+                      '"$http_user_agent" "$http_x_forwarded_for"';
+      log_format timing 'ip = $remote_addr '
+                        'user = \"$remote_user\" '
+                        'local_time = \"$time_local\" '
+                        'host = $host '
+                        'request = \"$request\" '
+                        'status = $status '
+                        'bytes = $body_bytes_sent '
+                        'upstream = \"$upstream_addr\" '
+                        'upstream_time = $upstream_response_time '
+                        'request_time = $request_time '
+                        'referer = \"$http_referer\" '
+                        'UA = \"$http_user_agent\"';
+      access_log  {{ .Values.nginx.persistence.mountPath }}/logs/access.log timing;
+      sendfile        on;
+      #tcp_nopush     on;
+      keepalive_timeout  65;
+      #gzip  on;
+      include /etc/nginx/conf.d/*.conf;
+    }
+
+  artifactoryConf: |
+    ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3;
+    ssl_certificate  {{ .Values.nginx.persistence.mountPath }}/ssl/tls.crt;
+    ssl_certificate_key  {{ .Values.nginx.persistence.mountPath }}/ssl/tls.key;
+    ssl_session_cache shared:SSL:1m;
+    ssl_prefer_server_ciphers   on;
+    ## server configuration
+    server {
+    {{- if .Values.nginx.internalPortHttps }}
+      listen {{ .Values.nginx.internalPortHttps }} ssl;
+    {{- else -}}
+      {{- if .Values.nginx.https.enabled }}
+      listen {{ .Values.nginx.https.internalPort }} ssl;
+      {{- end }}
+    {{- end }}
+    {{- if .Values.nginx.internalPortHttp }}
+      listen {{ .Values.nginx.internalPortHttp }};
+    {{- else -}}
+      {{- if .Values.nginx.http.enabled }}
+      listen {{ .Values.nginx.http.internalPort }};
+      {{- end }}
+    {{- end }}
+      server_name ~(?<repo>.+)\.{{ include "artifactory-ha.fullname" . }} {{ include "artifactory-ha.fullname" . }}
+      {{- range .Values.ingress.hosts -}}
+        {{- if contains "." . -}}
+          {{ "" | indent 0 }} ~(?<repo>.+)\.{{ (splitn "." 2 .)._1 }} {{ . }}
+        {{- end -}}
+      {{- end -}};
+
+      if ($http_x_forwarded_proto = '') {
+        set $http_x_forwarded_proto  $scheme;
+      }
+      ## Application specific logs
+      ## access_log /var/log/nginx/artifactory-access.log timing;
+      ## error_log /var/log/nginx/artifactory-error.log;
+      rewrite ^/artifactory/?$ / redirect;
+      if ( $repo != "" ) {
+        rewrite ^/(v1|v2)/(.*) /artifactory/api/docker/$repo/$1/$2 break;
+      }
+      chunked_transfer_encoding on;
+      client_max_body_size 0;
+
+      location / {
+        proxy_read_timeout  900;
+        proxy_pass_header   Server;
+        proxy_cookie_path   ~*^/.* /;
+        proxy_pass          http://{{ include "artifactory-ha.fullname" . }}:{{ .Values.artifactory.externalPort }}/;
+        proxy_set_header    X-JFrog-Override-Base-Url $http_x_forwarded_proto://$host:$server_port;
+        proxy_set_header    X-Forwarded-Port  $server_port;
+        proxy_set_header    X-Forwarded-Proto $http_x_forwarded_proto;
+        proxy_set_header    Host              $http_host;
+        proxy_set_header    X-Forwarded-For   $proxy_add_x_forwarded_for;
+
+        location /artifactory/ {
+          if ( $request_uri ~ ^/artifactory/(.*)$ ) {
+            proxy_pass http://{{ include "artifactory-ha.fullname" . }}:{{ .Values.artifactory.externalArtifactoryPort }}/artifactory/$1;
+          }
+          proxy_pass http://{{ include "artifactory-ha.fullname" . }}:{{ .Values.artifactory.externalArtifactoryPort }}/artifactory/;
+        }
+      }
+    }
+
+  service:
+    ## For minikube, set this to NodePort, elsewhere use LoadBalancer
+    #type: NodePort
+    type: LoadBalancer
+    #type: ClusterIP
+    ## For supporting whitelist on the Nginx LoadBalancer service
+    ## Set this to a list of IP CIDR ranges
+    ## Example: loadBalancerSourceRanges: ['10.10.10.5/32', '10.11.10.5/32']
+    ## or pass from helm command line
+    ## Example: helm install ... --set nginx.service.loadBalancerSourceRanges='{10.10.10.5/32,10.11.10.5/32}'
+    loadBalancerSourceRanges: []
+    ## Provide static ip address
+    loadBalancerIP:
+    ## There are two available options: "Cluster" (default) and "Local".
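+    ## "Local" preserves the client source IP and avoids a second hop, but only nodes running
+    ## an Nginx pod receive traffic and load may spread unevenly; "Cluster" hides the client IP
+    ## behind an extra hop (standard Kubernetes behaviour, not chart-specific).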
+    externalTrafficPolicy: Cluster
+    labels: {}
+    # label-key: label-value
+  http:
+    enabled: true
+    externalPort: 80
+    internalPort: 80
+  https:
+    enabled: true
+    externalPort: 443
+    internalPort: 443
+  # DEPRECATED: The following will be replaced by the http/https blocks above (L1065-L1076) in a future release
+  # externalPortHttp: 80
+  # internalPortHttp: 80
+  # externalPortHttps: 443
+  # internalPortHttps: 443
+
+  ## The following settings configure the frequency of the liveness and readiness probes
+  livenessProbe:
+    enabled: true
+    path: /router/api/v1/system/health
+    initialDelaySeconds: 60
+    failureThreshold: 10
+    timeoutSeconds: 10
+    periodSeconds: 10
+    successThreshold: 1
+
+  readinessProbe:
+    enabled: true
+    path: /router/api/v1/system/health
+    initialDelaySeconds: 10
+    failureThreshold: 10
+    timeoutSeconds: 10
+    periodSeconds: 10
+    successThreshold: 1
+  ## The SSL secret that will be used by the Nginx pod
+  # tlsSecretName: chart-example-tls
+  ## Custom ConfigMap for nginx.conf
+  customConfigMap:
+  ## Custom ConfigMap for artifactory.conf
+  customArtifactoryConfigMap:
+  persistence:
+    mountPath: "/var/opt/jfrog/nginx"
+    enabled: false
+    ## A manually managed Persistent Volume and Claim
+    ## Requires persistence.enabled: true
+    ## If defined, the PVC must be created manually before the volume will be bound
+    # existingClaim:
+
+    accessMode: ReadWriteOnce
+    size: 5Gi
+    ## nginx data Persistent Volume Storage Class
+    ## If defined, storageClassName: <storageClass>
+    ## If set to "-", storageClassName: "", which disables dynamic provisioning
+    ## If undefined (the default) or set to null, no storageClassName spec is
+    ## set, choosing the default provisioner. (gp2 on AWS, standard on
+    ## GKE, AWS & OpenStack)
+    ##
+    # storageClassName: "-"
+  resources: {}
+  # requests:
+  #   memory: "250Mi"
+  #   cpu: "100m"
+  # limits:
+  #   memory: "250Mi"
+  #   cpu: "500m"
+
+  nodeSelector: {}
+
+  tolerations: []
+
+  affinity: {}
+
+# Filebeat Sidecar container
+## The provided filebeat configuration is for Artifactory logs. It assumes you have Logstash installed and configured properly.
+filebeat:
+  enabled: false
+  name: artifactory-filebeat
+  image:
+    repository: "docker.elastic.co/beats/filebeat"
+    version: 7.5.1
+  logstashUrl: "logstash:5044"
+
+  terminationGracePeriod: 10
+
+  livenessProbe:
+    exec:
+      command:
+        - sh
+        - -c
+        - |
+          #!/usr/bin/env bash -e
+          curl --fail 127.0.0.1:5066
+    failureThreshold: 3
+    initialDelaySeconds: 10
+    periodSeconds: 10
+    timeoutSeconds: 5
+
+  readinessProbe:
+    exec:
+      command:
+        - sh
+        - -c
+        - |
+          #!/usr/bin/env bash -e
+          filebeat test output
+    failureThreshold: 3
+    initialDelaySeconds: 10
+    periodSeconds: 10
+    timeoutSeconds: 5
+
+  resources: {}
+  # requests:
+  #   memory: "100Mi"
+  #   cpu: "100m"
+  # limits:
+  #   memory: "100Mi"
+  #   cpu: "100m"
+
+  filebeatYml: |
+    logging.level: info
+    path.data: {{ .Values.artifactory.persistence.mountPath }}/log/filebeat
+    name: artifactory-filebeat
+    queue.spool: ~
+    filebeat.inputs:
+    - type: log
+      enabled: true
+      close_eof: ${CLOSE:false}
+      paths:
+        - {{ .Values.artifactory.persistence.mountPath }}/log/*.log
+      fields:
+        service: "jfrt"
+        log_type: "artifactory"
+    output:
+      logstash:
+        hosts: ["{{ .Values.filebeat.logstashUrl }}"]
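+  ## Example (release name, namespace and Logstash address are placeholders): the sidecar is
+  ## typically enabled at install/upgrade time and pointed at an existing Logstash, e.g.
+  ##   helm upgrade --install artifactory-ha . --namespace artifactory \
+  ##     --set filebeat.enabled=true \
+  ##     --set filebeat.logstashUrl="logstash.logging.svc.cluster.local:5044"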