From e99814b3447d16cd3afac591f8a112306c35d8bb Mon Sep 17 00:00:00 2001 From: John Peterson Date: Wed, 23 Sep 2020 11:47:51 -0700 Subject: [PATCH] Openshift pipelines work in progress --- Openshift4/openshift-pipelines/CHANGELOG.md | 5 + Openshift4/openshift-pipelines/Chart.yaml | 16 + Openshift4/openshift-pipelines/LICENSE | 201 ++ Openshift4/openshift-pipelines/README.md | 223 ++ Openshift4/openshift-pipelines/helminstall.sh | 35 + Openshift4/openshift-pipelines/out | 3093 +++++++++++++++++ .../openshift-pipelines/requirements.lock | 6 + .../openshift-pipelines/requirements.yaml | 4 + Openshift4/openshift-pipelines/values.yaml | 1117 ++++++ 9 files changed, 4700 insertions(+) create mode 100755 Openshift4/openshift-pipelines/CHANGELOG.md create mode 100755 Openshift4/openshift-pipelines/Chart.yaml create mode 100755 Openshift4/openshift-pipelines/LICENSE create mode 100755 Openshift4/openshift-pipelines/README.md create mode 100755 Openshift4/openshift-pipelines/helminstall.sh create mode 100644 Openshift4/openshift-pipelines/out create mode 100644 Openshift4/openshift-pipelines/requirements.lock create mode 100644 Openshift4/openshift-pipelines/requirements.yaml create mode 100755 Openshift4/openshift-pipelines/values.yaml diff --git a/Openshift4/openshift-pipelines/CHANGELOG.md b/Openshift4/openshift-pipelines/CHANGELOG.md new file mode 100755 index 0000000..eed6c8c --- /dev/null +++ b/Openshift4/openshift-pipelines/CHANGELOG.md @@ -0,0 +1,5 @@ +# JFrog Openshift Pipelines Chart Changelog +All changes to this chart will be documented in this file. 
+ +## [1.4.5] Sept 21, 2020 +* Adding Openshift Pipelines helm chart version 1.4.5 app version 1.7.2 diff --git a/Openshift4/openshift-pipelines/Chart.yaml b/Openshift4/openshift-pipelines/Chart.yaml new file mode 100755 index 0000000..64af46c --- /dev/null +++ b/Openshift4/openshift-pipelines/Chart.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +appVersion: 1.7.2 +description: A Helm chart for JFrog Pipelines +home: https://jfrog.com/pipelines/ +icon: https://raw.githubusercontent.com/jfrog/charts/master/stable/pipelines/icon/pipelines-logo.png +keywords: + - pipelines + - jfrog + - devops +maintainers: +- email: vinaya@jfrog.com + name: Vinay Aggarwal +- email: johnp@jfrog.com + name: John Peterson +name: openshift-pipelines +version: 1.4.5 diff --git a/Openshift4/openshift-pipelines/LICENSE b/Openshift4/openshift-pipelines/LICENSE new file mode 100755 index 0000000..8dada3e --- /dev/null +++ b/Openshift4/openshift-pipelines/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/Openshift4/openshift-pipelines/README.md b/Openshift4/openshift-pipelines/README.md new file mode 100755 index 0000000..32cae28 --- /dev/null +++ b/Openshift4/openshift-pipelines/README.md @@ -0,0 +1,223 @@ +# JFrog Pipelines on Kubernetes Helm Chart + +[JFrog Pipelines](https://jfrog.com/pipelines/) + +## Prerequisites Details + +* Kubernetes 1.12+ + +## Chart Details + +This chart will do the following: + +- Deploy PostgreSQL (optionally with an external PostgreSQL instance) +- Deploy RabbitMQ (optionally as an HA cluster) +- Deploy Redis (optionally as an HA cluster) +- Deploy Vault (optionally as an HA cluster) +- Deploy JFrog Pipelines + +## Requirements + +- A running Kubernetes cluster + - Dynamic storage provisioning enabled + - Default StorageClass set to allow services using the default StorageClass for persistent storage +- A running Artifactory 7.7.x with Enterprise+ License + - Precreated repository `jfrogpipelines` in Artifactory type `Generic` with layout `maven-2-default` +- [Kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) installed and setup to use the cluster +- [Helm](https://helm.sh/) v2 or v3 installed + + +## Install JFrog Pipelines + +### Add ChartCenter Helm repository + +Before installing JFrog helm charts, you need to add the [ChartCenter helm repository](https://chartcenter.io) to your helm client + +```bash +helm repo add center 
https://repo.chartcenter.io +helm repo update +``` + +### Artifactory Connection Details + +In order to connect Pipelines to your Artifactory installation, you have to use a Join Key, hence it is *MANDATORY* to provide a Join Key and JFrog URL to your Pipelines installation. Here's how you do that: + +Retrieve the connection details of your Artifactory installation, from the UI - https://www.jfrog.com/confluence/display/JFROG/General+Security+Settings#GeneralSecuritySettings-ViewingtheJoinKey. + +### Install Pipelines Chart with Ingress + +#### Pre-requisites + +Before deploying Pipelines you need to have the following +- A running Kubernetes cluster +- An [Artifactory](https://hub.helm.sh/charts/jfrog/artifactory) or [Artifactory HA](https://hub.helm.sh/charts/jfrog/artifactory-ha) with Enterprise+ License + - Precreated repository `jfrogpipelines` in Artifactory type `Generic` with layout `maven-2-default` +- Deployed [Nginx-ingress controller](https://hub.helm.sh/charts/stable/nginx-ingress) +- [Optional] Deployed [Cert-manager](https://hub.helm.sh/charts/jetstack/cert-manager) for automatic management of TLS certificates with [Let's Encrypt](https://letsencrypt.org/) +- [Optional] TLS secret needed for https access + +#### Prepare configurations + +Fetch the JFrog Pipelines helm chart to get the needed configuration files + +```bash +helm fetch center/jfrog/pipelines --untar +``` + +Edit local copies of `values-ingress.yaml`, `values-ingress-passwords.yaml` and `values-ingress-external-secret.yaml` with the needed configuration values + +- URLs in `values-ingress.yaml` + - Artifactory URL + - Ingress hosts + - Ingress tls secrets +- Passwords `uiUserPassword`, `postgresqlPassword` and `rabbitmq.password` must be set, and same for `masterKey` and `joinKey` in `values-ingress-passwords.yaml` + +#### Install JFrog Pipelines + +Install JFrog Pipelines + +```bash +kubectl create ns pipelines +helm upgrade --install pipelines --namespace pipelines 
center/jfrog/pipelines -f pipelines/values-ingress.yaml -f pipelines/values-ingress-passwords.yaml +``` + +### Use external secret + +**Note:** Best practice is to use external secrets instead of storing passwords in `values.yaml` files. + +Don't forget to **update** URLs in `values-ingress-external-secret.yaml` file. + +Fill in all required passwords, `masterKey` and `joinKey` in `values-ingress-passwords.yaml` and then create and install the external secret. + +**Note:** Helm release name for secrets generation and `helm install` must be set the same, in this case it is `pipelines`. + +With Helm v2: + +```bash +## Generate pipelines-system-yaml secret +helm template --name-template pipelines pipelines/ -x templates/pipelines-system-yaml.yaml \ + -f pipelines/values-ingress-external-secret.yaml -f pipelines/values-ingress-passwords.yaml | kubectl apply --namespace pipelines -f - + +## Generate pipelines-database secret +helm template --name-template pipelines pipelines/ -x templates/database-secret.yaml \ + -f pipelines/values-ingress-passwords.yaml | kubectl apply --namespace pipelines -f - + +## Generate pipelines-rabbitmq-secret secret +helm template --name-template pipelines pipelines/ -x templates/rabbitmq-secret.yaml \ + -f pipelines/values-ingress-passwords.yaml | kubectl apply --namespace pipelines -f - +``` + +With Helm v3: + +```bash +## Generate pipelines-system-yaml secret +helm template --name-template pipelines pipelines/ -s templates/pipelines-system-yaml.yaml \ + -f pipelines/values-ingress-external-secret.yaml -f pipelines/values-ingress-passwords.yaml | kubectl apply --namespace pipelines -f - + +## Generate pipelines-database secret +helm template --name-template pipelines pipelines/ -s templates/database-secret.yaml \ + -f pipelines/values-ingress-passwords.yaml | kubectl apply --namespace pipelines -f - + +## Generate pipelines-rabbitmq-secret secret +helm template --name-template pipelines pipelines/ -s templates/rabbitmq-secret.yaml \ + -f 
pipelines/values-ingress-passwords.yaml | kubectl apply --namespace pipelines -f - +``` + +Install JFrog Pipelines: + +```bash +helm upgrade --install pipelines --namespace pipelines center/jfrog/pipelines -f values-ingress-external-secret.yaml +``` + +### Using external Rabbitmq + +If you want to use external Rabbitmq, set `rabbitmq.enabled=false` and create `values-external-rabbitmq.yaml` with below yaml configuration + +```yaml +rabbitmq: + enabled: false + internal_ip: "{{ .Release.Name }}-rabbitmq" + msg_hostname: "{{ .Release.Name }}-rabbitmq" + port: 5672 + manager_port: 15672 + ms_username: admin + ms_password: password + cp_username: admin + cp_password: password + build_username: admin + build_password: password + root_vhost_exchange_name: rootvhost + erlang_cookie: secretcookie + build_vhost_name: pipelines + root_vhost_name: pipelinesRoot + protocol: amqp +``` + +```bash +helm upgrade --install pipelines --namespace pipelines center/jfrog/pipelines -f values-external-rabbitmq.yaml +``` + +### Using external Vault + +If you want to use external Vault, set `vault.enabled=false` and create `values-external-vault.yaml` with below yaml configuration + +```yaml +vault: + enabled: false + +global: + vault: + host: vault_url + port: vault_port + token: vault_token + ## Set Vault token using existing secret + # existingSecret: vault-secret +``` + +If you store external Vault token in a pre-existing Kubernetes Secret, you can specify it via `existingSecret`. 
+ +To create a secret containing the Vault token: + +```bash +kubectl create secret generic vault-secret --from-literal=token=${VAULT_TOKEN} +``` + +```bash +helm upgrade --install pipelines --namespace pipelines center/jfrog/pipelines -f values-external-vault.yaml +``` + +### Status + +See the status of deployed **helm** release: + +With Helm v2: + +```bash +helm status pipelines +``` + +With Helm v3: + +```bash +helm status pipelines --namespace pipelines +``` + +### Pipelines Version +- By default, the pipelines images will use the value `appVersion` in the Chart.yml. This can be over-ridden by adding `version` to the pipelines section of the values.yml + +### Build Plane + +#### Build Plane with static and dynamic node-pool VMs + +To start using Pipelines you need to setup a Build Plane: +- For Static VMs Node-pool setup, please read [Managing Node Pools](https://www.jfrog.com/confluence/display/JFROG/Managing+Pipelines+Node+Pools#ManagingPipelinesNodePools-static-node-poolsAdministeringStaticNodePools). + +- For Dynamic VMs Node-pool setup, please read [Managing Dynamic Node Pools](https://www.jfrog.com/confluence/display/JFROG/Managing+Pipelines+Node+Pools#ManagingPipelinesNodePools-dynamic-node-poolsAdministeringDynamicNodePools). + +- For Kubernetes Node-pool setup, please read [Managing Dynamic Node Pools](https://www.jfrog.com/confluence/display/JFROG/Managing+Pipelines+Node+Pools#ManagingPipelinesNodePools-dynamic-node-poolsAdministeringDynamicNodePools). 
+ +## Useful links + +- https://www.jfrog.com/confluence/display/JFROG/Pipelines+Quickstart +- https://www.jfrog.com/confluence/display/JFROG/Using+Pipelines +- https://www.jfrog.com/confluence/display/JFROG/Managing+Runtimes diff --git a/Openshift4/openshift-pipelines/helminstall.sh b/Openshift4/openshift-pipelines/helminstall.sh new file mode 100755 index 0000000..b67bd7e --- /dev/null +++ b/Openshift4/openshift-pipelines/helminstall.sh @@ -0,0 +1,35 @@ +#!/usr/bin/env bash + +echo "Installing Pipelines" + +if [ -z "$MASTER_KEY" ] +then + MASTER_KEY=FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF +fi + +if [ -z "$JOIN_KEY" ] +then + JOIN_KEY=EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE +fi + +helm upgrade --install pipelines . \ + --set pipelines.pipelines.jfrogUrl=http://openshiftartifactoryha-nginx \ + --set pipelines.pipelines.jfrogUrlUI=http://openshiftartifactoryha-nginx \ + --set pipelines.pipelines.masterKey=$MASTER_KEY \ + --set pipelines.pipelines.joinKey=$JOIN_KEY \ + --set pipelines.pipelines.accessControlAllowOrigins_0=http://openshiftartifactoryha-nginx \ + --set pipelines.pipelines.accessControlAllowOrigins_1=http://openshiftartifactoryha-nginx \ + --set pipelines.pipelines.msg.uiUser=guest \ + --set pipelines.pipelines.msg.uiUserPassword=guest \ + --set pipelines.postgresql.enabled=false \ + --set pipelines.global.postgresql.host=postgres-postgresql \ + --set pipelines.global.postgresql.port=5432 \ + --set pipelines.global.postgresql.database=pipelinesdb \ + --set pipelines.global.postgresql.user=artifactory \ + --set pipelines.global.postgresql.password=password \ + --set pipelines.global.postgresql.ssl=false \ + --set pipelines.rabbitmq.rabbitmq.username=monitor \ + --set pipelines.rabbitmq.rabbitmq.password=monitor \ + --set pipelines.rabbitmq.externalUrl=amqps://pipelines-rabbit.jfrog.tech \ + --set pipelines.pipelines.api.externalUrl=http://pipelines-api.jfrog.tech \ + --set 
pipelines.pipelines.www.externalUrl=http://pipelines-www.jfrog.tech diff --git a/Openshift4/openshift-pipelines/out b/Openshift4/openshift-pipelines/out new file mode 100644 index 0000000..5e20f17 --- /dev/null +++ b/Openshift4/openshift-pipelines/out @@ -0,0 +1,3093 @@ +NAME: pipelines +LAST DEPLOYED: Wed Sep 23 10:16:50 2020 +NAMESPACE: default +STATUS: pending-install +REVISION: 1 +TEST SUITE: None +USER-SUPPLIED VALUES: +pipelines: + global: + postgresql: + database: pipelinesdb + host: postgres-postgresql + password: password + port: 5432 + ssl: false + user: artifactory + pipelines: + accessControlAllowOrigins_0: http://openshiftartifactoryha-nginx + accessControlAllowOrigins_1: http://openshiftartifactoryha-nginx + api: + externalUrl: http://pipelines-api.jfrog.tech + jfrogUrl: http://openshiftartifactoryha-nginx + jfrogUrlUI: http://openshiftartifactoryha-nginx + joinKey: EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE + masterKey: FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF + msg: + uiUser: monitor + uiUserPassword: monitor + www: + externalUrl: http://pipelines-www.jfrog.tech + postgresql: + enabled: false + rabbitmq: + externalUrl: amqps://pipelines-rabbit.jfrog.tech + rabbitmq: + password: guest + username: guest + +COMPUTED VALUES: +pipelines: + buildPlane: + dynamic: + customer: + accountId: "" + nodePoolName: "" + nodelimit: "" + provider: + aws: + accessKey: "" + enabled: false + existingSecret: null + instanceType: c4.xlarge + keyPairName: testaccountSSHKeyPair + nodePoolName: aws-dynamic-node-pool + nodelimit: "3" + region: us-east-1 + secretKey: "" + securityGroupId: testsecuritygroupId + subnetId: test-subnetId + vpcId: testVPCId + k8s: + cpu: "1" + enabled: false + existingSecret: null + kubeconfig: "" + labels: null + memory: "1000" + namespace: default + nodePoolName: k8s-dynamic-node-pool + nodelimit: "3" + storageClass: standard + existingSecret: null + filebeat: + enabled: false + filebeatYml: | + logging.level: info + path.data: {{ 
.Values.pipelines.logPath }}/filebeat + name: pipelines-filebeat + queue.spool: ~ + filebeat.inputs: + - type: log + enabled: true + close_eof: ${CLOSE:false} + paths: + - {{ .Values.pipelines.logPath }}/*.log + fields: + service: "jfpip" + log_type: "pipelines" + output: + logstash: + hosts: ["{{ .Values.filebeat.logstashUrl }}"] + image: + repository: docker.elastic.co/beats/filebeat + version: 7.5.1 + livenessProbe: + exec: + command: + - sh + - -c + - | + #!/usr/bin/env bash -e + curl --fail 127.0.0.1:5066 + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 5 + logstashUrl: logstash:5044 + name: pipelines-filebeat + readinessProbe: + exec: + command: + - sh + - -c + - | + #!/usr/bin/env bash -e + filebeat test output + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 5 + resources: {} + terminationGracePeriod: 10 + global: + postgresql: + database: pipelinesdb + host: postgres-postgresql + password: password + port: 5432 + ssl: false + user: artifactory + vault: + host: OVERRIDE + port: OVERRIDE + token: OVERRIDE + imagePullSecrets: null + imageRegistry: registry.connect.redhat.com + initContainer: + image: quay.io/jfrog/init:1.0.0 + pullPolicy: IfNotPresent + pipelines: + accessControlAllowOrigins_0: http://openshiftartifactoryha-nginx + accessControlAllowOrigins_1: http://openshiftartifactoryha-nginx + affinity: {} + api: + externalUrl: http://pipelines-api.jfrog.tech + image: + pullPolicy: IfNotPresent + repository: jfrog/pipelines-api + ingress: + annotations: {} + enabled: false + hosts: + - chart-example.local + path: / + tls: [] + resources: {} + service: + annotations: null + loadBalancerIP: null + loadBalancerSourceRanges: [] + port: 30000 + type: ClusterIP + artifactoryServiceId: FFFFFFFFFFFF + authToken: c7595edd-b63d-4fd6-9e1e-13924d6637f0 + autoscaling: + enabled: false + maxReplicas: 3 + minReplicas: 1 + targetCPUUtilizationPercentage: 70 + configMaps: "" + cron: + image: + 
pullPolicy: IfNotPresent + repository: jfrog/pipelines-micro + resources: {} + customInitContainers: | + - name: "redhat-custom-setup" + image: quay.io/jfrog/init:1.0.0 + imagePullPolicy: Always + command: + - 'sh' + - '-c' + - 'chown -R 1117:1117 /opt/jfrog/pipelines/var/etc' + securityContext: + runAsUser: 0 + volumeMounts: + - mountPath: "/opt/jfrog/pipelines/var/etc" + name: volume + customSidecarContainers: "" + customVolumeMounts: "" + customVolumes: "" + extensionSync: + image: + pullPolicy: IfNotPresent + repository: jfrog/pipelines-micro + resources: {} + hookHandler: + image: + pullPolicy: IfNotPresent + repository: jfrog/pipelines-micro + resources: {} + jfrogUrl: http://openshiftartifactoryha-nginx + jfrogUrlUI: http://openshiftartifactoryha-nginx + joinKey: EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE + licenseId: FFFFFFFFF + logPath: /opt/jfrog/pipelines/var/log + logup: + image: + pullPolicy: IfNotPresent + repository: jfrog/pipelines-micro + resources: {} + marshaller: + image: + pullPolicy: IfNotPresent + repository: jfrog/pipelines-micro + resources: {} + masterKey: FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF + mountPath: /opt/jfrog/pipelines/var/etc + msg: + uiUser: monitor + uiUserPassword: monitor + nexec: + image: + pullPolicy: IfNotPresent + repository: jfrog/pipelines-micro + resources: {} + nodeSelector: {} + pipelineSync: + image: + pullPolicy: IfNotPresent + repository: jfrog/pipelines-micro + resources: {} + pipelinesInit: + image: + pullPolicy: IfNotPresent + repository: jfrog/pipelines-installer + resources: {} + rabbitmqHealthCheckIntervalInMins: 1 + rbac: + role: + rules: + - apiGroups: + - "" + - extensions + - apps + resources: + - deployments + - persistentvolumes + - persistentvolumeclaims + - pods + - deployments/scale + verbs: + - '*' + replicaCount: 1 + rootBucket: jfrogpipelines + router: + externalPort: 8082 + image: + pullPolicy: IfNotPresent + repository: jfrog/pipelines-router + internalPort: 8046 + mountPath: 
/opt/jfrog/router/var/etc + resources: {} + runTrigger: + image: + pullPolicy: IfNotPresent + repository: jfrog/pipelines-micro + resources: {} + serviceId: jfpip@12345 + stepTrigger: + image: + pullPolicy: IfNotPresent + repository: jfrog/pipelines-micro + resources: {} + systemYaml: | + shared: + ## Artifactory configuration + ## + artifactory: + ## Artifactory URL + ## + baseUrl: "{{ tpl (required "\n\npipelines.jfrogUrl is required!\n" .Values.pipelines.jfrogUrl) . }}" + ## Unified UI URL + ## + baseUrlUI: "{{ tpl (required "\n\npipelines.jfrogUrlUI is required!\n" .Values.pipelines.jfrogUrlUI) . }}" + ## Pipelines Service ID + ## + serviceId: "{{ .Values.pipelines.serviceId }}" + ## Artifactory Service ID + ## + artifactoryServiceId: "{{ .Values.pipelines.artifactoryServiceId }}" + ## Artifactory License ID + ## + licenseId: "{{ .Values.pipelines.licenseId }}" + ## Proxy to connect to Artifactory + ## + proxy: + url: "" + username: "" + password: "" + + ## Router configuration + ## + router: + ip: "" + accessPort: {{ .Values.pipelines.router.internalPort }} + dataPort: {{ .Values.pipelines.router.externalPort }} + joinKey: "{{ .Values.pipelines.joinKey }}" + + security: + masterKey: "{{ .Values.pipelines.masterKey }}" + + ## Database configuration + ## + db: + type: "postgres" + {{- if .Values.postgresql.enabled }} + ip: {{ tpl .Release.Name . }}-postgresql + port: "{{ .Values.postgresql.service.port }}" + name: {{ .Values.postgresql.postgresqlDatabase }} + username: {{ .Values.postgresql.postgresqlUsername }} + password: {{ .Values.postgresql.postgresqlPassword }} + {{- else }} + ip: {{ tpl .Values.global.postgresql.host . 
}} + port: "{{ .Values.global.postgresql.port }}" + name: {{ .Values.global.postgresql.database }} + username: {{ .Values.global.postgresql.user }} + password: {{ .Values.global.postgresql.password }} + {{- end }} + externalUrl: "" + {{- if .Values.postgresql.enabled }} + connectionString: "{{ tpl (printf "postgres://%s:%s@%s-postgresql:%v/%s" .Values.postgresql.postgresqlUsername .Values.postgresql.postgresqlPassword .Release.Name .Values.postgresql.service.port .Values.postgresql.postgresqlDatabase) . }}" + {{- else if and (not .Values.postgresql.enabled) (.Values.global.postgresql.ssl) }} + connectionString: "{{ tpl (printf "postgres://%s:%s@%v:%v/%s?sslmode=require" .Values.global.postgresql.user .Values.global.postgresql.password .Values.global.postgresql.host .Values.global.postgresql.port .Values.global.postgresql.database) . }}" + {{- else }} + connectionString: "{{ tpl (printf "postgres://%s:%s@%v:%v/%s" .Values.global.postgresql.user .Values.global.postgresql.password .Values.global.postgresql.host .Values.global.postgresql.port .Values.global.postgresql.database) . }}" + {{- end }} + + ## RabbitMQ configuration + ## + msg: + {{- if .Values.rabbitmq.enabled }} + ip: {{ .Release.Name }}-rabbitmq + port: {{ .Values.rabbitmq.service.port }} + adminPort: {{ .Values.rabbitmq.service.managerPort }} + erlangCookie: {{ .Values.rabbitmq.rabbitmq.erlangCookie }} + username: {{ .Values.rabbitmq.rabbitmq.username }} + password: {{ .Values.rabbitmq.rabbitmq.password }} + defaultExchange: pipelinesEx + amqpVhost: pipelines + amqpRootVhost: pipelinesRoot + {{- else }} + ip: {{ tpl .Values.rabbitmq.internal_ip . 
}} + port: {{ .Values.rabbitmq.port}} + adminPort: {{ .Values.rabbitmq.manager_port }} + erlangCookie: {{ .Values.rabbitmq.erlang_cookie }} + username: {{ .Values.rabbitmq.ms_username }} + password: {{ .Values.rabbitmq.ms_password }} + defaultExchange: {{ .Values.rabbitmq.root_vhost_exchange_name }} + amqpVhost: {{ .Values.rabbitmq.build_vhost_name}} + amqpRootVhost: {{ .Values.rabbitmq.root_vhost_name }} + protocol: {{ .Values.rabbitmq.protocol }} + {{- end }} + queues: + - "core.pipelineSync" + - "core.runTrigger" + - "core.stepTrigger" + - "core.marshaller" + - "cluster.init" + - "core.logup" + - "www.signals" + - "core.nexec" + - "core.hookHandler" + - "core.extensionSync" + ui: + {{- if .Values.rabbitmq.enabled }} + username: {{ .Values.pipelines.msg.uiUser }} + password: {{ .Values.pipelines.msg.uiUserPassword }} + {{- else }} + protocol: http + username: {{ .Values.rabbitmq.cp_username }} + password: {{ .Values.rabbitmq.cp_password }} + {{- end }} + external: + ## URL for build plane VMs to access RabbitMQ + {{- if .Values.rabbitmq.externalUrl }} + url: {{ .Values.rabbitmq.externalUrl }} + {{- else if (and .Values.rabbitmq.serviceVmLb.enabled .Values.rabbitmq.serviceVmLb.loadBalancerIP) }} + url: amqp://{{ .Values.rabbitmq.serviceVmLb.loadBalancerIP }} + {{- else if .Values.rabbitmq.enabled }} + url: amqp://{{ tpl .Release.Name . }}-rabbitmq + {{- else }} + url: {{ .Values.rabbitmq.protocol }}://{{ tpl .Values.rabbitmq.msg_hostname . }}:{{ .Values.rabbitmq.port }} + {{- end }} + rootUrl: "" + adminUrl: "" + {{- if not .Values.rabbitmq.enabled }} + build: + username: {{ .Values.rabbitmq.build_username }} + password: {{ .Values.rabbitmq.build_password }} + {{- end }} + + ## Vault configuration + ## + vault: + {{- if .Values.vault.enabled }} + ip: {{ include "pipelines.vault.name" . 
}} + port: {{ .Values.vault.service.port }} + {{- else }} + ip: {{ .Values.global.vault.host }} + port: {{ .Values.global.vault.port }} + {{- end }} + ## DO NOT CHANGE THE TOKEN VALUE!!! + token: "_VAULT_TOKEN_" + unsealKeys: + - "" + - "" + - "" + - "" + - "" + + ## Redis configuration + ## + redis: + ip: {{ .Release.Name }}-redis-master + port: 6379 + clusterEnabled: false + + ## This section is used for bringing up the core services and setting up + ## configurations required by the installer & the services + ## + core: + ## id is automatically determined based on the current hostname + ## or set using the SHARED_NODE_ID environment variable. + ## + id: "afd8df9d08bf257ae9b7d7dbbf348b7a3a574ebdd3a61d350d4b64e3129dee85" + installerIP: "1.2.3.4" + installerAuthToken: "{{ .Values.pipelines.authToken }}" + installerImage: "jfrog/pipelines-installer" + registryUrl: "{{ .Values.imageRegistry }}" + os: "Ubuntu_16.04" + osDistribution: "xenial" + architecture: "x86_64" + dockerVersion: "" + runMode: "{{ .Values.runMode }}" + user: "" + group: "" + noVerifySsl: false + ignoreTLSErrors: false + controlplaneVersion: "{{ default .Chart.AppVersion .Values.pipelines.version }}" + buildplaneVersion: "{{ default .Chart.AppVersion .Values.pipelines.version }}" + accessControlAllowOrigins: + - {{ .Values.pipelines.accessControlAllowOrigins_0 }} + - {{ .Values.pipelines.accessControlAllowOrigins_1 }} + rabbitmqHealthCheckIntervalInMins: {{ .Values.pipelines.rabbitmqHealthCheckIntervalInMins}} + ## Global proxy settings, to be applied to all services + ## + proxy: + httpProxy: "" + httpsProxy: "" + noProxy: "" + username: "" + password: "" + + ## Mailserver settings + ## + mailserver: + host: "" + port: "" + username: "" + password: "" + tls: "" + ssl: "" + apiRetryIntervalMs: 3000 + accountSyncFrequencyHr: 1 + imageRegistrySecret: "{{ .Values.imagePullSecrets }}" + hardDeleteIntervalInMins: 60 + configBackupCount: 5 + lastUpdateTime: "" + callHomeUrl: 
"https://api.bintray.com/products/jfrog/pipelines/stats/usage" + allowCallHome: true + serviceInstanceHealthCheckIntervalInMins: 1 + serviceInstanceStatsCutOffIntervalInHours: 24 + + ## Service configuration + ## + services: + api: + name: {{ include "pipelines.api.name" . }} + port: {{ .Values.pipelines.api.service.port }} + {{- if (and .Values.pipelines.api.ingress.enabled .Values.pipelines.api.ingress.tls) }} + {{- range .Values.pipelines.api.ingress.hosts }} + externalUrl: https://{{ . }} + {{- end }} + {{- else if .Values.pipelines.api.ingress.enabled }} + {{- range .Values.pipelines.api.ingress.hosts }} + externalUrl: http://{{ . }} + {{- end }} + {{- else }} + externalUrl: {{ .Values.pipelines.api.externalUrl }} + {{- end }} + www: + name: {{ include "pipelines.www.name" . }} + port: {{ .Values.pipelines.www.service.port }} + {{- if (and .Values.pipelines.www.ingress.enabled .Values.pipelines.www.ingress.tls) }} + {{- range .Values.pipelines.www.ingress.hosts }} + externalUrl: https://{{ . }} + {{- end }} + {{- else if .Values.pipelines.www.ingress.enabled }} + {{- range .Values.pipelines.www.ingress.hosts }} + externalUrl: http://{{ . 
}} + {{- end }} + {{- else }} + externalUrl: {{ .Values.pipelines.www.externalUrl }} + {{- end }} + sessionSecret: "{{ .Values.pipelines.authToken }}" + pipelineSync: + name: pipelineSync + runTrigger: + name: runTrigger + stepTrigger: + name: stepTrigger + cron: + name: cron + nexec: + name: nexec + hookHandler: + name: hookHandler + marshaller: + name: marshaller + extensionSync: + name: extensionSync + + ## Runtime configuration + ## + runtime: + rootBucket: "{{ .Values.pipelines.rootBucket }}" + defaultMinionCount: 1 + nodeCacheIntervalMS: 600000 + jobConsoleBatchSize: 10 + jobConsoleBufferIntervalMs: 3 + maxDiskUsagePercentage: 90 + stepTimeoutMS: 3600000 + nodeStopDayOfWeek: 0 + nodeStopIntervalDays: 30 + maxNodeCheckInDelayMin: 15 + defaultMinionInstanceSize: "c4.large" + allowDynamicNodes: true + allowCustomNodes: true + {{- range $key, $value := .Values.runtimeOverride }} + {{ $key }}: {{ $value | quote }} + {{- end }} + languageImages: + - architecture: x86_64 + os: Ubuntu_16.04 + language: node + registryUrl: docker.bintray.io + image: jfrog/pipelines-u16node + isDefault: true + defaultVersion: 10.18.0 + - architecture: x86_64 + os: Ubuntu_16.04 + language: java + registryUrl: docker.bintray.io + image: jfrog/pipelines-u16java + defaultVersion: 13 + - architecture: x86_64 + os: Ubuntu_16.04 + language: cpp + registryUrl: docker.bintray.io + image: jfrog/pipelines-u16cpp + defaultVersion: 9.0.0 + - architecture: x86_64 + os: Ubuntu_16.04 + language: go + registryUrl: docker.bintray.io + image: jfrog/pipelines-u16go + defaultVersion: 1.12.14 + - architecture: x86_64 + os: Ubuntu_18.04 + language: node + registryUrl: docker.bintray.io + image: jfrog/pipelines-u18node + isDefault: true + defaultVersion: 10.18.0 + - architecture: x86_64 + os: Ubuntu_18.04 + language: java + registryUrl: docker.bintray.io + image: jfrog/pipelines-u18java + defaultVersion: 13 + - architecture: x86_64 + os: Ubuntu_18.04 + language: cpp + registryUrl: docker.bintray.io + image: 
jfrog/pipelines-u18cpp + defaultVersion: 9.0.0 + - architecture: x86_64 + os: Ubuntu_18.04 + language: go + registryUrl: docker.bintray.io + image: jfrog/pipelines-u18go + defaultVersion: 1.12.14 + - architecture: x86_64 + os: CentOS_7 + language: node + registryUrl: docker.bintray.io + image: jfrog/pipelines-c7node + isDefault: true + defaultVersion: 10.18.0 + - architecture: x86_64 + os: CentOS_7 + language: java + registryUrl: docker.bintray.io + image: jfrog/pipelines-c7java + defaultVersion: 11 + - architecture: x86_64 + os: CentOS_7 + language: cpp + registryUrl: docker.bintray.io + image: jfrog/pipelines-c7cpp + defaultVersion: 3.4.2 + - architecture: x86_64 + os: CentOS_7 + language: go + registryUrl: docker.bintray.io + image: jfrog/pipelines-c7go + defaultVersion: 1.12.14 + - architecture: x86_64 + os: WindowsServer_2019 + language: node + registryUrl: docker.bintray.io + image: jfrog/pipelines-w19node + defaultVersion: 10.18.0 + - architecture: x86_64 + os: WindowsServer_2019 + language: java + registryUrl: docker.bintray.io + image: jfrog/pipelines-w19java + defaultVersion: 11 + - architecture: x86_64 + os: WindowsServer_2019 + language: cpp + registryUrl: docker.bintray.io + image: jfrog/pipelines-w19cpp + defaultVersion: 9.0.0 + - architecture: x86_64 + os: WindowsServer_2019 + language: go + registryUrl: docker.bintray.io + image: jfrog/pipelines-w19go + defaultVersion: 1.12.14 + - architecture: x86_64 + os: WindowsServer_2019 + language: dotnetcore + registryUrl: docker.bintray.io + image: jfrog/pipelines-w19dotnetcore + isDefault: true + defaultVersion: 3.1 + - architecture: x86_64 + os: RHEL_7 + language: node + registryUrl: docker.bintray.io + image: jfrog/pipelines-c7node + isDefault: true + defaultVersion: 10.18.0 + - architecture: x86_64 + os: RHEL_7 + language: java + registryUrl: docker.bintray.io + image: jfrog/pipelines-c7java + defaultVersion: 11 + - architecture: x86_64 + os: RHEL_7 + language: cpp + registryUrl: docker.bintray.io + 
image: jfrog/pipelines-c7cpp + defaultVersion: 3.4.2 + - architecture: x86_64 + os: RHEL_7 + language: go + registryUrl: docker.bintray.io + image: jfrog/pipelines-c7go + defaultVersion: 1.12.14 + tolerations: [] + updateStrategy: RollingUpdate + version: 1.7.1 + www: + externalUrl: http://pipelines-www.jfrog.tech + image: + pullPolicy: IfNotPresent + repository: jfrog/pipelines-www + ingress: + annotations: {} + enabled: false + hosts: + - chart-example.local + path: / + tls: [] + resources: {} + service: + annotations: null + loadBalancerIP: null + loadBalancerSourceRanges: [] + port: 30001 + type: ClusterIP + postgresql: + enabled: false + extraEnv: [] + global: + postgresql: + database: pipelinesdb + host: null + password: "" + port: 5432 + ssl: false + user: apiuser + vault: + host: null + port: null + token: null + image: + debug: false + pullPolicy: IfNotPresent + registry: docker.bintray.io + repository: bitnami/postgresql + tag: 9.6.18-debian-10-r7 + ldap: + baseDN: "" + bind_password: null + bindDN: "" + enabled: false + port: "" + prefix: "" + scheme: "" + search_attr: "" + search_filter: "" + server: "" + suffix: "" + tls: false + url: "" + livenessProbe: + enabled: true + failureThreshold: 6 + initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + master: + affinity: {} + annotations: {} + extraInitContainers: [] + extraVolumeMounts: [] + extraVolumes: [] + labels: {} + nodeSelector: {} + podAnnotations: {} + podLabels: {} + priorityClassName: "" + resources: {} + service: {} + sidecars: [] + tolerations: [] + metrics: + enabled: false + image: + pullPolicy: IfNotPresent + registry: docker.io + repository: bitnami/postgres-exporter + tag: 0.8.0-debian-10-r72 + livenessProbe: + enabled: true + failureThreshold: 6 + initialDelaySeconds: 5 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + prometheusRule: + additionalLabels: {} + enabled: false + namespace: "" + rules: [] + readinessProbe: + enabled: true + 
failureThreshold: 6 + initialDelaySeconds: 5 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + securityContext: + enabled: false + runAsUser: 1001 + service: + annotations: + prometheus.io/port: "9187" + prometheus.io/scrape: "true" + loadBalancerIP: null + type: ClusterIP + serviceMonitor: + additionalLabels: {} + enabled: false + networkPolicy: + allowExternal: true + enabled: false + explicitNamespacesSelector: {} + persistence: + accessModes: + - ReadWriteOnce + annotations: {} + enabled: true + existingClaim: null + mountPath: /bitnami/postgresql + size: 50Gi + subPath: "" + postgresqlDataDir: /bitnami/postgresql/data + postgresqlDatabase: pipelinesdb + postgresqlPassword: "" + postgresqlUsername: apiuser + readinessProbe: + enabled: true + failureThreshold: 6 + initialDelaySeconds: 5 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + replication: + applicationName: my_application + enabled: false + numSynchronousReplicas: 0 + password: repl_password + slaveReplicas: 1 + synchronousCommit: "off" + user: repl_user + resources: + requests: + cpu: 250m + memory: 256Mi + securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + service: + annotations: {} + port: 5432 + type: ClusterIP + serviceAccount: + enabled: false + shmVolume: + chmod: + enabled: true + enabled: true + slave: + affinity: {} + annotations: {} + extraInitContainers: | + # - name: do-something + # image: busybox + # command: ['do', 'something'] + extraVolumeMounts: [] + extraVolumes: [] + labels: {} + nodeSelector: {} + podAnnotations: {} + podLabels: {} + priorityClassName: "" + service: {} + sidecars: [] + tolerations: [] + updateStrategy: + type: RollingUpdate + volumePermissions: + enabled: false + image: + pullPolicy: Always + registry: docker.io + repository: bitnami/minideb + tag: buster + securityContext: + runAsUser: 0 + rabbitmq: + affinity: {} + enabled: true + externalUrl: amqps://pipelines-rabbit.jfrog.tech + extraSecrets: {} + 
extraVolumeMounts: [] + extraVolumes: [] + forceBoot: + enabled: false + global: + postgresql: + database: pipelinesdb + host: postgres-postgresql + password: password + port: 5432 + ssl: false + user: artifactory + vault: + host: OVERRIDE + port: OVERRIDE + token: OVERRIDE + image: + debug: false + pullPolicy: IfNotPresent + registry: registry.connect.redhat.com + repository: jfrog/xray-rabbitmq + tag: 3.8.6 + ingress: + annotations: null + enabled: false + path: / + tls: true + tlsSecret: OVERRIDE + ldap: + enabled: false + port: "389" + server: "" + tls: + enabled: false + user_dn_pattern: cn=${username},dc=example,dc=org + livenessProbe: + commandOverride: [] + enabled: true + failureThreshold: 6 + initialDelaySeconds: 120 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 20 + metrics: + enabled: false + plugins: rabbitmq_prometheus + podAnnotations: + prometheus.io/port: '{{ .Values.metrics.port }}' + prometheus.io/scrape: "true" + port: 9419 + prometheusRule: + additionalLabels: {} + enabled: false + namespace: "" + rules: [] + serviceMonitor: + additionalLabels: {} + enabled: false + honorLabels: false + interval: 30s + networkPolicy: + allowExternal: true + enabled: false + nodeSelector: {} + persistence: + accessMode: ReadWriteOnce + enabled: true + path: /opt/bitnami/rabbitmq/var/lib/rabbitmq + size: 20Gi + podAnnotations: {} + podDisruptionBudget: {} + podLabels: {} + podManagementPolicy: OrderedReady + protocol: amqps + rabbitmq: + advancedConfiguration: "" + clustering: + address_type: hostname + k8s_domain: cluster.local + rebalance: false + configuration: |- + ## Clustering + cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s + cluster_formation.k8s.host = kubernetes.default.svc.cluster.local + cluster_formation.node_cleanup.interval = 10 + cluster_formation.node_cleanup.only_log_warning = true + cluster_partition_handling = autoheal + # queue master locator + queue_master_locator=min-masters + # enable guest user + 
loopback_users.guest = false + env: {} + erlangCookie: PIPELINESRABBITMQCLUSTER + extraConfiguration: |- + #disk_free_limit.absolute = 50MB + #management.load_definitions = /app/load_definition.json + extraPlugins: "" + loadDefinition: + enabled: false + secretName: load-definition + logs: '-' + maxAvailableSchedulers: 2 + onlineSchedulers: 1 + password: guest + plugins: rabbitmq_management rabbitmq_peer_discovery_k8s + setUlimitNofiles: true + tls: + caCertificate: "" + enabled: false + failIfNoPeerCert: true + serverCertificate: "" + serverKey: "" + sslOptionsVerify: verify_peer + ulimitNofiles: "65536" + username: guest + rbacEnabled: true + readinessProbe: + commandOverride: [] + enabled: true + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 20 + replicas: 1 + resources: {} + securityContext: + enabled: true + extra: {} + fsGroup: 1001 + runAsUser: 1001 + service: + annotations: {} + distPort: 25672 + managerPort: 15672 + port: 5672 + tlsPort: 5671 + type: ClusterIP + serviceVmLb: + annotations: null + enabled: false + loadBalancerIP: null + loadBalancerSourceRanges: [] + tolerations: [] + updateStrategy: + type: RollingUpdate + volumePermissions: + enabled: false + image: + pullPolicy: Always + registry: docker.io + repository: bitnami/minideb + tag: buster + resources: {} + rbac: + create: true + redis: + cluster: + enabled: false + slaveCount: 2 + clusterDomain: cluster.local + configmap: |- + # Enable AOF https://redis.io/topics/persistence#append-only-file + appendonly yes + # Disable RDB persistence, AOF persistence already enabled. 
+ save "" + enabled: true + global: + postgresql: + database: pipelinesdb + host: postgres-postgresql + password: password + port: 5432 + ssl: false + user: artifactory + redis: {} + vault: + host: OVERRIDE + port: OVERRIDE + token: OVERRIDE + image: + pullPolicy: IfNotPresent + registry: registry.redhat.io + repository: rhel8/redis-5 + tag: 1-98 + master: + affinity: {} + command: "" + configmap: |- + appendonly yes + loglevel notice + disableCommands: + - FLUSHDB + - FLUSHALL + extraFlags: [] + livenessProbe: + enabled: true + failureThreshold: 5 + initialDelaySeconds: 5 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 5 + persistence: + accessModes: + - ReadWriteOnce + enabled: true + matchExpressions: {} + matchLabels: {} + path: /data + size: 8Gi + subPath: "" + podAnnotations: {} + podLabels: {} + readinessProbe: + enabled: true + failureThreshold: 5 + initialDelaySeconds: 5 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 1 + resources: {} + service: + annotations: {} + labels: {} + loadBalancerIP: null + port: 6379 + type: ClusterIP + statefulset: + updateStrategy: RollingUpdate + metrics: + enabled: false + image: + pullPolicy: IfNotPresent + registry: docker.io + repository: bitnami/redis-exporter + tag: 1.5.2-debian-10-r21 + podAnnotations: + prometheus.io/port: "9121" + prometheus.io/scrape: "true" + prometheusRule: + additionalLabels: {} + enabled: false + namespace: "" + rules: [] + service: + annotations: {} + labels: {} + type: ClusterIP + serviceMonitor: + enabled: false + selector: + prometheus: kube-prometheus + networkPolicy: + enabled: false + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + password: "" + persistence: {} + podSecurityPolicy: + create: false + rbac: + create: false + role: + rules: [] + redisPort: 6379 + securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + sentinel: + configmap: null + downAfterMilliseconds: 60000 + enabled: false + failoverTimeout: 18000 + image: + pullPolicy: 
IfNotPresent + registry: docker.io + repository: bitnami/redis-sentinel + tag: 5.0.8-debian-10-r25 + initialCheckTimeout: 5 + livenessProbe: + enabled: true + failureThreshold: 5 + initialDelaySeconds: 5 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 5 + masterSet: mymaster + parallelSyncs: 1 + port: 26379 + quorum: 2 + readinessProbe: + enabled: true + failureThreshold: 5 + initialDelaySeconds: 5 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 1 + service: + annotations: {} + labels: {} + loadBalancerIP: null + redisPort: 6379 + sentinelPort: 26379 + type: ClusterIP + staticID: false + usePassword: true + serviceAccount: + create: false + name: null + slave: + affinity: {} + command: /run.sh + configmap: null + disableCommands: + - FLUSHDB + - FLUSHALL + extraFlags: [] + livenessProbe: + enabled: true + failureThreshold: 5 + initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + persistence: + accessModes: + - ReadWriteOnce + enabled: true + matchExpressions: {} + matchLabels: {} + path: /data + size: 8Gi + subPath: "" + podAnnotations: {} + podLabels: {} + port: 6379 + readinessProbe: + enabled: true + failureThreshold: 5 + initialDelaySeconds: 5 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 10 + resources: {} + service: + annotations: {} + labels: {} + loadBalancerIP: null + port: 6379 + type: ClusterIP + statefulset: + updateStrategy: RollingUpdate + sysctlImage: + command: [] + enabled: false + mountHostSys: false + pullPolicy: Always + registry: docker.io + repository: bitnami/minideb + resources: {} + tag: buster + usePassword: false + usePasswordFile: false + volumePermissions: + enabled: false + image: + pullPolicy: Always + registry: docker.io + repository: bitnami/minideb + tag: buster + resources: {} + runMode: production + runtimeOverride: {} + securityContext: + enabled: true + gid: 1030 + uid: 1030 + vault: + affinity: {} + configMaps: "" + customInitContainers: "" + 
customVolumeMounts: "" + customVolumes: "" + disablemlock: false + enabled: true + image: + pullPolicy: IfNotPresent + repository: registry.connect.redhat.com/jfrog/pipelines-vault + tag: 1.7.1 + init: + image: + pullPolicy: IfNotPresent + repository: jfrog/pipelines-vault-init + nodeSelector: {} + rbac: + role: + rules: + - apiGroups: + - "" + resources: + - secrets + verbs: + - '*' + resources: {} + service: + port: 30100 + type: ClusterIP + tolerations: [] + updateStrategy: RollingUpdate + +HOOKS: +MANIFEST: +--- +# Source: openshift-pipelines/charts/pipelines/charts/rabbitmq/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: pipelines-rabbitmq + namespace: default + labels: + app: rabbitmq + chart: rabbitmq-6.25.0 + release: "pipelines" + heritage: "Helm" +secrets: + - name: "pipelines-rabbitmq" +--- +# Source: openshift-pipelines/charts/pipelines/templates/service-account.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: pipelines + labels: + helm.sh/chart: pipelines-1.4.5 + app.kubernetes.io/name: pipelines + app.kubernetes.io/instance: pipelines + app.kubernetes.io/version: "1.7.2" + app.kubernetes.io/managed-by: Helm +--- +# Source: openshift-pipelines/charts/pipelines/templates/vault-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: pipelines-pipelines-vault + labels: + helm.sh/chart: pipelines-1.4.5 + app.kubernetes.io/name: pipelines + app.kubernetes.io/instance: pipelines + app.kubernetes.io/version: "1.7.2" + app.kubernetes.io/managed-by: Helm + component: pipelines-pipelines-vault +--- +# Source: openshift-pipelines/charts/pipelines/charts/rabbitmq/templates/secrets.yaml +apiVersion: v1 +kind: Secret +metadata: + name: pipelines-rabbitmq + namespace: default + labels: + app: rabbitmq + chart: rabbitmq-6.25.0 + release: "pipelines" + heritage: "Helm" +type: Opaque +data: + + rabbitmq-password: "Z3Vlc3Q=" + + + rabbitmq-erlang-cookie: "UElQRUxJTkVTUkFCQklUTVFDTFVTVEVS" +--- +# 
Source: openshift-pipelines/charts/pipelines/templates/database-secret.yaml +apiVersion: v1 +kind: Secret +metadata: + name: pipelines-database + labels: + helm.sh/chart: pipelines-1.4.5 + app.kubernetes.io/name: pipelines + app.kubernetes.io/instance: pipelines + app.kubernetes.io/version: "1.7.2" + app.kubernetes.io/managed-by: Helm +type: Opaque +data: + postgresql-password: "cGFzc3dvcmQ=" + postgresql-url: cG9zdGdyZXM6Ly9hcnRpZmFjdG9yeTpwYXNzd29yZEBwb3N0Z3Jlcy1wb3N0Z3Jlc3FsOjU0MzIvcGlwZWxpbmVzZGI/c3NsbW9kZT1kaXNhYmxl +--- +# Source: openshift-pipelines/charts/pipelines/templates/pipelines-system-yaml.yaml +apiVersion: v1 +kind: Secret +metadata: + name: pipelines-system-yaml + labels: + helm.sh/chart: pipelines-1.4.5 + app.kubernetes.io/name: pipelines + app.kubernetes.io/instance: pipelines + app.kubernetes.io/version: "1.7.2" + app.kubernetes.io/managed-by: Helm +type: Opaque +data: +stringData: + system.yaml: | + shared: + ## Artifactory configuration + ## + artifactory: + ## Artifactory URL + ## + baseUrl: "http://openshiftartifactoryha-nginx" + ## Unified UI URL + ## + baseUrlUI: "http://openshiftartifactoryha-nginx" + ## Pipelines Service ID + ## + serviceId: "jfpip@12345" + ## Artifactory Service ID + ## + artifactoryServiceId: "FFFFFFFFFFFF" + ## Artifactory License ID + ## + licenseId: "FFFFFFFFF" + ## Proxy to connect to Artifactory + ## + proxy: + url: "" + username: "" + password: "" + + ## Router configuration + ## + router: + ip: "" + accessPort: 8046 + dataPort: 8082 + joinKey: "EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE" + + security: + masterKey: "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF" + + ## Database configuration + ## + db: + type: "postgres" + ip: postgres-postgresql + port: "5432" + name: pipelinesdb + username: artifactory + password: password + externalUrl: "" + connectionString: "postgres://artifactory:password@postgres-postgresql:5432/pipelinesdb" + + ## RabbitMQ configuration + ## + msg: + ip: pipelines-rabbitmq + 
port: 5672 + adminPort: 15672 + erlangCookie: PIPELINESRABBITMQCLUSTER + username: guest + password: guest + defaultExchange: pipelinesEx + amqpVhost: pipelines + amqpRootVhost: pipelinesRoot + queues: + - "core.pipelineSync" + - "core.runTrigger" + - "core.stepTrigger" + - "core.marshaller" + - "cluster.init" + - "core.logup" + - "www.signals" + - "core.nexec" + - "core.hookHandler" + - "core.extensionSync" + ui: + username: monitor + password: monitor + external: + ## URL for build plane VMs to access RabbitMQ + url: amqps://pipelines-rabbit.jfrog.tech + rootUrl: "" + adminUrl: "" + + ## Vault configuration + ## + vault: + ip: pipelines-pipelines-vault + port: 30100 + ## DO NOT CHANGE THE TOKEN VALUE!!! + token: "_VAULT_TOKEN_" + unsealKeys: + - "" + - "" + - "" + - "" + - "" + + ## Redis configuration + ## + redis: + ip: pipelines-redis-master + port: 6379 + clusterEnabled: false + + ## This section is used for bringing up the core services and setting up + ## configurations required by the installer & the services + ## + core: + ## id is automatically determined based on the current hostname + ## or set using the SHARED_NODE_ID environment variable. 
+ ## + id: "afd8df9d08bf257ae9b7d7dbbf348b7a3a574ebdd3a61d350d4b64e3129dee85" + installerIP: "1.2.3.4" + installerAuthToken: "c7595edd-b63d-4fd6-9e1e-13924d6637f0" + installerImage: "jfrog/pipelines-installer" + registryUrl: "registry.connect.redhat.com" + os: "Ubuntu_16.04" + osDistribution: "xenial" + architecture: "x86_64" + dockerVersion: "" + runMode: "production" + user: "" + group: "" + noVerifySsl: false + ignoreTLSErrors: false + controlplaneVersion: "1.7.1" + buildplaneVersion: "1.7.1" + accessControlAllowOrigins: + - http://openshiftartifactoryha-nginx + - http://openshiftartifactoryha-nginx + rabbitmqHealthCheckIntervalInMins: 1 + ## Global proxy settings, to be applied to all services + ## + proxy: + httpProxy: "" + httpsProxy: "" + noProxy: "" + username: "" + password: "" + + ## Mailserver settings + ## + mailserver: + host: "" + port: "" + username: "" + password: "" + tls: "" + ssl: "" + apiRetryIntervalMs: 3000 + accountSyncFrequencyHr: 1 + imageRegistrySecret: "" + hardDeleteIntervalInMins: 60 + configBackupCount: 5 + lastUpdateTime: "" + callHomeUrl: "https://api.bintray.com/products/jfrog/pipelines/stats/usage" + allowCallHome: true + serviceInstanceHealthCheckIntervalInMins: 1 + serviceInstanceStatsCutOffIntervalInHours: 24 + + ## Service configuration + ## + services: + api: + name: pipelines-pipelines-api + port: 30000 + externalUrl: http://pipelines-api.jfrog.tech + www: + name: pipelines-pipelines-www + port: 30001 + externalUrl: http://pipelines-www.jfrog.tech + sessionSecret: "c7595edd-b63d-4fd6-9e1e-13924d6637f0" + pipelineSync: + name: pipelineSync + runTrigger: + name: runTrigger + stepTrigger: + name: stepTrigger + cron: + name: cron + nexec: + name: nexec + hookHandler: + name: hookHandler + marshaller: + name: marshaller + extensionSync: + name: extensionSync + + ## Runtime configuration + ## + runtime: + rootBucket: "jfrogpipelines" + defaultMinionCount: 1 + nodeCacheIntervalMS: 600000 + jobConsoleBatchSize: 10 + 
jobConsoleBufferIntervalMs: 3 + maxDiskUsagePercentage: 90 + stepTimeoutMS: 3600000 + nodeStopDayOfWeek: 0 + nodeStopIntervalDays: 30 + maxNodeCheckInDelayMin: 15 + defaultMinionInstanceSize: "c4.large" + allowDynamicNodes: true + allowCustomNodes: true + languageImages: + - architecture: x86_64 + os: Ubuntu_16.04 + language: node + registryUrl: docker.bintray.io + image: jfrog/pipelines-u16node + isDefault: true + defaultVersion: 10.18.0 + - architecture: x86_64 + os: Ubuntu_16.04 + language: java + registryUrl: docker.bintray.io + image: jfrog/pipelines-u16java + defaultVersion: 13 + - architecture: x86_64 + os: Ubuntu_16.04 + language: cpp + registryUrl: docker.bintray.io + image: jfrog/pipelines-u16cpp + defaultVersion: 9.0.0 + - architecture: x86_64 + os: Ubuntu_16.04 + language: go + registryUrl: docker.bintray.io + image: jfrog/pipelines-u16go + defaultVersion: 1.12.14 + - architecture: x86_64 + os: Ubuntu_18.04 + language: node + registryUrl: docker.bintray.io + image: jfrog/pipelines-u18node + isDefault: true + defaultVersion: 10.18.0 + - architecture: x86_64 + os: Ubuntu_18.04 + language: java + registryUrl: docker.bintray.io + image: jfrog/pipelines-u18java + defaultVersion: 13 + - architecture: x86_64 + os: Ubuntu_18.04 + language: cpp + registryUrl: docker.bintray.io + image: jfrog/pipelines-u18cpp + defaultVersion: 9.0.0 + - architecture: x86_64 + os: Ubuntu_18.04 + language: go + registryUrl: docker.bintray.io + image: jfrog/pipelines-u18go + defaultVersion: 1.12.14 + - architecture: x86_64 + os: CentOS_7 + language: node + registryUrl: docker.bintray.io + image: jfrog/pipelines-c7node + isDefault: true + defaultVersion: 10.18.0 + - architecture: x86_64 + os: CentOS_7 + language: java + registryUrl: docker.bintray.io + image: jfrog/pipelines-c7java + defaultVersion: 11 + - architecture: x86_64 + os: CentOS_7 + language: cpp + registryUrl: docker.bintray.io + image: jfrog/pipelines-c7cpp + defaultVersion: 3.4.2 + - architecture: x86_64 + os: CentOS_7 
+ language: go + registryUrl: docker.bintray.io + image: jfrog/pipelines-c7go + defaultVersion: 1.12.14 + - architecture: x86_64 + os: WindowsServer_2019 + language: node + registryUrl: docker.bintray.io + image: jfrog/pipelines-w19node + defaultVersion: 10.18.0 + - architecture: x86_64 + os: WindowsServer_2019 + language: java + registryUrl: docker.bintray.io + image: jfrog/pipelines-w19java + defaultVersion: 11 + - architecture: x86_64 + os: WindowsServer_2019 + language: cpp + registryUrl: docker.bintray.io + image: jfrog/pipelines-w19cpp + defaultVersion: 9.0.0 + - architecture: x86_64 + os: WindowsServer_2019 + language: go + registryUrl: docker.bintray.io + image: jfrog/pipelines-w19go + defaultVersion: 1.12.14 + - architecture: x86_64 + os: WindowsServer_2019 + language: dotnetcore + registryUrl: docker.bintray.io + image: jfrog/pipelines-w19dotnetcore + isDefault: true + defaultVersion: 3.1 + - architecture: x86_64 + os: RHEL_7 + language: node + registryUrl: docker.bintray.io + image: jfrog/pipelines-c7node + isDefault: true + defaultVersion: 10.18.0 + - architecture: x86_64 + os: RHEL_7 + language: java + registryUrl: docker.bintray.io + image: jfrog/pipelines-c7java + defaultVersion: 11 + - architecture: x86_64 + os: RHEL_7 + language: cpp + registryUrl: docker.bintray.io + image: jfrog/pipelines-c7cpp + defaultVersion: 3.4.2 + - architecture: x86_64 + os: RHEL_7 + language: go + registryUrl: docker.bintray.io + image: jfrog/pipelines-c7go + defaultVersion: 1.12.14 +--- +# Source: openshift-pipelines/charts/pipelines/templates/rabbitmq-secret.yaml +apiVersion: v1 +kind: Secret +metadata: + name: pipelines-rabbitmq-secret + labels: + helm.sh/chart: pipelines-1.4.5 + app.kubernetes.io/name: pipelines + app.kubernetes.io/instance: pipelines + app.kubernetes.io/version: "1.7.2" + app.kubernetes.io/managed-by: Helm +type: Opaque +data: + rabbitmq-erlang-cookie: "UElQRUxJTkVTUkFCQklUTVFDTFVTVEVS" + rabbitmq-password: "Z3Vlc3Q=" +--- +# Source: 
openshift-pipelines/charts/pipelines/charts/rabbitmq/templates/configuration.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: pipelines-rabbitmq-config + namespace: default + labels: + app: rabbitmq + chart: rabbitmq-6.25.0 + release: "pipelines" + heritage: "Helm" +data: + enabled_plugins: |- + [rabbitmq_management, rabbitmq_peer_discovery_k8s]. + rabbitmq.conf: |- + ##username and password + default_user=guest + default_pass=CHANGEME + ## Clustering + cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s + cluster_formation.k8s.host = kubernetes.default.svc.cluster.local + cluster_formation.node_cleanup.interval = 10 + cluster_formation.node_cleanup.only_log_warning = true + cluster_partition_handling = autoheal + # queue master locator + queue_master_locator=min-masters + # enable guest user + loopback_users.guest = false + #disk_free_limit.absolute = 50MB + #management.load_definitions = /app/load_definition.json +--- +# Source: openshift-pipelines/charts/pipelines/charts/rabbitmq/templates/healthchecks.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: pipelines-rabbitmq-healthchecks + namespace: default + labels: + app: rabbitmq + chart: rabbitmq-6.25.0 + release: "pipelines" + heritage: "Helm" +data: + rabbitmq-health-check: |- + #!/bin/sh + START_FLAG=/opt/bitnami/rabbitmq/var/lib/rabbitmq/.start + if [ -f ${START_FLAG} ]; then + rabbitmqctl node_health_check + RESULT=$? + if [ $RESULT -ne 0 ]; then + rabbitmqctl status + exit $? 
+ fi + rm -f ${START_FLAG} + exit ${RESULT} + fi + rabbitmq-api-check $1 $2 + rabbitmq-api-check: |- + #!/bin/sh + set -e + URL=$1 + EXPECTED=$2 + ACTUAL=$(curl --silent --show-error --fail "${URL}") + echo "${ACTUAL}" + test "${EXPECTED}" = "${ACTUAL}" +--- +# Source: openshift-pipelines/charts/pipelines/charts/redis/templates/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: pipelines-redis + namespace: default + labels: + app: redis + chart: redis-10.6.3 + heritage: Helm + release: pipelines +data: + redis.conf: |- + # User-supplied configuration: + # Enable AOF https://redis.io/topics/persistence#append-only-file + appendonly yes + # Disable RDB persistence, AOF persistence already enabled. + save "" + master.conf: |- + dir /data + # User-supplied master configuration: + appendonly yes + loglevel notice + rename-command FLUSHDB "" + rename-command FLUSHALL "" + replica.conf: |- + dir /data + slave-read-only yes + rename-command FLUSHDB "" + rename-command FLUSHALL "" +--- +# Source: openshift-pipelines/charts/pipelines/charts/redis/templates/health-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: pipelines-redis-health + namespace: default + labels: + app: redis + chart: redis-10.6.3 + heritage: Helm + release: pipelines +data: + ping_readiness_local.sh: |- + #!/bin/bash + response=$( + timeout -s 9 $1 \ + redis-cli \ + -h localhost \ + -p $REDIS_PORT \ + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + ping_liveness_local.sh: |- + #!/bin/bash + response=$( + timeout -s 9 $1 \ + redis-cli \ + -h localhost \ + -p $REDIS_PORT \ + ping + ) + if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then + echo "$response" + exit 1 + fi + ping_readiness_master.sh: |- + #!/bin/bash + response=$( + timeout -s 9 $1 \ + redis-cli \ + -h $REDIS_MASTER_HOST \ + -p $REDIS_MASTER_PORT_NUMBER \ + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + 
exit 1 + fi + ping_liveness_master.sh: |- + #!/bin/bash + response=$( + timeout -s 9 $1 \ + redis-cli \ + -h $REDIS_MASTER_HOST \ + -p $REDIS_MASTER_PORT_NUMBER \ + ping + ) + if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then + echo "$response" + exit 1 + fi + ping_readiness_local_and_master.sh: |- + script_dir="$(dirname "$0")" + exit_status=0 + "$script_dir/ping_readiness_local.sh" $1 || exit_status=$? + "$script_dir/ping_readiness_master.sh" $1 || exit_status=$? + exit $exit_status + ping_liveness_local_and_master.sh: |- + script_dir="$(dirname "$0")" + exit_status=0 + "$script_dir/ping_liveness_local.sh" $1 || exit_status=$? + "$script_dir/ping_liveness_master.sh" $1 || exit_status=$? + exit $exit_status +--- +# Source: openshift-pipelines/charts/pipelines/templates/pipelines-role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: pipelines + labels: + helm.sh/chart: pipelines-1.4.5 + app.kubernetes.io/name: pipelines + app.kubernetes.io/instance: pipelines + app.kubernetes.io/version: "1.7.2" + app.kubernetes.io/managed-by: Helm +rules: +- apiGroups: + - "" + - extensions + - apps + resources: + - deployments + - persistentvolumes + - persistentvolumeclaims + - pods + - deployments/scale + verbs: + - '*' +--- +# Source: openshift-pipelines/charts/pipelines/templates/pipelines-rolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: pipelines + labels: + helm.sh/chart: pipelines-1.4.5 + app.kubernetes.io/name: pipelines + app.kubernetes.io/instance: pipelines + app.kubernetes.io/version: "1.7.2" + app.kubernetes.io/managed-by: Helm +subjects: +- kind: ServiceAccount + name: pipelines + namespace: default +roleRef: + kind: ClusterRole + apiGroup: rbac.authorization.k8s.io + name: pipelines +--- +# Source: openshift-pipelines/charts/pipelines/charts/rabbitmq/templates/role.yaml +kind: Role +apiVersion: 
rbac.authorization.k8s.io/v1 +metadata: + name: pipelines-rabbitmq-endpoint-reader + namespace: default + labels: + app: rabbitmq + chart: rabbitmq-6.25.0 + release: "pipelines" + heritage: "Helm" +rules: +- apiGroups: [""] + resources: ["endpoints"] + verbs: ["get"] +- apiGroups: [""] + resources: ["events"] + verbs: ["create"] +--- +# Source: openshift-pipelines/charts/pipelines/templates/vault-role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: pipelines-pipelines-vault + labels: + helm.sh/chart: pipelines-1.4.5 + app.kubernetes.io/name: pipelines + app.kubernetes.io/instance: pipelines + app.kubernetes.io/version: "1.7.2" + app.kubernetes.io/managed-by: Helm + component: pipelines-pipelines-vault +rules: +- apiGroups: + - "" + resources: + - secrets + verbs: + - '*' +--- +# Source: openshift-pipelines/charts/pipelines/charts/rabbitmq/templates/rolebinding.yaml +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: pipelines-rabbitmq-endpoint-reader + namespace: default + labels: + app: rabbitmq + chart: rabbitmq-6.25.0 + release: "pipelines" + heritage: "Helm" +subjects: +- kind: ServiceAccount + name: pipelines-rabbitmq +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pipelines-rabbitmq-endpoint-reader +--- +# Source: openshift-pipelines/charts/pipelines/templates/vault-rolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: pipelines-pipelines-vault + labels: + helm.sh/chart: pipelines-1.4.5 + app.kubernetes.io/name: pipelines + app.kubernetes.io/instance: pipelines + app.kubernetes.io/version: "1.7.2" + app.kubernetes.io/managed-by: Helm + component: pipelines-pipelines-vault +subjects: +- kind: ServiceAccount + name: pipelines-pipelines-vault +roleRef: + kind: Role + apiGroup: rbac.authorization.k8s.io + name: pipelines-pipelines-vault +--- +# Source: openshift-pipelines/charts/pipelines/charts/rabbitmq/templates/svc-headless.yaml 
+apiVersion: v1 +kind: Service +metadata: + name: pipelines-rabbitmq-headless + namespace: default + labels: + app: rabbitmq + chart: rabbitmq-6.25.0 + release: "pipelines" + heritage: "Helm" +spec: + clusterIP: None + ports: + - name: epmd + port: 4369 + targetPort: epmd + - name: amqp + port: 5672 + targetPort: amqp + - name: dist + port: 25672 + targetPort: dist + - name: stats + port: 15672 + targetPort: stats + selector: + app: rabbitmq + release: "pipelines" +--- +# Source: openshift-pipelines/charts/pipelines/charts/rabbitmq/templates/svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: pipelines-rabbitmq + namespace: default + labels: + app: rabbitmq + chart: rabbitmq-6.25.0 + release: "pipelines" + heritage: "Helm" +spec: + type: ClusterIP + ports: + - name: epmd + port: 4369 + targetPort: epmd + nodePort: null + - name: amqp + port: 5672 + targetPort: amqp + nodePort: null + - name: dist + port: 25672 + targetPort: dist + nodePort: null + - name: stats + port: 15672 + targetPort: stats + nodePort: null + selector: + app: rabbitmq + release: "pipelines" +--- +# Source: openshift-pipelines/charts/pipelines/charts/redis/templates/headless-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: pipelines-redis-headless + namespace: default + labels: + app: redis + chart: redis-10.6.3 + release: pipelines + heritage: Helm +spec: + type: ClusterIP + clusterIP: None + ports: + - name: redis + port: 6379 + targetPort: redis + selector: + app: redis + release: pipelines +--- +# Source: openshift-pipelines/charts/pipelines/charts/redis/templates/redis-master-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: pipelines-redis-master + namespace: default + labels: + app: redis + chart: redis-10.6.3 + release: pipelines + heritage: Helm +spec: + type: ClusterIP + ports: + - name: redis + port: 6379 + targetPort: redis + selector: + app: redis + release: pipelines + role: master +--- +# Source: 
openshift-pipelines/charts/pipelines/templates/api-service.yaml +apiVersion: v1 +kind: Service +metadata: + name: pipelines-pipelines-api + labels: + helm.sh/chart: pipelines-1.4.5 + app.kubernetes.io/name: pipelines + app.kubernetes.io/instance: pipelines + app.kubernetes.io/version: "1.7.2" + app.kubernetes.io/managed-by: Helm + component: pipelines-pipelines-api +spec: + type: ClusterIP + ports: + - port: 30000 + targetPort: 30000 + protocol: TCP + name: api + selector: + app.kubernetes.io/name: pipelines + app.kubernetes.io/instance: pipelines + component: pipelines-pipelines-services +--- +# Source: openshift-pipelines/charts/pipelines/templates/pipelines-service-headless.yaml +apiVersion: v1 +kind: Service +metadata: + name: pipelines-pipelines-services-headless + labels: + helm.sh/chart: pipelines-1.4.5 + app.kubernetes.io/name: pipelines + app.kubernetes.io/instance: pipelines + app.kubernetes.io/version: "1.7.2" + app.kubernetes.io/managed-by: Helm +spec: + type: ClusterIP + clusterIP: None + ports: + - port: 30000 + targetPort: 30000 + protocol: TCP + name: api + - port: 30001 + targetPort: 30001 + protocol: TCP + name: www + selector: + app.kubernetes.io/name: pipelines + app.kubernetes.io/instance: pipelines + component: pipelines-pipelines-services +--- +# Source: openshift-pipelines/charts/pipelines/templates/vault-service-headless.yaml +apiVersion: v1 +kind: Service +metadata: + name: pipelines-pipelines-vault-headless + labels: + helm.sh/chart: pipelines-1.4.5 + app.kubernetes.io/name: pipelines + app.kubernetes.io/instance: pipelines + app.kubernetes.io/version: "1.7.2" + app.kubernetes.io/managed-by: Helm + component: pipelines-pipelines-vault +spec: + type: ClusterIP + clusterIP: None + ports: + - name: http + port: 30100 + targetPort: 30100 + protocol: TCP + - name: server + port: 30101 + protocol: TCP + selector: + app.kubernetes.io/name: pipelines + app.kubernetes.io/instance: pipelines + component: pipelines-pipelines-vault +--- +# Source: 
openshift-pipelines/charts/pipelines/templates/vault-service.yaml +apiVersion: v1 +kind: Service +metadata: + name: pipelines-pipelines-vault + labels: + helm.sh/chart: pipelines-1.4.5 + app.kubernetes.io/name: pipelines + app.kubernetes.io/instance: pipelines + app.kubernetes.io/version: "1.7.2" + app.kubernetes.io/managed-by: Helm + component: pipelines-pipelines-vault +spec: + type: ClusterIP + ports: + - name: http + port: 30100 + targetPort: 30100 + protocol: TCP + - name: server + port: 30101 + protocol: TCP + selector: + app.kubernetes.io/name: pipelines + app.kubernetes.io/instance: pipelines + component: pipelines-pipelines-vault +--- +# Source: openshift-pipelines/charts/pipelines/templates/www-service.yaml +apiVersion: v1 +kind: Service +metadata: + name: pipelines-pipelines-www + labels: + helm.sh/chart: pipelines-1.4.5 + app.kubernetes.io/name: pipelines + app.kubernetes.io/instance: pipelines + app.kubernetes.io/version: "1.7.2" + app.kubernetes.io/managed-by: Helm + component: pipelines-pipelines-www +spec: + type: ClusterIP + ports: + - port: 30001 + targetPort: 30001 + protocol: TCP + name: www + selector: + app.kubernetes.io/name: pipelines + app.kubernetes.io/instance: pipelines + component: pipelines-pipelines-services +--- +# Source: openshift-pipelines/charts/pipelines/charts/rabbitmq/templates/statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: pipelines-rabbitmq + namespace: default + labels: + app: rabbitmq + chart: rabbitmq-6.25.0 + release: "pipelines" + heritage: "Helm" +spec: + serviceName: pipelines-rabbitmq-headless + podManagementPolicy: OrderedReady + replicas: 1 + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + app: rabbitmq + release: "pipelines" + template: + metadata: + labels: + app: rabbitmq + release: "pipelines" + chart: rabbitmq-6.25.0 + annotations: + checksum/secret: cd200625b24962e95e00a823013671ecf528464dc6d000ff2103710176764a2a + spec: + serviceAccountName: pipelines-rabbitmq 
+ terminationGracePeriodSeconds: 10 + containers: + - name: rabbitmq + image: registry.connect.redhat.com/jfrog/xray-rabbitmq:3.8.6 + imagePullPolicy: "IfNotPresent" + command: + - bash + - -ec + - | + mkdir -p /opt/bitnami/rabbitmq/.rabbitmq/ + mkdir -p /opt/bitnami/rabbitmq/etc/rabbitmq/ + touch /opt/bitnami/rabbitmq/var/lib/rabbitmq/.start + #persist the erlang cookie in both places for server and cli tools + echo $RABBITMQ_ERL_COOKIE > /opt/bitnami/rabbitmq/var/lib/rabbitmq/.erlang.cookie + cp /opt/bitnami/rabbitmq/var/lib/rabbitmq/.erlang.cookie /opt/bitnami/rabbitmq/.rabbitmq/ + #change permission so only the user has access to the cookie file + chmod 600 /opt/bitnami/rabbitmq/.rabbitmq/.erlang.cookie /opt/bitnami/rabbitmq/var/lib/rabbitmq/.erlang.cookie + #copy the mounted configuration to both places + cp /opt/bitnami/rabbitmq/conf/* /opt/bitnami/rabbitmq/etc/rabbitmq + # Apply resources limits + ulimit -n "${RABBITMQ_ULIMIT_NOFILES}" + #replace the default password that is generated + sed -i "/CHANGEME/cdefault_pass=${RABBITMQ_PASSWORD//\\/\\\\}" /opt/bitnami/rabbitmq/etc/rabbitmq/rabbitmq.conf + exec rabbitmq-server + volumeMounts: + - name: config-volume + mountPath: /opt/bitnami/rabbitmq/conf + - name: healthchecks + mountPath: /usr/local/sbin/rabbitmq-api-check + subPath: rabbitmq-api-check + - name: healthchecks + mountPath: /usr/local/sbin/rabbitmq-health-check + subPath: rabbitmq-health-check + - name: data + mountPath: "/opt/bitnami/rabbitmq/var/lib/rabbitmq" + ports: + - name: epmd + containerPort: 4369 + - name: amqp + containerPort: 5672 + - name: dist + containerPort: 25672 + - name: stats + containerPort: 15672 + livenessProbe: + exec: + command: + - sh + - -c + - rabbitmq-api-check "http://guest:$RABBITMQ_PASSWORD@127.0.0.1:15672/api/healthchecks/node" '{"status":"ok"}' + initialDelaySeconds: 120 + timeoutSeconds: 20 + periodSeconds: 30 + failureThreshold: 6 + successThreshold: 1 + readinessProbe: + exec: + command: + - sh + - -c + - 
rabbitmq-health-check "http://guest:$RABBITMQ_PASSWORD@127.0.0.1:15672/api/healthchecks/node" '{"status":"ok"}'
+          initialDelaySeconds: 10
+          timeoutSeconds: 20
+          periodSeconds: 30
+          failureThreshold: 3
+          successThreshold: 1
+        env:
+          - name: BITNAMI_DEBUG
+            value: "false"
+          - name: MY_POD_IP
+            valueFrom:
+              fieldRef:
+                fieldPath: status.podIP
+          - name: MY_POD_NAME
+            valueFrom:
+              fieldRef:
+                fieldPath: metadata.name
+          - name: MY_POD_NAMESPACE
+            valueFrom:
+              fieldRef:
+                fieldPath: metadata.namespace
+          - name: K8S_SERVICE_NAME
+            value: "pipelines-rabbitmq-headless"
+          - name: K8S_ADDRESS_TYPE
+            value: hostname
+          - name: RABBITMQ_NODENAME
+            value: "rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local"
+          - name: K8S_HOSTNAME_SUFFIX
+            value: ".$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local"
+          - name: RABBITMQ_LOGS
+            value: "-"
+          - name: RABBITMQ_ULIMIT_NOFILES
+            value: "65536"
+          - name: RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS
+            value: "+S 2:1"
+          - name: RABBITMQ_USE_LONGNAME
+            value: "true"
+          - name: RABBITMQ_ERL_COOKIE
+            valueFrom:
+              secretKeyRef:
+                name: pipelines-rabbitmq-secret
+                key: rabbitmq-erlang-cookie
+          - name: RABBITMQ_PASSWORD
+            valueFrom:
+              secretKeyRef:
+                name: pipelines-rabbitmq-secret
+                key: rabbitmq-password
+      securityContext:
+        fsGroup: 1001
+        runAsUser: 1001
+      volumes:
+        - name: config-volume
+          configMap:
+            name: pipelines-rabbitmq-config
+            items:
+            - key: rabbitmq.conf
+              path: rabbitmq.conf
+            - key: enabled_plugins
+              path: enabled_plugins
+        - name: healthchecks
+          configMap:
+            name: pipelines-rabbitmq-healthchecks
+            items:
+            - key: rabbitmq-health-check
+              path: rabbitmq-health-check
+              mode: 111
+            - key: rabbitmq-api-check
+              path: rabbitmq-api-check
+              mode: 111
+  volumeClaimTemplates:
+    - metadata:
+        name: data
+        labels:
+          app: rabbitmq
+          release: "pipelines"
+          heritage: "Helm"
+      spec:
+        accessModes:
+          - "ReadWriteOnce"
+        resources:
+          requests:
+            storage: "20Gi"
+---
+# Source: 
openshift-pipelines/charts/pipelines/charts/redis/templates/redis-master-statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: pipelines-redis-master + namespace: default + labels: + app: redis + chart: redis-10.6.3 + release: pipelines + heritage: Helm +spec: + selector: + matchLabels: + app: redis + release: pipelines + role: master + serviceName: pipelines-redis-headless + template: + metadata: + labels: + app: redis + chart: redis-10.6.3 + release: pipelines + role: master + annotations: + checksum/health: 5d2e8523ae6c0cac2452aab66904ac5b5d6dc0a529ac4e9333177b412c6e8fd1 + checksum/configmap: 58a5a052638c9f5d1252ef740b81decddd00d24176a06b07b57f3e4b1987e666 + checksum/secret: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 + spec: + securityContext: + fsGroup: 1001 + serviceAccountName: "default" + containers: + - name: redis + image: "registry.redhat.io/rhel8/redis-5:1-98" + imagePullPolicy: "IfNotPresent" + securityContext: + runAsUser: 1001 + command: + - /bin/bash + - -c + - | + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then + cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf + fi + if [[ ! 
-f /opt/bitnami/redis/etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + ARGS=("--port" "${REDIS_PORT}") + ARGS+=("--protected-mode" "no") + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf") + redis-server "${ARGS[@]}" + env: + - name: REDIS_REPLICATION_MODE + value: master + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + - name: REDIS_PORT + value: "6379" + ports: + - name: redis + containerPort: 6379 + livenessProbe: + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + exec: + command: + - sh + - -c + - /health/ping_liveness_local.sh 5 + readinessProbe: + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + exec: + command: + - sh + - -c + - /health/ping_readiness_local.sh 5 + resources: + {} + volumeMounts: + - name: health + mountPath: /health + - name: redis-data + mountPath: /data + subPath: + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: /opt/bitnami/redis/etc/ + volumes: + - name: health + configMap: + name: pipelines-redis-health + defaultMode: 0755 + - name: config + configMap: + name: pipelines-redis + - name: redis-tmp-conf + emptyDir: {} + volumeClaimTemplates: + - metadata: + name: redis-data + labels: + app: redis + release: pipelines + heritage: Helm + component: master + spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "8Gi" + + selector: + updateStrategy: + type: RollingUpdate +--- +# Source: openshift-pipelines/charts/pipelines/templates/pipelines-statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: pipelines-pipelines-services + labels: + helm.sh/chart: pipelines-1.4.5 + app.kubernetes.io/name: pipelines + app.kubernetes.io/instance: pipelines + app.kubernetes.io/version: "1.7.2" + app.kubernetes.io/managed-by: 
Helm +spec: + serviceName: pipelines-pipelines-services-headless + replicas: 1 + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + app.kubernetes.io/name: pipelines + app.kubernetes.io/instance: pipelines + component: pipelines-pipelines-services + template: + metadata: + labels: + app.kubernetes.io/name: pipelines + app.kubernetes.io/instance: pipelines + component: pipelines-pipelines-services + annotations: + checksum/systemyaml: f5d51f2f399be165ea4c3d48b085ab08baed54b2591828cd38fb5f847af16cae + checksum/secretdb: 48459e973b36b16071c353caa94a8ca3d3b446a893f79f86af191ce6f3856887 + checksum/secretaws: 01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b + checksum/configaws: 01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b + checksum/secretk8s: 01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b + checksum/configk8s: 01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b + checksum/configfilebeat: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 + spec: + serviceAccountName: pipelines + initContainers: + - name: copy-system-yaml + image: "quay.io/jfrog/init:1.0.0" + imagePullPolicy: IfNotPresent + securityContext: + allowPrivilegeEscalation: false + command: + - '/bin/sh' + - '-c' + - > + echo "Copy system.yaml to /opt/jfrog/pipelines/var/etc"; + cp -fv /tmp/etc/system.yaml /opt/jfrog/pipelines/var/etc/system.yaml; + volumeMounts: + - name: jfrog-pipelines-folder + mountPath: /opt/jfrog/pipelines/var/etc + - name: systemyaml + mountPath: "/tmp/etc/system.yaml" + subPath: system.yaml + - name: wait-for-vault + image: "quay.io/jfrog/init:1.0.0" + imagePullPolicy: IfNotPresent + securityContext: + allowPrivilegeEscalation: false + command: + - 'sh' + - '-c' + - > + echo "Waiting for Vault to come up..."; + until nc -z -w 2 pipelines-pipelines-vault 30100 && echo Vault ok; do + sleep 2; + done; + - name: pipelines-installer + image: 
"registry.connect.redhat.com/jfrog/pipelines-installer:1.7.1"
+        imagePullPolicy: IfNotPresent
+        securityContext:
+          allowPrivilegeEscalation: false
+        env:
+        - name: VAULT_TOKEN
+          valueFrom:
+            secretKeyRef:
+              name: root-vault-secret
+              key: token
+        - name: PIPELINES_SHARED_DB_CONNECTIONSTRING
+          valueFrom:
+            secretKeyRef:
+              name: pipelines-database
+              key: postgresql-url
+        - name: PIPELINES_NODE_ID
+          valueFrom:
+            fieldRef:
+              fieldPath: "metadata.name"
+        command:
+        - 'sh'
+        - '-c'
+        - >
+          echo "Waiting for RabbitMQ to come up...";
+          until nc -z -w 2 pipelines-rabbitmq 5672 && echo rabbitmq ok; do
+            sleep 2;
+          done;
+          echo "Waiting for Redis to come up...";
+          until nc -z -w 2 pipelines-redis-master 6379 && echo redis ok; do
+            sleep 2;
+          done;
+          sleep 20;
+          ./pipelines-k8s;
+          echo "Setting router as user for system.yaml";
+          chown 1117:1117 /opt/jfrog/pipelines/var/etc/system.yaml;
+        volumeMounts:
+        - name: jfrog-pipelines-folder
+          mountPath: /opt/jfrog/pipelines/var/etc
+
+      - name: "redhat-custom-setup"
+        image: quay.io/jfrog/init:1.0.0
+        imagePullPolicy: Always
+        command:
+        - 'sh'
+        - '-c'
+        - 'chown -R 1117:1117 /opt/jfrog/pipelines/var/etc'
+        securityContext:
+          runAsUser: 0
+        volumeMounts:
+        - mountPath: "/opt/jfrog/pipelines/var/etc"
+          name: jfrog-pipelines-folder
+
+      containers:
+      - name: router
+        image: "registry.connect.redhat.com/jfrog/pipelines-router:1.7.1"
+        imagePullPolicy: IfNotPresent
+        env:
+        - name: JF_ROUTER_SERVICEREGISTRY_URL
+          value: "http://openshiftartifactoryha-nginx/access"
+        - name: JF_ROUTER_SERVICEREGISTRY_GRPCADDRESS
+          value: "openshiftartifactoryha-nginx"
+        - name: JF_ROUTER_ENTRYPOINTS_INTERNALPORT
+          value: "8046"
+        - name: JF_ROUTER_ENTRYPOINTS_EXTERNALPORT
+          value: "8082"
+        - name: JF_ROUTER_LOGGING_ROUTER_LOGLEVEL
+          value: "DEBUG"
+        - name: JF_SHARED_NODE_ID
+          valueFrom:
+            fieldRef:
+              fieldPath: "metadata.name"
+        - name: JF_SHARED_NODE_IP
+          valueFrom:
+            fieldRef:
+              fieldPath: "status.podIP"
+        - name: JF_SHARED_SECURITY_JOINKEY
+          value: 
"EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE" + - name: JF_ROUTER_ENCRYPTSYSTEMCONFIG + value: "true" + ports: + - name: router + containerPort: 8046 + securityContext: + allowPrivilegeEscalation: false + resources: + + {} + volumeMounts: + - name: jfrog-pipelines-folder + mountPath: /opt/jfrog/router/var/etc + - name: api + image: "registry.connect.redhat.com/jfrog/pipelines-api:1.7.1" + imagePullPolicy: IfNotPresent + env: + - name: PIPELINES_NODE_ID + valueFrom: + fieldRef: + fieldPath: "metadata.name" + ports: + - name: api + containerPort: 30000 + livenessProbe: + httpGet: + path: / + port: api + initialDelaySeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + readinessProbe: + httpGet: + path: / + port: api + initialDelaySeconds: 10 + timeoutSeconds: 3 + periodSeconds: 5 + resources: + {} + volumeMounts: + - name: jfrog-pipelines-folder + mountPath: /opt/jfrog/pipelines/var/etc + - name: jfrog-pipelines-logs + mountPath: /opt/jfrog/pipelines/var/log + - name: www + image: "registry.connect.redhat.com/jfrog/pipelines-www:1.7.1" + imagePullPolicy: IfNotPresent + ports: + - name: www + containerPort: 30001 + livenessProbe: + httpGet: + path: / + port: www + initialDelaySeconds: 10 + failureThreshold: 6 + timeoutSeconds: 5 + readinessProbe: + httpGet: + path: / + port: www + initialDelaySeconds: 10 + timeoutSeconds: 3 + periodSeconds: 5 + resources: + {} + volumeMounts: + - name: jfrog-pipelines-folder + mountPath: /opt/jfrog/pipelines/var/etc + - name: jfrog-pipelines-logs + mountPath: /opt/jfrog/pipelines/var/log + - name: pipelinesync + image: "registry.connect.redhat.com/jfrog/pipelines-micro:1.7.1" + imagePullPolicy: IfNotPresent + workingDir: /opt/jfrog/pipelines/app/micro/pipelineSync + env: + - name: COMPONENT + value: pipelinesync + resources: + {} + volumeMounts: + - name: jfrog-pipelines-folder + mountPath: /opt/jfrog/pipelines/var/etc + - name: jfrog-pipelines-logs + mountPath: /opt/jfrog/pipelines/var/log + - name: runtrigger + image: 
"registry.connect.redhat.com/jfrog/pipelines-micro:1.7.1" + imagePullPolicy: IfNotPresent + workingDir: /opt/jfrog/pipelines/app/micro/runTrigger + env: + - name: COMPONENT + value: runtrigger + resources: + {} + volumeMounts: + - name: jfrog-pipelines-folder + mountPath: /opt/jfrog/pipelines/var/etc + - name: jfrog-pipelines-logs + mountPath: /opt/jfrog/pipelines/var/log + - name: steptrigger + image: "registry.connect.redhat.com/jfrog/pipelines-micro:1.7.1" + imagePullPolicy: IfNotPresent + workingDir: /opt/jfrog/pipelines/app/micro/stepTrigger + env: + - name: COMPONENT + value: steptrigger + resources: + {} + volumeMounts: + - name: jfrog-pipelines-folder + mountPath: /opt/jfrog/pipelines/var/etc + - name: jfrog-pipelines-logs + mountPath: /opt/jfrog/pipelines/var/log + - name: cron + image: "registry.connect.redhat.com/jfrog/pipelines-micro:1.7.1" + imagePullPolicy: IfNotPresent + workingDir: /opt/jfrog/pipelines/app/micro/cron + env: + - name: COMPONENT + value: cron + resources: + {} + volumeMounts: + - name: jfrog-pipelines-folder + mountPath: /opt/jfrog/pipelines/var/etc + - name: jfrog-pipelines-logs + mountPath: /opt/jfrog/pipelines/var/log + - name: nexec + image: "registry.connect.redhat.com/jfrog/pipelines-micro:1.7.1" + imagePullPolicy: IfNotPresent + workingDir: /opt/jfrog/pipelines/app/micro/nexec + env: + - name: COMPONENT + value: nexec + resources: + {} + volumeMounts: + - name: jfrog-pipelines-folder + mountPath: /opt/jfrog/pipelines/var/etc + - name: jfrog-pipelines-logs + mountPath: /opt/jfrog/pipelines/var/log + - name: hookhandler + image: "registry.connect.redhat.com/jfrog/pipelines-micro:1.7.1" + imagePullPolicy: IfNotPresent + workingDir: /opt/jfrog/pipelines/app/micro/hookHandler + env: + - name: COMPONENT + value: hookhandler + resources: + {} + volumeMounts: + - name: jfrog-pipelines-folder + mountPath: /opt/jfrog/pipelines/var/etc + - name: jfrog-pipelines-logs + mountPath: /opt/jfrog/pipelines/var/log + - name: marshaller + image: 
"registry.connect.redhat.com/jfrog/pipelines-micro:1.7.1" + imagePullPolicy: IfNotPresent + workingDir: /opt/jfrog/pipelines/app/micro/marshaller + env: + - name: COMPONENT + value: marshaller + resources: + {} + volumeMounts: + - name: jfrog-pipelines-folder + mountPath: /opt/jfrog/pipelines/var/etc + - name: jfrog-pipelines-logs + mountPath: /opt/jfrog/pipelines/var/log + - name: logup + image: "registry.connect.redhat.com/jfrog/pipelines-micro:1.7.1" + imagePullPolicy: IfNotPresent + workingDir: /opt/jfrog/pipelines/app/micro/logup + env: + - name: COMPONENT + value: logup + resources: + {} + volumeMounts: + - name: jfrog-pipelines-folder + mountPath: /opt/jfrog/pipelines/var/etc + - name: jfrog-pipelines-logs + mountPath: /opt/jfrog/pipelines/var/log + - name: extensionsync + image: "registry.connect.redhat.com/jfrog/pipelines-micro:1.7.1" + imagePullPolicy: IfNotPresent + workingDir: /opt/jfrog/pipelines/app/micro/extensionSync + env: + - name: COMPONENT + value: extensionsync + resources: + {} + volumeMounts: + - name: jfrog-pipelines-folder + mountPath: /opt/jfrog/pipelines/var/etc + - name: jfrog-pipelines-logs + mountPath: /opt/jfrog/pipelines/var/log + volumes: + - name: jfrog-pipelines-folder + emptyDir: {} + - name: jfrog-pipelines-logs + emptyDir: {} + - name: systemyaml + secret: + secretName: pipelines-system-yaml +--- +# Source: openshift-pipelines/charts/pipelines/templates/vault-statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: pipelines-pipelines-vault + labels: + helm.sh/chart: pipelines-1.4.5 + app.kubernetes.io/name: pipelines + app.kubernetes.io/instance: pipelines + app.kubernetes.io/version: "1.7.2" + app.kubernetes.io/managed-by: Helm + component: pipelines-pipelines-vault +spec: + serviceName: pipelines-pipelines-vault-headless + replicas: 1 + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + app.kubernetes.io/name: pipelines + app.kubernetes.io/instance: pipelines + component: 
pipelines-pipelines-vault + template: + metadata: + labels: + app.kubernetes.io/name: pipelines + app.kubernetes.io/instance: pipelines + component: pipelines-pipelines-vault + spec: + serviceAccountName: pipelines-pipelines-vault + initContainers: + - name: config + image: 'quay.io/jfrog/init:1.0.0' + imagePullPolicy: IfNotPresent + env: + - name: PIPELINES_SHARED_DB_CONNECTIONSTRING + valueFrom: + secretKeyRef: + name: pipelines-database + key: postgresql-url + command: ["/bin/sh", "-c"] + args: + - | + cat > /etc/vault/config/vault.hcl < + echo "Waiting for Postgres to come up..."; + until nc -z -w 2 postgres-postgresql 5432 && echo database ok; do + sleep 2; + done; + sleep 10; + - name: create-vault-table + image: "registry.connect.redhat.com/jfrog/pipelines-installer:1.7.1" + imagePullPolicy: IfNotPresent + env: + - name: PIPELINES_SHARED_DB_CONNECTIONSTRING + valueFrom: + secretKeyRef: + name: pipelines-database + key: postgresql-url + command: + - 'sh' + - '-c' + - > + echo "Copy system.yaml to /opt/jfrog/pipelines/var/etc"; + cp -fv /tmp/etc/system.yaml /opt/jfrog/pipelines/var/etc/system.yaml; + echo "Creating Vault Table..."; + ./pipelines-k8s initVault; + volumeMounts: + - name: jfrog-pipelines-folder + mountPath: /opt/jfrog/pipelines/var/etc + - name: systemyaml + mountPath: "/tmp/etc/system.yaml" + subPath: system.yaml + containers: + - name: vault-init + image: "registry.connect.redhat.com/jfrog/pipelines-vault-init:1.7.1" + imagePullPolicy: IfNotPresent + env: + - name: CHECK_INTERVAL + value: "10s" + - name: VAULT_NAMESPACE + value: default + - name: VAULT_ADDRESS + value: "http://localhost:30100" + resources: + requests: + memory: 10Mi + cpu: 10m + limits: + memory: 50Mi + cpu: 50m + - name: vault + image: "registry.connect.redhat.com/jfrog/pipelines-vault:1.7.1" + imagePullPolicy: IfNotPresent + env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: "status.podIP" + - name: "VAULT_API_ADDR" + value: "http://$(POD_IP):30100" + - name: 
"VAULT_CLUSTER_ADDR" + value: "http://$(POD_IP):30101" + args: + - "server" + - "-config=/etc/vault/config/vault.hcl" + ports: + - name: http + containerPort: 30100 + protocol: "TCP" + - name: server + containerPort: 30101 + protocol: "TCP" + readinessProbe: + httpGet: + path: "/v1/sys/health?standbyok=true" + port: 30100 + scheme: HTTP + initialDelaySeconds: 5 + periodSeconds: 10 + resources: + {} + securityContext: + capabilities: + add: + - IPC_LOCK + volumeMounts: + - name: vault-config + mountPath: /etc/vault/config + volumes: + - name: vault-config + emptyDir: {} + - name: jfrog-pipelines-folder + emptyDir: {} + - name: systemyaml + secret: + secretName: pipelines-system-yaml + diff --git a/Openshift4/openshift-pipelines/requirements.lock b/Openshift4/openshift-pipelines/requirements.lock new file mode 100644 index 0000000..a315dab --- /dev/null +++ b/Openshift4/openshift-pipelines/requirements.lock @@ -0,0 +1,6 @@ +dependencies: +- name: pipelines + repository: https://charts.jfrog.io/ + version: 1.4.5 +digest: sha256:83b0fa740797074925e7f237762ff493727faf58476c3884f247acc44428202b +generated: "2020-09-21T10:32:37.846331-07:00" diff --git a/Openshift4/openshift-pipelines/requirements.yaml b/Openshift4/openshift-pipelines/requirements.yaml new file mode 100644 index 0000000..1a1825b --- /dev/null +++ b/Openshift4/openshift-pipelines/requirements.yaml @@ -0,0 +1,4 @@ +dependencies: + - name: pipelines + version: 1.4.5 + repository: https://charts.jfrog.io/ diff --git a/Openshift4/openshift-pipelines/values.yaml b/Openshift4/openshift-pipelines/values.yaml new file mode 100755 index 0000000..d69ed25 --- /dev/null +++ b/Openshift4/openshift-pipelines/values.yaml @@ -0,0 +1,1117 @@ +pipelines: + # MUST SET FOR EXTERNAL POSTGRESQL AND VAULT + global: + postgresql: + host: OVERRIDE + port: OVERRIDE + database: OVERRIDE + user: OVERRIDE + password: OVERRIDE + ssl: OVERRIDE + + vault: + host: OVERRIDE + port: OVERRIDE + token: OVERRIDE + + ## Common + initContainer: 
+ #image: registry.connect.redhat.com/jfrog/init:1.0.0 + image: quay.io/jfrog/init:1.0.0 + pullPolicy: IfNotPresent + + ## Available modes: devmode (enable it for debuging) and production + runMode: production + + ## Image Registry to pull images for Pipelines components from + ## You can override it with your private Artifactory registry + imageRegistry: registry.connect.redhat.com + + ## For supporting pulling from private registries + ## Secret type: kubernetes.io/dockerconfigjson + imagePullSecrets: + + ## Existing secret with Pipelines system.yaml + existingSecret: + + ## String to partially override pipelines.fullname template (will maintain the release name) + # nameOverride: + + ## String to fully override pipelines.fullname template + # fullnameOverride: + + ## Set user/group to run Pipelines components with + securityContext: + enabled: true + uid: 1030 + gid: 1030 + + ## Pipelines components + pipelines: + + version: 1.7.1 + + ## Artifactory URL - Mandatory + jfrogUrl: OVERRIDE + ## Artifactory UI URL - Mandatory + jfrogUrlUI: OVERRIDE + + ## Join Key to connect to Artifactory + ## IMPORTANT: You should NOT use the example joinKey for a production deployment! + joinKey: EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE + + ## Pipelines requires a unique master key + ## You can generate one with the command: "openssl rand -hex 32" + ## IMPORTANT: You should NOT use the example masterKey for a production deployment! 
+ masterKey: FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF + + ## Installer Authentication Token + ## The unique token can be generated with: uuidgen | tr '[:upper:]' '[:lower:]' + authToken: "c7595edd-b63d-4fd6-9e1e-13924d6637f0" + + ## Pipelines ID in Artifactory + ## For production, the unique ID should be generated instead of using 12345: openssl rand | tr -dc 1-9 | head -c 10 + serviceId: jfpip@12345 + + ## Artifactory Service ID + ## This should be set to the Artifactory Service ID + artifactoryServiceId: "FFFFFFFFFFFF" + + ## Artifactory License ID + ## + licenseId: "FFFFFFFFF" + + ## A name must be unique if the same Artifactory is shared between different Pipelines + ## Repository type `Generic` with layout `maven-2-default` must be precreated in advance + rootBucket: jfrogpipelines + + mountPath: /opt/jfrog/pipelines/var/etc + + logPath: /opt/jfrog/pipelines/var/log + + replicaCount: 1 + + # CORS configuration. Default values are artifactory url and www external url + accessControlAllowOrigins_0: OVERRIDE + accessControlAllowOrigins_1: OVERRIDE + + # RabbitMQ health check interval in mins + rabbitmqHealthCheckIntervalInMins: 1 + + updateStrategy: RollingUpdate + + nodeSelector: {} + tolerations: [] + affinity: {} + + ## Apply horizontal pod auto scaling on Pipelines pods + ## Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale + autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 3 + targetCPUUtilizationPercentage: 70 + + api: + image: + repository: jfrog/pipelines-api + pullPolicy: IfNotPresent + + service: + ## Supported service types: ClusterIP, NodePort and LoadBalancer + type: ClusterIP + port: 30000 + + annotations: + # external-dns.alpha.kubernetes.io/hostname: example.org + # service.beta.kubernetes.io/aws-load-balancer-backend-protocol: tcp + # service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:us-east-1:XXXXXX:certificate/XXXXXX + + ## Set LB static IP + loadBalancerIP: + + 
## Whitelist IPs allowed to LoadBalancer type services + ## Example: loadBalancerSourceRanges={82.82.190.51/32,141.141.8.8/32} + loadBalancerSourceRanges: [] + + ## External URL, it is ignored if ingress is enabled + externalUrl: OVERRIDE + + ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + path: / + hosts: + - chart-example.local + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + + resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + + router: + image: + repository: jfrog/pipelines-router + pullPolicy: IfNotPresent + + internalPort: 8046 + externalPort: 8082 + + mountPath: "/opt/jfrog/router/var/etc" + + resources: {} + # requests: + # memory: "2Gi" + # cpu: "500m" + # limits: + # memory: "4Gi" + # cpu: "2" + + www: + image: + repository: jfrog/pipelines-www + pullPolicy: IfNotPresent + + service: + ## Supported service types: ClusterIP, NodePort and LoadBalancer + type: ClusterIP + port: 30001 + + annotations: + # external-dns.alpha.kubernetes.io/hostname: example.org + # service.beta.kubernetes.io/aws-load-balancer-backend-protocol: tcp + # service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:us-east-1:XXXXXX:certificate/XXXXXX + + ## Set LB static IP + loadBalancerIP: + + ## Whitelist IPs allowed to LoadBalancer type services + ## Example: loadBalancerSourceRanges={82.82.190.51/32,141.141.8.8/32} + loadBalancerSourceRanges: [] + + ## External URL, it is ignored if ingress is enabled + externalUrl: OVERRIDE + + ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + path: / + hosts: + - chart-example.local + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + + resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + + msg: + uiUser: OVERRIDE + 
uiUserPassword: OVERRIDE + + pipelineSync: + image: + repository: jfrog/pipelines-micro + pullPolicy: IfNotPresent + + resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + + runTrigger: + image: + repository: jfrog/pipelines-micro + pullPolicy: IfNotPresent + + resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + + stepTrigger: + image: + repository: jfrog/pipelines-micro + pullPolicy: IfNotPresent + + resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + + cron: + image: + repository: jfrog/pipelines-micro + pullPolicy: IfNotPresent + + resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + + nexec: + image: + repository: jfrog/pipelines-micro + pullPolicy: IfNotPresent + + resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + + hookHandler: + image: + repository: jfrog/pipelines-micro + pullPolicy: IfNotPresent + + resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + + marshaller: + image: + repository: jfrog/pipelines-micro + pullPolicy: IfNotPresent + + resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + + logup: + image: + repository: jfrog/pipelines-micro + pullPolicy: IfNotPresent + + resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + + extensionSync: + image: + repository: jfrog/pipelines-micro + pullPolicy: IfNotPresent + + resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + + ## Pipelines installer + pipelinesInit: + image: + repository: jfrog/pipelines-installer + pullPolicy: IfNotPresent + + resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # 
memory: 128Mi + + ## Cluster Role Based Access + ## Ref: https://kubernetes.io/docs/admin/authorization/rbac/ + rbac: + role: + ## Rules to create. It follows the role specification + rules: + - apiGroups: ["", "extensions", "apps"] + resources: + - deployments + - persistentvolumes + - persistentvolumeclaims + - pods + - deployments/scale + verbs: ["*"] + + # Add any list of configmaps to Pipelines + configMaps: | + # posthook-start.sh: |- + # echo "This is a post start script" + # posthook-end.sh: |- + # echo "This is a post end script" + + ## Add custom volumes + customVolumes: | + # - name: custom-script + # configMap: + # name: custom-script + + ## Add custom volumesMounts + customVolumeMounts: | + # - name: custom-script + # mountPath: /scripts/script.sh + # subPath: script.sh + + ## Add custom init containers + customInitContainers: | + - name: "redhat-custom-setup" + image: {{ .Values.initContainer.image }} + imagePullPolicy: Always + command: + - 'sh' + - '-c' + - 'mkdir -p /opt/jfrog/pipelines/var/etc && mkdir -p /opt/jfrog/pipelines/var/tmp && mkdir -p /opt/jfrog/pipelines/var/log && chown -R 1117:1117 /opt/jfrog/pipelines && chmod -R 0777 /opt/jfrog/pipelines' + securityContext: + runAsUser: 0 + volumeMounts: + - mountPath: "/opt/jfrog/pipelines" + name: jfrog-pipelines-folder + + ## Add custom sidecar containers + # - The provided example uses a custom volume (customVolumes) + customSidecarContainers: | + # - name: "sidecar-list-etc" + # image: "{{ .Values.initContainer.image }}" + # imagePullPolicy: "{{ .Values.initContainer.pullPolicy }}" + # securityContext: + # allowPrivilegeEscalation: false + # command: + # - 'sh' + # - '-c' + # - 'sh /scripts/script.sh' + # volumeMounts: + # - mountPath: "{{ .Values.pipelines.mountPath }}" + # name: volume + # - mountPath: "/scripts/script.sh" + # name: custom-script + # subPath: script.sh + # resources: + # requests: + # memory: "32Mi" + # cpu: "50m" + # limits: + # memory: "128Mi" + # cpu: "100m" + + 
systemYaml: | + shared: + ## Artifactory configuration + ## + artifactory: + ## Artifactory URL + ## + baseUrl: "{{ tpl (required "\n\npipelines.jfrogUrl is required!\n" .Values.pipelines.jfrogUrl) . }}" + ## Unified UI URL + ## + baseUrlUI: "{{ tpl (required "\n\npipelines.jfrogUrlUI is required!\n" .Values.pipelines.jfrogUrlUI) . }}" + ## Pipelines Service ID + ## + serviceId: "{{ .Values.pipelines.serviceId }}" + ## Artifactory Service ID + ## + artifactoryServiceId: "{{ .Values.pipelines.artifactoryServiceId }}" + ## Artifactory License ID + ## + licenseId: "{{ .Values.pipelines.licenseId }}" + ## Proxy to connect to Artifactory + ## + proxy: + url: "" + username: "" + password: "" + + ## Router configuration + ## + router: + ip: "" + accessPort: {{ .Values.pipelines.router.internalPort }} + dataPort: {{ .Values.pipelines.router.externalPort }} + joinKey: "{{ .Values.pipelines.joinKey }}" + + security: + masterKey: "{{ .Values.pipelines.masterKey }}" + + ## Database configuration + ## + db: + type: "postgres" + {{- if .Values.postgresql.enabled }} + ip: {{ tpl .Release.Name . }}-postgresql + port: "{{ .Values.postgresql.service.port }}" + name: {{ .Values.postgresql.postgresqlDatabase }} + username: {{ .Values.postgresql.postgresqlUsername }} + password: {{ .Values.postgresql.postgresqlPassword }} + {{- else }} + ip: {{ tpl .Values.global.postgresql.host . }} + port: "{{ .Values.global.postgresql.port }}" + name: {{ .Values.global.postgresql.database }} + username: {{ .Values.global.postgresql.user }} + password: {{ .Values.global.postgresql.password }} + {{- end }} + externalUrl: "" + {{- if .Values.postgresql.enabled }} + connectionString: "{{ tpl (printf "postgres://%s:%s@%s-postgresql:%v/%s" .Values.postgresql.postgresqlUsername .Values.postgresql.postgresqlPassword .Release.Name .Values.postgresql.service.port .Values.postgresql.postgresqlDatabase) . 
}}" + {{- else if and (not .Values.postgresql.enabled) (.Values.global.postgresql.ssl) }} + connectionString: "{{ tpl (printf "postgres://%s:%s@%v:%v/%s?sslmode=require" .Values.global.postgresql.user .Values.global.postgresql.password .Values.global.postgresql.host .Values.global.postgresql.port .Values.global.postgresql.database) . }}" + {{- else }} + connectionString: "{{ tpl (printf "postgres://%s:%s@%v:%v/%s" .Values.global.postgresql.user .Values.global.postgresql.password .Values.global.postgresql.host .Values.global.postgresql.port .Values.global.postgresql.database) . }}" + {{- end }} + + ## RabbitMQ configuration + ## + msg: + {{- if .Values.rabbitmq.enabled }} + ip: {{ .Release.Name }}-rabbitmq + port: {{ .Values.rabbitmq.service.port }} + adminPort: {{ .Values.rabbitmq.service.managerPort }} + erlangCookie: {{ .Values.rabbitmq.rabbitmq.erlangCookie }} + username: {{ .Values.rabbitmq.rabbitmq.username }} + password: {{ .Values.rabbitmq.rabbitmq.password }} + defaultExchange: pipelinesEx + amqpVhost: pipelines + amqpRootVhost: pipelinesRoot + {{- else }} + ip: {{ tpl .Values.rabbitmq.internal_ip . 
}} + port: {{ .Values.rabbitmq.port}} + adminPort: {{ .Values.rabbitmq.manager_port }} + erlangCookie: {{ .Values.rabbitmq.erlang_cookie }} + username: {{ .Values.rabbitmq.ms_username }} + password: {{ .Values.rabbitmq.ms_password }} + defaultExchange: {{ .Values.rabbitmq.root_vhost_exchange_name }} + amqpVhost: {{ .Values.rabbitmq.build_vhost_name}} + amqpRootVhost: {{ .Values.rabbitmq.root_vhost_name }} + protocol: {{ .Values.rabbitmq.protocol }} + {{- end }} + queues: + - "core.pipelineSync" + - "core.runTrigger" + - "core.stepTrigger" + - "core.marshaller" + - "cluster.init" + - "core.logup" + - "www.signals" + - "core.nexec" + - "core.hookHandler" + - "core.extensionSync" + ui: + {{- if .Values.rabbitmq.enabled }} + username: {{ .Values.pipelines.msg.uiUser }} + password: {{ .Values.pipelines.msg.uiUserPassword }} + {{- else }} + protocol: http + username: {{ .Values.rabbitmq.cp_username }} + password: {{ .Values.rabbitmq.cp_password }} + {{- end }} + external: + ## URL for build plane VMs to access RabbitMQ + {{- if .Values.rabbitmq.externalUrl }} + url: {{ .Values.rabbitmq.externalUrl }} + {{- else if (and .Values.rabbitmq.serviceVmLb.enabled .Values.rabbitmq.serviceVmLb.loadBalancerIP) }} + url: amqp://{{ .Values.rabbitmq.serviceVmLb.loadBalancerIP }} + {{- else if .Values.rabbitmq.enabled }} + url: amqp://{{ tpl .Release.Name . }}-rabbitmq + {{- else }} + url: {{ .Values.rabbitmq.protocol }}://{{ tpl .Values.rabbitmq.msg_hostname . }}:{{ .Values.rabbitmq.port }} + {{- end }} + rootUrl: "" + adminUrl: "" + {{- if not .Values.rabbitmq.enabled }} + build: + username: {{ .Values.rabbitmq.build_username }} + password: {{ .Values.rabbitmq.build_password }} + {{- end }} + + ## Vault configuration + ## + vault: + {{- if .Values.vault.enabled }} + ip: {{ include "pipelines.vault.name" . 
}} + port: {{ .Values.vault.service.port }} + {{- else }} + ip: {{ .Values.global.vault.host }} + port: {{ .Values.global.vault.port }} + {{- end }} + ## DO NOT CHANGE THE TOKEN VALUE!!! + token: "_VAULT_TOKEN_" + unsealKeys: + - "" + - "" + - "" + - "" + - "" + + ## Redis configuration + ## + redis: + ip: {{ .Release.Name }}-redis-master + port: 6379 + clusterEnabled: false + + ## This section is used for bringing up the core services and setting up + ## configurations required by the installer & the services + ## + core: + ## id is automatically determined based on the current hostname + ## or set using the SHARED_NODE_ID environment variable. + ## + id: "afd8df9d08bf257ae9b7d7dbbf348b7a3a574ebdd3a61d350d4b64e3129dee85" + installerIP: "1.2.3.4" + installerAuthToken: "{{ .Values.pipelines.authToken }}" + installerImage: "jfrog/pipelines-installer" + registryUrl: "{{ .Values.imageRegistry }}" + os: "Ubuntu_16.04" + osDistribution: "xenial" + architecture: "x86_64" + dockerVersion: "" + runMode: "{{ .Values.runMode }}" + user: "" + group: "" + noVerifySsl: false + ignoreTLSErrors: false + controlplaneVersion: "{{ default .Chart.AppVersion .Values.pipelines.version }}" + buildplaneVersion: "{{ default .Chart.AppVersion .Values.pipelines.version }}" + accessControlAllowOrigins: + - {{ .Values.pipelines.accessControlAllowOrigins_0 }} + - {{ .Values.pipelines.accessControlAllowOrigins_1 }} + rabbitmqHealthCheckIntervalInMins: {{ .Values.pipelines.rabbitmqHealthCheckIntervalInMins}} + ## Global proxy settings, to be applied to all services + ## + proxy: + httpProxy: "" + httpsProxy: "" + noProxy: "" + username: "" + password: "" + + ## Mailserver settings + ## + mailserver: + host: "" + port: "" + username: "" + password: "" + tls: "" + ssl: "" + apiRetryIntervalMs: 3000 + accountSyncFrequencyHr: 1 + imageRegistrySecret: "{{ .Values.imagePullSecrets }}" + hardDeleteIntervalInMins: 60 + configBackupCount: 5 + lastUpdateTime: "" + callHomeUrl: 
"https://api.bintray.com/products/jfrog/pipelines/stats/usage" + allowCallHome: true + serviceInstanceHealthCheckIntervalInMins: 1 + serviceInstanceStatsCutOffIntervalInHours: 24 + + ## Service configuration + ## + services: + api: + name: {{ include "pipelines.api.name" . }} + port: {{ .Values.pipelines.api.service.port }} + {{- if (and .Values.pipelines.api.ingress.enabled .Values.pipelines.api.ingress.tls) }} + {{- range .Values.pipelines.api.ingress.hosts }} + externalUrl: https://{{ . }} + {{- end }} + {{- else if .Values.pipelines.api.ingress.enabled }} + {{- range .Values.pipelines.api.ingress.hosts }} + externalUrl: http://{{ . }} + {{- end }} + {{- else }} + externalUrl: {{ .Values.pipelines.api.externalUrl }} + {{- end }} + www: + name: {{ include "pipelines.www.name" . }} + port: {{ .Values.pipelines.www.service.port }} + {{- if (and .Values.pipelines.www.ingress.enabled .Values.pipelines.www.ingress.tls) }} + {{- range .Values.pipelines.www.ingress.hosts }} + externalUrl: https://{{ . }} + {{- end }} + {{- else if .Values.pipelines.www.ingress.enabled }} + {{- range .Values.pipelines.www.ingress.hosts }} + externalUrl: http://{{ . 
}} + {{- end }} + {{- else }} + externalUrl: {{ .Values.pipelines.www.externalUrl }} + {{- end }} + sessionSecret: "{{ .Values.pipelines.authToken }}" + pipelineSync: + name: pipelineSync + runTrigger: + name: runTrigger + stepTrigger: + name: stepTrigger + cron: + name: cron + nexec: + name: nexec + hookHandler: + name: hookHandler + marshaller: + name: marshaller + extensionSync: + name: extensionSync + + ## Runtime configuration + ## + runtime: + rootBucket: "{{ .Values.pipelines.rootBucket }}" + defaultMinionCount: 1 + nodeCacheIntervalMS: 600000 + jobConsoleBatchSize: 10 + jobConsoleBufferIntervalMs: 3 + maxDiskUsagePercentage: 90 + stepTimeoutMS: 3600000 + nodeStopDayOfWeek: 0 + nodeStopIntervalDays: 30 + maxNodeCheckInDelayMin: 15 + defaultMinionInstanceSize: "c4.large" + allowDynamicNodes: true + allowCustomNodes: true + {{- range $key, $value := .Values.runtimeOverride }} + {{ $key }}: {{ $value | quote }} + {{- end }} + languageImages: + - architecture: x86_64 + os: Ubuntu_16.04 + language: node + registryUrl: docker.bintray.io + image: jfrog/pipelines-u16node + isDefault: true + defaultVersion: 10.18.0 + - architecture: x86_64 + os: Ubuntu_16.04 + language: java + registryUrl: docker.bintray.io + image: jfrog/pipelines-u16java + defaultVersion: 13 + - architecture: x86_64 + os: Ubuntu_16.04 + language: cpp + registryUrl: docker.bintray.io + image: jfrog/pipelines-u16cpp + defaultVersion: 9.0.0 + - architecture: x86_64 + os: Ubuntu_16.04 + language: go + registryUrl: docker.bintray.io + image: jfrog/pipelines-u16go + defaultVersion: 1.12.14 + - architecture: x86_64 + os: Ubuntu_18.04 + language: node + registryUrl: docker.bintray.io + image: jfrog/pipelines-u18node + isDefault: true + defaultVersion: 10.18.0 + - architecture: x86_64 + os: Ubuntu_18.04 + language: java + registryUrl: docker.bintray.io + image: jfrog/pipelines-u18java + defaultVersion: 13 + - architecture: x86_64 + os: Ubuntu_18.04 + language: cpp + registryUrl: docker.bintray.io + image: 
jfrog/pipelines-u18cpp + defaultVersion: 9.0.0 + - architecture: x86_64 + os: Ubuntu_18.04 + language: go + registryUrl: docker.bintray.io + image: jfrog/pipelines-u18go + defaultVersion: 1.12.14 + - architecture: x86_64 + os: CentOS_7 + language: node + registryUrl: docker.bintray.io + image: jfrog/pipelines-c7node + isDefault: true + defaultVersion: 10.18.0 + - architecture: x86_64 + os: CentOS_7 + language: java + registryUrl: docker.bintray.io + image: jfrog/pipelines-c7java + defaultVersion: 11 + - architecture: x86_64 + os: CentOS_7 + language: cpp + registryUrl: docker.bintray.io + image: jfrog/pipelines-c7cpp + defaultVersion: 3.4.2 + - architecture: x86_64 + os: CentOS_7 + language: go + registryUrl: docker.bintray.io + image: jfrog/pipelines-c7go + defaultVersion: 1.12.14 + - architecture: x86_64 + os: WindowsServer_2019 + language: node + registryUrl: docker.bintray.io + image: jfrog/pipelines-w19node + defaultVersion: 10.18.0 + - architecture: x86_64 + os: WindowsServer_2019 + language: java + registryUrl: docker.bintray.io + image: jfrog/pipelines-w19java + defaultVersion: 11 + - architecture: x86_64 + os: WindowsServer_2019 + language: cpp + registryUrl: docker.bintray.io + image: jfrog/pipelines-w19cpp + defaultVersion: 9.0.0 + - architecture: x86_64 + os: WindowsServer_2019 + language: go + registryUrl: docker.bintray.io + image: jfrog/pipelines-w19go + defaultVersion: 1.12.14 + - architecture: x86_64 + os: WindowsServer_2019 + language: dotnetcore + registryUrl: docker.bintray.io + image: jfrog/pipelines-w19dotnetcore + isDefault: true + defaultVersion: 3.1 + - architecture: x86_64 + os: RHEL_7 + language: node + registryUrl: docker.bintray.io + image: jfrog/pipelines-c7node + isDefault: true + defaultVersion: 10.18.0 + - architecture: x86_64 + os: RHEL_7 + language: java + registryUrl: docker.bintray.io + image: jfrog/pipelines-c7java + defaultVersion: 11 + - architecture: x86_64 + os: RHEL_7 + language: cpp + registryUrl: docker.bintray.io + 
image: jfrog/pipelines-c7cpp + defaultVersion: 3.4.2 + - architecture: x86_64 + os: RHEL_7 + language: go + registryUrl: docker.bintray.io + image: jfrog/pipelines-c7go + defaultVersion: 1.12.14 + + ## Runtime Override Properties Section + runtimeOverride: {} + + # PostgreSQL + ## https://hub.helm.sh/charts/bitnami/postgresql + ## Configuration values for the postgresql dependency + ## ref: https://github.com/kubernetes/charts/blob/master/stable/postgresql/README.md + ## + postgresql: + enabled: false + + ## RabbitMQ HA + ## https://hub.helm.sh/charts/bitnami/rabbitmq + ## Configuration values for the rabbitmq dependency + ## ref: https://github.com/kubernetes/charts/blob/master/stable/rabbitmq/README.md + ## + # /var/lib/rabbitmq + rabbitmq: + enabled: true + protocol: amqps + replicas: 1 + image: + registry: registry.connect.redhat.com + repository: jfrog/xray-rabbitmq + tag: 3.8.6 + + rabbitmq: + username: admin + password: "" + + ## Erlang cookie to determine whether different nodes are allowed to communicate with each other + erlangCookie: PIPELINESRABBITMQCLUSTER + # existingErlangSecret: name-of-existing-secret + + extraPlugins: "" + + service: + type: ClusterIP + + ## Service annotations + annotations: {} + + ## Load Balancer sources + # loadBalancerSourceRanges: + # - 10.10.10.0/24 + + persistence: + enabled: true + size: 20Gi + + resources: {} + + affinity: {} + + ingress: + ## Set to true to enable ingress record generation + enabled: false + + ## The list of hostnames to be covered with this ingress record. 
+ ## Most likely this will be just one host, but in the event more hosts are needed, this is an array + # hostName: foo.bar.com + path: / + + ## Set this to true in order to enable TLS on the ingress record + ## A side effect of this will be that the backend wordpress service will be connected at port 443 + tls: true + + ## If TLS is set to true, you must declare what secret will store the key/certificate for TLS + tlsSecret: OVERRIDE + + ## Ingress annotations done as key:value pairs + annotations: + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: true + + ## External URL for Build Plane VMs to access RabbitMQ + ## e.g. amqps://pipelines-msg.doamin.com + ## It should be set for the LoadBalancer below IP with proper domain name and TLS if external IP is used. + externalUrl: OVERRIDE + + ## Service with external/internal LoadBalancer to access RabbitMQ by Node-pool VMs + serviceVmLb: + enabled: false + + annotations: + ## Set internal LB for Azure + # service.beta.kubernetes.io/azure-load-balancer-internal: "true" + ## Set internal LB for AWS + # service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0 + ## Set internal LB for GCP + # cloud.google.com/load-balancer-type: "Internal" + + ## You must to provide internal LB static IP + loadBalancerIP: + + ## Whitelist IPs allowed to LoadBalancer type services + ## Example: loadBalancerSourceRanges={82.82.190.51/32,141.141.8.8/32} + loadBalancerSourceRanges: [] + + ## Redis + ## Configuration values for the redis dependency + ## ref: https://github.com/bitnami/charts/tree/master/bitnami/redis + ## + redis: + enabled: true + image: + registry: registry.redhat.io + repository: rhel8/redis-5 + tag: 1-98 + + redisPort: 6379 + + cluster: + enabled: false + slaveCount: 2 + + usePassword: false + + master: + command: "" + configmap: |- + appendonly yes + loglevel notice + + resources: {} + # requests: + # memory: 200Mi + # cpu: 100m + # limits: + # memory: 700Mi + + affinity: {} + + slave: + resources: 
{} + # requests: + # memory: 200Mi + # cpu: 100m + # limits: + # memory: 200Mi + + affinity: {} + + ## Vault + vault: + enabled: true + + updateStrategy: RollingUpdate + + image: + repository: registry.connect.redhat.com/jfrog/pipelines-vault + tag: 1.7.1 + pullPolicy: IfNotPresent + + init: + image: + repository: jfrog/pipelines-vault-init + pullPolicy: IfNotPresent + + service: + # Supported service types: ClusterIP and NodePort + type: ClusterIP + port: 30100 + + # PRODUCTION environments should always enable mlock + disablemlock: false + + resources: {} + # requests: + # memory: 256Mi + # cpu: 200m + # limits: + # memory: 1Gi + # cpu: 600m + + affinity: {} + nodeSelector: {} + tolerations: [] + + ## Role Based Access + ## Ref: https://kubernetes.io/docs/admin/authorization/rbac/ + rbac: + role: + ## Rules to create. It follows the role specification + rules: + - apiGroups: + - '' + resources: + - secrets + verbs: + - "*" + + # Add any list of configmaps to vault + configMaps: | + # posthook-start.sh: |- + # echo "This is a post start script" + # posthook-end.sh: |- + # echo "This is a post end script" + + ## Add custom volumes + customVolumes: | + # - name: custom-script + # configMap: + # name: custom-script + + ## Add custom volumesMounts + customVolumeMounts: | + # - name: custom-script + # mountPath: /scripts/script.sh + # subPath: script.sh + + ## Add custom init containers + customInitContainers: | + # - name: "custom-setup" + # image: "{{ .Values.initContainer.image }}" + # imagePullPolicy: "{{ .Values.initContainer.pullPolicy}}" + # command: + # - 'sh' + # - '-c' + # - 'touch {{ .Values.pipelines.mountPath }}/example-custom-setup' + # volumeMounts: + # - mountPath: "{{ .Values.pipelines.mountPath}}" + # name: pipelines-data + + # Filebeat Sidecar container + filebeat: + enabled: false + + ## The Build Plane is where the actual builds will run + buildPlane: + ## Dynamic Build Plane integration for the initial bootstrapping of the build planes. 
+ ## Any required changes post install need to be done in UI: Administration/Pipelines/Integrations + dynamic: + ## customer part is not needed for on-prem install + customer: + accountId: "" + nodePoolName: "" + nodelimit: "" + provider: + aws: + enabled: false + ## Replace the dummy values with the real ones + nodePoolName: "aws-dynamic-node-pool" + nodelimit: "3" + instanceType: c4.xlarge + securityGroupId: testsecuritygroupId + subnetId: test-subnetId + keyPairName: testaccountSSHKeyPair + vpcId: testVPCId + region: us-east-1 + ## + accessKey: "" + secretKey: "" + ## Existing secret with AWS keys + existingSecret: + k8s: + enabled: false + ## Replace the dummy values with the real ones + nodePoolName: "k8s-dynamic-node-pool" + nodelimit: "3" + cpu: "1" + memory: "1000" + namespace: default + storageClass: standard + ## Node Affinity values: {key1:value1,key2:value2} + labels: + ## Kubernetes node pool kubeconfig base64 encoded + kubeconfig: "" + ## Existing secret with kubeconfig + existingSecret: