From c49b58351098e508df76987e3d3e0940b136bd64 Mon Sep 17 00:00:00 2001 From: John Peterson Date: Thu, 1 Oct 2020 17:30:12 -0700 Subject: [PATCH] Fixing changelog versions to be based off the operator version --- .../artifactory-ha-operator/CHANGELOG.md | 23 +- .../openshift-artifactory-ha/CHANGELOG.md | 29 + .../openshift-artifactory-ha/Chart.yaml | 19 + .../openshift-artifactory-ha/LICENSE | 201 +++ .../openshift-artifactory-ha/README.md | 1454 +++++++++++++++++ .../openshift-artifactory-ha/helminstall.sh | 57 + .../requirements.lock | 6 + .../requirements.yaml | 4 + .../openshift-artifactory-ha/values.yaml | 99 ++ .../operator/xray-operator/CHANGELOG.md | 11 +- .../helm-charts/openshift-xray/CHANGELOG.md | 3 + .../helm-charts/openshift-xray/Chart.yaml | 13 +- .../helm-charts/openshift-xray/LICENSE | 0 .../helm-charts/openshift-xray/README.md | 0 .../helm-charts/openshift-xray/helminstall.sh | 9 +- .../helm-charts/openshift-xray/rabbitmq.yaml | 4 +- .../openshift-xray/rabbitmqservice.yaml | 8 +- .../openshift-xray/requirements.lock | 6 +- .../openshift-xray/requirements.yaml | 2 +- .../helm-charts/openshift-xray/values.yaml | 147 +- 20 files changed, 2007 insertions(+), 88 deletions(-) create mode 100755 Openshift4/operator/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/CHANGELOG.md create mode 100755 Openshift4/operator/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/Chart.yaml create mode 100755 Openshift4/operator/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/LICENSE create mode 100755 Openshift4/operator/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/README.md create mode 100755 Openshift4/operator/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/helminstall.sh create mode 100644 Openshift4/operator/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/requirements.lock create mode 100644 Openshift4/operator/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/requirements.yaml 
create mode 100755 Openshift4/operator/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/values.yaml mode change 100644 => 100755 Openshift4/operator/xray-operator/helm-charts/openshift-xray/CHANGELOG.md mode change 100644 => 100755 Openshift4/operator/xray-operator/helm-charts/openshift-xray/Chart.yaml mode change 100644 => 100755 Openshift4/operator/xray-operator/helm-charts/openshift-xray/LICENSE mode change 100644 => 100755 Openshift4/operator/xray-operator/helm-charts/openshift-xray/README.md mode change 100644 => 100755 Openshift4/operator/xray-operator/helm-charts/openshift-xray/helminstall.sh mode change 100644 => 100755 Openshift4/operator/xray-operator/helm-charts/openshift-xray/values.yaml diff --git a/Openshift4/operator/artifactory-ha-operator/CHANGELOG.md b/Openshift4/operator/artifactory-ha-operator/CHANGELOG.md index 840b74f..0cc03df 100755 --- a/Openshift4/operator/artifactory-ha-operator/CHANGELOG.md +++ b/Openshift4/operator/artifactory-ha-operator/CHANGELOG.md @@ -1,26 +1,29 @@ # JFrog Openshift Artifactory-ha Chart Changelog All changes to this chart will be documented in this file. 
-## [3.1.0] - Aug 17, 2020 -* Updating to latest jfrog/artifactory-ha helm chart version 3.1.0 artifactory version 3.1.0 +## [1.1.0] - Sept 30, 2020 +* Updating Operator to latest jfrog/artifactory-ha helm chart version 4.1.0 artifactory version 7.9.0 -## [3.0.5] - July 16, 2020 -* Updating to latest jfrog/artifactory-ha helm chart version 3.0.5 artifactory version 7.6.3 +## [1.0.3] - Aug 17, 2020 +* Updating Operator to latest jfrog/artifactory-ha helm chart version 3.1.0 artifactory version 7.7.3 -## [2.6.0] - June 29, 2020 +## [1.0.2] - July 16, 2020 +* Updating Operator to latest jfrog/artifactory-ha helm chart version 3.0.5 artifactory version 7.6.3 + +## [1.0.1] - June 29, 2020 * Updating to latest jfrog/artifactory-ha helm chart version 2.6.0 artifactory version 7.6.1 -## [2.4.6] - May 12, 2020 +## [1.0.0] - May 12, 2020 * Updating to latest jfrog/artifactory-ha helm chart version 2.4.6 artifactory version 7.4.3 -## [2.3.0] - April 13, 2020 +## [0.4.0] - April 13, 2020 * Updating to latest jfrog/artifactory-ha helm chart version 2.3.0 -## [2.2.9] - April 11, 2020 +## [0.3.0] - April 11, 2020 * Fixed issues with master key -## [2.1.9] - March 17, 2020 +## [0.2.0] - March 17, 2020 * Updated Artifactory version to 7.3.2 -## [2.0.35] - March 09, 2020 +## [0.1.0] - March 09, 2020 * Updated Artifactory version to 7.2.1 diff --git a/Openshift4/operator/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/CHANGELOG.md b/Openshift4/operator/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/CHANGELOG.md new file mode 100755 index 0000000..4fe4ae2 --- /dev/null +++ b/Openshift4/operator/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/CHANGELOG.md @@ -0,0 +1,29 @@ +# JFrog Openshift Artifactory-ha Chart Changelog +All changes to this chart will be documented in this file. 
+ +## [4.1.0] - Sept 30, 2020 +* Updating to latest jfrog/artifactory-ha helm chart version 4.1.0 artifactory version 7.9.0 + +## [3.1.0] - Aug 17, 2020 +* Updating to latest jfrog/artifactory-ha helm chart version 3.1.0 artifactory version 7.7.3 + +## [3.0.5] - Jul 16, 2020 +* Updating to latest jfrog/artifactory-ha helm chart version 3.0.5 artifactory version 7.6.3 + +## [2.6.0] - June 29, 2020 +* Updating to latest jfrog/artifactory-ha helm chart version 2.6.0 artifactory version 7.6.1 + +## [2.4.6] - May 12, 2020 +* Updating to latest jfrog/artifactory-ha helm chart version 2.4.6 artifactory version 7.4.3 + +## [2.3.0] - April 13, 2020 +* Updating to latest jfrog/artifactory-ha helm chart version 2.3.0 + +## [2.2.9] - April 11, 2020 +* Fixed issues with master key + +## [2.1.9] - March 17, 2020 +* Updated Artifactory version to 7.3.2 + +## [2.0.35] - March 09, 2020 +* Updated Artifactory version to 7.2.1 diff --git a/Openshift4/operator/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/Chart.yaml b/Openshift4/operator/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/Chart.yaml new file mode 100755 index 0000000..8c0c988 --- /dev/null +++ b/Openshift4/operator/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/Chart.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +appVersion: 7.9.0 +description: Openshift JFrog Artifactory HA subcharting Artifactory HA to work in Openshift environment +home: https://www.jfrog.com/artifactory/ +icon: https://raw.githubusercontent.com/jfrog/charts/master/stable/artifactory-ha/logo/artifactory-logo.png +keywords: +- artifactory +- jfrog +- devops +maintainers: +- email: vinaya@jfrog.com + name: Vinay Aggarwal +- email: johnp@jfrog.com + name: John Peterson +name: openshift-artifactory-ha +sources: +- https://bintray.com/jfrog/product/JFrog-Artifactory-Pro/view +- https://github.com/jfrog/charts +version: 4.1.0 diff --git a/Openshift4/operator/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/LICENSE 
b/Openshift4/operator/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/LICENSE new file mode 100755 index 0000000..8dada3e --- /dev/null +++ b/Openshift4/operator/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/Openshift4/operator/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/README.md b/Openshift4/operator/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/README.md new file mode 100755 index 0000000..87da20a --- /dev/null +++ b/Openshift4/operator/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/README.md @@ -0,0 +1,1454 @@ +# JFrog Artifactory High Availability Helm Chart + +## Openshift +The Artifactory HA chart has been made a subchart of this chart. + +Note due to this change we now reference values through the subchart name as shown below: + +original: +``` +artifactory.node.replicaCount +``` + +now: +``` +artifactory-ha.artifactory.node.replicaCount +``` + +This is due to helm referencing them through the subchart artifactory-ha now. 
+## Prerequisites Details + +* Kubernetes 1.12+ +* Artifactory HA license + +## Chart Details +This chart will do the following: + +* Deploy Artifactory highly available cluster. 1 primary node and 2 member nodes. +* Deploy a PostgreSQL database +* Deploy an Nginx server + +## Artifactory HA architecture +The Artifactory HA cluster in this chart is made up of +- A single primary node +- Two member nodes, which can be resized at will + +Load balancing is done to the member nodes only. +This leaves the primary node free to handle jobs and tasks and not be interrupted by inbound traffic. +> This can be controlled by the parameter `artifactory.service.pool`. + +## Installing the Chart + +### Add JFrog Helm repository +Before installing JFrog helm charts, you need to add the [JFrog helm repository](https://charts.jfrog.io/) to your helm client +```bash +helm repo add jfrog https://charts.jfrog.io +``` + + +**NOTE:** Passing masterKey is mandatory for fresh install of chart (7.x Appversion) + +### Create a unique Master Key +Artifactory HA cluster requires a unique master key. + +**For production grade installations it is strongly recommended to use a custom master key. If you initially use the default master key it will be very hard to change the master key at a later stage** + +You should generate a unique one and pass it to the template at install/upgrade time. +```bash +# Create a key +export MASTER_KEY=$(openssl rand -hex 32) +echo ${MASTER_KEY} +``` + +### Install Chart +To install the chart with the release name `artifactory-ha`: + +```bash +helm upgrade --install artifactory-ha --set artifactory.masterKey=${MASTER_KEY} --namespace artifactory-ha jfrog/artifactory-ha +``` + +### System Configuration +Artifactory uses a common system configuration file - `system.yaml`. See [official documentation](https://www.jfrog.com/confluence/display/JFROG/System+YAML+Configuration+File) on its usage. 
+In order to override the default `system.yaml` configuration, do the following: +```bash +artifactory: + systemYaml: | + +``` + +### Deploying Artifactory for small/medium/large installations +In the chart directory, we have added three values files, one for each installation type - small/medium/large. These values files are recommendations for setting resources requests and limits for your installation. The values are derived from the following [documentation](https://www.jfrog.com/confluence/display/EP/Installing+on+Kubernetes#InstallingonKubernetes-Systemrequirements). You can find them in the corresponding chart directory - values-small.yaml, values-medium.yaml and values-large.yaml + +### Accessing Artifactory +**NOTE:** It might take a few minutes for Artifactory's public IP to become available, and the nodes to complete initial setup. +Follow the instructions outputted by the install command to get the Artifactory IP and URL to access it. + +### Updating Artifactory +Once you have a new chart version, you can update your deployment with +```bash +helm upgrade artifactory-ha --namespace artifactory-ha jfrog/artifactory-ha +``` + +If artifactory was installed without providing a value to postgresql.postgresqlPassword (a password was autogenerated), follow these instructions: +1. Get the current password by running: +```bash +POSTGRES_PASSWORD=$(kubectl get secret -n -postgresql -o jsonpath="{.data.postgresql-password}" | base64 --decode) +``` +2. Upgrade the release by passing the previously auto-generated secret: +```bash +helm upgrade --namespace artifactory-ha jfrog/artifactory-ha --set postgresql.postgresqlPassword=${POSTGRES_PASSWORD} +``` + +This will apply any configuration changes on your existing deployment. + +### Special Upgrade Notes +#### Artifactory upgrade from 6.x to 7.x (App Version) +Artifactory 6.x to 7.x upgrade requires a one time migration process. This is done automatically on pod startup if needed. 
+It's possible to configure the migration timeout with the following configuration in extreme cases. The provided default should be more than enough for completion of the migration. +```yaml +artifactory: + # Migration support from 6.x to 7.x + migration: + timeoutSeconds: 3600 +``` + +### Artifactory memory and CPU resources +The Artifactory HA Helm chart comes with support for configured resource requests and limits to all pods. By default, these settings are commented out. +It is **highly** recommended to set these so you have full control of the allocated resources and limits. + +See more information on [setting resources for your Artifactory based on planned usage](https://www.jfrog.com/confluence/display/RTF/System+Requirements#SystemRequirements-RecommendedHardware). + +```bash +# Example of setting resource requests and limits to all pods (including passing java memory settings to Artifactory) +helm upgrade --install artifactory-ha \ + --set artifactory.primary.resources.requests.cpu="500m" \ + --set artifactory.primary.resources.limits.cpu="2" \ + --set artifactory.primary.resources.requests.memory="1Gi" \ + --set artifactory.primary.resources.limits.memory="4Gi" \ + --set artifactory.primary.javaOpts.xms="1g" \ + --set artifactory.primary.javaOpts.xmx="4g" \ + --set artifactory.node.resources.requests.cpu="500m" \ + --set artifactory.node.resources.limits.cpu="2" \ + --set artifactory.node.resources.requests.memory="1Gi" \ + --set artifactory.node.resources.limits.memory="4Gi" \ + --set artifactory.node.javaOpts.xms="1g" \ + --set artifactory.node.javaOpts.xmx="4g" \ + --set initContainers.resources.requests.cpu="10m" \ + --set initContainers.resources.limits.cpu="250m" \ + --set initContainers.resources.requests.memory="64Mi" \ + --set initContainers.resources.limits.memory="128Mi" \ + --set postgresql.resources.requests.cpu="200m" \ + --set postgresql.resources.limits.cpu="1" \ + --set postgresql.resources.requests.memory="500Mi" \ + --set 
postgresql.resources.limits.memory="1Gi" \ + --set nginx.resources.requests.cpu="100m" \ + --set nginx.resources.limits.cpu="250m" \ + --set nginx.resources.requests.memory="250Mi" \ + --set nginx.resources.limits.memory="500Mi" \ + --namespace artifactory-ha jfrog/artifactory-ha +``` +> Artifactory java memory parameters can (and should) also be set to match the allocated resources with `artifactory.[primary|node].javaOpts.xms` and `artifactory.[primary|node].javaOpts.xmx`. + +Get more details on configuring Artifactory in the [official documentation](https://www.jfrog.com/confluence/). + +Although it is possible to set resources limits and requests this way, it is recommended to use the pre-built values files +for small, medium and large installation and change them according to your needs (if necessary), as described [here](#Deploying-Artifactory-for-small/medium/large-installations) + +### Deploying Artifactory for small/medium/large installations +In the chart directory, we have added three values files, one for each installation type - small/medium/large. These values files are recommendations for setting resources requests and limits for your installation. The values are derived from the following [documentation](https://www.jfrog.com/confluence/display/EP/Installing+on+Kubernetes#InstallingonKubernetes-Systemrequirements). You can find them in the corresponding chart directory - values-small.yaml, values-medium.yaml and values-large.yaml + +### Artifactory storage +Artifactory HA support a wide range of storage back ends. You can see more details on [Artifactory HA storage options](https://www.jfrog.com/confluence/display/RTF/HA+Installation+and+Setup#HAInstallationandSetup-SettingUpYourStorageConfiguration) + +In this chart, you set the type of storage you want with `artifactory.persistence.type` and pass the required configuration settings. +The default storage in this chart is the `file-system` replication, where the data is replicated to all nodes. 
+ +> **IMPORTANT:** All storage configurations (except NFS) come with a default `artifactory.persistence.redundancy` parameter. +This is used to set how many replicas of a binary should be stored in the cluster's nodes. +Once this value is set on initial deployment, you can not update it using helm. +It is recommended to set this to a number greater than half of your cluster's size, and never scale your cluster down to a size smaller than this number. + +#### Existing volume claim + +###### Primary node +In order to use an existing volume claim for the Artifactory primary storage, you need to: +- Create a persistent volume claim by the name `volume--artifactory-ha-primary-0` e.g `volume-myrelease-artifactory-ha-primary-0` +- Pass a parameter to `helm install` and `helm upgrade` +```bash +... +--set artifactory.primary.persistence.existingClaim=true +``` + +###### Member nodes +In order to use an existing volume claim for the Artifactory member nodes storage, you need to: +- Create persistent volume claims according to the number of replicas defined at `artifactory.node.replicaCount` by the names `volume--artifactory-ha-member-`, e.g `volume-myrelease-artifactory-ha-member-0` and `volume-myrelease-artifactory-ha-member-1`. +- Pass a parameter to `helm install` and `helm upgrade` +```bash +... +--set artifactory.node.persistence.existingClaim=true +``` + +#### Existing shared volume claim + +In order to use an existing claim (for data and backup) that is to be shared across all nodes, you need to: + +- Create PVCs with ReadWriteMany that match the naming conventions: +``` + {{ template "artifactory-ha.fullname" . }}-data-pvc- + {{ template "artifactory-ha.fullname" . 
}}-backup-pvc- +``` +An example that shows 2 existing claims to be used: +``` + myexample-artifactory-ha-data-pvc-0 + myexample-artifactory-ha-backup-pvc-0 + myexample-artifactory-ha-data-pvc-1 + myexample-artifactory-ha-backup-pvc-1 +``` +- Set the artifactory.persistence.fileSystem.existingSharedClaim.enabled in values.yaml to true: +``` +-- set artifactory.persistence.fileSystem.existingSharedClaim.enabled=true +-- set artifactory.persistence.fileSystem.existingSharedClaim.numberOfExistingClaims=2 +``` + +#### NFS +To use an NFS server as your cluster's storage, you need to +- Setup an NFS server. Get its IP as `NFS_IP` +- Create a `data` and `backup` directories on the NFS exported directory with write permissions to all +- Pass NFS parameters to `helm install` and `helm upgrade` +```bash +... +--set artifactory.persistence.type=nfs \ +--set artifactory.persistence.nfs.ip=${NFS_IP} \ +... +``` + +#### Google Storage +To use a Google Storage bucket as the cluster's filestore. See [Google Storage Binary Provider](https://www.jfrog.com/confluence/display/RTF/Configuring+the+Filestore#ConfiguringtheFilestore-GoogleStorageBinaryProvider) +- Pass Google Storage parameters to `helm install` and `helm upgrade` +```bash +... +--set artifactory.persistence.type=google-storage \ +--set artifactory.persistence.googleStorage.identity=${GCP_ID} \ +--set artifactory.persistence.googleStorage.credential=${GCP_KEY} \ +... +``` + +In order to use a GCP service account, Artifactory needs a gcp.credentials.json file in the same directory as the binarystore.xml file. +This can be generated by running: +```bash +gcloud iam service-accounts keys create --iam-account +``` +Which will produce the following, which can be saved to a file or copied into your `values.yaml`. 
+```bash +{ + "type": "service_account", + "project_id": "", + "private_key_id": "?????", + "private_key": "-----BEGIN PRIVATE KEY-----\n????????==\n-----END PRIVATE KEY-----\n", + "client_email": "???@j.iam.gserviceaccount.com", + "client_id": "???????", + "auth_uri": "https://accounts.google.com/o/oauth2/auth", + "token_uri": "https://oauth2.googleapis.com/token", + "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", + "client_x509_cert_url": "https://www.googleapis.com/robot/v1....." +} +``` + +One option is to create your own [Secret](https://kubernetes.io/docs/concepts/configuration/secret/) and pass it to your `helm install` in a custom `values.yaml` +```bash +# Create the Kubernetes secret from the file you created earlier. +# IMPORTANT: The file must be called "gcp.credentials.json" because this is used later as the secret key! +kubectl create secret generic artifactory-gcp-creds --from-file=./gcp.credentials.json +``` +Set this secret in your custom `values.yaml` +```bash +artifactory: + persistence: + googleStorage: + gcpServiceAccount: + enabled: true + customSecretName: artifactory-gcp-creds +``` + +Another option is to put your generated config directly in your custom `values.yaml` and a secret will be created from it +``` +artifactory: + persistence: + googleStorage: + gcpServiceAccount: + enabled: true + config: | + { + "type": "service_account", + "project_id": "", + "private_key_id": "?????", + "private_key": "-----BEGIN PRIVATE KEY-----\n????????==\n-----END PRIVATE KEY-----\n", + "client_email": "???@j.iam.gserviceaccount.com", + "client_id": "???????", + "auth_uri": "https://accounts.google.com/o/oauth2/auth", + "token_uri": "https://oauth2.googleapis.com/token", + "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", + "client_x509_cert_url": "https://www.googleapis.com/robot/v1....." 
+ } +``` + +#### AWS S3 +**NOTE** Keep in mind that when using the `aws-s3` persistence type, you will not be able to provide an IAM on the pod level. +In order to grant permissions to Artifactory using an IAM role, you will have to attach the IAM role to the machine(s) on which Artifactory is running. +This is due to the fact that the `aws-s3` template uses the `JetS3t` library to interact with AWS. If you want to grant an IAM role at the pod level, see the `AWS S3 V3` section. + +To use an AWS S3 bucket as the cluster's filestore. See [S3 Binary Provider](https://www.jfrog.com/confluence/display/RTF/Configuring+the+Filestore#ConfiguringtheFilestore-S3BinaryProvider) +- Pass AWS S3 parameters to `helm install` and `helm upgrade` +```bash +... +# With explicit credentials: +--set artifactory.persistence.type=aws-s3 \ +--set artifactory.persistence.awsS3.endpoint=${AWS_S3_ENDPOINT} \ +--set artifactory.persistence.awsS3.region=${AWS_REGION} \ +--set artifactory.persistence.awsS3.identity=${AWS_ACCESS_KEY_ID} \ +--set artifactory.persistence.awsS3.credential=${AWS_SECRET_ACCESS_KEY} \ +... + +... +# With using existing IAM role +--set artifactory.persistence.type=aws-s3 \ +--set artifactory.persistence.awsS3.endpoint=${AWS_S3_ENDPOINT} \ +--set artifactory.persistence.awsS3.region=${AWS_REGION} \ +--set artifactory.persistence.awsS3.roleName=${AWS_ROLE_NAME} \ +... +``` +**NOTE:** Make sure S3 `endpoint` and `region` match. See [AWS documentation on endpoint](https://docs.aws.amazon.com/general/latest/gr/rande.html) + +#### AWS S3 V3 +To use an AWS S3 bucket as the cluster's filestore and access it with the official AWS SDK, See [S3 Official SDK Binary Provider](https://www.jfrog.com/confluence/display/RTF/Configuring+the+Filestore#ConfiguringtheFilestore-AmazonS3OfficialSDKTemplate). +This filestore template uses the official AWS SDK, unlike the `aws-s3` implementation that uses the `JetS3t` library. 
+Use this template if you want to attach an IAM role to the Artifactory pod directly (as opposed to attaching it to the machine/s that Artifactory will run on). + +**NOTE** This will have to be combined with a k8s mechanism for attaching IAM roles to pods, like [kube2iam](https://github.com/helm/charts/tree/master/stable/kube2iam) or anything similar. + +- Pass AWS S3 V3 parameters and the annotation pointing to the IAM role (when using an IAM role. this is kube2iam specific and may vary depending on the implementation) to `helm install` and `helm upgrade` + +```bash +# With explicit credentials: +--set artifactory.persistence.type=aws-s3-v3 \ +--set artifactory.persistence.awsS3V3.region=${AWS_REGION} \ +--set artifactory.persistence.awsS3V3.bucketName=${AWS_S3_BUCKET_NAME} \ +--set artifactory.persistence.awsS3V3.identity=${AWS_ACCESS_KEY_ID} \ +--set artifactory.persistence.awsS3V3.credential=${AWS_SECRET_ACCESS_KEY} \ +... +``` + +```bash +# With using existing IAM role +--set artifactory.persistence.type=aws-s3-v3 \ +--set artifactory.persistence.awsS3V3.region=${AWS_REGION} \ +--set artifactory.persistence.awsS3V3.bucketName=${AWS_S3_BUCKET_NAME} \ +--set artifactory.annotations.'iam\.amazonaws\.com/role'=${AWS_IAM_ROLE_ARN} +... +``` + +#### Microsoft Azure Blob Storage +To use Azure Blob Storage as the cluster's filestore. See [Azure Blob Storage Binary Provider](https://www.jfrog.com/confluence/display/RTF/Configuring+the+Filestore#ConfiguringtheFilestore-AzureBlobStorageClusterBinaryProvider) +- Pass Azure Blob Storage parameters to `helm install` and `helm upgrade` +```bash +... +--set artifactory.persistence.type=azure-blob \ +--set artifactory.persistence.azureBlob.accountName=${AZURE_ACCOUNT_NAME} \ +--set artifactory.persistence.azureBlob.accountKey=${AZURE_ACCOUNT_KEY} \ +--set artifactory.persistence.azureBlob.endpoint=${AZURE_ENDPOINT} \ +--set artifactory.persistence.azureBlob.containerName=${AZURE_CONTAINER_NAME} \ +... 
+``` + +#### Custom binarystore.xml +You have an option to provide a custom [binarystore.xml](https://www.jfrog.com/confluence/display/RTF/Configuring+the+Filestore).
+There are two options for this + +1. Editing directly in [values.yaml](values.yaml) +```yaml +artifactory: + persistence: + binarystoreXml: | + + + + + +``` + +2. Create your own [Secret](https://kubernetes.io/docs/concepts/configuration/secret/) and pass it to your `helm install` command +```yaml +# Prepare your custom Secret file (custom-binarystore.yaml) +kind: Secret +apiVersion: v1 +metadata: + name: custom-binarystore + labels: + app: artifactory + chart: artifactory +stringData: + binarystore.xml: |- + + + + +``` + +```bash +# Create a secret from the file +kubectl apply -n artifactory -f ./custom-binarystore.yaml + +# Pass it to your helm install command: +helm upgrade --install artifactory-ha --set artifactory.persistence.customBinarystoreXmlSecret=custom-binarystore --namespace artifactory-ha jfrog/artifactory-ha +``` + +### Create a unique Master Key + +Alternatively, you can create a secret containing the master key manually and pass it to the template at install/upgrade time. +```bash +# Create a key +export MASTER_KEY=$(openssl rand -hex 32) +echo ${MASTER_KEY} + +# Create a secret containing the key. The key in the secret must be named master-key +kubectl create secret generic my-secret --from-literal=master-key=${MASTER_KEY} + +# Pass the created secret to helm +helm upgrade --install artifactory-ha --set artifactory.masterKeySecretName=my-secret --namespace artifactory-ha jfrog/artifactory-ha +``` +**NOTE:** In either case, make sure to pass the same master key on all future calls to `helm install` and `helm upgrade`! In the first case, this means always passing `--set artifactory.masterKey=${MASTER_KEY}`. In the second, this means always passing `--set artifactory.masterKeySecretName=my-secret` and ensuring the contents of the secret remain unchanged. 
+ +### Special Upgrade Notes +### MasterKey during 6.x to 7.x Migration (App version) + +**NOTE:** 6.x only supports masterKey with 16 hex (32 characters) and if you have set masterKey using `openssl rand -hex 32` (64 characters) in 6.x, only the first 32 characters are used and rest are ignored. Hence, during 6.x to 7.x migration, we trim first 32 characters and set masterkey, which implies 7.x still uses the trimmed masterkey of 6.x. Hence, `artifactory.masterKey` should not be passed during migration from 6.x to 7.x. + +### Create a unique Join Key +Artifactory requires a unique join key. By default the chart has one set in values.yaml (`artifactory.joinKey`). + +**This key is for demo purpose and should not be used in a production environment!** + +You should generate a unique key and pass it to the template at install/upgrade time. +```bash +# Create a key +export JOIN_KEY=$(openssl rand -hex 32) +echo ${JOIN_KEY} + +# Pass the created join key to helm +helm upgrade --install artifactory-ha --set artifactory.joinKey=${JOIN_KEY} --namespace artifactory-ha jfrog/artifactory-ha +``` + +Alternatively, you can create a secret containing the join key manually and pass it to the template at install/upgrade time. +```bash +# Create a key +export JOIN_KEY=$(openssl rand -hex 32) +echo ${JOIN_KEY} + +# Create a secret containing the key. The key in the secret must be named join-key +kubectl create secret generic my-secret --from-literal=join-key=${JOIN_KEY} + +# Pass the created secret to helm +helm upgrade --install artifactory-ha --set artifactory.joinKeySecretName=my-secret --namespace artifactory-ha jfrog/artifactory-ha +``` +**NOTE:** In either case, make sure to pass the same join key on all future calls to `helm install` and `helm upgrade`! This means always passing `--set artifactory.joinKey=${JOIN_KEY}`. In the second, this means always passing `--set artifactory.joinKeySecretName=my-secret` and ensuring the contents of the secret remain unchanged.. 
+ +### Install Artifactory HA license +For activating Artifactory HA, you must install an appropriate license. There are three ways to manage the license. **Artifactory UI**, **REST API**, or a **Kubernetes Secret**. + +The easier and recommended way is the **Artifactory UI**. Using the **Kubernetes Secret** or **REST API** is for advanced users and is better suited for automation. + +**IMPORTANT:** You should use only one of the following methods. Switching between them while a cluster is running might disable your Artifactory HA cluster! + +##### Artifactory UI +Once primary cluster is running, open Artifactory UI and insert the license(s) in the UI. See [HA installation and setup](https://www.jfrog.com/confluence/display/RTF/HA+Installation+and+Setup) for more details. **Note that you should enter all licenses at once, with each license is separated by a newline.** If you add the licenses one at a time, you may get redirected to a node without a license and the UI won't load for that node. + +##### REST API +You can add licenses via REST API (https://www.jfrog.com/confluence/display/RTF/Artifactory+REST+API#ArtifactoryRESTAPI-InstallHAClusterLicenses). Note that the REST API expects "\n" for the newlines in the licenses. + +##### Kubernetes Secret +You can deploy the Artifactory license(s) as a [Kubernetes secret](https://kubernetes.io/docs/concepts/configuration/secret/). +Prepare a text file with the license(s) written in it. If writing multiple licenses (must be in the same file), it's important to put **two new lines between each license block**! 
+```bash
+# Create the Kubernetes secret (assuming the local license file is 'art.lic')
+kubectl create secret generic artifactory-cluster-license --from-file=./art.lic
+
+# Pass the license to helm
+helm upgrade --install artifactory-ha --set artifactory.license.secret=artifactory-cluster-license,artifactory.license.dataKey=art.lic --namespace artifactory-ha jfrog/artifactory-ha
+```
+**NOTE:** This method is relevant for initial deployment only! Once Artifactory is deployed, you should not keep passing these parameters as the license is already persisted into Artifactory's storage (they will be ignored).
+Updating the license should be done via Artifactory UI or REST API.
+
+##### Create the secret as part of the helm release
+values.yaml
+```yaml
+artifactory:
+  license:
+    licenseKey: |-
+      <LICENSE_KEY_1>
+
+      <LICENSE_KEY_2>
+
+      <LICENSE_KEY_3>
+```
+
+```bash
+helm upgrade --install artifactory-ha -f values.yaml --namespace artifactory-ha jfrog/artifactory-ha
+```
+**NOTE:** This method is relevant for initial deployment only! Once Artifactory is deployed, you should not keep passing these parameters as the license is already persisted into Artifactory's storage (they will be ignored).
+Updating the license should be done via Artifactory UI or REST API.
+If you want to keep managing the artifactory license using the same method, you can use the copyOnEveryStartup example shown in the values.yaml file
+
+
+### copyOnEveryStartup feature
+Files stored in the `/artifactory-extra-conf` directory are only copied to the `ARTIFACTORY_HOME/etc` directory upon the first startup.
+In some cases, you want your configuration files to be copied to the `ARTIFACTORY_HOME/etc` directory on every startup.
+Two examples for that would be:
+
+1. the binarystore.xml file. If you use the default behaviour, your binarystore.xml configuration will only be copied on the first startup,
+which means that changes you make over time to the `binaryStoreXml` configuration will not be applied. 
In order to make sure your changes are applied on every startup, do the following: +Create a values file with the following values: +```yaml +artifactory: + copyOnEveryStartup: + - source: /artifactory_bootstrap/binarystore.xml + target: etc/artifactory/ +``` + +Install the helm chart with the values file you created: +```bash +helm upgrade --install artifactory-ha --namespace artifactory-ha jfrog/artifactory-ha -f values.yaml +``` + +2. Any custom configuration file you have to configure artifactory, such as `logback.xml`: +Create a config map with your `logback.xml` configuration. + +Create a values file with the following values: +```yaml +artifactory: + ## Create a volume pointing to the config map with your configuration file + customVolumes: | + - name: logback-xml-configmap + configMap: + name: logback-xml-configmap + customVolumeMounts: | + - name: logback-xml-configmap + mountPath: /tmp/artifactory-logback/ + copyOnEveryStartup: + - source: /tmp/artifactory-logback/* + target: etc/ +``` + +Install the helm chart with the values file you created: +```bash +helm upgrade --install artifactory-ha --namespace artifactory-ha jfrog/artifactory-ha -f values.yaml +``` + +### Configure NetworkPolicy + +NetworkPolicy specifies what ingress and egress is allowed in this namespace. It is encouraged to be more specific whenever possible to increase security of the system. + +In the `networkpolicy` section of values.yaml you can specify a list of NetworkPolicy objects. + +For podSelector, ingress and egress, if nothing is provided then a default `- {}` is applied which is to allow everything. + +A full (but very wide open) example that results in 2 NetworkPolicy objects being created: +```yaml +networkpolicy: + # Allows all ingress and egress to/from artifactory primary and member pods. 
+ - name: artifactory + podSelector: + matchLabels: + app: artifactory-ha + egress: + - {} + ingress: + - {} + # Allows connectivity from artifactory-ha pods to postgresql pods, but no traffic leaving postgresql pod. + - name: postgresql + podSelector: + matchLabels: + app: postgresql + ingress: + - from: + - podSelector: + matchLabels: + app: artifactory-ha +``` + +### Artifactory JMX Configuration +** You can see some information about the exposed MBeans here - https://www.jfrog.com/confluence/display/RTF/Artifactory+JMX+MBeans + +Enable JMX in your deployment: +```bash +helm upgrade --install artifactory \ + --set artifactory.primary.javaOpts.jmx.enabled=true \ + --set artifactory.node.javaOpts.jmx.enabled=true \ + --namespace artifactory-ha jfrog/artifactory-ha +``` +This will enable access to Artifactory with JMX on the default port (9010). +** You have the option to change the port by setting ```artifactory.primary.javaOpts.jmx.port``` and ```artifactory.node.javaOpts.jmx.port``` +to your choice of port + +In order to connect to Artifactory using JMX with jconsole (or any similar tool) installed on your computer, follow the following steps: +1. Enable JMX as described above and Change the Artifactory service to be of type LoadBalancer: +```bash +helm upgrade --install artifactory \ + --set artifactory.primary.javaOpts.jmx.enabled=true \ + --set artifactory.node.javaOpts.jmx.enabled=true \ + --set artifactory.service.type=LoadBalancer \ + --namespace artifactory-ha jfrog/artifactory-ha +``` +2. The default setting for java.rmi.server.hostname is the service name (this is also configurable with +```artifactory.primary.javaOpts.jmx.host``` and ```artifactory.node.javaOpts.jmx.host```), So in order to connect to Artifactory +with jconsole you should map the Artifactory kuberentes service IP to the service name using your hosts file as such: +``` + artifactory-ha--primary + +``` +3. 
Launch jconsole with the service address and port: +```bash +jconsole artifactory-ha--primary: +jconsole : +``` + +### Bootstrapping Artifactory admin password +You can bootstrap the `admin` user password as described in the [bootstrap Artifactory admin credentials](https://www.jfrog.com/confluence/display/JFROG/Users+and+Groups#UsersandGroups-RecreatingtheDefaultAdminUserrecreate) guide. + +1. Create `admin-creds-values.yaml` and provide the IP (By default 127.0.0.1) and password: +```yaml +artifactory: + admin: + ip: "" # Example: "*" to allow access from anywhere + username: "admin" + password: "" +``` + +2. Apply the `admin-creds-values.yaml` file: +```bash +helm upgrade --install artifactory --namespace artifactory-ha jfrog/artifactory-ha -f admin-creds-values.yaml +``` + +### Bootstrapping Artifactory configuration +**IMPORTANT:** Bootstrapping Artifactory needs license. Pass license as shown in above section. + +* User guide to [bootstrap Artifactory Global Configuration](https://www.jfrog.com/confluence/display/RTF/Configuration+Files#ConfigurationFiles-BootstrappingtheGlobalConfiguration) +* User guide to [bootstrap Artifactory Security Configuration](https://www.jfrog.com/confluence/display/RTF/Configuration+Files#ConfigurationFiles-BootstrappingtheSecurityConfiguration) + +1. Create `bootstrap-config.yaml` with artifactory.config.import.xml and security.import.xml as shown below: +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: my-release-bootstrap-config +data: + artifactory.config.import.xml: | + + security.import.xml: | + +``` + +2. Create configMap in Kubernetes: +```bash +kubectl apply -f bootstrap-config.yaml +``` +3. 
Pass the configMap to helm
+```bash
+helm upgrade --install artifactory-ha --set artifactory.license.secret=artifactory-cluster-license,artifactory.license.dataKey=art.lic,artifactory.configMapName=my-release-bootstrap-config --namespace artifactory-ha jfrog/artifactory-ha
+```
+
+### Use custom nginx.conf with Nginx
+
+Steps to create configMap with nginx.conf
+* Create `nginx.conf` file.
+```bash
+kubectl create configmap nginx-config --from-file=nginx.conf
+```
+* Pass configMap to helm install
+```bash
+helm upgrade --install artifactory-ha --set nginx.customConfigMap=nginx-config --namespace artifactory-ha jfrog/artifactory-ha
+```
+
+### Scaling your Artifactory cluster
+A key feature in Artifactory HA is the ability to set an initial cluster size with `--set artifactory.node.replicaCount=${CLUSTER_SIZE}` and if needed, resize it.
+
+##### Before scaling
+**IMPORTANT:** When scaling, you need to explicitly pass the database password if it's an auto generated one (this is the default with the enclosed PostgreSQL helm chart).
+
+Get the current database password
+```bash
+export DB_PASSWORD=$(kubectl get $(kubectl get secret -o name | grep postgresql) -o jsonpath="{.data.postgresql-password}" | base64 --decode)
+```
+Use `--set postgresql.postgresqlPassword=${DB_PASSWORD}` with every scale action to prevent a misconfigured cluster!
+
+##### Scale up
+Let's assume you have a cluster with **2** member nodes, and you want to scale up to **3** member nodes (a total of 4 nodes).
+```bash
+# Scale to 4 nodes (1 primary and 3 member nodes)
+helm upgrade --install artifactory-ha --set artifactory.node.replicaCount=3 --set postgresql.postgresqlPassword=${DB_PASSWORD} --namespace artifactory-ha jfrog/artifactory-ha
+```
+
+##### Scale down
+Let's assume you have a cluster with **3** member nodes, and you want to scale down to **2** member nodes. 
+
+```bash
+# Scale down to 2 member nodes
+helm upgrade --install artifactory-ha --set artifactory.node.replicaCount=2 --set postgresql.postgresqlPassword=${DB_PASSWORD} --namespace artifactory-ha jfrog/artifactory-ha
+```
+- **NOTE:** Since Artifactory is running as a Kubernetes Stateful Set, the removal of the node will **not** remove the persistent volume. You need to explicitly remove it
+```bash
+# List PVCs
+kubectl get pvc
+
+# Remove the PVC with highest ordinal!
+# In this example, the highest node ordinal was 2, so need to remove its storage.
+kubectl delete pvc volume-artifactory-node-2
+```
+
+### Use an external Database
+
+**For production grade installations it is recommended to use an external PostgreSQL with a static password**
+
+#### PostgreSQL
+There are cases where you will want to use external PostgreSQL with a different database name e.g. `my-artifactory-db`, then you need to set a custom PostgreSQL connection URL, where `my-artifactory-db` is the database name.
+
+This can be done with the following parameters
+```bash
+...
+--set postgresql.enabled=false \
+--set database.type=postgresql \
+--set database.driver=org.postgresql.Driver \
+--set database.url='jdbc:postgresql://${DB_HOST}:${DB_PORT}/my-artifactory-db' \
+--set database.user=${DB_USER} \
+--set database.password=${DB_PASSWORD} \
+...
+```
+**NOTE:** You must set `postgresql.enabled=false` in order for the chart to use the `database.*` parameters. Without it, they will be ignored!
+
+#### Other DB type
+There are cases where you will want to use a different database and not the enclosed **PostgreSQL**.
+See more details on [configuring the database](https://www.jfrog.com/confluence/display/RTF/Configuring+the+Database)
+> The official Artifactory Docker images include the PostgreSQL database driver. 
+> For other database types, you will have to add the relevant database driver to Artifactory's tomcat/lib + +This can be done with the following parameters +```bash +# Make sure your Artifactory Docker image has the MySQL database driver in it +... +--set postgresql.enabled=false \ +--set artifactory.preStartCommand="wget -O /opt/jfrog/artifactory/tomcat/lib/mysql-connector-java-5.1.41.jar https://jcenter.bintray.com/mysql/mysql-connector-java/5.1.41/mysql-connector-java-5.1.41.jar" \ +--set database.type=mysql \ +--set database.driver=com.mysql.jdbc.Driver \ +--set database.url=${DB_URL} \ +--set database.user=${DB_USER} \ +--set database.password=${DB_PASSWORD} \ +... +``` +**NOTE:** You must set `postgresql.enabled=false` in order for the chart to use the `database.*` parameters. Without it, they will be ignored! + +#### Using pre-existing Kubernetes Secret +If you store your database credentials in a pre-existing Kubernetes `Secret`, you can specify them via `database.secrets` instead of `database.user` and `database.password`: +```bash +# Create a secret containing the database credentials +kubectl create secret generic my-secret --from-literal=user=${DB_USER} --from-literal=password=${DB_PASSWORD} +... +--set postgresql.enabled=false \ +--set database.secrets.user.name=my-secret \ +--set database.secrets.user.key=user \ +--set database.secrets.password.name=my-secret \ +--set database.secrets.password.key=password \ +... +``` + +### Deleting Artifactory +To delete the Artifactory HA cluster + +On helm v2: +```bash +helm delete --purge artifactory-ha +``` + +On helm v3: +```bash +helm delete artifactory-ha --namespace artifactory-ha +``` + +This will completely delete your Artifactory HA cluster. +**NOTE:** Since Artifactory is running as Kubernetes Stateful Sets, the removal of the helm release will **not** remove the persistent volumes. 
You need to explicitly remove them
+```bash
+kubectl delete pvc -l release=artifactory-ha
+```
+See more details in the official [Kubernetes Stateful Set removal page](https://kubernetes.io/docs/tasks/run-application/delete-stateful-set/)
+
+### Custom Docker registry for your images
+If you need to pull your Docker images from a private registry (for example, when you have a custom image with a MySQL database driver), you need to create a
+[Kubernetes Docker registry secret](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/) and pass it to helm
+```bash
+# Create a Docker registry secret called 'regsecret'
+kubectl create secret docker-registry regsecret --docker-server=${DOCKER_REGISTRY} --docker-username=${DOCKER_USER} --docker-password=${DOCKER_PASS} --docker-email=${DOCKER_EMAIL}
+```
+Once created, you pass it to `helm`
+```bash
+helm upgrade --install artifactory-ha --set imagePullSecrets=regsecret --namespace artifactory-ha jfrog/artifactory-ha
+```
+
+### Logger sidecars
+This chart provides the option to add sidecars to tail various logs from Artifactory. See the available values in [values.yaml](values.yaml)
+
+Get list of containers in the pod
+```bash
+kubectl get pods -n <NAMESPACE> -o jsonpath='{.spec.containers[*].name}' | tr ' ' '\n'
+```
+
+View specific log
+```bash
+kubectl logs <POD_NAME> -n <NAMESPACE> -c <LOG_CONTAINER_NAME>
+```
+
+
+### Custom init containers
+There are cases where a special, unsupported init process is needed like checking something on the file system or testing something before spinning up the main container.
+
+For this, there is a section for writing custom init containers before and after the predefined init containers in the [values.yaml](values.yaml). 
By default it's commented out +```yaml +artifactory: + ## Add custom init containers executed before predefined init containers + customInitContainersBegin: | + ## Init containers template goes here ## + ## Add custom init containers executed after predefined init containers + customInitContainers: | + ## Init containers template goes here ## +``` + +### Custom sidecar containers +There are cases where an extra sidecar container is needed. For example monitoring agents or log collection. + +For this, there is a section for writing a custom sidecar container in the [values.yaml](values.yaml). By default it's commented out +```yaml +artifactory: + ## Add custom sidecar containers + customSidecarContainers: | + ## Sidecar containers template goes here ## +``` + +You can configure the sidecar to run as a custom user if needed by setting the following in the container template +```yaml + # Example of running container as root (id 0) + securityContext: + runAsUser: 0 + fsGroup: 0 +``` + +### Custom volumes +If you need to use a custom volume in a custom init or sidecar container, you can use this option. + +For this, there is a section for defining custom volumes in the [values.yaml](values.yaml). By default it's commented out +```yaml +artifactory: + ## Add custom volumes + customVolumes: | + ## Custom volume comes here ## +``` + +### Add Artifactory User Plugin during installation +If you need to add [Artifactory User Plugin](https://github.com/jfrog/artifactory-user-plugins), you can use this option. 
+ +Create a secret with [Artifactory User Plugin](https://github.com/jfrog/artifactory-user-plugins) by following command: +```bash +# Secret with single user plugin +kubectl create secret generic archive-old-artifacts --from-file=archiveOldArtifacts.groovy --namespace=artifactory-ha + +# Secret with single user plugin with configuration file +kubectl create secret generic webhook --from-file=webhook.groovy --from-file=webhook.config.json.sample --namespace=artifactory-ha +``` + +Add plugin secret names to `plugins.yaml` as following: +```yaml +artifactory: + userPluginSecrets: + - archive-old-artifacts + - webhook +``` + +You can now pass the created `plugins.yaml` file to helm install command to deploy Artifactory with user plugins as follows: +```bash +helm upgrade --install artifactory-ha -f plugins.yaml --namespace artifactory-ha jfrog/artifactory-ha +``` + +Alternatively, you may be in a situation in which you would like to create a secret in a Helm chart that depends on this chart. In this scenario, the name of the secret is likely dynamically generated via template functions, so passing a statically named secret isn't possible. In this case, the chart supports evaluating strings as templates via the [`tpl`](https://helm.sh/docs/charts_tips_and_tricks/#using-the-tpl-function) function - simply pass the raw string containing the templating language used to name your secret as a value instead by adding the following to your chart's `values.yaml` file: +```yaml +artifactory-ha: # Name of the artifactory-ha dependency + artifactory: + userPluginSecrets: + - '{{ template "my-chart.fullname" . }}' +``` +NOTE: By defining userPluginSecrets, this overrides any pre-defined plugins from the container image that are stored in /tmp/plugins. At this time [artifactory-pro:6.9.0](https://bintray.com/jfrog/artifactory-pro) is distributed with `internalUser.groovy` plugin. 
If you need this plugin in addition to your user plugins, you should include these additional plugins as part of your userPluginSecrets. + +### Provide custom configMaps to Artifactory +If you want to mount a custom file to Artifactory, either an init shell script or a custom configuration file (such as `logback.xml`), you can use this option. + +Create a `configmaps.yaml` file with the following content: +```yaml +artifactory: + configMaps: | + logback.xml: | + + + + + %date [%-5level] \(%-20c{3}:%L\) %message%n + + + + + + + + + + + + + + + my-custom-post-start-hook.sh: | + echo "This is my custom post start hook" + + customVolumeMounts: | + - name: artifactory-configmaps + mountPath: /tmp/my-config-map + + postStartCommand: | + chmod +x /tmp/my-config-map/my-custom-post-start-hook.sh; + /tmp/my-config-map/my-custom-post-start-hook.sh; + + copyOnEveryStartup: + - source: /tmp/my-config-map/logback.xml + target: etc/ + +``` + +and use it with you helm install/upgrade: +```bash +helm upgrade --install artifactory-ha -f configmaps.yaml --namespace artifactory-ha jfrog/artifactory-ha +``` + +This will, in turn: +* create a configMap with the files you specified above +* create a volume pointing to the configMap with the name `artifactory-configmaps` +* Mount said configMap onto `/tmp/my-config-map` using a `customVolumeMounts` +* Set the shell script we mounted as the `postStartCommand` +* Copy the `logback.xml` file to its proper location in the `$ARTIFACTORY_HOME/etc` directory. + + +### Artifactory filebeat +If you want to collect logs from your Artifactory installation and send them to a central log collection solution like ELK, you can use this option. 
+
+Create a `filebeat.yaml` values file with the following content:
+```yaml
+filebeat:
+  enabled: true
+  logstashUrl: <YOUR_LOGSTASH_URL>
+  resources:
+    requests:
+      memory: "100Mi"
+      cpu: "100m"
+    limits:
+      memory: "100Mi"
+      cpu: "100m"
+```
+
+You can optionally customize the `filebeat.yaml` to send output to a different location like so:
+```yaml
+filebeat:
+  enabled: true
+  filebeatYml: |
+    <YOUR_CUSTOM_FILEBEAT_YML>
+```
+
+and use it with your helm install/upgrade:
+```bash
+helm upgrade --install artifactory-ha -f filebeat.yaml --namespace artifactory-ha jfrog/artifactory-ha
+```
+
+This will start sending your Artifactory logs to the log aggregator of your choice, based on your configuration in the `filebeatYml`
+
+## Configuration
+The following table lists the configurable parameters of the artifactory chart and their default values.
+
+| Parameter | Description | Default |
+|------------------------------|-----------------------------------|-------------------------------------------------------|
+| `imagePullSecrets` | Docker registry pull secret | |
+| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` |
+| `serviceAccount.name` | The name of the ServiceAccount to create | Generated using the fullname template |
+| `serviceAccount.annotations` | Artifactory service account annotations | `` |
+| `rbac.create` | Specifies whether RBAC resources should be created | `true` |
+| `rbac.role.rules` | Rules to create | `[]` |
+| `logger.image.repository` | repository for logger image | `busybox` |
+| `logger.image.tag` | tag for logger image | `1.30` |
+| `artifactory.name` | Artifactory name | `artifactory` |
+| `artifactory.image.pullPolicy` | Container pull policy | `IfNotPresent` |
+| `artifactory.image.repository` | Container image | `docker.bintray.io/jfrog/artifactory-pro` |
+| `artifactory.image.version` | Container image tag | `.Chart.AppVersion` |
+| `artifactory.priorityClass.create` | Create a PriorityClass object | `false` |
+| `artifactory.priorityClass.value` 
| Priority Class value | `1000000000` | +| `artifactory.priorityClass.name` | Priority Class name | `{{ template "artifactory-ha.fullname" . }}` | +| `artifactory.priorityClass.existingPriorityClass` | Use existing priority class | `` | +| `artifactory.loggers` | Artifactory loggers (see values.yaml for possible values) | `[]` | +| `artifactory.loggersResources.requests.memory` | Artifactory loggers initial memory request | | +| `artifactory.loggersResources.requests.cpu` | Artifactory loggers initial cpu request | | +| `artifactory.loggersResources.limits.memory` | Artifactory loggers memory limit | | +| `artifactory.loggersResources.limits.cpu` | Artifactory loggers cpu limit | | +| `artifactory.catalinaLoggers` | Artifactory Tomcat loggers (see values.yaml for possible values) | `[]` | +| `artifactory.catalinaLoggersResources.requests.memory` | Artifactory Tomcat loggers initial memory request | | +| `artifactory.catalinaLoggersResources.requests.cpu` | Artifactory Tomcat loggers initial cpu request | | +| `artifactory.catalinaLoggersResources.limits.memory` | Artifactory Tomcat loggers memory limit | | +| `artifactory.catalinaLoggersResources.limits.cpu` | Artifactory Tomcat loggers cpu limit | | +| `artifactory.customInitContainersBegin`| Custom init containers to run before existing init containers | | +| `artifactory.customInitContainers`| Custom init containers to run after existing init containers | | +| `artifactory.customSidecarContainers`| Custom sidecar containers | | +| `artifactory.customVolumes` | Custom volumes | | +| `artifactory.customVolumeMounts` | Custom Artifactory volumeMounts | | +| `artifactory.customPersistentPodVolumeClaim` | Custom PVC spec to create and attach a unique PVC for each pod on startup with the volumeClaimTemplates feature in StatefulSet | | +| `artifactory.customPersistentVolumeClaim` | Custom PVC spec to be mounted to the all artifactory containers using a volume | | +| `artifactory.userPluginSecrets` | Array of secret 
names for Artifactory user plugins | | +| `artifactory.masterKey` | Artifactory master key. A 128-Bit key size (hexadecimal encoded) string (32 hex characters). Can be generated with `openssl rand -hex 32`. NOTE: This key can be generated only once and cannot be updated once created |``| +| `artifactory.masterKeySecretName` | Artifactory Master Key secret name | | +| `artifactory.joinKey` | Join Key to connect other services to Artifactory. Can be generated with `openssl rand -hex 32` | `` | +| `artifactory.joinKeySecretName` | Artifactory join Key secret name | | +| `artifactory.admin.ip` | Artifactory admin ip to be set upon startup, can use (*) for 0.0.0.0| `127.0.0.1` | +| `artifactory.admin.username` | Artifactory admin username to be set upon startup| `admin` | +| `artifactory.admin.password` | Artifactory admin password to be set upon startup| | +| `artifactory.admin.secret` | Artifactory admin secret name | | +| `artifactory.admin.dataKey` | Artifactory admin secret data key | | +| `artifactory.preStartCommand` | Command to run before entrypoint starts | | +| `artifactory.postStartCommand` | Command to run after container starts. Supports templating with `tpl` | | +| `artifactory.license.licenseKey` | Artifactory license key. Providing the license key as a parameter will cause a secret containing the license key to be created as part of the release. Use either this setting or the license.secret and license.dataKey. If you use both, the latter will be used. | | +| `artifactory.configMaps` | configMaps to be created as volume by the name `artifactory-configmaps`. 
In order to use these configMaps, you will need to add `customVolumeMounts` to point to the created volume and mount it onto a container | | +| `artifactory.license.secret` | Artifactory license secret name | | +| `artifactory.license.dataKey`| Artifactory license secret data key | | +| `artifactory.service.name` | Artifactory service name to be set in Nginx configuration | `artifactory` | +| `artifactory.service.type` | Artifactory service type | `ClusterIP` | +| `artifactory.service.clusterIP`| Specific cluster IP or `None` for headless services | `nil` | +| `artifactory.service.loadBalancerSourceRanges`| Artifactory service array of IP CIDR ranges to whitelist (only when service type is LoadBalancer) | | +| `artifactory.service.annotations` | Artifactory service annotations | `{}` | +| `artifactory.service.pool` | Artifactory instances to be in the load balancing pool. `members` or `all` | `members` | +| `artifactory.externalPort` | Artifactory service external port | `8082` | +| `artifactory.internalPort` | Artifactory service internal port (**DO NOT** use port lower than 1024) | `8082` | +| `artifactory.internalArtifactoryPort` | Artifactory service internal port (**DO NOT** use port lower than 1024) | `8081` | +| `artifactory.externalArtifactoryPort` | Artifactory service external port | `8081` | +| `artifactory.extraEnvironmentVariables` | Extra environment variables to pass to Artifactory. Supports evaluating strings as templates via the [`tpl`](https://helm.sh/docs/charts_tips_and_tricks/#using-the-tpl-function) function. 
See [documentation](https://www.jfrog.com/confluence/display/RTF/Installing+with+Docker#InstallingwithDocker-SupportedEnvironmentVariables) | | +| `artifactory.livenessProbe.enabled` | Enable liveness probe | `true` | +| `artifactory.livenessProbe.path` | liveness probe HTTP Get path | `/router/api/v1/system/health` | +| `artifactory.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 180 | +| `artifactory.livenessProbe.periodSeconds` | How often to perform the probe | 10 | +| `artifactory.livenessProbe.timeoutSeconds` | When the probe times out | 10 | +| `artifactory.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | 1 | +| `artifactory.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 10 | +| `artifactory.readinessProbe.enabled` | would you like a readinessProbe to be enabled | `true` | +| `artifactory.readinessProbe.path` | readiness probe HTTP Get path | `/router/api/v1/system/health` | +| `artifactory.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | 60 | +| `artifactory.readinessProbe.periodSeconds` | How often to perform the probe | 10 | +| `artifactory.readinessProbe.timeoutSeconds` | When the probe times out | 10 | +| `artifactory.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | 1 | +| `artifactory.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 10 | +| `artifactory.copyOnEveryStartup` | List of files to copy on startup from source (which is absolute) to target (which is relative to ARTIFACTORY_HOME | | +| `artifactory.deleteDBPropertiesOnStartup` | Whether to delete the ARTIFACTORY_HOME/etc/db.properties file on startup. 
Disabling this will remove the ability for the db.properties to be updated with any DB-related environment variables change (e.g. DB_HOST, DB_URL) | `true` | +| `artifactory.database.maxOpenConnections` | Maximum amount of open connections from Artifactory to the DB | `80` | +| `artifactory.haDataDir.enabled` | Enable haDataDir for eventual storage in the HA cluster | `false` | +| `artifactory.haDataDir.path` | Path to the directory intended for use with NFS eventual configuration for HA | | +| `artifactory.haBackupDir.enabled` | Enable haBackupDir for eventual storage in the HA cluster | `false` | +| `artifactory.haBackupDir.path` | Path to the directory intended for use with NFS eventual configuration for HA | | +| `artifactory.haBackupDir.enabled` | Enable haBackupDir for eventual storage in the HA cluster | `false` | +| `artifactory.haBackupDir.path` | Path to the directory intended for use with NFS eventual configuration for HA | | +| `artifactory.migration.timeout` | Artifactory migration Maximum Time out in seconds| `3600` | +| `artifactory.migration.timeout` | Artifactory migration Maximum Time out in seconds| `3600` | +| `artifactory.persistence.mountPath` | Artifactory persistence volume mount path | `"/var/opt/jfrog/artifactory"` | +| `artifactory.persistence.enabled` | Artifactory persistence volume enabled | `true` | +| `artifactory.persistence.accessMode` | Artifactory persistence volume access mode | `ReadWriteOnce` | +| `artifactory.persistence.size` | Artifactory persistence or local volume size | `200Gi` | +| `artifactory.persistence.binarystore.enabled` | whether you want to mount the binarystore.xml file from a secret created by the chart. 
If `false` you will need need to get the binarystore.xml file into the file-system from either an `initContainer` or using a `preStartCommand` | `true` | +| `artifactory.persistence.binarystoreXml` | Artifactory binarystore.xml template | See `values.yaml` | +| `artifactory.persistence.customBinarystoreXmlSecret` | A custom Secret for binarystore.xml | `` | +| `artifactory.persistence.maxCacheSize` | Artifactory cache-fs provider maxCacheSize in bytes | `50000000000` | +| `artifactory.persistence.cacheProviderDir` | the root folder of binaries for the filestore cache. If the value specified starts with a forward slash ("/") it is considered the fully qualified path to the filestore folder. Otherwise, it is considered relative to the *baseDataDir*. | `cache` | +| `artifactory.persistence.type` | Artifactory HA storage type | `file-system` | +| `artifactory.persistence.redundancy` | Artifactory HA storage redundancy | `3` | +| `artifactory.persistence.nfs.ip` | NFS server IP | | +| `artifactory.persistence.nfs.haDataMount` | NFS data directory | `/data` | +| `artifactory.persistence.nfs.haBackupMount` | NFS backup directory | `/backup` | +| `artifactory.persistence.nfs.dataDir` | HA data directory | `/var/opt/jfrog/artifactory-ha` | +| `artifactory.persistence.nfs.backupDir` | HA backup directory | `/var/opt/jfrog/artifactory-backup` | +| `artifactory.persistence.nfs.capacity` | NFS PVC size | `200Gi` | +| `artifactory.persistence.nfs.mountOptions` | NFS mount options | `[]` | +| `artifactory.persistence.eventual.numberOfThreads` | Eventual number of threads | `10` | +| `artifactory.persistence.googleStorage.endpoint` | Google Storage API endpoint| `storage.googleapis.com` | +| `artifactory.persistence.googleStorage.httpsOnly` | Google Storage API has to be consumed https only| `false` | +| `artifactory.persistence.googleStorage.bucketName` | Google Storage bucket name | `artifactory-ha` | +| `artifactory.persistence.googleStorage.identity` | Google Storage service 
account id | | +| `artifactory.persistence.googleStorage.credential` | Google Storage service account key | | +| `artifactory.persistence.googleStorage.path` | Google Storage path in bucket | `artifactory-ha/filestore` | +| `artifactory.persistence.googleStorage.bucketExists`| Google Storage bucket exists therefore does not need to be created.| `false` | +| `artifactory.persistence.awsS3.bucketName` | AWS S3 bucket name | `artifactory-ha` | +| `artifactory.persistence.awsS3.endpoint` | AWS S3 bucket endpoint | See https://docs.aws.amazon.com/general/latest/gr/rande.html | +| `artifactory.persistence.awsS3.region` | AWS S3 bucket region | | +| `artifactory.persistence.awsS3.roleName` | AWS S3 IAM role name | | +| `artifactory.persistence.awsS3.identity` | AWS S3 AWS_ACCESS_KEY_ID | | +| `artifactory.persistence.awsS3.credential` | AWS S3 AWS_SECRET_ACCESS_KEY | | +| `artifactory.persistence.awsS3.properties` | AWS S3 additional properties | | +| `artifactory.persistence.awsS3.path` | AWS S3 path in bucket | `artifactory-ha/filestore` | +| `artifactory.persistence.awsS3.refreshCredentials` | AWS S3 renew credentials on expiration | `true` (When roleName is used, this parameter will be set to true) | +| `artifactory.persistence.awsS3.httpsOnly` | AWS S3 https access to the bucket only | `true` | +| `artifactory.persistence.awsS3.testConnection` | AWS S3 test connection on start up | `false` | +| `artifactory.persistence.awsS3.s3AwsVersion` | AWS S3 signature version | `AWS4-HMAC-SHA256` | +| `artifactory.persistence.awsS3V3.testConnection` | AWS S3 test connection on start up | `false` | +| `artifactory.persistence.awsS3V3.identity` | AWS S3 AWS_ACCESS_KEY_ID | | +| `artifactory.persistence.awsS3V3.credential` | AWS S3 AWS_SECRET_ACCESS_KEY | | +| `artifactory.persistence.awsS3V3.region` | AWS S3 bucket region | | +| `artifactory.persistence.awsS3V3.bucketName` | AWS S3 bucket name | `artifactory-aws` | +| `artifactory.persistence.awsS3V3.path` | AWS S3 path in bucket 
| `artifactory/filestore` | +| `artifactory.persistence.awsS3V3.endpoint` | AWS S3 bucket endpoint | See https://docs.aws.amazon.com/general/latest/gr/rande.html | +| `artifactory.persistence.awsS3V3.kmsServerSideEncryptionKeyId` | AWS S3 encryption key ID or alias | | +| `artifactory.persistence.awsS3V3.kmsKeyRegion` | AWS S3 KMS Key region | | +| `artifactory.persistence.awsS3V3.kmsCryptoMode` | AWS S3 KMS encryption mode | See https://www.jfrog.com/confluence/display/RTF/Configuring+the+Filestore#ConfiguringtheFilestore-AmazonS3OfficialSDKTemplate | +| `artifactory.persistence.awsS3V3.useInstanceCredentials` | AWS S3 Use default authentication mechanism | See https://www.jfrog.com/confluence/display/RTF/Configuring+the+Filestore#ConfiguringtheFilestore-authentication | +| `artifactory.persistence.awsS3V3.usePresigning` | AWS S3 Use URL signing | `false` | +| `artifactory.persistence.awsS3V3.signatureExpirySeconds` | AWS S3 Validity period in seconds for signed URLs | `300` | +| `artifactory.persistence.awsS3V3.cloudFrontDomainName` | AWS CloudFront domain name | See https://www.jfrog.com/confluence/display/RTF/Direct+Cloud+Storage+Download#DirectCloudStorageDownload-UsingCloudFront(Optional)| +| `artifactory.persistence.awsS3V3.cloudFrontKeyPairId` | AWS CloudFront key pair ID | See https://www.jfrog.com/confluence/display/RTF/Direct+Cloud+Storage+Download#DirectCloudStorageDownload-UsingCloudFront(Optional)| +| `artifactory.persistence.awsS3V3.cloudFrontPrivateKey` | AWS CloudFront private key | See https://www.jfrog.com/confluence/display/RTF/Direct+Cloud+Storage+Download#DirectCloudStorageDownload-UsingCloudFront(Optional)| +| `artifactory.persistence.azureBlob.accountName` | Azure Blob Storage account name | `` | +| `artifactory.persistence.azureBlob.accountKey` | Azure Blob Storage account key | `` | +| `artifactory.persistence.azureBlob.endpoint` | Azure Blob Storage endpoint | `` | +| `artifactory.persistence.azureBlob.containerName` | Azure Blob Storage 
container name | `` | +| `artifactory.persistence.azureBlob.testConnection` | Azure Blob Storage test connection | `false` | +| `artifactory.persistence.fileSystem.existingSharedClaim` | Enable using an existing shared pvc | `false` | +| `artifactory.persistence.fileStorage.dataDir` | HA data directory | `/var/opt/jfrog/artifactory/artifactory-data` | +| `artifactory.persistence.fileStorage.backupDir` | HA backup directory | `/var/opt/jfrog/artifactory-backup` | +| `artifactory.javaOpts.other` | Artifactory additional java options (for all nodes) | | +| `artifactory.ssh.enabled` | Enable Artifactory SSH access | | +| `artifactory.ssh.internalPort` | Artifactory SSH internal port | `1339` | +| `artifactory.ssh.externalPort` | Artifactory SSH external port | `1339` | +| `artifactory.primary.preStartCommand` | Artifactory primary node preStartCommand to be run after `artifactory.preStartCommand` | | +| `artifactory.primary.labels` | Artifactory primary node labels | `{}` | +| `artifactory.primary.resources.requests.memory` | Artifactory primary node initial memory request | | +| `artifactory.primary.resources.requests.cpu` | Artifactory primary node initial cpu request | | +| `artifactory.primary.resources.limits.memory` | Artifactory primary node memory limit | | +| `artifactory.primary.resources.limits.cpu` | Artifactory primary node cpu limit | | +| `artifactory.primary.javaOpts.xms` | Artifactory primary node java Xms size | | +| `artifactory.primary.javaOpts.xmx` | Artifactory primary node java Xmx size | | +| `artifactory.primary.javaOpts.corePoolSize` | The number of async processes that can run in parallel in the primary node - https://jfrog.com/knowledge-base/how-do-i-tune-artifactory-for-heavy-loads/ | `16` | +| `artifactory.primary.javaOpts.jmx.enabled` | Enable JMX monitoring | `false` | +| `artifactory.primary.javaOpts.jmx.port` | JMX Port number | `9010` | +| `artifactory.primary.javaOpts.jmx.host` | JMX hostname (parsed as a helm template) | `{{ 
template "artifactory-ha.primary.name" $ }}` | +| `artifactory.primary.javaOpts.jmx.ssl` | Enable SSL | `false` | +| `artifactory.primary.javaOpts.jmx.authenticate` | Enable JMX authentication | `false` | +| `artifactory.primary.javaOpts.jmx.accessFile` | The path to the JMX access file, when JMX authentication is enabled | | +| `artifactory.primary.javaOpts.jmx.passwordFile` | The path to the JMX password file, when JMX authentication is enabled | | +| `artifactory.primary.javaOpts.other` | Artifactory primary node additional java options | | +| `artifactory.primary.persistence.existingClaim` | Whether to use an existing pvc for the primary node | `false` | +| `artifactory.node.preStartCommand` | Artifactory member node preStartCommand to be run after `artifactory.preStartCommand` | | +| `artifactory.node.labels` | Artifactory member node labels | `{}` | +| `artifactory.node.replicaCount` | Artifactory member node replica count | `2` | +| `artifactory.node.minAvailable` | Artifactory member node min available count | `1` | +| `artifactory.node.resources.requests.memory` | Artifactory member node initial memory request | | +| `artifactory.node.resources.requests.cpu` | Artifactory member node initial cpu request | | +| `artifactory.node.resources.limits.memory` | Artifactory member node memory limit | | +| `artifactory.node.resources.limits.cpu` | Artifactory member node cpu limit | | +| `artifactory.node.javaOpts.xms` | Artifactory member node java Xms size | | +| `artifactory.node.javaOpts.xmx` | Artifactory member node java Xmx size | | +| `artifactory.node.javaOpts.corePoolSize` | The number of async processes that can run in parallel in the member nodes - https://jfrog.com/knowledge-base/how-do-i-tune-artifactory-for-heavy-loads/ | `16` | +| `artifactory.node.javaOpts.jmx.enabled` | Enable JMX monitoring | `false` | +| `artifactory.node.javaOpts.jmx.port` | JMX Port number | `9010` | +| `artifactory.node.javaOpts.jmx.host` | JMX hostname (parsed as a helm
template) | `{{ template "artifactory-ha.fullname" $ }}` | +| `artifactory.node.javaOpts.jmx.ssl` | Enable SSL | `false` | +| `artifactory.node.javaOpts.jmx.authenticate` | Enable JMX authentication | `false` | +| `artifactory.node.javaOpts.jmx.accessFile` | The path to the JMX access file, when JMX authentication is enabled | | +| `artifactory.node.javaOpts.jmx.passwordFile` | The path to the JMX password file, when JMX authentication is enabled | | +| `artifactory.node.javaOpts.other` | Artifactory member node additional java options | | +| `artifactory.node.persistence.existingClaim` | Whether to use existing PVCs for the member nodes | `false` | +| `artifactory.terminationGracePeriodSeconds` | Termination grace period (seconds) | `30s` | +| `artifactory.node.waitForPrimaryStartup.enabled` | Whether to wait for the primary node to start before starting up the member nodes | `false` | +| `artifactory.node.waitForPrimaryStartup.time` | The amount of time to wait for the primary node to start before starting up the member nodes | `60` | +| `artifactory.systemYaml` | Artifactory system configuration (`system.yaml`) as described here - https://www.jfrog.com/confluence/display/JFROG/Artifactory+System+YAML | `see values.yaml` | +| `access.database.maxOpenConnections` | Maximum amount of open connections from Access to the DB | `80` | +| `initContainers.resources.requests.memory` | Init containers initial memory request | | +| `initContainers.resources.requests.cpu` | Init containers initial cpu request | | +| `initContainers.resources.limits.memory` | Init containers memory limit | | +| `initContainers.resources.limits.cpu` | Init containers cpu limit | | +| `ingress.enabled` | If true, Artifactory Ingress will be created | `false` | +| `ingress.annotations` | Artifactory Ingress annotations | `{}` | +| `ingress.labels` | Artifactory Ingress labels | `{}` | +| `ingress.hosts` | Artifactory Ingress hostnames | `[]` | +| `ingress.routerPath` | Router Ingress path | `/` 
| +| `ingress.artifactoryPath` | Artifactory Ingress path | `/artifactory` | +| `ingress.tls` | Artifactory Ingress TLS configuration (YAML) | `[]` | +| `ingress.defaultBackend.enabled` | If true, the default `backend` will be added using serviceName and servicePort | `true` | +| `ingress.annotations` | Ingress annotations, which are written out if annotations section exists in values. Everything inside of the annotations section will appear verbatim inside the resulting manifest. See `Ingress annotations` section below for examples of how to leverage the annotations, specifically for how to enable docker authentication. | | +| `ingress.additionalRules` | Ingress additional rules to be added to the Artifactory ingress. | `[]` | +| `metadata.database.maxOpenConnections` | Maximum amount of open connections from metadata to the DB | `80` | +| `nginx.enabled` | Deploy nginx server | `true` | +| `nginx.kind` | Nginx object kind, for example `DaemonSet`, `Deployment` or `StatefulSet` | `Deployment` | +| `nginx.name` | Nginx name | `nginx` | +| `nginx.replicaCount` | Nginx replica count | `1` | +| `nginx.uid` | Nginx User Id | `104` | +| `nginx.gid` | Nginx Group Id | `107` | +| `nginx.image.repository` | Container image | `docker.bintray.io/jfrog/nginx-artifactory-pro` | +| `nginx.image.version` | Container version | `.Chart.AppVersion` | +| `nginx.image.pullPolicy` | Container pull policy | `IfNotPresent` | +| `nginx.labels` | Nginx deployment labels | `{}` | +| `nginx.minAvailable` | Nginx node min available count | `0` | +| `nginx.loggers` | Nginx loggers (see values.yaml for possible values) | `[]` | +| `nginx.loggersResources.requests.memory` | Nginx logger initial memory request | | +| `nginx.loggersResources.requests.cpu` | Nginx logger initial cpu request | | +| `nginx.loggersResources.limits.memory` | Nginx logger memory limit | | +| `nginx.loggersResources.limits.cpu` | Nginx logger cpu limit | | +| `nginx.logs.stderr` | Send nginx logs to stderr | false | +| 
`nginx.logs.level` | Nginx log level: debug, info, notice, warn, error, crit, alert, or emerg | warn | +| `nginx.mainConf` | Content of the Artifactory nginx main nginx.conf config file | `see values.yaml` | +| `nginx.artifactoryConf` | Content of Artifactory nginx artifactory.conf config file | `see values.yaml` | +| `nginx.service.type` | Nginx service type | `LoadBalancer` | +| `nginx.service.clusterIP` | Specific cluster IP or `None` for headless services | `nil` | +| `nginx.service.loadBalancerSourceRanges`| Nginx service array of IP CIDR ranges to whitelist (only when service type is LoadBalancer) | | +| `nginx.service.labels` | Nginx service labels | `{}` | +| `nginx.service.annotations` | Nginx service annotations | `{}` | +| `nginx.service.ssloffload` | Nginx service SSL offload | false | +| `nginx.service.externalTrafficPolicy`| Nginx service desires to route external traffic to node-local or cluster-wide endpoints. | `Cluster` | +| `nginx.loadBalancerIP`| Provide Static IP to configure with Nginx | | +| `nginx.http.enabled` | Nginx http service enabled/disabled | true | +| `nginx.http.externalPort` | Nginx service external port | `80` | +| `nginx.http.internalPort` | Nginx service internal port | `80` | +| `nginx.https.enabled` | Nginx http service enabled/disabled | true | +| `nginx.https.externalPort` | Nginx service external port | `443` | +| `nginx.https.internalPort` | Nginx service internal port | `443` | +| `nginx.ssh.internalPort` | Nginx SSH internal port | `22` | +| `nginx.ssh.externalPort` | Nginx SSH external port | `22` | +| `nginx.externalPortHttp` | DEPRECATED: Nginx service external port | `80` | +| `nginx.internalPortHttp` | DEPRECATED: Nginx service internal port | `80` | +| `nginx.externalPortHttps` | DEPRECATED: Nginx service external port | `443` | +| `nginx.internalPortHttps` | DEPRECATED: Nginx service internal port | `443` | +| `nginx.livenessProbe.enabled` | would you like a liveness Probe to be enabled | `true` | +| 
`nginx.livenessProbe.path` | liveness probe HTTP Get path | `/router/api/v1/system/health` | +| `nginx.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 100 | +| `nginx.livenessProbe.periodSeconds` | How often to perform the probe | 10 | +| `nginx.livenessProbe.timeoutSeconds` | When the probe times out | 10 | +| `nginx.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | 1 | +| `nginx.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 10 | +| `nginx.readinessProbe.enabled` | would you like a readinessProbe to be enabled | `true` | +| `nginx.readinessProbe.path` | Readiness probe HTTP Get path | `/router/api/v1/system/health` | +| `nginx.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | 60 | +| `nginx.readinessProbe.periodSeconds` | How often to perform the probe | 10 | +| `nginx.readinessProbe.timeoutSeconds` | When the probe times out | 10 | +| `nginx.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | 1 | +| `nginx.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| 10 | +| `nginx.tlsSecretName` | SSL secret that will be used by the Nginx pod | | +| `nginx.customConfigMap` | Nginx CustomConfigMap name for `nginx.conf` | ` ` | +| `nginx.customArtifactoryConfigMap`| Nginx CustomConfigMap name for `artifactory-ha.conf` | ` ` | +| `nginx.resources.requests.memory` | Nginx initial memory request | `250Mi` | +| `nginx.resources.requests.cpu` | Nginx initial cpu request | `100m` | +| `nginx.resources.limits.memory` | Nginx memory limit | `250Mi` | +| `nginx.resources.limits.cpu` | Nginx cpu limit | `500m` | +| `nginx.persistence.mountPath` | Nginx persistence volume mount path | `"/var/opt/jfrog/nginx"` | +| `nginx.persistence.enabled` | Nginx persistence volume enabled. This is only available when the nginx.replicaCount is set to 1 | `false` | +| `nginx.persistence.accessMode` | Nginx persistence volume access mode | `ReadWriteOnce` | +| `nginx.persistence.size` | Nginx persistence volume size | `5Gi` | +| `waitForDatabase` | Wait for database (using wait-for-db init container) | `true` | +| `postgresql.enabled` | Use enclosed PostgreSQL as database | `true` | +| `postgresql.imageTag` | PostgreSQL version | `9.6.11` | +| `postgresql.postgresqlDatabase` | PostgreSQL database name | `artifactory` | +| `postgresql.postgresqlUsername` | PostgreSQL database user | `artifactory` | +| `postgresql.postgresqlPassword` | PostgreSQL database password | | +| `postgresql.postgresqlExtendedConf.listenAddresses` | PostgreSQL listen address | `"'*'"` | +| `postgresql.postgresqlExtendedConf.maxConnections` | PostgreSQL max_connections parameter | `1500` | +| `postgresql.persistence.enabled` | PostgreSQL use persistent storage | `true` | +| `postgresql.persistence.size` | PostgreSQL persistent storage size | `50Gi` | +| `postgresql.service.port` | PostgreSQL database port | `5432` | +| `postgresql.resources.requests.memory` | PostgreSQL initial memory request | | +| `postgresql.resources.requests.cpu` | PostgreSQL initial cpu request | | +|
`postgresql.resources.limits.memory` | PostgreSQL memory limit | | +| `postgresql.resources.limits.cpu` | PostgreSQL cpu limit | | +| `database.type` | External database type (`postgresql`, `mysql`, `oracle` or `mssql`) | | +| `database.driver` | External database driver e.g. `org.postgresql.Driver` | | +| `database.url` | External database connection URL | | +| `database.user` | External database username | | +| `database.password` | External database password | | +| `database.secrets.user.name` | External database username `Secret` name | | +| `database.secrets.user.key` | External database username `Secret` key | | +| `database.secrets.password.name` | External database password `Secret` name | | +| `database.secrets.password.key` | External database password `Secret` key | | +| `database.secrets.url.name ` | External database url `Secret` name | | +| `database.secrets.url.key` | External database url `Secret` key | | +| `networkpolicy.name` | Becomes part of the NetworkPolicy object name | `artifactory` | +| `networkpolicy.podselector` | Contains the YAML that specifies how to match pods. Usually using matchLabels. 
| | +| `networkpolicy.ingress` | YAML snippet containing to & from rules applied to incoming traffic | `- {}` (open to all inbound traffic) | +| `networkpolicy.egress` | YAML snippet containing to & from rules applied to outgoing traffic | `- {}` (open to all outbound traffic) | +| `filebeat.enabled` | Enable a filebeat container to send your logs to a log management solution like ELK | `false` | +| `filebeat.name` | filebeat container name | `artifactory-filebeat` | +| `filebeat.image.repository` | filebeat Docker image repository | `docker.elastic.co/beats/filebeat` | +| `filebeat.image.version` | filebeat Docker image version | `7.5.1` | +| `filebeat.logstashUrl` | The URL to the central Logstash service, if you have one | `logstash:5044` | +| `filebeat.livenessProbe.exec.command` | liveness probe exec command | see [values.yaml](stable/artifactory-ha/values.yaml) | +| `filebeat.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 10 | +| `filebeat.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 180 | +| `filebeat.livenessProbe.periodSeconds` | How often to perform the probe | 10 | +| `filebeat.readinessProbe.exec.command` | readiness probe exec command | see [values.yaml](stable/artifactory-ha/values.yaml) | +| `filebeat.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| 10 | +| `filebeat.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | 180 | +| `filebeat.readinessProbe.periodSeconds` | How often to perform the probe | 10 | +| `filebeat.resources.requests.memory` | Filebeat initial memory request | | +| `filebeat.resources.requests.cpu` | Filebeat initial cpu request | | +| `filebeat.resources.limits.memory` | Filebeat memory limit | | +| `filebeat.resources.limits.cpu` | Filebeat cpu limit | | +| `filebeat.filebeatYml` | Filebeat yaml configuration file | see [values.yaml](stable/artifactory-ha/values.yaml) | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. + +### Install Artifactory HA with Nginx and Terminate SSL in Nginx Service(LoadBalancer). +To install the helm chart with performing SSL offload in the LoadBalancer layer of Nginx. +For Ex: Using AWS ACM certificates to do SSL offload in the loadbalancer layer. + +```bash +helm upgrade --install artifactory-ha \ + --set nginx.service.ssloffload=true \ + --set nginx.https.enabled=false \ + --set nginx.service.annotations."service\.beta\.kubernetes\.io/aws-load-balancer-ssl-cert"="arn:aws:acm:xx-xxxx:xxxxxxxx:certificate/xxxxxxxxxxxxx" \ + --set nginx.service.annotations."service\.beta\.kubernetes\.io"/aws-load-balancer-backend-protocol=http \ + --set nginx.service.annotations."service\.beta\.kubernetes\.io"/aws-load-balancer-ssl-ports=https \ + --namespace artifactory-ha jfrog/artifactory-ha +``` + +### Ingress and TLS +To get Helm to create an ingress object with a hostname, add these two lines to your Helm command: +```bash +helm upgrade --install artifactory-ha \ + --set ingress.enabled=true \ + --set ingress.hosts[0]="artifactory.company.com" \ + --set artifactory.service.type=NodePort \ + --set nginx.enabled=false \ + --namespace artifactory-ha jfrog/artifactory-ha +``` + +If your cluster allows automatic creation/retrieval of TLS certificates (e.g. 
[cert-manager](https://github.com/jetstack/cert-manager)), please refer to the documentation for that mechanism. + +To manually configure TLS, first create/retrieve a key & certificate pair for the address(es) you wish to protect. Then create a TLS secret in the namespace: + +```bash +kubectl create secret tls artifactory-tls --cert=path/to/tls.cert --key=path/to/tls.key +``` + +Include the secret's name, along with the desired hostnames, in the Artifactory Ingress TLS section of your custom `values.yaml` file: + +```yaml + ingress: + ## If true, Artifactory Ingress will be created + ## + enabled: true + + ## Artifactory Ingress hostnames + ## Must be provided if Ingress is enabled + ## + hosts: + - artifactory.domain.com + annotations: + kubernetes.io/tls-acme: "true" + ## Artifactory Ingress TLS configuration + ## Secrets must be manually created in the namespace + ## + tls: + - secretName: artifactory-tls + hosts: + - artifactory.domain.com +``` + +### Ingress annotations + +This example specifically enables Artifactory to work as a Docker Registry using the Repository Path method. See [Artifactory as Docker Registry](https://www.jfrog.com/confluence/display/RTF/Getting+Started+with+Artifactory+as+a+Docker+Registry) documentation for more information about this setup. 
+ +```yaml +ingress: + enabled: true + defaultBackend: + enabled: false + hosts: + - myhost.example.com + annotations: + ingress.kubernetes.io/force-ssl-redirect: "true" + ingress.kubernetes.io/proxy-body-size: "0" + ingress.kubernetes.io/proxy-read-timeout: "600" + ingress.kubernetes.io/proxy-send-timeout: "600" + kubernetes.io/ingress.class: nginx + nginx.ingress.kubernetes.io/configuration-snippet: | + rewrite ^/(v2)/token /artifactory/api/docker/null/v2/token; + rewrite ^/(v2)/([^\/]*)/(.*) /artifactory/api/docker/$2/$1/$3; + nginx.ingress.kubernetes.io/proxy-body-size: "0" + tls: + - hosts: + - "myhost.example.com" +``` + +### Ingress additional rules + +You have the option to add additional ingress rules to the Artifactory ingress. An example for this use case can be routing the /xray path to Xray. +In order to do that, simply add the following to a `artifactory-ha-values.yaml` file: +```yaml +ingress: + enabled: true + + defaultBackend: + enabled: false + + annotations: + kubernetes.io/ingress.class: nginx + nginx.ingress.kubernetes.io/configuration-snippet: | + rewrite "(?i)/xray(/|$)(.*)" /$2 break; + + additionalRules: | + - host: + http: + paths: + - path: / + backend: + serviceName: + servicePort: + - path: /xray + backend: + serviceName: + servicePort: + - path: /artifactory + backend: + serviceName: {{ template "artifactory.nginx.fullname" . }} + servicePort: {{ .Values.nginx.externalPortHttp }} +``` + +and running: +```bash +helm upgrade --install artifactory-ha --namespace artifactory-ha jfrog/artifactory-ha -f artifactory-ha-values.yaml +``` + +### Ingress behind another load balancer +If you are running a load balancer, that is used to offload the TLS, in front of Nginx Ingress Controller, or if you are setting **X-Forwarded-*** headers, you might want to enable **'use-forwarded-headers=true'** option. Otherwise nginx will be filling those headers with the request information it receives from the external load balancer. 
+ +To enable it with `helm install` +```bash +helm upgrade --install nginx-ingress --namespace nginx-ingress stable/nginx-ingress --set-string controller.config.use-forwarded-headers=true +``` +or `helm upgrade` +```bash +helm upgrade nginx-ingress --set-string controller.config.use-forwarded-headers=true stable/nginx-ingress +``` +or create a values.yaml file with the following content: +```yaml +controller: + config: + use-forwarded-headers: "true" +``` +Then install nginx-ingress with the values file you created: +```bash +helm upgrade --install nginx-ingress --namespace nginx-ingress stable/nginx-ingress -f values.yaml +``` + +## Useful links +- https://www.jfrog.com/confluence/display/EP/Getting+Started +- https://www.jfrog.com/confluence/display/RTF/Installing+Artifactory +- https://www.jfrog.com/confluence/ diff --git a/Openshift4/operator/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/helminstall.sh b/Openshift4/operator/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/helminstall.sh new file mode 100755 index 0000000..09e060d --- /dev/null +++ b/Openshift4/operator/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/helminstall.sh @@ -0,0 +1,57 @@ +#!/usr/bin/env bash + +# PreReq'd: +# helm install postgres bitnami/postgresql +# follow artifactory postgresql db setup: +# https://www.jfrog.com/confluence/display/JFROG/PostgreSQL +POSTGRES=$(helm ls | grep postgres | wc -l) + +if [[ "$POSTGRES" =~ (0) ]] +then + echo "External DB is required to run Jfrog Openshift Artifactory Helm chart" + echo "" + echo "Postgresql helm chart must be installed prior to installing this helm installer script." 
+ echo "" + echo "helm install postgres bitnami/postgresql" + echo "" + echo "follow artifactory postgresql db setup:" + echo "https://www.jfrog.com/confluence/display/JFROG/PostgreSQL" + exit 1 +else + if [[ -z "$1" ]] + then + echo "Installing Jfrog Artifactory Openshift Helm" + else + echo "Patching Environment for RunAsAnyUid" + # patch the restricted scc to allow the pods to run as anyuid + oc patch scc restricted --patch '{"fsGroup":{"type":"RunAsAny"},"runAsUser":{"type":"RunAsAny"},"seLinuxContext":{"type":"RunAsAny"}}' --type=merge + if [[ -f "artifactory.cluster.license" ]] + then + echo "Creating k8s secret for Artifactory cluster licenses from file: artifactory.cluster.license" + # create the license secret + oc create secret generic artifactory-license --from-file=artifactory.cluster.license + fi + + if [[ -f "tls.crt" ]] + then + echo "Creating k8s secret for TLS tls-ingress from files tls.crt & tls.key" + # create the tls secret + oc create secret tls tls-ingress --cert=tls.crt --key=tls.key + fi + fi +fi + +# install via helm with default postgresql configuration +helm install artifactory-ha . 
\ + --set artifactory-ha.nginx.service.ssloffload=true \ + --set artifactory-ha.nginx.tlsSecretName=tls-ingress \ + --set artifactory-ha.artifactory.node.replicaCount=1 \ + --set artifactory-ha.artifactory.license.secret=artifactory-license,artifactory-ha.artifactory.license.dataKey=artifactory.cluster.license \ + --set artifactory-ha.database.type=postgresql \ + --set artifactory-ha.database.driver=org.postgresql.Driver \ + --set artifactory-ha.database.url=jdbc:postgresql://postgres-postgresql:5432/artifactory \ + --set artifactory-ha.database.user=artifactory \ + --set artifactory-ha.database.password=password \ + --set artifactory-ha.artifactory.joinKey=EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE \ + --set artifactory-ha.artifactory.masterKey=FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF + diff --git a/Openshift4/operator/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/requirements.lock b/Openshift4/operator/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/requirements.lock new file mode 100644 index 0000000..4030294 --- /dev/null +++ b/Openshift4/operator/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/requirements.lock @@ -0,0 +1,6 @@ +dependencies: +- name: artifactory-ha + repository: https://charts.jfrog.io/ + version: 4.1.0 +digest: sha256:8df1fd70eeabbb7687da0dd534d2161a413389ec40f331d5eb8e95ae50119222 +generated: "2020-09-30T12:30:08.142288-07:00" diff --git a/Openshift4/operator/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/requirements.yaml b/Openshift4/operator/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/requirements.yaml new file mode 100644 index 0000000..1e23270 --- /dev/null +++ b/Openshift4/operator/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/requirements.yaml @@ -0,0 +1,4 @@ +dependencies: + - name: artifactory-ha + version: 4.1.0 + repository: https://charts.jfrog.io/ diff --git 
a/Openshift4/operator/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/values.yaml b/Openshift4/operator/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/values.yaml new file mode 100755 index 0000000..3354dc0 --- /dev/null +++ b/Openshift4/operator/artifactory-ha-operator/helm-charts/openshift-artifactory-ha/values.yaml @@ -0,0 +1,99 @@ +# Openshift Artifactory HA +# This helm chart subcharts the latest jfrog/artifactory-ha chart +# and applies various things like initContainers, nginx mainConf, etc +# to enable the artifactory-ha helm chart to work in an openshift environment +artifactory-ha: + ################################### + # EDIT TO YOUR DB CONFIGURATION + ################################### + database: + type: "OVERRIDE" + driver: "OVERRIDE" + url: "OVERRIDE" + user: "OVERRIDE" + password: "OVERRIDE" + initContainerImage: registry.connect.redhat.com/jfrog/init:1.0.1 + waitForDatabase: true + installerInfo: '{ "productId": "Openshift_artifactory-ha/{{ .Chart.Version }}", "features": [ { "featureId": "ArtifactoryVersion/{{ default .Chart.AppVersion .Values.artifactory.image.version }}" }, { "featureId": "{{ if .Values.postgresql.enabled }}postgresql{{ else }}{{ .Values.database.type }}{{ end }}/0.0.0" }, { "featureId": "Platform/Openshift" }, { "featureId": "Partner/ACC-006983" }, { "featureId": "Channel/Openshift" } ] }' + artifactory: + uid: "1000721030" + ## Change to use RH UBI images + image: + registry: registry.connect.redhat.com + repository: jfrog/artifactory-pro + tag: 7.9.0 + node: + replicaCount: 2 + waitForPrimaryStartup: + enabled: false + masterKey: "OVERRIDE" + joinKey: "OVERRIDE" + postgresql: + enabled: false + nginx: + uid: "1000720104" + gid: "1000720107" + image: + registry: registry.redhat.io + repository: rhel8/nginx-116 + tag: latest + ## K8S secret name for the TLS secret to be used for SSL + tlsSecretName: "OVERRIDE" + service: + ssloffload: false + http: + externalPort: 80 + internalPort: 8080 + https: + 
externalPort: 443 + internalPort: 8443 + mainConf: | + # Main Nginx configuration file + worker_processes 4; + error_log {{ .Values.nginx.persistence.mountPath }}/logs//error.log warn; + pid /tmp/nginx.pid; + events { + worker_connections 1024; + } + http { + include /etc/nginx/mime.types; + default_type application/octet-stream; + variables_hash_max_size 1024; + variables_hash_bucket_size 64; + server_names_hash_max_size 4096; + server_names_hash_bucket_size 128; + types_hash_max_size 2048; + types_hash_bucket_size 64; + proxy_read_timeout 2400s; + client_header_timeout 2400s; + client_body_timeout 2400s; + proxy_connect_timeout 75s; + proxy_send_timeout 2400s; + proxy_buffer_size 32k; + proxy_buffers 40 32k; + proxy_busy_buffers_size 64k; + proxy_temp_file_write_size 250m; + proxy_http_version 1.1; + client_body_buffer_size 128k; + log_format main '$remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for"'; + log_format timing 'ip = $remote_addr ' + 'user = \"$remote_user\" ' + 'local_time = \"$time_local\" ' + 'host = $host ' + 'request = \"$request\" ' + 'status = $status ' + 'bytes = $body_bytes_sent ' + 'upstream = \"$upstream_addr\" ' + 'upstream_time = $upstream_response_time ' + 'request_time = $request_time ' + 'referer = \"$http_referer\" ' + 'UA = \"$http_user_agent\"'; + access_log {{ .Values.nginx.persistence.mountPath }}/logs/access.log timing; + sendfile on; + #tcp_nopush on; + keepalive_timeout 65; + #gzip on; + include {{ .Values.nginx.persistence.mountPath }}/conf.d/*.conf; + } diff --git a/Openshift4/operator/xray-operator/CHANGELOG.md b/Openshift4/operator/xray-operator/CHANGELOG.md index e4ff9e8..7d1fbfb 100755 --- a/Openshift4/operator/xray-operator/CHANGELOG.md +++ b/Openshift4/operator/xray-operator/CHANGELOG.md @@ -1,15 +1,18 @@ # JFrog Openshift Xray Chart Changelog All changes to this chart will be documented in this file. 
-## [3.8.0] Aug 17, 2020 +## [1.1.0] Oct 1, 2020 +* Deploying JFrog Xray 3.8.8 as an Operator into Openshift + +## [1.0.3] Aug 17, 2020 * Deploying JFrog Xray 3.8.0 as an Operator into Openshift -## [3.6.2] - July 28, 2020 +## [1.0.2] - July 28, 2020 * Deploying JFrog Xray 3.6.2 as an Operator into Openshift -## [3.5.2] - June 29, 2020 +## [1.0.1] - June 29, 2020 * Deploying JFrog Xray 3.5.2 as an Operator into Openshift -## [3.3.0] - May 22, 2020 +## [1.0.0] - May 22, 2020 * Deploying JFrog Xray 3.3.0 as an Operator initial version of Jfrog Xray supported diff --git a/Openshift4/operator/xray-operator/helm-charts/openshift-xray/CHANGELOG.md b/Openshift4/operator/xray-operator/helm-charts/openshift-xray/CHANGELOG.md old mode 100644 new mode 100755 index 87f3b19..a4c6f3b --- a/Openshift4/operator/xray-operator/helm-charts/openshift-xray/CHANGELOG.md +++ b/Openshift4/operator/xray-operator/helm-charts/openshift-xray/CHANGELOG.md @@ -1,6 +1,9 @@ # JFrog Openshift Artifactory-Xray Chart Changelog All changes to this chart will be documented in this file. 
+## [6.0.6] Oct 1st, 2020 +* Updating to Xray chart version 6.0.6 and Xray app version 3.8.8 + ## [4.2.0] Aug 17, 2020 * Updating to Xray chart version 4.2.0 and Xray app version 3.8.0 diff --git a/Openshift4/operator/xray-operator/helm-charts/openshift-xray/Chart.yaml b/Openshift4/operator/xray-operator/helm-charts/openshift-xray/Chart.yaml old mode 100644 new mode 100755 index d917f4d..aa8c6b6 --- a/Openshift4/operator/xray-operator/helm-charts/openshift-xray/Chart.yaml +++ b/Openshift4/operator/xray-operator/helm-charts/openshift-xray/Chart.yaml @@ -1,7 +1,9 @@ apiVersion: v1 -appVersion: 3.8.0 -description: Universal component scan for security and license inventory and impact - analysis +appVersion: 3.8.8 +description: Universal component scan for security and license inventory and impact analysis +sources: +- https://bintray.com/jfrog/product/xray/view +- https://github.com/jfrog/charts keywords: - xray - jfrog @@ -11,7 +13,4 @@ maintainers: - email: johnp@jfrog.com name: John Peterson name: openshift-xray -sources: -- https://bintray.com/jfrog/product/xray/view -- https://github.com/jfrog/charts -version: 4.2.0 +version: 6.0.6 diff --git a/Openshift4/operator/xray-operator/helm-charts/openshift-xray/LICENSE b/Openshift4/operator/xray-operator/helm-charts/openshift-xray/LICENSE old mode 100644 new mode 100755 diff --git a/Openshift4/operator/xray-operator/helm-charts/openshift-xray/README.md b/Openshift4/operator/xray-operator/helm-charts/openshift-xray/README.md old mode 100644 new mode 100755 diff --git a/Openshift4/operator/xray-operator/helm-charts/openshift-xray/helminstall.sh b/Openshift4/operator/xray-operator/helm-charts/openshift-xray/helminstall.sh old mode 100644 new mode 100755 index 2260da1..e19987c --- a/Openshift4/operator/xray-operator/helm-charts/openshift-xray/helminstall.sh +++ b/Openshift4/operator/xray-operator/helm-charts/openshift-xray/helminstall.sh @@ -57,7 +57,10 @@ fi JFROGURL="" if [[ -z "$4" ]] then - 
JFROGURL="http://openshiftartifactoryha-nginx" + # HELM + JFROGURL="http://artifactory-ha-nginx" + # OPERATOR + # JFROGURL="http://openshiftartifactoryha-nginx" else JFROGURL=$4 fi @@ -68,4 +71,6 @@ helm install xray . \ --set xray.database.url=$DBURL \ --set xray.database.user=$DBUSER \ --set xray.database.password=$DBPASS \ - --set xray.xray.jfrogUrl=$JFROGURL + --set xray.xray.jfrogUrl=$JFROGURL \ + --set xray.xray.joinKey=EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE \ + --set xray.xray.masterKey=FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF diff --git a/Openshift4/operator/xray-operator/helm-charts/openshift-xray/rabbitmq.yaml b/Openshift4/operator/xray-operator/helm-charts/openshift-xray/rabbitmq.yaml index 521df8e..df49bf6 100644 --- a/Openshift4/operator/xray-operator/helm-charts/openshift-xray/rabbitmq.yaml +++ b/Openshift4/operator/xray-operator/helm-charts/openshift-xray/rabbitmq.yaml @@ -16,10 +16,10 @@ spec: app: rabbitmq spec: containers: - - image: quay.io/jfrog/xray-rabbitmq-rh:3.8.0 + - image: registry.connect.redhat.com/jfrog/xray-rabbitmq:3.8.9 imagePullPolicy: "Always" name: xray-rabbitmq ports: - containerPort: 4369 - containerPort: 5672 - - containerPort: 25672 + - containerPort: 15672 diff --git a/Openshift4/operator/xray-operator/helm-charts/openshift-xray/rabbitmqservice.yaml b/Openshift4/operator/xray-operator/helm-charts/openshift-xray/rabbitmqservice.yaml index a8f108a..fb51fce 100644 --- a/Openshift4/operator/xray-operator/helm-charts/openshift-xray/rabbitmqservice.yaml +++ b/Openshift4/operator/xray-operator/helm-charts/openshift-xray/rabbitmqservice.yaml @@ -8,17 +8,17 @@ spec: selector: app: rabbitmq ports: - - name: port1 + - name: epmd protocol: TCP port: 4369 targetPort: 4369 - - name: port3 + - name: amqp protocol: TCP port: 5672 targetPort: 5672 - - name: port4 + - name: management protocol: TCP - port: 25672 + port: 15672 - targetPort: 25672 + targetPort: 15672 type: ClusterIP diff --git 
a/Openshift4/operator/xray-operator/helm-charts/openshift-xray/requirements.lock b/Openshift4/operator/xray-operator/helm-charts/openshift-xray/requirements.lock index 521af70..d53cf7c 100644 --- a/Openshift4/operator/xray-operator/helm-charts/openshift-xray/requirements.lock +++ b/Openshift4/operator/xray-operator/helm-charts/openshift-xray/requirements.lock @@ -1,6 +1,6 @@ dependencies: - name: xray repository: https://charts.jfrog.io/ - version: 4.2.0 -digest: sha256:5e016b3e02e80668003980f2e0399f152f7d83e39957813db3aa3efa9651474f -generated: "2020-08-17T14:49:47.771127-07:00" + version: 6.0.6 +digest: sha256:339b5ec4e309ce2970ed34ebc700d6fe8f436d6cbe8dd5d352f0b080401752af +generated: "2020-10-01T15:04:29.008985-07:00" diff --git a/Openshift4/operator/xray-operator/helm-charts/openshift-xray/requirements.yaml b/Openshift4/operator/xray-operator/helm-charts/openshift-xray/requirements.yaml index b0a88e0..f6311b7 100644 --- a/Openshift4/operator/xray-operator/helm-charts/openshift-xray/requirements.yaml +++ b/Openshift4/operator/xray-operator/helm-charts/openshift-xray/requirements.yaml @@ -1,4 +1,4 @@ dependencies: - name: xray - version: 4.2.0 + version: 6.0.6 repository: https://charts.jfrog.io/ diff --git a/Openshift4/operator/xray-operator/helm-charts/openshift-xray/values.yaml b/Openshift4/operator/xray-operator/helm-charts/openshift-xray/values.yaml old mode 100644 new mode 100755 index d4d3534..b242f3d --- a/Openshift4/operator/xray-operator/helm-charts/openshift-xray/values.yaml +++ b/Openshift4/operator/xray-operator/helm-charts/openshift-xray/values.yaml @@ -1,64 +1,101 @@ +# Openshift Jfrog Xray xray: - analysis: - image: - repository: registry.connect.redhat.com/jfrog/xray-analysis - version: 3.6.2 - name: xray-analysis - podManagementPolicy: Parallel - preStartCommand: null - updateStrategy: RollingUpdate - database: - password: OVERRIDE - url: OVERRIDE - user: OVERRIDE - global: - postgresqlTlsSecret: null - indexer: - image: - repository: 
registry.connect.redhat.com/jfrog/xray-indexer - version: 3.6.2 - name: xray-indexer - podManagementPolicy: Parallel - updateStrategy: RollingUpdate - persist: - image: - repository: registry.connect.redhat.com/jfrog/xray-persist - version: 3.6.2 - name: xray-persist - persistence: - size: 10Gi - podManagementPolicy: Parallel - preStartCommand: null - updateStrategy: RollingUpdate + unifiedUpgradeAllowed: true + replicaCount: 1 + xray: + masterKey: "OVERRIDE" + joinKey: "OVERRIDE" + consoleLog: false + jfrogUrl: "OVERRIDE" postgresql: enabled: false - rabbitmq-ha: - enabled: true + database: + url: "OVERRIDE" + user: "OVERRIDE" + password: "OVERRIDE" + common: + xrayUserId: "1000721035" + xrayGroupId: "1000721035" + analysis: + name: xray-analysis image: - repository: registry.connect.redhat.com/jfrog/xray-rabbitmq - tag: 3.8.0 - rabbitmqEpmdPort: 4369 - rabbitmqManagerPort: 15672 - rabbitmqNodePort: 5672 - replicaCount: 1 - replicaCount: 1 - router: + registry: registry.connect.redhat.com + repository: jfrog/xray-analysis + tag: 3.8.8 + updateStrategy: RollingUpdate + podManagementPolicy: Parallel + preStartCommand: + indexer: + name: xray-indexer image: - imagePullPolicy: IfNotPresent - repository: registry.connect.redhat.com/jfrog/xray-router - version: 1.4.2 - name: router + registry: registry.connect.redhat.com + repository: jfrog/xray-indexer + tag: 3.8.8 + updateStrategy: RollingUpdate + podManagementPolicy: Parallel + persist: + name: xray-persist + image: + registry: registry.connect.redhat.com + repository: jfrog/xray-persist + tag: 3.8.8 + updateStrategy: RollingUpdate + podManagementPolicy: Parallel + persistence: + size: 10Gi + preStartCommand: server: - image: - repository: registry.connect.redhat.com/jfrog/xray-server - version: 3.6.2 name: xray-server + image: + registry: registry.connect.redhat.com + repository: jfrog/xray-server + tag: 3.8.8 + updateStrategy: RollingUpdate podManagementPolicy: Parallel replicaCount: 1 - updateStrategy: 
RollingUpdate - unifiedUpgradeAllowed: true - xray: - consoleLog: false - jfrogUrl: OVERRIDE - joinKey: EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE - masterKey: FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF + router: + name: router + image: + registry: registry.connect.redhat.com + repository: jfrog/xray-router + tag: 1.4.3 + imagePullPolicy: IfNotPresent + rabbitmq-ha: + enabled: true + replicaCount: 1 + image: + repository: registry.connect.redhat.com/jfrog/xray-rabbitmq + tag: 3.8.9 + rabbitmqEpmdPort: 4369 + rabbitmqNodePort: 5672 + rabbitmqManagerPort: 15672 + rabbitmqUsername: guest + rabbitmqPassword: guest + managementUsername: management + managementPassword: management + initContainer: + enabled: false + securityContext: + fsGroup: 1000721035 + runAsUser: 1000721035 + runAsGroup: 1000721035 + livenessProbe: + initialDelaySeconds: 120 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + exec: + command: + - /bin/sh + - -c + - 'rabbitmqctl status' + readinessProbe: + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 3 + failureThreshold: 6 + exec: + command: + - /bin/sh + - -c + - 'rabbitmqctl status'