updates to openshift v4.3.5 and artifactory-ha v7.3.2

John Peterson
2020-03-27 14:58:31 -07:00
parent 800324820d
commit 6e38a2df5e
34 changed files with 644 additions and 1747 deletions

View File

@@ -1,3 +1,4 @@
artifactory.cluster.license
jfrog.team.crt
jfrog.team.key
artifactory-ha-operator/helm-charts/openshift-artifactory-ha

View File

@@ -1,4 +1,4 @@
-FROM quay.io/operator-framework/helm-operator:v0.14.1
+FROM quay.io/operator-framework/helm-operator:v0.16.0
COPY watches.yaml ${HOME}/watches.yaml
COPY helm-charts/ ${HOME}/helm-charts/

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,23 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: openshiftartifactoryhas.charts.helm.k8s.io
spec:
  group: charts.helm.k8s.io
  names:
    kind: OpenshiftArtifactoryHa
    listKind: OpenshiftArtifactoryHaList
    plural: openshiftartifactoryhas
    singular: openshiftartifactoryha
  scope: Namespaced
  subresources:
    status: {}
  validation:
    openAPIV3Schema:
      type: object
      x-kubernetes-preserve-unknown-fields: true
  version: v1alpha1
  versions:
  - name: v1alpha1
    served: true
    storage: true
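A quick sanity check after applying this CRD (a hedged sketch; assumes cluster-admin access via oc):
# confirm the CRD registered and which versions it serves
oc get crd openshiftartifactoryhas.charts.helm.k8s.io
oc get crd openshiftartifactoryhas.charts.helm.k8s.io -o jsonpath='{.spec.versions[*].name}'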

View File

@@ -0,0 +1,4 @@
packageName: openshiftartifactoryha-operator
channels:
- name: alpha
  currentCSV: artifactory-ha-operator.v1.0.0

View File

@@ -0,0 +1,8 @@
apiVersion: operators.coreos.com/v1
kind: CatalogSourceConfig
metadata:
  name: artifactory-ha-operator-csc
  namespace: openshift-marketplace
spec:
  targetNamespace: jfrog-artifactory
  packages: artifactory-ha-operator
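Once applied, the marketplace operator should surface the package through OLM; a hedged verification sketch (assumes the OpenShift marketplace components are running):
# the csc should produce a CatalogSource and a package manifest for the operator
oc get catalogsourceconfig artifactory-ha-operator-csc -n openshift-marketplace
oc get packagemanifests | grep artifactory-ha-operator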

View File

@@ -1,30 +1,20 @@
apiVersion: charts.helm.k8s.io/v1alpha1
kind: OpenshiftArtifactoryHa
metadata:
-name: artifactoryha
+name: openshiftartifactoryha
spec:
-# Default values copied from <project_dir>/helm-charts/openshift-artifactory-ha/values.yaml
+artifactory-ha:
access:
database:
-maxOpenConnections: 80
+driver: OVERRIDE
password: OVERRIDE
type: OVERRIDE
url: OVERRIDE
user: OVERRIDE
artifactory:
accessAdmin:
dataKey: null
ip: 127.0.0.1
password: null
secret: null
annotations: {}
binarystore:
enabled: true
catalinaLoggers: []
configMapName: null
configMaps: ""
copyOnEveryStartup: null
customInitContainers: ""
customInitContainersBegin: |
-- name: "custom-setup"
+- name: "redhat-custom-setup"
-image: "{{ .Values.initContainerImage }}"
+#image: "{{ .Values.initContainerImage }}"
image: {{ index .Values "initContainerImage" }}
imagePullPolicy: "{{ .Values.artifactory.image.pullPolicy }}"
command:
- 'sh'
@@ -35,743 +25,22 @@ spec:
volumeMounts:
- mountPath: "{{ .Values.artifactory.persistence.mountPath }}"
name: volume
customPersistentPodVolumeClaim: {}
customPersistentVolumeClaim: {}
customSidecarContainers: ""
customVolumeMounts: ""
customVolumes: ""
database:
maxOpenConnections: 80
deleteDBPropertiesOnStartup: true
externalArtifactoryPort: 8081
externalPort: 8082
haDataDir:
enabled: false
path: null
image:
-pullPolicy: IfNotPresent
+repository: quay.io/jfrog/artifactory-rh-pro
repository: image-registry.openshift-image-registry.svc:5000/jfrog-artifactory/artifactory-pro
internalArtifactoryPort: 8081
internalPort: 8082
javaOpts: {}
joinKey: EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE
license:
dataKey: artifactory.cluster.license
licenseKey: null
secret: artifactory-license
livenessProbe:
enabled: true
failureThreshold: 10
initialDelaySeconds: 180
path: /router/api/v1/system/health
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 10
loggers: []
masterKey: FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
name: artifactory-ha
node:
affinity: {}
javaOpts:
corePoolSize: 16
jmx:
accessFile: null
authenticate: false
enabled: false
host: null
passwordFile: null
port: 9010
ssl: false
labels: {}
minAvailable: 1
name: artifactory-ha-member
nodeSelector: {}
persistence:
existingClaim: false
podAntiAffinity:
topologyKey: kubernetes.io/hostname
type: ""
replicaCount: 1
resources: {}
tolerations: []
waitForPrimaryStartup:
enabled: true
time: 60
persistence:
accessMode: ReadWriteOnce
awsS3:
bucketName: artifactory-ha-aws
credential: null
endpoint: null
httpsOnly: true
identity: null
path: artifactory-ha/filestore
properties: {}
refreshCredentials: true
region: null
roleName: null
s3AwsVersion: AWS4-HMAC-SHA256
testConnection: false
awsS3V3:
bucketName: artifactory-aws
cloudFrontDomainName: null
cloudFrontKeyPairId: null
cloudFrontPrivateKey: null
credential: null
endpoint: null
identity: null
kmsCryptoMode: null
kmsKeyRegion: null
kmsServerSideEncryptionKeyId: null
path: artifactory/filestore
region: null
signatureExpirySeconds: 300
testConnection: false
useInstanceCredentials: true
usePresigning: false
azureBlob:
accountKey: null
accountName: null
containerName: null
endpoint: null
testConnection: false
binarystoreXml: |
{{- if eq .Values.artifactory.persistence.type "file-system" }}
<!-- File system replication -->
{{- if .Values.artifactory.persistence.fileSystem.existingSharedClaim.enabled }}
<!-- File Storage - Dynamic for Artifactory files, pre-created for DATA and BACKUP -->
<config version="4">
<chain>
<provider id="cache-fs" type="cache-fs"> <!-- This is a cached filestore -->
<provider id="sharding" type="sharding"> <!-- This is a sharding provider -->
{{- range $sharedClaimNumber, $e := until (.Values.artifactory.persistence.fileSystem.existingSharedClaim.numberOfExistingClaims|int) -}}
<sub-provider id="shard{{ $sharedClaimNumber }}" type="state-aware"/>
{{- end }}
</provider>
</provider>
</chain>
<provider id="cache-fs" type="cache-fs">
<maxCacheSize>{{ .Values.artifactory.persistence.maxCacheSize }}</maxCacheSize>
<cacheProviderDir>{{ .Values.artifactory.persistence.cacheProviderDir }}</cacheProviderDir>
</provider>
// Specify the read and write strategy and redundancy for the sharding binary provider
<provider id="sharding" type="sharding">
<readBehavior>roundRobin</readBehavior>
<writeBehavior>percentageFreeSpace</writeBehavior>
<redundancy>2</redundancy>
</provider>
{{- range $sharedClaimNumber, $e := until (.Values.artifactory.persistence.fileSystem.existingSharedClaim.numberOfExistingClaims|int) -}}
//For each sub-provider (mount), specify the filestore location
<provider id="shard{{ $sharedClaimNumber }}" type="state-aware">
<fileStoreDir>filestore{{ $sharedClaimNumber }}</fileStoreDir>
</provider>
{{- end }}
</config>
{{- else }}
<config version="2">
<chain>
<provider id="cache-fs" type="cache-fs">
<provider id="sharding-cluster" type="sharding-cluster">
<readBehavior>crossNetworkStrategy</readBehavior>
<writeBehavior>crossNetworkStrategy</writeBehavior>
<redundancy>{{ .Values.artifactory.persistence.redundancy }}</redundancy>
<lenientLimit>2</lenientLimit>
<minSpareUploaderExecutor>2</minSpareUploaderExecutor>
<sub-provider id="state-aware" type="state-aware"/>
<dynamic-provider id="remote" type="remote"/>
<property name="zones" value="local,remote"/>
</provider>
</provider>
</chain>
<provider id="cache-fs" type="cache-fs">
<maxCacheSize>{{ .Values.artifactory.persistence.maxCacheSize }}</maxCacheSize>
<cacheProviderDir>{{ .Values.artifactory.persistence.cacheProviderDir }}</cacheProviderDir>
</provider>
<!-- Shards add local file-system provider configuration -->
<provider id="state-aware" type="state-aware">
<fileStoreDir>shard-fs-1</fileStoreDir>
<zone>local</zone>
</provider>
<!-- Shards dynamic remote provider configuration -->
<provider id="remote" type="remote">
<checkPeriod>30</checkPeriod>
<serviceId>tester-remote1</serviceId>
<timeout>10000</timeout>
<zone>remote</zone>
<property name="header.remote.block" value="true"/>
</provider>
</config>
{{- end }}
{{- end }}
{{- if eq .Values.artifactory.persistence.type "google-storage" }}
<!-- Google storage -->
<config version="2">
<chain>
<provider id="sharding-cluster" type="sharding-cluster">
<readBehavior>crossNetworkStrategy</readBehavior>
<writeBehavior>crossNetworkStrategy</writeBehavior>
<redundancy>{{ .Values.artifactory.persistence.redundancy }}</redundancy>
<minSpareUploaderExecutor>2</minSpareUploaderExecutor>
<sub-provider id="eventual-cluster" type="eventual-cluster">
<provider id="retry" type="retry">
<provider id="google-storage" type="google-storage"/>
</provider>
</sub-provider>
<dynamic-provider id="remote" type="remote"/>
<property name="zones" value="local,remote"/>
</provider>
</chain>
<!-- Set max cache-fs size -->
<provider id="cache-fs" type="cache-fs">
<maxCacheSize>{{ .Values.artifactory.persistence.maxCacheSize }}</maxCacheSize>
<cacheProviderDir>{{ .Values.artifactory.persistence.cacheProviderDir }}</cacheProviderDir>
</provider>
<provider id="eventual-cluster" type="eventual-cluster">
<zone>local</zone>
</provider>
<provider id="remote" type="remote">
<checkPeriod>30</checkPeriod>
<timeout>10000</timeout>
<zone>remote</zone>
</provider>
<provider id="file-system" type="file-system">
<fileStoreDir>{{ .Values.artifactory.persistence.mountPath }}/data/filestore</fileStoreDir>
<tempDir>/tmp</tempDir>
</provider>
<provider id="google-storage" type="google-storage">
<providerId>google-cloud-storage</providerId>
<endpoint>{{ .Values.artifactory.persistence.googleStorage.endpoint }}</endpoint>
<httpsOnly>{{ .Values.artifactory.persistence.googleStorage.httpsOnly }}</httpsOnly>
<bucketName>{{ .Values.artifactory.persistence.googleStorage.bucketName }}</bucketName>
<identity>{{ .Values.artifactory.persistence.googleStorage.identity }}</identity>
<credential>{{ .Values.artifactory.persistence.googleStorage.credential }}</credential>
<path>{{ .Values.artifactory.persistence.googleStorage.path }}</path>
<bucketExists>{{ .Values.artifactory.persistence.googleStorage.bucketExists }}</bucketExists>
</provider>
</config>
{{- end }}
{{- if eq .Values.artifactory.persistence.type "aws-s3-v3" }}
<!-- AWS S3 V3 -->
<config version="2">
<chain> <!--template="cluster-s3-storage-v3"-->
<provider id="cache-fs-eventual-s3" type="cache-fs">
<provider id="sharding-cluster-eventual-s3" type="sharding-cluster">
<sub-provider id="eventual-cluster-s3" type="eventual-cluster">
<provider id="retry-s3" type="retry">
<provider id="s3-storage-v3" type="s3-storage-v3"/>
</provider>
</sub-provider>
<dynamic-provider id="remote-s3" type="remote"/>
</provider>
</provider>
</chain>
<provider id="sharding-cluster-eventual-s3" type="sharding-cluster">
<readBehavior>crossNetworkStrategy</readBehavior>
<writeBehavior>crossNetworkStrategy</writeBehavior>
<redundancy>{{ .Values.artifactory.persistence.redundancy }}</redundancy>
<property name="zones" value="local,remote"/>
</provider>
<provider id="remote-s3" type="remote">
<zone>remote</zone>
</provider>
<provider id="eventual-cluster-s3" type="eventual-cluster">
<zone>local</zone>
</provider>
<!-- Set max cache-fs size -->
<provider id="cache-fs-eventual-s3" type="cache-fs">
<maxCacheSize>{{ .Values.artifactory.persistence.maxCacheSize }}</maxCacheSize>
<cacheProviderDir>{{ .Values.artifactory.persistence.cacheProviderDir }}</cacheProviderDir>
</provider>
{{- with .Values.artifactory.persistence.awsS3V3 }}
<provider id="s3-storage-v3" type="s3-storage-v3">
<testConnection>{{ .testConnection }}</testConnection>
{{- if .identity }}
<identity>{{ .identity }}</identity>
{{- end }}
{{- if .credential }}
<credential>{{ .credential }}</credential>
{{- end }}
<region>{{ .region }}</region>
<bucketName>{{ .bucketName }}</bucketName>
<path>{{ .path }}</path>
<endpoint>{{ .endpoint }}</endpoint>
{{- with .kmsServerSideEncryptionKeyId }}
<kmsServerSideEncryptionKeyId>{{ . }}</kmsServerSideEncryptionKeyId>
{{- end }}
{{- with .kmsKeyRegion }}
<kmsKeyRegion>{{ . }}</kmsKeyRegion>
{{- end }}
{{- with .kmsCryptoMode }}
<kmsCryptoMode>{{ . }}</kmsCryptoMode>
{{- end }}
<useInstanceCredentials>true</useInstanceCredentials>
<usePresigning>{{ .usePresigning }}</usePresigning>
<signatureExpirySeconds>{{ .signatureExpirySeconds }}</signatureExpirySeconds>
{{- with .cloudFrontDomainName }}
<cloudFrontDomainName>{{ . }}</cloudFrontDomainName>
{{- end }}
{{- with .cloudFrontKeyPairId }}
<cloudFrontKeyPairId>{{ .cloudFrontKeyPairId }}</cloudFrontKeyPairId>
{{- end }}
{{- with .cloudFrontPrivateKey }}
<cloudFrontPrivateKey>{{ . }}</cloudFrontPrivateKey>
{{- end }}
</provider>
{{- end }}
</config>
{{- end }}
{{- if eq .Values.artifactory.persistence.type "aws-s3" }}
<!-- AWS S3 -->
<config version="2">
<chain> <!--template="cluster-s3"-->
<provider id="cache-fs" type="cache-fs">
<provider id="sharding-cluster" type="sharding-cluster">
<sub-provider id="eventual-cluster" type="eventual-cluster">
<provider id="retry-s3" type="retry">
<provider id="s3" type="s3"/>
</provider>
</sub-provider>
<dynamic-provider id="remote" type="remote"/>
</provider>
</provider>
</chain>
<!-- Set max cache-fs size -->
<provider id="cache-fs" type="cache-fs">
<maxCacheSize>{{ .Values.artifactory.persistence.maxCacheSize }}</maxCacheSize>
<cacheProviderDir>{{ .Values.artifactory.persistence.cacheProviderDir }}</cacheProviderDir>
</provider>
<provider id="eventual-cluster" type="eventual-cluster">
<zone>local</zone>
</provider>
<provider id="remote" type="remote">
<checkPeriod>30</checkPeriod>
<timeout>10000</timeout>
<zone>remote</zone>
</provider>
<provider id="sharding-cluster" type="sharding-cluster">
<readBehavior>crossNetworkStrategy</readBehavior>
<writeBehavior>crossNetworkStrategy</writeBehavior>
<redundancy>{{ .Values.artifactory.persistence.redundancy }}</redundancy>
<property name="zones" value="local,remote"/>
</provider>
<provider id="s3" type="s3">
<endpoint>{{ .Values.artifactory.persistence.awsS3.endpoint }}</endpoint>
{{- if .Values.artifactory.persistence.awsS3.roleName }}
<roleName>{{ .Values.artifactory.persistence.awsS3.roleName }}</roleName>
<refreshCredentials>true</refreshCredentials>
{{- else }}
<refreshCredentials>{{ .Values.artifactory.persistence.awsS3.refreshCredentials }}</refreshCredentials>
{{- end }}
<s3AwsVersion>{{ .Values.artifactory.persistence.awsS3.s3AwsVersion }}</s3AwsVersion>
<testConnection>{{ .Values.artifactory.persistence.awsS3.testConnection }}</testConnection>
<httpsOnly>{{ .Values.artifactory.persistence.awsS3.httpsOnly }}</httpsOnly>
<region>{{ .Values.artifactory.persistence.awsS3.region }}</region>
<bucketName>{{ .Values.artifactory.persistence.awsS3.bucketName }}</bucketName>
{{- if .Values.artifactory.persistence.awsS3.identity }}
<identity>{{ .Values.artifactory.persistence.awsS3.identity }}</identity>
{{- end }}
{{- if .Values.artifactory.persistence.awsS3.credential }}
<credential>{{ .Values.artifactory.persistence.awsS3.credential }}</credential>
{{- end }}
<path>{{ .Values.artifactory.persistence.awsS3.path }}</path>
{{- range $key, $value := .Values.artifactory.persistence.awsS3.properties }}
<property name="{{ $key }}" value="{{ $value }}"/>
{{- end }}
</provider>
</config>
{{- end }}
{{- if eq .Values.artifactory.persistence.type "azure-blob" }}
<!-- Azure Blob Storage -->
<config version="2">
<chain> <!--template="cluster-azure-blob-storage"-->
<provider id="cache-fs" type="cache-fs">
<provider id="sharding-cluster" type="sharding-cluster">
<sub-provider id="eventual-cluster" type="eventual-cluster">
<provider id="retry-azure-blob-storage" type="retry">
<provider id="azure-blob-storage" type="azure-blob-storage"/>
</provider>
</sub-provider>
<dynamic-provider id="remote" type="remote"/>
</provider>
</provider>
</chain>
<!-- Set max cache-fs size -->
<provider id="cache-fs" type="cache-fs">
<maxCacheSize>{{ .Values.artifactory.persistence.maxCacheSize }}</maxCacheSize>
<cacheProviderDir>{{ .Values.artifactory.persistence.cacheProviderDir }}</cacheProviderDir>
</provider>
<!-- cluster eventual Azure Blob Storage Service default chain -->
<provider id="sharding-cluster" type="sharding-cluster">
<readBehavior>crossNetworkStrategy</readBehavior>
<writeBehavior>crossNetworkStrategy</writeBehavior>
<redundancy>2</redundancy>
<lenientLimit>1</lenientLimit>
<property name="zones" value="local,remote"/>
</provider>
<provider id="remote" type="remote">
<zone>remote</zone>
</provider>
<provider id="eventual-cluster" type="eventual-cluster">
<zone>local</zone>
</provider>
<!--cluster eventual template-->
<provider id="azure-blob-storage" type="azure-blob-storage">
<accountName>{{ .Values.artifactory.persistence.azureBlob.accountName }}</accountName>
<accountKey>{{ .Values.artifactory.persistence.azureBlob.accountKey }}</accountKey>
<endpoint>{{ .Values.artifactory.persistence.azureBlob.endpoint }}</endpoint>
<containerName>{{ .Values.artifactory.persistence.azureBlob.containerName }}</containerName>
<testConnection>{{ .Values.artifactory.persistence.azureBlob.testConnection }}</testConnection>
</provider>
</config>
{{- end }}
cacheProviderDir: cache
customBinarystoreXmlSecret: null
enabled: true
eventual:
numberOfThreads: 10
fileSystem:
existingSharedClaim:
backupDir: /var/opt/jfrog/artifactory-backup
dataDir: '{{ .Values.artifactory.persistence.mountPath }}/artifactory-data'
enabled: false
-numberOfExistingClaims: 1
+initContainerImage: registry.redhat.io/ubi8-minimal
-googleStorage:
+installerInfo: '{ "productId": "Openshift_artifactory-ha/{{ .Chart.Version }}", "features": [ { "featureId": "ArtifactoryVersion/{{ default .Chart.AppVersion .Values.artifactory.image.version }}" }, { "featureId": "{{ if .Values.postgresql.enabled }}postgresql{{ else }}{{ default \"derby\" .Values.database.type }}{{ end }}/0.0.0" }, { "featureId": "Platform/{{ default \"openshift\" .Values.installer.platform }}" }, { "featureId": "Partner/ACC-006983" }, { "featureId": "Channel/Openshift" } ] }'
bucketExists: false
bucketName: artifactory-ha-gcp
credential: null
endpoint: storage.googleapis.com
httpsOnly: false
identity: null
path: artifactory-ha/filestore
local: false
maxCacheSize: 50000000000
mountPath: /var/opt/jfrog/artifactory
nfs:
backupDir: /var/opt/jfrog/artifactory-backup
capacity: 200Gi
dataDir: /var/opt/jfrog/artifactory-ha
haBackupMount: /backup
haDataMount: /data
ip: null
mountOptions: []
redundancy: 3
size: 200Gi
type: file-system
primary:
affinity: {}
javaOpts:
corePoolSize: 16
jmx:
accessFile: null
authenticate: false
enabled: false
host: null
passwordFile: null
port: 9010
ssl: false
labels: {}
name: artifactory-ha-primary
nodeSelector: {}
persistence:
existingClaim: false
podAntiAffinity:
topologyKey: kubernetes.io/hostname
type: ""
resources: {}
tolerations: []
priorityClass:
create: false
value: 1000000000
readinessProbe:
enabled: true
failureThreshold: 10
initialDelaySeconds: 60
path: /router/api/v1/system/health
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 10
service:
annotations: {}
loadBalancerSourceRanges: []
name: artifactory
pool: members
type: ClusterIP
systemYaml: |
shared:
extraJavaOpts: >
{{- with .Values.artifactory.primary.javaOpts }}
-Dartifactory.async.corePoolSize={{ .corePoolSize }}
{{- if .xms }}
-Xms{{ .xms }}
{{- end }}
{{- if .xmx }}
-Xmx{{ .xmx }}
{{- end }}
{{- if .jmx.enabled }}
-Dcom.sun.management.jmxremote
-Dcom.sun.management.jmxremote.port={{ .jmx.port }}
-Dcom.sun.management.jmxremote.rmi.port={{ .jmx.port }}
-Dcom.sun.management.jmxremote.ssl={{ .jmx.ssl }}
{{- if .jmx.host }}
-Djava.rmi.server.hostname={{ tpl .jmx.host $ }}
{{- else }}
-Djava.rmi.server.hostname={{ template "artifactory-ha.fullname" $ }}
{{- end }}
{{- if .jmx.authenticate }}
-Dcom.sun.management.jmxremote.authenticate=true
-Dcom.sun.management.jmxremote.access.file={{ .jmx.accessFile }}
-Dcom.sun.management.jmxremote.password.file={{ .jmx.passwordFile }}
{{- else }}
-Dcom.sun.management.jmxremote.authenticate=false
{{- end }}
{{- end }}
{{- if .other }}
{{ .other }}
{{- end }}
{{- end }}
database:
{{- if .Values.postgresql.enabled }}
type: postgresql
url: 'jdbc:postgresql://{{ .Release.Name }}-postgresql:{{ .Values.postgresql.service.port }}/{{ .Values.postgresql.postgresqlDatabase }}'
host: ''
driver: org.postgresql.Driver
username: '{{ .Values.postgresql.postgresqlUsername }}'
password: '{{ .Values.postgresql.postgresqlPassword }}'
{{ else }}
type: '{{ .Values.database.type }}'
url: '{{ .Values.database.url }}'
driver: '{{ .Values.database.driver }}'
username: '{{ .Values.database.user }}'
password: '{{ .Values.database.password }}'
{{- end }}
security:
joinKey: '{{ .Values.artifactory.joinKey }}'
masterKey: '{{ .Values.artifactory.masterKey }}'
artifactory:
{{- if .Values.artifactory.haDataDir.enabled }}
node:
haDataDir: {{ .Values.artifactory.haDataDir.path }}
{{- end }}
database:
maxOpenConnections: {{ .Values.artifactory.database.maxOpenConnections }}
access:
database:
maxOpenConnections: '{{ .Values.access.database.maxOpenConnections }}'
{{- if .Values.access.database.enabled }}
type: '{{ .Values.access.database.type }}'
url: '{{ .Values.access.database.url }}'
driver: '{{ .Values.access.database.driver }}'
username: '{{ .Values.access.database.user }}'
password: '{{ .Values.access.database.password }}'
{{- end }}
terminationGracePeriodSeconds: 30
uid: 1030
userPluginSecrets: null
database:
driver: null
password: null
secrets: {}
type: null
url: null
user: null
filebeat:
enabled: false
filebeatYml: |
logging.level: info
path.data: {{ .Values.artifactory.persistence.mountPath }}/log/filebeat
name: artifactory-filebeat
queue.spool: ~
filebeat.inputs:
- type: log
enabled: true
close_eof: ${CLOSE:false}
paths:
- {{ .Values.artifactory.persistence.mountPath }}/log/*.log
fields:
service: "jfrt"
log_type: "artifactory"
output:
logstash:
hosts: ["{{ .Values.filebeat.logstashUrl }}"]
image:
repository: docker.elastic.co/beats/filebeat
version: 7.5.1
livenessProbe:
exec:
command:
- sh
- -c
- |
#!/usr/bin/env bash -e
curl --fail 127.0.0.1:5066
failureThreshold: 3
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 5
logstashUrl: logstash:5044
name: artifactory-filebeat
readinessProbe:
exec:
command:
- sh
- -c
- |
#!/usr/bin/env bash -e
filebeat test output
failureThreshold: 3
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 5
resources: {}
terminationGracePeriod: 10
imagePullSecrets: null
ingress:
additionalRules: []
annotations: {}
artifactoryPath: /artifactory/
defaultBackend:
enabled: true
enabled: false
hosts: []
labels: {}
routerPath: /
tls: []
initContainerImage: alpine:3.10
initContainers:
resources: {}
installer:
platform: null
type: null
logger:
image:
repository: busybox
tag: "1.30"
networkpolicy:
- egress:
- {}
ingress:
- {}
name: artifactory
podSelector:
matchLabels:
app: artifactory-ha
nginx:
affinity: {}
artifactoryConf: |
ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3;
ssl_certificate {{ .Values.nginx.persistence.mountPath }}/ssl/tls.crt;
ssl_certificate_key {{ .Values.nginx.persistence.mountPath }}/ssl/tls.key;
ssl_session_cache shared:SSL:1m;
ssl_prefer_server_ciphers on;
## server configuration
server {
{{- if .Values.nginx.internalPortHttps }}
listen {{ .Values.nginx.internalPortHttps }} ssl;
{{- else -}}
{{- if .Values.nginx.https.enabled }}
listen {{ .Values.nginx.https.internalPort }} ssl;
{{- end }}
{{- end }}
{{- if .Values.nginx.internalPortHttp }}
listen {{ .Values.nginx.internalPortHttp }};
{{- else -}}
{{- if .Values.nginx.http.enabled }}
listen {{ .Values.nginx.http.internalPort }};
{{- end }}
{{- end }}
server_name ~(?<repo>.+)\.{{ include "artifactory-ha.fullname" . }} {{ include "artifactory-ha.fullname" . }}
{{- range .Values.ingress.hosts -}}
{{- if contains "." . -}}
{{ "" | indent 0 }} ~(?<repo>.+)\.{{ (splitn "." 2 .)._1 }} {{ . }}
{{- end -}}
{{- end -}};
if ($http_x_forwarded_proto = '') {
set $http_x_forwarded_proto $scheme;
}
## Application specific logs
## access_log /var/log/nginx/artifactory-access.log timing;
## error_log /var/log/nginx/artifactory-error.log;
rewrite ^/artifactory/?$ / redirect;
if ( $repo != "" ) {
rewrite ^/(v1|v2)/(.*) /artifactory/api/docker/$repo/$1/$2 break;
}
chunked_transfer_encoding on;
client_max_body_size 0;
location / {
proxy_read_timeout 900;
proxy_pass_header Server;
proxy_cookie_path ~*^/.* /;
proxy_pass http://{{ include "artifactory-ha.fullname" . }}:{{ .Values.artifactory.externalPort }}/;
proxy_set_header X-JFrog-Override-Base-Url $http_x_forwarded_proto://$host:$server_port;
proxy_set_header X-Forwarded-Port $server_port;
proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
proxy_set_header Host $http_host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
location /artifactory/ {
if ( $request_uri ~ ^/artifactory/(.*)$ ) {
proxy_pass http://{{ include "artifactory-ha.fullname" . }}:{{ .Values.artifactory.externalArtifactoryPort }}/artifactory/$1;
}
proxy_pass http://{{ include "artifactory-ha.fullname" . }}:{{ .Values.artifactory.externalArtifactoryPort }}/artifactory/;
}
}
}
customArtifactoryConfigMap: null
customConfigMap: null
enabled: true
tlsSecretName: tls-ingress
gid: 107
http:
enabled: true
externalPort: 80
-internalPort: 80
+internalPort: 8080
https:
enabled: true
externalPort: 443
-internalPort: 443
+internalPort: 8443
image:
-pullPolicy: IfNotPresent
+repository: quay.io/jfrog/nginx-artifactory-rh-pro
#repository: image-registry.openshift-image-registry.svc:5000/jfrog-artifactory/nginx-artifactory-pro
repository: registry.redhat.io/rhel8/nginx-116
labels: {}
livenessProbe:
enabled: true
failureThreshold: 10
initialDelaySeconds: 60
path: /router/api/v1/system/health
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 10
loggers: []
mainConf: |
# Main Nginx configuration file
worker_processes 4;
@@ -822,178 +91,7 @@ spec:
#gzip on;
include {{ .Values.nginx.persistence.mountPath }}/conf.d/*.conf;
}
name: nginx
nodeSelector: {}
persistence:
accessMode: ReadWriteOnce
enabled: false
mountPath: /var/opt/jfrog/nginx
size: 5Gi
readinessProbe:
enabled: true
failureThreshold: 10
initialDelaySeconds: 10
path: /router/api/v1/system/health
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 10
replicaCount: 1
resources: {}
service:
externalTrafficPolicy: Cluster
labels: {}
loadBalancerIP: null
loadBalancerSourceRanges: []
type: LoadBalancer
tolerations: []
uid: 104
postgresql:
enabled: true
extraEnv: []
global:
postgresql: {}
image:
debug: false
pullPolicy: IfNotPresent
registry: docker.bintray.io
repository: bitnami/postgresql
tag: 9.6.15-debian-9-r91
livenessProbe:
enabled: true
failureThreshold: 6
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
master:
affinity: {}
annotations: {}
extraVolumeMounts: []
extraVolumes: []
labels: {}
nodeSelector: {}
podAnnotations: {}
podLabels: {}
tolerations: []
metrics:
enabled: false
-image:
+waitForDatabase: false
pullPolicy: IfNotPresent
registry: docker.io
repository: bitnami/postgres-exporter
tag: 0.6.0-debian-9-r0
livenessProbe:
enabled: true
failureThreshold: 6
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
readinessProbe:
enabled: true
failureThreshold: 6
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
securityContext:
enabled: false
runAsUser: 1001
service:
annotations:
prometheus.io/port: "9187"
prometheus.io/scrape: "true"
loadBalancerIP: null
type: ClusterIP
serviceMonitor:
additionalLabels: {}
enabled: false
networkPolicy:
allowExternal: true
enabled: false
nodeSelector: {}
persistence:
accessModes:
- ReadWriteOnce
annotations: {}
enabled: true
mountPath: /bitnami/postgresql
size: 50Gi
subPath: ""
postgresqlConfiguration:
listenAddresses: '''*'''
maxConnections: "1500"
postgresqlDataDir: /bitnami/postgresql/data
postgresqlDatabase: artifactory
postgresqlPassword: ""
postgresqlUsername: artifactory
readinessProbe:
enabled: true
failureThreshold: 6
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
replication:
applicationName: my_application
enabled: false
numSynchronousReplicas: 0
password: repl_password
slaveReplicas: 1
synchronousCommit: "off"
user: repl_user
resources:
requests:
cpu: 250m
memory: 256Mi
securityContext:
enabled: true
fsGroup: 1001
runAsUser: 1001
service:
annotations: {}
port: 5432
type: ClusterIP
serviceAccount:
enabled: false
slave:
affinity: {}
annotations: {}
extraVolumeMounts: []
extraVolumes: []
labels: {}
nodeSelector: {}
podAnnotations: {}
podLabels: {}
tolerations: []
updateStrategy:
type: RollingUpdate
volumePermissions:
enabled: true
image:
pullPolicy: Always
registry: docker.io
repository: bitnami/minideb
tag: stretch
securityContext:
runAsUser: 0
rbac:
create: true
role:
rules:
- apiGroups:
- ""
resources:
- services
- endpoints
- pods
verbs:
- get
- watch
- list
serviceAccount:
annotations: {}
create: true
name: null
waitForDatabase: true

View File

@@ -1,6 +0,0 @@
apiVersion: image.openshift.io/v1
kind: ImageStream
metadata:
  name: nginx-artifactory-pro
  namespace: jfrog-artifactory

View File

@@ -1,6 +0,0 @@
apiVersion: image.openshift.io/v1
kind: ImageStream
metadata:
  name: artifactory-ha
  namespace: jfrog-artifactory

View File

@@ -1,6 +0,0 @@
apiVersion: image.openshift.io/v1
kind: ImageStream
metadata:
  name: artifactory-pro
  namespace: jfrog-artifactory

View File

@@ -15,7 +15,7 @@ spec:
serviceAccountName: artifactory-ha-operator
containers:
- name: artifactory-ha-operator
-image: image-registry.openshift-image-registry.svc:5000/jfrog-artifactory/artifactory-ha
+image: quay.io/jfrog/artifactory-ha-operator
imagePullPolicy: IfNotPresent
env:
- name: WATCH_NAMESPACE
@@ -28,3 +28,17 @@ spec:
fieldPath: metadata.name
- name: OPERATOR_NAME
value: "artifactory-ha-operator"
- name: RELATED_IMAGE_ARTIFACTORY_IMAGE_REPOSITORY
value: "quay.io/jfrog/artifactory-rh-pro"
- name: RELATED_IMAGE_NGINX_IMAGE_REPOSITORY
value: "quay.io/jfrog/nginx-artifactory-rh-pro"
- name: DATABASE_TYPE
value: "OVERRIDE"
- name: DATABASE_DRIVER
value: "OVERRIDE"
- name: DATABASE_URL
value: "OVERRIDE"
- name: DATABASE_USER
value: "OVERRIDE"
- name: DATABASE_PASSWORD
value: "OVERRIDE"

View File

@@ -1,7 +1,7 @@
apiVersion: operators.coreos.com/v1alpha2
kind: OperatorGroup
metadata:
-name: jfrog-group
+name: jfrog-operator-group
namespace: jfrog-artifactory
spec:
targetNamespaces:

View File

@@ -10,7 +10,7 @@ objects:
annotations:
openshift.io/description: JFrog Artifactory
openshift.io/display-name: jfrog-artifactory
-openshift.io/requester: johnp@jfrog.com
+openshift.io/requester: integrations@jfrog.com
creationTimestamp: null
name: jfrog-artifactory
spec: {}

View File

@@ -4,6 +4,40 @@ metadata:
creationTimestamp: null
name: artifactory-ha-operator
rules:
- apiGroups:
- ""
resources:
- pods
- services
- services/finalizers
- endpoints
- persistentvolumeclaims
- events
- configmaps
- secrets
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- apps
resources:
- deployments
- daemonsets
- replicasets
- statefulsets
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
@@ -23,41 +57,6 @@ rules:
- events
verbs:
- create
- apiGroups:
- networking.k8s.io
resources:
- networkpolicies
verbs:
- '*'
- apiGroups:
- policy
resources:
- poddisruptionbudgets
verbs:
- '*'
- apiGroups:
- ""
resources:
- configmaps
- secrets
- serviceaccounts
- services
verbs:
- '*'
- apiGroups:
- rbac.authorization.k8s.io
resources:
- rolebindings
- roles
verbs:
- '*'
- apiGroups:
- apps
resources:
- deployments
- statefulsets
verbs:
- '*'
- apiGroups:
- monitoring.coreos.com
resources:

View File

@@ -1,15 +0,0 @@
kind: SecurityContextConstraints
apiVersion: v1
metadata:
  name: scc-admin
allowPrivilegedContainer: true
runAsUser:
  type: RunAsAny
seLinuxContext:
  type: RunAsAny
fsGroup:
  type: RunAsAny
supplementalGroups:
  type: RunAsAny
users:
- kubeadmin

View File

@@ -0,0 +1,10 @@
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
  name: artifactory-ha-operator
  namespace: jfrog-artifactory
spec:
  channel: alpha
  name: artifactory-ha-operator
  source: artifactory-ha-operator-csc
  sourceNamespace: openshift-operators
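After this Subscription is created, OLM should install the CSV named in the package file (artifactory-ha-operator.v1.0.0); a rough status check, assuming the catalog source resolved:
# the subscription should report the installed CSV and the CSV should reach Succeeded
oc get subscription artifactory-ha-operator -n jfrog-artifactory
oc get csv -n jfrog-artifactory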

View File

@@ -0,0 +1,2 @@
## README
Use the latest openshift-artifactory-ha chart from the partnership Artifactory instance.
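For local chart work, the dependency can also be pulled from charts.jfrog.io as pinned in requirements.yaml (a sketch; the partnership registry URL itself is not recorded in this repo):
# fetch artifactory-ha 2.1.3 into charts/ and refresh requirements.lock
helm repo add jfrog https://charts.jfrog.io/
helm dependency update helm-charts/openshift-artifactory-ha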

View File

@@ -1,18 +0,0 @@
kind: SecurityContextConstraints
apiVersion: v1
metadata:
  name: hostpath
allowPrivilegedContainer: false
runAsUser:
  type: RunAsAny
seLinuxContext:
  type: RunAsAny
fsGroup:
  type: RunAsAny
supplementalGroups:
  type: RunAsAny
users:
- artifactory
groups:
- artifactory
- jfrog-artifactory

View File

@@ -1,6 +0,0 @@
dependencies:
- name: artifactory-ha
  repository: https://charts.jfrog.io/
  version: 2.0.31
digest: sha256:d7c2af74a0188ca8df2a97158c83b36f85dfae72c1b60ce4540a4e00da2d9a6f
generated: "2020-03-19T17:29:04.445679-07:00"

View File

@@ -1,25 +0,0 @@
#!/usr/bin/env bash
oc project default
oc apply -f helm-charts/openshift-artifactory-ha/pv-examples
oc apply -f deploy/project.yaml
oc apply -f deploy/namespace.yaml
oc project jfrog-artifactory
oc apply -f deploy/imagestream-nginx.yaml
oc apply -f deploy/imagestream-pro.yaml
oc apply -f deploy/imagestream-operator.yaml
oc patch image.config.openshift.io/cluster --type=merge --patch='{"spec":{"registrySources":{"insecureRegistries":["default-route-openshift-image-registry.apps-crc.testing"]}}}'
oc apply -f deploy/role.yaml
oc apply -f deploy/role_binding.yaml
oc apply -f deploy/service_account.yaml
oc apply -f deploy/securitycontextconstraints.yaml
oc adm policy add-scc-to-user scc-admin system:serviceaccount:jfrog-artifactory:artifactory-ha-operator
oc adm policy add-scc-to-user scc-admin system:serviceaccount:jfrog-artifactory:default
oc adm policy add-scc-to-user anyuid system:serviceaccount:jfrog-artifactory:artifactory-ha-operator
oc adm policy add-scc-to-user anyuid system:serviceaccount:jfrog-artifactory:default
oc adm policy add-scc-to-group anyuid system:authenticated
oc apply -f deploy/hostpathscc.yaml
oc patch securitycontextconstraints.security.openshift.io/hostpath --type=merge --patch='{"allowHostDirVolumePlugin": true}'
oc adm policy add-scc-to-user hostpath system:serviceaccount:jfrog-artifactory:artifactory-ha-operator
oc apply -f deploy/crds/charts.helm.k8s.io_openshiftartifactoryhas_crd.yaml
oc apply -f deploy/crds/charts.helm.k8s.io_v1alpha1_openshiftartifactoryha_cr.yaml
oc create secret generic artifactory-license --from-file=../artifactory.cluster.license

View File

@@ -1,16 +0,0 @@
#!/usr/bin/env bash
oc project jfrog-artifactory
oc delete deployments --all
oc delete statefulsets --all
oc delete configmaps --all
oc delete deploymentconfigs --all
oc delete pods --all
oc delete svc --all
oc delete networkpolicies --all
oc delete pvc --all
oc delete PodDisruptionBudget --all
for s in $(oc get secrets | grep artifactory | cut -f1 -d ' '); do
oc delete secret $s
done
oc delete serviceaccount artifactoryha-artifactory-ha
oc delete role artifactoryha-artifactory-ha

View File

@@ -3,3 +3,11 @@
group: charts.helm.k8s.io
kind: OpenshiftArtifactoryHa
chart: helm-charts/openshift-artifactory-ha
overrideValues:
artifactory-ha.artifactory.image.repository: $RELATED_IMAGE_ARTIFACTORY_IMAGE_REPOSITORY
artifactory-ha.nginx.image.repository: $RELATED_IMAGE_NGINX_IMAGE_REPOSITORY
artifactory-ha.database.type: $DATABASE_TYPE
artifactory-ha.database.driver: $DATABASE_DRIVER
artifactory-ha.database.url: $DATABASE_URL
artifactory-ha.database.user: $DATABASE_USER
artifactory-ha.database.password: $DATABASE_PASSWORD
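The helm operator expands the $VARIABLE references in overrideValues from the operator pod's environment (the RELATED_IMAGE_* and DATABASE_* variables set in operator.yaml), so the net effect is roughly what explicit --set flags would do; a hypothetical rendering for comparison, assuming Helm 3 and example database values:
# approximately what the operator applies after env substitution (values are examples only)
helm template artifactoryha helm-charts/openshift-artifactory-ha \
  --set artifactory-ha.artifactory.image.repository=quay.io/jfrog/artifactory-rh-pro \
  --set artifactory-ha.nginx.image.repository=quay.io/jfrog/nginx-artifactory-rh-pro \
  --set artifactory-ha.database.type=postgresql \
  --set artifactory-ha.database.url='jdbc:postgresql://db.example.com:5432/artifactory'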

View File

@@ -1,5 +1,5 @@
apiVersion: v1
-appVersion: 7.2.1
+appVersion: 7.3.2
description: Universal Repository Manager supporting all major packaging formats,
build tools and CI servers.
home: https://www.jfrog.com/artifactory/
@@ -21,4 +21,4 @@ name: openshift-artifactory-ha
sources:
- https://bintray.com/jfrog/product/JFrog-Artifactory-Pro/view
- https://github.com/jfrog/charts
-version: 2.0.34
+version: 2.1.3

View File

@@ -0,0 +1,6 @@
dependencies:
- name: artifactory-ha
  repository: https://charts.jfrog.io/
  version: 2.1.3
digest: sha256:58169c65a87781f34ab6a49706e0f345234878558ce681ee7ed38ace25b88dfe
generated: "2020-03-26T13:58:11.770394-07:00"

View File

@@ -1,4 +1,4 @@
dependencies:
- name: artifactory-ha
-version: 2.0.34
+version: 2.1.3
repository: https://charts.jfrog.io/

View File

@@ -2,8 +2,22 @@
# Requires one custom init container # Requires one custom init container
# to resolve the user id perm issue with redhat # to resolve the user id perm issue with redhat
artifactory-ha:
###################################
# EDIT TO YOUR DB CONFIGURATION
###################################
database:
type: "OVERRIDE"
driver: "OVERRIDE"
url: "OVERRIDE"
user: "OVERRIDE"
password: "OVERRIDE"
###################################
# DO NOT EDIT FURTHER
###################################
initContainerImage: registry.redhat.io/ubi8-minimal
waitForDatabase: false
installerInfo: '{ "productId": "Openshift_artifactory-ha/{{ .Chart.Version }}", "features": [ { "featureId": "ArtifactoryVersion/{{ default .Chart.AppVersion .Values.artifactory.image.version }}" }, { "featureId": "{{ if .Values.postgresql.enabled }}postgresql{{ else }}{{ default \"derby\" .Values.database.type }}{{ end }}/0.0.0" }, { "featureId": "Platform/{{ default \"openshift\" .Values.installer.platform }}" }, { "featureId": "Partner/ACC-006983" }, { "featureId": "Channel/Openshift" } ] }'
artifactory:
## Add custom init containers execution before predefined init containers
customInitContainersBegin: |
@@ -26,6 +40,8 @@ artifactory-ha:
node:
waitForPrimaryStartup:
enabled: false
postgresql:
enabled: false
nginx:
image:
repository: "image-registry.openshift-image-registry.svc:5000/jfrog-artifactory/nginx-artifactory-pro"