OpenShift 4 operator for Artifactory HA v7.0.2

John Peterson
2020-02-11 10:33:42 -08:00
parent 0e662f38ca
commit 17250d1413
160 changed files with 19388 additions and 0 deletions

@@ -0,0 +1,22 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: openshiftartifactoryhas.charts.helm.k8s.io
spec:
group: charts.helm.k8s.io
names:
kind: OpenshiftArtifactoryHa
listKind: OpenshiftArtifactoryHaList
plural: openshiftartifactoryhas
singular: openshiftartifactoryha
scope: Namespaced
subresources:
status: {}
validation:
openAPIV3Schema:
type: object
x-kubernetes-preserve-unknown-fields: true
versions:
- name: v1alpha1
served: true
storage: true
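# Note: the openAPIV3Schema above only requires `type: object` and preserves
# unknown fields, so the operator (the usual Helm-based operator pattern)
# passes the custom resource's `spec` through to the chart as Helm values.
# A minimal custom resource sketch, assuming only one default needs overriding:
#
#   apiVersion: charts.helm.k8s.io/v1alpha1
#   kind: OpenshiftArtifactoryHa
#   metadata:
#     name: artifactoryha
#   spec:
#     artifactory:
#       node:
#         replicaCount: 2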

@@ -0,0 +1,997 @@
apiVersion: charts.helm.k8s.io/v1alpha1
kind: OpenshiftArtifactoryHa
metadata:
name: artifactoryha
spec:
# Default values copied from <project_dir>/helm-charts/openshift-artifactory-ha/values.yaml
access:
database:
maxOpenConnections: 80
artifactory:
accessAdmin:
dataKey: null
ip: 127.0.0.1
password: null
secret: null
annotations: {}
binarystore:
enabled: true
catalinaLoggers: []
configMapName: null
configMaps: ""
copyOnEveryStartup: null
customInitContainers: ""
customInitContainersBegin: |
- name: "custom-setup"
image: "{{ .Values.initContainerImage }}"
imagePullPolicy: "{{ .Values.artifactory.image.pullPolicy }}"
command:
- 'sh'
- '-c'
- 'chown -R 1030:1030 {{ .Values.artifactory.persistence.mountPath }}'
securityContext:
runAsUser: 0
volumeMounts:
- mountPath: "{{ .Values.artifactory.persistence.mountPath }}"
name: volume
customPersistentPodVolumeClaim: {}
customPersistentVolumeClaim: {}
customSidecarContainers: ""
customVolumeMounts: ""
customVolumes: ""
database:
maxOpenConnections: 80
deleteDBPropertiesOnStartup: true
externalArtifactoryPort: 8081
externalPort: 8082
haDataDir:
enabled: false
path: null
image:
pullPolicy: IfNotPresent
repository: earlyaccess.jfrog.io/artifactory-pro
internalArtifactoryPort: 8081
internalPort: 8082
javaOpts: {}
joinKey: EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE
license:
dataKey: artifactory.cluster.license
licenseKey: null
secret: artifactory-license
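# The license is read from a pre-created Secret named by `secret`, under the
# key named by `dataKey`. A sketch of creating it (the local file path
# ./artifactory.cluster.license is an assumption):
#   oc create secret generic artifactory-license \
#     --from-file=artifactory.cluster.license=./artifactory.cluster.license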
livenessProbe:
enabled: true
failureThreshold: 10
initialDelaySeconds: 180
path: /router/api/v1/system/health
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 10
loggers: []
masterKey: FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
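# The joinKey and masterKey above are obvious placeholders; replace them with
# random hex strings of the same length before deploying, e.g. (one way, using
# openssl):
#   openssl rand -hex 16   # 32-character joinKey
#   openssl rand -hex 32   # 64-character masterKey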
name: artifactory-ha
node:
affinity: {}
javaOpts:
corePoolSize: 16
jmx:
accessFile: null
authenticate: false
enabled: false
host: null
passwordFile: null
port: 9010
ssl: false
labels: {}
minAvailable: 1
name: artifactory-ha-member
nodeSelector: {}
persistence:
existingClaim: false
podAntiAffinity:
topologyKey: kubernetes.io/hostname
type: ""
replicaCount: 1
resources: {}
tolerations: []
waitForPrimaryStartup:
enabled: true
time: 60
persistence:
accessMode: ReadWriteOnce
awsS3:
bucketName: artifactory-ha-aws
credential: null
endpoint: null
httpsOnly: true
identity: null
path: artifactory-ha/filestore
properties: {}
refreshCredentials: true
region: null
roleName: null
s3AwsVersion: AWS4-HMAC-SHA256
testConnection: false
awsS3V3:
bucketName: artifactory-aws
cloudFrontDomainName: null
cloudFrontKeyPairId: null
cloudFrontPrivateKey: null
credential: null
endpoint: null
identity: null
kmsCryptoMode: null
kmsKeyRegion: null
kmsServerSideEncryptionKeyId: null
path: artifactory/filestore
region: null
signatureExpirySeconds: 300
testConnection: false
useInstanceCredentials: true
usePresigning: false
azureBlob:
accountKey: null
accountName: null
containerName: null
endpoint: null
testConnection: false
binarystoreXml: |
{{- if eq .Values.artifactory.persistence.type "file-system" }}
<!-- File system replication -->
{{- if .Values.artifactory.persistence.fileSystem.existingSharedClaim.enabled }}
<!-- File Storage - Dynamic for Artifactory files, pre-created for DATA and BACKUP -->
<config version="4">
<chain>
<provider id="cache-fs" type="cache-fs"> <!-- This is a cached filestore -->
<provider id="sharding" type="sharding"> <!-- This is a sharding provider -->
{{- range $sharedClaimNumber, $e := until (.Values.artifactory.persistence.fileSystem.existingSharedClaim.numberOfExistingClaims|int) -}}
<sub-provider id="shard{{ $sharedClaimNumber }}" type="state-aware"/>
{{- end }}
</provider>
</provider>
</chain>
<provider id="cache-fs" type="cache-fs">
<maxCacheSize>{{ .Values.artifactory.persistence.maxCacheSize }}</maxCacheSize>
<cacheProviderDir>{{ .Values.artifactory.persistence.cacheProviderDir }}</cacheProviderDir>
</provider>
<!-- Specify the read and write strategy and redundancy for the sharding binary provider -->
<provider id="sharding" type="sharding">
<readBehavior>roundRobin</readBehavior>
<writeBehavior>percentageFreeSpace</writeBehavior>
<redundancy>2</redundancy>
</provider>
{{- range $sharedClaimNumber, $e := until (.Values.artifactory.persistence.fileSystem.existingSharedClaim.numberOfExistingClaims|int) -}}
<!-- For each sub-provider (mount), specify the filestore location -->
<provider id="shard{{ $sharedClaimNumber }}" type="state-aware">
<fileStoreDir>filestore{{ $sharedClaimNumber }}</fileStoreDir>
</provider>
{{- end }}
</config>
{{- else }}
<config version="2">
<chain>
<provider id="cache-fs" type="cache-fs">
<provider id="sharding-cluster" type="sharding-cluster">
<readBehavior>crossNetworkStrategy</readBehavior>
<writeBehavior>crossNetworkStrategy</writeBehavior>
<redundancy>{{ .Values.artifactory.persistence.redundancy }}</redundancy>
<lenientLimit>2</lenientLimit>
<minSpareUploaderExecutor>2</minSpareUploaderExecutor>
<sub-provider id="state-aware" type="state-aware"/>
<dynamic-provider id="remote" type="remote"/>
<property name="zones" value="local,remote"/>
</provider>
</provider>
</chain>
<provider id="cache-fs" type="cache-fs">
<maxCacheSize>{{ .Values.artifactory.persistence.maxCacheSize }}</maxCacheSize>
<cacheProviderDir>{{ .Values.artifactory.persistence.cacheProviderDir }}</cacheProviderDir>
</provider>
<!-- Shards add local file-system provider configuration -->
<provider id="state-aware" type="state-aware">
<fileStoreDir>shard-fs-1</fileStoreDir>
<zone>local</zone>
</provider>
<!-- Shards dynamic remote provider configuration -->
<provider id="remote" type="remote">
<checkPeriod>30</checkPeriod>
<serviceId>tester-remote1</serviceId>
<timeout>10000</timeout>
<zone>remote</zone>
<property name="header.remote.block" value="true"/>
</provider>
</config>
{{- end }}
{{- end }}
{{- if eq .Values.artifactory.persistence.type "google-storage" }}
<!-- Google storage -->
<config version="2">
<chain>
<provider id="sharding-cluster" type="sharding-cluster">
<readBehavior>crossNetworkStrategy</readBehavior>
<writeBehavior>crossNetworkStrategy</writeBehavior>
<redundancy>{{ .Values.artifactory.persistence.redundancy }}</redundancy>
<minSpareUploaderExecutor>2</minSpareUploaderExecutor>
<sub-provider id="eventual-cluster" type="eventual-cluster">
<provider id="retry" type="retry">
<provider id="google-storage" type="google-storage"/>
</provider>
</sub-provider>
<dynamic-provider id="remote" type="remote"/>
<property name="zones" value="local,remote"/>
</provider>
</chain>
<!-- Set max cache-fs size -->
<provider id="cache-fs" type="cache-fs">
<maxCacheSize>{{ .Values.artifactory.persistence.maxCacheSize }}</maxCacheSize>
<cacheProviderDir>{{ .Values.artifactory.persistence.cacheProviderDir }}</cacheProviderDir>
</provider>
<provider id="eventual-cluster" type="eventual-cluster">
<zone>local</zone>
</provider>
<provider id="remote" type="remote">
<checkPeriod>30</checkPeriod>
<timeout>10000</timeout>
<zone>remote</zone>
</provider>
<provider id="file-system" type="file-system">
<fileStoreDir>{{ .Values.artifactory.persistence.mountPath }}/data/filestore</fileStoreDir>
<tempDir>/tmp</tempDir>
</provider>
<provider id="google-storage" type="google-storage">
<providerId>google-cloud-storage</providerId>
<endpoint>{{ .Values.artifactory.persistence.googleStorage.endpoint }}</endpoint>
<httpsOnly>{{ .Values.artifactory.persistence.googleStorage.httpsOnly }}</httpsOnly>
<bucketName>{{ .Values.artifactory.persistence.googleStorage.bucketName }}</bucketName>
<identity>{{ .Values.artifactory.persistence.googleStorage.identity }}</identity>
<credential>{{ .Values.artifactory.persistence.googleStorage.credential }}</credential>
<path>{{ .Values.artifactory.persistence.googleStorage.path }}</path>
<bucketExists>{{ .Values.artifactory.persistence.googleStorage.bucketExists }}</bucketExists>
</provider>
</config>
{{- end }}
{{- if eq .Values.artifactory.persistence.type "aws-s3-v3" }}
<!-- AWS S3 V3 -->
<config version="2">
<chain> <!--template="cluster-s3-storage-v3"-->
<provider id="cache-fs-eventual-s3" type="cache-fs">
<provider id="sharding-cluster-eventual-s3" type="sharding-cluster">
<sub-provider id="eventual-cluster-s3" type="eventual-cluster">
<provider id="retry-s3" type="retry">
<provider id="s3-storage-v3" type="s3-storage-v3"/>
</provider>
</sub-provider>
<dynamic-provider id="remote-s3" type="remote"/>
</provider>
</provider>
</chain>
<provider id="sharding-cluster-eventual-s3" type="sharding-cluster">
<readBehavior>crossNetworkStrategy</readBehavior>
<writeBehavior>crossNetworkStrategy</writeBehavior>
<redundancy>{{ .Values.artifactory.persistence.redundancy }}</redundancy>
<property name="zones" value="local,remote"/>
</provider>
<provider id="remote-s3" type="remote">
<zone>remote</zone>
</provider>
<provider id="eventual-cluster-s3" type="eventual-cluster">
<zone>local</zone>
</provider>
<!-- Set max cache-fs size -->
<provider id="cache-fs-eventual-s3" type="cache-fs">
<maxCacheSize>{{ .Values.artifactory.persistence.maxCacheSize }}</maxCacheSize>
<cacheProviderDir>{{ .Values.artifactory.persistence.cacheProviderDir }}</cacheProviderDir>
</provider>
{{- with .Values.artifactory.persistence.awsS3V3 }}
<provider id="s3-storage-v3" type="s3-storage-v3">
<testConnection>{{ .testConnection }}</testConnection>
{{- if .identity }}
<identity>{{ .identity }}</identity>
{{- end }}
{{- if .credential }}
<credential>{{ .credential }}</credential>
{{- end }}
<region>{{ .region }}</region>
<bucketName>{{ .bucketName }}</bucketName>
<path>{{ .path }}</path>
<endpoint>{{ .endpoint }}</endpoint>
{{- with .kmsServerSideEncryptionKeyId }}
<kmsServerSideEncryptionKeyId>{{ . }}</kmsServerSideEncryptionKeyId>
{{- end }}
{{- with .kmsKeyRegion }}
<kmsKeyRegion>{{ . }}</kmsKeyRegion>
{{- end }}
{{- with .kmsCryptoMode }}
<kmsCryptoMode>{{ . }}</kmsCryptoMode>
{{- end }}
<useInstanceCredentials>true</useInstanceCredentials>
<usePresigning>{{ .usePresigning }}</usePresigning>
<signatureExpirySeconds>{{ .signatureExpirySeconds }}</signatureExpirySeconds>
{{- with .cloudFrontDomainName }}
<cloudFrontDomainName>{{ . }}</cloudFrontDomainName>
{{- end }}
{{- with .cloudFrontKeyPairId }}
<cloudFrontKeyPairId>{{ .cloudFrontKeyPairId }}</cloudFrontKeyPairId>
{{- end }}
{{- with .cloudFrontPrivateKey }}
<cloudFrontPrivateKey>{{ . }}</cloudFrontPrivateKey>
{{- end }}
</provider>
{{- end }}
</config>
{{- end }}
{{- if eq .Values.artifactory.persistence.type "aws-s3" }}
<!-- AWS S3 -->
<config version="2">
<chain> <!--template="cluster-s3"-->
<provider id="cache-fs" type="cache-fs">
<provider id="sharding-cluster" type="sharding-cluster">
<sub-provider id="eventual-cluster" type="eventual-cluster">
<provider id="retry-s3" type="retry">
<provider id="s3" type="s3"/>
</provider>
</sub-provider>
<dynamic-provider id="remote" type="remote"/>
</provider>
</provider>
</chain>
<!-- Set max cache-fs size -->
<provider id="cache-fs" type="cache-fs">
<maxCacheSize>{{ .Values.artifactory.persistence.maxCacheSize }}</maxCacheSize>
<cacheProviderDir>{{ .Values.artifactory.persistence.cacheProviderDir }}</cacheProviderDir>
</provider>
<provider id="eventual-cluster" type="eventual-cluster">
<zone>local</zone>
</provider>
<provider id="remote" type="remote">
<checkPeriod>30</checkPeriod>
<timeout>10000</timeout>
<zone>remote</zone>
</provider>
<provider id="sharding-cluster" type="sharding-cluster">
<readBehavior>crossNetworkStrategy</readBehavior>
<writeBehavior>crossNetworkStrategy</writeBehavior>
<redundancy>{{ .Values.artifactory.persistence.redundancy }}</redundancy>
<property name="zones" value="local,remote"/>
</provider>
<provider id="s3" type="s3">
<endpoint>{{ .Values.artifactory.persistence.awsS3.endpoint }}</endpoint>
{{- if .Values.artifactory.persistence.awsS3.roleName }}
<roleName>{{ .Values.artifactory.persistence.awsS3.roleName }}</roleName>
<refreshCredentials>true</refreshCredentials>
{{- else }}
<refreshCredentials>{{ .Values.artifactory.persistence.awsS3.refreshCredentials }}</refreshCredentials>
{{- end }}
<s3AwsVersion>{{ .Values.artifactory.persistence.awsS3.s3AwsVersion }}</s3AwsVersion>
<testConnection>{{ .Values.artifactory.persistence.awsS3.testConnection }}</testConnection>
<httpsOnly>{{ .Values.artifactory.persistence.awsS3.httpsOnly }}</httpsOnly>
<region>{{ .Values.artifactory.persistence.awsS3.region }}</region>
<bucketName>{{ .Values.artifactory.persistence.awsS3.bucketName }}</bucketName>
{{- if .Values.artifactory.persistence.awsS3.identity }}
<identity>{{ .Values.artifactory.persistence.awsS3.identity }}</identity>
{{- end }}
{{- if .Values.artifactory.persistence.awsS3.credential }}
<credential>{{ .Values.artifactory.persistence.awsS3.credential }}</credential>
{{- end }}
<path>{{ .Values.artifactory.persistence.awsS3.path }}</path>
{{- range $key, $value := .Values.artifactory.persistence.awsS3.properties }}
<property name="{{ $key }}" value="{{ $value }}"/>
{{- end }}
</provider>
</config>
{{- end }}
{{- if eq .Values.artifactory.persistence.type "azure-blob" }}
<!-- Azure Blob Storage -->
<config version="2">
<chain> <!--template="cluster-azure-blob-storage"-->
<provider id="cache-fs" type="cache-fs">
<provider id="sharding-cluster" type="sharding-cluster">
<sub-provider id="eventual-cluster" type="eventual-cluster">
<provider id="retry-azure-blob-storage" type="retry">
<provider id="azure-blob-storage" type="azure-blob-storage"/>
</provider>
</sub-provider>
<dynamic-provider id="remote" type="remote"/>
</provider>
</provider>
</chain>
<!-- Set max cache-fs size -->
<provider id="cache-fs" type="cache-fs">
<maxCacheSize>{{ .Values.artifactory.persistence.maxCacheSize }}</maxCacheSize>
<cacheProviderDir>{{ .Values.artifactory.persistence.cacheProviderDir }}</cacheProviderDir>
</provider>
<!-- cluster eventual Azure Blob Storage Service default chain -->
<provider id="sharding-cluster" type="sharding-cluster">
<readBehavior>crossNetworkStrategy</readBehavior>
<writeBehavior>crossNetworkStrategy</writeBehavior>
<redundancy>2</redundancy>
<lenientLimit>1</lenientLimit>
<property name="zones" value="local,remote"/>
</provider>
<provider id="remote" type="remote">
<zone>remote</zone>
</provider>
<provider id="eventual-cluster" type="eventual-cluster">
<zone>local</zone>
</provider>
<!--cluster eventual template-->
<provider id="azure-blob-storage" type="azure-blob-storage">
<accountName>{{ .Values.artifactory.persistence.azureBlob.accountName }}</accountName>
<accountKey>{{ .Values.artifactory.persistence.azureBlob.accountKey }}</accountKey>
<endpoint>{{ .Values.artifactory.persistence.azureBlob.endpoint }}</endpoint>
<containerName>{{ .Values.artifactory.persistence.azureBlob.containerName }}</containerName>
<testConnection>{{ .Values.artifactory.persistence.azureBlob.testConnection }}</testConnection>
</provider>
</config>
{{- end }}
cacheProviderDir: cache
customBinarystoreXmlSecret: null
enabled: true
eventual:
numberOfThreads: 10
fileSystem:
existingSharedClaim:
backupDir: /var/opt/jfrog/artifactory-backup
dataDir: '{{ .Values.artifactory.persistence.mountPath }}/artifactory-data'
enabled: false
numberOfExistingClaims: 1
googleStorage:
bucketExists: false
bucketName: artifactory-ha-gcp
credential: null
endpoint: storage.googleapis.com
httpsOnly: false
identity: null
path: artifactory-ha/filestore
local: false
maxCacheSize: 50000000000
mountPath: /var/opt/jfrog/artifactory
nfs:
backupDir: /var/opt/jfrog/artifactory-backup
capacity: 200Gi
dataDir: /var/opt/jfrog/artifactory-ha
haBackupMount: /backup
haDataMount: /data
ip: null
mountOptions: []
redundancy: 3
size: 200Gi
type: file-system
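# persistence.type selects which branch of binarystoreXml above is rendered;
# the supported values in that template are file-system (the default here),
# google-storage, aws-s3, aws-s3-v3 and azure-blob.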
primary:
affinity: {}
javaOpts:
corePoolSize: 16
jmx:
accessFile: null
authenticate: false
enabled: false
host: null
passwordFile: null
port: 9010
ssl: false
labels: {}
name: artifactory-ha-primary
nodeSelector: {}
persistence:
existingClaim: false
podAntiAffinity:
topologyKey: kubernetes.io/hostname
type: ""
resources: {}
tolerations: []
priorityClass:
create: false
value: 1000000000
readinessProbe:
enabled: true
failureThreshold: 10
initialDelaySeconds: 60
path: /router/api/v1/system/health
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 10
service:
annotations: {}
loadBalancerSourceRanges: []
name: artifactory
pool: members
type: ClusterIP
systemYaml: |
shared:
extraJavaOpts: >
{{- with .Values.artifactory.primary.javaOpts }}
-Dartifactory.async.corePoolSize={{ .corePoolSize }}
{{- if .xms }}
-Xms{{ .xms }}
{{- end }}
{{- if .xmx }}
-Xmx{{ .xmx }}
{{- end }}
{{- if .jmx.enabled }}
-Dcom.sun.management.jmxremote
-Dcom.sun.management.jmxremote.port={{ .jmx.port }}
-Dcom.sun.management.jmxremote.rmi.port={{ .jmx.port }}
-Dcom.sun.management.jmxremote.ssl={{ .jmx.ssl }}
{{- if .jmx.host }}
-Djava.rmi.server.hostname={{ tpl .jmx.host $ }}
{{- else }}
-Djava.rmi.server.hostname={{ template "artifactory-ha.fullname" $ }}
{{- end }}
{{- if .jmx.authenticate }}
-Dcom.sun.management.jmxremote.authenticate=true
-Dcom.sun.management.jmxremote.access.file={{ .jmx.accessFile }}
-Dcom.sun.management.jmxremote.password.file={{ .jmx.passwordFile }}
{{- else }}
-Dcom.sun.management.jmxremote.authenticate=false
{{- end }}
{{- end }}
{{- if .other }}
{{ .other }}
{{- end }}
{{- end }}
database:
{{- if .Values.postgresql.enabled }}
type: postgresql
url: 'jdbc:postgresql://{{ .Release.Name }}-postgresql:{{ .Values.postgresql.service.port }}/{{ .Values.postgresql.postgresqlDatabase }}'
host: ''
driver: org.postgresql.Driver
username: '{{ .Values.postgresql.postgresqlUsername }}'
password: '{{ .Values.postgresql.postgresqlPassword }}'
{{ else }}
type: '{{ .Values.database.type }}'
url: '{{ .Values.database.url }}'
driver: '{{ .Values.database.driver }}'
username: '{{ .Values.database.user }}'
password: '{{ .Values.database.password }}'
{{- end }}
security:
joinKey: '{{ .Values.artifactory.joinKey }}'
masterKey: '{{ .Values.artifactory.masterKey }}'
artifactory:
{{- if .Values.artifactory.haDataDir.enabled }}
node:
haDataDir: {{ .Values.artifactory.haDataDir.path }}
{{- end }}
database:
maxOpenConnections: {{ .Values.artifactory.database.maxOpenConnections }}
access:
database:
maxOpenConnections: '{{ .Values.access.database.maxOpenConnections }}'
{{- if .Values.access.database.enabled }}
type: '{{ .Values.access.database.type }}'
url: '{{ .Values.access.database.url }}'
driver: '{{ .Values.access.database.driver }}'
username: '{{ .Values.access.database.user }}'
password: '{{ .Values.access.database.password }}'
{{- end }}
terminationGracePeriodSeconds: 30
uid: 1030
userPluginSecrets: null
database:
driver: null
password: null
secrets: {}
type: null
url: null
user: null
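# These fields configure an external database and are only used when
# postgresql.enabled is false (see the systemYaml template above). An
# illustrative sketch for an external PostgreSQL (host, port and credentials
# are placeholders):
#   type: postgresql
#   driver: org.postgresql.Driver
#   url: jdbc:postgresql://db.example.com:5432/artifactory
#   user: artifactory
#   password: <database password>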
filebeat:
enabled: false
filebeatYml: |
logging.level: info
path.data: {{ .Values.artifactory.persistence.mountPath }}/log/filebeat
name: artifactory-filebeat
queue.spool: ~
filebeat.inputs:
- type: log
enabled: true
close_eof: ${CLOSE:false}
paths:
- {{ .Values.artifactory.persistence.mountPath }}/log/*.log
fields:
service: "jfrt"
log_type: "artifactory"
output:
logstash:
hosts: ["{{ .Values.filebeat.logstashUrl }}"]
image:
repository: docker.elastic.co/beats/filebeat
version: 7.5.1
livenessProbe:
exec:
command:
- sh
- -c
- |
#!/usr/bin/env bash -e
curl --fail 127.0.0.1:5066
failureThreshold: 3
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 5
logstashUrl: logstash:5044
name: artifactory-filebeat
readinessProbe:
exec:
command:
- sh
- -c
- |
#!/usr/bin/env bash -e
filebeat test output
failureThreshold: 3
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 5
resources: {}
terminationGracePeriod: 10
imagePullSecrets: null
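# Images default to the earlyaccess.jfrog.io registry; if pulling requires
# authentication, create a docker-registry secret and reference it here
# (secret name and credentials below are assumptions):
#   oc create secret docker-registry jfrog-pull-secret \
#     --docker-server=earlyaccess.jfrog.io \
#     --docker-username=<user> --docker-password=<password>
#   imagePullSecrets: jfrog-pull-secret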
ingress:
additionalRules: []
annotations: {}
artifactoryPath: /artifactory/
defaultBackend:
enabled: true
enabled: false
hosts: []
labels: {}
routerPath: /
tls: []
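# An illustrative ingress override (hostname and TLS secret name are
# assumptions):
#   enabled: true
#   hosts:
#     - artifactory.example.com
#   tls:
#     - secretName: artifactory-tls
#       hosts:
#         - artifactory.example.com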
initContainerImage: alpine:3.10
initContainers:
resources: {}
installer:
platform: null
type: null
logger:
image:
repository: busybox
tag: "1.30"
networkpolicy:
- egress:
- {}
ingress:
- {}
name: artifactory
podSelector:
matchLabels:
app: artifactory-ha
nginx:
affinity: {}
artifactoryConf: |
ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3;
ssl_certificate {{ .Values.nginx.persistence.mountPath }}/ssl/tls.crt;
ssl_certificate_key {{ .Values.nginx.persistence.mountPath }}/ssl/tls.key;
ssl_session_cache shared:SSL:1m;
ssl_prefer_server_ciphers on;
## server configuration
server {
{{- if .Values.nginx.internalPortHttps }}
listen {{ .Values.nginx.internalPortHttps }} ssl;
{{- else -}}
{{- if .Values.nginx.https.enabled }}
listen {{ .Values.nginx.https.internalPort }} ssl;
{{- end }}
{{- end }}
{{- if .Values.nginx.internalPortHttp }}
listen {{ .Values.nginx.internalPortHttp }};
{{- else -}}
{{- if .Values.nginx.http.enabled }}
listen {{ .Values.nginx.http.internalPort }};
{{- end }}
{{- end }}
server_name ~(?<repo>.+)\.{{ include "artifactory-ha.fullname" . }} {{ include "artifactory-ha.fullname" . }}
{{- range .Values.ingress.hosts -}}
{{- if contains "." . -}}
{{ "" | indent 0 }} ~(?<repo>.+)\.{{ (splitn "." 2 .)._1 }} {{ . }}
{{- end -}}
{{- end -}};
if ($http_x_forwarded_proto = '') {
set $http_x_forwarded_proto $scheme;
}
## Application specific logs
## access_log /var/log/nginx/artifactory-access.log timing;
## error_log /var/log/nginx/artifactory-error.log;
rewrite ^/artifactory/?$ / redirect;
if ( $repo != "" ) {
rewrite ^/(v1|v2)/(.*) /artifactory/api/docker/$repo/$1/$2 break;
}
chunked_transfer_encoding on;
client_max_body_size 0;
location / {
proxy_read_timeout 900;
proxy_pass_header Server;
proxy_cookie_path ~*^/.* /;
proxy_pass http://{{ include "artifactory-ha.fullname" . }}:{{ .Values.artifactory.externalPort }}/;
proxy_set_header X-JFrog-Override-Base-Url $http_x_forwarded_proto://$host:$server_port;
proxy_set_header X-Forwarded-Port $server_port;
proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
proxy_set_header Host $http_host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
location /artifactory/ {
if ( $request_uri ~ ^/artifactory/(.*)$ ) {
proxy_pass http://{{ include "artifactory-ha.fullname" . }}:{{ .Values.artifactory.externalArtifactoryPort }}/artifactory/$1;
}
proxy_pass http://{{ include "artifactory-ha.fullname" . }}:{{ .Values.artifactory.externalArtifactoryPort }}/artifactory/;
}
}
}
customArtifactoryConfigMap: null
customConfigMap: null
enabled: true
gid: 107
http:
enabled: true
externalPort: 80
internalPort: 80
https:
enabled: true
externalPort: 443
internalPort: 443
image:
pullPolicy: IfNotPresent
repository: earlyaccess.jfrog.io/nginx-artifactory-pro
labels: {}
livenessProbe:
enabled: true
failureThreshold: 10
initialDelaySeconds: 60
path: /router/api/v1/system/health
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 10
loggers: []
mainConf: |
# Main Nginx configuration file
worker_processes 4;
error_log {{ .Values.nginx.persistence.mountPath }}/logs/error.log warn;
pid /tmp/nginx.pid;
events {
worker_connections 1024;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
variables_hash_max_size 1024;
variables_hash_bucket_size 64;
server_names_hash_max_size 4096;
server_names_hash_bucket_size 128;
types_hash_max_size 2048;
types_hash_bucket_size 64;
proxy_read_timeout 2400s;
client_header_timeout 2400s;
client_body_timeout 2400s;
proxy_connect_timeout 75s;
proxy_send_timeout 2400s;
proxy_buffer_size 32k;
proxy_buffers 40 32k;
proxy_busy_buffers_size 64k;
proxy_temp_file_write_size 250m;
proxy_http_version 1.1;
client_body_buffer_size 128k;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
log_format timing 'ip = $remote_addr '
'user = \"$remote_user\" '
'local_time = \"$time_local\" '
'host = $host '
'request = \"$request\" '
'status = $status '
'bytes = $body_bytes_sent '
'upstream = \"$upstream_addr\" '
'upstream_time = $upstream_response_time '
'request_time = $request_time '
'referer = \"$http_referer\" '
'UA = \"$http_user_agent\"';
access_log {{ .Values.nginx.persistence.mountPath }}/logs/access.log timing;
sendfile on;
#tcp_nopush on;
keepalive_timeout 65;
#gzip on;
include /etc/nginx/conf.d/*.conf;
}
name: nginx
nodeSelector: {}
persistence:
accessMode: ReadWriteOnce
enabled: false
mountPath: /var/opt/jfrog/nginx
size: 5Gi
readinessProbe:
enabled: true
failureThreshold: 10
initialDelaySeconds: 10
path: /router/api/v1/system/health
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 10
replicaCount: 1
resources: {}
service:
externalTrafficPolicy: Cluster
labels: {}
loadBalancerIP: null
loadBalancerSourceRanges: []
type: LoadBalancer
tolerations: []
uid: 104
postgresql:
enabled: true
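# The bundled PostgreSQL subchart is used by default; to point Artifactory at
# an external database instead, set this to false and fill in the top-level
# `database` block above.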
extraEnv: []
global:
postgresql: {}
image:
debug: false
pullPolicy: IfNotPresent
registry: docker.bintray.io
repository: bitnami/postgresql
tag: 9.6.15-debian-9-r91
livenessProbe:
enabled: true
failureThreshold: 6
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
master:
affinity: {}
annotations: {}
extraVolumeMounts: []
extraVolumes: []
labels: {}
nodeSelector: {}
podAnnotations: {}
podLabels: {}
tolerations: []
metrics:
enabled: false
image:
pullPolicy: IfNotPresent
registry: docker.io
repository: bitnami/postgres-exporter
tag: 0.6.0-debian-9-r0
livenessProbe:
enabled: true
failureThreshold: 6
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
readinessProbe:
enabled: true
failureThreshold: 6
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
securityContext:
enabled: false
runAsUser: 1001
service:
annotations:
prometheus.io/port: "9187"
prometheus.io/scrape: "true"
loadBalancerIP: null
type: ClusterIP
serviceMonitor:
additionalLabels: {}
enabled: false
networkPolicy:
allowExternal: true
enabled: false
nodeSelector: {}
persistence:
accessModes:
- ReadWriteOnce
annotations: {}
enabled: true
mountPath: /bitnami/postgresql
size: 50Gi
subPath: ""
postgresqlConfiguration:
listenAddresses: '''*'''
maxConnections: "1500"
postgresqlDataDir: /bitnami/postgresql/data
postgresqlDatabase: artifactory
postgresqlPassword: ""
postgresqlUsername: artifactory
readinessProbe:
enabled: true
failureThreshold: 6
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
replication:
applicationName: my_application
enabled: false
numSynchronousReplicas: 0
password: repl_password
slaveReplicas: 1
synchronousCommit: "off"
user: repl_user
resources:
requests:
cpu: 250m
memory: 256Mi
securityContext:
enabled: true
fsGroup: 1001
runAsUser: 1001
service:
annotations: {}
port: 5432
type: ClusterIP
serviceAccount:
enabled: false
slave:
affinity: {}
annotations: {}
extraVolumeMounts: []
extraVolumes: []
labels: {}
nodeSelector: {}
podAnnotations: {}
podLabels: {}
tolerations: []
updateStrategy:
type: RollingUpdate
volumePermissions:
enabled: true
image:
pullPolicy: Always
registry: docker.io
repository: bitnami/minideb
tag: stretch
securityContext:
runAsUser: 0
rbac:
create: true
role:
rules:
- apiGroups:
- ""
resources:
- services
- endpoints
- pods
verbs:
- get
- watch
- list
serviceAccount:
annotations: {}
create: true
name: null
waitForDatabase: true
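# With the CRD and operator installed, this resource can be applied as-is,
# e.g. `oc apply -f <file> -n <namespace>` (file and namespace are
# placeholders).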