upgraded to artifactory 7.21.5 and xray 3.27.2

Vinay Aggarwal
2021-07-11 15:54:45 -07:00
parent 4ac8b06ce2
commit fbba8620f0
110 changed files with 2378 additions and 562 deletions

View File

@@ -0,0 +1,5 @@
- hosts: localhost
gather_facts: true
become: true
roles:
- name: artifactory-ami

View File

@@ -0,0 +1,29 @@
---
language: python
python: "2.7"
# Use the new container infrastructure
sudo: false
# Install ansible
addons:
apt:
packages:
- python-pip
install:
# Install ansible
- pip install ansible
# Check ansible version
- ansible --version
# Create ansible.cfg with correct roles_path
- printf '[defaults]\nroles_path=../' >ansible.cfg
script:
# Basic role syntax check
- ansible-playbook tests/test.yml -i tests/inventory --syntax-check
notifications:
webhooks: https://galaxy.ansible.com/api/v1/notifications/

View File

@@ -0,0 +1,51 @@
---
# defaults file for artifactory
# indicates where this collection was downloaded from (galaxy, automation_hub, standalone)
ansible_marketplace: standalone
# whether we are creating an AMI for Marketplace or just configuring an EC2 instance
ami_creation: false
# The version of Artifactory to install
artifactory_version: 7.15.3
# licenses - specify a license file or specify up to 6 individual licenses
artifactory_license1:
artifactory_license2:
artifactory_license3:
artifactory_license4:
artifactory_license5:
artifactory_license6:
# whether to enable HA
artifactory_ha_enabled: true
# value for whether a host is primary. this should be set in host vars
artifactory_is_primary: true
# The location where Artifactory should install.
artifactory_download_directory: /opt/jfrog
# The location where Artifactory should store data.
artifactory_file_store_dir: /data
extra_java_opts: -server -Xms2g -Xmx14g -Xss256k -XX:+UseG1GC
artifactory_tar: https://releases.jfrog.io/artifactory/artifactory-pro/org/artifactory/pro/jfrog-artifactory-pro/{{ artifactory_version }}/jfrog-artifactory-pro-{{ artifactory_version }}-linux.tar.gz
artifactory_home: "{{ artifactory_download_directory }}/artifactory-pro-{{ artifactory_version }}"
db_download_url: "https://jdbc.postgresql.org/download/postgresql-42.2.12.jar"
artifactory_user: artifactory
artifactory_group: artifactory
# Set the parameters required for the service.
service_list:
- name: artifactory
description: Start script for Artifactory
start_command: "{{ artifactory_home }}/bin/artifactory.sh start"
stop_command: "{{ artifactory_home }}/bin/artifactory.sh stop"
type: forking
status_pattern: artifactory
user_name: "{{ artifactory_user }}"
group_name: "{{ artifactory_group }}"
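
A minimal sketch of how these defaults might be overridden at deploy time, assuming a hypothetical group_vars/host_vars layout; every value below is a placeholder, not a real license or host:

# group_vars/artifactory.yml (hypothetical example)
artifactory_version: 7.21.5
artifactory_ha_enabled: true
artifactory_license1: "<license-block-1-placeholder>"
artifactory_license2: "<license-block-2-placeholder>"
artifactory_file_store_dir: /data

# host_vars/artifactory-primary.yml (hypothetical example)
artifactory_is_primary: true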

View File

@@ -0,0 +1,60 @@
---
# defaults file for artifactory
# indicates where this collection was downloaded from (galaxy, automation_hub, standalone)
ansible_marketplace: standalone
# whether we are creating an AMI for Marketplace or just configuring an EC2 instance
ami_creation: false
# The version of Artifactory to install
artifactory_version: 7.15.3
# licenses - specify a license file or specify up to 6 individual licenses
artifactory_license1:
artifactory_license2:
artifactory_license3:
artifactory_license4:
artifactory_license5:
artifactory_license6:
# whether to enable HA
artifactory_ha_enabled: true
# value for whether a host is primary. this should be set in host vars
artifactory_is_primary: true
# The location where Artifactory should install.
artifactory_download_directory: /opt/jfrog
# The location where Artifactory should store data.
artifactory_file_store_dir: /data
extra_java_opts: -server -Xms2g -Xmx14g -Xss256k -XX:+UseG1GC
# Pick the Artifactory flavour to install; can also be cpp-ce, jcr, or pro.
# for Artifactory, use the following values
artifactory_flavour: pro
artifactory_tar: https://releases.jfrog.io/artifactory/artifactory-pro/org/artifactory/{{ artifactory_flavour }}/jfrog-artifactory-{{ artifactory_flavour }}/{{ artifactory_version }}/jfrog-artifactory-{{ artifactory_flavour }}-{{ artifactory_version }}-linux.tar.gz
# for JCR, use the following values
# artifactory_flavour: jcr
# artifactory_tar: https://dl.bintray.com/jfrog/artifactory/org/artifactory/{{ artifactory_flavour }}/jfrog-artifactory-{{ artifactory_flavour }}/{{ artifactory_version }}/jfrog-artifactory-{{ artifactory_flavour }}-{{ artifactory_version }}-linux.tar.gz
artifactory_home: "{{ artifactory_download_directory }}/artifactory-{{ artifactory_flavour }}-{{ artifactory_version }}"
db_download_url: "https://jdbc.postgresql.org/download/postgresql-42.2.12.jar"
artifactory_user: artifactory
artifactory_group: artifactory
# Set the parameters required for the service.
service_list:
- name: artifactory
description: Start script for Artifactory
start_command: "{{ artifactory_home }}/bin/artifactory.sh start"
stop_command: "{{ artifactory_home }}/bin/artifactory.sh stop"
type: forking
status_pattern: artifactory
user_name: "{{ artifactory_user }}"
group_name: "{{ artifactory_group }}"

View File

@@ -0,0 +1,10 @@
---
# handlers file for artifactory
- name: systemctl daemon-reload
systemd:
daemon_reload: yes
- name: restart artifactory
service:
name: artifactory
state: restarted

View File

@@ -0,0 +1,6 @@
---
exceptions:
- variation: Alpine
reason: Artifactory start/stop scripts don't work properly.
- variation: amazonlinux:1
reason: "Shutting down artifactory: /usr/bin/java\nfinding\nUsing the default catalina management port (8015) to test shutdown\nArtifactory Tomcat already stopped"

View File

@@ -0,0 +1,35 @@
---
galaxy_info:
author: Robert de Bock
role_name: artifactory
description: Install and configure artifactory on your system.
license: Apache-2.0
company: none
min_ansible_version: 2.8
platforms:
- name: Debian
versions:
- all
- name: EL
versions:
- 7
- 8
- name: Fedora
versions:
- all
- name: OpenSUSE
versions:
- all
- name: Ubuntu
versions:
- bionic
galaxy_tags:
- artifactory
- centos
- redhat
- server
- system
dependencies: []

View File

@@ -0,0 +1,2 @@
---
tox_parallel: yes

View File

@@ -0,0 +1,6 @@
---
project_name: JFrog
reference: "https://github.com/robertdebock/ansible-role-artifactory/blob/master/defaults/main.yml"
versions:
- name: Artifactory
url: "https://releases.jfrog.io/artifactory/"

View File

@@ -0,0 +1,82 @@
---
# tasks file for artifactory
- name: install nginx
include_role:
name: artifactory-nginx-ami
- name: create group for artifactory
group:
name: "{{ artifactory_group }}"
state: present
become: yes
- name: create user for artifactory
user:
name: "{{ artifactory_user }}"
group: "{{ artifactory_group }}"
system: yes
become: yes
- name: ensure artifactory_download_directory exists
file:
path: "{{ artifactory_download_directory }}"
state: directory
become: yes
- name: download artifactory
unarchive:
src: "{{ artifactory_tar }}"
dest: "{{ artifactory_download_directory }}"
remote_src: yes
owner: "{{ artifactory_user }}"
group: "{{ artifactory_group }}"
creates: "{{ artifactory_home }}"
become: yes
register: downloadartifactory
until: downloadartifactory is succeeded
retries: 3
- name: ensure artifactory_file_store_dir exists
file:
path: "{{ artifactory_file_store_dir }}"
state: directory
owner: "{{ artifactory_user }}"
group: "{{ artifactory_group }}"
become: yes
- name: ensure data subdirectories exist
file:
path: "{{ artifactory_home }}/var/{{ item }}"
state: directory
owner: "{{ artifactory_user }}"
group: "{{ artifactory_group }}"
loop:
- "bootstrap/artifactory/tomcat/lib"
- "etc"
become: yes
- name: download database driver
get_url:
url: "{{ db_download_url }}"
dest: "{{ artifactory_home }}/var/bootstrap/artifactory/tomcat/lib"
owner: "{{ artifactory_user }}"
group: "{{ artifactory_group }}"
become: yes
- name: clean up after creating ami
block:
- name: Remove SSH keys
file:
path: "{{ ssh_keys.dir }}"
state: absent
loop:
- dir: "/home/.jfrog_ami/.ssh/authorized_keys"
- dir: "/root/.ssh/authorized_keys"
- dir: "/home/centos/.ssh/authorized_keys"
loop_control:
loop_var: ssh_keys
- name: shutdown VM
command: /sbin/shutdown -h now
ignore_errors: 'yes'
when: ami_creation

View File

@@ -0,0 +1,37 @@
{% if artifactory_license1 %}
{% if artifactory_license1|length %}
{{ artifactory_license1 }}
{% endif %}
{% endif %}
{% if artifactory_license2 %}
{% if artifactory_license2|length %}
{{ artifactory_license2 }}
{% endif %}
{% endif %}
{% if artifactory_license3 %}
{% if artifactory_license3|length %}
{{ artifactory_license3 }}
{% endif %}
{% endif %}
{% if artifactory_license4 %}
{% if artifactory_license4|length %}
{{ artifactory_license4 }}
{% endif %}
{% endif %}
{% if artifactory_license5 %}
{% if artifactory_license5|length %}
{{ artifactory_license5 }}
{% endif %}
{% endif %}
{% if artifactory_license6 %}
{% if artifactory_license6|length %}
{{ artifactory_license6 }}
{% endif %}
{% endif %}

View File

@@ -0,0 +1,4 @@
<?xml version="1.0" encoding="UTF-8"?>
<config version="2">
<chain template="cluster-file-system"/>
</config>

View File

@@ -0,0 +1,12 @@
{
"productId": "Ansible_artifactory/1.0.0",
"features": [
{
"featureId": "Partner/ACC-006973"
},
{
"featureId": "Channel/{{ ansible_marketplace }}"
}
]
}

View File

@@ -0,0 +1,39 @@
## @formatter:off
## JFROG ARTIFACTORY SYSTEM CONFIGURATION FILE
## HOW TO USE: uncomment any field and keep the correct yaml indentation by deleting only the leading '#' character.
configVersion: 1
## NOTE: JFROG_HOME is a placeholder for the JFrog root directory containing the deployed product, the home directory for all JFrog products.
## Replace JFROG_HOME with the real path! For example, in an RPM install, JFROG_HOME=/opt/jfrog
## NOTE: Sensitive information such as passwords and join key are encrypted on first read.
## NOTE: The provided commented key and value is the default.
## SHARED CONFIGURATIONS
## A shared section for keys across all services in this config
shared:
## Node Settings
node:
## A unique id to identify this node.
## Default: auto generated at startup.
id: {{ ansible_machine_id }}
## Sets this node as primary in HA installation
# primary: {{ artifactory_is_primary }}
Affinity: "any"
## Sets this node as part of HA installation
haEnabled: {{ artifactory_ha_enabled }}
## Database Configuration
database:
## One of: mysql, oracle, mssql, postgresql, mariadb
## Default: Embedded derby
## Example for mysql/postgresql
type: "{{ db_type }}"
driver: "{{ db_driver }}"
url: "{{ db_url }}"
username: "{{ db_user }}"
password: "{{ db_password }}"
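
The template above pulls its database settings from variables that are not defined in the role defaults; a minimal sketch of what they might look like for a PostgreSQL backend (endpoint and credentials are placeholders):

# example extra vars consumed by system.yaml.j2 (placeholder values)
db_type: postgresql
db_driver: org.postgresql.Driver
db_url: jdbc:postgresql://db.example.internal:5432/artifactory
db_user: artifactory
db_password: "<db-password-placeholder>"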

View File

@@ -0,0 +1,29 @@
---
language: python
python: "2.7"
# Use the new container infrastructure
sudo: false
# Install ansible
addons:
apt:
packages:
- python-pip
install:
# Install ansible
- pip install ansible
# Check ansible version
- ansible --version
# Create ansible.cfg with correct roles_path
- printf '[defaults]\nroles_path=../' >ansible.cfg
script:
# Basic role syntax check
- ansible-playbook tests/test.yml -i tests/inventory --syntax-check
notifications:
webhooks: https://galaxy.ansible.com/api/v1/notifications/

View File

@@ -0,0 +1,2 @@
---
# defaults file for artifactory-nginx

View File

@@ -0,0 +1,37 @@
#user nobody;
worker_processes 1;
error_log /var/log/nginx/error.log info;
#pid logs/nginx.pid;
events {
worker_connections 1024;
}
http {
include mime.types;
variables_hash_max_size 1024;
variables_hash_bucket_size 64;
server_names_hash_max_size 4096;
server_names_hash_bucket_size 128;
types_hash_max_size 2048;
types_hash_bucket_size 64;
proxy_read_timeout 2400s;
client_header_timeout 2400s;
client_body_timeout 2400s;
proxy_connect_timeout 75s;
proxy_send_timeout 2400s;
proxy_buffer_size 32k;
proxy_buffers 40 32k;
proxy_busy_buffers_size 64k;
proxy_temp_file_write_size 250m;
proxy_http_version 1.1;
client_body_buffer_size 128k;
include /etc/nginx/conf.d/*.conf;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
#tcp_nopush on;
#keepalive_timeout 0;
keepalive_timeout 65;
}

View File

@@ -0,0 +1,2 @@
---
# handlers file for artifactory-nginx

View File

@@ -0,0 +1,53 @@
galaxy_info:
author: your name
description: your role description
company: your company (optional)
# If the issue tracker for your role is not on github, uncomment the
# next line and provide a value
# issue_tracker_url: http://example.com/issue/tracker
# Choose a valid license ID from https://spdx.org - some suggested licenses:
# - BSD-3-Clause (default)
# - MIT
# - GPL-2.0-or-later
# - GPL-3.0-only
# - Apache-2.0
# - CC-BY-4.0
license: license (GPL-2.0-or-later, MIT, etc)
min_ansible_version: 2.9
# If this is a Container Enabled role, provide the minimum Ansible Container version.
# min_ansible_container_version:
#
# Provide a list of supported platforms, and for each platform a list of versions.
# If you don't wish to enumerate all versions for a particular platform, use 'all'.
# To view available platforms and versions (or releases), visit:
# https://galaxy.ansible.com/api/v1/platforms/
#
# platforms:
# - name: Fedora
# versions:
# - all
# - 25
# - name: SomePlatform
# versions:
# - all
# - 1.0
# - 7
# - 99.99
galaxy_tags: []
# List tags for your role here, one per line. A tag is a keyword that describes
# and categorizes the role. Users find roles by searching for tags. Be sure to
# remove the '[]' above, if you add tags to this list.
#
# NOTE: A tag is limited to a single word comprised of alphanumeric characters.
# Maximum 20 tags per role.
dependencies: []
# List your role dependencies here, one per line. Be sure to remove the '[]' above,
# if you add dependencies to this list.

View File

@@ -0,0 +1,30 @@
---
- name: Add epel-release repo
yum:
name: epel-release
state: present
vars:
ansible_python_interpreter: /bin/python2
- name: Install nginx
yum:
name: nginx
state: present
vars:
ansible_python_interpreter: /bin/python2
- name: configure main nginx conf file.
copy:
src: nginx.conf
dest: /etc/nginx/nginx.conf
owner: root
group: root
mode: '0755'
become: yes
- name: restart nginx
service:
name: nginx
state: restarted
enabled: yes
become: yes

View File

@@ -0,0 +1,43 @@
###########################################################
## this configuration was generated by JFrog Artifactory ##
###########################################################
## add HA entries when HA is configured
upstream artifactory {
server 127.0.0.1:8082;
}
upstream artifactory-direct {
server 127.0.0.1:8081;
}
## server configuration
server {
listen 80 ;
server_name _;
if ($http_x_forwarded_proto = '') {
set $http_x_forwarded_proto $scheme;
}
## Application specific logs
access_log /var/log/nginx/artifactory-access.log;
error_log /var/log/nginx/artifactory-error.log;
rewrite ^/$ /ui/ redirect;
rewrite ^/ui$ /ui/ redirect;
chunked_transfer_encoding on;
client_max_body_size 0;
location / {
proxy_read_timeout 2400s;
proxy_pass_header Server;
proxy_cookie_path ~*^/.* /;
proxy_pass "http://artifactory";
proxy_next_upstream error timeout non_idempotent;
proxy_next_upstream_tries 1;
proxy_set_header X-JFrog-Override-Base-Url $http_x_forwarded_proto://$host:$server_port;
proxy_set_header X-Forwarded-Port $server_port;
proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
proxy_set_header Host $http_host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
location ~ ^/artifactory/ {
proxy_pass http://artifactory-direct;
}
}
}

View File

@@ -0,0 +1,5 @@
---
- hosts: localhost
remote_user: root
roles:
- artifactory-nginx

View File

@@ -0,0 +1,2 @@
---
# vars file for artifactory-nginx

View File

@@ -0,0 +1,29 @@
---
language: python
python: "2.7"
# Use the new container infrastructure
sudo: false
# Install ansible
addons:
apt:
packages:
- python-pip
install:
# Install ansible
- pip install ansible
# Check ansible version
- ansible --version
# Create ansible.cfg with correct roles_path
- printf '[defaults]\nroles_path=../' >ansible.cfg
script:
# Basic role syntax check
- ansible-playbook tests/test.yml -i tests/inventory --syntax-check
notifications:
webhooks: https://galaxy.ansible.com/api/v1/notifications/

View File

@@ -0,0 +1,2 @@
---
# defaults file for artifactory-nginx

View File

@@ -0,0 +1,2 @@
---
# handlers file for artifactory-nginx

View File

@@ -0,0 +1,53 @@
galaxy_info:
author: your name
description: your role description
company: your company (optional)
# If the issue tracker for your role is not on github, uncomment the
# next line and provide a value
# issue_tracker_url: http://example.com/issue/tracker
# Choose a valid license ID from https://spdx.org - some suggested licenses:
# - BSD-3-Clause (default)
# - MIT
# - GPL-2.0-or-later
# - GPL-3.0-only
# - Apache-2.0
# - CC-BY-4.0
license: license (GPL-2.0-or-later, MIT, etc)
min_ansible_version: 2.9
# If this is a Container Enabled role, provide the minimum Ansible Container version.
# min_ansible_container_version:
#
# Provide a list of supported platforms, and for each platform a list of versions.
# If you don't wish to enumerate all versions for a particular platform, use 'all'.
# To view available platforms and versions (or releases), visit:
# https://galaxy.ansible.com/api/v1/platforms/
#
# platforms:
# - name: Fedora
# versions:
# - all
# - 25
# - name: SomePlatform
# versions:
# - all
# - 1.0
# - 7
# - 99.99
galaxy_tags: []
# List tags for your role here, one per line. A tag is a keyword that describes
# and categorizes the role. Users find roles by searching for tags. Be sure to
# remove the '[]' above, if you add tags to this list.
#
# NOTE: A tag is limited to a single word comprised of alphanumeric characters.
# Maximum 20 tags per role.
dependencies: []
# List your role dependencies here, one per line. Be sure to remove the '[]' above,
# if you add dependencies to this list.

View File

@@ -0,0 +1,54 @@
---
# tasks file for artifactory-nginx
- name: configure the artifactory nginx conf
template:
src: artifactory.conf.j2
dest: /etc/nginx/conf.d/artifactory.conf
owner: root
group: root
mode: '0755'
become: yes
- name: ensure nginx dir exists
file:
path: "/var/opt/jfrog/nginx/ssl"
state: directory
become: yes
- name: configure certificate
template:
src: certificate.pem.j2
dest: "/var/opt/jfrog/nginx/ssl/cert.pem"
become: yes
- name: ensure pki exists
file:
path: "/etc/pki/tls"
state: directory
become: yes
- name: configure key
template:
src: certificate.key.j2
dest: "/etc/pki/tls/cert.key"
become: yes
- name: Set SELinux file context for the nginx certificate
sefcontext:
target: '/var/opt/jfrog/nginx/ssl/cert.pem'
setype: httpd_sys_content_t
state: present
vars:
ansible_python_interpreter: /bin/python2
become: yes
- name: Apply new SELinux file context to filesystem
command: restorecon -v /var/opt/jfrog/nginx/ssl/cert.pem
become: yes
- name: restart nginx
service:
name: nginx
state: restarted
enabled: yes
become: yes

View File

@@ -0,0 +1,49 @@
###########################################################
## this configuration was generated by JFrog Artifactory ##
###########################################################
## add HA entries when HA is configured
upstream artifactory {
server 127.0.0.1:8082;
}
upstream artifactory-direct {
server 127.0.0.1:8081;
}
ssl_protocols TLSv1.1 TLSv1.2;
ssl_certificate /var/opt/jfrog/nginx/ssl/cert.pem;
ssl_certificate_key /etc/pki/tls/cert.key;
ssl_session_cache shared:SSL:1m;
ssl_prefer_server_ciphers on;
## server configuration
server {
listen 80;
listen 443 ssl http2;
server_name _;
if ($http_x_forwarded_proto = '') {
set $http_x_forwarded_proto $scheme;
}
## Application specific logs
access_log /var/log/nginx/artifactory-access.log;
error_log /var/log/nginx/artifactory-error.log;
rewrite ^/$ /ui/ redirect;
rewrite ^/ui$ /ui/ redirect;
chunked_transfer_encoding on;
client_max_body_size 0;
location / {
proxy_read_timeout 2400s;
proxy_pass_header Server;
proxy_cookie_path ~*^/.* /;
proxy_pass "http://artifactory";
proxy_next_upstream error timeout non_idempotent;
proxy_next_upstream_tries 1;
proxy_set_header X-JFrog-Override-Base-Url $http_x_forwarded_proto://$host:$server_port;
proxy_set_header X-Forwarded-Port $server_port;
proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
proxy_set_header Host $http_host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
location ~ ^/artifactory/ {
proxy_pass http://artifactory-direct;
}
}
}

View File

@@ -0,0 +1 @@
{{ certificate_key | regex_replace('(-+(BEGIN|END) [A-Z ]*-+ ?|[A-Za-z0-9\+=/]* )', '\\1\n') }}

View File

@@ -0,0 +1 @@
{{ certificate | regex_replace('(-+(BEGIN|END) [A-Z ]*-+ ?|[A-Za-z0-9\+=/]* )', '\\1\n') }}
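
Both certificate templates assume the PEM content is handed in as a single-line variable with the original line breaks collapsed to spaces (for example when passed through a template or pipeline parameter); the regex_replace puts a newline back after the BEGIN/END markers and after each space-separated base64 chunk. A hedged sketch of the expected inputs (placeholder content only):

# hypothetical inputs for certificate.pem.j2 / certificate.key.j2
certificate: "-----BEGIN CERTIFICATE----- <base64 chunks separated by spaces> -----END CERTIFICATE-----"
certificate_key: "-----BEGIN PRIVATE KEY----- <base64 chunks separated by spaces> -----END PRIVATE KEY-----"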

View File

@@ -0,0 +1,5 @@
---
- hosts: localhost
remote_user: root
roles:
- artifactory-nginx

View File

@@ -0,0 +1,2 @@
---
# vars file for artifactory-nginx

View File

@@ -0,0 +1,29 @@
---
language: python
python: "2.7"
# Use the new container infrastructure
sudo: false
# Install ansible
addons:
apt:
packages:
- python-pip
install:
# Install ansible
- pip install ansible
# Check ansible version
- ansible --version
# Create ansible.cfg with correct roles_path
- printf '[defaults]\nroles_path=../' >ansible.cfg
script:
# Basic role syntax check
- ansible-playbook tests/test.yml -i tests/inventory --syntax-check
notifications:
webhooks: https://galaxy.ansible.com/api/v1/notifications/

View File

@@ -0,0 +1,2 @@
---
# defaults file for artifactory-nginx

View File

@@ -0,0 +1,37 @@
#user nobody;
worker_processes 1;
error_log /var/log/nginx/error.log info;
#pid logs/nginx.pid;
events {
worker_connections 1024;
}
http {
include mime.types;
variables_hash_max_size 1024;
variables_hash_bucket_size 64;
server_names_hash_max_size 4096;
server_names_hash_bucket_size 128;
types_hash_max_size 2048;
types_hash_bucket_size 64;
proxy_read_timeout 2400s;
client_header_timeout 2400s;
client_body_timeout 2400s;
proxy_connect_timeout 75s;
proxy_send_timeout 2400s;
proxy_buffer_size 32k;
proxy_buffers 40 32k;
proxy_busy_buffers_size 64k;
proxy_temp_file_write_size 250m;
proxy_http_version 1.1;
client_body_buffer_size 128k;
include /etc/nginx/conf.d/*.conf;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
#tcp_nopush on;
#keepalive_timeout 0;
keepalive_timeout 65;
}

View File

@@ -0,0 +1,2 @@
---
# handlers file for artifactory-nginx

View File

@@ -0,0 +1,53 @@
galaxy_info:
author: your name
description: your role description
company: your company (optional)
# If the issue tracker for your role is not on github, uncomment the
# next line and provide a value
# issue_tracker_url: http://example.com/issue/tracker
# Choose a valid license ID from https://spdx.org - some suggested licenses:
# - BSD-3-Clause (default)
# - MIT
# - GPL-2.0-or-later
# - GPL-3.0-only
# - Apache-2.0
# - CC-BY-4.0
license: license (GPL-2.0-or-later, MIT, etc)
min_ansible_version: 2.9
# If this is a Container Enabled role, provide the minimum Ansible Container version.
# min_ansible_container_version:
#
# Provide a list of supported platforms, and for each platform a list of versions.
# If you don't wish to enumerate all versions for a particular platform, use 'all'.
# To view available platforms and versions (or releases), visit:
# https://galaxy.ansible.com/api/v1/platforms/
#
# platforms:
# - name: Fedora
# versions:
# - all
# - 25
# - name: SomePlatform
# versions:
# - all
# - 1.0
# - 7
# - 99.99
galaxy_tags: []
# List tags for your role here, one per line. A tag is a keyword that describes
# and categorizes the role. Users find roles by searching for tags. Be sure to
# remove the '[]' above, if you add tags to this list.
#
# NOTE: A tag is limited to a single word comprised of alphanumeric characters.
# Maximum 20 tags per role.
dependencies: []
# List your role dependencies here, one per line. Be sure to remove the '[]' above,
# if you add dependencies to this list.

View File

@@ -0,0 +1,34 @@
---
- name: configure main nginx conf file.
copy:
src: nginx.conf
dest: /etc/nginx/nginx.conf
owner: root
group: root
mode: '0755'
become: yes
- name: configure the artifactory nginx conf
template:
src: artifactory.conf.j2
dest: /etc/nginx/conf.d/artifactory.conf
owner: root
group: root
mode: '0755'
become: yes
- name: restart nginx
service:
name: nginx
state: restarted
enabled: yes
become: yes

View File

@@ -0,0 +1,43 @@
###########################################################
## this configuration was generated by JFrog Artifactory ##
###########################################################
## add HA entries when HA is configured
upstream artifactory {
server 127.0.0.1:8082;
}
upstream artifactory-direct {
server 127.0.0.1:8081;
}
## server configuration
server {
listen 80 ;
server_name _;
if ($http_x_forwarded_proto = '') {
set $http_x_forwarded_proto $scheme;
}
## Application specific logs
access_log /var/log/nginx/artifactory-access.log;
error_log /var/log/nginx/artifactory-error.log;
rewrite ^/$ /ui/ redirect;
rewrite ^/ui$ /ui/ redirect;
chunked_transfer_encoding on;
client_max_body_size 0;
location / {
proxy_read_timeout 2400s;
proxy_pass_header Server;
proxy_cookie_path ~*^/.* /;
proxy_pass "http://artifactory";
proxy_next_upstream error timeout non_idempotent;
proxy_next_upstream_tries 1;
proxy_set_header X-JFrog-Override-Base-Url $http_x_forwarded_proto://$host:$server_port;
proxy_set_header X-Forwarded-Port $server_port;
proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
proxy_set_header Host $http_host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
location ~ ^/artifactory/ {
proxy_pass http://artifactory-direct;
}
}
}

View File

@@ -0,0 +1,5 @@
---
- hosts: localhost
remote_user: root
roles:
- artifactory-nginx

View File

@@ -0,0 +1,2 @@
---
# vars file for artifactory-nginx

View File

@@ -0,0 +1,29 @@
---
language: python
python: "2.7"
# Use the new container infrastructure
sudo: false
# Install ansible
addons:
apt:
packages:
- python-pip
install:
# Install ansible
- pip install ansible
# Check ansible version
- ansible --version
# Create ansible.cfg with correct roles_path
- printf '[defaults]\nroles_path=../' >ansible.cfg
script:
# Basic role syntax check
- ansible-playbook tests/test.yml -i tests/inventory --syntax-check
notifications:
webhooks: https://galaxy.ansible.com/api/v1/notifications/

View File

@@ -0,0 +1,43 @@
---
# defaults file for artifactory
# indicates where this collection was downloaded from (galaxy, automation_hub, standalone)
ansible_marketplace: standalone
# The version of Artifactory to install
artifactory_version: 7.19.4
# licenses - cluster license content in json
artifactory_licenses:
# whether to enable HA
artifactory_ha_enabled: true
# value for whether a host is primary. this should be set in host vars
artifactory_is_primary: true
# The location where Artifactory should install.
artifactory_download_directory: /opt/jfrog
# The location where Artifactory should store data.
artifactory_file_store_dir: /data
extra_java_opts: -server -Xms2g -Xmx14g -Xss256k -XX:+UseG1GC
artifactory_tar: https://releases.jfrog.io/artifactory/artifactory-pro/org/artifactory/pro/jfrog-artifactory-pro/{{ artifactory_version }}/jfrog-artifactory-pro-{{ artifactory_version }}-linux.tar.gz
artifactory_home: "{{ artifactory_download_directory }}/artifactory-pro-{{ artifactory_version }}"
artifactory_user: artifactory
artifactory_group: artifactory
# Set the parameters required for the service.
service_list:
- name: artifactory
description: Start script for Artifactory
start_command: "{{ artifactory_home }}/bin/artifactory.sh start"
stop_command: "{{ artifactory_home }}/bin/artifactory.sh stop"
type: forking
status_pattern: artifactory
user_name: "{{ artifactory_user }}"
group_name: "{{ artifactory_group }}"
product_id: CloudFormation_QS_EC2/1.0.0

View File

@@ -0,0 +1,10 @@
---
# handlers file for artifactory
- name: systemctl daemon-reload
systemd:
daemon_reload: yes
- name: restart artifactory
service:
name: artifactory
state: restarted

View File

@@ -0,0 +1,6 @@
---
exceptions:
- variation: Alpine
reason: Artifactory start/stop scripts don't work properly.
- variation: amazonlinux:1
reason: "Shutting down artifactory: /usr/bin/java\nfinding\nUsing the default catalina management port (8015) to test shutdown\nArtifactory Tomcat already stopped"

View File

@@ -0,0 +1,35 @@
---
galaxy_info:
author: Robert de Bock
role_name: artifactory
description: Install and configure artifactory on your system.
license: Apache-2.0
company: none
min_ansible_version: 2.8
platforms:
- name: Debian
versions:
- all
- name: EL
versions:
- 7
- 8
- name: Fedora
versions:
- all
- name: OpenSUSE
versions:
- all
- name: Ubuntu
versions:
- bionic
galaxy_tags:
- artifactory
- centos
- redhat
- server
- system
dependencies: []

View File

@@ -0,0 +1,2 @@
---
tox_parallel: yes

View File

@@ -0,0 +1,6 @@
---
project_name: JFrog
reference: "https://github.com/robertdebock/ansible-role-artifactory/blob/master/defaults/main.yml"
versions:
- name: Artifactory
url: "https://releases.jfrog.io/artifactory/"

View File

@@ -0,0 +1,43 @@
- name: set license for Enterprise
block:
- name: use license file
copy:
src: "{{ artifactory_license_file }}"
dest: "{{ artifactory_home }}/var/etc/artifactory/artifactory.cluster.license"
force: no # only copy if file doesn't exist
become: yes
when: artifactory_license_file is defined and artifactory_is_primary == true
- name: use license strings
vars:
artifactory_licenses_dict: "{{ artifactory_licenses | default('{}') }}"
template:
src: artifactory.cluster.license.j2
dest: "{{ artifactory_home }}/var/etc/artifactory/artifactory.cluster.license"
force: no # only create if file doesn't exist
become: yes
when: artifactory_license_file is not defined and artifactory_is_primary == true
when: artifactory_ha_enabled
- name: set license for Pro
block:
- name: use license file
copy:
src: "{{ artifactory_license_file }}"
dest: "{{ artifactory_home }}/var/etc/artifactory/artifactory.lic"
force: no # only create if file doesn't exist
become: yes
when: artifactory_license_file is defined
- name: use license strings
vars:
artifactory_licenses_dict: "{{ artifactory_licenses | default('{}') }}"
template:
src: artifactory.pro.license.j2
dest: "{{ artifactory_home }}/var/etc/artifactory/artifactory.lic"
force: no # only create if file doesn't exist
become: yes
when: artifactory_license_file is not defined
when: not artifactory_ha_enabled
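
The string-based branch above renders artifactory_licenses through the artifactory.cluster.license.j2 / artifactory.pro.license.j2 templates shown further down, which select keys matching '^ArtifactoryLicense\d$'; so the variable is expected to be a dict shaped roughly like this hypothetical example (values are placeholders):

# hypothetical shape of artifactory_licenses (placeholder values)
artifactory_licenses:
  ArtifactoryLicense1: "<license-block-1>"
  ArtifactoryLicense2: "<license-block-2>"
  ArtifactoryLicense3: "<license-block-3>"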

View File

@@ -0,0 +1,151 @@
---
# tasks file for artifactory
- name: Set artifactory major version
set_fact:
artifactory_major_verion: "{{ artifactory_version.split('.')[0] }}"
- name: create group for artifactory
group:
name: "{{ artifactory_group }}"
state: present
become: yes
- name: create user for artifactory
user:
name: "{{ artifactory_user }}"
group: "{{ artifactory_group }}"
system: yes
become: yes
- name: ensure artifactory_download_directory exists
file:
path: "{{ artifactory_download_directory }}"
state: directory
become: yes
- name: ensure artifactory_file_store_dir exists
file:
path: "{{ artifactory_file_store_dir }}"
state: directory
owner: "{{ artifactory_user }}"
group: "{{ artifactory_group }}"
become: yes
- name: ensure data subdirectories exist and have correct ownership
file:
path: "{{ artifactory_home }}/var/{{ item }}"
state: directory
owner: "{{ artifactory_user }}"
group: "{{ artifactory_group }}"
loop:
- "bootstrap"
- "etc"
- "data"
- "etc/info"
- "etc/security"
- "etc/artifactory"
become: yes
- name: check if system yaml file exists
stat:
path: "{{ artifactory_home }}/var/etc/system.yaml"
register: system_yaml
- name: use specified system yaml
copy:
src: "{{ system_file }}"
dest: "{{ artifactory_home }}/var/etc/system.yaml"
become: yes
when: system_file is defined and not system_yaml.stat.exists
- name: configure system yaml
template:
src: system.yaml.j2
dest: "{{ artifactory_home }}/var/etc/system.yaml"
become: yes
when: system_file is not defined and not system_yaml.stat.exists
- name: configure master key
template:
src: master.key.j2
dest: "{{ artifactory_home }}/var/etc/security/master.key"
force: no # only create if file doesn't exist
become: yes
- name: configure join key
template:
src: join.key.j2
dest: "{{ artifactory_home }}/var/etc/security/join.key"
force: no # only create if file doesn't exist
become: yes
- name: configure installer info
template:
src: installer-info.json.j2
dest: "{{ artifactory_home }}/var/etc/info/installer-info.json"
become: yes
- name: use specified binary store file
copy:
src: "{{ binary_store_file }}"
dest: "{{ artifactory_home }}/var/etc/artifactory/binarystore.xml"
force: no # only copy if file doesn't exist
become: yes
when: binary_store_file is defined
- name: set default binary store
template:
src: binarystore.xml.j2
dest: "{{ artifactory_home }}/var/etc/artifactory/binarystore.xml"
force: no # only create if file doesn't exist
become: yes
when: binary_store_file is not defined
- name: configure licenses
include_tasks: configure-licenses.yml
- name: create artifactory service
shell: "{{ artifactory_home }}/app/bin/installService.sh"
become: yes
- name: Delete plugin folder
file:
state: absent
path: "{{ artifactory_home }}/var/etc/artifactory/plugins"
- name: symlink plugin folder to EFS
file:
src: "/efsmount/plugins"
path: "{{ artifactory_home }}/var/etc/artifactory/plugins"
state: link
owner: "{{ artifactory_user }}"
group: "{{ artifactory_group }}"
- name: ensure data subdirectories exist and have correct ownership
file:
path: "{{ artifactory_home }}/var/{{ item }}"
state: directory
owner: "{{ artifactory_user }}"
group: "{{ artifactory_group }}"
loop:
- "etc/artifactory/plugins"
become: yes
- name: start and enable the primary node
service:
name: artifactory
state: restarted
become: yes
# when: artifactory_is_primary == true
# - name: random wait before restarting to prevent secondary nodes from hitting DB first
# pause:
# seconds: "{{ 120 | random + 10}}"
# when: artifactory_is_primary == false
# - name: start and enable the secondary nodes
# service:
# name: artifactory
# state: restarted
# become: yes
# when: artifactory_is_primary == false

View File

@@ -0,0 +1,6 @@
{% if artifactory_licenses_dict %}
{% for key in (artifactory_licenses_dict.keys() | select('match', '^ArtifactoryLicense\d$')) %}
{{ artifactory_licenses_dict[key] }}
{% endfor %}
{% endif %}

View File

@@ -0,0 +1,8 @@
{% if artifactory_licenses_dict %}
{% for key in (artifactory_licenses_dict.keys() | select('match', '^ArtifactoryLicense\d$')) %}
{% if loop.first %}
{{ artifactory_licenses_dict[key] }}
{% endif %}
{% endfor %}
{% endif %}

View File

@@ -0,0 +1,14 @@
<config version="2">
<chain>
<provider id="cache-fs" type="cache-fs">
<provider id="s3-storage-v3" type="s3-storage-v3"/>
</provider>
</chain>
<provider id="s3-storage-v3" type="s3-storage-v3">
<endpoint>s3.{{ s3_region }}.amazonaws.com</endpoint>
<bucketName>{{ s3_bucket }}</bucketName>
<path>artifactory/filestore</path>
<region>{{ s3_region }}</region>
<useInstanceCredentials>true</useInstanceCredentials>
</provider>
</config>
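
This binarystore template assumes S3 object storage with instance-profile credentials, fronted by a cache-fs provider; the two variables it needs might be supplied like this (bucket name and region are placeholders):

# example vars for binarystore.xml.j2 (placeholder values)
s3_region: us-east-1
s3_bucket: example-artifactory-filestore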

View File

@@ -0,0 +1,11 @@
{
"productId": "{{ product_id }}",
"features": [
{
"featureId": "Partner/ACC-006973"
},
{
"featureId": "Channel/{{ ansible_marketplace }}"
}
]
}

View File

@@ -0,0 +1,41 @@
## @formatter:off
## JFROG ARTIFACTORY SYSTEM CONFIGURATION FILE
## HOW TO USE: uncomment any field and keep the correct yaml indentation by deleting only the leading '#' character.
configVersion: 1
## NOTE: JFROG_HOME is a placeholder for the JFrog root directory containing the deployed product, the home directory for all JFrog products.
## Replace JFROG_HOME with the real path! For example, in an RPM install, JFROG_HOME=/opt/jfrog
## NOTE: Sensitive information such as passwords and join key are encrypted on first read.
## NOTE: The provided commented key and value is the default.
## SHARED CONFIGURATIONS
## A shared section for keys across all services in this config
shared:
## Java options
extraJavaOpts: "{{ extra_java_opts }}"
## Node Settings
node:
## A unique id to identify this node.
## Default: auto generated at startup.
id: {{ ansible_machine_id }}
## Sets this node as primary in HA installation
# primary: {{ artifactory_is_primary }}
Affinity: "any"
## Sets this node as part of HA installation
haEnabled: {{ artifactory_ha_enabled }}
## Database Configuration
database:
## One of: mysql, oracle, mssql, postgresql, mariadb
## Default: Embedded derby
## Example for mysql/postgresql
type: "{{ db_type }}"
driver: "{{ db_driver }}"
url: "{{ db_url }}"
username: "{{ db_user }}"
password: "{{ db_password }}"

View File

@@ -0,0 +1,29 @@
---
language: python
python: "2.7"
# Use the new container infrastructure
sudo: false
# Install ansible
addons:
apt:
packages:
- python-pip
install:
# Install ansible
- pip install ansible
# Check ansible version
- ansible --version
# Create ansible.cfg with correct roles_path
- printf '[defaults]\nroles_path=../' >ansible.cfg
script:
# Basic role syntax check
- ansible-playbook tests/test.yml -i tests/inventory --syntax-check
notifications:
webhooks: https://galaxy.ansible.com/api/v1/notifications/

View File

@@ -0,0 +1,26 @@
---
# defaults file for xray
# indicates where this collection was downloaded from (galaxy, automation_hub, standalone)
ansible_marketplace: standalone
# whether we are creating an AMI for Marketplace or just configuring an EC2 instance
ami_creation: false
# The version of xray to install
xray_version: 3.17.4
# whether to enable HA
xray_ha_enabled: true
# The location where xray should install.
xray_download_directory: /opt/jfrog
# The remote xray download file
xray_tar: https://releases.jfrog.io/artifactory/jfrog-xray/xray-linux/{{ xray_version }}/jfrog-xray-{{ xray_version }}-linux.tar.gz
# The xray install directory
xray_home: "{{ xray_download_directory }}/jfrog-xray-{{ xray_version }}-linux"
# xray user and group
xray_user: xray
xray_group: xray

View File

@@ -0,0 +1,2 @@
---
# handlers file for xray

View File

@@ -0,0 +1,53 @@
galaxy_info:
author: your name
description: your role description
company: your company (optional)
# If the issue tracker for your role is not on github, uncomment the
# next line and provide a value
# issue_tracker_url: http://example.com/issue/tracker
# Choose a valid license ID from https://spdx.org - some suggested licenses:
# - BSD-3-Clause (default)
# - MIT
# - GPL-2.0-or-later
# - GPL-3.0-only
# - Apache-2.0
# - CC-BY-4.0
license: license (GPL-2.0-or-later, MIT, etc)
min_ansible_version: 2.9
# If this is a Container Enabled role, provide the minimum Ansible Container version.
# min_ansible_container_version:
#
# Provide a list of supported platforms, and for each platform a list of versions.
# If you don't wish to enumerate all versions for a particular platform, use 'all'.
# To view available platforms and versions (or releases), visit:
# https://galaxy.ansible.com/api/v1/platforms/
#
# platforms:
# - name: Fedora
# versions:
# - all
# - 25
# - name: SomePlatform
# versions:
# - all
# - 1.0
# - 7
# - 99.99
galaxy_tags: []
# List tags for your role here, one per line. A tag is a keyword that describes
# and categorizes the role. Users find roles by searching for tags. Be sure to
# remove the '[]' above, if you add tags to this list.
#
# NOTE: A tag is limited to a single word comprised of alphanumeric characters.
# Maximum 20 tags per role.
dependencies: []
# List your role dependencies here, one per line. Be sure to remove the '[]' above,
# if you add dependencies to this list.

View File

@@ -0,0 +1,37 @@
---
- name: Install db5.3-util
apt:
deb: "{{ xray_home }}/app/third-party/misc/db5.3-util_5.3.28-3ubuntu3_amd64.deb"
ignore_errors: yes
become: yes
- name: Install db-util
apt:
deb: "{{ xray_home }}/app/third-party/misc/db-util_1_3a5.3.21exp1ubuntu1_all.deb"
ignore_errors: yes
become: yes
- name: Install libssl
apt:
deb: "{{ xray_home }}/app/third-party/rabbitmq/libssl1.1_1.1.0j-1_deb9u1_amd64.deb"
ignore_errors: yes
become: yes
- name: Install socat
apt:
deb: "{{ xray_home }}/app/third-party/rabbitmq/socat_1.7.3.1-2+deb9u1_amd64.deb"
become: yes
- name: Install libwxbase3.0-0v5
apt:
name: libwxbase3.0-0v5
update_cache: yes
state: present
ignore_errors: yes
become: yes
- name: Install erlang
apt:
deb: "{{ xray_home }}/app/third-party/rabbitmq/esl-erlang_21.2.1-1~ubuntu~xenial_amd64.deb"
become: yes

View File

@@ -0,0 +1,21 @@
---
- name: Install db-util
yum:
name: "{{ xray_home }}/app/third-party/misc/libdb-utils-5.3.21-25.el7.x86_64.rpm"
state: present
vars:
ansible_python_interpreter: /bin/python2
- name: Install socat
yum:
name: "{{ xray_home }}/app/third-party/rabbitmq/socat-1.7.3.2-2.el7.x86_64.rpm"
state: present
vars:
ansible_python_interpreter: /bin/python2
- name: Install erlang
yum:
name: "{{ xray_home }}/app/third-party/rabbitmq/erlang-23.2.7-1.el7.x86_64.rpm"
state: present
vars:
ansible_python_interpreter: /bin/python2

View File

@@ -0,0 +1,60 @@
---
- name: create group for xray
group:
name: "{{ xray_group }}"
state: present
become: yes
- name: create user for xray
user:
name: "{{ xray_user }}"
group: "{{ xray_group }}"
system: yes
become: yes
- name: ensure xray_download_directory exists
file:
path: "{{ xray_download_directory }}"
state: directory
become: yes
- name: download xray
unarchive:
src: "{{ xray_tar }}"
dest: "{{ xray_download_directory }}"
remote_src: yes
owner: "{{ xray_user }}"
group: "{{ xray_group }}"
creates: "{{ xray_home }}"
become: yes
register: downloadxray
until: downloadxray is succeeded
retries: 3
- name: perform prerequisite installation
include_tasks: "{{ ansible_os_family }}.yml"
- name: ensure etc exists
file:
path: "{{ xray_home }}/var/etc"
state: directory
owner: "{{ xray_user }}"
group: "{{ xray_group }}"
become: yes
- name: Remove SSH keys
file:
path: "{{ ssh_keys.dir }}"
state: absent
loop:
- dir: "/home/.xray_ami/.ssh/authorized_keys"
- dir: "/root/.ssh/authorized_keys"
- dir: "/home/centos/.ssh/authorized_keys"
loop_control:
loop_var: ssh_keys
when: ami_creation
- name: shutdown VM
command: /sbin/shutdown -h now
ignore_errors: 'yes'
when: ami_creation

View File

@@ -0,0 +1,11 @@
{
"productId": "Ansible_artifactory/1.0.0",
"features": [
{
"featureId": "Partner/ACC-006973"
},
{
"featureId": "Channel/{{ ansible_marketplace }}"
}
]
}

View File

@@ -0,0 +1 @@
{{ join_key }}

View File

@@ -0,0 +1 @@
{{ master_key }}

View File

@@ -0,0 +1,36 @@
## @formatter:off
## JFROG ARTIFACTORY SYSTEM CONFIGURATION FILE
## HOW TO USE: uncomment any field and keep the correct yaml indentation by deleting only the leading '#' character.
configVersion: 1
## NOTE: JFROG_HOME is a placeholder for the JFrog root directory containing the deployed product, the home directory for all JFrog products.
## Replace JFROG_HOME with the real path! For example, in an RPM install, JFROG_HOME=/opt/jfrog
## NOTE: Sensitive information such as passwords and join key are encrypted on first read.
## NOTE: The provided commented key and value is the default.
## SHARED CONFIGURATIONS
## A shared section for keys across all services in this config
shared:
## Base URL of the JFrog Platform Deployment (JPD)
## This is the URL to the machine where JFrog Artifactory is deployed, or the load balancer pointing to it. It is recommended to use DNS names rather than direct IPs.
## Examples: "http://jfrog.acme.com" or "http://10.20.30.40:8082"
jfrogUrl: {{ jfrog_url }}
## Node Settings
node:
## A unique id to identify this node.
## Default: auto generated at startup.
id: {{ ansible_machine_id }}
## Database Configuration
database:
## One of: mysql, oracle, mssql, postgresql, mariadb
## Default: Embedded derby
## Example for mysql/postgresql
type: "{{ db_type }}"
driver: "{{ db_driver }}"
url: "{{ db_url }}"
username: "{{ db_user }}"
password: "{{ db_password }}"

View File

@@ -0,0 +1,2 @@
localhost

View File

@@ -0,0 +1,5 @@
---
- hosts: localhost
remote_user: root
roles:
- xray

View File

@@ -0,0 +1,2 @@
---
# vars file for xray

View File

@@ -0,0 +1,29 @@
---
language: python
python: "2.7"
# Use the new container infrastructure
sudo: false
# Install ansible
addons:
apt:
packages:
- python-pip
install:
# Install ansible
- pip install ansible
# Check ansible version
- ansible --version
# Create ansible.cfg with correct roles_path
- printf '[defaults]\nroles_path=../' >ansible.cfg
script:
# Basic role syntax check
- ansible-playbook tests/test.yml -i tests/inventory --syntax-check
notifications:
webhooks: https://galaxy.ansible.com/api/v1/notifications/

View File

@@ -0,0 +1,23 @@
---
# defaults file for xray
# indicates where this collection was downloaded from (galaxy, automation_hub, standalone)
ansible_marketplace: standalone
# The version of xray to install
xray_version: 3.17.4
# whether to enable HA
xray_ha_enabled: true
# The location where xray should install.
xray_download_directory: /opt/jfrog
# The remote xray download file
xray_tar: https://releases.jfrog.io/artifactory/jfrog-xray/xray-linux/{{ xray_version }}/jfrog-xray-{{ xray_version }}-linux.tar.gz
# The xray install directory
xray_home: "{{ xray_download_directory }}/jfrog-xray-{{ xray_version }}-linux"
# xray user and group
xray_user: xray
xray_group: xray

View File

@@ -0,0 +1,2 @@
---
# handlers file for xray

View File

@@ -0,0 +1,53 @@
galaxy_info:
author: your name
description: your role description
company: your company (optional)
# If the issue tracker for your role is not on github, uncomment the
# next line and provide a value
# issue_tracker_url: http://example.com/issue/tracker
# Choose a valid license ID from https://spdx.org - some suggested licenses:
# - BSD-3-Clause (default)
# - MIT
# - GPL-2.0-or-later
# - GPL-3.0-only
# - Apache-2.0
# - CC-BY-4.0
license: license (GPL-2.0-or-later, MIT, etc)
min_ansible_version: 2.9
# If this is a Container Enabled role, provide the minimum Ansible Container version.
# min_ansible_container_version:
#
# Provide a list of supported platforms, and for each platform a list of versions.
# If you don't wish to enumerate all versions for a particular platform, use 'all'.
# To view available platforms and versions (or releases), visit:
# https://galaxy.ansible.com/api/v1/platforms/
#
# platforms:
# - name: Fedora
# versions:
# - all
# - 25
# - name: SomePlatform
# versions:
# - all
# - 1.0
# - 7
# - 99.99
galaxy_tags: []
# List tags for your role here, one per line. A tag is a keyword that describes
# and categorizes the role. Users find roles by searching for tags. Be sure to
# remove the '[]' above, if you add tags to this list.
#
# NOTE: A tag is limited to a single word comprised of alphanumeric characters.
# Maximum 20 tags per role.
dependencies: []
# List your role dependencies here, one per line. Be sure to remove the '[]' above,
# if you add dependencies to this list.

View File

@@ -0,0 +1,37 @@
---
- name: Install db5.3-util
apt:
deb: "{{ xray_home }}/app/third-party/misc/db5.3-util_5.3.28-3ubuntu3_amd64.deb"
ignore_errors: yes
become: yes
- name: Install db-util
apt:
deb: "{{ xray_home }}/app/third-party/misc/db-util_1_3a5.3.21exp1ubuntu1_all.deb"
ignore_errors: yes
become: yes
- name: Install libssl
apt:
deb: "{{ xray_home }}/app/third-party/rabbitmq/libssl1.1_1.1.0j-1_deb9u1_amd64.deb"
ignore_errors: yes
become: yes
- name: Install socat
apt:
deb: "{{ xray_home }}/app/third-party/rabbitmq/socat_1.7.3.1-2+deb9u1_amd64.deb"
become: yes
- name: Install libwxbase3.0-0v5
apt:
name: libwxbase3.0-0v5
update_cache: yes
state: present
ignore_errors: yes
become: yes
- name: Install erlang
apt:
deb: "{{ xray_home }}/app/third-party/rabbitmq/esl-erlang_21.2.1-1~ubuntu~xenial_amd64.deb"
become: yes

View File

@@ -0,0 +1,21 @@
---
- name: Install db-util
yum:
name: "{{ xray_home }}/app/third-party/misc/libdb-utils-5.3.21-25.el7.x86_64.rpm"
state: present
vars:
ansible_python_interpreter: /bin/python2
- name: Install socat
yum:
name: "{{ xray_home }}/app/third-party/rabbitmq/socat-1.7.3.2-2.el7.x86_64.rpm"
state: present
vars:
ansible_python_interpreter: /bin/python2
- name: Install erlang
yum:
name: "{{ xray_home }}/app/third-party/rabbitmq/erlang-23.2.7-1.el7.x86_64.rpm"
state: present
vars:
ansible_python_interpreter: /bin/python2

View File

@@ -0,0 +1,52 @@
- name: initialize Postgres DB
block:
- name: check if user/role exists
command: psql -A -t {{db_master_url}} -c "SELECT 1 FROM pg_roles WHERE rolname='{{db_user}}'"
register: user_exists
- debug:
var: user_exists.stdout_lines
- name: create user/role
command: psql {{db_master_url}} -c "CREATE USER {{db_user}} WITH PASSWORD '{{db_password}}'"
register: shell_output
when: user_exists.stdout != "1"
- debug:
var: shell_output.stdout_lines
when: user_exists.stdout != "1"
- name: grant membership role
command: psql {{db_master_url}} -c "GRANT {{db_user}} TO {{db_master_user}}"
register: shell_output
when: user_exists.stdout != "1"
- debug:
var: shell_output.stdout_lines
when: user_exists.stdout != "1"
- name: check if xraydb exists
command: psql -A -t {{db_master_url}} -c "SELECT 1 FROM pg_database WHERE datname='xraydb'"
register: db_exists
- debug:
var: db_exists.stdout_lines
- name: create xraydb database
command: psql {{db_master_url}} -c "CREATE DATABASE xraydb WITH OWNER={{db_user}} ENCODING='UTF8'"
register: shell_output
when: db_exists.stdout != "1"
- debug:
var: shell_output.stdout_lines
when: db_exists.stdout != "1"
- name: grant xraydb privileges to role
command: psql {{db_master_url}} -c "GRANT ALL PRIVILEGES ON DATABASE xraydb TO {{db_user}}"
register: shell_output
when: db_exists.stdout != "1"
- debug:
var: shell_output.stdout_lines
when: db_exists.stdout != "1"
become: yes
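
The psql calls above assume db_master_url is a full connection URI that psql can take as its database argument, pointing at the master/admin database; a hedged example of the inputs this task file expects (all values are placeholders):

# hypothetical inputs for initialize-pg-db.yml (placeholder values)
db_master_url: "postgresql://master_user:<master-password>@db.example.internal:5432/postgres"
db_master_user: master_user
db_user: xray
db_password: "<xray-db-password>"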

View File

@@ -0,0 +1,76 @@
---
- name: initialize postgres database
include_tasks: initialize-pg-db.yml
- name: create group for xray
group:
name: "{{ xray_group }}"
state: present
become: yes
- name: create user for xray
user:
name: "{{ xray_user }}"
group: "{{ xray_group }}"
system: yes
become: yes
- name: ensure xray_download_directory exists
file:
path: "{{ xray_download_directory }}"
state: directory
become: yes
- name: perform prerequisite installation
include_tasks: "{{ ansible_os_family }}.yml"
- name: ensure data subdirectories exist and have correct ownership
file:
path: "{{ xray_home }}/var/{{ item }}"
state: directory
owner: "{{ xray_user }}"
group: "{{ xray_group }}"
loop:
- "etc"
- "data"
- "etc/info"
- "etc/security"
become: yes
- name: configure system yaml
template:
src: system.yaml.j2
dest: "{{ xray_home }}/var/etc/system.yaml"
force: no # only create if file doesn't exist
become: yes
- name: configure master key
template:
src: master.key.j2
dest: "{{ xray_home }}/var/etc/security/master.key"
force: no # only create if file doesn't exist
become: yes
- name: configure join key
template:
src: join.key.j2
dest: "{{ xray_home }}/var/etc/security/join.key"
force: no # only create if file doesn't exist
become: yes
- name: configure installer info
template:
src: installer-info.json.j2
dest: "{{ xray_home }}/var/etc/info/installer-info.json"
force: no # only create if file doesn't exist
become: yes
- name: create xray service
shell: "{{ xray_home }}/app/bin/installService.sh"
become: yes
- name: start and enable xray
service:
name: xray
state: restarted
become: yes

View File

@@ -0,0 +1,11 @@
{
"productId": "Ansible_artifactory/1.0.0",
"features": [
{
"featureId": "Partner/ACC-006973"
},
{
"featureId": "Channel/{{ ansible_marketplace }}"
}
]
}

View File

@@ -0,0 +1 @@
{{ join_key }}

View File

@@ -0,0 +1 @@
{{ master_key }}

View File

@@ -0,0 +1,39 @@
## @formatter:off
## JFROG ARTIFACTORY SYSTEM CONFIGURATION FILE
## HOW TO USE: uncomment any field and keep the correct yaml indentation by deleting only the leading '#' character.
configVersion: 1
## NOTE: JFROG_HOME is a placeholder for the JFrog root directory containing the deployed product, the home directory for all JFrog products.
## Replace JFROG_HOME with the real path! For example, in an RPM install, JFROG_HOME=/opt/jfrog
## NOTE: Sensitive information such as passwords and join key are encrypted on first read.
## NOTE: The provided commented key and value is the default.
## SHARED CONFIGURATIONS
## A shared section for keys across all services in this config
shared:
## Base URL of the JFrog Platform Deployment (JPD)
## This is the URL to the machine where JFrog Artifactory is deployed, or the load balancer pointing to it. It is recommended to use DNS names rather than direct IPs.
## Examples: "http://jfrog.acme.com" or "http://10.20.30.40:8082"
jfrogUrl: {{ jfrog_url }}
## Java options
extraJavaOpts: "{{ extra_java_opts }}"
## Node Settings
node:
## A unique id to identify this node.
## Default: auto generated at startup.
id: {{ ansible_machine_id }}
## Database Configuration
database:
## One of: mysql, oracle, mssql, postgresql, mariadb
## Default: Embedded derby
## Example for mysql/postgresql
type: "{{ db_type }}"
driver: "{{ db_driver }}"
url: "{{ db_url }}"
username: "{{ db_user }}"
password: "{{ db_password }}"

View File

@@ -0,0 +1,2 @@
localhost

View File

@@ -0,0 +1,5 @@
---
- hosts: localhost
remote_user: root
roles:
- xray

View File

@@ -0,0 +1,2 @@
---
# vars file for xray

View File

@@ -0,0 +1,12 @@
- hosts: localhost
gather_facts: true
become: true
tasks:
- include_role:
name: artifactory
- include_role:
name: artifactory-nginx
when: "enable_ssl != true"
- include_role:
name: artifactory-nginx-ssl
when: "enable_ssl == true"
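
A usage sketch: the SSL toggle and the nginx certificate material are expected as extra vars, so a run might pass a vars file like the hypothetical one below (all values are placeholders):

# hypothetical extra-vars file, e.g. ansible-playbook <playbook>.yml -e @vars.yml
enable_ssl: true
certificate: "<single-line PEM certificate>"
certificate_key: "<single-line PEM private key>"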

View File

@@ -0,0 +1,5 @@
- hosts: localhost
gather_facts: true
become: true
roles:
- name: xray

View File

@@ -0,0 +1,5 @@
- hosts: localhost
gather_facts: true
become: true
roles:
- name: xray-ami

View File

@@ -0,0 +1,442 @@
AWSTemplateFormatVersion: '2010-09-09'
Description: 'JFrog Artifactory Quick Start Deployment (qs-1qpmmjh61)'
Metadata:
cfn-lint:
config:
ignore_checks:
- W9006
- W9002
- W9003
- W9004
Parameters:
# AvailabilityZones:
# Description: List of Availability Zones to use for the subnets in the VPC. Two
# Availability Zones are used for this deployment.
# Type: List<AWS::EC2::AvailabilityZone::Name>
VpcId:
Type: AWS::EC2::VPC::Id
VpcCidr:
Description: CIDR block for the VPC
AllowedPattern: ^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(1[6-9]|2[0-8]))$
ConstraintDescription: CIDR block parameter must be in the form x.x.x.x/16-28
Default: 10.0.0.0/16
Type: String
PrivateSubnet1Cidr:
AllowedPattern: ^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(1[6-9]|2[0-8]))$
ConstraintDescription: CIDR block parameter must be in the form x.x.x.x/16-28
Default: 10.0.0.0/19
Type: String
PrivateSubnet2Cidr:
AllowedPattern: ^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(1[6-9]|2[0-8]))$
ConstraintDescription: CIDR block parameter must be in the form x.x.x.x/16-28
Default: 10.0.32.0/19
Type: String
PrivateSubnet3Cidr:
AllowedPattern: ^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(1[6-9]|2[0-8]))$
ConstraintDescription: CIDR block parameter must be in the form x.x.x.x/16-28
Default: 10.0.64.0/19
Type: String
SubnetIds:
Type: List<AWS::EC2::Subnet::Id>
DatabaseAllocatedStorage:
Type: Number
DatabasePreferredAz:
Type: String
MultiAzDatabase:
Description: Choose false to create an Amazon RDS instance in a single Availability Zone.
ConstraintDescription: True or False
AllowedValues:
- "true"
- "false"
Type: String
DatabaseUser:
Type: String
DatabasePassword:
NoEcho: 'true'
Type: String
DatabaseInstance:
Type: String
DatabaseName:
Type: String
InstanceType:
Default: m5.xlarge
Type: String
ArtifactoryHostRole:
Type: String
# VolumeSize:
# Type: Number
EfsSecurityGroup:
Type: String
Mappings:
DatabaseMap:
Postgres:
Name: postgresql
DatabaseVersion: 11.5
Driver: "org.postgresql.Driver"
Plugin: postgresql-42.2.9.jar
PluginURL: https://jdbc.postgresql.org/download/
port: "5432"
extraDatabaseOps: ""
JavaOptionstoInstance:
c5.2xlarge:
Min: 8
Max: 12
DeploymentSize: Small
c5.4xlarge:
Min: 16
Max: 24
DeploymentSize: Large
m5.large:
Min: 4
Max: 4
DeploymentSize: xxSmall
m5.xlarge:
Min: 8
Max: 12
DeploymentSize: xSmall
m5.2xlarge:
Min: 16
Max: 24
DeploymentSize: Small
m5.4xlarge:
Min: 32
Max: 48
DeploymentSize: Medium
m5.8xlarge:
Min: 64
Max: 96
DeploymentSize: Large
m5.12xlarge:
Min: 96
Max: 144
DeploymentSize: xLarge
m5.16xlarge:
Min: 128
Max: 192
DeploymentSize: xxLarge
m5.24xlarge:
Min: 192
Max: 288
DeploymentSize: xxxLarge
m5.metal:
Min: 192
Max: 288
DeploymentSize: xxxLarge
m5d.large:
Min: 4
Max: 4
DeploymentSize: xxSmall
m5d.xlarge:
Min: 8
Max: 12
DeploymentSize: xSmall
m5d.2xlarge:
Min: 16
Max: 24
DeploymentSize: Small
m5d.4xlarge:
Min: 32
Max: 48
DeploymentSize: Medium
m5d.8xlarge:
Min: 64
Max: 96
DeploymentSize: Large
m5d.12xlarge:
Min: 96
Max: 144
DeploymentSize: xLarge
m5d.16xlarge:
Min: 128
Max: 192
DeploymentSize: xxLarge
m5d.24xlarge:
Min: 192
Max: 288
DeploymentSize: xxxLarge
m5d.metal:
Min: 192
Max: 288
DeploymentSize: xxxLarge
m5a.large:
Min: 4
Max: 4
DeploymentSize: xxSmall
m5a.xlarge:
Min: 8
Max: 12
DeploymentSize: xSmall
m5a.2xlarge:
Min: 16
Max: 24
DeploymentSize: Small
m5a.4xlarge:
Min: 32
Max: 48
DeploymentSize: Medium
m5a.8xlarge:
Min: 64
Max: 96
DeploymentSize: Large
m5a.12xlarge:
Min: 96
Max: 144
DeploymentSize: xLarge
m5a.16xlarge:
Min: 128
Max: 192
DeploymentSize: xxLarge
m5a.24xlarge:
Min: 192
Max: 288
DeploymentSize: xxxLarge
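  # Min/Max are JVM heap sizes in GB; the JavaOpts output below renders them as
  # "-Xms<Min>g -Xmx<Max>g", so an m5.xlarge, for example, resolves to
  # "-Xms8g -Xmx12g".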
Conditions:
IsMultiAzDatabase: !Equals [!Ref MultiAzDatabase, 'true']
Resources:
ArtifactoryDatabaseSubnetGroup:
Type: AWS::RDS::DBSubnetGroup
Properties:
DBSubnetGroupDescription: Private Subnets available to the RDS Instance(s)
SubnetIds: !Ref SubnetIds
ArtifactoryDatabase:
Type: AWS::RDS::DBInstance
Properties:
AllocatedStorage: !Ref DatabaseAllocatedStorage
AvailabilityZone: !If [IsMultiAzDatabase, !Ref AWS::NoValue, !Ref DatabasePreferredAz]
BackupRetentionPeriod: 30
DBInstanceClass: !Ref DatabaseInstance
DBName: !Ref DatabaseName
DBSubnetGroupName: !Ref ArtifactoryDatabaseSubnetGroup
Engine: "Postgres"
EngineVersion: !FindInMap
- DatabaseMap
- "Postgres"
- DatabaseVersion
MasterUsername: !Ref DatabaseUser
MasterUserPassword: !Ref DatabasePassword
MultiAZ: !Ref MultiAzDatabase
StorageEncrypted: true
VPCSecurityGroups:
- !Ref ArtifactoryDatabaseSG
ArtifactoryDatabaseSG:
Type: AWS::EC2::SecurityGroup
Properties:
Tags:
- Key: Name
Value: artifactory-rds-sg
GroupDescription: SG for RDS Instance to allow communication from the Bastion and Artifactory servers.
VpcId: !Ref VpcId
SecurityGroupIngress:
- IpProtocol: tcp
FromPort: 22
ToPort: 22
CidrIp: !Ref VpcCidr
- IpProtocol: tcp
FromPort: !FindInMap
- DatabaseMap
- "Postgres"
- port
ToPort: !FindInMap
- DatabaseMap
- "Postgres"
- port
CidrIp: !Ref PrivateSubnet1Cidr
- IpProtocol: tcp
FromPort: !FindInMap
- DatabaseMap
- "Postgres"
- port
ToPort: !FindInMap
- DatabaseMap
- "Postgres"
- port
CidrIp: !Ref PrivateSubnet2Cidr
- IpProtocol: tcp
FromPort: !FindInMap
- DatabaseMap
- "Postgres"
- port
ToPort: !FindInMap
- DatabaseMap
- "Postgres"
- port
CidrIp: !Ref PrivateSubnet3Cidr
SecurityGroupEgress:
- IpProtocol: tcp
FromPort: 22
ToPort: 22
CidrIp: 0.0.0.0/0
- IpProtocol: tcp
FromPort: 80
ToPort: 80
CidrIp: 0.0.0.0/0
- IpProtocol: tcp
FromPort: 443
ToPort: 443
CidrIp: 0.0.0.0/0
ArtifactoryS3Bucket:
Type: AWS::S3::Bucket
Properties:
AccessControl: Private
BucketEncryption:
ServerSideEncryptionConfiguration:
- ServerSideEncryptionByDefault:
SSEAlgorithm: AES256
ArtifactoryS3IAMPolicy:
Type: AWS::IAM::Policy
Metadata:
cfn-lint:
config:
ignore_checks:
- EIAMPolicyActionWildcard
ignore_reasons:
- EIAMPolicyWildcardResource: excluding for s3:Get*, s3:Put*, s3:List*
Properties:
PolicyName: S3BucketPermissions
PolicyDocument:
Version: 2012-10-17
Statement:
- Sid: S3BucketPermissions
Effect: Allow
Action:
- s3:AbortMultipartUpload
- s3:BypassGovernanceRetention
- s3:CreateAccessPoint
- s3:CreateAccessPointForObjectLambda
- s3:CreateBucket
- s3:CreateJob
- s3:DeleteAccessPoint
- s3:DeleteAccessPointForObjectLambda
- s3:DeleteAccessPointPolicy
- s3:DeleteAccessPointPolicyForObjectLambda
- s3:DeleteBucket
- s3:DeleteBucketOwnershipControls
- s3:DeleteBucketPolicy
- s3:DeleteBucketWebsite
- s3:DeleteJobTagging
- s3:DeleteObject
- s3:DeleteObjectTagging
- s3:DeleteObjectVersion
- s3:DeleteObjectVersionTagging
- s3:DeleteStorageLensConfiguration
- s3:DeleteStorageLensConfigurationTagging
- s3:DescribeJob
- s3:Get*
- s3:List*
- s3:ObjectOwnerOverrideToBucketOwner
- s3:Put*
- s3:ReplicateDelete
- s3:ReplicateObject
- s3:ReplicateTags
- s3:RestoreObject
- s3:UpdateJobPriority
- s3:UpdateJobStatus
Resource:
- Fn::Join:
- ''
- - !Sub "arn:${AWS::Partition}:s3:::"
- !Ref ArtifactoryS3Bucket
- Fn::Join:
- ''
- - !Sub "arn:${AWS::Partition}:s3:::"
- !Ref ArtifactoryS3Bucket
- "/*"
Roles:
- !Ref ArtifactoryHostRole
# ArtifactoryEbsVolume:
# Type: AWS::EC2::Volume
# Properties:
# AvailabilityZone:
# !If
# - IsMultiAzDatabase
# - !Select
# - '0'
# - !Ref AvailabilityZones
# - !Ref DatabasePreferredAz
# Encrypted: false
# Size: !Ref VolumeSize
# Tags:
# - Key: Name
# Value: !Sub "Artifactory-${AWS::StackName}"
# VolumeType: gp2
# DeletionPolicy: Snapshot
# UpdateReplacePolicy: Snapshot
ArtifactoryEfsFileSystem:
Type: AWS::EFS::FileSystem
Properties:
BackupPolicy:
Status: DISABLED
Encrypted: true
FileSystemTags:
- Key: Name
Value: !Sub "Artifactory-${AWS::StackName}"
PerformanceMode: generalPurpose
ThroughputMode: bursting
ArtifactoryEfsMountTarget1:
Type: AWS::EFS::MountTarget
Properties:
FileSystemId: !Ref ArtifactoryEfsFileSystem
SecurityGroups:
- !Ref EfsSecurityGroup
SubnetId: !Select ['0', !Ref SubnetIds]
ArtifactoryEfsMountTarget2:
Type: AWS::EFS::MountTarget
Properties:
FileSystemId: !Ref ArtifactoryEfsFileSystem
SecurityGroups:
- !Ref EfsSecurityGroup
SubnetId: !Select ['1', !Ref SubnetIds]
Outputs:
S3Bucket:
Value: !Ref ArtifactoryS3Bucket
Description: Actual S3 bucket created for Artifactory
DatabaseDriver:
Value: !FindInMap [DatabaseMap, "Postgres", Driver]
DatabasePlugin:
Value: !FindInMap [DatabaseMap, "Postgres", Plugin]
DatabasePluginUrl:
Value: !Sub
- "${MainURL}${PluginVersion}"
- {
MainURL: !FindInMap [DatabaseMap, "Postgres", PluginURL],
PluginVersion: !FindInMap [DatabaseMap, "Postgres", Plugin]
}
DatabaseType:
Value: !FindInMap [DatabaseMap, "Postgres", Name]
DatabaseUrl:
Value: !Sub
- "jdbc:${DatabaseType}://${ArtifactoryDatabaseEndpointAddress}:${port}/${DatabaseName}${extraDatabaseOps}"
- {
DatabaseType: !FindInMap [DatabaseMap, "Postgres", Name],
ArtifactoryDatabaseEndpointAddress: !GetAtt ArtifactoryDatabase.Endpoint.Address,
port: !FindInMap [DatabaseMap, "Postgres", port],
extraDatabaseOps: !FindInMap [DatabaseMap, "Postgres", extraDatabaseOps],
}
XrayMasterDatabaseUrl:
Value: !Sub
- "${ArtifactoryDatabaseEndpointAddress}:${port}/${DatabaseName}?sslmode=disable"
- {
ArtifactoryDatabaseEndpointAddress: !GetAtt ArtifactoryDatabase.Endpoint.Address,
port: !FindInMap [DatabaseMap, "Postgres", port],
}
XrayDatabaseUrl:
Value: !Sub
- "${ArtifactoryDatabaseEndpointAddress}:${port}/xraydb?sslmode=disable"
- {
ArtifactoryDatabaseEndpointAddress: !GetAtt ArtifactoryDatabase.Endpoint.Address,
port: !FindInMap [DatabaseMap, "Postgres", port],
}
JavaOpts:
Value: !Sub
- "-Xms${min}g -Xmx${max}g"
- {
min: !FindInMap [JavaOptionstoInstance, !Ref InstanceType, Min],
max: !FindInMap [JavaOptionstoInstance, !Ref InstanceType, Max]
}
DeploymentSize:
Value: !FindInMap [JavaOptionstoInstance, !Ref InstanceType, DeploymentSize]
ArtifactoryEfsFileSystem:
Value: !Ref ArtifactoryEfsFileSystem
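# Deployment sketch (an assumption; this nested template is normally launched by a
# parent Quick Start stack): the stack name, template filename, and parameter
# values below are placeholders, and the remaining required parameters are omitted
# for brevity.
#   aws cloudformation create-stack \
#     --stack-name artifactory-core \
#     --template-body file://artifactory-core-infrastructure.yaml \
#     --parameters ParameterKey=VpcId,ParameterValue=vpc-0abc1234 \
#                  ParameterKey=DatabaseUser,ParameterValue=artifactory \
#                  ParameterKey=DatabaseName,ParameterValue=artifactory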

View File

@@ -0,0 +1,466 @@
AWSTemplateFormatVersion: "2010-09-09"
Description: "Artifactory: Deploys the EC2 Autoscaling, LaunchConfig and instances (qs-1qpmmjh5o)"
Metadata:
cfn-lint:
config:
ignore_checks:
- W9006
- W9002
- W9003
- W9004
- E9101
ignore_reasons:
- E9101: "'master' is part of the product naming conventions for now"
Parameters:
PrivateSubnetIds:
Type: List<AWS::EC2::Subnet::Id>
MinScalingNodes:
Type: Number
MaxScalingNodes:
Type: Number
DeploymentTag:
Type: String
HostRole:
Type: String
QsS3BucketName:
Type: String
QsS3KeyPrefix:
Type: String
QsS3Uri:
Type: String
ArtifactoryLicensesSecretName:
Type: String
ArtifactoryServerName:
Type: String
Certificate:
Type: String
CertificateKey:
Type: String
NoEcho: 'true'
CertificateDomain:
Type: String
EnableSSL:
Type: String
ArtifactoryS3Bucket:
Type: String
DatabaseUrl:
Type: String
DatabaseDriver:
Type: String
DatabasePluginUrl:
Type: String
DatabasePlugin:
Type: String
DatabaseType:
Type: String
DatabaseUser:
Type: String
DatabasePassword:
Type: String
NoEcho: 'true'
MasterKey:
Type: String
NoEcho: 'true'
ExtraJavaOptions:
Type: String
ArtifactoryVersion:
Type: String
KeyPairName:
Type: AWS::EC2::KeyPair::KeyName
TargetGroupARN:
Type: String
SSLTargetGroupARN:
Type: String
InternalTargetGroupARN:
Type: String
HostProfile:
Type: String
SecurityGroups:
Type: String
InstanceType:
Type: String
# PrimaryVolume:
# Type: String
# VolumeSize:
# Type: Number
ArtifactoryEfsFileSystem:
Type: String
# To populate additional mappings use following link
# https://raw.githubusercontent.com/aws-quickstart/quickstart-linux-bastion/master/templates/linux-bastion.template
Mappings:
AWSAMIRegionMap:
ap-northeast-1:
CentOS7HVM: "ami-06a46da680048c8ae"
ap-northeast-2:
CentOS7HVM: "ami-06e83aceba2cb0907"
ap-south-1:
CentOS7HVM: "ami-026f33d38b6410e30"
ap-southeast-1:
CentOS7HVM: "ami-07f65177cb990d65b"
ap-southeast-2:
CentOS7HVM: "ami-0b2045146eb00b617"
ca-central-1:
CentOS7HVM: "ami-04a25c39dc7a8aebb"
eu-central-1:
CentOS7HVM: "ami-0e8286b71b81c3cc1"
me-south-1:
CentOS7HVM: "ami-011c71a894b10f35b"
ap-east-1:
CentOS7HVM: "ami-0e5c29e6c87a9644f"
eu-north-1:
CentOS7HVM: "ami-05788af9005ef9a93"
eu-south-1:
CentOS7HVM: "ami-0a84267606bcea16b"
eu-west-1:
CentOS7HVM: "ami-0b850cf02cc00fdc8"
eu-west-2:
CentOS7HVM: "ami-09e5afc68eed60ef4"
eu-west-3:
CentOS7HVM: "ami-0cb72d2e599cffbf9"
sa-east-1:
CentOS7HVM: "ami-0b30f38d939dd4b54"
us-east-1:
CentOS7HVM: "ami-0affd4508a5d2481b"
us-east-2:
CentOS7HVM: "ami-01e36b7901e884a10"
us-west-1:
CentOS7HVM: "ami-098f55b4287a885ba"
us-west-2:
CentOS7HVM: "ami-0bc06212a56393ee1"
cn-north-1:
CentOS7HVM: "ami-0e02aaefeb74c3373"
cn-northwest-1:
CentOS7HVM: "ami-07183a7702633260b"
us-gov-east-1:
CentOS7HVM: "ami-00e30c71"
us-gov-west-1:
CentOS7HVM: "ami-bbba86da"
Resources:
ArtifactoryScalingGroup:
Type: AWS::AutoScaling::AutoScalingGroup
Properties:
LaunchConfigurationName: !Ref ArtifactoryLaunchConfiguration
VPCZoneIdentifier: !Ref PrivateSubnetIds
MinSize: !Ref MinScalingNodes
MaxSize: !Ref MaxScalingNodes
Cooldown: '300'
DesiredCapacity: !Ref MinScalingNodes
TargetGroupARNs:
- !Ref TargetGroupARN
- !Ref SSLTargetGroupARN
- !Ref InternalTargetGroupARN
HealthCheckType: ELB
HealthCheckGracePeriod: 1800
Tags:
- Key: Name
Value: !Ref DeploymentTag
PropagateAtLaunch: true
- Key: ArtifactoryVersion
Value: !Ref ArtifactoryVersion
PropagateAtLaunch: true
TerminationPolicies:
- OldestInstance
- Default
CreationPolicy:
ResourceSignal:
Count: !Ref MinScalingNodes
Timeout: PT60M
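  # The CreationPolicy above is satisfied by the cfn_success/cfn_fail helpers in
  # the launch configuration UserData below: each instance sends cfn-signal for
  # this scaling group once Ansible finishes (or fails), within the PT60M timeout.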
ArtifactoryLaunchConfiguration:
Type: AWS::AutoScaling::LaunchConfiguration
Metadata:
AWS::CloudFormation::Authentication:
S3AccessCreds:
type: S3
roleName:
- !Ref HostRole # !Ref ArtifactoryHostRole
buckets:
- !Ref QsS3BucketName
AWS::CloudFormation::Init:
configSets:
jfrog_ami_setup:
- "config-cloudwatch"
- "config-ansible-art-ami"
- "config-artifactory"
- "secure-artifactory"
artifactory_install:
- "config-cloudwatch"
- "config-artifactory"
- "secure-artifactory"
config-cloudwatch:
files:
/root/cloudwatch.conf:
content: |
[general]
state_file = /var/awslogs/state/agent-state
[/var/log/messages]
file = /var/log/messages
log_group_name = /artifactory/instances/{instance_id}
log_stream_name = /var/log/messages/
datetime_format = %b %d %H:%M:%S
[/var/log/amazon/efs]
file = /var/log/amazon/efs
log_group_name = /artifactory/instances/{instance_id}
log_stream_name = /var/log/amazon/efs/
datetime_format = %b %d %H:%M:%S
[/var/log/jfrog-ami-setup.log]
                file = /var/log/jfrog-ami-setup.log
log_group_name = /artifactory/instances/{instance_id}
log_stream_name = /var/log/jfrog-ami-setup.log
datetime_format = %b %d %H:%M:%S
[/var/log/jfrog-ami-artifactory.log]
                file = /var/log/jfrog-ami-artifactory.log
log_group_name = /artifactory/instances/{instance_id}
log_stream_name = /var/log/jfrog-ami-artifactory.log
datetime_format = %b %d %H:%M:%S
mode: "0400"
config-ansible-art-ami:
files:
/root/.jfrog_ami/jfrog-ami-setup.yml:
content: !Sub |
# Base install for JFrogAMIInstance
- import_playbook: artifactory-ami.yml
vars:
ami_creation: false
artifactory_ha_enabled: false
artifactory_tar: "https://releases.jfrog.io/artifactory/artifactory-pro/org/artifactory/pro/jfrog-artifactory-pro/${ArtifactoryVersion}/jfrog-artifactory-pro-${ArtifactoryVersion}-linux.tar.gz"
artifactory_version: ${ArtifactoryVersion}
db_download_url: "https://jdbc.postgresql.org/download/postgresql-42.2.12.jar"
db_type: "postgresql"
db_driver: "org.postgresql.Driver"
mode: "0400"
# config-artifactory-primary:
# files:
# /root/attach_volume.sh:
# content: !Sub |
# #!/usr/bin/env bash
# echo "Using primary volume ID ${PrimaryVolume}"
# VOLUME_ID="${PrimaryVolume}"
# echo "VOLUME_ID: $VOLUME_ID"
# if [[ -z "$VOLUME_ID" ]]; then
# echo 'Invalid $VOLUME_ID'
# exit 1
# fi
# # Get instance id from AWS
# INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id)
# # Attach the volume created by another CFT
# # the device name should become /dev/nvme1n1
# # See: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nvme-ebs-volumes.html
# echo "Attaching volume $VOLUME_ID to instance $INSTANCE_ID"
# /var/awslogs/bin/aws ec2 attach-volume --volume-id $VOLUME_ID --instance-id $INSTANCE_ID --device /dev/xvdf --region ${AWS::Region}
# echo "Wait for volume $VOLUME_ID to attach"
# sleep 30 # Give volume time to attach
# lsblk # debug
# mode: "0770"
config-artifactory:
files:
/root/mount_efs.sh:
content: !Sub |
#!/usr/bin/env bash
ARTIFACTORY_HOME="/opt/jfrog/artifactory-pro-${ArtifactoryVersion}"
# Get instance id from AWS
INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id)
EFS_FILE_SYSTEM_ID="${ArtifactoryEfsFileSystem}"
EFS_MOUNT_POINT="/efsmount"
EFS_MOUNT_TARGET_DNS="$EFS_FILE_SYSTEM_ID.efs.${AWS::Region}.amazonaws.com"
echo "before mounting efs"
ls -l /
mkdir -p $EFS_MOUNT_POINT
mount -t nfs4 -o nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,noresvport $EFS_MOUNT_TARGET_DNS:/ $EFS_MOUNT_POINT
chmod go+rw $EFS_MOUNT_POINT
echo "after mounting efs"
ls -l /
# mkdir -p $EFS_MOUNT_POINT/$INSTANCE_ID/var
# mkdir -p $ARTIFACTORY_HOME
# ln -s $EFS_MOUNT_POINT/$INSTANCE_ID/var $ARTIFACTORY_HOME
echo "before creating plugins folder"
echo ls -l $EFS_MOUNT_POINT
ls -l $EFS_MOUNT_POINT
mkdir -p $EFS_MOUNT_POINT/plugins
echo ls -l $ARTIFACTORY_HOME/var/etc/artifactory
ls -l $ARTIFACTORY_HOME/var/etc/artifactory
# mkdir -p $ARTIFACTORY_HOME/var/etc/artifactory
# ln -s $EFS_MOUNT_POINT/plugins $ARTIFACTORY_HOME/var/etc/artifactory
# echo ls -l $ARTIFACTORY_HOME/var/etc/artifactory
# ls -l $ARTIFACTORY_HOME/var/etc/artifactory
mode: "0770"
/root/.jfrog_ami/artifactory.yml:
content: !Sub |
# Base install for Artifactory
- import_playbook: site-artifactory.yml
vars:
artifactory_download_directory: "/opt/jfrog"
artifactory_home: "/opt/jfrog/artifactory-pro-${ArtifactoryVersion}"
artifactory_ha_enabled: true
artifactory_server_name: ${ArtifactoryServerName}
server_name: ${ArtifactoryServerName}.${CertificateDomain}
s3_region: ${AWS::Region}
s3_bucket: ${ArtifactoryS3Bucket}
certificate: ${Certificate}
certificate_key: ${CertificateKey}
certificate_domain: ${CertificateDomain}
enable_ssl: ${EnableSSL}
ssl_dir: /etc/pki/tls/certs
db_type: ${DatabaseType}
db_driver: ${DatabaseDriver}
db_url: ${DatabaseUrl}
db_user: ${DatabaseUser}
db_password: ${DatabasePassword}
master_key: ${MasterKey}
join_key: ${MasterKey}
extra_java_opts: ${ExtraJavaOptions}
artifactory_version: ${ArtifactoryVersion}
artifactory_keystore:
path: /opt/jfrog/artifactory/app/third-party/java/lib/security/cacerts
default_password: changeit
new_keystore_pass: ${DatabasePassword}
artifactory_java_db_drivers:
- name: ${DatabasePlugin}
url: ${DatabasePluginUrl}
owner: artifactory
group: artifactory
product_id: 'CloudFormation_SP_EC2/1.0.0'
mode: "0400"
/root/.vault_pass.txt:
content: !Sub |
${DatabasePassword}
mode: "0400"
/root/.secureit.sh:
content:
ansible-vault encrypt /root/.jfrog_ami/artifactory.yml --vault-id /root/.vault_pass.txt
mode: "0770"
secure-artifactory:
commands:
'secure ansible playbook':
command: '/root/.secureit.sh'
ignoreErrors: 'false'
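    # Note: the UserData below runs cfn-init with the "jfrog_ami_setup" configset;
    # the "artifactory_install" configset defined above is the same sequence
    # without the config-ansible-art-ami step.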
Properties:
KeyName: !Ref KeyPairName
IamInstanceProfile: !Ref HostProfile
ImageId: !FindInMap
- AWSAMIRegionMap
- !Ref AWS::Region
- 'CentOS7HVM'
SecurityGroups:
- !Ref SecurityGroups
InstanceType: !Ref InstanceType
# BlockDeviceMappings:
# - DeviceName: /dev/xvda
# Ebs:
# VolumeSize: !Ref VolumeSize
# VolumeType: gp2
# DeleteOnTermination: true
# Encrypted: true
UserData:
Fn::Base64:
!Sub |
#!/bin/bash -x
#CFN Functions
function cfn_fail
{
cfn-signal -e 1 --stack ${AWS::StackName} --region ${AWS::Region} --resource ArtifactoryScalingGroup
exit 1
}
function cfn_success
{
cfn-signal -e 0 --stack ${AWS::StackName} --region ${AWS::Region} --resource ArtifactoryScalingGroup
exit 0
}
S3URI=${QsS3Uri}
# Update OS
yum update -y
# Install EPEL Repository
yum install -y epel-release
# Install git, jq, nfs-utils, policycoreutils python
yum install -y git jq nfs-utils policycoreutils-python
yum update --security -y 2>&1 | tee /var/log/userdata.yum_security_update.log
yum install -y python3 libselinux-python3
echo $PATH
PATH=/opt/aws/bin:$PATH
echo $PATH
# Create virtual env and activate
python3 -m venv ~/venv --system-site-packages
source ~/venv/bin/activate
pip install --upgrade pip
pip install wheel
# Install Cloudformation helper scripts
pip install https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-py3-latest.tar.gz 2>&1 | tee /var/log/userdata.aws_cfn_bootstrap_install.log
pip install awscli 2>&1 | tee /var/log/userdata.awscli_install.log
pip install ansible 2>&1 | tee /var/log/userdata.ansible_install.log
mkdir ~/.jfrog_ami
aws s3 --region ${AWS::Region} sync s3://${QsS3BucketName}/${QsS3KeyPrefix}cloudInstallerScripts/ ~/.jfrog_ami/ || cfn_fail
setsebool httpd_can_network_connect 1 -P
# CentOS cloned virtual machines do not create a new machine id
# https://www.thegeekdiary.com/centos-rhel-7-how-to-change-the-machine-id/
rm -f /etc/machine-id
systemd-machine-id-setup
cfn-init -v --stack ${AWS::StackName} --resource ArtifactoryLaunchConfiguration --configsets jfrog_ami_setup --region ${AWS::Region} || cfn_fail
# Setup CloudWatch Agent
curl https://s3.amazonaws.com/aws-cloudwatch/downloads/latest/awslogs-agent-setup.py -O
chmod +x ./awslogs-agent-setup.py
./awslogs-agent-setup.py -n -r ${AWS::Region} -c /root/cloudwatch.conf 2>&1 | tee /var/log/userdata.cloudwatch_agent_install.log
/root/mount_efs.sh 2>&1 | tee /var/log/jfrog-efs-mount.log || cfn_fail
#/root/attach_volume.sh || cfn_fail
ansible-galaxy collection install community.general ansible.posix
setsebool httpd_can_network_connect 1 -P
aws secretsmanager get-secret-value --secret-id ${ArtifactoryLicensesSecretName} --region ${AWS::Region} | jq -r '{"artifactory_licenses":(.SecretString | fromjson )}' > ~/.jfrog_ami/licenses.json || cfn_fail
ansible-playbook /root/.jfrog_ami/jfrog-ami-setup.yml --vault-id /root/.vault_pass.txt 2>&1 | tee /var/log/jfrog-ami-setup.log || cfn_fail
ansible-playbook /root/.jfrog_ami/artifactory.yml -e "@~/.jfrog_ami/licenses.json" --vault-id /root/.vault_pass.txt 2>&1 | tee /var/log/jfrog-ami-artifactory.log || cfn_fail
rm -rf /root/.secureit.sh
cfn_success &> /var/log/cfn_success.log
cfn_success || cfn_fail
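# Verification sketch (an assumption, not part of this template): once the stack
# reports CREATE_COMPLETE, the signal events and the per-instance Ansible logs can
# be inspected; the stack name below is a placeholder.
#   aws cloudformation describe-stack-events --stack-name artifactory-asg \
#     --query "StackEvents[?LogicalResourceId=='ArtifactoryScalingGroup']"
#   # on an instance: tail -f /var/log/jfrog-ami-artifactory.log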

Some files were not shown because too many files have changed in this diff.