Merge branch 'master' of github.com:jfrog/JFrog-Cloud-Installers into openshift4

John Peterson
2020-06-29 17:22:16 -07:00
197 changed files with 13010 additions and 5269 deletions

8
.gitignore vendored Normal file

@@ -0,0 +1,8 @@
.molecule
*.log
*.swp
.tox
./idea
.idea/
.DS_Store

12
Amazon/containers/Dockerfile Executable file

@@ -0,0 +1,12 @@
ARG UPSTREAM_IMAGE=docker.bintray.io/jfrog/artifactory-jcr
ARG UPSTREAM_TAG
FROM ${UPSTREAM_IMAGE}:${UPSTREAM_TAG}
USER root
# Copy security.xml
COPY ./security.xml /security_bootstrap/security.import.xml
RUN chown -R artifactory:artifactory /security_bootstrap
# Copy entrypoint script.
COPY ./entrypoint-artifactory.sh /entrypoint-artifactory.sh
COPY ./installer-info.json /artifactory_bootstrap/info/installer-info.json
RUN chmod 755 /entrypoint-artifactory.sh
USER artifactory


@@ -16,7 +16,7 @@ JFrog Container Registry can be installed into either an ECS or EKS cluster.
 To simply get up and running, you can try:
-```docker run -d -p 8081:8081 <image-url>```
+```docker run -d -p 8081:8081 -p 8082:8082 <image-url>```
 After this, you can access the UI at \<URL\>:8081. The default username is 'admin'. See 'Getting or setting initial password' to find out how to get the initial password.
 ### Getting or setting initial password


@@ -0,0 +1,88 @@
#!/usr/bin/env bash
VERSION=$1
EDITIONS=( artifactory-pro artifactory-jcr )
# for loop start: editions
for EDITION in "${EDITIONS[@]}"
do
UPSTREAM_IMAGE_NAME=docker.bintray.io/jfrog/$EDITION
BUILD_IMAGE_NAME=partnership-public-images.jfrog.io/aws/$EDITION
ARTIFACTORY_PASSWORD=corona1831
# Logic starts here
if [ -z "$VERSION" ]
then
echo "No version passed in. Build failed."
echo "usage: buildAwsContainers <vesion>"
echo "example: buildAwsContainers 7.2.1 "
exit -1
fi
# Extract and modify the entrypoint to run our custom code for the first-time password
docker pull $UPSTREAM_IMAGE_NAME:$VERSION
docker run -d --rm --name tmp-docker $UPSTREAM_IMAGE_NAME:$VERSION
docker cp tmp-docker:/entrypoint-artifactory.sh original-entrypoint.sh
docker rm -f tmp-docker
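# Replace the addExtraJavaArgs marker line in the entrypoint with the contents of extra_conf (the perl /e flag executes `cat extra_conf` as the replacement)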
perl -pe 's/^addExtraJavaArgs$/`cat extra_conf`/ge' original-entrypoint.sh > entrypoint-artifactory.sh
#Create installer-info file
if [ "$EDITION" == "artifactory-pro" ]
then
cat <<EOF > installer-info.json
{
"productId": "CloudFormation_artifactory-ha/$VERSION",
"features": [
{
"featureId": "Partner/ACC-006973"
}
]
}
EOF
else
cat <<EOF > installer-info.json
{
"productId": "CloudFormation_artifactory-jcr/$VERSION",
"features": [
{
"featureId": "Partner/ACC-006973"
}
]
}
EOF
fi
cat installer-info.json
# Create the new docker image
docker build --no-cache --build-arg UPSTREAM_TAG=$VERSION -t $BUILD_IMAGE_NAME:$VERSION .
# Run minimal test
set -x
docker run --name test-new-image -d -e ARTIFACTORY_PASSWORD=$ARTIFACTORY_PASSWORD -p 8081:8081 -p 8082:8082 $BUILD_IMAGE_NAME:$VERSION
# Wait for it to come up
SUCCESS=false
for i in {1..30}
do
STATUS=$(docker exec test-new-image curl -u admin:$ARTIFACTORY_PASSWORD http://localhost:8082/router/api/v1/system/health | jq .services[0].state)
if [ "$STATUS" == "\"HEALTHY\"" ]; then
echo "Build successful!"
SUCCESS=true
break
fi
echo "Container is not up yet, waiting 10 seconds..."
sleep 10
done
# cleanup
docker stop test-new-image
docker rm test-new-image
rm installer-info.json
if [ "$SUCCESS" = true ] ; then
echo "Test Succeeded. Build succeeded."
else
echo "Test failed. Build failed. Removing docker image"
exit 1
fi
# for loop end: editions
done

18
Amazon/containers/extra_conf Executable file

@@ -0,0 +1,18 @@
addExtraJavaArgs
setupFirstTimePass() {
# Create a unique password
if [ -z "$ARTIFACTORY_PASSWORD" ]; then
echo "INFO: Since ARTIFACTORY_PASSWORD environment is not set. We are generating our own random password."
ARTIFACTORY_PASSWORD=$(openssl rand -base64 32 | tr -dc A-Za-z0-9 | head -c 18)
echo "Generated ARTIFACTORY_PASSWORD is $ARTIFACTORY_PASSWORD"
echo $ARTIFACTORY_PASSWORD > /var/opt/jfrog/artifactory/generated-pass.txt
fi
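# Salted-hash format consumed via security.import.xml: md5 of the password concatenated with the salt wrapped in braces; the salt must match <salt> in security.xml (CAFEBABEEBABEFAC)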
SALTED_PASSWD=$(echo -n ${ARTIFACTORY_PASSWORD}{CAFEBABEEBABEFAC} | md5sum | cut -d ' ' -f1)
sed -i -e "s/<password></<password>$SALTED_PASSWD</g" /security_bootstrap/security.import.xml
cp /security_bootstrap/security.import.xml /var/opt/jfrog/artifactory/etc/artifactory/security/security.import.xml
}
# Set up first time password only on initial boot
if [[ ! -f "/var/opt/jfrog/artifactory/init.boot.done" ]]; then
touch /var/opt/jfrog/artifactory/init.boot.done
setupFirstTimePass
fi

18
Amazon/containers/security.xml Executable file

@@ -0,0 +1,18 @@
<security version="v8">
<users>
<user>
<username>admin</username>
<password></password>
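<!-- The empty password is filled in at first boot by setupFirstTimePass (see extra_conf): a salted MD5 using the salt below. -->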
<salt>CAFEBABEEBABEFAC</salt>
<admin>true</admin>
<enabled>true</enabled>
<updatableProfile>true</updatableProfile>
<accountNonExpired>true</accountNonExpired>
<credentialsNonExpired>true</credentialsNonExpired>
<accountNonLocked>true</accountNonLocked>
<realm>internal</realm>
<transientUser>false</transientUser>
<groups/>
</user>
</users>
</security>

35
Ansible/README.md Normal file

@@ -0,0 +1,35 @@
# JFrog Ansible Collection
This Ansible directory consists of the following directories that support the JFrog Ansible collection.
* [collection directory](collection) - This directory contains the Ansible collection package that has the Ansible roles for Artifactory and Xray. See the collection [README](collection/README.md) for details on the available roles and variables.
* [infra directory](infra) - This directory contains example infrastructure templates that can be used for testing and as example deployments.
* [project directory](project) - This directory contains example playbooks for various architectures from single Artifactory (RT) deployments to high-availability setups.
* [test directory](test) - This directory contains Gradle tests that can be used to verify a deployment. It also has Ansible playbooks for creating infrastructure, provisioning software and testing with Gradle.
## Getting Started
1. Install this collection or the roles in your Ansible path using your ansible.cfg file. The following is an example:
```
# Installs collections into [current dir]/ansible_collections/namespace/collection_name
collections_paths = ~/.ansible/collections:/usr/share/ansible/collections:collection
# Installs roles into [current dir]/roles/namespace.rolename
roles_path = Ansible/collection/jfrog/ansible/roles
```
2. Ansible uses SSH to connect to hosts. Ensure that your SSH private key is on your client and the public keys are installed on your Ansible hosts. If you are using a bastion host, you can add the following Ansible variable to allow proxying through the bastion host.
```
ansible_ssh_common_args: '-o ProxyCommand="ssh -o StrictHostKeyChecking=no -A user@host -W %h:%p"'
eg.
ansible_ssh_common_args: '-o ProxyCommand="ssh -o StrictHostKeyChecking=no -A ubuntu@{{ azureDeployment.deployment.outputs.lbIp.value }} -W %h:%p"'
```
3. Create your inventory file. Use one of the examples from the [project directory](project) to construct an inventory file (hosts.yml) with the host addresses and variables (a minimal sketch follows these steps).
4. Create your playbook. Use one of the examples from the [project directory](project) to construct a playbook using the JFrog Ansible roles. These roles will be applied to your inventory and provision software.
5. Execute the following command to provision the JFrog software with Ansible. Variables can also be passed in on the command line.
```
ansible-playbook -i hosts.yml playbook.yml --extra-vars "master_key=$(openssl rand -hex 16) join_key=$(openssl rand -hex 16)"
```
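As a minimal sketch, an inventory for a single Artifactory node and its database could look like the following (group names, addresses, and variables here are illustrative; the [project directory](project) examples are the authoritative reference):
```
all:
  children:
    postgres:
      hosts:
        10.0.0.120:
    artifactory:
      hosts:
        10.0.0.121:
          artifactory_is_primary: true
```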


@@ -0,0 +1,8 @@
#
# Ansible managed
#
exclude_paths:
- ./meta/version.yml
- ./meta/exception.yml
- ./meta/preferences.yml
- ./molecule/default/verify.yml


@@ -0,0 +1,12 @@
---
extends: default
rules:
braces:
max-spaces-inside: 1
level: error
brackets:
max-spaces-inside: 1
level: error
line-length: disable
truthy: disable


@@ -0,0 +1,87 @@
# Ansible
This repo contains the Ansible collection for JFrog roles. These roles allow you to provision Artifactory for High-Availability using a Primary node and multiple Secondary nodes. Additionally, a Postgresql role is provided for installing an Artifactory Postgresql database.
## Roles Provided
### artifactory
The artifactory role installs the Artifactory Pro software onto the host. Per the Vars below, it will configure a node as primary or secondary. This role uses the artifactory-nginx role to install nginx.
### artifactory-nginx-ssl
The artifactory-nginx-ssl role installs and configures nginx for SSL.
### postgres
The postgres role will install Postgresql software and configure a database and user to support an Artifactory or Xray server.
### xray
The xray role will install Xray software onto the host. An Artifactory server and a Postgres database are required.
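As a rough sketch, a playbook applying these roles could look like the following (host group names are illustrative; see the [project](../project) examples for complete playbooks):
```
- hosts: postgres
  roles:
    - postgres
- hosts: artifactory
  roles:
    - artifactory
- hosts: xray
  roles:
    - xray
```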
## Vars Required
The following Vars must be configured.
### database vars
* db_users: This is a list of database users to create. eg. db_users: - { db_user: "artifactory", db_password: "Art1fAct0ry" }
* dbs: This is the list of databases to create. eg. dbs: - { db_name: "artifactory", db_owner: "artifactory" } (both are shown in YAML form below)
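Written out as YAML, the two database vars above look like this:
```
db_users:
  - { db_user: "artifactory", db_password: "Art1fAct0ry" }
dbs:
  - { db_name: "artifactory", db_owner: "artifactory" }
```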
### artifactory vars
* artifactory_version: The version of Artifactory to install. eg. "7.4.1"
* master_key: This is the Artifactory Master Key.
* join_key: This is the Artifactory Join Key.
* db_download_url: This is the download URL for the JDBC driver for your database. eg. "https://jdbc.postgresql.org/download/postgresql-42.2.12.jar"
* db_type: This is the database type. eg. "postgresql"
* db_driver: This is the JDBC driver class. eg. "org.postgresql.Driver"
* db_url: This is the JDBC database url. eg. "jdbc:postgresql://10.0.0.120:5432/artifactory"
* db_user: The database user to configure. eg. "artifactory"
* db_password: The database password to configure. eg. "Art1fact0ry"
* server_name: This is the server name. eg. "artifactory.54.175.51.178.xip.io"
* system_file: Your own system YAML file can be specified and used. If specified, this file will be used rather than constructing a file from the parameters above.
* binary_store_file: Your own binary store file can be used. If specified, the default cluster-file-system will not be used.
### primary vars
* artifactory_is_primary: For the primary node this must be set to **true**.
* artifactory_license1 - 5: These are the cluster licenses.
* artifactory_license_file: Your own license file can be used. If specified, a license file constructed from the licenses above will not be used.
### secondary vars
* artifactory_is_primary: For the secondary node(s) this must be set to **false** (see the inventory sketch below).
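For example, the flag can be set per host in the inventory (group names and addresses here are illustrative):
```
artifactory_primary:
  hosts:
    10.0.0.10:
      artifactory_is_primary: true
artifactory_secondary:
  hosts:
    10.0.0.11:
      artifactory_is_primary: false
    10.0.0.12:
      artifactory_is_primary: false
```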
### ssl vars (Used with artifactory-nginx-ssl role)
* certificate: This is the SSL cert.
* certificate_key: This is the SSL private key.
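The artifactory-nginx-ssl templates later in this changeset split these two vars on '|', so each can be supplied as a single-line string with '|' standing in for newlines, roughly like this (truncated, illustrative values):
```
certificate: "-----BEGIN CERTIFICATE-----|MIIDezCCAmMC...|-----END CERTIFICATE-----"
certificate_key: "-----BEGIN PRIVATE KEY-----|MIIEvgIBADAN...|-----END PRIVATE KEY-----"
```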
### xray vars
* xray_version: The version of Xray to install. eg. "3.3.0"
* jfrog_url: This is the Artifactory base URL. eg. "http://ec2-54-237-207-135.compute-1.amazonaws.com"
* master_key: This is the Artifactory Master Key.
* join_key: This is the Artifactory Join Key.
* db_type: This is the database type. eg. "postgresql"
* db_driver: This is the JDBC driver class. eg. "org.postgresql.Driver"
* db_url: This is the database url. eg. "postgres://10.0.0.59:5432/xraydb?sslmode=disable"
* db_user: The database user to configure. eg. "xray"
* db_password: The database password to configure. eg. "xray"
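Pulled together as group vars, the Xray settings above would look like this (all values are the examples given above; master_key and join_key must match the Artifactory server's keys):
```
xray_version: "3.3.0"
jfrog_url: "http://ec2-54-237-207-135.compute-1.amazonaws.com"
db_type: "postgresql"
db_driver: "org.postgresql.Driver"
db_url: "postgres://10.0.0.59:5432/xraydb?sslmode=disable"
db_user: "xray"
db_password: "xray"
```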
## Example Inventory and Playbooks
Example playbooks are located in the [project](../project) directory. This directory contains several example inventories and playbooks for different Artifactory, HA, and Xray architectures.
## Executing a Playbook
```
ansible-playbook -i <hosts file> <playbook file>
eg.
ansible-playbook -i example-playbooks/rt-xray-ha/hosts.yml example-playbooks/rt-xray-ha/playbook.yml
```
## Autogenerating Master and Join Keys
You may want to auto-generate your master and join keys and apply them to all the nodes.
```
ansible-playbook -i hosts.yml playbook.yml --extra-vars "master_key=$(openssl rand -hex 16) join_key=$(openssl rand -hex 16)"
```
## Bastion Hosts
In many cases, you may want to run this Ansible collection through a Bastion host to provision JFrog servers. You can include the following Var for a host or group of hosts:
```
ansible_ssh_common_args: '-o ProxyCommand="ssh -o StrictHostKeyChecking=no -A user@host -W %h:%p"'
eg.
ansible_ssh_common_args: '-o ProxyCommand="ssh -o StrictHostKeyChecking=no -A ubuntu@{{ azureDeployment.deployment.outputs.lbIp.value }} -W %h:%p"'
```


@@ -0,0 +1,57 @@
### REQUIRED
# The namespace of the collection. This can be a company/brand/organization or product namespace under which all
# content lives. May only contain alphanumeric lowercase characters and underscores. Namespaces cannot start with
# underscores or numbers and cannot contain consecutive underscores
namespace: jfrog
# The name of the collection. Has the same character restrictions as 'namespace'
name: ansible
# The version of the collection. Must be compatible with semantic versioning
version: 1.0.0
# The path to the Markdown (.md) readme file. This path is relative to the root of the collection
readme: README.md
# A list of the collection's content authors. Can be just the name or in the format 'Full Name <email> (url)
# @nicks:irc/im.site#channel'
authors:
- your name <example@domain.com>
### OPTIONAL but strongly recommended
# A short summary description of the collection
description: your collection description
# Either a single license or a list of licenses for content inside of a collection. Ansible Galaxy currently only
# accepts L(SPDX,https://spdx.org/licenses/) licenses. This key is mutually exclusive with 'license_file'
license:
- GPL-2.0-or-later
# The path to the license file for the collection. This path is relative to the root of the collection. This key is
# mutually exclusive with 'license'
license_file: ''
# A list of tags you want to associate with the collection for indexing/searching. A tag name has the same character
# requirements as 'namespace' and 'name'
tags: []
# Collections that this collection requires to be installed for it to be usable. The key of the dict is the
# collection label 'namespace.name'. The value is a version range
# L(specifiers,https://python-semanticversion.readthedocs.io/en/latest/#requirement-specification). Multiple version
# range specifiers can be set and are separated by ','
dependencies: {}
# The URL of the originating SCM repository
repository: http://example.com/repository
# The URL to any online docs
documentation: http://docs.example.com
# The URL to the homepage of the collection/project
homepage: http://example.com
# The URL to the collection issue tracker
issues: http://example.com/issue/tracker


@@ -0,0 +1,31 @@
# Collections Plugins Directory
This directory can be used to ship various plugins inside an Ansible collection. Each plugin is placed in a folder that
is named after the type of plugin it is in. It can also include the `module_utils` and `modules` directory that
would contain module utils and modules respectively.
Here is an example directory of the majority of plugins currently supported by Ansible:
```
└── plugins
├── action
├── become
├── cache
├── callback
├── cliconf
├── connection
├── filter
├── httpapi
├── inventory
├── lookup
├── module_utils
├── modules
├── netconf
├── shell
├── strategy
├── terminal
├── test
└── vars
```
A full list of plugin types can be found at [Working With Plugins](https://docs.ansible.com/ansible/2.9/plugins/plugins.html).


@@ -0,0 +1,29 @@
---
language: python
python: "2.7"
# Use the new container infrastructure
sudo: false
# Install ansible
addons:
apt:
packages:
- python-pip
install:
# Install ansible
- pip install ansible
# Check ansible version
- ansible --version
# Create ansible.cfg with correct roles_path
- printf '[defaults]\nroles_path=../' >ansible.cfg
script:
# Basic role syntax check
- ansible-playbook tests/test.yml -i tests/inventory --syntax-check
notifications:
webhooks: https://galaxy.ansible.com/api/v1/notifications/


@@ -0,0 +1,2 @@
---
# defaults file for artifactory-nginx


@@ -0,0 +1,2 @@
---
# handlers file for artifactory-nginx


@@ -1,6 +1,6 @@
 galaxy_info:
   author: your name
-  description: your description
+  description: your role description
   company: your company (optional)
   # If the issue tracker for your role is not on github, uncomment the
@@ -16,7 +16,7 @@ galaxy_info:
   # - CC-BY-4.0
   license: license (GPL-2.0-or-later, MIT, etc)
-  min_ansible_version: 2.4
+  min_ansible_version: 2.9
   # If this a Container Enabled role, provide the minimum Ansible Container version.
   # min_ansible_container_version:


@@ -0,0 +1,41 @@
---
# tasks file for artifactory-nginx
- name: configure the artifactory nginx conf
template:
src: artifactory.conf.j2
dest: /etc/nginx/conf.d/artifactory.conf
owner: root
group: root
mode: '0755'
become: yes
- name: ensure nginx dir exists
file:
path: "/var/opt/jfrog/nginx/ssl"
state: directory
become: yes
- name: configure certificate
template:
src: certificate.pem.j2
dest: "/var/opt/jfrog/nginx/ssl/cert.pem"
become: yes
- name: ensure pki exists
file:
path: "/etc/pki/tls"
state: directory
become: yes
- name: configure key
template:
src: certificate.key.j2
dest: "/etc/pki/tls/cert.key"
become: yes
- name: restart nginx
service:
name: nginx
state: restarted
enabled: yes
become: yes


@@ -0,0 +1,48 @@
###########################################################
## this configuration was generated by JFrog Artifactory ##
###########################################################
## add HA entries when HA is configured
upstream artifactory {
server 127.0.0.1:8082;
}
upstream artifactory-direct {
server 127.0.0.1:8081;
}
ssl_protocols TLSv1.1 TLSv1.2;
ssl_certificate /var/opt/jfrog/nginx/ssl/cert.pem;
ssl_certificate_key /etc/pki/tls/cert.key;
ssl_session_cache shared:SSL:1m;
ssl_prefer_server_ciphers on;
## server configuration
server {
listen 443 ssl http2;
server_name {{ server_name }};
if ($http_x_forwarded_proto = '') {
set $http_x_forwarded_proto $scheme;
}
## Application specific logs
access_log /var/log/nginx/artifactory-access.log;
error_log /var/log/nginx/artifactory-error.log;
rewrite ^/$ /ui/ redirect;
rewrite ^/ui$ /ui/ redirect;
chunked_transfer_encoding on;
client_max_body_size 0;
location / {
proxy_read_timeout 2400s;
proxy_pass_header Server;
proxy_cookie_path ~*^/.* /;
proxy_pass "http://artifactory";
proxy_next_upstream error timeout non_idempotent;
proxy_next_upstream_tries 1;
proxy_set_header X-JFrog-Override-Base-Url $http_x_forwarded_proto://$host:$server_port;
proxy_set_header X-Forwarded-Port $server_port;
proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
proxy_set_header Host $http_host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
location ~ ^/artifactory/ {
proxy_pass http://artifactory-direct;
}
}
}


@@ -0,0 +1,4 @@
{% set cert = certificate_key.split('|') %}
{% for line in cert %}
{{ line }}
{% endfor %}


@@ -0,0 +1,4 @@
{% set cert = certificate.split('|') %}
{% for line in cert %}
{{ line }}
{% endfor %}


@@ -0,0 +1,2 @@
localhost


@@ -0,0 +1,5 @@
---
- hosts: localhost
remote_user: root
roles:
- artifactory-nginx


@@ -0,0 +1,2 @@
---
# vars file for artifactory-nginx


@@ -0,0 +1,29 @@
---
language: python
python: "2.7"
# Use the new container infrastructure
sudo: false
# Install ansible
addons:
apt:
packages:
- python-pip
install:
# Install ansible
- pip install ansible
# Check ansible version
- ansible --version
# Create ansible.cfg with correct roles_path
- printf '[defaults]\nroles_path=../' >ansible.cfg
script:
# Basic role syntax check
- ansible-playbook tests/test.yml -i tests/inventory --syntax-check
notifications:
webhooks: https://galaxy.ansible.com/api/v1/notifications/


@@ -0,0 +1,2 @@
---
# defaults file for artifactory-nginx


@@ -0,0 +1,2 @@
---
# handlers file for artifactory-nginx


@@ -0,0 +1,53 @@
galaxy_info:
author: your name
description: your role description
company: your company (optional)
# If the issue tracker for your role is not on github, uncomment the
# next line and provide a value
# issue_tracker_url: http://example.com/issue/tracker
# Choose a valid license ID from https://spdx.org - some suggested licenses:
# - BSD-3-Clause (default)
# - MIT
# - GPL-2.0-or-later
# - GPL-3.0-only
# - Apache-2.0
# - CC-BY-4.0
license: license (GPL-2.0-or-later, MIT, etc)
min_ansible_version: 2.9
# If this a Container Enabled role, provide the minimum Ansible Container version.
# min_ansible_container_version:
#
# Provide a list of supported platforms, and for each platform a list of versions.
# If you don't wish to enumerate all versions for a particular platform, use 'all'.
# To view available platforms and versions (or releases), visit:
# https://galaxy.ansible.com/api/v1/platforms/
#
# platforms:
# - name: Fedora
# versions:
# - all
# - 25
# - name: SomePlatform
# versions:
# - all
# - 1.0
# - 7
# - 99.99
galaxy_tags: []
# List tags for your role here, one per line. A tag is a keyword that describes
# and categorizes the role. Users find roles by searching for tags. Be sure to
# remove the '[]' above, if you add tags to this list.
#
# NOTE: A tag is limited to a single word comprised of alphanumeric characters.
# Maximum 20 tags per role.
dependencies: []
# List your role dependencies here, one per line. Be sure to remove the '[]' above,
# if you add dependencies to this list.


@@ -0,0 +1,35 @@
---
- name: install nginx
package:
name: nginx
state: present
register: package_res
retries: 5
delay: 60
become: yes
until: package_res is success
- name: configure main nginx conf file.
copy:
src: nginx.conf
dest: /etc/nginx/nginx.conf
owner: root
group: root
mode: '0755'
become: yes
- name: configure the artifactory nginx conf
template:
src: artifactory.conf.j2
dest: /etc/nginx/conf.d/artifactory.conf
owner: root
group: root
mode: '0755'
become: yes
- name: restart nginx
service:
name: nginx
state: restarted
enabled: yes
become: yes


@@ -0,0 +1,43 @@
###########################################################
## this configuration was generated by JFrog Artifactory ##
###########################################################
## add HA entries when HA is configured
upstream artifactory {
server 127.0.0.1:8082;
}
upstream artifactory-direct {
server 127.0.0.1:8081;
}
## server configuration
server {
listen 80 ;
server_name {{ server_name }};
if ($http_x_forwarded_proto = '') {
set $http_x_forwarded_proto $scheme;
}
## Application specific logs
access_log /var/log/nginx/artifactory-access.log;
error_log /var/log/nginx/artifactory-error.log;
rewrite ^/$ /ui/ redirect;
rewrite ^/ui$ /ui/ redirect;
chunked_transfer_encoding on;
client_max_body_size 0;
location / {
proxy_read_timeout 2400s;
proxy_pass_header Server;
proxy_cookie_path ~*^/.* /;
proxy_pass "http://artifactory";
proxy_next_upstream error timeout non_idempotent;
proxy_next_upstream_tries 1;
proxy_set_header X-JFrog-Override-Base-Url $http_x_forwarded_proto://$host:$server_port;
proxy_set_header X-Forwarded-Port $server_port;
proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
proxy_set_header Host $http_host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
location ~ ^/artifactory/ {
proxy_pass http://artifactory-direct;
}
}
}


@@ -0,0 +1,2 @@
localhost


@@ -0,0 +1,5 @@
---
- hosts: localhost
remote_user: root
roles:
- artifactory-nginx


@@ -0,0 +1,2 @@
---
# vars file for artifactory-nginx


@@ -0,0 +1,29 @@
---
language: python
python: "2.7"
# Use the new container infrastructure
sudo: false
# Install ansible
addons:
apt:
packages:
- python-pip
install:
# Install ansible
- pip install ansible
# Check ansible version
- ansible --version
# Create ansible.cfg with correct roles_path
- printf '[defaults]\nroles_path=../' >ansible.cfg
script:
# Basic role syntax check
- ansible-playbook tests/test.yml -i tests/inventory --syntax-check
notifications:
webhooks: https://galaxy.ansible.com/api/v1/notifications/


@@ -0,0 +1,48 @@
---
# defaults file for artifactory
# indicates where this collection was downloaded from (galaxy, automation_hub, standalone)
ansible_marketplace: standalone
# The version of Artifactory to install
artifactory_version: 7.4.1
# licenses file - specify a licenses file or specify up to 5 licenses
artifactory_license1:
artifactory_license2:
artifactory_license3:
artifactory_license4:
artifactory_license5:
# whether to enable HA
artifactory_ha_enabled: true
# Whether a host is primary; this should be set in host vars.
artifactory_is_primary: true
# The location where Artifactory should install.
artifactory_download_directory: /opt/jfrog
# The location where Artifactory should store data.
artifactory_file_store_dir: /data
# The Artifactory flavour to install; can also be cpp-ce, jcr, or pro.
artifactory_flavour: pro
extra_java_opts: -server -Xms2g -Xmx14g -Xss256k -XX:+UseG1GC
artifactory_tar: https://dl.bintray.com/jfrog/artifactory-pro/org/artifactory/pro/jfrog-artifactory-pro/{{ artifactory_version }}/jfrog-artifactory-pro-{{ artifactory_version }}-linux.tar.gz
artifactory_home: "{{ artifactory_download_directory }}/artifactory-{{ artifactory_flavour }}-{{ artifactory_version }}"
artifactory_user: artifactory
artifactory_group: artifactory
# Set the parameters required for the service.
service_list:
- name: artifactory
description: Start script for Artifactory
start_command: "{{ artifactory_home }}/bin/artifactory.sh start"
stop_command: "{{ artifactory_home }}/bin/artifactory.sh stop"
type: forking
status_pattern: artifactory
user_name: "{{ artifactory_user }}"
group_name: "{{ artifactory_group }}"


@@ -0,0 +1,10 @@
---
# handlers file for artifactory
- name: systemctl daemon-reload
systemd:
daemon_reload: yes
- name: restart artifactory
service:
name: artifactory
state: restarted


@@ -0,0 +1,6 @@
---
exceptions:
- variation: Alpine
reason: Artifactory start/stop scripts don't properly work.
- variation: amazonlinux:1
reason: "Shutting down artifactory: /usr/bin/java\nfinding\nUsing the default catalina management port (8015) to test shutdown\nArtifactory Tomcat already stopped"


@@ -0,0 +1,35 @@
---
galaxy_info:
author: Robert de Bock
role_name: artifactory
description: Install and configure artifactory on your system.
license: Apache-2.0
company: none
min_ansible_version: 2.8
platforms:
- name: Debian
versions:
- all
- name: EL
versions:
- 7
- 8
- name: Fedora
versions:
- all
- name: OpenSUSE
versions:
- all
- name: Ubuntu
versions:
- bionic
galaxy_tags:
- artifactory
- centos
- redhat
- server
- system
dependencies: []


@@ -0,0 +1,2 @@
---
tox_parallel: yes


@@ -0,0 +1,6 @@
---
project_name: JFrog
reference: "https://github.com/robertdebock/ansible-role-artifactory/blob/master/defaults/main.yml"
versions:
- name: Artifactory
url: "https://dl.bintray.com/jfrog/artifactory/"


@@ -0,0 +1,160 @@
---
# tasks file for artifactory
- name: install nginx
include_role:
name: artifactory-nginx
- name: create group for artifactory
group:
name: "{{ artifactory_group }}"
state: present
become: yes
- name: create user for artifactory
user:
name: "{{ artifactory_user }}"
group: "{{ artifactory_group }}"
system: yes
become: yes
- name: ensure artifactory_download_directory exists
file:
path: "{{ artifactory_download_directory }}"
state: directory
become: yes
- name: download artifactory
unarchive:
src: "{{ artifactory_tar }}"
dest: "{{ artifactory_download_directory }}"
remote_src: yes
owner: "{{ artifactory_user }}"
group: "{{ artifactory_group }}"
creates: "{{ artifactory_home }}"
become: yes
register: downloadartifactory
until: downloadartifactory is succeeded
retries: 3
- name: ensure artifactory_file_store_dir exists
file:
path: "{{ artifactory_file_store_dir }}"
state: directory
owner: "{{ artifactory_user }}"
group: "{{ artifactory_group }}"
become: yes
- name: ensure etc exists
file:
path: "{{ artifactory_home }}/var/etc"
state: directory
owner: "{{ artifactory_user }}"
group: "{{ artifactory_group }}"
become: yes
- name: use specified system yaml
copy:
src: "{{ system_file }}"
dest: "{{ artifactory_home }}/var/etc/system.yaml"
become: yes
when: system_file is defined
- name: configure system yaml
template:
src: system.yaml.j2
dest: "{{ artifactory_home }}/var/etc/system.yaml"
become: yes
when: system_file is not defined
- name: ensure {{ artifactory_home }}/var/etc/security/ exists
file:
path: "{{ artifactory_home }}/var/etc/security/"
state: directory
owner: "{{ artifactory_user }}"
group: "{{ artifactory_group }}"
become: yes
- name: configure master key
template:
src: master.key.j2
dest: "{{ artifactory_home }}/var/etc/security/master.key"
become: yes
- name: configure join key
template:
src: join.key.j2
dest: "{{ artifactory_home }}/var/etc/security/join.key"
become: yes
- name: ensure {{ artifactory_home }}/var/etc/info/ exists
file:
path: "{{ artifactory_home }}/var/etc/info/"
state: directory
owner: "{{ artifactory_user }}"
group: "{{ artifactory_group }}"
become: yes
- name: configure installer info
template:
src: installer-info.json.j2
dest: "{{ artifactory_home }}/var/etc/info/installer-info.json"
become: yes
- name: use specified binary store
copy:
src: "{{ binary_store_file }}"
dest: "{{ artifactory_home }}/var/etc/binarystore.xml"
become: yes
when: binary_store_file is defined
- name: use default binary store
template:
src: binarystore.xml.j2
dest: "{{ artifactory_home }}/var/etc/binarystore.xml"
become: yes
when: binary_store_file is not defined
- name: use license file
copy:
src: "{{ artifactory_license_file }}"
dest: "{{ artifactory_home }}/var/etc/artifactory/artifactory.cluster.license"
become: yes
when: artifactory_license_file is defined and artifactory_is_primary == true
- name: use license strings
template:
src: artifactory.cluster.license.j2
dest: "{{ artifactory_home }}/var/etc/artifactory/artifactory.cluster.license"
become: yes
when: artifactory_license_file is not defined and artifactory_is_primary == true
- name: download database driver
get_url:
url: "{{ db_download_url }}"
dest: "{{ artifactory_home }}/var/bootstrap/artifactory/tomcat/lib"
owner: "{{ artifactory_user }}"
group: "{{ artifactory_group }}"
become: yes
- name: create artifactory service
shell: "{{ artifactory_home }}/app/bin/installService.sh"
become: yes
- name: start and enable the primary node
service:
name: artifactory
state: restarted
become: yes
when: artifactory_is_primary == true
- name: random wait before restarting to prevent secondary nodes from hitting DB first
pause:
seconds: "{{ 120 | random + 10}}"
when: artifactory_is_primary == false
- name: start and enable the secondary nodes
service:
name: artifactory
state: restarted
become: yes
when: artifactory_is_primary == false


@@ -0,0 +1,31 @@
{% for license in [artifactory_license1, artifactory_license2, artifactory_license3, artifactory_license4, artifactory_license5] %}
{% if license and license|length %}
{{ license }}
{% endif %}
{% endfor %}


@@ -0,0 +1,4 @@
<?xml version="1.0" encoding="UTF-8"?>
<config version="2">
<chain template="cluster-file-system"/>
</config>


@@ -0,0 +1,12 @@
{
"productId": "Ansible_artifactory/1.0.0",
"features": [
{
"featureId": "Partner/ACC-006973"
},
{
"featureId": "Channel/{{ ansible_marketplace }}"
}
]
}


@@ -0,0 +1 @@
{{ join_key }}


@@ -0,0 +1,38 @@
## @formatter:off
## JFROG ARTIFACTORY SYSTEM CONFIGURATION FILE
## HOW TO USE: comment-out any field and keep the correct yaml indentation by deleting only the leading '#' character.
configVersion: 1
## NOTE: JFROG_HOME is a place holder for the JFrog root directory containing the deployed product, the home directory for all JFrog products.
## Replace JFROG_HOME with the real path! For example, in RPM install, JFROG_HOME=/opt/jfrog
## NOTE: Sensitive information such as passwords and join key are encrypted on first read.
## NOTE: The provided commented key and value is the default.
## SHARED CONFIGURATIONS
## A shared section for keys across all services in this config
shared:
## Node Settings
node:
## A unique id to identify this node.
## Default: auto generated at startup.
id: {{ ansible_machine_id }}
## Sets this node as primary in HA installation
primary: {{ artifactory_is_primary }}
## Sets this node as part of HA installation
haEnabled: {{ artifactory_ha_enabled }}
## Database Configuration
database:
## One of: mysql, oracle, mssql, postgresql, mariadb
## Default: Embedded derby
## Example for mysql/postgresql
type: "{{ db_type }}"
driver: "{{ db_driver }}"
url: "{{ db_url }}"
username: "{{ db_user }}"
password: "{{ db_password }}"


@@ -0,0 +1,2 @@
---


@@ -0,0 +1,30 @@
---
language: python
services:
- docker
env:
global:
- DEBUG=--debug
matrix:
- MOLECULE_DISTRO=centos7 MOLECULE_SCENARIO=default
- MOLECULE_DISTRO=centos7 MOLECULE_SCENARIO=version11
# - MOLECULE_DISTRO: fedora27
# - MOLECULE_DISTRO: fedora29
- MOLECULE_DISTRO=ubuntu1604 MOLECULE_SCENARIO=default
- MOLECULE_DISTRO=ubuntu1604 MOLECULE_SCENARIO=version11
- MOLECULE_DISTRO=ubuntu1804 MOLECULE_SCENARIO=default
- MOLECULE_DISTRO=ubuntu1804 MOLECULE_SCENARIO=version11
# - MOLECULE_DISTRO: debian9
before_install:
- sudo apt-get -qq update
- sudo apt-get install -y net-tools
install:
- pip install molecule docker-py
script:
- molecule --version
- ansible --version
- molecule $DEBUG test -s $MOLECULE_SCENARIO


@@ -0,0 +1,84 @@
---
# Put database into alternative location with a bind mount.
postgres_server_bind_mount_var_lib_pgsql: false
# Where to put database.
postgres_server_bind_mount_var_lib_pgsql_target: ""
# Default version of Postgres server to install.
postgres_server_version: "9.6"
# Server version in package:
postgres_server_pkg_version: "{{ postgres_server_version|replace('.', '') }}"
# Whether or not the files are on ZFS.
postgres_server_volume_is_zfs: false
# Postgres setting max_connections.
postgres_server_max_connections: 100
# Postgres setting shared_buffers.
postgres_server_shared_buffers: 128MB
# Postgres setting maintenance_work_mem.
postgres_server_maintenance_work_mem: 64MB
# Postgres setting effective_io_concurrency.
postgres_server_effective_io_concurrency: 1
# Postgres setting max_worker_processes.
postgres_server_max_worker_processes: 8
# Postgres setting max_parallel_maintenance_workers.
postgres_server_max_parallel_maintenance_workers: 2
# Postgres setting max_parallel_workers_per_gather.
postgres_server_max_parallel_workers_per_gather: 2
# Postgres setting parallel_leader_participation.
postgres_server_parallel_leader_participation: true
# Postgres setting max_parallel_workers.
postgres_server_max_parallel_workers: 8
# Postgres setting max_locks_per_transaction.
postgres_server_max_locks_per_transaction: 64
# Configuration for "random access" cost.
postgres_server_random_page_cost: "4.0"
# User name that the postgres user runs as.
postgres_server_user: postgres
# Whether or not to log checkpoints.
postgres_server_log_checkpoints: false
# Whether or not to log connections.
postgres_server_log_connections: false
# Whether or not to log disconnections.
postgres_server_log_disconnections: false
# Whether or not to log duration.
postgres_server_log_duration: false
# Error logging verbosity.
postgres_server_log_error_verbosity: "default"
# Whether or not to log the host name.
postgres_server_log_hostname: false
# Whether or not to log lock waits.
postgres_server_log_lock_waits: false
# Which statements to log.
postgres_server_log_statements: "none"
# Whether or not to enable the auto_explain module.
postgres_server_auto_explain_enabled: false
# Minimal duration to log auto explain for.
postgres_server_auto_explain_log_min_duration: -1
# Whether or not to use EXPLAIN ANALYZE.
postgres_server_auto_explain_log_analyze: true


@@ -0,0 +1,4 @@
---
- name: restart postgres
systemd: name={{ postgres_server_service_name }} state=restarted


@@ -0,0 +1,25 @@
---
galaxy_info:
role_name: postgres_server
author: Jeff Fry
description: Installation of Postgres for Artifactory HA
company: JFrog
min_ansible_version: 2.8
platforms:
- name: Fedora
versions:
- 27
- 29
- name: Ubuntu
versions:
- xenial
- bionic
- name: Debian
versions:
- stretch
galaxy_tags:
- postgres
- postgresql
dependencies: []


@@ -0,0 +1,35 @@
---
- name: install python2 psycopg2
apt:
name: python-psycopg2
update_cache: yes
become: yes
- name: install python3 psycopg2
apt:
name: python3-psycopg2
update_cache: yes
become: yes
- name: add postgres apt key
apt_key:
url: https://www.postgresql.org/media/keys/ACCC4CF8.asc
id: "0x7FCC7D46ACCC4CF8"
state: present
become: yes
- name: register APT repository
apt_repository:
repo: deb http://apt.postgresql.org/pub/repos/apt/ {{ ansible_distribution_release }}-pgdg main
state: present
filename: pgdg
become: yes
- name: install postgres packages
apt:
name:
- postgresql-{{ postgres_server_version }}
- postgresql-server-dev-{{ postgres_server_version }}
- postgresql-contrib-{{ postgres_server_version }}
state: present
become: yes


@@ -0,0 +1,72 @@
---
- name: install EPEL repository
yum: name=epel-release state=present
when: > # not for Fedora
ansible_distribution == 'CentOS' or
ansible_distribution == 'RedHat'
become: yes
- name: install python2 psycopg2
yum:
name:
- python-psycopg2
- sudo
- wget
- perl
state: present
- name: install python3 psycopg2
yum:
name:
- python3-psycopg2
- sudo
- wget
- perl
state: present
- name: fixup some locale issues
lineinfile:
dest: /etc/default/locale
line: 'LANGUAGE="{{ item }}"'
state: present
create: yes
loop:
- 'en_US:en'
- 'en_US.UTF-8'
- name: get latest version
vars:
base: http://download.postgresql.org/pub/repos/yum
ver: "{{ ansible_distribution_version }}"
shell: |
set -eo pipefail
wget -O - {{ base }}/{{ postgres_server_version }}/redhat/rhel-{{ ver }}-x86_64/ 2>/dev/null | \
grep 'pgdg-redhat' | \
perl -pe 's/^.*rpm">//g' | \
perl -pe 's/<\/a>.*//g' | \
tail -n 1
args:
executable: /bin/bash
changed_when: false
check_mode: false
register: latest_version
tags: [skip_ansible_lint] # yes, I want wget here
- name: config postgres repository
vars:
base: http://download.postgresql.org/pub/repos/yum
ver: "{{ ansible_distribution_version }}"
yum:
name: "{{ base }}/{{ postgres_server_version }}/redhat/rhel-{{ ver }}-x86_64/{{ latest_version.stdout }}"
state: present
become: yes
- name: install postgres packages
yum:
name:
- postgresql{{ postgres_server_pkg_version }}-server
- postgresql{{ postgres_server_pkg_version }}-contrib
- postgresql{{ postgres_server_pkg_version }}-devel
state: present
become: yes


@@ -0,0 +1,105 @@
---
- name: define distribution-specific variables
include_vars: "{{ ansible_os_family }}.yml"
- name: create directory for bind mount if necessary
file:
path: "{{ postgres_server_bind_mount_var_lib_pgsql_target }}"
state: directory
become: yes
when: postgres_server_bind_mount_var_lib_pgsql
- name: perform bind mount if necessary
mount:
path: "/var/lib/pgsql"
src: "{{ postgres_server_bind_mount_var_lib_pgsql_target }}"
opts: bind
state: mounted
fstype: none
become: yes
when: postgres_server_bind_mount_var_lib_pgsql
- name: perform installation
include_tasks: "{{ ansible_os_family }}.yml"
- name: extend path
copy:
dest: /etc/profile.d/postgres-path.sh
mode: a=rx
content: "export PATH=$PATH:/usr/pgsql-{{ postgres_server_version }}/bin"
become: yes
- name: initialize PostgreSQL database cluster
environment:
LC_ALL: "en_US.UTF-8"
vars:
ansible_become: "{{ postgres_server_initdb_become }}"
ansible_become_user: "{{ postgres_server_user }}"
command: "{{ postgres_server_cmd_initdb }} {{ postgres_server_data_location }}"
args:
creates: "{{ postgres_server_data_location }}/PG_VERSION"
- name: install postgres configuration
template:
src: "{{ item }}.j2"
dest: "{{ postgres_server_config_location }}/{{ item }}"
owner: postgres
group: postgres
mode: u=rw,go=r
vars:
ansible_become: "{{ postgres_server_initdb_become }}"
ansible_become_user: "{{ postgres_server_user }}"
loop:
- pg_hba.conf
- postgresql.conf
- name: enable postgres service
systemd:
name: "{{ postgres_server_service_name }}"
state: started
enabled: yes
become: yes
- name: Hold until Postgresql is up and running
wait_for:
port: 5432
- name: Create users
become_user: postgres
become: yes
postgresql_user:
name: "{{ item.db_user }}"
password: "{{ item.db_password }}"
conn_limit: "-1"
loop: "{{ db_users|default([]) }}"
no_log: true # secret passwords
- name: Create a database
become_user: postgres
become: yes
postgresql_db:
name: "{{ item.db_name }}"
owner: "{{ item.db_owner }}"
encoding: UTF-8
loop: "{{ dbs|default([]) }}"
- name: Grant privs on db
become_user: postgres
become: yes
postgresql_privs:
database: "{{ item.db_name }}"
role: "{{ item.db_owner }}"
state: present
privs: ALL
type: database
loop: "{{ dbs|default([]) }}"
- name: restart postgres
service:
name: "{{ postgres_server_service_name }}"
state: restarted
become: yes
- debug:
msg: "Restarted postgres service {{ postgres_server_service_name }}"


@@ -0,0 +1,7 @@
# TYPE DATABASE USER ADDRESS METHOD
## localhost connections through Unix port (user name), IPv4, IPv6 (MD5 pw).
local all all peer
host all all 127.0.0.1/32 md5
host all all ::1/128 md5
## remote connections IPv4
host all all 0.0.0.0/0 trust


@@ -0,0 +1,681 @@
# -----------------------------
# PostgreSQL configuration file
# -----------------------------
#
# This file consists of lines of the form:
#
# name = value
#
# (The "=" is optional.) Whitespace may be used. Comments are introduced with
# "#" anywhere on a line. The complete list of parameter names and allowed
# values can be found in the PostgreSQL documentation.
#
# The commented-out settings shown in this file represent the default values.
# Re-commenting a setting is NOT sufficient to revert it to the default value;
# you need to reload the server.
#
# This file is read on server startup and when the server receives a SIGHUP
# signal. If you edit the file on a running system, you have to SIGHUP the
# server for the changes to take effect, or use "pg_ctl reload". Some
# parameters, which are marked below, require a server shutdown and restart to
# take effect.
#
# Any parameter can also be given as a command-line option to the server, e.g.,
# "postgres -c log_connections=on". Some parameters can be changed at run time
# with the "SET" SQL command.
#
# Memory units: kB = kilobytes Time units: ms = milliseconds
# MB = megabytes s = seconds
# GB = gigabytes min = minutes
# TB = terabytes h = hours
# d = days
#------------------------------------------------------------------------------
# FILE LOCATIONS
#------------------------------------------------------------------------------
# The default values of these variables are driven from the -D command-line
# option or PGDATA environment variable, represented here as ConfigDir.
{% if postgres_server_config_data_directory is not none %}
data_directory = '{{ postgres_server_config_data_directory }}'
{% else %}
#data_directory = 'ConfigDir' # use data in another directory
# (change requires restart)
{% endif %}
{% if postgres_server_config_data_directory %}
hba_file = '{{ postgres_server_config_hba_file }}'
{% else %}
#hba_file = 'ConfigDir/pg_hba.conf' # host-based authentication file
# (change requires restart)
{% endif %}
{% if postgres_server_config_data_directory %}
ident_file = '{{ postgres_server_config_ident_file }}'
{% else %}
#ident_file = 'ConfigDir/pg_ident.conf' # host-based authentication file
# (change requires restart)
{% endif %}
{% if postgres_server_config_external_pid_file %}
external_pid_file = '{{ postgres_server_config_external_pid_file }}'
{% else %}
# If external_pid_file is not explicitly set, no extra PID file is written.
#external_pid_file = '' # write an extra PID file
# (change requires restart)
{% endif %}
#------------------------------------------------------------------------------
# CONNECTIONS AND AUTHENTICATION
#------------------------------------------------------------------------------
# - Connection Settings -
listen_addresses = '0.0.0.0' # what IP address(es) to listen on;
# comma-separated list of addresses;
# defaults to 'localhost'; use '*' for all
# (change requires restart)
#port = 5432 # (change requires restart)
max_connections = {{ postgres_server_max_connections }} # (change requires restart)
#superuser_reserved_connections = 3 # (change requires restart)
#unix_socket_directories = '/var/run/postgresql, /tmp' # comma-separated list of directories
# (change requires restart)
#unix_socket_group = '' # (change requires restart)
#unix_socket_permissions = 0777 # begin with 0 to use octal notation
# (change requires restart)
#bonjour = off # advertise server via Bonjour
# (change requires restart)
#bonjour_name = '' # defaults to the computer name
# (change requires restart)
# - Security and Authentication -
#authentication_timeout = 1min # 1s-600s
#ssl = off # (change requires restart)
#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers
# (change requires restart)
#ssl_prefer_server_ciphers = on # (change requires restart)
#ssl_ecdh_curve = 'prime256v1' # (change requires restart)
#ssl_cert_file = 'server.crt' # (change requires restart)
#ssl_key_file = 'server.key' # (change requires restart)
#ssl_ca_file = '' # (change requires restart)
#ssl_crl_file = '' # (change requires restart)
#password_encryption = on
#db_user_namespace = off
#row_security = on
# GSSAPI using Kerberos
#krb_server_keyfile = ''
#krb_caseins_users = off
# - TCP Keepalives -
# see "man 7 tcp" for details
#tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds;
# 0 selects the system default
#tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds;
# 0 selects the system default
#tcp_keepalives_count = 0 # TCP_KEEPCNT;
# 0 selects the system default
#------------------------------------------------------------------------------
# RESOURCE USAGE (except WAL)
#------------------------------------------------------------------------------
# - Memory -
shared_buffers = {{ postgres_server_shared_buffers }} # min 128kB
# (change requires restart)
#huge_pages = try # on, off, or try
# (change requires restart)
#temp_buffers = 8MB # min 800kB
#max_prepared_transactions = 0 # zero disables the feature
# (change requires restart)
# Caution: it is not advisable to set max_prepared_transactions nonzero unless
# you actively intend to use prepared transactions.
#work_mem = 4MB # min 64kB
maintenance_work_mem = {{ postgres_server_maintenance_work_mem }} # min 1MB
#replacement_sort_tuples = 150000 # limits use of replacement selection sort
#autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem
#max_stack_depth = 2MB # min 100kB
dynamic_shared_memory_type = posix # the default is the first option
# supported by the operating system:
# posix
# sysv
# windows
# mmap
# use none to disable dynamic shared memory
# (change requires restart)
# - Disk -
#temp_file_limit = -1 # limits per-process temp file space
# in kB, or -1 for no limit
# - Kernel Resource Usage -
#max_files_per_process = 1000 # min 25
# (change requires restart)
shared_preload_libraries = 'pg_stat_statements' # restart on change
# - Cost-Based Vacuum Delay -
#vacuum_cost_delay = 0 # 0-100 milliseconds
#vacuum_cost_page_hit = 1 # 0-10000 credits
#vacuum_cost_page_miss = 10 # 0-10000 credits
#vacuum_cost_page_dirty = 20 # 0-10000 credits
#vacuum_cost_limit = 200 # 1-10000 credits
# - Background Writer -
#bgwriter_delay = 200ms # 10-10000ms between rounds
#bgwriter_lru_maxpages = 100 # 0-1000 max buffers written/round
#bgwriter_lru_multiplier = 2.0 # 0-10.0 multiplier on buffers scanned/round
#bgwriter_flush_after = 512kB # measured in pages, 0 disables
# - Asynchronous Behavior -
effective_io_concurrency = {{ postgres_server_effective_io_concurrency }}
max_worker_processes = {{ postgres_server_max_worker_processes }}
max_parallel_workers_per_gather = {{ postgres_server_max_parallel_workers_per_gather }}
#old_snapshot_threshold = -1 # 1min-60d; -1 disables; 0 is immediate
# (change requires restart)
#backend_flush_after = 0 # measured in pages, 0 disables
{% if postgres_server_version|string != "9.6" %}
parallel_leader_participation = {{ "on" if postgres_server_parallel_leader_participation else "off" }}
max_parallel_maintenance_workers = {{ postgres_server_max_parallel_maintenance_workers }}
max_parallel_workers = {{ postgres_server_max_parallel_workers }}
{% endif %}
#------------------------------------------------------------------------------
# WRITE AHEAD LOG
#------------------------------------------------------------------------------
# - Settings -
#wal_level = minimal # minimal, replica, or logical
# (change requires restart)
#fsync = on # flush data to disk for crash safety
# (turning this off can cause
# unrecoverable data corruption)
#synchronous_commit = on # synchronization level;
# off, local, remote_write, remote_apply, or on
#wal_sync_method = fsync # the default is the first option
# supported by the operating system:
# open_datasync
# fdatasync (default on Linux)
# fsync
# fsync_writethrough
# open_sync
full_page_writes = {{ "off" if postgres_server_volume_is_zfs else "on" }} # off OK on ZFS # recover from partial page writes
wal_compression = off # enable compression of full-page writes
#wal_log_hints = off # also do full page writes of non-critical updates
# (change requires restart)
#wal_buffers = -1 # min 32kB, -1 sets based on shared_buffers
# (change requires restart)
#wal_writer_delay = 200ms # 1-10000 milliseconds
#wal_writer_flush_after = 1MB # measured in pages, 0 disables
commit_delay = 100000 # range 0-100000, in microseconds
#commit_siblings = 5 # range 1-1000
# - Checkpoints -
checkpoint_timeout = 4h # range 30s-1d
max_wal_size = 100GB
min_wal_size = 1GB
checkpoint_completion_target = 0.8 # checkpoint target duration, 0.0 - 1.0
#checkpoint_flush_after = 256kB # measured in pages, 0 disables
#checkpoint_warning = 30s # 0 disables
# - Archiving -
#archive_mode = off # enables archiving; off, on, or always
# (change requires restart)
#archive_command = '' # command to use to archive a logfile segment
# placeholders: %p = path of file to archive
# %f = file name only
# e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f'
#archive_timeout = 0 # force a logfile segment switch after this
# number of seconds; 0 disables
#------------------------------------------------------------------------------
# REPLICATION
#------------------------------------------------------------------------------
# - Sending Server(s) -
# Set these on the master and on any standby that will send replication data.
#max_wal_senders = 0 # max number of walsender processes
# (change requires restart)
#wal_keep_segments = 0 # in logfile segments, 16MB each; 0 disables
#wal_sender_timeout = 60s # in milliseconds; 0 disables
#max_replication_slots = 0 # max number of replication slots
# (change requires restart)
#track_commit_timestamp = off # collect timestamp of transaction commit
# (change requires restart)
# - Master Server -
# These settings are ignored on a standby server.
#synchronous_standby_names = '' # standby servers that provide sync rep
# number of sync standbys and comma-separated list of application_name
# from standby(s); '*' = all
#vacuum_defer_cleanup_age = 0 # number of xacts by which cleanup is delayed
# - Standby Servers -
# These settings are ignored on a master server.
#hot_standby = off # "on" allows queries during recovery
# (change requires restart)
#max_standby_archive_delay = 30s # max delay before canceling queries
# when reading WAL from archive;
# -1 allows indefinite delay
#max_standby_streaming_delay = 30s # max delay before canceling queries
# when reading streaming WAL;
# -1 allows indefinite delay
#wal_receiver_status_interval = 10s # send replies at least this often
# 0 disables
#hot_standby_feedback = off # send info from standby to prevent
# query conflicts
#wal_receiver_timeout = 60s # time that receiver waits for
# communication from master
# in milliseconds; 0 disables
#wal_retrieve_retry_interval = 5s # time to wait before retrying to
# retrieve WAL after a failed attempt
#------------------------------------------------------------------------------
# QUERY TUNING
#------------------------------------------------------------------------------
# - Planner Method Configuration -
#enable_bitmapscan = on
#enable_hashagg = on
#enable_hashjoin = on
#enable_indexscan = on
#enable_indexonlyscan = on
#enable_material = on
#enable_mergejoin = on
#enable_nestloop = on
#enable_seqscan = on
#enable_sort = on
#enable_tidscan = on
# - Planner Cost Constants -
#seq_page_cost = 1.0 # measured on an arbitrary scale
random_page_cost = {{ postgres_server_random_page_cost }}
#cpu_tuple_cost = 0.01 # same scale as above
#cpu_index_tuple_cost = 0.005 # same scale as above
#cpu_operator_cost = 0.0025 # same scale as above
#parallel_tuple_cost = 0.1 # same scale as above
#parallel_setup_cost = 1000.0 # same scale as above
#min_parallel_relation_size = 8MB
#effective_cache_size = 4GB
# - Genetic Query Optimizer -
#geqo = on
#geqo_threshold = 12
#geqo_effort = 5 # range 1-10
#geqo_pool_size = 0 # selects default based on effort
#geqo_generations = 0 # selects default based on effort
#geqo_selection_bias = 2.0 # range 1.5-2.0
#geqo_seed = 0.0 # range 0.0-1.0
# - Other Planner Options -
#default_statistics_target = 100 # range 1-10000
#constraint_exclusion = partition # on, off, or partition
#cursor_tuple_fraction = 0.1 # range 0.0-1.0
#from_collapse_limit = 8
#join_collapse_limit = 8 # 1 disables collapsing of explicit
# JOIN clauses
#force_parallel_mode = off
#------------------------------------------------------------------------------
# ERROR REPORTING AND LOGGING
#------------------------------------------------------------------------------
# - Where to Log -
log_destination = 'stderr' # Valid values are combinations of
# stderr, csvlog, syslog, and eventlog,
# depending on platform. csvlog
# requires logging_collector to be on.
# This is used when logging to stderr:
logging_collector = on # Enable capturing of stderr and csvlog
# into log files. Required to be on for
# csvlogs.
# (change requires restart)
# These are only used if logging_collector is on:
log_directory = 'pg_log' # directory where log files are written,
# can be absolute or relative to PGDATA
log_filename = 'postgresql-%a.log' # log file name pattern,
# can include strftime() escapes
#log_file_mode = 0600 # creation mode for log files,
# begin with 0 to use octal notation
log_truncate_on_rotation = on # If on, an existing log file with the
# same name as the new log file will be
# truncated rather than appended to.
# But such truncation only occurs on
# time-driven rotation, not on restarts
# or size-driven rotation. Default is
# off, meaning append to existing files
# in all cases.
log_rotation_age = 1d # Automatic rotation of logfiles will
# happen after that time. 0 disables.
log_rotation_size = 0 # Automatic rotation of logfiles will
# happen after that much log output.
# 0 disables.
# These are relevant when logging to syslog:
#syslog_facility = 'LOCAL0'
#syslog_ident = 'postgres'
#syslog_sequence_numbers = on
#syslog_split_messages = on
# This is only relevant when logging to eventlog (win32):
# (change requires restart)
#event_source = 'PostgreSQL'
# - When to Log -
#client_min_messages = notice # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# log
# notice
# warning
# error
#log_min_messages = warning # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# info
# notice
# warning
# error
# log
# fatal
# panic
#log_min_error_statement = error # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# info
# notice
# warning
# error
# log
# fatal
# panic (effectively off)
#log_min_duration_statement = -1 # -1 is disabled, 0 logs all statements
# and their durations, > 0 logs only
# statements running at least this number
# of milliseconds
# - What to Log -
#debug_print_parse = off
#debug_print_rewritten = off
#debug_print_plan = off
#debug_pretty_print = on
log_checkpoints = {{ "on" if postgres_server_log_checkpoints else "off" }}
log_connections = {{ "on" if postgres_server_log_connections else "off" }}
log_disconnections = {{ "on" if postgres_server_log_disconnections else "off" }}
log_duration = {{ "on" if postgres_server_log_duration else "off" }}
log_error_verbosity = {{ postgres_server_log_error_verbosity }} # terse, default, or verbose messages
log_hostname = {{ "on" if postgres_server_log_hostname else "off" }}
log_line_prefix = '< %m > ' # special values:
# %a = application name
# %u = user name
# %d = database name
# %r = remote host and port
# %h = remote host
# %p = process ID
# %t = timestamp without milliseconds
# %m = timestamp with milliseconds
# %n = timestamp with milliseconds (as a Unix epoch)
# %i = command tag
# %e = SQL state
# %c = session ID
# %l = session line number
# %s = session start timestamp
# %v = virtual transaction ID
# %x = transaction ID (0 if none)
# %q = stop here in non-session
# processes
# %% = '%'
# e.g. '<%u%%%d> '
log_lock_waits = {{ "on" if postgres_server_log_lock_waits else "off" }} # log lock waits >= deadlock_timeout
log_statement = '{{ postgres_server_log_statements }}' # none, ddl, mod, all
#log_replication_commands = off
#log_temp_files = -1 # log temporary files equal or larger
# than the specified size in kilobytes;
# -1 disables, 0 logs all temp files
log_timezone = 'Europe/Berlin'
# - Process Title -
#cluster_name = '' # added to process titles if nonempty
# (change requires restart)
#update_process_title = on
#------------------------------------------------------------------------------
# RUNTIME STATISTICS
#------------------------------------------------------------------------------
# - Query/Index Statistics Collector -
#track_activities = on
#track_counts = on
#track_io_timing = off
#track_functions = none # none, pl, all
track_activity_query_size = 102400 # (change requires restart)
#stats_temp_directory = 'pg_stat_tmp'
# Track nested statements (e.g. statements executed inside functions and procedures) as well
pg_stat_statements.track = all
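# NOTE: pg_stat_statements only records data once the module is loaded via
# shared_preload_libraries and the extension is created in the target database,
# e.g. (illustrative): CREATE EXTENSION IF NOT EXISTS pg_stat_statements;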
# - Statistics Monitoring -
#log_parser_stats = off
#log_planner_stats = off
#log_executor_stats = off
#log_statement_stats = off
#------------------------------------------------------------------------------
# AUTOVACUUM PARAMETERS
#------------------------------------------------------------------------------
#autovacuum = on # Enable autovacuum subprocess? 'on'
# requires track_counts to also be on.
#log_autovacuum_min_duration = -1 # -1 disables, 0 logs all actions and
# their durations, > 0 logs only
# actions running at least this number
# of milliseconds.
#autovacuum_max_workers = 3 # max number of autovacuum subprocesses
# (change requires restart)
#autovacuum_naptime = 1min # time between autovacuum runs
#autovacuum_vacuum_threshold = 50 # min number of row updates before
# vacuum
#autovacuum_analyze_threshold = 50 # min number of row updates before
# analyze
#autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum
#autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze
#autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum
# (change requires restart)
#autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age
# before forced vacuum
# (change requires restart)
#autovacuum_vacuum_cost_delay = 20ms # default vacuum cost delay for
# autovacuum, in milliseconds;
# -1 means use vacuum_cost_delay
#autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for
# autovacuum, -1 means use
# vacuum_cost_limit
#------------------------------------------------------------------------------
# CLIENT CONNECTION DEFAULTS
#------------------------------------------------------------------------------
# - Statement Behavior -
#search_path = '"$user", public' # schema names
#default_tablespace = '' # a tablespace name, '' uses the default
#temp_tablespaces = '' # a list of tablespace names, '' uses
# only default tablespace
#check_function_bodies = on
#default_transaction_isolation = 'read committed'
#default_transaction_read_only = off
#default_transaction_deferrable = off
#session_replication_role = 'origin'
#statement_timeout = 0 # in milliseconds, 0 is disabled
#lock_timeout = 0 # in milliseconds, 0 is disabled
#idle_in_transaction_session_timeout = 0 # in milliseconds, 0 is disabled
#vacuum_freeze_min_age = 50000000
#vacuum_freeze_table_age = 150000000
#vacuum_multixact_freeze_min_age = 5000000
#vacuum_multixact_freeze_table_age = 150000000
#bytea_output = 'hex' # hex, escape
#xmlbinary = 'base64'
#xmloption = 'content'
#gin_fuzzy_search_limit = 0
#gin_pending_list_limit = 4MB
# - Locale and Formatting -
datestyle = 'iso, mdy'
#intervalstyle = 'postgres'
timezone = 'Europe/Berlin'
#timezone_abbreviations = 'Default' # Select the set of available time zone
# abbreviations. Currently, there are
# Default
# Australia (historical usage)
# India
# You can create your own file in
# share/timezonesets/.
#extra_float_digits = 0 # min -15, max 3
#client_encoding = sql_ascii # actually, defaults to database
# encoding
# These settings are initialized by initdb, but they can be changed.
lc_messages = 'en_US.UTF-8' # locale for system error message
# strings
lc_monetary = 'en_US.UTF-8' # locale for monetary formatting
lc_numeric = 'en_US.UTF-8' # locale for number formatting
lc_time = 'en_US.UTF-8' # locale for time formatting
# default configuration for text search
default_text_search_config = 'pg_catalog.english'
# - Other Defaults -
#dynamic_library_path = '$libdir'
#local_preload_libraries = ''
{% set preload_libraries = [] %}
{% if postgres_server_auto_explain_enabled %}
{% set _ = preload_libraries.append("auto_explain") %}
{% endif %}
session_preload_libraries = '{{ ",".join(preload_libraries) }}'
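# NOTE: unlike shared_preload_libraries, session_preload_libraries takes effect
# for newly started sessions after a configuration reload; no server restart is
# required.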
#------------------------------------------------------------------------------
# auto_explain SETTINGS
#------------------------------------------------------------------------------
# NOTE: log_min_duration takes a duration in ms (0 logs every plan, -1 disables), not on/off.
auto_explain.log_min_duration = {{ "0" if postgres_server_auto_explain_log_min_duration else "-1" }}
auto_explain.log_analyze = {{ "on" if postgres_server_auto_explain_log_analyze else "off" }}
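# Other auto_explain options left at their defaults here (shown for reference):
#auto_explain.log_buffers = off
#auto_explain.log_timing = on
#auto_explain.log_nested_statements = off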
#------------------------------------------------------------------------------
# LOCK MANAGEMENT
#------------------------------------------------------------------------------
#deadlock_timeout = 1s
max_locks_per_transaction = {{ postgres_server_max_locks_per_transaction }} # min 10
# (change requires restart)
#max_pred_locks_per_transaction = 64 # min 10
# (change requires restart)
#------------------------------------------------------------------------------
# VERSION/PLATFORM COMPATIBILITY
#------------------------------------------------------------------------------
# - Previous PostgreSQL Versions -
#array_nulls = on
#backslash_quote = safe_encoding # on, off, or safe_encoding
#default_with_oids = off
#escape_string_warning = on
#lo_compat_privileges = off
#operator_precedence_warning = off
#quote_all_identifiers = off
#sql_inheritance = on
#standard_conforming_strings = on
#synchronize_seqscans = on
# - Other Platforms and Clients -
#transform_null_equals = off
#------------------------------------------------------------------------------
# ERROR HANDLING
#------------------------------------------------------------------------------
#exit_on_error = off # terminate session on any error?
#restart_after_crash = on # reinitialize after backend crash?
#------------------------------------------------------------------------------
# CONFIG FILE INCLUDES
#------------------------------------------------------------------------------
# These options allow settings to be loaded from files other than the
# default postgresql.conf.
#include_dir = 'conf.d' # include files ending in '.conf' from
# directory 'conf.d'
#include_if_exists = 'exists.conf' # include file only if it exists
#include = 'special.conf' # include file
#------------------------------------------------------------------------------
# CUSTOMIZED OPTIONS
#------------------------------------------------------------------------------
# Add settings for extensions here

View File

@@ -0,0 +1,12 @@
---
postgres_server_cmd_initdb: /usr/lib/postgresql/{{ postgres_server_version }}/bin/initdb -D
postgres_server_initdb_become: yes
postgres_server_data_location: /var/lib/postgresql/{{ postgres_server_version }}/main
postgres_server_config_location: /etc/postgresql/{{ postgres_server_version }}/main
postgres_server_service_name: postgresql@{{ postgres_server_version }}-main
postgres_server_config_data_directory: "/var/lib/postgresql/{{ postgres_server_version }}/main"
postgres_server_config_hba_file: "/etc/postgresql/{{ postgres_server_version }}/main/pg_hba.conf"
postgres_server_config_ident_file: "/etc/postgresql/{{ postgres_server_version }}/main/pg_ident.conf"
postgres_server_config_external_pid_file: "/var/run/postgresql/{{ postgres_server_version }}-main.pid"

View File

@@ -0,0 +1,11 @@
---
postgres_server_cmd_initdb: /usr/pgsql-{{ postgres_server_version }}/bin/postgresql{{ postgres_server_pkg_version }}-setup initdb -D
postgres_server_data_location: /var/lib/pgsql/{{ postgres_server_version }}/data
postgres_server_config_location: "{{ postgres_server_data_location }}"
postgres_server_service_name: postgresql-{{ postgres_server_version }}
postgres_server_config_data_directory: null
postgres_server_config_hba_file: null
postgres_server_config_ident_file: null
postgres_server_config_external_pid_file: null

View File

@@ -0,0 +1,4 @@
---
postgres_server_cmd_initdb: /usr/pgsql-{{ postgres_server_version }}/bin/postgresql{{ postgres_server_pkg_version }}-setup initdb
postgres_server_initdb_become: false

View File

@@ -0,0 +1,4 @@
---
postgres_server_cmd_initdb: /usr/pgsql-{{ postgres_server_version }}/bin/initdb -D /var/lib/pgsql/{{ postgres_server_version }}/data
postgres_server_initdb_become: yes

View File

@@ -0,0 +1,29 @@
---
language: python
python: "2.7"
# Use the new container infrastructure
sudo: false
# Install ansible
addons:
apt:
packages:
- python-pip
install:
# Install ansible
- pip install ansible
# Check ansible version
- ansible --version
# Create ansible.cfg with correct roles_path
- printf '[defaults]\nroles_path=../' >ansible.cfg
script:
# Basic role syntax check
- ansible-playbook tests/test.yml -i tests/inventory --syntax-check
notifications:
webhooks: https://galaxy.ansible.com/api/v1/notifications/

View File

@@ -0,0 +1,23 @@
---
# defaults file for xray
# indicates where this collection was downloaded from (galaxy, automation_hub, standalone)
ansible_marketplace: standalone
# The version of xray to install
xray_version: 3.3.0
# whether to enable HA
xray_ha_enabled: true
# The directory where Xray will be installed.
xray_download_directory: /opt/jfrog
# The remote Xray download URL
xray_tar: https://bintray.com/standAloneDownload/downloadArtifact?agree=true&artifactPath=/jfrog/jfrog-xray/xray-linux/{{ xray_version }}/jfrog-xray-{{ xray_version }}-linux.tar.gz&callback_id=anonymous&product=xray
# The Xray install directory
xray_home: "{{ xray_download_directory }}/jfrog-xray-{{ xray_version }}-linux"
# Xray user and group
xray_user: xray
xray_group: xray
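# These defaults can be overridden at run time, e.g. (illustrative):
#   ansible-playbook playbook.yml -e "xray_ha_enabled=false"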

View File

@@ -0,0 +1,2 @@
---
# handlers file for xray

View File

@@ -0,0 +1,53 @@
galaxy_info:
author: your name
description: your role description
company: your company (optional)
# If the issue tracker for your role is not on github, uncomment the
# next line and provide a value
# issue_tracker_url: http://example.com/issue/tracker
# Choose a valid license ID from https://spdx.org - some suggested licenses:
# - BSD-3-Clause (default)
# - MIT
# - GPL-2.0-or-later
# - GPL-3.0-only
# - Apache-2.0
# - CC-BY-4.0
license: license (GPL-2.0-or-later, MIT, etc)
min_ansible_version: 2.9
# If this is a Container Enabled role, provide the minimum Ansible Container version.
# min_ansible_container_version:
#
# Provide a list of supported platforms, and for each platform a list of versions.
# If you don't wish to enumerate all versions for a particular platform, use 'all'.
# To view available platforms and versions (or releases), visit:
# https://galaxy.ansible.com/api/v1/platforms/
#
# platforms:
# - name: Fedora
# versions:
# - all
# - 25
# - name: SomePlatform
# versions:
# - all
# - 1.0
# - 7
# - 99.99
galaxy_tags: []
# List tags for your role here, one per line. A tag is a keyword that describes
# and categorizes the role. Users find roles by searching for tags. Be sure to
# remove the '[]' above, if you add tags to this list.
#
# NOTE: A tag is limited to a single word comprised of alphanumeric characters.
# Maximum 20 tags per role.
dependencies: []
# List your role dependencies here, one per line. Be sure to remove the '[]' above,
# if you add dependencies to this list.

View File

@@ -0,0 +1,37 @@
---
- name: Install db5.3-util
apt:
deb: "{{ xray_home }}/app/third-party/misc/db5.3-util_5.3.28-3ubuntu3_amd64.deb"
ignore_errors: yes
become: yes
- name: Install db-util
apt:
deb: "{{ xray_home }}/app/third-party/misc/db-util_1_3a5.3.21exp1ubuntu1_all.deb"
ignore_errors: yes
become: yes
- name: Install libssl
apt:
deb: "{{ xray_home }}/app/third-party/rabbitmq/libssl1.1_1.1.0j-1_deb9u1_amd64.deb"
ignore_errors: yes
become: yes
- name: Install socat
apt:
deb: "{{ xray_home }}/app/third-party/rabbitmq/socat_1.7.3.1-2+deb9u1_amd64.deb"
become: yes
- name: Install libwxbase3.0-0v5
apt:
name: libwxbase3.0-0v5
update_cache: yes
state: present
ignore_errors: yes
become: yes
- name: Install erlang
apt:
deb: "{{ xray_home }}/app/third-party/rabbitmq/esl-erlang_21.2.1-1~ubuntu~xenial_amd64.deb"
become: yes

View File

@@ -0,0 +1,16 @@
---
- name: Install db-util
yum:
name: "{{ xray_home }}/app/third-party/misc/db4-utils-4.7.25-20.el6_7.x86_64.rpm"
state: present
- name: Install socat
yum:
name: "{{ xray_home }}/app/third-party/rabbitmq/socat-1.7.3.2-2.el7.x86_64.rpm"
state: present
- name: Install erlang
yum:
name: "{{ xray_home }}/app/third-party/rabbitmq/erlang-21.1.4-1.el7.centos.x86_64.rpm"
state: present

View File

@@ -0,0 +1,93 @@
---
- name: create group for xray
group:
name: "{{ xray_group }}"
state: present
become: yes
- name: create user for xray
user:
name: "{{ xray_user }}"
group: "{{ xray_group }}"
system: yes
become: yes
- name: ensure xray_download_directory exists
file:
path: "{{ xray_download_directory }}"
state: directory
become: yes
- name: download xray
unarchive:
src: "{{ xray_tar }}"
dest: "{{ xray_download_directory }}"
remote_src: yes
owner: "{{ xray_user }}"
group: "{{ xray_group }}"
creates: "{{ xray_home }}"
become: yes
register: downloadxray
until: downloadxray is succeeded
retries: 3
- name: perform prerequisite installation
include_tasks: "{{ ansible_os_family }}.yml"
- name: ensure etc exists
file:
path: "{{ xray_home }}/var/etc"
state: directory
owner: "{{ xray_user }}"
group: "{{ xray_group }}"
become: yes
- name: configure system yaml
template:
src: system.yaml.j2
dest: "{{ xray_home }}/var/etc/system.yaml"
become: yes
- name: ensure {{ xray_home }}/var/etc/security/ exists
file:
path: "{{ xray_home }}/var/etc/security/"
state: directory
owner: "{{ xray_user }}"
group: "{{ xray_group }}"
become: yes
- name: configure master key
template:
src: master.key.j2
dest: "{{ xray_home }}/var/etc/security/master.key"
become: yes
- name: configure join key
template:
src: join.key.j2
dest: "{{ xray_home }}/var/etc/security/join.key"
become: yes
- name: ensure {{ xray_home }}/var/etc/info/ exists
file:
path: "{{ xray_home }}/var/etc/info/"
state: directory
owner: "{{ xray_user }}"
group: "{{ xray_group }}"
become: yes
- name: configure installer info
template:
src: installer-info.json.j2
dest: "{{ xray_home }}/var/etc/info/installer-info.json"
become: yes
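# NOTE: the shell task below re-runs installService.sh on every play; it is not
# idempotent. Guard it (e.g. with a creates: arg) if repeat runs are a concern.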
- name: create xray service
shell: "{{ xray_home }}/app/bin/installService.sh"
become: yes
- name: start and enable xray
service:
name: xray
state: restarted
become: yes

View File

@@ -0,0 +1,11 @@
{
"productId": "Ansible_artifactory/1.0.0",
"features": [
{
"featureId": "Partner/ACC-006973"
},
{
"featureId": "Channel/{{ ansible_marketplace }}"
}
]
}

View File

@@ -0,0 +1 @@
{{ join_key }}

View File

@@ -0,0 +1 @@
{{ master_key }}

View File

@@ -0,0 +1,36 @@
## @formatter:off
## JFROG ARTIFACTORY SYSTEM CONFIGURATION FILE
## HOW TO USE: uncomment any field you want to set and keep the correct yaml indentation by deleting only the leading '#' character.
configVersion: 1
## NOTE: JFROG_HOME is a placeholder for the JFrog root directory containing the deployed product, the home directory for all JFrog products.
## Replace JFROG_HOME with the real path! For example, in RPM install, JFROG_HOME=/opt/jfrog
## NOTE: Sensitive information such as passwords and join key are encrypted on first read.
## NOTE: The provided commented key and value is the default.
## SHARED CONFIGURATIONS
## A shared section for keys across all services in this config
shared:
## Base URL of the JFrog Platform Deployment (JPD)
## This is the URL to the machine where JFrog Artifactory is deployed, or the load balancer pointing to it. It is recommended to use DNS names rather than direct IPs.
## Examples: "http://jfrog.acme.com" or "http://10.20.30.40:8082"
jfrogUrl: {{ jfrog_url }}
## Node Settings
node:
## A unique id to identify this node.
## Default: auto generated at startup.
id: {{ ansible_machine_id }}
## Database Configuration
database:
## One of: mysql, oracle, mssql, postgresql, mariadb
## Default: Embedded derby
## Example for mysql/postgresql
type: "{{ db_type }}"
driver: "{{ db_driver }}"
url: "{{ db_url }}"
username: "{{ db_user }}"
password: "{{ db_password }}"
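## Example rendered database section using the sample Xray inventory values in
## this repo (illustrative only):
##   type: "postgresql"
##   driver: "org.postgresql.Driver"
##   url: "postgres://10.0.0.59:5432/xraydb?sslmode=disable"
##   username: "xray"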

View File

@@ -0,0 +1,2 @@
localhost

View File

@@ -0,0 +1,5 @@
---
- hosts: localhost
remote_user: root
roles:
- xray

View File

@@ -0,0 +1,2 @@
---
# vars file for xray

View File

@@ -0,0 +1,769 @@
{
"Description": "This template deploys a VPC, with a pair of public and private subnets spread across two Availability Zones. It deploys an internet gateway, with a default route on the public subnets. It deploys a pair of NAT gateways (one in each AZ), and default routes for them in the private subnets.",
"Parameters": {
"SSHKeyName": {
"Description": "Name of the ec2 key you need one to use this template",
"Type": "AWS::EC2::KeyPair::KeyName",
"Default": "choose-key"
},
"EnvironmentName": {
"Description": "An environment name that is prefixed to resource names",
"Type": "String",
"Default": "Ansible"
},
"VpcCIDR": {
"Description": "Please enter the IP range (CIDR notation) for this VPC",
"Type": "String",
"Default": "10.192.0.0/16"
},
"PublicSubnet1CIDR": {
"Description": "Please enter the IP range (CIDR notation) for the public subnet in the first Availability Zone",
"Type": "String",
"Default": "10.192.10.0/24"
},
"PublicSubnet2CIDR": {
"Description": "Please enter the IP range (CIDR notation) for the public subnet in the second Availability Zone",
"Type": "String",
"Default": "10.192.11.0/24"
},
"PrivateSubnet1CIDR": {
"Description": "Please enter the IP range (CIDR notation) for the private subnet in the first Availability Zone",
"Type": "String",
"Default": "10.192.20.0/24"
},
"PrivateSubnet2CIDR": {
"Description": "Please enter the IP range (CIDR notation) for the private subnet in the second Availability Zone",
"Type": "String",
"Default": "10.192.21.0/24"
}
},
"Mappings": {
"RegionToAmazonAMI": {
"us-east-1": {
"HVM64": "ami-03e33c1cefd1d3d74"
},
"us-east-2": {
"HVM64": "ami-07d9419c80dc1113c"
},
"us-west-1": {
"HVM64": "ami-0ee1a20d6b0c6a347"
},
"us-west-2": {
"HVM64": "ami-0813245c0939ab3ca"
}
}
},
"Resources": {
"VPC": {
"Type": "AWS::EC2::VPC",
"Properties": {
"CidrBlock": {
"Ref": "VpcCIDR"
},
"EnableDnsSupport": true,
"EnableDnsHostnames": true,
"Tags": [
{
"Key": "Name",
"Value": {
"Ref": "EnvironmentName"
}
}
]
}
},
"InternetGateway": {
"Type": "AWS::EC2::InternetGateway",
"Properties": {
"Tags": [
{
"Key": "Name",
"Value": {
"Ref": "EnvironmentName"
}
}
]
}
},
"InternetGatewayAttachment": {
"Type": "AWS::EC2::VPCGatewayAttachment",
"Properties": {
"InternetGatewayId": {
"Ref": "InternetGateway"
},
"VpcId": {
"Ref": "VPC"
}
}
},
"PublicSubnet1": {
"Type": "AWS::EC2::Subnet",
"Properties": {
"VpcId": {
"Ref": "VPC"
},
"AvailabilityZone": {
"Fn::Select": [
0,
{
"Fn::GetAZs": ""
}
]
},
"CidrBlock": {
"Ref": "PublicSubnet1CIDR"
},
"MapPublicIpOnLaunch": true,
"Tags": [
{
"Key": "Name",
"Value": {
"Fn::Sub": "${EnvironmentName} Public Subnet (AZ1)"
}
}
]
}
},
"PublicSubnet2": {
"Type": "AWS::EC2::Subnet",
"Properties": {
"VpcId": {
"Ref": "VPC"
},
"AvailabilityZone": {
"Fn::Select": [
1,
{
"Fn::GetAZs": ""
}
]
},
"CidrBlock": {
"Ref": "PublicSubnet2CIDR"
},
"MapPublicIpOnLaunch": true,
"Tags": [
{
"Key": "Name",
"Value": {
"Fn::Sub": "${EnvironmentName} Public Subnet (AZ2)"
}
}
]
}
},
"PrivateSubnet1": {
"Type": "AWS::EC2::Subnet",
"Properties": {
"VpcId": {
"Ref": "VPC"
},
"AvailabilityZone": {
"Fn::Select": [
0,
{
"Fn::GetAZs": ""
}
]
},
"CidrBlock": {
"Ref": "PrivateSubnet1CIDR"
},
"MapPublicIpOnLaunch": false,
"Tags": [
{
"Key": "Name",
"Value": {
"Fn::Sub": "${EnvironmentName} Private Subnet (AZ1)"
}
}
]
}
},
"PrivateSubnet2": {
"Type": "AWS::EC2::Subnet",
"Properties": {
"VpcId": {
"Ref": "VPC"
},
"AvailabilityZone": {
"Fn::Select": [
1,
{
"Fn::GetAZs": ""
}
]
},
"CidrBlock": {
"Ref": "PrivateSubnet2CIDR"
},
"MapPublicIpOnLaunch": false,
"Tags": [
{
"Key": "Name",
"Value": {
"Fn::Sub": "${EnvironmentName} Private Subnet (AZ2)"
}
}
]
}
},
"NatGateway1EIP": {
"Type": "AWS::EC2::EIP",
"DependsOn": "InternetGatewayAttachment",
"Properties": {
"Domain": "vpc"
}
},
"NatGateway2EIP": {
"Type": "AWS::EC2::EIP",
"DependsOn": "InternetGatewayAttachment",
"Properties": {
"Domain": "vpc"
}
},
"NatGateway1": {
"Type": "AWS::EC2::NatGateway",
"Properties": {
"AllocationId": {
"Fn::GetAtt": [
"NatGateway1EIP",
"AllocationId"
]
},
"SubnetId": {
"Ref": "PublicSubnet1"
}
}
},
"NatGateway2": {
"Type": "AWS::EC2::NatGateway",
"Properties": {
"AllocationId": {
"Fn::GetAtt": [
"NatGateway2EIP",
"AllocationId"
]
},
"SubnetId": {
"Ref": "PublicSubnet2"
}
}
},
"PublicRouteTable": {
"Type": "AWS::EC2::RouteTable",
"Properties": {
"VpcId": {
"Ref": "VPC"
},
"Tags": [
{
"Key": "Name",
"Value": {
"Fn::Sub": "${EnvironmentName} Public Routes"
}
}
]
}
},
"DefaultPublicRoute": {
"Type": "AWS::EC2::Route",
"DependsOn": "InternetGatewayAttachment",
"Properties": {
"RouteTableId": {
"Ref": "PublicRouteTable"
},
"DestinationCidrBlock": "0.0.0.0/0",
"GatewayId": {
"Ref": "InternetGateway"
}
}
},
"PublicSubnet1RouteTableAssociation": {
"Type": "AWS::EC2::SubnetRouteTableAssociation",
"Properties": {
"RouteTableId": {
"Ref": "PublicRouteTable"
},
"SubnetId": {
"Ref": "PublicSubnet1"
}
}
},
"PublicSubnet2RouteTableAssociation": {
"Type": "AWS::EC2::SubnetRouteTableAssociation",
"Properties": {
"RouteTableId": {
"Ref": "PublicRouteTable"
},
"SubnetId": {
"Ref": "PublicSubnet2"
}
}
},
"PrivateRouteTable1": {
"Type": "AWS::EC2::RouteTable",
"Properties": {
"VpcId": {
"Ref": "VPC"
},
"Tags": [
{
"Key": "Name",
"Value": {
"Fn::Sub": "${EnvironmentName} Private Routes (AZ1)"
}
}
]
}
},
"DefaultPrivateRoute1": {
"Type": "AWS::EC2::Route",
"Properties": {
"RouteTableId": {
"Ref": "PrivateRouteTable1"
},
"DestinationCidrBlock": "0.0.0.0/0",
"NatGatewayId": {
"Ref": "NatGateway1"
}
}
},
"PrivateSubnet1RouteTableAssociation": {
"Type": "AWS::EC2::SubnetRouteTableAssociation",
"Properties": {
"RouteTableId": {
"Ref": "PrivateRouteTable1"
},
"SubnetId": {
"Ref": "PrivateSubnet1"
}
}
},
"PrivateRouteTable2": {
"Type": "AWS::EC2::RouteTable",
"Properties": {
"VpcId": {
"Ref": "VPC"
},
"Tags": [
{
"Key": "Name",
"Value": {
"Fn::Sub": "${EnvironmentName} Private Routes (AZ2)"
}
}
]
}
},
"DefaultPrivateRoute2": {
"Type": "AWS::EC2::Route",
"Properties": {
"RouteTableId": {
"Ref": "PrivateRouteTable2"
},
"DestinationCidrBlock": "0.0.0.0/0",
"NatGatewayId": {
"Ref": "NatGateway2"
}
}
},
"PrivateSubnet2RouteTableAssociation": {
"Type": "AWS::EC2::SubnetRouteTableAssociation",
"Properties": {
"RouteTableId": {
"Ref": "PrivateRouteTable2"
},
"SubnetId": {
"Ref": "PrivateSubnet2"
}
}
},
"EC2SecurityGroup": {
"Type": "AWS::EC2::SecurityGroup",
"Properties": {
"GroupDescription": "SSH, Port 80, Database",
"VpcId": {
"Ref": "VPC"
},
"SecurityGroupIngress": [
{
"IpProtocol": "tcp",
"FromPort": 22,
"ToPort": 22,
"CidrIp": "0.0.0.0/0"
},
{
"IpProtocol": "tcp",
"FromPort": 5432,
"ToPort": 5432,
"CidrIp": "0.0.0.0/0"
},
{
"IpProtocol": "tcp",
"FromPort": 8082,
"ToPort": 8082,
"CidrIp": "0.0.0.0/0"
},
{
"IpProtocol": "tcp",
"FromPort": 80,
"ToPort": 80,
"SourceSecurityGroupId": {
"Ref": "ELBSecurityGroup"
}
}
]
}
},
"ELBSecurityGroup": {
"Type": "AWS::EC2::SecurityGroup",
"Properties": {
"GroupDescription": "SSH and Port 80",
"VpcId": {
"Ref": "VPC"
},
"SecurityGroupIngress": [
{
"IpProtocol": "tcp",
"FromPort": 80,
"ToPort": 80,
"CidrIp": "0.0.0.0/0"
}
]
}
},
"BastionInstance": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": {
"Fn::FindInMap": [
"RegionToAmazonAMI",
{
"Ref": "AWS::Region"
},
"HVM64"
]
},
"InstanceInitiatedShutdownBehavior": "stop",
"InstanceType": "t2.medium",
"KeyName": {
"Ref": "SSHKeyName"
},
"Monitoring": "true",
"NetworkInterfaces": [
{
"AssociatePublicIpAddress": "true",
"DeviceIndex": "0",
"GroupSet": [
{
"Ref": "EC2SecurityGroup"
}
],
"SubnetId": {
"Ref": "PublicSubnet1"
}
}
],
"Tags": [
{
"Key": "Name",
"Value": "bastion"
}
],
"Tenancy": "default"
}
},
"RTPriInstance": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": {
"Fn::FindInMap": [
"RegionToAmazonAMI",
{
"Ref": "AWS::Region"
},
"HVM64"
]
},
"InstanceInitiatedShutdownBehavior": "stop",
"InstanceType": "t2.medium",
"KeyName": {
"Ref": "SSHKeyName"
},
"Monitoring": "true",
"NetworkInterfaces": [
{
"AssociatePublicIpAddress": "false",
"DeviceIndex": "0",
"GroupSet": [
{
"Ref": "EC2SecurityGroup"
}
],
"SubnetId": {
"Ref": "PrivateSubnet1"
}
}
],
"Tags": [
{
"Key": "Name",
"Value": "rtprimary"
}
],
"Tenancy": "default"
}
},
"RTSecInstance": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": {
"Fn::FindInMap": [
"RegionToAmazonAMI",
{
"Ref": "AWS::Region"
},
"HVM64"
]
},
"InstanceInitiatedShutdownBehavior": "stop",
"InstanceType": "t2.medium",
"KeyName": {
"Ref": "SSHKeyName"
},
"Monitoring": "true",
"NetworkInterfaces": [
{
"AssociatePublicIpAddress": "false",
"DeviceIndex": "0",
"GroupSet": [
{
"Ref": "EC2SecurityGroup"
}
],
"SubnetId": {
"Ref": "PrivateSubnet2"
}
}
],
"Tags": [
{
"Key": "Name",
"Value": "rtsecondary"
}
],
"Tenancy": "default"
}
},
"XrayInstance": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": {
"Fn::FindInMap": [
"RegionToAmazonAMI",
{
"Ref": "AWS::Region"
},
"HVM64"
]
},
"InstanceInitiatedShutdownBehavior": "stop",
"InstanceType": "t2.medium",
"KeyName": {
"Ref": "SSHKeyName"
},
"Monitoring": "true",
"NetworkInterfaces": [
{
"AssociatePublicIpAddress": "false",
"DeviceIndex": "0",
"GroupSet": [
{
"Ref": "EC2SecurityGroup"
}
],
"SubnetId": {
"Ref": "PrivateSubnet1"
}
}
],
"Tags": [
{
"Key": "Name",
"Value": "xray"
}
],
"Tenancy": "default"
}
},
"DBInstance": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": {
"Fn::FindInMap": [
"RegionToAmazonAMI",
{
"Ref": "AWS::Region"
},
"HVM64"
]
},
"InstanceInitiatedShutdownBehavior": "stop",
"InstanceType": "t2.medium",
"KeyName": {
"Ref": "SSHKeyName"
},
"Monitoring": "true",
"NetworkInterfaces": [
{
"AssociatePublicIpAddress": "false",
"DeviceIndex": "0",
"GroupSet": [
{
"Ref": "EC2SecurityGroup"
}
],
"SubnetId": {
"Ref": "PrivateSubnet1"
}
}
],
"Tags": [
{
"Key": "Name",
"Value": "database"
}
],
"Tenancy": "default"
}
},
"EC2TargetGroup": {
"Type": "AWS::ElasticLoadBalancingV2::TargetGroup",
"Properties": {
"HealthCheckIntervalSeconds": 30,
"HealthCheckProtocol": "HTTP",
"HealthCheckTimeoutSeconds": 15,
"HealthyThresholdCount": 2,
"Matcher": {
"HttpCode": "200,302"
},
"Name": "EC2TargetGroup",
"Port": 80,
"Protocol": "HTTP",
"TargetGroupAttributes": [
{
"Key": "deregistration_delay.timeout_seconds",
"Value": "20"
}
],
"Targets": [
{
"Id": {
"Ref": "RTPriInstance"
}
},
{
"Id": {
"Ref": "RTSecInstance"
},
"Port": 80
}
],
"UnhealthyThresholdCount": 3,
"VpcId": {
"Ref": "VPC"
},
"Tags": [
{
"Key": "Name",
"Value": "EC2TargetGroup"
},
{
"Key": "Port",
"Value": 80
}
]
}
},
"ALBListener": {
"Type": "AWS::ElasticLoadBalancingV2::Listener",
"Properties": {
"DefaultActions": [
{
"Type": "forward",
"TargetGroupArn": {
"Ref": "EC2TargetGroup"
}
}
],
"LoadBalancerArn": {
"Ref": "ApplicationLoadBalancer"
},
"Port": 80,
"Protocol": "HTTP"
}
},
"ApplicationLoadBalancer": {
"Type": "AWS::ElasticLoadBalancingV2::LoadBalancer",
"Properties": {
"Scheme": "internet-facing",
"Subnets": [
{
"Ref": "PublicSubnet1"
},
{
"Ref": "PublicSubnet2"
}
],
"SecurityGroups": [
{
"Ref": "ELBSecurityGroup"
}
]
}
}
},
"Outputs": {
"VPC": {
"Description": "Virtual Private Cloud",
"Value": {
"Ref": "VPC"
}
},
"ALBHostName": {
"Description": "Application Load Balancer Hostname",
"Value": {
"Fn::GetAtt": [
"ApplicationLoadBalancer",
"DNSName"
]
}
},
"BastionInstancePublic": {
"Description": "Bastion",
"Value": { "Fn::GetAtt" : [ "BastionInstance", "PublicIp" ]}
},
"BastionInstancePrivate": {
"Description": "Bastion",
"Value": { "Fn::GetAtt" : [ "BastionInstance", "PrivateIp" ]}
},
"RTPriInstancePrivate": {
"Description": "RTPriInstance",
"Value": { "Fn::GetAtt" : [ "RTPriInstance", "PrivateIp" ]}
},
"RTSecInstancePrivate": {
"Description": "RTSecInstance",
"Value": { "Fn::GetAtt" : [ "RTSecInstance", "PrivateIp" ]}
},
"XrayInstancePrivate": {
"Description": "XrayInstance",
"Value": { "Fn::GetAtt" : [ "XrayInstance", "PrivateIp" ]}
},
"DBInstancePrivate": {
"Description": "DBInstance",
"Value": { "Fn::GetAtt" : [ "DBInstance", "PrivateIp" ]}
}
}
}

View File

@@ -0,0 +1,679 @@
{
"$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {
"vnetName": {
"type": "string",
"defaultValue": "vnet01",
"metadata": {
"description": "Name of new vnet to deploy into."
}
},
"vnetAddressRange": {
"type": "string",
"defaultValue": "10.0.0.0/16",
"metadata": {
"description": "IP prefix for available addresses in vnet address space."
}
},
"subnetAddressRange": {
"type": "string",
"defaultValue": "10.0.0.0/24",
"metadata": {
"description": "Subnet IP prefix MUST be within vnet IP prefix address space."
}
},
"location": {
"type": "string",
"defaultValue": "[resourceGroup().location]",
"metadata": {
"description": "Location for all resources."
}
},
"adminPublicKey": {
"type": "string",
"metadata": {
"description": "The ssh public key for the VMs."
}
},
"sizeOfDiskInGB": {
"type": "int",
"defaultValue": 128,
"minValue": 128,
"maxValue": 1024,
"metadata": {
"description": "Size of data disk in GB 128-1024"
}
},
"vmSize": {
"type": "string",
"defaultValue": "Standard_D2s_v3",
"metadata": {
"description": "Size of the VMs"
}
},
"numberOfArtifactory": {
"type": "int",
"defaultValue": 1,
"minValue": 1,
"maxValue": 5,
"metadata": {
"description": "Number of Artifactory servers."
}
},
"numberOfXray": {
"type": "int",
"defaultValue": 1,
"minValue": 1,
"maxValue": 5,
"metadata": {
"description": "Number of Xray servers."
}
},
"numberOfDb": {
"type": "int",
"defaultValue": 1,
"minValue": 1,
"maxValue": 2,
"metadata": {
"description": "Number of database servers."
}
}
},
"variables": {
"vnetName": "[parameters('vnetName')]",
"vnetAddressRange": "[parameters('vnetAddressRange')]",
"subnetAddressRange": "[parameters('subnetAddressRange')]",
"subnetName": "mainSubnet",
"loadBalancerName": "LB",
"loadBalancerIp": "lbIp",
"numberOfArtifactory": "[parameters('numberOfArtifactory')]",
"numberOfXray": "[parameters('numberOfXray')]",
"numberOfDb": "[parameters('numberOfDb')]",
"availabilitySetName": "availSet",
"vmArtPri": "vmArtPri",
"vmArtSec": "vmArtSec",
"vmXray": "vmXray",
"vmDb": "vmDb",
"storageAccountNameDiag": "[concat('diag',uniqueString(resourceGroup().id))]",
"subnet-id": "[resourceId('Microsoft.Network/virtualNetworks/subnets',variables('vnetName'),variables('subnetName'))]",
"imagePublisher": "Canonical",
"imageOffer": "UbuntuServer",
"imageSku": "16.04-LTS",
"mainNsg": "mainNsg",
"adminUsername": "ubuntu"
},
"resources": [
{
"apiVersion": "2019-08-01",
"type": "Microsoft.Network/publicIPAddresses",
"name": "[variables('loadBalancerIp')]",
"location": "[parameters('location')]",
"properties": {
"publicIPAllocationMethod": "Static"
}
},
{
"type": "Microsoft.Compute/availabilitySets",
"name": "[variables('availabilitySetName')]",
"apiVersion": "2019-12-01",
"location": "[parameters('location')]",
"sku": {
"name": "Aligned"
},
"properties": {
"platformFaultDomainCount": 2,
"platformUpdateDomainCount": 2
}
},
{
"apiVersion": "2019-06-01",
"type": "Microsoft.Storage/storageAccounts",
"name": "[variables('storageAccountNameDiag')]",
"location": "[parameters('location')]",
"kind": "StorageV2",
"sku": {
"name": "Standard_LRS"
}
},
{
"comments": "Simple Network Security Group for subnet [Subnet]",
"type": "Microsoft.Network/networkSecurityGroups",
"apiVersion": "2019-08-01",
"name": "[variables('mainNsg')]",
"location": "[parameters('location')]",
"properties": {
"securityRules": [
{
"name": "allow-ssh",
"properties": {
"description": "Allow SSH",
"protocol": "TCP",
"sourcePortRange": "*",
"destinationPortRange": "22",
"sourceAddressPrefix": "*",
"destinationAddressPrefix": "*",
"access": "Allow",
"priority": 100,
"direction": "Inbound",
"sourcePortRanges": [],
"destinationPortRanges": [],
"sourceAddressPrefixes": [],
"destinationAddressPrefixes": []
}
},
{
"name": "allow-http",
"properties": {
"description": "Allow HTTP",
"protocol": "TCP",
"sourcePortRange": "*",
"destinationPortRange": "80",
"sourceAddressPrefix": "*",
"destinationAddressPrefix": "*",
"access": "Allow",
"priority": 110,
"direction": "Inbound",
"sourcePortRanges": [],
"destinationPortRanges": [],
"sourceAddressPrefixes": [],
"destinationAddressPrefixes": []
}
}
]
}
},
{
"apiVersion": "2019-08-01",
"type": "Microsoft.Network/virtualNetworks",
"name": "[variables('vnetName')]",
"location": "[parameters('location')]",
"dependsOn": [
"[resourceId('Microsoft.Network/networkSecurityGroups', variables('mainNsg'))]"
],
"properties": {
"addressSpace": {
"addressPrefixes": [
"[variables('vnetAddressRange')]"
]
},
"subnets": [
{
"name": "[variables('subnetName')]",
"properties": {
"addressPrefix": "[variables('subnetAddressRange')]",
"networkSecurityGroup": {
"id": "[resourceId('Microsoft.Network/networkSecurityGroups', variables('mainNsg'))]"
}
}
}
]
}
},
{
"apiVersion": "2018-10-01",
"name": "[variables('loadBalancerName')]",
"type": "Microsoft.Network/loadBalancers",
"location": "[parameters('location')]",
"dependsOn": [
"[concat('Microsoft.Network/publicIPAddresses/',variables('loadBalancerIp'))]"
],
"properties": {
"frontendIpConfigurations": [
{
"name": "LBFE",
"properties": {
"publicIPAddress": {
"id": "[resourceId('Microsoft.Network/publicIPAddresses',variables('loadBalancerIp'))]"
}
}
}
],
"backendAddressPools": [
{
"name": "LBArt"
}
],
"inboundNatRules": [
{
"name": "ssh",
"properties": {
"frontendIPConfiguration": {
"id": "[resourceId('Microsoft.Network/loadBalancers/frontendIPConfigurations',variables('loadBalancerName'),'LBFE')]"
},
"frontendPort": 22,
"backendPort": 22,
"enableFloatingIP": false,
"idleTimeoutInMinutes": 4,
"protocol": "Tcp",
"enableTcpReset": false
}
}
],
"loadBalancingRules": [
{
"properties": {
"frontendIPConfiguration": {
"id": "[resourceId('Microsoft.Network/loadBalancers/frontendIPConfigurations', variables('loadBalancerName'), 'LBFE')]"
},
"backendAddressPool": {
"id": "[resourceId('Microsoft.Network/loadBalancers/backendAddressPools', variables('loadBalancerName'), 'LBArt')]"
},
"probe": {
"id": "[resourceId('Microsoft.Network/loadBalancers/probes', variables('loadBalancerName'), 'lbprobe')]"
},
"protocol": "Tcp",
"frontendPort": 80,
"backendPort": 80,
"idleTimeoutInMinutes": 15
},
"name": "lbrule"
}
],
"probes": [
{
"properties": {
"protocol": "Tcp",
"port": 80,
"intervalInSeconds": 15,
"numberOfProbes": 2
},
"name": "lbprobe"
}
]
}
},
{
"apiVersion": "2019-08-01",
"type": "Microsoft.Network/networkInterfaces",
"name": "[variables('vmArtPri')]",
"location": "[parameters('location')]",
"dependsOn": [
"[variables('vnetName')]",
"[variables('loadBalancerName')]"
],
"properties": {
"ipConfigurations": [
{
"name": "ipconfig",
"properties": {
"privateIPAllocationMethod": "Dynamic",
"subnet": {
"id": "[variables('subnet-id')]"
},
"loadBalancerBackendAddressPools": [
{
"id": "[resourceId('Microsoft.Network/loadBalancers/backendAddressPools',variables('loadBalancerName'),'LBArt')]"
}
],
"loadBalancerInboundNatRules": [
{
"id": "[resourceId('Microsoft.Network/loadBalancers/inboundNatRules', variables('loadBalancerName'), 'ssh')]"
}
]
}
}
]
}
},
{
"apiVersion": "2019-08-01",
"type": "Microsoft.Network/networkInterfaces",
"name": "[concat(variables('vmArtSec'),copyindex())]",
"copy": {
"name": "netIntLoop",
"count": "[sub(variables('numberOfArtifactory'),1)]"
},
"location": "[parameters('location')]",
"dependsOn": [
"[variables('vnetName')]",
"[variables('loadBalancerName')]"
],
"properties": {
"ipConfigurations": [
{
"name": "ipconfig",
"properties": {
"privateIPAllocationMethod": "Dynamic",
"subnet": {
"id": "[variables('subnet-id')]"
},
"loadBalancerBackendAddressPools": [
{
"id": "[resourceId('Microsoft.Network/loadBalancers/backendAddressPools',variables('loadBalancerName'),'LBArt')]"
}
]
}
}
]
}
},
{
"apiVersion": "2019-08-01",
"type": "Microsoft.Network/networkInterfaces",
"name": "[concat(variables('vmXray'),copyindex())]",
"copy": {
"name": "netXrLoop",
"count": "[variables('numberOfXray')]"
},
"location": "[parameters('location')]",
"dependsOn": [
"[variables('vnetName')]"
],
"properties": {
"ipConfigurations": [
{
"name": "ipconfig",
"properties": {
"privateIPAllocationMethod": "Dynamic",
"subnet": {
"id": "[variables('subnet-id')]"
}
}
}
]
}
},
{
"apiVersion": "2019-08-01",
"type": "Microsoft.Network/networkInterfaces",
"name": "[concat(variables('vmDb'),copyindex())]",
"copy": {
"name": "netDbLoop",
"count": "[variables('numberOfDb')]"
},
"location": "[parameters('location')]",
"dependsOn": [
"[variables('vnetName')]"
],
"properties": {
"ipConfigurations": [
{
"name": "ipconfig",
"properties": {
"privateIPAllocationMethod": "Dynamic",
"subnet": {
"id": "[variables('subnet-id')]"
}
}
}
]
}
},
{
"apiVersion": "2019-12-01",
"type": "Microsoft.Compute/virtualMachines",
"name": "[variables('vmArtPri')]",
"location": "[parameters('location')]",
"dependsOn": [
"[resourceId('Microsoft.Storage/storageAccounts', variables('storageAccountNameDiag'))]",
"[resourceId('Microsoft.Network/networkInterfaces', variables('vmArtPri'))]",
"[resourceId('Microsoft.Compute/availabilitySets', variables('availabilitySetName'))]"
],
"properties": {
"availabilitySet": {
"id": "[resourceId('Microsoft.Compute/availabilitySets', variables('availabilitySetName'))]"
},
"hardwareProfile": {
"vmSize": "[parameters('vmSize')]"
},
"osProfile": {
"computerName": "[variables('vmArtPri')]",
"adminUsername": "[variables('adminUsername')]",
"linuxConfiguration": {
"disablePasswordAuthentication": true,
"ssh": {
"publicKeys": [
{
"path": "[concat('/home/', variables('adminUsername'), '/.ssh/authorized_keys')]",
"keyData": "[parameters('adminPublicKey')]"
}
]
}
}
},
"storageProfile": {
"imageReference": {
"publisher": "[variables('imagePublisher')]",
"offer": "[variables('imageOffer')]",
"sku": "[variables('imageSku')]",
"version": "latest"
},
"osDisk": {
"createOption": "FromImage"
}
},
"networkProfile": {
"networkInterfaces": [
{
"id": "[resourceId('Microsoft.Network/networkInterfaces',variables('vmArtPri'))]"
}
]
},
"diagnosticsProfile": {
"bootDiagnostics": {
"enabled": true,
"storageUri": "[reference(variables('storageAccountNameDiag'), '2019-06-01').primaryEndpoints.blob]"
}
}
}
},
{
"apiVersion": "2019-12-01",
"type": "Microsoft.Compute/virtualMachines",
"name": "[concat(variables('vmArtSec'), copyindex())]",
"copy": {
"name": "virtualMachineLoop",
"count": "[sub(variables('numberOfArtifactory'),1)]"
},
"location": "[parameters('location')]",
"dependsOn": [
"[resourceId('Microsoft.Storage/storageAccounts', variables('storageAccountNameDiag'))]",
"[resourceId('Microsoft.Network/networkInterfaces', concat(variables('vmArtSec'),copyindex()))]",
"[resourceId('Microsoft.Compute/availabilitySets', variables('availabilitySetName'))]"
],
"properties": {
"availabilitySet": {
"id": "[resourceId('Microsoft.Compute/availabilitySets', variables('availabilitySetName'))]"
},
"hardwareProfile": {
"vmSize": "[parameters('vmSize')]"
},
"osProfile": {
"computerName": "[concat(variables('vmArtSec'), copyindex())]",
"adminUsername": "[variables('adminUsername')]",
"linuxConfiguration": {
"disablePasswordAuthentication": true,
"ssh": {
"publicKeys": [
{
"path": "[concat('/home/', variables('adminUsername'), '/.ssh/authorized_keys')]",
"keyData": "[parameters('adminPublicKey')]"
}
]
}
}
},
"storageProfile": {
"imageReference": {
"publisher": "[variables('imagePublisher')]",
"offer": "[variables('imageOffer')]",
"sku": "[variables('imageSku')]",
"version": "latest"
},
"osDisk": {
"createOption": "FromImage"
}
},
"networkProfile": {
"networkInterfaces": [
{
"id": "[resourceId('Microsoft.Network/networkInterfaces',concat(variables('vmArtSec'),copyindex()))]"
}
]
},
"diagnosticsProfile": {
"bootDiagnostics": {
"enabled": true,
"storageUri": "[reference(variables('storageAccountNameDiag'), '2019-06-01').primaryEndpoints.blob]"
}
}
}
},
{
"apiVersion": "2019-12-01",
"type": "Microsoft.Compute/virtualMachines",
"name": "[concat(variables('vmXray'), copyindex())]",
"copy": {
"name": "virtualMachineLoop",
"count": "[variables('numberOfXray')]"
},
"location": "[parameters('location')]",
"dependsOn": [
"[resourceId('Microsoft.Storage/storageAccounts', variables('storageAccountNameDiag'))]",
"[resourceId('Microsoft.Network/networkInterfaces', concat(variables('vmXray'),copyindex()))]",
"[resourceId('Microsoft.Compute/availabilitySets', variables('availabilitySetName'))]"
],
"properties": {
"availabilitySet": {
"id": "[resourceId('Microsoft.Compute/availabilitySets', variables('availabilitySetName'))]"
},
"hardwareProfile": {
"vmSize": "[parameters('vmSize')]"
},
"osProfile": {
"computerName": "[concat(variables('vmXray'), copyindex())]",
"adminUsername": "[variables('adminUsername')]",
"linuxConfiguration": {
"disablePasswordAuthentication": true,
"ssh": {
"publicKeys": [
{
"path": "[concat('/home/', variables('adminUsername'), '/.ssh/authorized_keys')]",
"keyData": "[parameters('adminPublicKey')]"
}
]
}
}
},
"storageProfile": {
"imageReference": {
"publisher": "[variables('imagePublisher')]",
"offer": "[variables('imageOffer')]",
"sku": "[variables('imageSku')]",
"version": "latest"
},
"osDisk": {
"createOption": "FromImage"
}
},
"networkProfile": {
"networkInterfaces": [
{
"id": "[resourceId('Microsoft.Network/networkInterfaces',concat(variables('vmXray'),copyindex()))]"
}
]
},
"diagnosticsProfile": {
"bootDiagnostics": {
"enabled": true,
"storageUri": "[reference(variables('storageAccountNameDiag'), '2019-06-01').primaryEndpoints.blob]"
}
}
}
},
{
"apiVersion": "2019-12-01",
"type": "Microsoft.Compute/virtualMachines",
"name": "[concat(variables('vmDb'), copyindex())]",
"copy": {
"name": "virtualMachineLoop",
"count": "[variables('numberOfDb')]"
},
"location": "[parameters('location')]",
"dependsOn": [
"[resourceId('Microsoft.Storage/storageAccounts', variables('storageAccountNameDiag'))]",
"[resourceId('Microsoft.Network/networkInterfaces', concat(variables('vmDb'),copyindex()))]",
"[resourceId('Microsoft.Compute/availabilitySets', variables('availabilitySetName'))]"
],
"properties": {
"availabilitySet": {
"id": "[resourceId('Microsoft.Compute/availabilitySets', variables('availabilitySetName'))]"
},
"hardwareProfile": {
"vmSize": "[parameters('vmSize')]"
},
"osProfile": {
"computerName": "[concat(variables('vmDb'), copyindex())]",
"adminUsername": "[variables('adminUsername')]",
"linuxConfiguration": {
"disablePasswordAuthentication": true,
"ssh": {
"publicKeys": [
{
"path": "[concat('/home/', variables('adminUsername'), '/.ssh/authorized_keys')]",
"keyData": "[parameters('adminPublicKey')]"
}
]
}
}
},
"storageProfile": {
"imageReference": {
"publisher": "[variables('imagePublisher')]",
"offer": "[variables('imageOffer')]",
"sku": "[variables('imageSku')]",
"version": "latest"
},
"osDisk": {
"createOption": "FromImage"
}
},
"networkProfile": {
"networkInterfaces": [
{
"id": "[resourceId('Microsoft.Network/networkInterfaces',concat(variables('vmDb'),copyindex()))]"
}
]
},
"diagnosticsProfile": {
"bootDiagnostics": {
"enabled": true,
"storageUri": "[reference(variables('storageAccountNameDiag'), '2019-06-01').primaryEndpoints.blob]"
}
}
}
}
],
"outputs": {
"lbIp": {
"type": "string",
"value": "[reference(resourceId('Microsoft.Network/publicIPAddresses', variables('loadBalancerIp'))).ipAddress]"
},
"vmArtPriIp": {
"type": "string",
"value": "[reference(resourceId('Microsoft.Network/networkInterfaces', variables('vmArtPri'))).ipConfigurations[0].properties.privateIPAddress]"
},
"vmArtSecArrIp": {
"type": "array",
"copy": {
"count": "[sub(variables('numberOfArtifactory'),1)]",
"input": "[reference(resourceId('Microsoft.Network/networkInterfaces', concat(variables('vmArtSec'),copyindex()))).ipConfigurations[0].properties.privateIPAddress]"
}
},
"vmXrayArrIp": {
"type": "array",
"copy": {
"count": "[variables('numberOfXray')]",
"input": "[reference(resourceId('Microsoft.Network/networkInterfaces', concat(variables('vmXray'),copyindex()))).ipConfigurations[0].properties.privateIPAddress]"
}
},
"vmDbArrIp": {
"type": "array",
"copy": {
"count": "[variables('numberOfDb')]",
"input": "[reference(resourceId('Microsoft.Network/networkInterfaces', concat(variables('vmDb'),copyindex()))).ipConfigurations[0].properties.privateIPAddress]"
}
}
}
}

102
Ansible/pipelines.yaml Normal file
View File

@@ -0,0 +1,102 @@
resources:
- name: ansibleRepo
type: GitRepo
configuration:
gitProvider: jefferyfryGithub
path: jefferyfry/JFrog-Cloud-Installers
pipelines:
- name: ansible_aws_azure_automation_pipeline
steps:
- name: execute_aws_ansible_playbook
type: Bash
configuration:
runtime:
type: image
image:
auto:
language: java
versions:
- "8"
integrations:
- name: ansibleAwsKeys
- name: ansibleEnvVars
- name: ansiblePrivateKey
inputResources:
- name: ansibleRepo
execution:
onStart:
- echo "Executing AWS Ansible playbook..."
onExecute:
- sudo apt-get update
- sudo apt-get install gnupg2
- sudo apt-get install software-properties-common
- sudo apt-add-repository --yes --update ppa:ansible/ansible
- sudo apt -y --allow-unauthenticated install ansible
- sudo pip install packaging
- sudo pip install boto3 botocore
- cd ../dependencyState/resources/ansibleRepo
- echo 'Setting environment variables...'
- export artifactory_license1="$int_ansibleEnvVars_artifactory_license1"
- export artifactory_license2="$int_ansibleEnvVars_artifactory_license2"
- export artifactory_license3="$int_ansibleEnvVars_artifactory_license3"
- export master_key="$int_ansibleEnvVars_master_key"
- export join_key="$int_ansibleEnvVars_join_key"
- export ssh_public_key_name="$int_ansibleEnvVars_ssh_public_key_name"
- export cfn_template="$int_ansibleEnvVars_cfn_template"
- export stack_name="$int_ansibleEnvVars_stack_name"
- export AWS_ACCESS_KEY_ID="$int_ansibleEnvVars_AWS_ACCESS_KEY_ID"
- export AWS_SECRET_KEY="$int_ansibleEnvVars_AWS_SECRET_KEY"
- printenv
- eval $(ssh-agent -s)
- ssh-add <(echo "$int_ansiblePrivateKey_key")
- ansible-playbook Ansible/test/aws/playbook.yaml
onComplete:
- echo "AWS Ansible playbook complete."
- name: execute_azure_ansible_playbook
type: Bash
configuration:
runtime:
type: image
image:
auto:
language: java
versions:
- "8"
integrations:
- name: ansibleAzureKeys
- name: ansibleEnvVars
- name: ansiblePrivateKey
inputResources:
- name: ansibleRepo
execution:
onStart:
- echo "Executing Azure Ansible playbook..."
onExecute:
- sudo apt-get update
- sudo apt-get install gnupg2
- sudo apt-get install software-properties-common
- sudo apt-add-repository --yes --update ppa:ansible/ansible
- sudo apt -y --allow-unauthenticated install ansible
- sudo pip install packaging
- sudo pip install msrestazure
- sudo pip install 'ansible[azure]'
- cd ../dependencyState/resources/ansibleRepo
- echo 'Setting environment variables...'
- export artifactory_license1="$int_ansibleEnvVars_artifactory_license1"
- export artifactory_license2="$int_ansibleEnvVars_artifactory_license2"
- export artifactory_license3="$int_ansibleEnvVars_artifactory_license3"
- export master_key="$int_ansibleEnvVars_master_key"
- export join_key="$int_ansibleEnvVars_join_key"
- export ssh_public_key="$int_ansibleEnvVars_ssh_public_key"
- export arm_template="$int_ansibleEnvVars_arm_template"
- export azure_resource_group="$int_ansibleEnvVars_azure_resource_group"
- export clientId="$int_ansibleAzureKeys_appId"
- export clientSecret="$int_ansibleAzureKeys_password"
- export tenantId="$int_ansibleAzureKeys_tenant"
- printenv
- eval $(ssh-agent -s)
- ssh-add <(echo "$int_ansiblePrivateKey_key")
- az login --service-principal -u "$clientId" -p "$clientSecret" --tenant "$tenantId"
- ansible-playbook Ansible/test/azure/playbook.yaml
onComplete:
- echo "Azure Ansible playbook complete."

View File

@@ -0,0 +1,51 @@
---
all:
vars:
ansible_user: "ubuntu"
ansible_ssh_private_key_file: "/Users/jefff/.ssh/ansible-priv.pem"
children:
database:
hosts:
#artifactory database
52.86.32.79:
db_users:
- { db_user: "artifactory", db_password: "Art1fAct0ry" }
dbs:
- { db_name: "artifactory", db_owner: "artifactory" }
artifactory:
vars:
artifactory_ha_enabled: true
master_key: "c97b862469de0d94fbb7d48130637a5a"
join_key: "9bcca98f375c0728d907cc6ee39d4f02"
db_download_url: "https://jdbc.postgresql.org/download/postgresql-42.2.12.jar"
db_type: "postgresql"
db_driver: "org.postgresql.Driver"
db_url: "jdbc:postgresql://10.0.0.160:5432/artifactory"
db_user: "artifactory"
db_password: "Art1fAct0ry"
server_name: "ec2-100-25-104-198.compute-1.amazonaws.com"
certificate: |
-----BEGIN CERTIFICATE-----
x
-----END CERTIFICATE-----
certificate_key: |
-----BEGIN PRIVATE KEY-----
x
-----END PRIVATE KEY-----
children:
primary:
hosts:
100.25.104.198:
artifactory_is_primary: true
artifactory_license1: x
artifactory_license2: x
artifactory_license3: x
artifactory_license4: x
artifactory_license5: x
secondary:
hosts:
54.160.107.157:
35.153.79.44:
vars:
artifactory_is_primary: false
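# NOTE: the 'x' values above are placeholders; substitute real license strings
# before running the playbook.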

View File

@@ -0,0 +1,11 @@
---
- hosts: database
gather_facts: true
roles:
- jfrog/ansible/roles/postgres
- hosts: primary:secondary
gather_facts: true
roles:
- jfrog/ansible/roles/artifactory
- jfrog/ansible/roles/artifactory-nginx-ssl

View File

@@ -0,0 +1,40 @@
---
all:
vars:
ansible_user: "ubuntu"
ansible_ssh_private_key_file: "/Users/jefff/.ssh/ansible-priv.pem"
ansible_ssh_common_args: '-o ProxyCommand="ssh -o StrictHostKeyChecking=no -A ubuntu@13.82.225.20 -W %h:%p"'
children:
database:
hosts:
34.239.107.0:
dbs:
- { db_name: "artifactory", db_owner: "artifactory" }
- { db_name: "xraydb", db_owner: "xray" }
db_users:
- { db_user: "artifactory", db_password: "Art1fAct0ry" }
- { db_user: "xray", db_password: "xray" }
artifactory:
hosts:
54.237.207.135:
artifactory_license1: x
artifactory_license2: x
artifactory_license3: x
artifactory_license4: x
artifactory_license5: x
db_download_url: "https://jdbc.postgresql.org/download/postgresql-42.2.12.jar"
db_type: "postgresql"
db_driver: "org.postgresql.Driver"
db_url: "jdbc:postgresql://10.0.0.59:5432/artifactory"
db_user: "artifactory"
db_password: "Art1fAct0ry"
server_name: "ec2-54-237-207-135.compute-1.amazonaws.com"
xray:
hosts:
100.25.104.174:
jfrog_url: "http://ec2-54-237-207-135.compute-1.amazonaws.com"
db_type: "postgresql"
db_driver: "org.postgresql.Driver"
db_url: "postgres://10.0.0.59:5432/xraydb?sslmode=disable"
db_user: "xray"
db_password: "xray"

View File

@@ -0,0 +1,18 @@
---
- hosts: localhost
  tasks:
    - debug:
        var: master_key
    - debug:
        var: join_key
- hosts: database
roles:
- postgres
- hosts: artifactory
roles:
- artifactory
- hosts: xray
roles:
- xray

View File

@@ -0,0 +1,3 @@
#!/usr/bin/env bash
ansible-playbook -i hosts.yml playbook.yml --extra-vars "master_key=$(openssl rand -hex 16) join_key=$(openssl rand -hex 16)"
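# 'openssl rand -hex 16' yields a random 32-character hex string; one is
# generated here for the master key and another for the join key.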

View File

@@ -0,0 +1,60 @@
---
all:
vars:
ansible_user: "ubuntu"
ansible_ssh_private_key_file: "/Users/jefff/.ssh/ansible-priv.pem"
ansible_ssh_common_args: '-o ProxyCommand="ssh -o StrictHostKeyChecking=no -A ubuntu@13.82.225.20 -W %h:%p"'
children:
database:
hosts:
#artifactory database
10.0.0.6:
db_users:
- { db_user: "artifactory", db_password: "Art1fAct0ry" }
dbs:
- { db_name: "artifactory", db_owner: "artifactory" }
#xray database
10.0.0.4:
dbs:
- { db_name: "xraydb", db_owner: "xray" }
db_users:
- { db_user: "xray", db_password: "xray" }
artifactory:
vars:
artifactory_ha_enabled: true
master_key: "c97b862469de0d94fbb7d48130637a5a"
join_key: "9bcca98f375c0728d907cc6ee39d4f02"
db_download_url: "https://jdbc.postgresql.org/download/postgresql-42.2.12.jar"
db_type: "postgresql"
db_driver: "org.postgresql.Driver"
db_url: "jdbc:postgresql://10.0.0.6:5432/artifactory"
db_user: "artifactory"
db_password: "Art1fAct0ry"
server_name: "rt.13.82.225.208.xip.io"
children:
primary:
hosts:
10.0.0.8:
artifactory_is_primary: true
artifactory_license1: x
artifactory_license2: x
artifactory_license3: x
artifactory_license4: x
artifactory_license5: x
secondary:
hosts:
10.0.0.9:
vars:
artifactory_is_primary: false
xray:
vars:
jfrog_url: http://rt.13.82.225.208.xip.io/
master_key: "c97b862469de0d94fbb7d48130637a5a"
join_key: "9bcca98f375c0728d907cc6ee39d4f02"
db_type: "postgresql"
db_driver: "org.postgresql.Driver"
db_url: "postgres://10.0.0.4:5432/xraydb?sslmode=disable"
db_user: "xray"
db_password: "xray"
hosts:
10.0.0.5:

View File

@@ -0,0 +1,12 @@
---
- hosts: database
roles:
- postgres
- hosts: primary:secondary
roles:
- artifactory
- hosts: xray
roles:
- xray

View File

@@ -0,0 +1,55 @@
---
all:
vars:
ansible_user: "ubuntu"
ansible_ssh_private_key_file: "/Users/jefff/.ssh/ansible-priv.pem"
children:
database:
hosts:
#artifactory database
52.86.32.79:
dbs:
- { db_name: "artifactory", db_owner: "artifactory" }
db_users:
- { db_user: "artifactory", db_password: "Art1fAct0ry" }
#xray database
100.25.152.93:
dbs:
- { db_name: "xraydb", db_owner: "xray" }
db_users:
- { db_user: "xray", db_password: "xray" }
artifactory:
vars:
artifactory_ha_enabled: true
master_key: "c97b862469de0d94fbb7d48130637a5a"
join_key: "9bcca98f375c0728d907cc6ee39d4f02"
db_download_url: "https://jdbc.postgresql.org/download/postgresql-42.2.12.jar"
db_type: "postgresql"
db_driver: "org.postgresql.Driver"
db_url: "jdbc:postgresql://10.0.0.51:5432/artifactory"
db_user: "artifactory"
db_password: "Art1fAct0ry"
server_name: "ec2-18-210-33-94.compute-1.amazonaws.com"
children:
primary:
hosts:
18.210.33.94:
artifactory_is_primary: true
artifactory_license1: x
artifactory_license2: x
artifactory_license3: x
artifactory_license4: x
artifactory_license5: x
xray:
vars:
jfrog_url: http://ec2-18-210-33-94.compute-1.amazonaws.com
master_key: "c97b862469de0d94fbb7d48130637a5a"
join_key: "9bcca98f375c0728d907cc6ee39d4f02"
db_type: "postgresql"
db_driver: "org.postgresql.Driver"
db_url: "postgres://10.0.0.5:5432/xraydb?sslmode=disable"
db_user: "xray"
db_password: "xray"
hosts:
# 34.229.56.166:
54.237.68.180:

View File

@@ -0,0 +1,12 @@
---
- hosts: database
roles:
- postgres
- hosts: primary
roles:
- artifactory
- hosts: xray
roles:
- xray

View File

@@ -0,0 +1,43 @@
---
all:
vars:
ansible_user: "ubuntu"
ansible_ssh_private_key_file: "/Users/jefff/.ssh/ansible-priv.pem"
children:
database:
hosts:
34.239.107.0:
dbs:
- { db_name: "artifactory", db_owner: "artifactory" }
- { db_name: "xraydb", db_owner: "xray" }
db_users:
- { db_user: "artifactory", db_password: "Art1fAct0ry" }
- { db_user: "xray", db_password: "xray" }
artifactory:
hosts:
54.237.207.135:
artifactory_license1: x
artifactory_license2: x
artifactory_license3: x
artifactory_license4: x
artifactory_license5: x
master_key: "c97b862469de0d94fbb7d48130637a5a"
join_key: "9bcca98f375c0728d907cc6ee39d4f02"
db_download_url: "https://jdbc.postgresql.org/download/postgresql-42.2.12.jar"
db_type: "postgresql"
db_driver: "org.postgresql.Driver"
db_url: "jdbc:postgresql://10.0.0.59:5432/artifactory"
db_user: "artifactory"
db_password: "Art1fAct0ry"
server_name: "ec2-54-237-207-135.compute-1.amazonaws.com"
xray:
hosts:
100.25.104.174:
jfrog_url: "http://ec2-54-237-207-135.compute-1.amazonaws.com"
master_key: "c97b862469de0d94fbb7d48130637a5a"
join_key: "9bcca98f375c0728d907cc6ee39d4f02"
db_type: "postgresql"
db_driver: "org.postgresql.Driver"
db_url: "postgres://10.0.0.59:5432/xraydb?sslmode=disable"
db_user: "xray"
db_password: "xray"


@@ -0,0 +1,12 @@
---
- hosts: database
roles:
- postgres
- hosts: artifactory
roles:
- artifactory
- hosts: xray
roles:
- xray


@@ -0,0 +1,24 @@
---
all:
vars:
ansible_user: "ubuntu"
children:
database:
hosts:
54.83.163.100:
db_users:
- { db_user: "artifactory", db_password: "{{ lookup('env', 'artifactory_password') }}" }
dbs:
- { db_name: "artifactory", db_owner: "artifactory" }
primary:
hosts:
54.165.47.191:
artifactory_is_primary: true
artifactory_license_file: "{{ lookup('env', 'artifactory_license_file') }}"
db_download_url: "https://jdbc.postgresql.org/download/postgresql-42.2.12.jar"
db_type: "postgresql"
db_driver: "org.postgresql.Driver"
db_url: "jdbc:postgresql://10.0.0.219:5432/artifactory"
db_user: "artifactory"
db_password: "{{ lookup('env', 'artifactory_password') }}"
server_name: "ec2-54-165-47-191.compute-1.amazonaws.com"


@@ -0,0 +1,8 @@
---
- hosts: database
roles:
- postgres
- hosts: primary
roles:
- artifactory


@@ -0,0 +1,3 @@
#!/usr/bin/env bash
ansible-playbook -i Ansible/project/rt/hosts.yml Ansible/project/rt/playbook.yml --extra-vars "master_key=$(openssl rand -hex 16) join_key=$(openssl rand -hex 16)"


@@ -0,0 +1,39 @@
---
all:
vars:
ansible_user: "ubuntu"
ansible_ssh_private_key_file: "/Users/jefff/.ssh/ansible-priv.pem"
children:
database:
hosts:
52.86.32.79:
db_users:
- { db_user: "artifactory", db_password: "Art1fAct0ry" }
dbs:
- { db_name: "artifactory", db_owner: "artifactory" }
primary:
hosts:
100.25.104.198:
artifactory_is_primary: true
artifactory_license1: x
artifactory_license2: x
artifactory_license3: x
artifactory_license4: x
artifactory_license5: x
master_key: "c97b862469de0d94fbb7d48130637a5a"
join_key: "9bcca98f375c0728d907cc6ee39d4f02"
db_download_url: "https://jdbc.postgresql.org/download/postgresql-42.2.12.jar"
db_type: "postgresql"
db_driver: "org.postgresql.Driver"
db_url: "jdbc:postgresql://10.0.0.160:5432/artifactory"
db_user: "artifactory"
db_password: "Art1fAct0ry"
server_name: "ec2-100-25-104-198.compute-1.amazonaws.com"
certificate: |
-----BEGIN CERTIFICATE-----
x
-----END CERTIFICATE-----
certificate_key: |
-----BEGIN PRIVATE KEY-----
x
-----END PRIVATE KEY-----
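The certificate and key blocks above are placeholders. For a disposable test host, a self-signed pair can be generated and pasted into the inventory; a hedged sketch using standard openssl options (output file names are illustrative):

```
# Self-signed certificate/key pair for test use only.
openssl req -x509 -nodes -newkey rsa:2048 -days 365 \
  -keyout certificate.key -out certificate.pem \
  -subj "/CN=ec2-100-25-104-198.compute-1.amazonaws.com"
```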


@@ -0,0 +1,9 @@
---
- hosts: database
roles:
- postgres
- hosts: primary
roles:
- artifactory
- artifactory-nginx-ssl


@@ -0,0 +1,148 @@
---
- name: Provision AWS test infrastructure
hosts: localhost
tasks:
- shell: 'pwd'
register: cmd
- debug:
msg: "{{ cmd.stdout }}"
- name: Create AWS test system
cloudformation:
stack_name: "{{ lookup('env', 'stack_name') }}"
state: "present"
region: "us-east-1"
disable_rollback: true
template: "{{ lookup('env', 'cfn_template') }}"
template_parameters:
SSHKeyName: "{{ lookup('env', 'ssh_public_key_name') }}"
tags:
Stack: "{{ lookup('env', 'stack_name') }}"
register: AWSDeployment
- name: Get AWS deployment details
debug:
var: AWSDeployment
- name: Add bastion
add_host:
hostname: "{{ AWSDeployment.stack_outputs.BastionInstancePublic }}"
groups: bastion
ansible_user: "ubuntu"
- name: Add new RT primary to host group
add_host:
hostname: "{{ AWSDeployment.stack_outputs.RTPriInstancePrivate }}"
ansible_user: "ubuntu"
ansible_ssh_common_args: '-o ProxyCommand="ssh -o StrictHostKeyChecking=no -A ubuntu@{{ AWSDeployment.stack_outputs.BastionInstancePublic }} -W %h:%p"'
db_url: "jdbc:postgresql://{{ AWSDeployment.stack_outputs.DBInstancePrivate }}:5432/artifactory"
server_name: "{{ AWSDeployment.stack_outputs.ALBHostName }}"
artifactory_is_primary: true
artifactory_license1: "{{ lookup('env', 'artifactory_license1') }}"
artifactory_license2: "{{ lookup('env', 'artifactory_license2') }}"
artifactory_license3: "{{ lookup('env', 'artifactory_license3') }}"
groups:
- artifactory
- name: Add RT secondaries to host group
add_host:
hostname: "{{ AWSDeployment.stack_outputs.RTSecInstancePrivate }}"
ansible_user: "ubuntu"
ansible_ssh_common_args: '-o ProxyCommand="ssh -o StrictHostKeyChecking=no -A ubuntu@{{ AWSDeployment.stack_outputs.BastionInstancePublic }} -W %h:%p"'
db_url: "jdbc:postgresql://{{ AWSDeployment.stack_outputs.DBInstancePrivate }}:5432/artifactory"
server_name: "{{ AWSDeployment.stack_outputs.ALBHostName }}"
artifactory_is_primary: false
groups:
- artifactory
- name: Add xrays to host group
add_host:
hostname: "{{ AWSDeployment.stack_outputs.XrayInstancePrivate }}"
ansible_user: "ubuntu"
ansible_ssh_common_args: '-o ProxyCommand="ssh -o StrictHostKeyChecking=no -A ubuntu@{{ AWSDeployment.stack_outputs.BastionInstancePublic }} -W %h:%p"'
jfrog_url: "http://{{ AWSDeployment.stack_outputs.ALBHostName }}"
master_key: "{{ lookup('env', 'master_key') }}"
join_key: "{{ lookup('env', 'join_key') }}"
db_type: "postgresql"
db_driver: "org.postgresql.Driver"
db_user: "xray"
db_password: "xray"
db_url: "postgres://{{ AWSDeployment.stack_outputs.DBInstancePrivate }}:5432/xraydb?sslmode=disable"
groups: xray
- name: Add DBs to host group
add_host:
hostname: "{{ AWSDeployment.stack_outputs.DBInstancePrivate }}"
ansible_user: "ubuntu"
ansible_ssh_common_args: '-o ProxyCommand="ssh -o StrictHostKeyChecking=no -A ubuntu@{{ AWSDeployment.stack_outputs.BastionInstancePublic }} -W %h:%p"'
db_users:
- { db_user: "artifactory", db_password: "Art1fAct0ry" }
- { db_user: "xray", db_password: "xray" }
dbs:
- { db_name: "artifactory", db_owner: "artifactory" }
- { db_name: "xraydb", db_owner: "xray" }
groups: database
- name: Set up test environment url
replace:
path: ../tests/src/test/resources/testenv.yaml
regexp: 'urlval'
replace: "http://{{ AWSDeployment.stack_outputs.ALBHostName }}"
- name: Set up test environment external_ip
replace:
path: ../tests/src/test/resources/testenv.yaml
regexp: 'ipval'
replace: "{{ AWSDeployment.stack_outputs.ALBHostName }}"
- name: Set up test environment rt_password
replace:
path: ../tests/src/test/resources/testenv.yaml
regexp: 'passval'
replace: "password"
- name: show testenv.yaml
debug: var=item
with_file:
- ../tests/src/test/resources/testenv.yaml
- name: Wait 300 seconds for port 22
wait_for:
port: 22
host: "{{ AWSDeployment.stack_outputs.BastionInstancePublic }}"
delay: 10
- debug:
msg: "Unified URL is at http://{{ AWSDeployment.stack_outputs.ALBHostName }}"
- hosts: database
roles:
- postgres
- hosts: artifactory
vars:
artifactory_ha_enabled: true
master_key: "{{ lookup('env', 'master_key') }}"
join_key: "{{ lookup('env', 'join_key') }}"
db_download_url: "https://jdbc.postgresql.org/download/postgresql-42.2.12.jar"
db_type: "postgresql"
db_driver: "org.postgresql.Driver"
db_user: "artifactory"
db_password: "Art1fAct0ry"
roles:
- artifactory
- hosts: xray
roles:
- xray
- name: Test
hosts: localhost
tasks:
- name: Run tests
shell:
cmd: ./gradlew clean unified_test
chdir: ../tests/
- name: Cleanup and delete stack
cloudformation:
stack_name: "{{ lookup('env', 'stack_name') }}"
region: "us-east-1"
state: "absent"

3
Ansible/test/aws/runAws.sh Executable file

@@ -0,0 +1,3 @@
#!/usr/bin/env bash
ansible-playbook Ansible/test/aws/playbook.yaml


@@ -0,0 +1,162 @@
---
- name: Provision Azure test infrastructure
hosts: localhost
tasks:
- name: Create azure test system
azure_rm_deployment:
resource_group: "{{ lookup('env', 'azure_resource_group') }}"
location: eastus
name: AzureAnsibleInfra
parameters:
vnetName:
value: "vnetAnsible"
vnetAddressRange:
value: "10.0.0.0/16"
subnetAddressRange:
value: "10.0.0.0/24"
location:
value: "eastus"
adminPublicKey:
value: "{{ lookup('env', 'ssh_public_key') }}"
sizeOfDiskInGB:
value: 128
vmSize:
value: Standard_D2s_v3
numberOfArtifactory:
value: 2
numberOfXray:
value: 1
numberOfDb:
value: 1
template_link: "{{ lookup('env', 'arm_template') }}"
register: azureDeployment
- name: Get Azure deployment details
debug:
var: azureDeployment
- name: Add bastion
add_host:
hostname: "{{ azureDeployment.deployment.outputs.lbIp.value }}"
groups: bastion
ansible_user: "ubuntu"
- name: Add new RT primary to host group
add_host:
hostname: "{{ azureDeployment.deployment.outputs.vmArtPriIp.value }}"
ansible_user: "ubuntu"
ansible_ssh_common_args: '-o ProxyCommand="ssh -o StrictHostKeyChecking=no -A ubuntu@{{ azureDeployment.deployment.outputs.lbIp.value }} -W %h:%p"'
db_url: "jdbc:postgresql://{{ azureDeployment.deployment.outputs.vmDbArrIp.value[0] }}:5432/artifactory"
server_name: "rt.{{ azureDeployment.deployment.outputs.lbIp.value }}.xip.io"
artifactory_is_primary: true
artifactory_license1: "{{ lookup('env', 'artifactory_license1') }}"
artifactory_license2: "{{ lookup('env', 'artifactory_license2') }}"
artifactory_license3: "{{ lookup('env', 'artifactory_license3') }}"
groups:
- artifactory
- name: Add RT secondaries to host group
add_host:
hostname: "{{ item }}"
ansible_user: "ubuntu"
ansible_ssh_common_args: '-o ProxyCommand="ssh -o StrictHostKeyChecking=no -A ubuntu@{{ azureDeployment.deployment.outputs.lbIp.value }} -W %h:%p"'
db_url: "jdbc:postgresql://{{ azureDeployment.deployment.outputs.vmDbArrIp.value[0] }}:5432/artifactory"
server_name: "rt.{{ azureDeployment.deployment.outputs.lbIp.value }}.xip.io"
artifactory_is_primary: false
groups:
- artifactory
loop: "{{ azureDeployment.deployment.outputs.vmArtSecArrIp.value }}"
- name: Add xrays to host group
add_host:
hostname: "{{ item }}"
ansible_user: "ubuntu"
ansible_ssh_common_args: '-o ProxyCommand="ssh -o StrictHostKeyChecking=no -A ubuntu@{{ azureDeployment.deployment.outputs.lbIp.value }} -W %h:%p"'
jfrog_url: "http://rt.{{ azureDeployment.deployment.outputs.lbIp.value }}.xip.io"
master_key: "{{ lookup('env', 'master_key') }}"
join_key: "{{ lookup('env', 'join_key') }}"
db_type: "postgresql"
db_driver: "org.postgresql.Driver"
db_user: "xray"
db_password: "xray"
db_url: "postgres://{{ azureDeployment.deployment.outputs.vmDbArrIp.value[0] }}:5432/xraydb?sslmode=disable"
groups: xray
loop: "{{ azureDeployment.deployment.outputs.vmXrayArrIp.value }}"
- name: Add DBs to host group
add_host:
hostname: "{{ item }}"
ansible_user: "ubuntu"
ansible_ssh_common_args: '-o ProxyCommand="ssh -o StrictHostKeyChecking=no -A ubuntu@{{ azureDeployment.deployment.outputs.lbIp.value }} -W %h:%p"'
db_users:
- { db_user: "artifactory", db_password: "Art1fAct0ry" }
- { db_user: "xray", db_password: "xray" }
dbs:
- { db_name: "artifactory", db_owner: "artifactory" }
- { db_name: "xraydb", db_owner: "xray" }
groups: database
loop: "{{ azureDeployment.deployment.outputs.vmDbArrIp.value }}"
- name: Set up test environment url
replace:
path: ../tests/src/test/resources/testenv.yaml
regexp: 'urlval'
replace: "http://rt.{{ azureDeployment.deployment.outputs.lbIp.value }}.xip.io"
- name: Set up test environment external_ip
replace:
path: ../tests/src/test/resources/testenv.yaml
regexp: 'ipval'
replace: "{{ azureDeployment.deployment.outputs.lbIp.value }}"
- name: Set up test environment rt_password
replace:
path: ../tests/src/test/resources/testenv.yaml
regexp: 'passval'
replace: "password"
- name: show testenv.yaml
debug: var=item
with_file:
- ../tests/src/test/resources/testenv.yaml
- name: Wait 300 seconds for port 22
wait_for:
port: 22
host: "{{ azureDeployment.deployment.outputs.lbIp.value }}"
delay: 10
- debug:
msg: "Unified URL is at http://rt.{{ azureDeployment.deployment.outputs.lbIp.value }}.xip.io"
- hosts: database
roles:
- postgres
- hosts: artifactory
vars:
artifactory_ha_enabled: true
master_key: "{{ lookup('env', 'master_key') }}"
join_key: "{{ lookup('env', 'join_key') }}"
db_download_url: "https://jdbc.postgresql.org/download/postgresql-42.2.12.jar"
db_type: "postgresql"
db_driver: "org.postgresql.Driver"
db_user: "artifactory"
db_password: "Art1fAct0ry"
roles:
- artifactory
- hosts: xray
roles:
- xray
- name: Test
hosts: localhost
tasks:
- name: Run tests
shell:
cmd: ./gradlew clean unified_test
chdir: ../tests/
- name: Cleanup and delete resource group
azure_rm_resourcegroup:
name: "{{ lookup('env', 'azure_resource_group') }}"
force_delete_nonempty: yes
state: absent
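The Azure variant reads azure_resource_group, arm_template, ssh_public_key, master_key, join_key, and the license variables from the environment. A matching sketch, again with placeholder values:

```
# Placeholder values; arm_template must be a URL reachable as template_link.
export azure_resource_group='ansible-rt-test'
export arm_template='https://example.com/azuredeploy.json'
export ssh_public_key="$(cat ~/.ssh/id_rsa.pub)"
export master_key=$(openssl rand -hex 16)
export join_key=$(openssl rand -hex 16)
export artifactory_license1='<license-1>'   # likewise artifactory_license2/3
./Ansible/test/azure/runAzure.sh
```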

3
Ansible/test/azure/runAzure.sh Executable file

@@ -0,0 +1,3 @@
#!/usr/bin/env bash
ansible-playbook Ansible/test/azure/playbook.yaml

19
Ansible/test/tests/README.md Executable file

@@ -0,0 +1,19 @@
## Test framework
### How to run it locally
```
./gradlew clean commonTests
```
### Adding new tests
### Gradle cleanup. Delete the folder:
```
~/.gradle/caches/
./gradlew clean
```
### Or run
```
./gradlew clean
```

Some files were not shown because too many files have changed in this diff.