diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..1e6e65c --- /dev/null +++ b/.gitignore @@ -0,0 +1,8 @@ +.molecule +*.log +*.swp +.tox +./idea +.idea/ +.DS_Store + diff --git a/Ansible/collection/.ansible-lint b/Ansible/collection/.ansible-lint new file mode 100644 index 0000000..a59f903 --- /dev/null +++ b/Ansible/collection/.ansible-lint @@ -0,0 +1,8 @@ +# +# Ansible managed +# +exclude_paths: + - ./meta/version.yml + - ./meta/exception.yml + - ./meta/preferences.yml + - ./molecule/default/verify.yml diff --git a/Ansible/collection/.yamllint b/Ansible/collection/.yamllint new file mode 100644 index 0000000..c5ae64b --- /dev/null +++ b/Ansible/collection/.yamllint @@ -0,0 +1,12 @@ +--- +extends: default + +rules: + braces: + max-spaces-inside: 1 + level: error + brackets: + max-spaces-inside: 1 + level: error + line-length: disable + truthy: disable diff --git a/Ansible/collection/README.md b/Ansible/collection/README.md new file mode 100644 index 0000000..edae0e5 --- /dev/null +++ b/Ansible/collection/README.md @@ -0,0 +1,84 @@ +# Ansible +This repo contains the Ansible collection for JFrog roles. These roles allow you to provision Artifactory for High-Availability using a Primary node and multiple Secondary nodes. Additionally, a Postgresql role is provided for installing an Artifactory Postgresql database. + +## Roles Provided +### artifactory +The artifactory role installs the Artifactory Pro software onto the host. Per the Vars below, it will configure a node as primary or secondary. This role uses the secondary role artifactory-nginx to install nginx. + +### artifactory-nginx-ssl +The artifactory-nginx-ssl role installs and configures nginx for SSL. + +### postgres +The postgres role will install Postgresql software and configure a database and user to support an Artifactory or Xray server. + +### xray +The xray role will install Xray software onto the host. An Artifactory server and Postgres database is required. 
+ +## Vars Required +The following Vars must be configured. + +### database vars +* db_users: This is a list of database users to create. eg. db_users: - { db_user: "artifactory", db_password: "Art1fAct0ry" } +* dbs: This is the database to create. eg. dbs: - { db_name: "artifactory", db_owner: "artifactory" } + +### artifactory vars +* artifactory_version: The version of Artifactory to install. eg. "7.4.1" +* master_key: This is the Artifactory Master Key. +* join_key: This is the Artifactory Join Key. +* db_download_url: This is the download URL for the JDBC driver for your database. eg. "https://jdbc.postgresql.org/download/postgresql-42.2.12.jar" +* db_type: This is the database type. eg. "postgresql" +* db_driver: This is the JDBC driver class. eg. "org.postgresql.Driver" +* db_url: This is the JDBC database url. eg. "jdbc:postgresql://10.0.0.120:5432/artifactory" +* db_user: The database user to configure. eg. "artifactory" +* db_password: The database password to configure. eg. "Art1fact0ry" +* server_name: This is the server name. eg. "artifactory.54.175.51.178.xip.io" + +### primary vars +* artifactory_is_primary: For the primary node this must be set to **true**. +* artifactory_license1 - 5: These are the cluster licenses. + +### secondary vars +* artifactory_is_primary: For the secondary node(s) this must be set to **false**. + +### ssl vars (Used with artifactory-nginx-ssl role) +* certificate: This is the SSL cert. +* certificate_key: This is the SSL private key. + +### xray vars +* xray_version: The version of Xray to install. eg. "3.3.0" +* jfrog_url: This is the URL to the Artifactory base URL. eg. "http://ec2-54-237-207-135.compute-1.amazonaws.com" +* master_key: This is the Artifactory Master Key. +* join_key: This is the Artifactory Join Key. +* db_type: This is the database type. eg. "postgresql" +* db_driver: This is the JDBC driver class. eg. "org.postgresql.Driver" +* db_url: This is the database url. eg. 
"postgres://10.0.0.59:5432/xraydb?sslmode=disable" +* db_user: The database user to configure. eg. "xray" +* db_password: The database password to configure. eg. "xray" + +## Example Inventory and Playbooks +Example playbooks are located in the [project](../project) directory. This directory contains several example inventory and playbooks for different Artifactory, HA and Xray architectures. + +## Executing a Playbook +``` +ansible-playbook -i <inventory_file> <playbook_file> + +eg. + ansible-playbook -i example-playbooks/rt-xray-ha/hosts.yml example-playbooks/rt-xray-ha/playbook.yml +``` + +## Autogenerating Master and Join Keys +You may want to auto-generate your master and join keys and apply it to all the nodes. + +``` +ansible-playbook -i hosts.yml playbook.yml --extra-vars "master_key=$(openssl rand -hex 16) join_key=$(openssl rand -hex 16)" +``` + +## Bastion Hosts +In many cases, you may want to run this Ansible collection through a Bastion host to provision JFrog servers. You can include the following Var for a host or group of hosts: + +``` +ansible_ssh_common_args: '-o ProxyCommand="ssh -o StrictHostKeyChecking=no -A user@host -W %h:%p"' + +eg. +ansible_ssh_common_args: '-o ProxyCommand="ssh -o StrictHostKeyChecking=no -A ubuntu@{{ azureDeployment.deployment.outputs.lbIp.value }} -W %h:%p"' +``` \ No newline at end of file diff --git a/Ansible/collection/jfrog/ansible/galaxy.yml b/Ansible/collection/jfrog/ansible/galaxy.yml new file mode 100644 index 0000000..2b44148 --- /dev/null +++ b/Ansible/collection/jfrog/ansible/galaxy.yml @@ -0,0 +1,57 @@ +### REQUIRED + +# The namespace of the collection. This can be a company/brand/organization or product namespace under which all +# content lives. May only contain alphanumeric lowercase characters and underscores. Namespaces cannot start with +# underscores or numbers and cannot contain consecutive underscores +namespace: jfrog + +# The name of the collection. 
Has the same character restrictions as 'namespace' +name: ansible + +# The version of the collection. Must be compatible with semantic versioning +version: 1.0.0 + +# The path to the Markdown (.md) readme file. This path is relative to the root of the collection +readme: README.md + +# A list of the collection's content authors. Can be just the name or in the format 'Full Name (url) +# @nicks:irc/im.site#channel' +authors: +- your name + + +### OPTIONAL but strongly recommended + +# A short summary description of the collection +description: your collection description + +# Either a single license or a list of licenses for content inside of a collection. Ansible Galaxy currently only +# accepts L(SPDX,https://spdx.org/licenses/) licenses. This key is mutually exclusive with 'license_file' +license: +- GPL-2.0-or-later + +# The path to the license file for the collection. This path is relative to the root of the collection. This key is +# mutually exclusive with 'license' +license_file: '' + +# A list of tags you want to associate with the collection for indexing/searching. A tag name has the same character +# requirements as 'namespace' and 'name' +tags: [] + +# Collections that this collection requires to be installed for it to be usable. The key of the dict is the +# collection label 'namespace.name'. The value is a version range +# L(specifiers,https://python-semanticversion.readthedocs.io/en/latest/#requirement-specification). 
Multiple version +# range specifiers can be set and are separated by ',' +dependencies: {} + +# The URL of the originating SCM repository +repository: http://example.com/repository + +# The URL to any online docs +documentation: http://docs.example.com + +# The URL to the homepage of the collection/project +homepage: http://example.com + +# The URL to the collection issue tracker +issues: http://example.com/issue/tracker diff --git a/Ansible/collection/jfrog/ansible/plugins/README.md b/Ansible/collection/jfrog/ansible/plugins/README.md new file mode 100644 index 0000000..6541cf7 --- /dev/null +++ b/Ansible/collection/jfrog/ansible/plugins/README.md @@ -0,0 +1,31 @@ +# Collections Plugins Directory + +This directory can be used to ship various plugins inside an Ansible collection. Each plugin is placed in a folder that +is named after the type of plugin it is in. It can also include the `module_utils` and `modules` directory that +would contain module utils and modules respectively. + +Here is an example directory of the majority of plugins currently supported by Ansible: + +``` +└── plugins + ├── action + ├── become + ├── cache + ├── callback + ├── cliconf + ├── connection + ├── filter + ├── httpapi + ├── inventory + ├── lookup + ├── module_utils + ├── modules + ├── netconf + ├── shell + ├── strategy + ├── terminal + ├── test + └── vars +``` + +A full list of plugin types can be found at [Working With Plugins](https://docs.ansible.com/ansible/2.9/plugins/plugins.html). 
\ No newline at end of file diff --git a/Ansible/collection/jfrog/ansible/roles/artifactory-nginx-ssl/.travis.yml b/Ansible/collection/jfrog/ansible/roles/artifactory-nginx-ssl/.travis.yml new file mode 100644 index 0000000..36bbf62 --- /dev/null +++ b/Ansible/collection/jfrog/ansible/roles/artifactory-nginx-ssl/.travis.yml @@ -0,0 +1,29 @@ +--- +language: python +python: "2.7" + +# Use the new container infrastructure +sudo: false + +# Install ansible +addons: + apt: + packages: + - python-pip + +install: + # Install ansible + - pip install ansible + + # Check ansible version + - ansible --version + + # Create ansible.cfg with correct roles_path + - printf '[defaults]\nroles_path=../' >ansible.cfg + +script: + # Basic role syntax check + - ansible-playbook tests/test.yml -i tests/inventory --syntax-check + +notifications: + webhooks: https://galaxy.ansible.com/api/v1/notifications/ \ No newline at end of file diff --git a/Ansible/collection/jfrog/ansible/roles/artifactory-nginx-ssl/defaults/main.yml b/Ansible/collection/jfrog/ansible/roles/artifactory-nginx-ssl/defaults/main.yml new file mode 100644 index 0000000..6b28347 --- /dev/null +++ b/Ansible/collection/jfrog/ansible/roles/artifactory-nginx-ssl/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for artifactory-nginx \ No newline at end of file diff --git a/Ansible/collection/jfrog/ansible/roles/artifactory-nginx-ssl/handlers/main.yml b/Ansible/collection/jfrog/ansible/roles/artifactory-nginx-ssl/handlers/main.yml new file mode 100644 index 0000000..d212386 --- /dev/null +++ b/Ansible/collection/jfrog/ansible/roles/artifactory-nginx-ssl/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for artifactory-nginx \ No newline at end of file diff --git a/Ansible/collection/jfrog/ansible/roles/artifactory-nginx-ssl/meta/main.yml b/Ansible/collection/jfrog/ansible/roles/artifactory-nginx-ssl/meta/main.yml new file mode 100644 index 0000000..227ad9c --- /dev/null +++ 
b/Ansible/collection/jfrog/ansible/roles/artifactory-nginx-ssl/meta/main.yml @@ -0,0 +1,53 @@ +galaxy_info: + author: your name + description: your role description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.9 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. 
+ \ No newline at end of file diff --git a/Ansible/collection/jfrog/ansible/roles/artifactory-nginx-ssl/tasks/main.yml b/Ansible/collection/jfrog/ansible/roles/artifactory-nginx-ssl/tasks/main.yml new file mode 100644 index 0000000..ba37c53 --- /dev/null +++ b/Ansible/collection/jfrog/ansible/roles/artifactory-nginx-ssl/tasks/main.yml @@ -0,0 +1,41 @@ +--- +# tasks file for artifactory-nginx +- name: configure the artifactory nginx conf + template: + src: artifactory.conf.j2 + dest: /etc/nginx/conf.d/artifactory.conf + owner: root + group: root + mode: '0755' + become: yes + +- name: ensure nginx dir exists + file: + path: "/var/opt/jfrog/nginx/ssl" + state: directory + become: yes + +- name: configure certificate + template: + src: certificate.pem.j2 + dest: "/var/opt/jfrog/nginx/ssl/cert.pem" + become: yes + +- name: ensure pki exists + file: + path: "/etc/pki/tls" + state: directory + become: yes + +- name: configure key + template: + src: certificate.key.j2 + dest: "/etc/pki/tls/cert.key" + become: yes + +- name: restart nginx + service: + name: nginx + state: restarted + enabled: yes + become: yes \ No newline at end of file diff --git a/Ansible/collection/jfrog/ansible/roles/artifactory-nginx-ssl/templates/artifactory.conf.j2 b/Ansible/collection/jfrog/ansible/roles/artifactory-nginx-ssl/templates/artifactory.conf.j2 new file mode 100644 index 0000000..315a601 --- /dev/null +++ b/Ansible/collection/jfrog/ansible/roles/artifactory-nginx-ssl/templates/artifactory.conf.j2 @@ -0,0 +1,48 @@ +########################################################### +## this configuration was generated by JFrog Artifactory ## + ########################################################### + + ## add HA entries when ha is configure + upstream artifactory { + server 127.0.0.1:8082; +} + upstream artifactory-direct { + server 127.0.0.1:8081; +} + ssl_protocols TLSv1.1 TLSv1.2; + ssl_certificate /var/opt/jfrog/nginx/ssl/cert.pem; + ssl_certificate_key /etc/pki/tls/cert.key; + 
ssl_session_cache shared:SSL:1m; + ssl_prefer_server_ciphers on; + ## server configuration + server { + listen 443 ssl http2; + server_name {{ server_name }}; + if ($http_x_forwarded_proto = '') { + set $http_x_forwarded_proto $scheme; + } + ## Application specific logs + access_log /var/log/nginx/artifactory-access.log; + error_log /var/log/nginx/artifactory-error.log; + rewrite ^/$ /ui/ redirect; + rewrite ^/ui$ /ui/ redirect; + chunked_transfer_encoding on; + client_max_body_size 0; + location / { + proxy_read_timeout 2400s; + proxy_pass_header Server; + proxy_cookie_path ~*^/.* /; + proxy_pass "http://artifactory"; + proxy_next_upstream error timeout non_idempotent; + proxy_next_upstream_tries 1; + proxy_set_header X-JFrog-Override-Base-Url $http_x_forwarded_proto://$host:$server_port; + proxy_set_header X-Forwarded-Port $server_port; + proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto; + proxy_set_header Host $http_host; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + + location ~ ^/artifactory/ { + proxy_pass http://artifactory-direct; + } + } +} \ No newline at end of file diff --git a/Ansible/collection/jfrog/ansible/roles/artifactory-nginx-ssl/templates/certificate.key.j2 b/Ansible/collection/jfrog/ansible/roles/artifactory-nginx-ssl/templates/certificate.key.j2 new file mode 100644 index 0000000..2c46be0 --- /dev/null +++ b/Ansible/collection/jfrog/ansible/roles/artifactory-nginx-ssl/templates/certificate.key.j2 @@ -0,0 +1,4 @@ +{% set cert = certificate_key.split('|') %} +{% for line in cert %} +{{ line }} +{% endfor %} \ No newline at end of file diff --git a/Ansible/collection/jfrog/ansible/roles/artifactory-nginx-ssl/templates/certificate.pem.j2 b/Ansible/collection/jfrog/ansible/roles/artifactory-nginx-ssl/templates/certificate.pem.j2 new file mode 100644 index 0000000..71e936d --- /dev/null +++ b/Ansible/collection/jfrog/ansible/roles/artifactory-nginx-ssl/templates/certificate.pem.j2 @@ -0,0 +1,4 @@ +{% set cert = 
certificate.split('|') %} +{% for line in cert %} +{{ line }} +{% endfor %} \ No newline at end of file diff --git a/Ansible/collection/jfrog/ansible/roles/artifactory-nginx-ssl/tests/inventory b/Ansible/collection/jfrog/ansible/roles/artifactory-nginx-ssl/tests/inventory new file mode 100644 index 0000000..878877b --- /dev/null +++ b/Ansible/collection/jfrog/ansible/roles/artifactory-nginx-ssl/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/Ansible/collection/jfrog/ansible/roles/artifactory-nginx-ssl/tests/test.yml b/Ansible/collection/jfrog/ansible/roles/artifactory-nginx-ssl/tests/test.yml new file mode 100644 index 0000000..7560bbb --- /dev/null +++ b/Ansible/collection/jfrog/ansible/roles/artifactory-nginx-ssl/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - artifactory-nginx \ No newline at end of file diff --git a/Ansible/collection/jfrog/ansible/roles/artifactory-nginx-ssl/vars/main.yml b/Ansible/collection/jfrog/ansible/roles/artifactory-nginx-ssl/vars/main.yml new file mode 100644 index 0000000..7465197 --- /dev/null +++ b/Ansible/collection/jfrog/ansible/roles/artifactory-nginx-ssl/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for artifactory-nginx \ No newline at end of file diff --git a/Ansible/collection/jfrog/ansible/roles/artifactory-nginx/.travis.yml b/Ansible/collection/jfrog/ansible/roles/artifactory-nginx/.travis.yml new file mode 100644 index 0000000..36bbf62 --- /dev/null +++ b/Ansible/collection/jfrog/ansible/roles/artifactory-nginx/.travis.yml @@ -0,0 +1,29 @@ +--- +language: python +python: "2.7" + +# Use the new container infrastructure +sudo: false + +# Install ansible +addons: + apt: + packages: + - python-pip + +install: + # Install ansible + - pip install ansible + + # Check ansible version + - ansible --version + + # Create ansible.cfg with correct roles_path + - printf '[defaults]\nroles_path=../' >ansible.cfg + +script: + # Basic role syntax check + - ansible-playbook 
tests/test.yml -i tests/inventory --syntax-check + +notifications: + webhooks: https://galaxy.ansible.com/api/v1/notifications/ \ No newline at end of file diff --git a/Ansible/collection/jfrog/ansible/roles/artifactory-nginx/defaults/main.yml b/Ansible/collection/jfrog/ansible/roles/artifactory-nginx/defaults/main.yml new file mode 100644 index 0000000..6b28347 --- /dev/null +++ b/Ansible/collection/jfrog/ansible/roles/artifactory-nginx/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for artifactory-nginx \ No newline at end of file diff --git a/Ansible/collection/jfrog/ansible/roles/artifactory-nginx/files/nginx.conf b/Ansible/collection/jfrog/ansible/roles/artifactory-nginx/files/nginx.conf new file mode 100644 index 0000000..19f9422 --- /dev/null +++ b/Ansible/collection/jfrog/ansible/roles/artifactory-nginx/files/nginx.conf @@ -0,0 +1,37 @@ +#user nobody; +worker_processes 1; +error_log /var/log/nginx/error.log info; +#pid logs/nginx.pid; +events { + worker_connections 1024; +} +http { + include mime.types; + variables_hash_max_size 1024; + variables_hash_bucket_size 64; + server_names_hash_max_size 4096; + server_names_hash_bucket_size 128; + types_hash_max_size 2048; + types_hash_bucket_size 64; + proxy_read_timeout 2400s; + client_header_timeout 2400s; + client_body_timeout 2400s; + proxy_connect_timeout 75s; + proxy_send_timeout 2400s; + proxy_buffer_size 32k; + proxy_buffers 40 32k; + proxy_busy_buffers_size 64k; + proxy_temp_file_write_size 250m; + proxy_http_version 1.1; + client_body_buffer_size 128k; + include /etc/nginx/conf.d/*.conf; + default_type application/octet-stream; + log_format main '$remote_addr - $remote_user [$time_local] "$request" ' +'$status $body_bytes_sent "$http_referer" ' +'"$http_user_agent" "$http_x_forwarded_for"'; + access_log /var/log/nginx/access.log main; + sendfile on; + #tcp_nopush on; + #keepalive_timeout 0; + keepalive_timeout 65; +} \ No newline at end of file diff --git 
a/Ansible/collection/jfrog/ansible/roles/artifactory-nginx/handlers/main.yml b/Ansible/collection/jfrog/ansible/roles/artifactory-nginx/handlers/main.yml new file mode 100644 index 0000000..d212386 --- /dev/null +++ b/Ansible/collection/jfrog/ansible/roles/artifactory-nginx/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for artifactory-nginx \ No newline at end of file diff --git a/Ansible/collection/jfrog/ansible/roles/artifactory-nginx/meta/main.yml b/Ansible/collection/jfrog/ansible/roles/artifactory-nginx/meta/main.yml new file mode 100644 index 0000000..227ad9c --- /dev/null +++ b/Ansible/collection/jfrog/ansible/roles/artifactory-nginx/meta/main.yml @@ -0,0 +1,53 @@ +galaxy_info: + author: your name + description: your role description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.9 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. 
Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. + \ No newline at end of file diff --git a/Ansible/collection/jfrog/ansible/roles/artifactory-nginx/tasks/main.yml b/Ansible/collection/jfrog/ansible/roles/artifactory-nginx/tasks/main.yml new file mode 100644 index 0000000..5146b14 --- /dev/null +++ b/Ansible/collection/jfrog/ansible/roles/artifactory-nginx/tasks/main.yml @@ -0,0 +1,35 @@ +--- +- name: install nginx + package: + name: nginx + state: present + register: package_res + retries: 5 + delay: 60 + become: yes + until: package_res is success + +- name: configure main nginx conf file. + copy: + src: nginx.conf + dest: /etc/nginx/nginx.conf + owner: root + group: root + mode: '0755' + become: yes + +- name: configure the artifactory nginx conf + template: + src: artifactory.conf.j2 + dest: /etc/nginx/conf.d/artifactory.conf + owner: root + group: root + mode: '0755' + become: yes + +- name: restart nginx + service: + name: nginx + state: restarted + enabled: yes + become: yes diff --git a/Ansible/collection/jfrog/ansible/roles/artifactory-nginx/templates/artifactory.conf.j2 b/Ansible/collection/jfrog/ansible/roles/artifactory-nginx/templates/artifactory.conf.j2 new file mode 100644 index 0000000..58280d9 --- /dev/null +++ b/Ansible/collection/jfrog/ansible/roles/artifactory-nginx/templates/artifactory.conf.j2 @@ -0,0 +1,43 @@ +########################################################### +## this configuration was generated by JFrog Artifactory ## + ########################################################### + + ## add HA entries when ha is configure + upstream artifactory { + server 127.0.0.1:8082; +} + upstream artifactory-direct { + server 127.0.0.1:8081; +} + ## server 
configuration + server { + listen 80 ; + server_name {{ server_name }}; + if ($http_x_forwarded_proto = '') { + set $http_x_forwarded_proto $scheme; + } + ## Application specific logs + access_log /var/log/nginx/artifactory-access.log; + error_log /var/log/nginx/artifactory-error.log; + rewrite ^/$ /ui/ redirect; + rewrite ^/ui$ /ui/ redirect; + chunked_transfer_encoding on; + client_max_body_size 0; + location / { + proxy_read_timeout 2400s; + proxy_pass_header Server; + proxy_cookie_path ~*^/.* /; + proxy_pass "http://artifactory"; + proxy_next_upstream error timeout non_idempotent; + proxy_next_upstream_tries 1; + proxy_set_header X-JFrog-Override-Base-Url $http_x_forwarded_proto://$host:$server_port; + proxy_set_header X-Forwarded-Port $server_port; + proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto; + proxy_set_header Host $http_host; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + + location ~ ^/artifactory/ { + proxy_pass http://artifactory-direct; + } + } +} \ No newline at end of file diff --git a/Ansible/collection/jfrog/ansible/roles/artifactory-nginx/tests/inventory b/Ansible/collection/jfrog/ansible/roles/artifactory-nginx/tests/inventory new file mode 100644 index 0000000..878877b --- /dev/null +++ b/Ansible/collection/jfrog/ansible/roles/artifactory-nginx/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/Ansible/collection/jfrog/ansible/roles/artifactory-nginx/tests/test.yml b/Ansible/collection/jfrog/ansible/roles/artifactory-nginx/tests/test.yml new file mode 100644 index 0000000..7560bbb --- /dev/null +++ b/Ansible/collection/jfrog/ansible/roles/artifactory-nginx/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - artifactory-nginx \ No newline at end of file diff --git a/Ansible/collection/jfrog/ansible/roles/artifactory-nginx/vars/main.yml b/Ansible/collection/jfrog/ansible/roles/artifactory-nginx/vars/main.yml new file mode 100644 index 0000000..7465197 --- /dev/null +++ 
b/Ansible/collection/jfrog/ansible/roles/artifactory-nginx/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for artifactory-nginx \ No newline at end of file diff --git a/Ansible/collection/jfrog/ansible/roles/artifactory/.travis.yml b/Ansible/collection/jfrog/ansible/roles/artifactory/.travis.yml new file mode 100644 index 0000000..36bbf62 --- /dev/null +++ b/Ansible/collection/jfrog/ansible/roles/artifactory/.travis.yml @@ -0,0 +1,29 @@ +--- +language: python +python: "2.7" + +# Use the new container infrastructure +sudo: false + +# Install ansible +addons: + apt: + packages: + - python-pip + +install: + # Install ansible + - pip install ansible + + # Check ansible version + - ansible --version + + # Create ansible.cfg with correct roles_path + - printf '[defaults]\nroles_path=../' >ansible.cfg + +script: + # Basic role syntax check + - ansible-playbook tests/test.yml -i tests/inventory --syntax-check + +notifications: + webhooks: https://galaxy.ansible.com/api/v1/notifications/ \ No newline at end of file diff --git a/Ansible/collection/jfrog/ansible/roles/artifactory/defaults/main.yml b/Ansible/collection/jfrog/ansible/roles/artifactory/defaults/main.yml new file mode 100644 index 0000000..dd8cac9 --- /dev/null +++ b/Ansible/collection/jfrog/ansible/roles/artifactory/defaults/main.yml @@ -0,0 +1,51 @@ +--- +# defaults file for artifactory +# indicates where this collection was downloaded from (galaxy, automation_hub, standalone) +ansible_marketplace: standalone + +# The version of Artifactory to install +artifactory_version: 7.4.1 + +# licenses +artifactory_license1: +artifactory_license2: +artifactory_license3: +artifactory_license4: +artifactory_license5: + +# whether to enable HA +artifactory_ha_enabled: true + +# value for whether a host is primary. this should be set in host vars +artifactory_is_primary: true + +# The location where Artifactory should install. +artifactory_download_directory: /opt/jfrog + +# The location where Artifactory should store data. 
+artifactory_file_store_dir: /data + +# Pick the Artifactory flavour to install, can be also cpp-ce, jcr, pro. +# note that for "pro" version, the artifactory_zip URL would need to be overridden to e.g.: +# https://dl.bintray.com/jfrog/artifactory-pro/org/artifactory/pro/jfrog-artifactory-pro/{{ artifactory_version }}/jfrog-artifactory-pro-{{ artifactory_version }}.zip +# https://dl.bintray.com/jfrog/artifactory-pro/org/artifactory/pro/jfrog-artifactory-pro/{{ artifactory_version }}/jfrog-artifactory-pro-{{ artifactory_version }}-linux.tar.gz +artifactory_flavour: pro + +extra_java_opts: -server -Xms2g -Xmx14g -Xss256k -XX:+UseG1GC + +artifactory_tar: https://dl.bintray.com/jfrog/artifactory-pro/org/artifactory/pro/jfrog-artifactory-pro/{{ artifactory_version }}/jfrog-artifactory-pro-{{ artifactory_version }}-linux.tar.gz +artifactory_home: "{{ artifactory_download_directory }}/artifactory-{{ artifactory_flavour }}-{{ artifactory_version }}" + +artifactory_user: artifactory +artifactory_group: artifactory + +# Set the parameters required for the service. 
+service_list: + - name: artifactory + description: Start script for Artifactory + start_command: "{{ artifactory_home }}/bin/artifactory.sh start" + stop_command: "{{ artifactory_home }}/bin/artifactory.sh stop" + type: forking + status_pattern: artifactory + user_name: "{{ artifactory_user }}" + group_name: "{{ artifactory_group }}" diff --git a/Ansible/collection/jfrog/ansible/roles/artifactory/handlers/main.yml b/Ansible/collection/jfrog/ansible/roles/artifactory/handlers/main.yml new file mode 100644 index 0000000..6f8fcda --- /dev/null +++ b/Ansible/collection/jfrog/ansible/roles/artifactory/handlers/main.yml @@ -0,0 +1,10 @@ +--- +# handlers file for artifactory +- name: systemctl daemon-reload + systemd: + daemon_reload: yes + +- name: restart artifactory + service: + name: artifactory + state: restarted diff --git a/Ansible/collection/jfrog/ansible/roles/artifactory/meta/exception.yml b/Ansible/collection/jfrog/ansible/roles/artifactory/meta/exception.yml new file mode 100644 index 0000000..7de46df --- /dev/null +++ b/Ansible/collection/jfrog/ansible/roles/artifactory/meta/exception.yml @@ -0,0 +1,6 @@ +--- +exceptions: + - variation: Alpine + reason: Artifactory start/stop scripts don't properly work. + - variation: amazonlinux:1 + reason: "Shutting down artifactory: /usr/bin/java\nfinding\nUsing the default catalina management port (8015) to test shutdown\nArtifactory Tomcat already stopped" diff --git a/Ansible/collection/jfrog/ansible/roles/artifactory/meta/main.yml b/Ansible/collection/jfrog/ansible/roles/artifactory/meta/main.yml new file mode 100644 index 0000000..0dc573a --- /dev/null +++ b/Ansible/collection/jfrog/ansible/roles/artifactory/meta/main.yml @@ -0,0 +1,35 @@ +--- +galaxy_info: + author: Robert de Bock + role_name: artifactory + description: Install and configure artifactory on your system. 
+ license: Apache-2.0 + company: none + min_ansible_version: 2.8 + + platforms: + - name: Debian + versions: + - all + - name: EL + versions: + - 7 + - 8 + - name: Fedora + versions: + - all + - name: OpenSUSE + versions: + - all + - name: Ubuntu + versions: + - bionic + + galaxy_tags: + - artifactory + - centos + - redhat + - server + - system + +dependencies: [] diff --git a/Ansible/collection/jfrog/ansible/roles/artifactory/meta/preferences.yml b/Ansible/collection/jfrog/ansible/roles/artifactory/meta/preferences.yml new file mode 100644 index 0000000..e7fdebf --- /dev/null +++ b/Ansible/collection/jfrog/ansible/roles/artifactory/meta/preferences.yml @@ -0,0 +1,2 @@ +--- +tox_parallel: yes diff --git a/Ansible/collection/jfrog/ansible/roles/artifactory/meta/version.yml b/Ansible/collection/jfrog/ansible/roles/artifactory/meta/version.yml new file mode 100644 index 0000000..ea2ef8f --- /dev/null +++ b/Ansible/collection/jfrog/ansible/roles/artifactory/meta/version.yml @@ -0,0 +1,6 @@ +--- +project_name: JFrog +reference: "https://github.com/robertdebock/ansible-role-artifactory/blob/master/defaults/main.yml" +versions: + - name: Artifactory + url: "https://dl.bintray.com/jfrog/artifactory/" diff --git a/Ansible/collection/jfrog/ansible/roles/artifactory/tasks/main.yml b/Ansible/collection/jfrog/ansible/roles/artifactory/tasks/main.yml new file mode 100644 index 0000000..a23a047 --- /dev/null +++ b/Ansible/collection/jfrog/ansible/roles/artifactory/tasks/main.yml @@ -0,0 +1,137 @@ +--- +# tasks file for artifactory +- name: install nginx + include_role: + name: artifactory-nginx + +- name: create group for artifactory + group: + name: "{{ artifactory_group }}" + state: present + become: yes + +- name: create user for artifactory + user: + name: "{{ artifactory_user }}" + group: "{{ artifactory_group }}" + system: yes + become: yes + +- name: ensure artifactory_download_directory exists + file: + path: "{{ artifactory_download_directory }}" + state: directory + 
become: yes + +- name: download artifactory + unarchive: + src: "{{ artifactory_tar }}" + dest: "{{ artifactory_download_directory }}" + remote_src: yes + owner: "{{ artifactory_user }}" + group: "{{ artifactory_group }}" + creates: "{{ artifactory_home }}" + become: yes + register: downloadartifactory + until: downloadartifactory is succeeded + retries: 3 + +- name: ensure artifactory_file_store_dir exists + file: + path: "{{ artifactory_file_store_dir }}" + state: directory + owner: "{{ artifactory_user }}" + group: "{{ artifactory_group }}" + become: yes + +- name: ensure etc exists + file: + path: "{{ artifactory_home }}/var/etc" + state: directory + owner: "{{ artifactory_user }}" + group: "{{ artifactory_group }}" + become: yes + +- name: configure system yaml + template: + src: system.yaml.j2 + dest: "{{ artifactory_home }}/var/etc/system.yaml" + become: yes + +- name: ensure {{ artifactory_home }}/var/etc/security/ exists + file: + path: "{{ artifactory_home }}/var/etc/security/" + state: directory + owner: "{{ artifactory_user }}" + group: "{{ artifactory_group }}" + become: yes + +- name: configure master key + template: + src: master.key.j2 + dest: "{{ artifactory_home }}/var/etc/security/master.key" + become: yes + +- name: configure join key + template: + src: join.key.j2 + dest: "{{ artifactory_home }}/var/etc/security/join.key" + become: yes + +- name: ensure {{ artifactory_home }}/var/etc/info/ exists + file: + path: "{{ artifactory_home }}/var/etc/info/" + state: directory + owner: "{{ artifactory_user }}" + group: "{{ artifactory_group }}" + become: yes + +- name: configure installer info + template: + src: installer-info.json.j2 + dest: "{{ artifactory_home }}/var/etc/info/installer-info.json" + become: yes + +- name: configure binary store + template: + src: binarystore.xml.j2 + dest: "{{ artifactory_home }}/var/etc/binarystore.xml" + become: yes + +- name: configure cluster license + template: + src: artifactory.cluster.license.j2 + dest: "{{ 
artifactory_home }}/var/etc/artifactory/artifactory.cluster.license" + become: yes + when: artifactory_is_primary == true + +- name: download database driver + get_url: + url: "{{ db_download_url }}" + dest: "{{ artifactory_home }}/var/bootstrap/artifactory/tomcat/lib" + owner: "{{ artifactory_user }}" + group: "{{ artifactory_group }}" + become: yes + +- name: create artifactory service + shell: "{{ artifactory_home }}/app/bin/installService.sh" + become: yes + +- name: start and enable the primary node + service: + name: artifactory + state: restarted + become: yes + when: artifactory_is_primary == true + +- name: random wait before restarting to prevent secondary nodes from hitting DB first + pause: + seconds: "{{ 120 | random + 10}}" + when: artifactory_is_primary == false + +- name: start and enable the secondary nodes + service: + name: artifactory + state: restarted + become: yes + when: artifactory_is_primary == false diff --git a/Ansible/collection/jfrog/ansible/roles/artifactory/templates/artifactory.cluster.license.j2 b/Ansible/collection/jfrog/ansible/roles/artifactory/templates/artifactory.cluster.license.j2 new file mode 100644 index 0000000..3f674f6 --- /dev/null +++ b/Ansible/collection/jfrog/ansible/roles/artifactory/templates/artifactory.cluster.license.j2 @@ -0,0 +1,31 @@ +{% if artifactory_license1 %} +{% if artifactory_license1|length %} +{{ artifactory_license1 }} +{% endif %} +{% endif %} +{% if artifactory_license2 %} + + +{% if artifactory_license2|length %} +{{ artifactory_license2 }} +{% endif %} +{% endif %} +{% if artifactory_license3 %} + + +{% if artifactory_license3|length %} +{{ artifactory_license3 }} +{% endif %} +{% endif %} +{% if artifactory_license4 %} + +{% if artifactory_license4|length %} +{{ artifactory_license4 }} +{% endif %} +{% endif %} +{% if artifactory_license5 %} + +{% if artifactory_license5|length %} +{{ artifactory_license5 }} +{% endif %} +{% endif %} diff --git 
a/Ansible/collection/jfrog/ansible/roles/artifactory/templates/binarystore.xml.j2 b/Ansible/collection/jfrog/ansible/roles/artifactory/templates/binarystore.xml.j2 new file mode 100644 index 0000000..f85f16f --- /dev/null +++ b/Ansible/collection/jfrog/ansible/roles/artifactory/templates/binarystore.xml.j2 @@ -0,0 +1,4 @@ + + + + diff --git a/Ansible/collection/jfrog/ansible/roles/artifactory/templates/installer-info.json.j2 b/Ansible/collection/jfrog/ansible/roles/artifactory/templates/installer-info.json.j2 new file mode 100644 index 0000000..2d818d1 --- /dev/null +++ b/Ansible/collection/jfrog/ansible/roles/artifactory/templates/installer-info.json.j2 @@ -0,0 +1,8 @@ +{ + "productId": "Ansible_{{ ansible_marketplace }}_artifactory-pro-{{artifactory_version}}/1.0.0", + "features": [ + { + "featureId": "Partner/ACC-006973" + } + ] +} \ No newline at end of file diff --git a/Ansible/collection/jfrog/ansible/roles/artifactory/templates/join.key.j2 b/Ansible/collection/jfrog/ansible/roles/artifactory/templates/join.key.j2 new file mode 100644 index 0000000..17d05d2 --- /dev/null +++ b/Ansible/collection/jfrog/ansible/roles/artifactory/templates/join.key.j2 @@ -0,0 +1 @@ +{{ join_key }} \ No newline at end of file diff --git a/Ansible/collection/jfrog/ansible/roles/artifactory/templates/master.key.j2 b/Ansible/collection/jfrog/ansible/roles/artifactory/templates/master.key.j2 new file mode 100644 index 0000000..0462a64 --- /dev/null +++ b/Ansible/collection/jfrog/ansible/roles/artifactory/templates/master.key.j2 @@ -0,0 +1 @@ +{{ master_key }} \ No newline at end of file diff --git a/Ansible/collection/jfrog/ansible/roles/artifactory/templates/system.yaml.j2 b/Ansible/collection/jfrog/ansible/roles/artifactory/templates/system.yaml.j2 new file mode 100644 index 0000000..419a0c3 --- /dev/null +++ b/Ansible/collection/jfrog/ansible/roles/artifactory/templates/system.yaml.j2 @@ -0,0 +1,38 @@ +## @formatter:off +## JFROG ARTIFACTORY SYSTEM CONFIGURATION FILE +## HOW TO 
USE: comment-out any field and keep the correct yaml indentation by deleting only the leading '#' character. +configVersion: 1 + +## NOTE: JFROG_HOME is a place holder for the JFrog root directory containing the deployed product, the home directory for all JFrog products. +## Replace JFROG_HOME with the real path! For example, in RPM install, JFROG_HOME=/opt/jfrog + +## NOTE: Sensitive information such as passwords and join key are encrypted on first read. +## NOTE: The provided commented key and value is the default. + +## SHARED CONFIGURATIONS +## A shared section for keys across all services in this config +shared: + + ## Node Settings + node: + ## A unique id to identify this node. + ## Default: auto generated at startup. + id: {{ ansible_machine_id }} + + ## Sets this node as primary in HA installation + primary: {{ artifactory_is_primary }} + + ## Sets this node as part of HA installation + haEnabled: {{ artifactory_ha_enabled }} + + ## Database Configuration + database: + ## One of: mysql, oracle, mssql, postgresql, mariadb + ## Default: Embedded derby + + ## Example for mysql/postgresql + type: "{{ db_type }}" + driver: "{{ db_driver }}" + url: "{{ db_url }}" + username: "{{ db_user }}" + password: "{{ db_password }}" \ No newline at end of file diff --git a/Ansible/collection/jfrog/ansible/roles/artifactory/vars/main.yml b/Ansible/collection/jfrog/ansible/roles/artifactory/vars/main.yml new file mode 100644 index 0000000..cd21505 --- /dev/null +++ b/Ansible/collection/jfrog/ansible/roles/artifactory/vars/main.yml @@ -0,0 +1,2 @@ +--- + diff --git a/Ansible/collection/jfrog/ansible/roles/postgres/.travis.yml b/Ansible/collection/jfrog/ansible/roles/postgres/.travis.yml new file mode 100644 index 0000000..9d4d136 --- /dev/null +++ b/Ansible/collection/jfrog/ansible/roles/postgres/.travis.yml @@ -0,0 +1,30 @@ +--- +language: python + +services: + - docker + +env: + global: + - DEBUG=--debug + matrix: + - MOLECULE_DISTRO=centos7 MOLECULE_SCENARIO=default + - 
MOLECULE_DISTRO=centos7 MOLECULE_SCENARIO=version11 + # - MOLECULE_DISTRO: fedora27 + # - MOLECULE_DISTRO: fedora29 + - MOLECULE_DISTRO=ubuntu1604 MOLECULE_SCENARIO=default + - MOLECULE_DISTRO=ubuntu1604 MOLECULE_SCENARIO=version11 + - MOLECULE_DISTRO=ubuntu1804 MOLECULE_SCENARIO=default + - MOLECULE_DISTRO=ubuntu1804 MOLECULE_SCENARIO=version11 + # - MOLECULE_DISTRO: debian9 + +before_install: + - sudo apt-get -qq update + - sudo apt-get install -y net-tools +install: + - pip install molecule docker-py + +script: + - molecule --version + - ansible --version + - molecule $DEBUG test -s $MOLECULE_SCENARIO diff --git a/Ansible/collection/jfrog/ansible/roles/postgres/defaults/main.yml b/Ansible/collection/jfrog/ansible/roles/postgres/defaults/main.yml new file mode 100644 index 0000000..9f7a0ad --- /dev/null +++ b/Ansible/collection/jfrog/ansible/roles/postgres/defaults/main.yml @@ -0,0 +1,84 @@ +--- +# Put database into alternative location with a bind mount. +postgres_server_bind_mount_var_lib_pgsql: false + +# Where to put database. +postgres_server_bind_mount_var_lib_pgsql_target: "" + +# Default version of Postgres server to install. +postgres_server_version: "9.6" + +# Server version in package: +postgres_server_pkg_version: "{{ postgres_server_version|replace('.', '') }}" + +# Whether or not the files are on ZFS. +postgres_server_volume_is_zfs: false + +# Postgres setting max_connections. +postgres_server_max_connections: 100 + +# Postgres setting shared_buffers. +postgres_server_shared_buffers: 128MB + +# Postgres setting maintenance_work_mem. +postgres_server_maintenance_work_mem: 64MB + +# Postgres setting effective_io_concurrency. +postgres_server_effective_io_concurrency: 1 + +# Postgres setting max_worker_processes. +postgres_server_max_worker_processes: 8 + +# Postgres setting max_parallel_maintenance_workers. +postgres_server_max_parallel_maintenance_workers: 2 + +# Postgres setting max_parallel_workers_per_gather. 
+postgres_server_max_parallel_workers_per_gather: 2 + +# Postgres setting parallel_leader_participation. +postgres_server_parallel_leader_participation: true + +# Postgres setting max_parallel_workers. +postgres_server_max_parallel_workers: 8 + +# Postgres setting max_locks_per_transaction. +postgres_server_max_locks_per_transaction: 64 + +# Configuration for "random access" cost. +postgres_server_random_page_cost: "4.0" + +# User name that the postgres user runs as. +postgres_server_user: postgres + +# Whether or not to log checkpoints. +postgres_server_log_checkpoints: false + +# Whether or not to log connections. +postgres_server_log_connections: false + +# Whether or not to log disconnections. +postgres_server_log_disconnections: false + +# Whether or not to log duration. +postgres_server_log_duration: false + +# Error logging verbosity. +postgres_server_log_error_verbosity: "default" + +# Whether or not to log the host name. +postgres_server_log_hostname: false + +# Whether or not to log lock waits. +postgres_server_log_lock_waits: false + +# Which statements to log. +postgres_server_log_statements: "none" + +# Whether or not to enable the auto_explain module. +postgres_server_auto_explain_enabled: false + +# Minimal duration to log auto explain for. +postgres_server_auto_explain_log_min_duration: -1 + +# Whether or not to use EXPLAIN ANALYZE. 
+postgres_server_auto_explain_log_analyze: true diff --git a/Ansible/collection/jfrog/ansible/roles/postgres/handlers/main.yml b/Ansible/collection/jfrog/ansible/roles/postgres/handlers/main.yml new file mode 100644 index 0000000..5341b3d --- /dev/null +++ b/Ansible/collection/jfrog/ansible/roles/postgres/handlers/main.yml @@ -0,0 +1,4 @@ +--- + +- name: restart postgres + systemd: name={{ postgres_server_service_name }} state=restarted diff --git a/Ansible/collection/jfrog/ansible/roles/postgres/meta/main.yml b/Ansible/collection/jfrog/ansible/roles/postgres/meta/main.yml new file mode 100644 index 0000000..cc79dee --- /dev/null +++ b/Ansible/collection/jfrog/ansible/roles/postgres/meta/main.yml @@ -0,0 +1,25 @@ +--- + +galaxy_info: + role_name: postgres_server + author: Jeff Fry + description: Installation of Postgres for Artifactory HA + company: JFrog + min_ansible_version: 2.8 + platforms: + - name: Fedora + versions: + - 27 + - 29 + - name: Ubuntu + versions: + - xenial + - bionic + - name: Debian + versions: + - stretch + galaxy_tags: + - postgres + - postgresql + +dependencies: [] diff --git a/Ansible/collection/jfrog/ansible/roles/postgres/tasks/Debian.yml b/Ansible/collection/jfrog/ansible/roles/postgres/tasks/Debian.yml new file mode 100644 index 0000000..611332e --- /dev/null +++ b/Ansible/collection/jfrog/ansible/roles/postgres/tasks/Debian.yml @@ -0,0 +1,35 @@ +--- +- name: install python2 psycopg2 + apt: + name: python-psycopg2 + update_cache: yes + become: yes + +- name: install python3 psycopg2 + apt: + name: python3-psycopg2 + update_cache: yes + become: yes + +- name: add postgres apt key + apt_key: + url: https://www.postgresql.org/media/keys/ACCC4CF8.asc + id: "0x7FCC7D46ACCC4CF8" + state: present + become: yes + +- name: register APT repository + apt_repository: + repo: deb http://apt.postgresql.org/pub/repos/apt/ {{ ansible_distribution_release }}-pgdg main + state: present + filename: pgdg + become: yes + +- name: install postgres packages + 
apt: + name: + - postgresql-{{ postgres_server_version }} + - postgresql-server-dev-{{ postgres_server_version }} + - postgresql-contrib-{{ postgres_server_version }} + state: present + become: yes diff --git a/Ansible/collection/jfrog/ansible/roles/postgres/tasks/RedHat.yml b/Ansible/collection/jfrog/ansible/roles/postgres/tasks/RedHat.yml new file mode 100644 index 0000000..a30eba9 --- /dev/null +++ b/Ansible/collection/jfrog/ansible/roles/postgres/tasks/RedHat.yml @@ -0,0 +1,72 @@ +--- + +- name: install EPEL repository + yum: name=epel-release state=present + when: > # not for Fedora + ansible_distribution == 'CentOS' or + ansible_distribution == 'Red Hat Enterprise Linux' + become: yes + +- name: install python2 psycopg2 + yum: + name: + - python-psycopg2 + - sudo + - wget + - perl + state: present + +- name: install python3 psycopg2 + yum: + name: + - python3-psycopg2 + - sudo + - wget + - perl + state: present + +- name: fixup some locale issues + lineinfile: + dest: /etc/default/locale + line: 'LANGUAGE="{{ item }}"' + state: present + create: yes + loop: + - 'en_US:en' + - 'en_us.UTF-8' + +- name: get latest version + vars: + base: http://download.postgresql.org/pub/repos/yum + ver: "{{ ansible_distribution_version }}" + shell: | + set -eo pipefail + wget -O - {{ base }}/{{ postgres_server_version }}/redhat/rhel-{{ ver }}-x86_64/ 2>/dev/null | \ + grep 'pgdg-redhat' | \ + perl -pe 's/^.*rpm">//g' | \ + perl -pe 's/<\/a>.*//g' | \ + tail -n 1 + args: + executable: /bin/bash + changed_when: false + check_mode: false + register: latest_version + tags: [skip_ansible_lint] # yes, I want wget here + +- name: config postgres repository + vars: + base: http://download.postgresql.org/pub/repos/yum + ver: "{{ ansible_distribution_version }}" + yum: + name: "{{ base }}/{{ postgres_server_version }}/redhat/rhel-{{ ver }}-x86_64/{{ latest_version.stdout }}" + state: present + become: yes + +- name: install postgres packages + yum: + name: + - postgresql{{ 
postgres_server_pkg_version }}-server + - postgresql{{ postgres_server_pkg_version }}-contrib + - postgresql{{ postgres_server_pkg_version }}-devel + state: present + become: yes diff --git a/Ansible/collection/jfrog/ansible/roles/postgres/tasks/main.yml b/Ansible/collection/jfrog/ansible/roles/postgres/tasks/main.yml new file mode 100644 index 0000000..c267ba9 --- /dev/null +++ b/Ansible/collection/jfrog/ansible/roles/postgres/tasks/main.yml @@ -0,0 +1,105 @@ +--- +- name: define distribution-specific variables + include_vars: "{{ ansible_os_family }}.yml" + +- name: create directory for bind mount if necessary + file: + path: "{{ postgres_server_bind_mount_var_lib_pgsql_target }}" + state: directory + become: yes + when: postgres_server_bind_mount_var_lib_pgsql + + +- name: perform bind mount if necessary + mount: + path: "/var/lib/pgsql" + src: "{{ postgres_server_bind_mount_var_lib_pgsql_target }}" + opts: bind + state: mounted + fstype: none + become: yes + when: postgres_server_bind_mount_var_lib_pgsql + +- name: perform installation + include_tasks: "{{ ansible_os_family }}.yml" + +- name: extend path + copy: + dest: /etc/profile.d/postgres-path.sh + mode: a=rx + content: "export PATH=$PATH:/usr/pgsql-{{ postgres_server_version }}/bin" + become: yes + +- name: initialize PostgreSQL database cluster + environment: + LC_ALL: "en_US.UTF-8" + vars: + ansible_become: "{{ postgres_server_initdb_become }}" + ansible_become_user: "{{ postgres_server_user }}" + command: "{{ postgres_server_cmd_initdb }} {{ postgres_server_data_location }}" + args: + creates: "{{ postgres_server_data_location }}/PG_VERSION" + +- name: install postgres configuration + template: + src: "{{ item }}.j2" + dest: "{{ postgres_server_config_location }}/{{ item }}" + owner: postgres + group: postgres + mode: u=rw,go=r + vars: + ansible_become: "{{ postgres_server_initdb_become }}" + ansible_become_user: "{{ postgres_server_user }}" + loop: + - pg_hba.conf + - postgresql.conf + +- name: enable 
postgres service + systemd: + name: "{{ postgres_server_service_name }}" + state: started + enabled: yes + become: yes + +- name: Hold until Postgresql is up and running + wait_for: + port: 5432 + +- name: Create users + become_user: postgres + become: yes + postgresql_user: + name: "{{ item.db_user }}" + password: "{{ item.db_password }}" + conn_limit: "-1" + loop: "{{ db_users|default([]) }}" + no_log: true # secret passwords + +- name: Create a database + become_user: postgres + become: yes + postgresql_db: + name: "{{ item.db_name }}" + owner: "{{ item.db_owner }}" + encoding: UTF-8 + loop: "{{ dbs|default([]) }}" + +- name: Grant privs on db + become_user: postgres + become: yes + postgresql_privs: + database: "{{ item.db_name }}" + role: "{{ item.db_owner }}" + state: present + privs: ALL + type: database + loop: "{{ dbs|default([]) }}" + +- name: restart postgres + service: + name: "{{ postgres_server_service_name }}" + state: restarted + become: yes + +- debug: + msg: "Restarted postgres service {{ postgres_server_service_name }}" \ No newline at end of file diff --git a/Ansible/collection/jfrog/ansible/roles/postgres/templates/pg_hba.conf.j2 b/Ansible/collection/jfrog/ansible/roles/postgres/templates/pg_hba.conf.j2 new file mode 100644 index 0000000..7f0bc2c --- /dev/null +++ b/Ansible/collection/jfrog/ansible/roles/postgres/templates/pg_hba.conf.j2 @@ -0,0 +1,7 @@ +# TYPE DATABASE USER ADDRESS METHOD +## localhost connections through Unix port (user name), IPv4, IPv6 (MD5 pw). 
+local all all peer +host all all 127.0.0.1/32 md5 +host all all ::1/128 md5 +## remote connections IPv4 (md5: remote clients must authenticate with a password; never use "trust" on 0.0.0.0/0) +host all all 0.0.0.0/0 md5 diff --git a/Ansible/collection/jfrog/ansible/roles/postgres/templates/postgresql.conf.j2 b/Ansible/collection/jfrog/ansible/roles/postgres/templates/postgresql.conf.j2 new file mode 100644 index 0000000..c213a99 --- /dev/null +++ b/Ansible/collection/jfrog/ansible/roles/postgres/templates/postgresql.conf.j2 @@ -0,0 +1,681 @@ +# ----------------------------- +# PostgreSQL configuration file +# ----------------------------- +# +# This file consists of lines of the form: +# +# name = value +# +# (The "=" is optional.) Whitespace may be used. Comments are introduced with +# "#" anywhere on a line. The complete list of parameter names and allowed +# values can be found in the PostgreSQL documentation. +# +# The commented-out settings shown in this file represent the default values. +# Re-commenting a setting is NOT sufficient to revert it to the default value; +# you need to reload the server. +# +# This file is read on server startup and when the server receives a SIGHUP +# signal. If you edit the file on a running system, you have to SIGHUP the +# server for the changes to take effect, or use "pg_ctl reload". Some +# parameters, which are marked below, require a server shutdown and restart to +# take effect. +# +# Any parameter can also be given as a command-line option to the server, e.g., +# "postgres -c log_connections=on". Some parameters can be changed at run time +# with the "SET" SQL command. 
+# +# Memory units: kB = kilobytes Time units: ms = milliseconds +# MB = megabytes s = seconds +# GB = gigabytes min = minutes +# TB = terabytes h = hours +# d = days + + +#------------------------------------------------------------------------------ +# FILE LOCATIONS +#------------------------------------------------------------------------------ + +# The default values of these variables are driven from the -D command-line +# option or PGDATA environment variable, represented here as ConfigDir. + +{% if postgres_server_config_data_directory is not none %} +data_directory = '{{ postgres_server_config_data_directory }}' +{% else %} +#data_directory = 'ConfigDir' # use data in another directory + # (change requires restart) +{% endif %} + +{% if postgres_server_config_data_directory %} +hba_file = '{{ postgres_server_config_hba_file }}' +{% else %} +#hba_file = 'ConfigDir/pg_hba.conf' # host-based authentication file + # (change requires restart) +{% endif %} + +{% if postgres_server_config_data_directory %} +ident_file = '{{ postgres_server_config_ident_file }}' +{% else %} +#ident_file = 'ConfigDir/pg_ident.conf' # host-based authentication file + # (change requires restart) +{% endif %} + +{% if postgres_server_config_external_pid_file %} +external_pid_file = '{{ postgres_server_config_external_pid_file }}' +{% else %} +# If external_pid_file is not explicitly set, no extra PID file is written. 
+#external_pid_file = '' # write an extra PID file + # (change requires restart) +{% endif %} + + +#------------------------------------------------------------------------------ +# CONNECTIONS AND AUTHENTICATION +#------------------------------------------------------------------------------ + +# - Connection Settings - + +listen_addresses = '0.0.0.0' # what IP address(es) to listen on; + # comma-separated list of addresses; + # defaults to 'localhost'; use '*' for all + # (change requires restart) +#port = 5432 # (change requires restart) +max_connections = {{ postgres_server_max_connections }} # (change requires restart) +#superuser_reserved_connections = 3 # (change requires restart) +#unix_socket_directories = '/var/run/postgresql, /tmp' # comma-separated list of directories + # (change requires restart) +#unix_socket_group = '' # (change requires restart) +#unix_socket_permissions = 0777 # begin with 0 to use octal notation + # (change requires restart) +#bonjour = off # advertise server via Bonjour + # (change requires restart) +#bonjour_name = '' # defaults to the computer name + # (change requires restart) + +# - Security and Authentication - + +#authentication_timeout = 1min # 1s-600s +#ssl = off # (change requires restart) +#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers + # (change requires restart) +#ssl_prefer_server_ciphers = on # (change requires restart) +#ssl_ecdh_curve = 'prime256v1' # (change requires restart) +#ssl_cert_file = 'server.crt' # (change requires restart) +#ssl_key_file = 'server.key' # (change requires restart) +#ssl_ca_file = '' # (change requires restart) +#ssl_crl_file = '' # (change requires restart) +#password_encryption = on +#db_user_namespace = off +#row_security = on + +# GSSAPI using Kerberos +#krb_server_keyfile = '' +#krb_caseins_users = off + +# - TCP Keepalives - +# see "man 7 tcp" for details + +#tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds; + # 0 selects the system default 
+#tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds; + # 0 selects the system default +#tcp_keepalives_count = 0 # TCP_KEEPCNT; + # 0 selects the system default + + +#------------------------------------------------------------------------------ +# RESOURCE USAGE (except WAL) +#------------------------------------------------------------------------------ + +# - Memory - + +shared_buffers = {{ postgres_server_shared_buffers }} # min 128kB + # (change requires restart) +#huge_pages = try # on, off, or try + # (change requires restart) +#temp_buffers = 8MB # min 800kB +#max_prepared_transactions = 0 # zero disables the feature + # (change requires restart) +# Caution: it is not advisable to set max_prepared_transactions nonzero unless +# you actively intend to use prepared transactions. +#work_mem = 4MB # min 64kB +maintenance_work_mem = {{ postgres_server_maintenance_work_mem }} # min 1MB +#replacement_sort_tuples = 150000 # limits use of replacement selection sort +#autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem +#max_stack_depth = 2MB # min 100kB +dynamic_shared_memory_type = posix # the default is the first option + # supported by the operating system: + # posix + # sysv + # windows + # mmap + # use none to disable dynamic shared memory + # (change requires restart) + +# - Disk - + +#temp_file_limit = -1 # limits per-process temp file space + # in kB, or -1 for no limit + +# - Kernel Resource Usage - + +#max_files_per_process = 1000 # min 25 + # (change requires restart) +shared_preload_libraries = 'pg_stat_statements' # restart on change + +# - Cost-Based Vacuum Delay - + +#vacuum_cost_delay = 0 # 0-100 milliseconds +#vacuum_cost_page_hit = 1 # 0-10000 credits +#vacuum_cost_page_miss = 10 # 0-10000 credits +#vacuum_cost_page_dirty = 20 # 0-10000 credits +#vacuum_cost_limit = 200 # 1-10000 credits + +# - Background Writer - + +#bgwriter_delay = 200ms # 10-10000ms between rounds +#bgwriter_lru_maxpages = 100 # 0-1000 max buffers 
written/round +#bgwriter_lru_multiplier = 2.0 # 0-10.0 multiplier on buffers scanned/round +#bgwriter_flush_after = 512kB # measured in pages, 0 disables + +# - Asynchronous Behavior - + +effective_io_concurrency = {{ postgres_server_effective_io_concurrency }} +max_worker_processes = {{ postgres_server_max_worker_processes }} +max_parallel_workers_per_gather = {{ postgres_server_max_parallel_workers_per_gather }} +#old_snapshot_threshold = -1 # 1min-60d; -1 disables; 0 is immediate + # (change requires restart) +#backend_flush_after = 0 # measured in pages, 0 disables +{% if postgres_server_version|string != "9.6" %} +parallel_leader_participation = {{ "on" if postgres_server_parallel_leader_participation else "off" }} +max_parallel_maintenance_workers = {{ postgres_server_max_parallel_maintenance_workers }} +max_parallel_workers = {{ postgres_server_max_parallel_workers }} +{% endif %} + +#------------------------------------------------------------------------------ +# WRITE AHEAD LOG +#------------------------------------------------------------------------------ + +# - Settings - + +#wal_level = minimal # minimal, replica, or logical + # (change requires restart) +#fsync = on # flush data to disk for crash safety + # (turning this off can cause + # unrecoverable data corruption) +#synchronous_commit = on # synchronization level; + # off, local, remote_write, remote_apply, or on +#wal_sync_method = fsync # the default is the first option + # supported by the operating system: + # open_datasync + # fdatasync (default on Linux) + # fsync + # fsync_writethrough + # open_sync +full_page_writes = {{ "off" if postgres_server_volume_is_zfs else "on" }} # off OK on ZFS # recover from partial page writes + +wal_compression = off # enable compression of full-page writes +#wal_log_hints = off # also do full page writes of non-critical updates + # (change requires restart) +#wal_buffers = -1 # min 32kB, -1 sets based on shared_buffers + # (change 
requires restart) +#wal_writer_delay = 200ms # 1-10000 milliseconds +#wal_writer_flush_after = 1MB # measured in pages, 0 disables + +commit_delay = 100000 # range 0-100000, in microseconds +#commit_siblings = 5 # range 1-1000 + +# - Checkpoints - + +checkpoint_timeout = 4h # range 30s-1d +max_wal_size = 100GB +min_wal_size = 1GB +checkpoint_completion_target = 0.8 # checkpoint target duration, 0.0 - 1.0 +#checkpoint_flush_after = 256kB # measured in pages, 0 disables +#checkpoint_warning = 30s # 0 disables + +# - Archiving - + +#archive_mode = off # enables archiving; off, on, or always + # (change requires restart) +#archive_command = '' # command to use to archive a logfile segment + # placeholders: %p = path of file to archive + # %f = file name only + # e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f' +#archive_timeout = 0 # force a logfile segment switch after this + # number of seconds; 0 disables + + +#------------------------------------------------------------------------------ +# REPLICATION +#------------------------------------------------------------------------------ + +# - Sending Server(s) - + +# Set these on the master and on any standby that will send replication data. + +#max_wal_senders = 0 # max number of walsender processes + # (change requires restart) +#wal_keep_segments = 0 # in logfile segments, 16MB each; 0 disables +#wal_sender_timeout = 60s # in milliseconds; 0 disables + +#max_replication_slots = 0 # max number of replication slots + # (change requires restart) +#track_commit_timestamp = off # collect timestamp of transaction commit + # (change requires restart) + +# - Master Server - + +# These settings are ignored on a standby server. 
+ +#synchronous_standby_names = '' # standby servers that provide sync rep + # number of sync standbys and comma-separated list of application_name + # from standby(s); '*' = all +#vacuum_defer_cleanup_age = 0 # number of xacts by which cleanup is delayed + +# - Standby Servers - + +# These settings are ignored on a master server. + +#hot_standby = off # "on" allows queries during recovery + # (change requires restart) +#max_standby_archive_delay = 30s # max delay before canceling queries + # when reading WAL from archive; + # -1 allows indefinite delay +#max_standby_streaming_delay = 30s # max delay before canceling queries + # when reading streaming WAL; + # -1 allows indefinite delay +#wal_receiver_status_interval = 10s # send replies at least this often + # 0 disables +#hot_standby_feedback = off # send info from standby to prevent + # query conflicts +#wal_receiver_timeout = 60s # time that receiver waits for + # communication from master + # in milliseconds; 0 disables +#wal_retrieve_retry_interval = 5s # time to wait before retrying to + # retrieve WAL after a failed attempt + + +#------------------------------------------------------------------------------ +# QUERY TUNING +#------------------------------------------------------------------------------ + +# - Planner Method Configuration - + +#enable_bitmapscan = on +#enable_hashagg = on +#enable_hashjoin = on +#enable_indexscan = on +#enable_indexonlyscan = on +#enable_material = on +#enable_mergejoin = on +#enable_nestloop = on +#enable_seqscan = on +#enable_sort = on +#enable_tidscan = on + +# - Planner Cost Constants - + +#seq_page_cost = 1.0 # measured on an arbitrary scale +random_page_cost = {{ postgres_server_random_page_cost }} +#cpu_tuple_cost = 0.01 # same scale as above +#cpu_index_tuple_cost = 0.005 # same scale as above +#cpu_operator_cost = 0.0025 # same scale as above +#parallel_tuple_cost = 0.1 # same scale as above +#parallel_setup_cost = 1000.0 # same scale as above 
+#min_parallel_relation_size = 8MB +#effective_cache_size = 4GB + +# - Genetic Query Optimizer - + +#geqo = on +#geqo_threshold = 12 +#geqo_effort = 5 # range 1-10 +#geqo_pool_size = 0 # selects default based on effort +#geqo_generations = 0 # selects default based on effort +#geqo_selection_bias = 2.0 # range 1.5-2.0 +#geqo_seed = 0.0 # range 0.0-1.0 + +# - Other Planner Options - + +#default_statistics_target = 100 # range 1-10000 +#constraint_exclusion = partition # on, off, or partition +#cursor_tuple_fraction = 0.1 # range 0.0-1.0 +#from_collapse_limit = 8 +#join_collapse_limit = 8 # 1 disables collapsing of explicit + # JOIN clauses +#force_parallel_mode = off + + +#------------------------------------------------------------------------------ +# ERROR REPORTING AND LOGGING +#------------------------------------------------------------------------------ + +# - Where to Log - + +log_destination = 'stderr' # Valid values are combinations of + # stderr, csvlog, syslog, and eventlog, + # depending on platform. csvlog + # requires logging_collector to be on. + +# This is used when logging to stderr: +logging_collector = on # Enable capturing of stderr and csvlog + # into log files. Required to be on for + # csvlogs. + # (change requires restart) + +# These are only used if logging_collector is on: +log_directory = 'pg_log' # directory where log files are written, + # can be absolute or relative to PGDATA +log_filename = 'postgresql-%a.log' # log file name pattern, + # can include strftime() escapes +#log_file_mode = 0600 # creation mode for log files, + # begin with 0 to use octal notation +log_truncate_on_rotation = on # If on, an existing log file with the + # same name as the new log file will be + # truncated rather than appended to. + # But such truncation only occurs on + # time-driven rotation, not on restarts + # or size-driven rotation. Default is + # off, meaning append to existing files + # in all cases. 
+log_rotation_age = 1d # Automatic rotation of logfiles will + # happen after that time. 0 disables. +log_rotation_size = 0 # Automatic rotation of logfiles will + # happen after that much log output. + # 0 disables. + +# These are relevant when logging to syslog: +#syslog_facility = 'LOCAL0' +#syslog_ident = 'postgres' +#syslog_sequence_numbers = on +#syslog_split_messages = on + +# This is only relevant when logging to eventlog (win32): +# (change requires restart) +#event_source = 'PostgreSQL' + +# - When to Log - + +#client_min_messages = notice # values in order of decreasing detail: + # debug5 + # debug4 + # debug3 + # debug2 + # debug1 + # log + # notice + # warning + # error + +#log_min_messages = warning # values in order of decreasing detail: + # debug5 + # debug4 + # debug3 + # debug2 + # debug1 + # info + # notice + # warning + # error + # log + # fatal + # panic + +#log_min_error_statement = error # values in order of decreasing detail: + # debug5 + # debug4 + # debug3 + # debug2 + # debug1 + # info + # notice + # warning + # error + # log + # fatal + # panic (effectively off) + +#log_min_duration_statement = -1 # -1 is disabled, 0 logs all statements + # and their durations, > 0 logs only + # statements running at least this number + # of milliseconds + + +# - What to Log - + +#debug_print_parse = off +#debug_print_rewritten = off +#debug_print_plan = off +#debug_pretty_print = on +log_checkpoints = {{ "on" if postgres_server_log_checkpoints else "off" }} +log_connections = {{ "on" if postgres_server_log_connections else "off" }} +log_disconnections = {{ "on" if postgres_server_log_disconnections else "off" }} +log_duration = {{ "on" if postgres_server_log_duration else "off" }} +log_error_verbosity = {{ postgres_server_log_error_verbosity }} # terse, default, or verbose messages +log_hostname = {{ "on" if postgres_server_log_hostname else "off" }} +log_line_prefix = '< %m > ' # special values: + # %a = application name + # %u = user name + # %d = 
database name + # %r = remote host and port + # %h = remote host + # %p = process ID + # %t = timestamp without milliseconds + # %m = timestamp with milliseconds + # %n = timestamp with milliseconds (as a Unix epoch) + # %i = command tag + # %e = SQL state + # %c = session ID + # %l = session line number + # %s = session start timestamp + # %v = virtual transaction ID + # %x = transaction ID (0 if none) + # %q = stop here in non-session + # processes + # %% = '%' + # e.g. '<%u%%%d> ' +log_lock_waits = {{ "on" if postgres_server_log_lock_waits else "off" }} # log lock waits >= deadlock_timeout +log_statement = '{{ postgres_server_log_statements }}' # none, ddl, mod, all +#log_replication_commands = off +#log_temp_files = -1 # log temporary files equal or larger + # than the specified size in kilobytes; + # -1 disables, 0 logs all temp files +log_timezone = 'Europe/Berlin' + + +# - Process Title - + +#cluster_name = '' # added to process titles if nonempty + # (change requires restart) +#update_process_title = on + + +#------------------------------------------------------------------------------ +# RUNTIME STATISTICS +#------------------------------------------------------------------------------ + +# - Query/Index Statistics Collector - + +#track_activities = on +#track_counts = on +#track_io_timing = off +#track_functions = none # none, pl, all +track_activity_query_size = 102400 # (change requires restart) +#stats_temp_directory = 'pg_stat_tmp' + +# Track statements generated by stored procedures as well +pg_stat_statements.track = all + + +# - Statistics Monitoring - + +#log_parser_stats = off +#log_planner_stats = off +#log_executor_stats = off +#log_statement_stats = off + + +#------------------------------------------------------------------------------ +# AUTOVACUUM PARAMETERS +#------------------------------------------------------------------------------ + +#autovacuum = on # Enable autovacuum subprocess? 'on' + # requires track_counts to also be on. 
+#log_autovacuum_min_duration = -1 # -1 disables, 0 logs all actions and + # their durations, > 0 logs only + # actions running at least this number + # of milliseconds. +#autovacuum_max_workers = 3 # max number of autovacuum subprocesses + # (change requires restart) +#autovacuum_naptime = 1min # time between autovacuum runs +#autovacuum_vacuum_threshold = 50 # min number of row updates before + # vacuum +#autovacuum_analyze_threshold = 50 # min number of row updates before + # analyze +#autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum +#autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze +#autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum + # (change requires restart) +#autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age + # before forced vacuum + # (change requires restart) +#autovacuum_vacuum_cost_delay = 20ms # default vacuum cost delay for + # autovacuum, in milliseconds; + # -1 means use vacuum_cost_delay +#autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for + # autovacuum, -1 means use + # vacuum_cost_limit + + +#------------------------------------------------------------------------------ +# CLIENT CONNECTION DEFAULTS +#------------------------------------------------------------------------------ + +# - Statement Behavior - + +#search_path = '"$user", public' # schema names +#default_tablespace = '' # a tablespace name, '' uses the default +#temp_tablespaces = '' # a list of tablespace names, '' uses + # only default tablespace +#check_function_bodies = on +#default_transaction_isolation = 'read committed' +#default_transaction_read_only = off +#default_transaction_deferrable = off +#session_replication_role = 'origin' +#statement_timeout = 0 # in milliseconds, 0 is disabled +#lock_timeout = 0 # in milliseconds, 0 is disabled +#idle_in_transaction_session_timeout = 0 # in milliseconds, 0 is disabled +#vacuum_freeze_min_age = 50000000 
+#vacuum_freeze_table_age = 150000000 +#vacuum_multixact_freeze_min_age = 5000000 +#vacuum_multixact_freeze_table_age = 150000000 +#bytea_output = 'hex' # hex, escape +#xmlbinary = 'base64' +#xmloption = 'content' +#gin_fuzzy_search_limit = 0 +#gin_pending_list_limit = 4MB + +# - Locale and Formatting - + +datestyle = 'iso, mdy' +#intervalstyle = 'postgres' +timezone = 'Europe/Berlin' +#timezone_abbreviations = 'Default' # Select the set of available time zone + # abbreviations. Currently, there are + # Default + # Australia (historical usage) + # India + # You can create your own file in + # share/timezonesets/. +#extra_float_digits = 0 # min -15, max 3 +#client_encoding = sql_ascii # actually, defaults to database + # encoding + +# These settings are initialized by initdb, but they can be changed. +lc_messages = 'en_US.UTF-8' # locale for system error message + # strings +lc_monetary = 'en_US.UTF-8' # locale for monetary formatting +lc_numeric = 'en_US.UTF-8' # locale for number formatting +lc_time = 'en_US.UTF-8' # locale for time formatting + +# default configuration for text search +default_text_search_config = 'pg_catalog.english' + +# - Other Defaults - + +#dynamic_library_path = '$libdir' +#local_preload_libraries = '' +{% set preload_libraries = [] %} +{% if postgres_server_auto_explain_enabled %} + {{ preload_libraries.append("auto_explain") }} +{% endif %} + +session_preload_libraries = '{{ ",".join(preload_libraries) }}' + +#------------------------------------------------------------------------------ +# auto_explain SETTINGS +#------------------------------------------------------------------------------ + +auto_explain.log_min_duration = {{ "on" if postgres_server_auto_explain_log_min_duration else "off" }} +auto_explain.log_analyze = {{ "on" if postgres_server_auto_explain_log_analyze else "off" }} + +#------------------------------------------------------------------------------ +# LOCK MANAGEMENT 
+#------------------------------------------------------------------------------ + +#deadlock_timeout = 1s +max_locks_per_transaction = {{ postgres_server_max_locks_per_transaction }} # min 10 + # (change requires restart) +#max_pred_locks_per_transaction = 64 # min 10 + # (change requires restart) + + +#------------------------------------------------------------------------------ +# VERSION/PLATFORM COMPATIBILITY +#------------------------------------------------------------------------------ + +# - Previous PostgreSQL Versions - + +#array_nulls = on +#backslash_quote = safe_encoding # on, off, or safe_encoding +#default_with_oids = off +#escape_string_warning = on +#lo_compat_privileges = off +#operator_precedence_warning = off +#quote_all_identifiers = off +#sql_inheritance = on +#standard_conforming_strings = on +#synchronize_seqscans = on + +# - Other Platforms and Clients - + +#transform_null_equals = off + + +#------------------------------------------------------------------------------ +# ERROR HANDLING +#------------------------------------------------------------------------------ + +#exit_on_error = off # terminate session on any error? +#restart_after_crash = on # reinitialize after backend crash? + + +#------------------------------------------------------------------------------ +# CONFIG FILE INCLUDES +#------------------------------------------------------------------------------ + +# These options allow settings to be loaded from files other than the +# default postgresql.conf. 
+ +#include_dir = 'conf.d' # include files ending in '.conf' from + # directory 'conf.d' +#include_if_exists = 'exists.conf' # include file only if it exists +#include = 'special.conf' # include file + + +#------------------------------------------------------------------------------ +# CUSTOMIZED OPTIONS +#------------------------------------------------------------------------------ + +# Add settings for extensions here + diff --git a/Ansible/collection/jfrog/ansible/roles/postgres/vars/Debian.yml b/Ansible/collection/jfrog/ansible/roles/postgres/vars/Debian.yml new file mode 100644 index 0000000..1c1a7f4 --- /dev/null +++ b/Ansible/collection/jfrog/ansible/roles/postgres/vars/Debian.yml @@ -0,0 +1,12 @@ +--- + +postgres_server_cmd_initdb: /usr/lib/postgresql/{{ postgres_server_version }}/bin/initdb -D +postgres_server_initdb_become: yes +postgres_server_data_location: /var/lib/postgresql/{{ postgres_server_version }}/main +postgres_server_config_location: /etc/postgresql/{{ postgres_server_version }}/main +postgres_server_service_name: postgresql@{{ postgres_server_version }}-main + +postgres_server_config_data_directory: "/var/lib/postgresql/{{ postgres_server_version }}/main" +postgres_server_config_hba_file: "/etc/postgresql/{{ postgres_server_version }}/main/pg_hba.conf" +postgres_server_config_ident_file: "/etc/postgresql/{{ postgres_server_version }}/main/pg_ident.conf" +postgres_server_config_external_pid_file: "/var/run/postgresql/{{ postgres_server_version }}-main.pid" diff --git a/Ansible/collection/jfrog/ansible/roles/postgres/vars/RedHat.yml b/Ansible/collection/jfrog/ansible/roles/postgres/vars/RedHat.yml new file mode 100644 index 0000000..f6faafd --- /dev/null +++ b/Ansible/collection/jfrog/ansible/roles/postgres/vars/RedHat.yml @@ -0,0 +1,11 @@ +--- + +postgres_server_cmd_initdb: /usr/pgsql-{{ postgres_server_version }}/bin/postgresql{{ postgres_server_pkg_version }}-setup initdb -D +postgres_server_data_location: /var/lib/pgsql/{{ 
postgres_server_version }}/data +postgres_server_config_location: "{{ postgres_server_data_location }}" +postgres_server_service_name: postgresql-{{ postgres_server_version }} + +postgres_server_config_data_directory: null +postgres_server_config_hba_file: null +postgres_server_config_ident_file: null +postgres_server_config_external_pid_file: null diff --git a/Ansible/collection/jfrog/ansible/roles/postgres/vars/RedHat_pg-9.6.yml b/Ansible/collection/jfrog/ansible/roles/postgres/vars/RedHat_pg-9.6.yml new file mode 100644 index 0000000..56d0263 --- /dev/null +++ b/Ansible/collection/jfrog/ansible/roles/postgres/vars/RedHat_pg-9.6.yml @@ -0,0 +1,4 @@ +--- + +postgres_server_cmd_initdb: /usr/pgsql-{{ postgres_server_version }}/bin/postgresql{{ postgres_server_pkg_version }}-setup initdb +postgres_server_initdb_become: false diff --git a/Ansible/collection/jfrog/ansible/roles/postgres/vars/RedHat_pg-default.yml b/Ansible/collection/jfrog/ansible/roles/postgres/vars/RedHat_pg-default.yml new file mode 100644 index 0000000..3d974c2 --- /dev/null +++ b/Ansible/collection/jfrog/ansible/roles/postgres/vars/RedHat_pg-default.yml @@ -0,0 +1,4 @@ +--- + +postgres_server_cmd_initdb: /usr/pgsql-{{ postgres_server_version }}/bin/initdb -D /var/lib/pgsql/{{ postgres_server_version }}/data +postgres_server_initdb_become: yes diff --git a/Ansible/collection/jfrog/ansible/roles/xray/.travis.yml b/Ansible/collection/jfrog/ansible/roles/xray/.travis.yml new file mode 100644 index 0000000..36bbf62 --- /dev/null +++ b/Ansible/collection/jfrog/ansible/roles/xray/.travis.yml @@ -0,0 +1,29 @@ +--- +language: python +python: "2.7" + +# Use the new container infrastructure +sudo: false + +# Install ansible +addons: + apt: + packages: + - python-pip + +install: + # Install ansible + - pip install ansible + + # Check ansible version + - ansible --version + + # Create ansible.cfg with correct roles_path + - printf '[defaults]\nroles_path=../' >ansible.cfg + +script: + # Basic role syntax check 
+ - ansible-playbook tests/test.yml -i tests/inventory --syntax-check + +notifications: + webhooks: https://galaxy.ansible.com/api/v1/notifications/ \ No newline at end of file diff --git a/Ansible/collection/jfrog/ansible/roles/xray/defaults/main.yml b/Ansible/collection/jfrog/ansible/roles/xray/defaults/main.yml new file mode 100644 index 0000000..b8aad42 --- /dev/null +++ b/Ansible/collection/jfrog/ansible/roles/xray/defaults/main.yml @@ -0,0 +1,23 @@ +--- +# defaults file for xray +# indicates were this collection was downlaoded from (galaxy, automation_hub, standalone) +ansible_marketplace: standalone + +# The version of xray to install +xray_version: 3.3.0 + +# whether to enable HA +xray_ha_enabled: true + +# The location where xray should install. +xray_download_directory: /opt/jfrog + +# The remote xray download file +xray_tar: https://bintray.com/standAloneDownload/downloadArtifact?agree=true&artifactPath=/jfrog/jfrog-xray/xray-linux/{{ xray_version }}/jfrog-xray-{{ xray_version }}-linux.tar.gz&callback_id=anonymous&product=xray + +#The xray install directory +xray_home: "{{ xray_download_directory }}/jfrog-xray-{{ xray_version }}-linux" + +#xray users and groups +xray_user: xray +xray_group: xray diff --git a/Ansible/collection/jfrog/ansible/roles/xray/handlers/main.yml b/Ansible/collection/jfrog/ansible/roles/xray/handlers/main.yml new file mode 100644 index 0000000..f236fe3 --- /dev/null +++ b/Ansible/collection/jfrog/ansible/roles/xray/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for xray \ No newline at end of file diff --git a/Ansible/collection/jfrog/ansible/roles/xray/meta/main.yml b/Ansible/collection/jfrog/ansible/roles/xray/meta/main.yml new file mode 100644 index 0000000..227ad9c --- /dev/null +++ b/Ansible/collection/jfrog/ansible/roles/xray/meta/main.yml @@ -0,0 +1,53 @@ +galaxy_info: + author: your name + description: your role description + company: your company (optional) + + # If the issue tracker for your role is not on 
github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.9 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. 
+ \ No newline at end of file diff --git a/Ansible/collection/jfrog/ansible/roles/xray/tasks/Debian.yml b/Ansible/collection/jfrog/ansible/roles/xray/tasks/Debian.yml new file mode 100644 index 0000000..420c2d0 --- /dev/null +++ b/Ansible/collection/jfrog/ansible/roles/xray/tasks/Debian.yml @@ -0,0 +1,37 @@ +--- +- name: Install db5.3-util + apt: + deb: "{{ xray_home }}/app/third-party/misc/db5.3-util_5.3.28-3ubuntu3_amd64.deb" + ignore_errors: yes + become: yes + +- name: Install db-util + apt: + deb: "{{ xray_home }}/app/third-party/misc/db-util_1_3a5.3.21exp1ubuntu1_all.deb" + ignore_errors: yes + become: yes + +- name: Install libssl + apt: + deb: "{{ xray_home }}/app/third-party/rabbitmq/libssl1.1_1.1.0j-1_deb9u1_amd64.deb" + ignore_errors: yes + become: yes + +- name: Install socat + apt: + deb: "{{ xray_home }}/app/third-party/rabbitmq/socat_1.7.3.1-2+deb9u1_amd64.deb" + become: yes + +- name: Install libwxbase3.0-0v5 + apt: + name: libwxbase3.0-0v5 + update_cache: yes + state: present + ignore_errors: yes + become: yes + +- name: Install erlang + apt: + deb: "{{ xray_home }}/app/third-party/rabbitmq/esl-erlang_21.2.1-1~ubuntu~xenial_amd64.deb" + become: yes + diff --git a/Ansible/collection/jfrog/ansible/roles/xray/tasks/RedHat.yml b/Ansible/collection/jfrog/ansible/roles/xray/tasks/RedHat.yml new file mode 100644 index 0000000..8d24eb7 --- /dev/null +++ b/Ansible/collection/jfrog/ansible/roles/xray/tasks/RedHat.yml @@ -0,0 +1,16 @@ +--- +- name: Install db-utl + yum: + name: "{{ xray_home }}/app/third-party/misc/db4-utils-4.7.25-20.el6_7.x86_64.rpm" + state: present + +- name: Install socat + yum: + name: "{{ xray_home }}/app/third-party/rabbitmq/socat-1.7.3.2-2.el7.x86_64.rpm" + state: present + +- name: Install erlang + yum: + name: "{{ xray_home }}/app/third-party/rabbitmq/erlang-21.1.4-1.el7.centos.x86_64.rpm" + state: present + diff --git a/Ansible/collection/jfrog/ansible/roles/xray/tasks/main.yml 
b/Ansible/collection/jfrog/ansible/roles/xray/tasks/main.yml new file mode 100644 index 0000000..16810f6 --- /dev/null +++ b/Ansible/collection/jfrog/ansible/roles/xray/tasks/main.yml @@ -0,0 +1,93 @@ +--- +- name: create group for xray + group: + name: "{{ xray_group }}" + state: present + become: yes + +- name: create user for xray + user: + name: "{{ xray_user }}" + group: "{{ xray_group }}" + system: yes + become: yes + +- name: ensure xray_download_directory exists + file: + path: "{{ xray_download_directory }}" + state: directory + become: yes + +- name: download xray + unarchive: + src: "{{ xray_tar }}" + dest: "{{ xray_download_directory }}" + remote_src: yes + owner: "{{ xray_user }}" + group: "{{ xray_group }}" + creates: "{{ xray_home }}" + become: yes + register: downloadxray + until: downloadxray is succeeded + retries: 3 + +- name: perform prerequisite installation + include_tasks: "{{ ansible_os_family }}.yml" + +- name: ensure etc exists + file: + path: "{{ xray_home }}/var/etc" + state: directory + owner: "{{ xray_user }}" + group: "{{ xray_group }}" + become: yes + +- name: configure system yaml + template: + src: system.yaml.j2 + dest: "{{ xray_home }}/var/etc/system.yaml" + become: yes + +- name: ensure {{ xray_home }}/var/etc/security/ exists + file: + path: "{{ xray_home }}/var/etc/security/" + state: directory + owner: "{{ xray_user }}" + group: "{{ xray_group }}" + become: yes + +- name: configure master key + template: + src: master.key.j2 + dest: "{{ xray_home }}/var/etc/security/master.key" + become: yes + +- name: configure join key + template: + src: join.key.j2 + dest: "{{ xray_home }}/var/etc/security/join.key" + become: yes + +- name: ensure {{ xray_home }}/var/etc/info/ exists + file: + path: "{{ xray_home }}/var/etc/info/" + state: directory + owner: "{{ xray_user }}" + group: "{{ xray_group }}" + become: yes + +- name: configure installer info + template: + src: installer-info.json.j2 + dest: "{{ xray_home 
}}/var/etc/info/installer-info.json" + become: yes + +- name: create xray service + shell: "{{ xray_home }}/app/bin/installService.sh" + become: yes + +- name: start and enable xray + service: + name: xray + state: restarted + become: yes \ No newline at end of file diff --git a/Ansible/collection/jfrog/ansible/roles/xray/templates/installer-info.json.j2 b/Ansible/collection/jfrog/ansible/roles/xray/templates/installer-info.json.j2 new file mode 100644 index 0000000..35bf1d0 --- /dev/null +++ b/Ansible/collection/jfrog/ansible/roles/xray/templates/installer-info.json.j2 @@ -0,0 +1,8 @@ +{ + "productId": "Ansible_{{ ansible_marketplace }}_xray-{{xray_version}}/1.0.0", + "features": [ + { + "featureId": "Partner/ACC-006973" + } + ] +} \ No newline at end of file diff --git a/Ansible/collection/jfrog/ansible/roles/xray/templates/join.key.j2 b/Ansible/collection/jfrog/ansible/roles/xray/templates/join.key.j2 new file mode 100644 index 0000000..17d05d2 --- /dev/null +++ b/Ansible/collection/jfrog/ansible/roles/xray/templates/join.key.j2 @@ -0,0 +1 @@ +{{ join_key }} \ No newline at end of file diff --git a/Ansible/collection/jfrog/ansible/roles/xray/templates/master.key.j2 b/Ansible/collection/jfrog/ansible/roles/xray/templates/master.key.j2 new file mode 100644 index 0000000..0462a64 --- /dev/null +++ b/Ansible/collection/jfrog/ansible/roles/xray/templates/master.key.j2 @@ -0,0 +1 @@ +{{ master_key }} \ No newline at end of file diff --git a/Ansible/collection/jfrog/ansible/roles/xray/templates/system.yaml.j2 b/Ansible/collection/jfrog/ansible/roles/xray/templates/system.yaml.j2 new file mode 100644 index 0000000..206eb77 --- /dev/null +++ b/Ansible/collection/jfrog/ansible/roles/xray/templates/system.yaml.j2 @@ -0,0 +1,36 @@ +## @formatter:off +## JFROG ARTIFACTORY SYSTEM CONFIGURATION FILE +## HOW TO USE: comment-out any field and keep the correct yaml indentation by deleting only the leading '#' character. 
+configVersion: 1 + +## NOTE: JFROG_HOME is a place holder for the JFrog root directory containing the deployed product, the home directory for all JFrog products. +## Replace JFROG_HOME with the real path! For example, in RPM install, JFROG_HOME=/opt/jfrog + +## NOTE: Sensitive information such as passwords and join key are encrypted on first read. +## NOTE: The provided commented key and value is the default. + +## SHARED CONFIGURATIONS +## A shared section for keys across all services in this config +shared: + ## Base URL of the JFrog Platform Deployment (JPD) + ## This is the URL to the machine where JFrog Artifactory is deployed, or the load balancer pointing to it. It is recommended to use DNS names rather than direct IPs. + ## Examples: "http://jfrog.acme.com" or "http://10.20.30.40:8082" + jfrogUrl: {{ jfrog_url }} + + ## Node Settings + node: + ## A unique id to identify this node. + ## Default: auto generated at startup. + id: {{ ansible_machine_id }} + + ## Database Configuration + database: + ## One of: mysql, oracle, mssql, postgresql, mariadb + ## Default: Embedded derby + + ## Example for mysql/postgresql + type: "{{ db_type }}" + driver: "{{ db_driver }}" + url: "{{ db_url }}" + username: "{{ db_user }}" + password: "{{ db_password }}" \ No newline at end of file diff --git a/Ansible/collection/jfrog/ansible/roles/xray/tests/inventory b/Ansible/collection/jfrog/ansible/roles/xray/tests/inventory new file mode 100644 index 0000000..878877b --- /dev/null +++ b/Ansible/collection/jfrog/ansible/roles/xray/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/Ansible/collection/jfrog/ansible/roles/xray/tests/test.yml b/Ansible/collection/jfrog/ansible/roles/xray/tests/test.yml new file mode 100644 index 0000000..f296da6 --- /dev/null +++ b/Ansible/collection/jfrog/ansible/roles/xray/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - xray \ No newline at end of file diff --git 
a/Ansible/collection/jfrog/ansible/roles/xray/vars/main.yml b/Ansible/collection/jfrog/ansible/roles/xray/vars/main.yml new file mode 100644 index 0000000..55363e6 --- /dev/null +++ b/Ansible/collection/jfrog/ansible/roles/xray/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for xray \ No newline at end of file diff --git a/Ansible/infra/aws/lb-rt-xray-ha.json b/Ansible/infra/aws/lb-rt-xray-ha.json new file mode 100644 index 0000000..867e1df --- /dev/null +++ b/Ansible/infra/aws/lb-rt-xray-ha.json @@ -0,0 +1,769 @@ +{ + "Description": "This template deploys a VPC, with a pair of public and private subnets spread across two Availability Zones. It deploys an internet gateway, with a default route on the public subnets. It deploys a pair of NAT gateways (one in each AZ), and default routes for them in the private subnets.", + "Parameters": { + "SSHKeyName": { + "Description": "Name of the ec2 key you need one to use this template", + "Type": "AWS::EC2::KeyPair::KeyName", + "Default": "choose-key" + }, + "EnvironmentName": { + "Description": "An environment name that is prefixed to resource names", + "Type": "String", + "Default": "Ansible" + }, + "VpcCIDR": { + "Description": "Please enter the IP range (CIDR notation) for this VPC", + "Type": "String", + "Default": "10.192.0.0/16" + }, + "PublicSubnet1CIDR": { + "Description": "Please enter the IP range (CIDR notation) for the public subnet in the first Availability Zone", + "Type": "String", + "Default": "10.192.10.0/24" + }, + "PublicSubnet2CIDR": { + "Description": "Please enter the IP range (CIDR notation) for the public subnet in the second Availability Zone", + "Type": "String", + "Default": "10.192.11.0/24" + }, + "PrivateSubnet1CIDR": { + "Description": "Please enter the IP range (CIDR notation) for the private subnet in the first Availability Zone", + "Type": "String", + "Default": "10.192.20.0/24" + }, + "PrivateSubnet2CIDR": { + "Description": "Please enter the IP range (CIDR notation) for the private 
subnet in the second Availability Zone", + "Type": "String", + "Default": "10.192.21.0/24" + } + }, + "Mappings": { + "RegionToAmazonAMI": { + "us-east-1": { + "HVM64": "ami-03e33c1cefd1d3d74" + }, + "us-east-2": { + "HVM64": "ami-07d9419c80dc1113c" + }, + "us-west-1": { + "HVM64": "ami-0ee1a20d6b0c6a347" + }, + "us-west-2": { + "HVM64": "ami-0813245c0939ab3ca" + } + } + }, + "Resources": { + "VPC": { + "Type": "AWS::EC2::VPC", + "Properties": { + "CidrBlock": { + "Ref": "VpcCIDR" + }, + "EnableDnsSupport": true, + "EnableDnsHostnames": true, + "Tags": [ + { + "Key": "Name", + "Value": { + "Ref": "EnvironmentName" + } + } + ] + } + }, + "InternetGateway": { + "Type": "AWS::EC2::InternetGateway", + "Properties": { + "Tags": [ + { + "Key": "Name", + "Value": { + "Ref": "EnvironmentName" + } + } + ] + } + }, + "InternetGatewayAttachment": { + "Type": "AWS::EC2::VPCGatewayAttachment", + "Properties": { + "InternetGatewayId": { + "Ref": "InternetGateway" + }, + "VpcId": { + "Ref": "VPC" + } + } + }, + "PublicSubnet1": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "VpcId": { + "Ref": "VPC" + }, + "AvailabilityZone": { + "Fn::Select": [ + 0, + { + "Fn::GetAZs": "" + } + ] + }, + "CidrBlock": { + "Ref": "PublicSubnet1CIDR" + }, + "MapPublicIpOnLaunch": true, + "Tags": [ + { + "Key": "Name", + "Value": { + "Fn::Sub": "${EnvironmentName} Public Subnet (AZ1)" + } + } + ] + } + }, + "PublicSubnet2": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "VpcId": { + "Ref": "VPC" + }, + "AvailabilityZone": { + "Fn::Select": [ + 1, + { + "Fn::GetAZs": "" + } + ] + }, + "CidrBlock": { + "Ref": "PublicSubnet2CIDR" + }, + "MapPublicIpOnLaunch": true, + "Tags": [ + { + "Key": "Name", + "Value": { + "Fn::Sub": "${EnvironmentName} Public Subnet (AZ2)" + } + } + ] + } + }, + "PrivateSubnet1": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "VpcId": { + "Ref": "VPC" + }, + "AvailabilityZone": { + "Fn::Select": [ + 0, + { + "Fn::GetAZs": "" + } + ] + }, + "CidrBlock": { + "Ref": 
"PrivateSubnet1CIDR" + }, + "MapPublicIpOnLaunch": false, + "Tags": [ + { + "Key": "Name", + "Value": { + "Fn::Sub": "${EnvironmentName} Private Subnet (AZ1)" + } + } + ] + } + }, + "PrivateSubnet2": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "VpcId": { + "Ref": "VPC" + }, + "AvailabilityZone": { + "Fn::Select": [ + 1, + { + "Fn::GetAZs": "" + } + ] + }, + "CidrBlock": { + "Ref": "PrivateSubnet2CIDR" + }, + "MapPublicIpOnLaunch": false, + "Tags": [ + { + "Key": "Name", + "Value": { + "Fn::Sub": "${EnvironmentName} Private Subnet (AZ2)" + } + } + ] + } + }, + "NatGateway1EIP": { + "Type": "AWS::EC2::EIP", + "DependsOn": "InternetGatewayAttachment", + "Properties": { + "Domain": "vpc" + } + }, + "NatGateway2EIP": { + "Type": "AWS::EC2::EIP", + "DependsOn": "InternetGatewayAttachment", + "Properties": { + "Domain": "vpc" + } + }, + "NatGateway1": { + "Type": "AWS::EC2::NatGateway", + "Properties": { + "AllocationId": { + "Fn::GetAtt": [ + "NatGateway1EIP", + "AllocationId" + ] + }, + "SubnetId": { + "Ref": "PublicSubnet1" + } + } + }, + "NatGateway2": { + "Type": "AWS::EC2::NatGateway", + "Properties": { + "AllocationId": { + "Fn::GetAtt": [ + "NatGateway2EIP", + "AllocationId" + ] + }, + "SubnetId": { + "Ref": "PublicSubnet2" + } + } + }, + "PublicRouteTable": { + "Type": "AWS::EC2::RouteTable", + "Properties": { + "VpcId": { + "Ref": "VPC" + }, + "Tags": [ + { + "Key": "Name", + "Value": { + "Fn::Sub": "${EnvironmentName} Public Routes" + } + } + ] + } + }, + "DefaultPublicRoute": { + "Type": "AWS::EC2::Route", + "DependsOn": "InternetGatewayAttachment", + "Properties": { + "RouteTableId": { + "Ref": "PublicRouteTable" + }, + "DestinationCidrBlock": "0.0.0.0/0", + "GatewayId": { + "Ref": "InternetGateway" + } + } + }, + "PublicSubnet1RouteTableAssociation": { + "Type": "AWS::EC2::SubnetRouteTableAssociation", + "Properties": { + "RouteTableId": { + "Ref": "PublicRouteTable" + }, + "SubnetId": { + "Ref": "PublicSubnet1" + } + } + }, + 
"PublicSubnet2RouteTableAssociation": { + "Type": "AWS::EC2::SubnetRouteTableAssociation", + "Properties": { + "RouteTableId": { + "Ref": "PublicRouteTable" + }, + "SubnetId": { + "Ref": "PublicSubnet2" + } + } + }, + "PrivateRouteTable1": { + "Type": "AWS::EC2::RouteTable", + "Properties": { + "VpcId": { + "Ref": "VPC" + }, + "Tags": [ + { + "Key": "Name", + "Value": { + "Fn::Sub": "${EnvironmentName} Private Routes (AZ1)" + } + } + ] + } + }, + "DefaultPrivateRoute1": { + "Type": "AWS::EC2::Route", + "Properties": { + "RouteTableId": { + "Ref": "PrivateRouteTable1" + }, + "DestinationCidrBlock": "0.0.0.0/0", + "NatGatewayId": { + "Ref": "NatGateway1" + } + } + }, + "PrivateSubnet1RouteTableAssociation": { + "Type": "AWS::EC2::SubnetRouteTableAssociation", + "Properties": { + "RouteTableId": { + "Ref": "PrivateRouteTable1" + }, + "SubnetId": { + "Ref": "PrivateSubnet1" + } + } + }, + "PrivateRouteTable2": { + "Type": "AWS::EC2::RouteTable", + "Properties": { + "VpcId": { + "Ref": "VPC" + }, + "Tags": [ + { + "Key": "Name", + "Value": { + "Fn::Sub": "${EnvironmentName} Private Routes (AZ2)" + } + } + ] + } + }, + "DefaultPrivateRoute2": { + "Type": "AWS::EC2::Route", + "Properties": { + "RouteTableId": { + "Ref": "PrivateRouteTable2" + }, + "DestinationCidrBlock": "0.0.0.0/0", + "NatGatewayId": { + "Ref": "NatGateway2" + } + } + }, + "PrivateSubnet2RouteTableAssociation": { + "Type": "AWS::EC2::SubnetRouteTableAssociation", + "Properties": { + "RouteTableId": { + "Ref": "PrivateRouteTable2" + }, + "SubnetId": { + "Ref": "PrivateSubnet2" + } + } + }, + "EC2SecurityGroup": { + "Type": "AWS::EC2::SecurityGroup", + "Properties": { + "GroupDescription": "SSH, Port 80, Database", + "VpcId": { + "Ref": "VPC" + }, + "SecurityGroupIngress": [ + { + "IpProtocol": "tcp", + "FromPort": 22, + "ToPort": 22, + "CidrIp": "0.0.0.0/0" + }, + { + "IpProtocol": "tcp", + "FromPort": 5432, + "ToPort": 5432, + "CidrIp": "0.0.0.0/0" + }, + { + "IpProtocol": "tcp", + "FromPort": 8082, + 
"ToPort": 8082, + "CidrIp": "0.0.0.0/0" + }, + { + "IpProtocol": "tcp", + "FromPort": 80, + "ToPort": 80, + "SourceSecurityGroupId": { + "Ref": "ELBSecurityGroup" + } + } + ] + } + }, + "ELBSecurityGroup": { + "Type": "AWS::EC2::SecurityGroup", + "Properties": { + "GroupDescription": "SSH and Port 80", + "VpcId": { + "Ref": "VPC" + }, + "SecurityGroupIngress": [ + { + "IpProtocol": "tcp", + "FromPort": 80, + "ToPort": 80, + "CidrIp": "0.0.0.0/0" + } + ] + } + }, + "BastionInstance": { + "Type": "AWS::EC2::Instance", + "Properties": { + "ImageId": { + "Fn::FindInMap": [ + "RegionToAmazonAMI", + { + "Ref": "AWS::Region" + }, + "HVM64" + ] + }, + "InstanceInitiatedShutdownBehavior": "stop", + "InstanceType": "t2.medium", + "KeyName": { + "Ref": "SSHKeyName" + }, + "Monitoring": "true", + "NetworkInterfaces": [ + { + "AssociatePublicIpAddress": "true", + "DeviceIndex": "0", + "GroupSet": [ + { + "Ref": "EC2SecurityGroup" + } + ], + "SubnetId": { + "Ref": "PublicSubnet1" + } + } + ], + "Tags": [ + { + "Key": "Name", + "Value": "bastion" + } + ], + "Tenancy": "default" + } + }, + "RTPriInstance": { + "Type": "AWS::EC2::Instance", + "Properties": { + "ImageId": { + "Fn::FindInMap": [ + "RegionToAmazonAMI", + { + "Ref": "AWS::Region" + }, + "HVM64" + ] + }, + "InstanceInitiatedShutdownBehavior": "stop", + "InstanceType": "t2.medium", + "KeyName": { + "Ref": "SSHKeyName" + }, + "Monitoring": "true", + "NetworkInterfaces": [ + { + "AssociatePublicIpAddress": "false", + "DeviceIndex": "0", + "GroupSet": [ + { + "Ref": "EC2SecurityGroup" + } + ], + "SubnetId": { + "Ref": "PrivateSubnet1" + } + } + ], + "Tags": [ + { + "Key": "Name", + "Value": "rtprimary" + } + ], + "Tenancy": "default" + } + }, + "RTSecInstance": { + "Type": "AWS::EC2::Instance", + "Properties": { + "ImageId": { + "Fn::FindInMap": [ + "RegionToAmazonAMI", + { + "Ref": "AWS::Region" + }, + "HVM64" + ] + }, + "InstanceInitiatedShutdownBehavior": "stop", + "InstanceType": "t2.medium", + "KeyName": { + "Ref": 
"SSHKeyName" + }, + "Monitoring": "true", + "NetworkInterfaces": [ + { + "AssociatePublicIpAddress": "false", + "DeviceIndex": "0", + "GroupSet": [ + { + "Ref": "EC2SecurityGroup" + } + ], + "SubnetId": { + "Ref": "PrivateSubnet2" + } + } + ], + "Tags": [ + { + "Key": "Name", + "Value": "rtsecondary" + } + ], + "Tenancy": "default" + } + }, + "XrayInstance": { + "Type": "AWS::EC2::Instance", + "Properties": { + "ImageId": { + "Fn::FindInMap": [ + "RegionToAmazonAMI", + { + "Ref": "AWS::Region" + }, + "HVM64" + ] + }, + "InstanceInitiatedShutdownBehavior": "stop", + "InstanceType": "t2.medium", + "KeyName": { + "Ref": "SSHKeyName" + }, + "Monitoring": "true", + "NetworkInterfaces": [ + { + "AssociatePublicIpAddress": "false", + "DeviceIndex": "0", + "GroupSet": [ + { + "Ref": "EC2SecurityGroup" + } + ], + "SubnetId": { + "Ref": "PrivateSubnet1" + } + } + ], + "Tags": [ + { + "Key": "Name", + "Value": "xray" + } + ], + "Tenancy": "default" + } + }, + "DBInstance": { + "Type": "AWS::EC2::Instance", + "Properties": { + "ImageId": { + "Fn::FindInMap": [ + "RegionToAmazonAMI", + { + "Ref": "AWS::Region" + }, + "HVM64" + ] + }, + "InstanceInitiatedShutdownBehavior": "stop", + "InstanceType": "t2.medium", + "KeyName": { + "Ref": "SSHKeyName" + }, + "Monitoring": "true", + "NetworkInterfaces": [ + { + "AssociatePublicIpAddress": "false", + "DeviceIndex": "0", + "GroupSet": [ + { + "Ref": "EC2SecurityGroup" + } + ], + "SubnetId": { + "Ref": "PrivateSubnet1" + } + } + ], + "Tags": [ + { + "Key": "Name", + "Value": "database" + } + ], + "Tenancy": "default" + } + }, + "EC2TargetGroup": { + "Type": "AWS::ElasticLoadBalancingV2::TargetGroup", + "Properties": { + "HealthCheckIntervalSeconds": 30, + "HealthCheckProtocol": "HTTP", + "HealthCheckTimeoutSeconds": 15, + "HealthyThresholdCount": 2, + "Matcher": { + "HttpCode": "200,302" + }, + "Name": "EC2TargetGroup", + "Port": 80, + "Protocol": "HTTP", + "TargetGroupAttributes": [ + { + "Key": "deregistration_delay.timeout_seconds", 
+ "Value": "20" + } + ], + "Targets": [ + { + "Id": { + "Ref": "RTPriInstance" + } + }, + { + "Id": { + "Ref": "RTSecInstance" + }, + "Port": 80 + } + ], + "UnhealthyThresholdCount": 3, + "VpcId": { + "Ref": "VPC" + }, + "Tags": [ + { + "Key": "Name", + "Value": "EC2TargetGroup" + }, + { + "Key": "Port", + "Value": 80 + } + ] + } + }, + "ALBListener": { + "Type": "AWS::ElasticLoadBalancingV2::Listener", + "Properties": { + "DefaultActions": [ + { + "Type": "forward", + "TargetGroupArn": { + "Ref": "EC2TargetGroup" + } + } + ], + "LoadBalancerArn": { + "Ref": "ApplicationLoadBalancer" + }, + "Port": 80, + "Protocol": "HTTP" + } + }, + "ApplicationLoadBalancer": { + "Type": "AWS::ElasticLoadBalancingV2::LoadBalancer", + "Properties": { + "Scheme": "internet-facing", + "Subnets": [ + { + "Ref": "PublicSubnet1" + }, + { + "Ref": "PublicSubnet2" + } + ], + "SecurityGroups": [ + { + "Ref": "ELBSecurityGroup" + } + ] + } + } + }, + + "Outputs": { + "VPC": { + "Description": "Virtual Private Cloud", + "Value": { + "Ref": "VPC" + } + }, + "ALBHostName": { + "Description": "Application Load Balancer Hostname", + "Value": { + "Fn::GetAtt": [ + "ApplicationLoadBalancer", + "DNSName" + ] + } + }, + "BastionInstancePublic": { + "Description": "Bastion", + "Value": { "Fn::GetAtt" : [ "BastionInstance", "PublicIp" ]} + }, + "BastionInstancePrivate": { + "Description": "Bastion", + "Value": { "Fn::GetAtt" : [ "BastionInstance", "PrivateIp" ]} + }, + "RTPriInstancePrivate": { + "Description": "RTPriInstance", + "Value": { "Fn::GetAtt" : [ "RTPriInstance", "PrivateIp" ]} + }, + "RTSecInstancePrivate": { + "Description": "RTSecInstance", + "Value": { "Fn::GetAtt" : [ "RTSecInstance", "PrivateIp" ]} + }, + "XrayInstancePrivate": { + "Description": "XrayInstance", + "Value": { "Fn::GetAtt" : [ "XrayInstance", "PrivateIp" ]} + }, + "DBInstancePrivate": { + "Description": "DBInstance", + "Value": { "Fn::GetAtt" : [ "DBInstance", "PrivateIp" ]} + } + } +} \ No newline at end of file diff 
--git a/Ansible/infra/azure/lb-rt-xray-ha.json b/Ansible/infra/azure/lb-rt-xray-ha.json new file mode 100644 index 0000000..1211d17 --- /dev/null +++ b/Ansible/infra/azure/lb-rt-xray-ha.json @@ -0,0 +1,679 @@ +{ + "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": { + "vnetName": { + "type": "string", + "defaultValue": "vnet01", + "metadata": { + "description": "Name of new vnet to deploy into." + } + }, + "vnetAddressRange": { + "type": "string", + "defaultValue": "10.0.0.0/16", + "metadata": { + "description": "IP prefix for available addresses in vnet address space." + } + }, + "subnetAddressRange": { + "type": "string", + "defaultValue": "10.0.0.0/24", + "metadata": { + "description": "Subnet IP prefix MUST be within vnet IP prefix address space." + } + }, + "location": { + "type": "string", + "defaultValue": "[resourceGroup().location]", + "metadata": { + "description": "Location for all resources." + } + }, + "adminPublicKey": { + "type": "string", + "metadata": { + "description": "The ssh public key for the VMs." + } + }, + "sizeOfDiskInGB": { + "type": "int", + "defaultValue": 128, + "minValue": 128, + "maxValue": 1024, + "metadata": { + "description": "Size of data disk in GB 128-1024" + } + }, + "vmSize": { + "type": "string", + "defaultValue": "Standard_D2s_v3", + "metadata": { + "description": "Size of the VMs" + } + }, + "numberOfArtifactory": { + "type": "int", + "defaultValue": 1, + "minValue": 1, + "maxValue": 5, + "metadata": { + "description": "Number of Artifactory servers." + } + }, + "numberOfXray": { + "type": "int", + "defaultValue": 1, + "minValue": 1, + "maxValue": 5, + "metadata": { + "description": "Number of Xray servers." + } + }, + "numberOfDb": { + "type": "int", + "defaultValue": 1, + "minValue": 1, + "maxValue": 2, + "metadata": { + "description": "Number of database servers." 
+ } + } + }, + "variables": { + "vnetName": "[parameters('vnetName')]", + "vnetAddressRange": "[parameters('vnetAddressRange')]", + "subnetAddressRange": "[parameters('subnetAddressRange')]", + "subnetName": "mainSubnet", + "loadBalancerName": "LB", + "loadBalancerIp": "lbIp", + "numberOfArtifactory": "[parameters('numberOfArtifactory')]", + "numberOfXray": "[parameters('numberOfXray')]", + "numberOfDb": "[parameters('numberOfDb')]", + "availabilitySetName": "availSet", + "vmArtPri": "vmArtPri", + "vmArtSec": "vmArtSec", + "vmXray": "vmXray", + "vmDb": "vmDb", + "storageAccountNameDiag": "[concat('diag',uniqueString(resourceGroup().id))]", + "subnet-id": "[resourceId('Microsoft.Network/virtualNetworks/subnets',variables('vnetName'),variables('subnetName'))]", + "imagePublisher": "Canonical", + "imageOffer": "UbuntuServer", + "imageSku": "16.04-LTS", + "mainNsg": "mainNsg", + "adminUsername": "ubuntu" + }, + "resources": [ + { + "apiVersion": "2019-08-01", + "type": "Microsoft.Network/publicIPAddresses", + "name": "[variables('loadBalancerIp')]", + "location": "[parameters('location')]", + "properties": { + "publicIPAllocationMethod": "Static" + } + }, + { + "type": "Microsoft.Compute/availabilitySets", + "name": "[variables('availabilitySetName')]", + "apiVersion": "2019-12-01", + "location": "[parameters('location')]", + "sku": { + "name": "Aligned" + }, + "properties": { + "platformFaultDomainCount": 2, + "platformUpdateDomainCount": 2 + } + }, + { + "apiVersion": "2019-06-01", + "type": "Microsoft.Storage/storageAccounts", + "name": "[variables('storageAccountNameDiag')]", + "location": "[parameters('location')]", + "kind": "StorageV2", + "sku": { + "name": "Standard_LRS" + } + }, + { + "comments": "Simple Network Security Group for subnet [Subnet]", + "type": "Microsoft.Network/networkSecurityGroups", + "apiVersion": "2019-08-01", + "name": "[variables('mainNsg')]", + "location": "[parameters('location')]", + "properties": { + "securityRules": [ + { + "name": 
"allow-ssh", + "properties": { + "description": "Allow SSH", + "protocol": "TCP", + "sourcePortRange": "*", + "destinationPortRange": "22", + "sourceAddressPrefix": "*", + "destinationAddressPrefix": "*", + "access": "Allow", + "priority": 100, + "direction": "Inbound", + "sourcePortRanges": [], + "destinationPortRanges": [], + "sourceAddressPrefixes": [], + "destinationAddressPrefixes": [] + } + }, + { + "name": "allow-http", + "properties": { + "description": "Allow HTTP", + "protocol": "TCP", + "sourcePortRange": "*", + "destinationPortRange": "80", + "sourceAddressPrefix": "*", + "destinationAddressPrefix": "*", + "access": "Allow", + "priority": 110, + "direction": "Inbound", + "sourcePortRanges": [], + "destinationPortRanges": [], + "sourceAddressPrefixes": [], + "destinationAddressPrefixes": [] + } + } + ] + } + }, + { + "apiVersion": "2019-08-01", + "type": "Microsoft.Network/virtualNetworks", + "name": "[variables('vnetName')]", + "location": "[parameters('location')]", + "dependsOn": [ + "[resourceId('Microsoft.Network/networkSecurityGroups', variables('mainNsg'))]" + ], + "properties": { + "addressSpace": { + "addressPrefixes": [ + "[variables('vnetAddressRange')]" + ] + }, + "subnets": [ + { + "name": "[variables('subnetName')]", + "properties": { + "addressPrefix": "[variables('subnetAddressRange')]", + "networkSecurityGroup": { + "id": "[resourceId('Microsoft.Network/networkSecurityGroups', variables('mainNsg'))]" + } + } + } + ] + } + }, + { + "apiVersion": "2018-10-01", + "name": "[variables('loadBalancerName')]", + "type": "Microsoft.Network/loadBalancers", + "location": "[parameters('location')]", + "dependsOn": [ + "[concat('Microsoft.Network/publicIPAddresses/',variables('loadBalancerIp'))]" + ], + "properties": { + "frontendIpConfigurations": [ + { + "name": "LBFE", + "properties": { + "publicIPAddress": { + "id": "[resourceId('Microsoft.Network/publicIPAddresses',variables('loadBalancerIp'))]" + } + } + } + ], + "backendAddressPools": [ + { + 
"name": "LBArt" + } + ], + "inboundNatRules": [ + { + "name": "ssh", + "properties": { + "frontendIPConfiguration": { + "id": "[resourceId('Microsoft.Network/loadBalancers/frontendIPConfigurations',variables('loadBalancerName'),'LBFE')]" + }, + "frontendPort": 22, + "backendPort": 22, + "enableFloatingIP": false, + "idleTimeoutInMinutes": 4, + "protocol": "Tcp", + "enableTcpReset": false + } + } + ], + "loadBalancingRules": [ + { + "properties": { + "frontendIPConfiguration": { + "id": "[resourceId('Microsoft.Network/loadBalancers/frontendIPConfigurations', variables('loadBalancerName'), 'LBFE')]" + }, + "backendAddressPool": { + "id": "[resourceId('Microsoft.Network/loadBalancers/backendAddressPools', variables('loadBalancerName'), 'LBArt')]" + }, + "probe": { + "id": "[resourceId('Microsoft.Network/loadBalancers/probes', variables('loadBalancerName'), 'lbprobe')]" + }, + "protocol": "Tcp", + "frontendPort": 80, + "backendPort": 80, + "idleTimeoutInMinutes": 15 + }, + "name": "lbrule" + } + ], + "probes": [ + { + "properties": { + "protocol": "Tcp", + "port": 80, + "intervalInSeconds": 15, + "numberOfProbes": 2 + }, + "name": "lbprobe" + } + ] + } + }, + { + "apiVersion": "2019-08-01", + "type": "Microsoft.Network/networkInterfaces", + "name": "[variables('vmArtPri')]", + "location": "[parameters('location')]", + "dependsOn": [ + "[variables('vnetName')]", + "[variables('loadBalancerName')]" + ], + "properties": { + "ipConfigurations": [ + { + "name": "ipconfig", + "properties": { + "privateIPAllocationMethod": "Dynamic", + "subnet": { + "id": "[variables('subnet-id')]" + }, + "loadBalancerBackendAddressPools": [ + { + "id": "[resourceId('Microsoft.Network/loadBalancers/backendAddressPools',variables('loadBalancerName'),'LBArt')]" + } + ], + "loadBalancerInboundNatRules": [ + { + "id": "[resourceId('Microsoft.Network/loadBalancers/inboundNatRules', variables('loadBalancerName'), 'ssh')]" + } + ] + } + } + ] + } + }, + { + "apiVersion": "2019-08-01", + "type": 
"Microsoft.Network/networkInterfaces", + "name": "[concat(variables('vmArtSec'),copyindex())]", + "copy": { + "name": "netIntLoop", + "count": "[sub(variables('numberOfArtifactory'),1)]" + }, + "location": "[parameters('location')]", + "dependsOn": [ + "[variables('vnetName')]", + "[variables('loadBalancerName')]" + ], + "properties": { + "ipConfigurations": [ + { + "name": "ipconfig", + "properties": { + "privateIPAllocationMethod": "Dynamic", + "subnet": { + "id": "[variables('subnet-id')]" + }, + "loadBalancerBackendAddressPools": [ + { + "id": "[resourceId('Microsoft.Network/loadBalancers/backendAddressPools',variables('loadBalancerName'),'LBArt')]" + } + ] + } + } + ] + } + }, + { + "apiVersion": "2019-08-01", + "type": "Microsoft.Network/networkInterfaces", + "name": "[concat(variables('vmXray'),copyindex())]", + "copy": { + "name": "netXrLoop", + "count": "[variables('numberOfXray')]" + }, + "location": "[parameters('location')]", + "dependsOn": [ + "[variables('vnetName')]" + ], + "properties": { + "ipConfigurations": [ + { + "name": "ipconfig", + "properties": { + "privateIPAllocationMethod": "Dynamic", + "subnet": { + "id": "[variables('subnet-id')]" + } + } + } + ] + } + }, + { + "apiVersion": "2019-08-01", + "type": "Microsoft.Network/networkInterfaces", + "name": "[concat(variables('vmDb'),copyindex())]", + "copy": { + "name": "netDbLoop", + "count": "[variables('numberOfDb')]" + }, + "location": "[parameters('location')]", + "dependsOn": [ + "[variables('vnetName')]" + ], + "properties": { + "ipConfigurations": [ + { + "name": "ipconfig", + "properties": { + "privateIPAllocationMethod": "Dynamic", + "subnet": { + "id": "[variables('subnet-id')]" + } + } + } + ] + } + }, + { + "apiVersion": "2019-12-01", + "type": "Microsoft.Compute/virtualMachines", + "name": "[variables('vmArtPri')]", + "location": "[parameters('location')]", + "dependsOn": [ + "[resourceId('Microsoft.Storage/storageAccounts', variables('storageAccountNameDiag'))]", + 
"[resourceId('Microsoft.Network/networkInterfaces', variables('vmArtPri'))]", + "[resourceId('Microsoft.Compute/availabilitySets', variables('availabilitySetName'))]" + ], + "properties": { + "availabilitySet": { + "id": "[resourceId('Microsoft.Compute/availabilitySets', variables('availabilitySetName'))]" + }, + "hardwareProfile": { + "vmSize": "[parameters('vmSize')]" + }, + "osProfile": { + "computerName": "[variables('vmArtPri')]", + "adminUsername": "[variables('adminUsername')]", + "linuxConfiguration": { + "disablePasswordAuthentication": true, + "ssh": { + "publicKeys": [ + { + "path": "[concat('/home/', variables('adminUsername'), '/.ssh/authorized_keys')]", + "keyData": "[parameters('adminPublicKey')]" + } + ] + } + } + }, + "storageProfile": { + "imageReference": { + "publisher": "[variables('imagePublisher')]", + "offer": "[variables('imageOffer')]", + "sku": "[variables('imageSku')]", + "version": "latest" + }, + "osDisk": { + "createOption": "FromImage" + } + }, + "networkProfile": { + "networkInterfaces": [ + { + "id": "[resourceId('Microsoft.Network/networkInterfaces',variables('vmArtPri'))]" + } + ] + }, + "diagnosticsProfile": { + "bootDiagnostics": { + "enabled": true, + "storageUri": "[reference(variables('storageAccountNameDiag'), '2019-06-01').primaryEndpoints.blob]" + } + } + } + }, + { + "apiVersion": "2019-12-01", + "type": "Microsoft.Compute/virtualMachines", + "name": "[concat(variables('vmArtSec'), copyindex())]", + "copy": { + "name": "virtualMachineLoop", + "count": "[sub(variables('numberOfArtifactory'),1)]" + }, + "location": "[parameters('location')]", + "dependsOn": [ + "[resourceId('Microsoft.Storage/storageAccounts', variables('storageAccountNameDiag'))]", + "[resourceId('Microsoft.Network/networkInterfaces', concat(variables('vmArtSec'),copyindex()))]", + "[resourceId('Microsoft.Compute/availabilitySets', variables('availabilitySetName'))]" + ], + "properties": { + "availabilitySet": { + "id": 
"[resourceId('Microsoft.Compute/availabilitySets', variables('availabilitySetName'))]" + }, + "hardwareProfile": { + "vmSize": "[parameters('vmSize')]" + }, + "osProfile": { + "computerName": "[concat(variables('vmArtSec'), copyindex())]", + "adminUsername": "[variables('adminUsername')]", + "linuxConfiguration": { + "disablePasswordAuthentication": true, + "ssh": { + "publicKeys": [ + { + "path": "[concat('/home/', variables('adminUsername'), '/.ssh/authorized_keys')]", + "keyData": "[parameters('adminPublicKey')]" + } + ] + } + } + }, + "storageProfile": { + "imageReference": { + "publisher": "[variables('imagePublisher')]", + "offer": "[variables('imageOffer')]", + "sku": "[variables('imageSku')]", + "version": "latest" + }, + "osDisk": { + "createOption": "FromImage" + } + }, + "networkProfile": { + "networkInterfaces": [ + { + "id": "[resourceId('Microsoft.Network/networkInterfaces',concat(variables('vmArtSec'),copyindex()))]" + } + ] + }, + "diagnosticsProfile": { + "bootDiagnostics": { + "enabled": true, + "storageUri": "[reference(variables('storageAccountNameDiag'), '2019-06-01').primaryEndpoints.blob]" + } + } + } + }, + { + "apiVersion": "2019-12-01", + "type": "Microsoft.Compute/virtualMachines", + "name": "[concat(variables('vmXray'), copyindex())]", + "copy": { + "name": "virtualMachineLoop", + "count": "[variables('numberOfXray')]" + }, + "location": "[parameters('location')]", + "dependsOn": [ + "[resourceId('Microsoft.Storage/storageAccounts', variables('storageAccountNameDiag'))]", + "[resourceId('Microsoft.Network/networkInterfaces', concat(variables('vmXray'),copyindex()))]", + "[resourceId('Microsoft.Compute/availabilitySets', variables('availabilitySetName'))]" + ], + "properties": { + "availabilitySet": { + "id": "[resourceId('Microsoft.Compute/availabilitySets', variables('availabilitySetName'))]" + }, + "hardwareProfile": { + "vmSize": "[parameters('vmSize')]" + }, + "osProfile": { + "computerName": "[concat(variables('vmXray'), 
copyindex())]", + "adminUsername": "[variables('adminUsername')]", + "linuxConfiguration": { + "disablePasswordAuthentication": true, + "ssh": { + "publicKeys": [ + { + "path": "[concat('/home/', variables('adminUsername'), '/.ssh/authorized_keys')]", + "keyData": "[parameters('adminPublicKey')]" + } + ] + } + } + }, + "storageProfile": { + "imageReference": { + "publisher": "[variables('imagePublisher')]", + "offer": "[variables('imageOffer')]", + "sku": "[variables('imageSku')]", + "version": "latest" + }, + "osDisk": { + "createOption": "FromImage" + } + }, + "networkProfile": { + "networkInterfaces": [ + { + "id": "[resourceId('Microsoft.Network/networkInterfaces',concat(variables('vmXray'),copyindex()))]" + } + ] + }, + "diagnosticsProfile": { + "bootDiagnostics": { + "enabled": true, + "storageUri": "[reference(variables('storageAccountNameDiag'), '2019-06-01').primaryEndpoints.blob]" + } + } + } + }, + { + "apiVersion": "2019-12-01", + "type": "Microsoft.Compute/virtualMachines", + "name": "[concat(variables('vmDb'), copyindex())]", + "copy": { + "name": "virtualMachineLoop", + "count": "[variables('numberOfDb')]" + }, + "location": "[parameters('location')]", + "dependsOn": [ + "[resourceId('Microsoft.Storage/storageAccounts', variables('storageAccountNameDiag'))]", + "[resourceId('Microsoft.Network/networkInterfaces', concat(variables('vmDb'),copyindex()))]", + "[resourceId('Microsoft.Compute/availabilitySets', variables('availabilitySetName'))]" + ], + "properties": { + "availabilitySet": { + "id": "[resourceId('Microsoft.Compute/availabilitySets', variables('availabilitySetName'))]" + }, + "hardwareProfile": { + "vmSize": "[parameters('vmSize')]" + }, + "osProfile": { + "computerName": "[concat(variables('vmDb'), copyindex())]", + "adminUsername": "[variables('adminUsername')]", + "linuxConfiguration": { + "disablePasswordAuthentication": true, + "ssh": { + "publicKeys": [ + { + "path": "[concat('/home/', variables('adminUsername'), 
'/.ssh/authorized_keys')]", + "keyData": "[parameters('adminPublicKey')]" + } + ] + } + } + }, + "storageProfile": { + "imageReference": { + "publisher": "[variables('imagePublisher')]", + "offer": "[variables('imageOffer')]", + "sku": "[variables('imageSku')]", + "version": "latest" + }, + "osDisk": { + "createOption": "FromImage" + } + }, + "networkProfile": { + "networkInterfaces": [ + { + "id": "[resourceId('Microsoft.Network/networkInterfaces',concat(variables('vmDb'),copyindex()))]" + } + ] + }, + "diagnosticsProfile": { + "bootDiagnostics": { + "enabled": true, + "storageUri": "[reference(variables('storageAccountNameDiag'), '2019-06-01').primaryEndpoints.blob]" + } + } + } + } + ], + "outputs": { + "lbIp": { + "type": "string", + "value": "[reference(resourceId('Microsoft.Network/publicIPAddresses', variables('loadBalancerIp'))).ipAddress]" + }, + "vmArtPriIp": { + "type": "string", + "value": "[reference(resourceId('Microsoft.Network/networkInterfaces', variables('vmArtPri'))).ipConfigurations[0].properties.privateIPAddress]" + }, + "vmArtSecArrIp": { + "type": "array", + "copy": { + "count": "[sub(variables('numberOfArtifactory'),1)]", + "input": "[reference(resourceId('Microsoft.Network/networkInterfaces', concat(variables('vmArtSec'),copyindex()))).ipConfigurations[0].properties.privateIPAddress]" + } + }, + "vmXrayArrIp": { + "type": "array", + "copy": { + "count": "[variables('numberOfXray')]", + "input": "[reference(resourceId('Microsoft.Network/networkInterfaces', concat(variables('vmXray'),copyindex()))).ipConfigurations[0].properties.privateIPAddress]" + } + }, + "vmDbArrIp": { + "type": "array", + "copy": { + "count": "[variables('numberOfDb')]", + "input": "[reference(resourceId('Microsoft.Network/networkInterfaces', concat(variables('vmDb'),copyindex()))).ipConfigurations[0].properties.privateIPAddress]" + } + } + } +} \ No newline at end of file diff --git a/Ansible/pipelines.yaml b/Ansible/pipelines.yaml new file mode 100644 index 
0000000..3fd14a5 --- /dev/null +++ b/Ansible/pipelines.yaml @@ -0,0 +1,102 @@ +resources: + - name: ansibleRepo + type: GitRepo + configuration: + gitProvider: jefferyfryGithub + path: jefferyfry/JFrog-Cloud-Installers +pipelines: + - name: ansible_aws_azure_automation_pipeline + steps: + - name: execute_aws_ansible_playbook + type: Bash + configuration: + runtime: + type: image + image: + auto: + language: java + versions: + - "8" + integrations: + - name: ansibleAwsKeys + - name: ansibleEnvVars + - name: ansiblePrivateKey + inputResources: + - name: ansibleRepo + execution: + onStart: + - echo "Executing AWS Ansible playbook..." + onExecute: + - sudo apt-get update + - sudo apt-get install gnupg2 + - sudo apt-get install software-properties-common + - sudo apt-add-repository --yes --update ppa:ansible/ansible + - sudo apt -y --allow-unauthenticated install ansible + - sudo pip install packaging + - sudo pip install boto3 botocore + - cd ../dependencyState/resources/ansibleRepo + - echo 'Setting environment variables...' + - export artifactory_license1="$int_ansibleEnvVars_artifactory_license1" + - export artifactory_license2="$int_ansibleEnvVars_artifactory_license2" + - export artifactory_license3="$int_ansibleEnvVars_artifactory_license3" + - export master_key="$int_ansibleEnvVars_master_key" + - export join_key="$int_ansibleEnvVars_join_key" + - export ssh_public_key_name="$int_ansibleEnvVars_ssh_public_key_name" + - export cfn_template="$int_ansibleEnvVars_cfn_template" + - export stack_name="$int_ansibleEnvVars_stack_name" + - export AWS_ACCESS_KEY_ID="$int_ansibleEnvVars_AWS_ACCESS_KEY_ID" + - export AWS_SECRET_KEY="$int_ansibleEnvVars_AWS_SECRET_KEY" + - printenv + - eval $(ssh-agent -s) + - ssh-add <(echo "$int_ansiblePrivateKey_key") + - ansible-playbook Ansible/test/aws/playbook.yaml + onComplete: + - echo "AWS Ansible playbook complete." 
+ - name: execute_azure_ansible_playbook + type: Bash + configuration: + runtime: + type: image + image: + auto: + language: java + versions: + - "8" + integrations: + - name: ansibleAzureKeys + - name: ansibleEnvVars + - name: ansiblePrivateKey + inputResources: + - name: ansibleRepo + execution: + onStart: + - echo "Executing Azure Ansible playbook..." + onExecute: + - sudo apt-get update + - sudo apt-get install gnupg2 + - sudo apt-get install software-properties-common + - sudo apt-add-repository --yes --update ppa:ansible/ansible + - sudo apt -y --allow-unauthenticated install ansible + - sudo pip install packaging + - sudo pip install msrestazure + - sudo pip install ansible[azure] + - cd ../dependencyState/resources/ansibleRepo + - echo 'Setting environment variables...' + - export artifactory_license1="$int_ansibleEnvVars_artifactory_license1" + - export artifactory_license2="$int_ansibleEnvVars_artifactory_license2" + - export artifactory_license3="$int_ansibleEnvVars_artifactory_license3" + - export master_key="$int_ansibleEnvVars_master_key" + - export join_key="$int_ansibleEnvVars_join_key" + - export ssh_public_key="$int_ansibleEnvVars_ssh_public_key" + - export arm_template="$int_ansibleEnvVars_arm_template" + - export azure_resource_group="$int_ansibleEnvVars_azure_resource_group" + - export clientId="$int_ansibleAzureKeys_appId" + - export clientSecret="$int_ansibleAzureKeys_password" + - export tenantId="$int_ansibleAzureKeys_tenant" + - printenv + - eval $(ssh-agent -s) + - ssh-add <(echo "$int_ansiblePrivateKey_key") + - az login --service-principal -u "$clientId" -p "$clientSecret" --tenant "$tenantId" + - ansible-playbook Ansible/test/azure/playbook.yaml + onComplete: + - echo "Azure Ansible playbook complete." 
\ No newline at end of file diff --git a/Ansible/project/rt-ha/hosts.yml b/Ansible/project/rt-ha/hosts.yml new file mode 100644 index 0000000..6246c53 --- /dev/null +++ b/Ansible/project/rt-ha/hosts.yml @@ -0,0 +1,51 @@ +--- +all: + vars: + ansible_user: "ubuntu" + ansible_ssh_private_key_file: "/Users/jefff/.ssh/ansible-priv.pem" + children: + database: + hosts: + #artifactory database + 52.86.32.79: + db_users: + - { db_user: "artifactory", db_password: "Art1fAct0ry" } + dbs: + - { db_name: "artifactory", db_owner: "artifactory" } + artifactory: + vars: + artifactory_ha_enabled: true + master_key: "c97b862469de0d94fbb7d48130637a5a" + join_key: "9bcca98f375c0728d907cc6ee39d4f02" + db_download_url: "https://jdbc.postgresql.org/download/postgresql-42.2.12.jar" + db_type: "postgresql" + db_driver: "org.postgresql.Driver" + db_url: "jdbc:postgresql://10.0.0.160:5432/artifactory" + db_user: "artifactory" + db_password: "Art1fAct0ry" + server_name: "ec2-100-25-104-198.compute-1.amazonaws.com" + certificate: | + -----BEGIN CERTIFICATE----- + x + -----END CERTIFICATE----- + certificate_key: | + -----BEGIN PRIVATE KEY----- + x + -----END PRIVATE KEY----- + children: + primary: + hosts: + 100.25.104.198: + artifactory_is_primary: true + artifactory_license1: x + artifactory_license2: x + artifactory_license3: x + artifactory_license4: x + artifactory_license5: x + secondary: + hosts: + 54.160.107.157: + 35.153.79.44: + vars: + artifactory_is_primary: false + diff --git a/Ansible/project/rt-ha/playbook.yml b/Ansible/project/rt-ha/playbook.yml new file mode 100644 index 0000000..ae9639e --- /dev/null +++ b/Ansible/project/rt-ha/playbook.yml @@ -0,0 +1,11 @@ +--- +- hosts: database + gather_facts: true + roles: + - jfrog/ansible/roles/postgres + +- hosts: primary:secondary + gather_facts: true + roles: + - jfrog/ansible/roles/artifactory + - jfrog/ansible/roles/artifactory-nginx-ssl \ No newline at end of file diff --git a/Ansible/project/rt-xray-auto-keys/hosts.yml 
b/Ansible/project/rt-xray-auto-keys/hosts.yml new file mode 100644 index 0000000..76babec --- /dev/null +++ b/Ansible/project/rt-xray-auto-keys/hosts.yml @@ -0,0 +1,40 @@ +--- +all: + vars: + ansible_user: "ubuntu" + ansible_ssh_private_key_file: "/Users/jefff/.ssh/ansible-priv.pem" + ansible_ssh_common_args: '-o ProxyCommand="ssh -o StrictHostKeyChecking=no -A ubuntu@13.82.225.20 -W %h:%p"' + children: + database: + hosts: + 34.239.107.0: + dbs: + - { db_name: "artifactory", db_owner: "artifactory" } + - { db_name: "xraydb", db_owner: "xray" } + db_users: + - { db_user: "artifactory", db_password: "Art1fAct0ry" } + - { db_user: "xray", db_password: "xray" } + artifactory: + hosts: + 54.237.207.135: + artifactory_license1: x + artifactory_license2: x + artifactory_license3: x + artifactory_license4: x + artifactory_license5: x + db_download_url: "https://jdbc.postgresql.org/download/postgresql-42.2.12.jar" + db_type: "postgresql" + db_driver: "org.postgresql.Driver" + db_url: "jdbc:postgresql://10.0.0.59:5432/artifactory" + db_user: "artifactory" + db_password: "Art1fAct0ry" + server_name: "ec2-54-237-207-135.compute-1.amazonaws.com" + xray: + hosts: + 100.25.104.174: + jfrog_url: "http://ec2-54-237-207-135.compute-1.amazonaws.com" + db_type: "postgresql" + db_driver: "org.postgresql.Driver" + db_url: "postgres://10.0.0.59:5432/xraydb?sslmode=disable" + db_user: "xray" + db_password: "xray" diff --git a/Ansible/project/rt-xray-auto-keys/playbook.yml b/Ansible/project/rt-xray-auto-keys/playbook.yml new file mode 100644 index 0000000..10dc19d --- /dev/null +++ b/Ansible/project/rt-xray-auto-keys/playbook.yml @@ -0,0 +1,21 @@ +--- +- debug: + var: master_key + +- debug: + var: join_key + +- hosts: database + gather_facts: true + roles: + - jfrog/ansible/roles/postgres + +- hosts: artifactory + gather_facts: true + roles: + - jfrog/ansible/roles/artifactory + +- hosts: xray + gather_facts: true + roles: + - jfrog/ansible/roles/xray \ No newline at end of file diff 
--git a/Ansible/project/rt-xray-auto-keys/runAutoKeysPlaybook.sh b/Ansible/project/rt-xray-auto-keys/runAutoKeysPlaybook.sh new file mode 100755 index 0000000..0949cb0 --- /dev/null +++ b/Ansible/project/rt-xray-auto-keys/runAutoKeysPlaybook.sh @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +ansible-playbook -i hosts.yml playbook.yml --extra-vars "master_key=$(openssl rand -hex 16) join_key=$(openssl rand -hex 16)" \ No newline at end of file diff --git a/Ansible/project/rt-xray-ha-ssh-proxy/hosts.yml b/Ansible/project/rt-xray-ha-ssh-proxy/hosts.yml new file mode 100644 index 0000000..8651299 --- /dev/null +++ b/Ansible/project/rt-xray-ha-ssh-proxy/hosts.yml @@ -0,0 +1,60 @@ +--- +all: + vars: + ansible_user: "ubuntu" + ansible_ssh_private_key_file: "/Users/jefff/.ssh/ansible-priv.pem" + ansible_ssh_common_args: '-o ProxyCommand="ssh -o StrictHostKeyChecking=no -A ubuntu@13.82.225.20 -W %h:%p"' + children: + database: + hosts: + #artifactory database + 10.0.0.6: + db_users: + - { db_user: "artifactory", db_password: "Art1fAct0ry" } + dbs: + - { db_name: "artifactory", db_owner: "artifactory" } + #xray database + 10.0.0.4: + dbs: + - { db_name: "xraydb", db_owner: "xray" } + db_users: + - { db_user: "xray", db_password: "xray" } + artifactory: + vars: + artifactory_ha_enabled: true + master_key: "c97b862469de0d94fbb7d48130637a5a" + join_key: "9bcca98f375c0728d907cc6ee39d4f02" + db_download_url: "https://jdbc.postgresql.org/download/postgresql-42.2.12.jar" + db_type: "postgresql" + db_driver: "org.postgresql.Driver" + db_url: "jdbc:postgresql://10.0.0.6:5432/artifactory" + db_user: "artifactory" + db_password: "Art1fAct0ry" + server_name: "rt.13.82.225.208.xip.io" + children: + primary: + hosts: + 10.0.0.8: + artifactory_is_primary: true + artifactory_license1: x + artifactory_license2: x + artifactory_license3: x + artifactory_license4: x + artifactory_license5: x + secondary: + hosts: + 10.0.0.9: + vars: + artifactory_is_primary: false + xray: + vars: + jfrog_url: 
http://rt.13.82.225.208.xip.io/ + master_key: "c97b862469de0d94fbb7d48130637a5a" + join_key: "9bcca98f375c0728d907cc6ee39d4f02" + db_type: "postgresql" + db_driver: "org.postgresql.Driver" + db_url: "postgres://10.0.0.4:5432/xraydb?sslmode=disable" + db_user: "xray" + db_password: "xray" + hosts: + 10.0.0.5: diff --git a/Ansible/project/rt-xray-ha-ssh-proxy/playbook.yml b/Ansible/project/rt-xray-ha-ssh-proxy/playbook.yml new file mode 100644 index 0000000..e47c473 --- /dev/null +++ b/Ansible/project/rt-xray-ha-ssh-proxy/playbook.yml @@ -0,0 +1,15 @@ +--- +- hosts: database + gather_facts: true + roles: + - jfrog/ansible/roles/postgres + +- hosts: primary:secondary + gather_facts: true + roles: + - jfrog/ansible/roles/artifactory + +- hosts: xray + gather_facts: true + roles: + - jfrog/ansible/roles/xray \ No newline at end of file diff --git a/Ansible/project/rt-xray-ha/hosts.yml b/Ansible/project/rt-xray-ha/hosts.yml new file mode 100644 index 0000000..0a79555 --- /dev/null +++ b/Ansible/project/rt-xray-ha/hosts.yml @@ -0,0 +1,55 @@ +--- +all: + vars: + ansible_user: "ubuntu" + ansible_ssh_private_key_file: "/Users/jefff/.ssh/ansible-priv.pem" + children: + database: + hosts: + #artifactory database + 52.86.32.79: + dbs: + - { db_name: "artifactory", db_owner: "artifactory" } + db_users: + - { db_user: "artifactory", db_password: "Art1fAct0ry" } + #xray database + 100.25.152.93: + dbs: + - { db_name: "xraydb", db_owner: "xray" } + db_users: + - { db_user: "xray", db_password: "xray" } + artifactory: + vars: + artifactory_ha_enabled: true + master_key: "c97b862469de0d94fbb7d48130637a5a" + join_key: "9bcca98f375c0728d907cc6ee39d4f02" + db_download_url: "https://jdbc.postgresql.org/download/postgresql-42.2.12.jar" + db_type: "postgresql" + db_driver: "org.postgresql.Driver" + db_url: "jdbc:postgresql://10.0.0.51:5432/artifactory" + db_user: "artifactory" + db_password: "Art1fAct0ry" + server_name: "ec2-18-210-33-94.compute-1.amazonaws.com" + children: + primary: + 
hosts: + 18.210.33.94: + artifactory_is_primary: true + artifactory_license1: x + artifactory_license2: x + artifactory_license3: x + artifactory_license4: x + artifactory_license5: x + xray: + vars: + jfrog_url: http://ec2-18-210-33-94.compute-1.amazonaws.com + master_key: "c97b862469de0d94fbb7d48130637a5a" + join_key: "9bcca98f375c0728d907cc6ee39d4f02" + db_type: "postgresql" + db_driver: "org.postgresql.Driver" + db_url: "postgres://10.0.0.5:5432/xraydb?sslmode=disable" + db_user: "xray" + db_password: "xray" + hosts: +# 34.229.56.166: + 54.237.68.180: diff --git a/Ansible/project/rt-xray-ha/playbook.yml b/Ansible/project/rt-xray-ha/playbook.yml new file mode 100644 index 0000000..1f55bcf --- /dev/null +++ b/Ansible/project/rt-xray-ha/playbook.yml @@ -0,0 +1,15 @@ +--- +- hosts: database + gather_facts: true + roles: + - jfrog/ansible/roles/postgres + +- hosts: primary + gather_facts: true + roles: + - jfrog/ansible/roles/artifactory + +- hosts: xray + gather_facts: true + roles: + - jfrog/ansible/roles/xray \ No newline at end of file diff --git a/Ansible/project/rt-xray/hosts.yml b/Ansible/project/rt-xray/hosts.yml new file mode 100644 index 0000000..c553954 --- /dev/null +++ b/Ansible/project/rt-xray/hosts.yml @@ -0,0 +1,43 @@ +--- +all: + vars: + ansible_user: "ubuntu" + ansible_ssh_private_key_file: "/Users/jefff/.ssh/ansible-priv.pem" + children: + database: + hosts: + 34.239.107.0: + dbs: + - { db_name: "artifactory", db_owner: "artifactory" } + - { db_name: "xraydb", db_owner: "xray" } + db_users: + - { db_user: "artifactory", db_password: "Art1fAct0ry" } + - { db_user: "xray", db_password: "xray" } + artifactory: + hosts: + 54.237.207.135: + artifactory_license1: x + artifactory_license2: x + artifactory_license3: x + artifactory_license4: x + artifactory_license5: x + master_key: "c97b862469de0d94fbb7d48130637a5a" + join_key: "9bcca98f375c0728d907cc6ee39d4f02" + db_download_url: "https://jdbc.postgresql.org/download/postgresql-42.2.12.jar" + db_type: 
"postgresql" + db_driver: "org.postgresql.Driver" + db_url: "jdbc:postgresql://10.0.0.59:5432/artifactory" + db_user: "artifactory" + db_password: "Art1fAct0ry" + server_name: "ec2-54-237-207-135.compute-1.amazonaws.com" + xray: + hosts: + 100.25.104.174: + jfrog_url: "http://ec2-54-237-207-135.compute-1.amazonaws.com" + master_key: "c97b862469de0d94fbb7d48130637a5a" + join_key: "9bcca98f375c0728d907cc6ee39d4f02" + db_type: "postgresql" + db_driver: "org.postgresql.Driver" + db_url: "postgres://10.0.0.59:5432/xraydb?sslmode=disable" + db_user: "xray" + db_password: "xray" diff --git a/Ansible/project/rt-xray/playbook.yml b/Ansible/project/rt-xray/playbook.yml new file mode 100644 index 0000000..9dea61f --- /dev/null +++ b/Ansible/project/rt-xray/playbook.yml @@ -0,0 +1,15 @@ +--- +- hosts: database + gather_facts: true + roles: + - jfrog/ansible/roles/postgres + +- hosts: artifactory + gather_facts: true + roles: + - jfrog/ansible/roles/artifactory + +- hosts: xray + gather_facts: true + roles: + - jfrog/ansible/roles/xray \ No newline at end of file diff --git a/Ansible/project/rt/hosts.yml b/Ansible/project/rt/hosts.yml new file mode 100644 index 0000000..79cf45d --- /dev/null +++ b/Ansible/project/rt/hosts.yml @@ -0,0 +1,31 @@ +--- +all: + vars: + ansible_user: "ubuntu" + ansible_ssh_private_key_file: "/Users/jefff/.ssh/ansible-priv.pem" + children: + database: + hosts: + 52.86.32.79: + db_users: + - { db_user: "artifactory", db_password: "Art1fAct0ry" } + dbs: + - { db_name: "artifactory", db_owner: "artifactory" } + primary: + hosts: + 100.25.104.198: + artifactory_is_primary: true + artifactory_license1: x + artifactory_license2: x + artifactory_license3: x + artifactory_license4: x + artifactory_license5: x + master_key: "c97b862469de0d94fbb7d48130637a5a" + join_key: "9bcca98f375c0728d907cc6ee39d4f02" + db_download_url: "https://jdbc.postgresql.org/download/postgresql-42.2.12.jar" + db_type: "postgresql" + db_driver: "org.postgresql.Driver" + db_url: 
"jdbc:postgresql://10.0.0.160:5432/artifactory" + db_user: "artifactory" + db_password: "Art1fAct0ry" + server_name: "ec2-100-25-104-198.compute-1.amazonaws.com" \ No newline at end of file diff --git a/Ansible/project/rt/playbook.yml b/Ansible/project/rt/playbook.yml new file mode 100644 index 0000000..472706a --- /dev/null +++ b/Ansible/project/rt/playbook.yml @@ -0,0 +1,10 @@ +--- +- hosts: database + gather_facts: true + roles: + - jfrog/ansible/roles/postgres + +- hosts: primary + gather_facts: true + roles: + - jfrog/ansible/roles/artifactory diff --git a/Ansible/project/ssl/hosts.yml b/Ansible/project/ssl/hosts.yml new file mode 100644 index 0000000..c6c8f9f --- /dev/null +++ b/Ansible/project/ssl/hosts.yml @@ -0,0 +1,39 @@ +--- +all: + vars: + ansible_user: "ubuntu" + ansible_ssh_private_key_file: "/Users/jefff/.ssh/ansible-priv.pem" + children: + database: + hosts: + 52.86.32.79: + db_users: + - { db_user: "artifactory", db_password: "Art1fAct0ry" } + dbs: + - { db_name: "artifactory", db_owner: "artifactory" } + primary: + hosts: + 100.25.104.198: + artifactory_is_primary: true + artifactory_license1: x + artifactory_license2: x + artifactory_license3: x + artifactory_license4: x + artifactory_license5: x + master_key: "c97b862469de0d94fbb7d48130637a5a" + join_key: "9bcca98f375c0728d907cc6ee39d4f02" + db_download_url: "https://jdbc.postgresql.org/download/postgresql-42.2.12.jar" + db_type: "postgresql" + db_driver: "org.postgresql.Driver" + db_url: "jdbc:postgresql://10.0.0.160:5432/artifactory" + db_user: "artifactory" + db_password: "Art1fAct0ry" + server_name: "ec2-100-25-104-198.compute-1.amazonaws.com" + certificate: | + -----BEGIN CERTIFICATE----- + x + -----END CERTIFICATE----- + certificate_key: | + -----BEGIN PRIVATE KEY----- + x + -----END PRIVATE KEY----- diff --git a/Ansible/project/ssl/playbook.yml b/Ansible/project/ssl/playbook.yml new file mode 100644 index 0000000..6f1ba22 --- /dev/null +++ b/Ansible/project/ssl/playbook.yml @@ -0,0 +1,11 
@@ +--- +- hosts: database + gather_facts: true + roles: + - jfrog/ansible/roles/postgres + +- hosts: primary + gather_facts: true + roles: + - jfrog/ansible/roles/artifactory + - jfrog/ansible/roles/artifactory-nginx-ssl diff --git a/Ansible/test/aws/playbook.yaml b/Ansible/test/aws/playbook.yaml new file mode 100644 index 0000000..fafe557 --- /dev/null +++ b/Ansible/test/aws/playbook.yaml @@ -0,0 +1,148 @@ +--- +- name: Provision AWS test infrastructure + hosts: localhost + tasks: + - shell: 'pwd' + register: cmd + + - debug: + msg: "{{ cmd.stdout }}" + - name: Create AWS test system + cloudformation: + stack_name: "{{ lookup('env', 'stack_name') }}" + state: "present" + region: "us-east-1" + disable_rollback: true + template: "{{ lookup('env', 'cfn_template') }}" + template_parameters: + SSHKeyName: "{{ lookup('env', 'ssh_public_key_name') }}" + tags: + Stack: "{{ lookup('env', 'stack_name') }}" + register: AWSDeployment + - name: Get AWS deployment details + debug: + var: AWSDeployment + + - name: Add bastion + add_host: + hostname: "{{ AWSDeployment.stack_outputs.BastionInstancePublic }}" + groups: bastion + ansible_user: "ubuntu" + - name: Add new RT primary to host group + add_host: + hostname: "{{ AWSDeployment.stack_outputs.RTPriInstancePrivate }}" + ansible_user: "ubuntu" + ansible_ssh_common_args: '-o ProxyCommand="ssh -o StrictHostKeyChecking=no -A ubuntu@{{ AWSDeployment.stack_outputs.BastionInstancePublic }} -W %h:%p"' + db_url: "jdbc:postgresql://{{ AWSDeployment.stack_outputs.DBInstancePrivate }}:5432/artifactory" + server_name: "{{ AWSDeployment.stack_outputs.ALBHostName }}" + artifactory_is_primary: true + artifactory_license1: "{{ lookup('env', 'artifactory_license1') }}" + artifactory_license2: "{{ lookup('env', 'artifactory_license2') }}" + artifactory_license3: "{{ lookup('env', 'artifactory_license3') }}" + groups: + - artifactory + + - name: Add RT secondaries to host group + add_host: + hostname: "{{ 
AWSDeployment.stack_outputs.RTSecInstancePrivate }}" + ansible_user: "ubuntu" + ansible_ssh_common_args: '-o ProxyCommand="ssh -o StrictHostKeyChecking=no -A ubuntu@{{ AWSDeployment.stack_outputs.BastionInstancePublic }} -W %h:%p"' + db_url: "jdbc:postgresql://{{ AWSDeployment.stack_outputs.DBInstancePrivate }}:5432/artifactory" + server_name: "{{ AWSDeployment.stack_outputs.ALBHostName }}" + artifactory_is_primary: false + groups: + - artifactory + + - name: Add xrays to host group + add_host: + hostname: "{{ AWSDeployment.stack_outputs.XrayInstancePrivate }}" + ansible_user: "ubuntu" + ansible_ssh_common_args: '-o ProxyCommand="ssh -o StrictHostKeyChecking=no -A ubuntu@{{ AWSDeployment.stack_outputs.BastionInstancePublic }} -W %h:%p"' + jfrog_url: "http://{{ AWSDeployment.stack_outputs.ALBHostName }}" + master_key: "{{ lookup('env', 'master_key') }}" + join_key: "{{ lookup('env', 'join_key') }}" + db_type: "postgresql" + db_driver: "org.postgresql.Driver" + db_user: "xray" + db_password: "xray" + db_url: "postgres://{{ AWSDeployment.stack_outputs.DBInstancePrivate }}:5432/xraydb?sslmode=disable" + groups: xray + + - name: Add DBs to host group + add_host: + hostname: "{{ AWSDeployment.stack_outputs.DBInstancePrivate }}" + ansible_user: "ubuntu" + ansible_ssh_common_args: '-o ProxyCommand="ssh -o StrictHostKeyChecking=no -A ubuntu@{{ AWSDeployment.stack_outputs.BastionInstancePublic }} -W %h:%p"' + db_users: + - { db_user: "artifactory", db_password: "Art1fAct0ry" } + - { db_user: "xray", db_password: "xray" } + dbs: + - { db_name: "artifactory", db_owner: "artifactory" } + - { db_name: "xraydb", db_owner: "xray" } + groups: database + + - name: Set up test environment url + replace: + path: ../tests/src/test/resources/testenv.yaml + regexp: 'urlval' + replace: "http://{{ AWSDeployment.stack_outputs.ALBHostName }}" + + - name: Set up test environment external_ip + replace: + path: ../tests/src/test/resources/testenv.yaml + regexp: 'ipval' + replace: "{{ 
AWSDeployment.stack_outputs.ALBHostName }}" + + - name: Set up test environment rt_password + replace: + path: ../tests/src/test/resources/testenv.yaml + regexp: 'passval' + replace: "password" + + - name: show testenv.yaml + debug: var=item + with_file: + - ../tests/src/test/resources/testenv.yaml + + - name: Wait 300 seconds for port 22 + wait_for: + port: 22 + host: "{{ AWSDeployment.stack_outputs.BastionInstancePublic }}" + delay: 10 + + - debug: + msg: "Unified URL is at http://{{ AWSDeployment.stack_outputs.ALBHostName }}" + +- hosts: database + roles: + - postgres + +- hosts: artifactory + vars: + artifactory_ha_enabled: true + master_key: "{{ lookup('env', 'master_key') }}" + join_key: "{{ lookup('env', 'join_key') }}" + db_download_url: "https://jdbc.postgresql.org/download/postgresql-42.2.12.jar" + db_type: "postgresql" + db_driver: "org.postgresql.Driver" + db_user: "artifactory" + db_password: "Art1fAct0ry" + roles: + - artifactory + +- hosts: xray + roles: + - xray + +- name: Test + hosts: localhost + tasks: + - name: Run tests + shell: + cmd: ./gradlew clean unified_test + chdir: ../tests/ + - name: Cleanup and delete stack + cloudformation: + stack_name: "{{ lookup('env', 'stack_name') }}" + region: "us-east-1" + state: "absent" \ No newline at end of file diff --git a/Ansible/test/aws/runAws.sh b/Ansible/test/aws/runAws.sh new file mode 100755 index 0000000..fa8da2a --- /dev/null +++ b/Ansible/test/aws/runAws.sh @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +ansible-playbook Ansible/test/aws/playbook.yaml \ No newline at end of file diff --git a/Ansible/test/azure/playbook.yaml b/Ansible/test/azure/playbook.yaml new file mode 100644 index 0000000..f4eb24a --- /dev/null +++ b/Ansible/test/azure/playbook.yaml @@ -0,0 +1,162 @@ +--- +- name: Provision Azure test infrastructure + hosts: localhost + tasks: + - name: Create azure test system + azure_rm_deployment: + resource_group: "{{ lookup('env', 'azure_resource_group') }}" + location: eastus + name: 
AzureAnsibleInfra + parameters: + vnetName: + value: "vnetAnsible" + vnetAddressRange: + value: "10.0.0.0/16" + subnetAddressRange: + value: "10.0.0.0/24" + location: + value: "eastus" + adminPublicKey: + value: "{{ lookup('env', 'ssh_public_key') }}" + sizeOfDiskInGB: + value: 128 + vmSize: + value: Standard_D2s_v3 + numberOfArtifactory: + value: 2 + numberOfXray: + value: 1 + numberOfDb: + value: 1 + template_link: "{{ lookup('env', 'arm_template') }}" + register: azureDeployment + - name: Get Azure deployment details + debug: + var: azureDeployment + + - name: Add bastion + add_host: + hostname: "{{ azureDeployment.deployment.outputs.lbIp.value }}" + groups: bastion + ansible_user: "ubuntu" + - name: Add new RT primary to host group + add_host: + hostname: "{{ azureDeployment.deployment.outputs.vmArtPriIp.value }}" + ansible_user: "ubuntu" + ansible_ssh_common_args: '-o ProxyCommand="ssh -o StrictHostKeyChecking=no -A ubuntu@{{ azureDeployment.deployment.outputs.lbIp.value }} -W %h:%p"' + db_url: "jdbc:postgresql://{{ azureDeployment.deployment.outputs.vmDbArrIp.value[0] }}:5432/artifactory" + server_name: "rt.{{ azureDeployment.deployment.outputs.lbIp.value }}.xip.io" + artifactory_is_primary: true + artifactory_license1: "{{ lookup('env', 'artifactory_license1') }}" + artifactory_license2: "{{ lookup('env', 'artifactory_license2') }}" + artifactory_license3: "{{ lookup('env', 'artifactory_license3') }}" + groups: + - artifactory + + - name: Add RT secondaries to host group + add_host: + hostname: "{{ item }}" + ansible_user: "ubuntu" + ansible_ssh_common_args: '-o ProxyCommand="ssh -o StrictHostKeyChecking=no -A ubuntu@{{ azureDeployment.deployment.outputs.lbIp.value }} -W %h:%p"' + db_url: "jdbc:postgresql://{{ azureDeployment.deployment.outputs.vmDbArrIp.value[0] }}:5432/artifactory" + server_name: "rt.{{ azureDeployment.deployment.outputs.lbIp.value }}.xip.io" + artifactory_is_primary: false + groups: + - artifactory + loop: "{{ 
azureDeployment.deployment.outputs.vmArtSecArrIp.value }}" + + - name: Add xrays to host group + add_host: + hostname: "{{ item }}" + ansible_user: "ubuntu" + ansible_ssh_common_args: '-o ProxyCommand="ssh -o StrictHostKeyChecking=no -A ubuntu@{{ azureDeployment.deployment.outputs.lbIp.value }} -W %h:%p"' + jfrog_url: "http://rt.{{ azureDeployment.deployment.outputs.lbIp.value }}.xip.io" + master_key: "{{ lookup('env', 'master_key') }}" + join_key: "{{ lookup('env', 'join_key') }}" + db_type: "postgresql" + db_driver: "org.postgresql.Driver" + db_user: "xray" + db_password: "xray" + db_url: "postgres://{{ azureDeployment.deployment.outputs.vmDbArrIp.value[0] }}:5432/xraydb?sslmode=disable" + groups: xray + loop: "{{ azureDeployment.deployment.outputs.vmXrayArrIp.value }}" + + - name: Add DBs to host group + add_host: + hostname: "{{ item }}" + ansible_user: "ubuntu" + ansible_ssh_common_args: '-o ProxyCommand="ssh -o StrictHostKeyChecking=no -A ubuntu@{{ azureDeployment.deployment.outputs.lbIp.value }} -W %h:%p"' + db_users: + - { db_user: "artifactory", db_password: "Art1fAct0ry" } + - { db_user: "xray", db_password: "xray" } + dbs: + - { db_name: "artifactory", db_owner: "artifactory" } + - { db_name: "xraydb", db_owner: "xray" } + groups: database + loop: "{{ azureDeployment.deployment.outputs.vmDbArrIp.value }}" + + - name: Set up test environment url + replace: + path: ../tests/src/test/resources/testenv.yaml + regexp: 'urlval' + replace: "http://rt.{{ azureDeployment.deployment.outputs.lbIp.value }}.xip.io" + + - name: Set up test environment external_ip + replace: + path: ../tests/src/test/resources/testenv.yaml + regexp: 'ipval' + replace: "{{ azureDeployment.deployment.outputs.lbIp.value }}" + + - name: Set up test environment rt_password + replace: + path: ../tests/src/test/resources/testenv.yaml + regexp: 'passval' + replace: "password" + + - name: show testenv.yaml + debug: var=item + with_file: + - ../tests/src/test/resources/testenv.yaml + + - name: 
Wait 300 seconds for port 22 + wait_for: + port: 22 + host: "{{ azureDeployment.deployment.outputs.lbIp.value }}" + delay: 10 + + - debug: + msg: "Unified URL is at http://rt.{{ azureDeployment.deployment.outputs.lbIp.value }}.xip.io" + +- hosts: database + roles: + - postgres + +- hosts: artifactory + vars: + artifactory_ha_enabled: true + master_key: "{{ lookup('env', 'master_key') }}" + join_key: "{{ lookup('env', 'join_key') }}" + db_download_url: "https://jdbc.postgresql.org/download/postgresql-42.2.12.jar" + db_type: "postgresql" + db_driver: "org.postgresql.Driver" + db_user: "artifactory" + db_password: "Art1fAct0ry" + roles: + - artifactory + +- hosts: xray + roles: + - xray + +- name: Test + hosts: localhost + tasks: + - name: Run tests + shell: + cmd: ./gradlew clean unified_test + chdir: ../tests/ + - name: Cleanup and delete resource group + azure_rm_resourcegroup: + name: "{{ lookup('env', 'azure_resource_group') }}" + force_delete_nonempty: yes + state: absent \ No newline at end of file diff --git a/Ansible/test/azure/runAzure.sh b/Ansible/test/azure/runAzure.sh new file mode 100755 index 0000000..c9d7e80 --- /dev/null +++ b/Ansible/test/azure/runAzure.sh @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +ansible-playbook Ansible/test/azure/playbook.yaml \ No newline at end of file diff --git a/Ansible/test/tests/README.md b/Ansible/test/tests/README.md new file mode 100755 index 0000000..21db3cf --- /dev/null +++ b/Ansible/test/tests/README.md @@ -0,0 +1,19 @@ +## Test framework + +### How to run it locally + +``` +./gradlew clean commonTests +``` + +### Adding new tests + +### Gradle cleanup. 
Delete the folder: +``` + ~/.gradle/caches/ + ./gradlew clean +``` +### Or run +``` + ./gradlew clean +``` \ No newline at end of file diff --git a/Ansible/test/tests/build.gradle b/Ansible/test/tests/build.gradle new file mode 100644 index 0000000..1a41ee3 --- /dev/null +++ b/Ansible/test/tests/build.gradle @@ -0,0 +1,63 @@ +plugins { + id 'groovy' +} + +group 'org.example' +version '1.0-SNAPSHOT' + +repositories { + mavenCentral() +} + +dependencies { + compile 'org.codehaus.groovy:groovy-all:3.0.0' + testCompile 'io.rest-assured:rest-assured:4.1.1' + testCompile 'org.testng:testng:6.14.3' + testCompile 'org.yaml:snakeyaml:1.17' +} + +test { + outputs.upToDateWhen { false } + useTestNG(){ + suites("src/test/groovy/testng.xml") + } + //maxParallelForks = Runtime.runtime.availableProcessors().intdiv(2) ?: 1 + testLogging { + showStandardStreams = true + } + +} + +task artifactory_jcr_test(type: Test) { + useTestNG() { + useDefaultListeners = true + suites 'src/test/groovy/testng.xml' + includeGroups ('common', 'jcr') + } + testLogging { + showStandardStreams = true + } +} + +task artifactory_ha_test(type: Test) { + useTestNG() { + useDefaultListeners = true + suites 'src/test/groovy/testng.xml' + includeGroups('common','pro') + } + testLogging { + showStandardStreams = true + } +} + +task unified_test(type: Test) { + useTestNG() { + useDefaultListeners = true + suites 'src/test/groovy/testng.xml' + includeGroups('common','pro','xray') + } + testLogging { + showStandardStreams = true + } +} + diff --git a/Ansible/test/tests/gradle/wrapper/gradle-wrapper.jar b/Ansible/test/tests/gradle/wrapper/gradle-wrapper.jar new file mode 100644 index 0000000..87b738c Binary files /dev/null and b/Ansible/test/tests/gradle/wrapper/gradle-wrapper.jar differ diff --git a/Ansible/test/tests/gradle/wrapper/gradle-wrapper.properties b/Ansible/test/tests/gradle/wrapper/gradle-wrapper.properties new file mode 100644 index 0000000..09c586a --- /dev/null +++ 
b/Ansible/test/tests/gradle/wrapper/gradle-wrapper.properties @@ -0,0 +1,6 @@ +#Wed Feb 12 10:23:21 PST 2020 +distributionUrl=https\://services.gradle.org/distributions/gradle-5.2.1-all.zip +distributionBase=GRADLE_USER_HOME +distributionPath=wrapper/dists +zipStorePath=wrapper/dists +zipStoreBase=GRADLE_USER_HOME diff --git a/Ansible/test/tests/gradlew b/Ansible/test/tests/gradlew new file mode 100755 index 0000000..af6708f --- /dev/null +++ b/Ansible/test/tests/gradlew @@ -0,0 +1,172 @@ +#!/usr/bin/env sh + +############################################################################## +## +## Gradle start up script for UN*X +## +############################################################################## + +# Attempt to set APP_HOME +# Resolve links: $0 may be a link +PRG="$0" +# Need this for relative symlinks. +while [ -h "$PRG" ] ; do + ls=`ls -ld "$PRG"` + link=`expr "$ls" : '.*-> \(.*\)$'` + if expr "$link" : '/.*' > /dev/null; then + PRG="$link" + else + PRG=`dirname "$PRG"`"/$link" + fi +done +SAVED="`pwd`" +cd "`dirname \"$PRG\"`/" >/dev/null +APP_HOME="`pwd -P`" +cd "$SAVED" >/dev/null + +APP_NAME="Gradle" +APP_BASE_NAME=`basename "$0"` + +# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +DEFAULT_JVM_OPTS='"-Xmx64m"' + +# Use the maximum available, or set MAX_FD != -1 to use that value. +MAX_FD="maximum" + +warn () { + echo "$*" +} + +die () { + echo + echo "$*" + echo + exit 1 +} + +# OS specific support (must be 'true' or 'false'). +cygwin=false +msys=false +darwin=false +nonstop=false +case "`uname`" in + CYGWIN* ) + cygwin=true + ;; + Darwin* ) + darwin=true + ;; + MINGW* ) + msys=true + ;; + NONSTOP* ) + nonstop=true + ;; +esac + +CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar + +# Determine the Java command to use to start the JVM. 
+if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + else + JAVACMD="$JAVA_HOME/bin/java" + fi + if [ ! -x "$JAVACMD" ] ; then + die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." + fi +else + JAVACMD="java" + which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." +fi + +# Increase the maximum file descriptors if we can. +if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then + MAX_FD_LIMIT=`ulimit -H -n` + if [ $? -eq 0 ] ; then + if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then + MAX_FD="$MAX_FD_LIMIT" + fi + ulimit -n $MAX_FD + if [ $? -ne 0 ] ; then + warn "Could not set maximum file descriptor limit: $MAX_FD" + fi + else + warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" + fi +fi + +# For Darwin, add options to specify how the application appears in the dock +if $darwin; then + GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" +fi + +# For Cygwin, switch paths to Windows format before running java +if $cygwin ; then + APP_HOME=`cygpath --path --mixed "$APP_HOME"` + CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` + JAVACMD=`cygpath --unix "$JAVACMD"` + + # We build the pattern for arguments to be converted via cygpath + ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` + SEP="" + for dir in $ROOTDIRSRAW ; do + ROOTDIRS="$ROOTDIRS$SEP$dir" + SEP="|" + done + OURCYGPATTERN="(^($ROOTDIRS))" + # Add a user-defined pattern to the cygpath arguments + if [ "$GRADLE_CYGPATTERN" != "" ] ; then + OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" + 
fi + # Now convert the arguments - kludge to limit ourselves to /bin/sh + i=0 + for arg in "$@" ; do + CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` + CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option + + if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition + eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` + else + eval `echo args$i`="\"$arg\"" + fi + i=$((i+1)) + done + case $i in + (0) set -- ;; + (1) set -- "$args0" ;; + (2) set -- "$args0" "$args1" ;; + (3) set -- "$args0" "$args1" "$args2" ;; + (4) set -- "$args0" "$args1" "$args2" "$args3" ;; + (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; + (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; + (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; + (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; + (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; + esac +fi + +# Escape application args +save () { + for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done + echo " " +} +APP_ARGS=$(save "$@") + +# Collect all arguments for the java command, following the shell quoting and substitution rules +eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS" + +# by default we should be in the correct project dir, but when run from Finder on Mac, the cwd is wrong +if [ "$(uname)" = "Darwin" ] && [ "$HOME" = "$PWD" ]; then + cd "$(dirname "$0")" +fi + +exec "$JAVACMD" "$@" diff --git a/Ansible/test/tests/gradlew.bat b/Ansible/test/tests/gradlew.bat new file mode 100644 index 0000000..6d57edc --- /dev/null +++ b/Ansible/test/tests/gradlew.bat @@ -0,0 +1,84 @@ +@if "%DEBUG%" == "" @echo off +@rem ########################################################################## +@rem +@rem Gradle startup script for 
Windows +@rem +@rem ########################################################################## + +@rem Set local scope for the variables with windows NT shell +if "%OS%"=="Windows_NT" setlocal + +set DIRNAME=%~dp0 +if "%DIRNAME%" == "" set DIRNAME=. +set APP_BASE_NAME=%~n0 +set APP_HOME=%DIRNAME% + +@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +set DEFAULT_JVM_OPTS="-Xmx64m" + +@rem Find java.exe +if defined JAVA_HOME goto findJavaFromJavaHome + +set JAVA_EXE=java.exe +%JAVA_EXE% -version >NUL 2>&1 +if "%ERRORLEVEL%" == "0" goto init + +echo. +echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:findJavaFromJavaHome +set JAVA_HOME=%JAVA_HOME:"=% +set JAVA_EXE=%JAVA_HOME%/bin/java.exe + +if exist "%JAVA_EXE%" goto init + +echo. +echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:init +@rem Get command-line arguments, handling Windows variants + +if not "%OS%" == "Windows_NT" goto win9xME_args + +:win9xME_args +@rem Slurp the command line arguments. 
+set CMD_LINE_ARGS= +set _SKIP=2 + +:win9xME_args_slurp +if "x%~1" == "x" goto execute + +set CMD_LINE_ARGS=%* + +:execute +@rem Setup the command line + +set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar + +@rem Execute Gradle +"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS% + +:end +@rem End local scope for the variables with windows NT shell +if "%ERRORLEVEL%"=="0" goto mainEnd + +:fail +rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of +rem the _cmd.exe /c_ return code! +if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 +exit /b 1 + +:mainEnd +if "%OS%"=="Windows_NT" endlocal + +:omega diff --git a/Ansible/test/tests/settings.gradle b/Ansible/test/tests/settings.gradle new file mode 100644 index 0000000..d30fe0d --- /dev/null +++ b/Ansible/test/tests/settings.gradle @@ -0,0 +1,2 @@ +rootProject.name = 'fozzie_jfrog_tests' + diff --git a/Ansible/test/tests/src/test/groovy/steps/RepositorySteps.groovy b/Ansible/test/tests/src/test/groovy/steps/RepositorySteps.groovy new file mode 100644 index 0000000..3ae8b07 --- /dev/null +++ b/Ansible/test/tests/src/test/groovy/steps/RepositorySteps.groovy @@ -0,0 +1,139 @@ +package steps + + +import static io.restassured.RestAssured.given + +class RepositorySteps { + + def getHealthCheckResponse(artifactoryURL) { + return given() + .when() + .get("http://" + artifactoryURL + "/router/api/v1/system/health") + .then() + .extract().response() + } + + def ping() { + return given() + .when() + .get("/api/system/ping") + .then() + .extract().response() + } + + def createRepositories(File body, username, password) { + return given() + .auth() + .preemptive() + .basic("${username}", "${password}") + .header("Cache-Control", "no-cache") + .header("content-Type", "application/yaml") + .body(body) + .when() + .patch("/api/system/configuration") + .then() + 
.extract().response() + } + // https://www.jfrog.com/confluence/display/JFROG/Artifactory+REST+API#ArtifactoryRESTAPI-GetRepositories + def getRepos() { + return given() + .header("Cache-Control", "no-cache") + .header("content-Type", "application/yaml") + .when() + .get("/api/repositories") + .then() + .extract().response() + + } + // https://www.jfrog.com/confluence/display/JFROG/Artifactory+REST+API#ArtifactoryRESTAPI-DeleteRepository + def deleteRepository(repoName, username, password) { + return given() + .auth() + .preemptive() + .basic("${username}", "${password}") + .header("Cache-Control", "no-cache") + .header("content-Type", "application/yaml") + .when() + .delete("/api/repositories/" + repoName) + .then() + .extract().response() + + } + + def createDirectory(repoName, directoryName) { + return given() + .header("Cache-Control", "no-cache") + .header("content-Type", "application/yaml") + .when() + .put("/" + repoName + "/" + directoryName) + .then() + .extract().response() + + } + + def deployArtifact(repoName, directoryName, artifact, filename) { + return given() + .header("Cache-Control", "no-cache") + .header("Content-Type", "application/json") + .body(artifact) + .when() + .put("/" + repoName + "/" + directoryName + "/" + filename) + .then() + .extract().response() + + } + + def deleteItem(repoName, directoryName, artifact, filename) { + return given() + .header("Cache-Control", "no-cache") + .header("Content-Type", "application/json") + .body(artifact) + .when() + .delete("/" + repoName + "/" + directoryName + "/" + filename) + .then() + .extract().response() + + } + + def getInfo(repoName, directoryName, artifact, filename) { + return given() + .header("Cache-Control", "no-cache") + .header("Content-Type", "application/json") + .body(artifact) + .when() + .get("/api/storage/" + repoName + "/" + directoryName + "/" + filename) + .then() + .extract().response() + + } + + def createSupportBundle(name, startDate, endDate) { + return given() + 
.header("Cache-Control", "no-cache") + .header("Content-Type", "application/json") + .body("{ \n" + + " \"name\":\"${name}\",\n" + + " \"description\":\"desc\",\n" + + " \"parameters\":{ \n" + + " \"configuration\": \"true\",\n" + + " \"system\": \"true\", \n" + + " \"logs\":{ \n" + + " \"include\": \"true\", \n" + + " \"start_date\":\"${startDate}\",\n" + + " \"end_date\":\"${endDate}\"\n" + + " },\n" + + " \"thread_dump\":{ \n" + + " \"count\": 1,\n" + + " \"interval\": 0\n" + + " }\n" + + " }\n" + + "}") + .when() + .post("/api/system/support/bundle") + .then() + .extract().response() + + } + + +} \ No newline at end of file diff --git a/Ansible/test/tests/src/test/groovy/steps/SecuritytSteps.groovy b/Ansible/test/tests/src/test/groovy/steps/SecuritytSteps.groovy new file mode 100644 index 0000000..4450b39 --- /dev/null +++ b/Ansible/test/tests/src/test/groovy/steps/SecuritytSteps.groovy @@ -0,0 +1,196 @@ +package steps + +import org.testng.annotations.DataProvider + +import static io.restassured.RestAssured.given + + +class SecuritytSteps { + + def createUser(usernameRt, emailRt, passwordRt) { + return given() + .header("Cache-Control", "no-cache") + .header("content-Type", "application/json") + .body("{\n" + + " \"email\" : \"${emailRt}\",\n" + + " \"password\": \"${passwordRt}\"\n" + + "}") + .when() + .put("/api/security/users/${usernameRt}") + .then() + .extract().response() + } + + def getUserDetails(usernameRt) { + return given() + .header("Cache-Control", "no-cache") + .header("content-Type", "application/json") + .when() + .get("/api/security/users/${usernameRt}") + .then() + .extract().response() + } + + def deleteUser(usernameRt) { + return given() + .header("Cache-Control", "no-cache") + .header("content-Type", "application/json") + .when() + .delete("/api/security/users/${usernameRt}") + .then() + .extract().response() + } + + def generateAPIKey(usernameRt, passwordRt) { + return given() + .auth() + .preemptive() + .basic("${usernameRt}", 
"${passwordRt}") + .header("Cache-Control", "no-cache") + .header("content-Type", "application/json") + .when() + .post("/api/security/apiKey") + .then() + .extract().response() + } + + def getAPIKey(usernameRt, passwordRt) { + return given() + .auth() + .preemptive() + .basic("${usernameRt}", "${passwordRt}") + .header("Cache-Control", "no-cache") + .header("content-Type", "application/json") + .when() + .get("/api/security/apiKey") + .then() + .extract().response() + } + + def regenerateAPIKey(usernameRt, passwordRt) { + return given() + .auth() + .preemptive() + .basic("${usernameRt}", "${passwordRt}") + .header("Cache-Control", "no-cache") + .header("content-Type", "application/json") + .when() + .put("/api/security/apiKey") + .then() + .extract().response() + } + + def createGroup(groupName) { + return given() + .header("Cache-Control", "no-cache") + .header("content-Type", "application/json") + .body("{\"name\": \"${groupName}\"}") + .when() + .put("/api/security/groups/${groupName}") + .then() + .extract().response() + } + + def getGroup(groupName) { + return given() + .header("Cache-Control", "no-cache") + .header("content-Type", "application/json") + .body("{\"name\": \"${groupName}\"}") + .when() + .get("/api/security/groups/${groupName}") + .then() + .extract().response() + } + + def deleteGroup(groupName) { + return given() + .header("Cache-Control", "no-cache") + .header("content-Type", "application/json") + .when() + .delete("/api/security/groups/${groupName}") + .then() + .extract().response() + } + + def createPermissions(permissionName, repository, user1, user2, + group1, group2, action1, action2, action3) { + return given() + .header("Cache-Control", "no-cache") + .header("content-Type", "application/json") + .body("{\n" + + " \"name\": \"${permissionName}\",\n" + + " \"repo\": {\n" + + " \"repositories\": [ \"${repository}\" ],\n" + + " \"actions\": {\n" + + " \"users\" : {\n" + + " \"${user1}\": [ \"${action1}\",\"${action2}\",\"${action3}\" ], 
\n" + + " \"${user2}\" : [ \"${action1}\",\"${action2}\",\"${action3}\" ]\n" + + " },\n" + + " \"groups\" : {\n" + + " \"${group1}\" : [ \"${action1}\",\"${action2}\",\"${action3}\" ],\n" + + " \"${group2}\" : [ \"${action1}\",\"${action2}\" ]\n" + + " }\n" + + " }\n" + + " }\n" + + "}") + .when() + .put("/api/v2/security/permissions/testPermission") + .then() + .extract().response() + } + + def getPermissions( permissionName) { + return given() + .header("Cache-Control", "no-cache") + .header("content-Type", "application/json") + .when() + .get("/api/v2/security/permissions/${permissionName}") + .then() + .extract().response() + } + + def deletePermissions(permissionName) { + return given() + .header("Cache-Control", "no-cache") + .header("content-Type", "application/json") + .when() + .delete("/api/v2/security/permissions/${permissionName}") + .then() + .extract().response() + } + + + // Data providers + + @DataProvider(name="users") + public Object[][] users() { + return new Object[][]{ + ["testuser0", "email0@jfrog.com", "password123"], + ["testuser1", "email1@jfrog.com", "password123"], + ["testuser2", "email2@jfrog.com", "password123"], + ["testuser3", "email3@jfrog.com", "password123"], + ["testuser4", "email4@jfrog.com", "password123"], + ["testuser5", "email5@jfrog.com", "password123"], + ["testuser6", "email6@jfrog.com", "password123"], + ["testuser7", "email7@jfrog.com", "password123"], + ["testuser8", "email8@jfrog.com", "password123"], + ["testuser9", "email9@jfrog.com", "password123"] + } + } + + @DataProvider(name="groups") + public Object[][] groups() { + return new Object[][]{ + ["test-group-0"], + ["test-group-1"], + ["test-group-2"], + ["test-group-3"], + ["test-group-4"], + ["test-group-5"], + ["test-group-6"], + ["test-group-7"], + ["test-group-8"], + ["test-group-9"] + } + } +} diff --git a/Ansible/test/tests/src/test/groovy/steps/XraySteps.groovy b/Ansible/test/tests/src/test/groovy/steps/XraySteps.groovy new file mode 100644 index 
0000000..552ba93 --- /dev/null +++ b/Ansible/test/tests/src/test/groovy/steps/XraySteps.groovy @@ -0,0 +1,585 @@ +package steps + +import org.testng.annotations.DataProvider + +import static io.restassured.RestAssured.given + +class XraySteps { + + def createIssueEvent(issueID, cve, summary, description, username, password) { + return given() + .auth() + .preemptive() + .basic("${username}", "${password}") + .header("Cache-Control", "no-cache") + .header("content-Type", "application/json") + .body("{\n" + + " \"id\": \"${issueID}\",\n" + + " \"type\": \"Security\",\n" + + " \"provider\": \"JFrog\",\n" + + " \"package_type\": \"maven\",\n" + + " \"severity\": \"High\",\n" + + " \"components\": [\n" + + " {\n" + + " \"id\": \"aero:aero\",\n" + + " \"vulnerable_versions\": [\n" + + " \"[0.2.3]\"\n" + + " ]\n" + + " }\n" + + " ],\n" + + " \"cves\": [\n" + + " {\n" + + " \"cve\": \"${cve}\",\n" + + " \"cvss_v2\": \"2.4\"\n" + + " }\n" + + " ],\n" + + " \"summary\": \"${summary}\",\n" + + " \"description\": \"${description}\",\n" + + " \"sources\": [\n" + + " {\n" + + " \"source_id\": \"${cve}\"\n" + + " }\n" + + " ]\n" + + "}") + .when() + .post("/v1/events") + .then() + .extract().response() + } + + def updateIssueEvent(issueID, cve, summary, description, username, password) { + return given() + .auth() + .preemptive() + .basic("${username}", "${password}") + .header("Cache-Control", "no-cache") + .header("content-Type", "application/json") + .body("{\n" + + " \"type\": \"Security\",\n" + + " \"provider\": \"JFrog\",\n" + + " \"package_type\": \"maven\",\n" + + " \"severity\": \"High\",\n" + + " \"components\": [\n" + + " {\n" + + " \"id\": \"aero:aero\",\n" + + " \"vulnerable_versions\": [\n" + + " \"[0.2.3]\"\n" + + " ]\n" + + " }\n" + + " ],\n" + + " \"cves\": [\n" + + " {\n" + + " \"cve\": \"${cve}\",\n" + + " \"cvss_v2\": \"2.4\"\n" + + " }\n" + + " ],\n" + + " \"summary\": \"${summary}\",\n" + + " \"description\": \"${description}\",\n" + + " \"sources\": [\n" + 
+ " {\n" + + " \"source_id\": \"${cve}\"\n" + + " }\n" + + " ]\n" + + "}") + .when() + .put("/v1/events/${issueID}") + .then() + .extract().response() + } + + def createPolicy(policyName, username, password) { + return given() + .auth() + .preemptive() + .basic("${username}", "${password}") + .header("Cache-Control", "no-cache") + .header("content-Type", "application/json") + .body("{\n" + + " \"name\": \"${policyName}\",\n" + + " \"type\": \"security\",\n" + + " \"description\": \"some description\",\n" + + " \"rules\": [\n" + + " {\n" + + " \"name\": \"securityRule\",\n" + + " \"priority\": 1,\n" + + " \"criteria\": {\n" + + " \"min_severity\": \"High\"\n" + + " },\n" + + " \"actions\": {\n" + + " \"mails\": [\n" + + " \"mail1@example.com\",\n" + + " \"mail2@example.com\"\n" + + " ],\n" + + " \"fail_build\": true,\n" + + " \"block_download\": {\n" + + " \"unscanned\": true,\n" + + " \"active\": true\n" + + " }\n" + + " }\n" + + " }\n" + + " ]\n" + + "}") + .when() + .post("/v1/policies") + .then() + .extract().response() + } + + def updatePolicy(policyName, description, username, password) { + return given() + .auth() + .preemptive() + .basic("${username}", "${password}") + .header("Cache-Control", "no-cache") + .header("content-Type", "application/json") + .body("{\n" + + " \"name\": \"${policyName}\",\n" + + " \"type\": \"security\",\n" + + " \"description\": \"${description}\",\n" + + " \"rules\": [\n" + + " {\n" + + " \"name\": \"securityRule\",\n" + + " \"priority\": 1,\n" + + " \"criteria\": {\n" + + " \"min_severity\": \"High\"\n" + + " },\n" + + " \"actions\": {\n" + + " \"mails\": [\n" + + " \"mail1@example.com\",\n" + + " \"mail2@example.com\"\n" + + " ],\n" + + " \"fail_build\": true,\n" + + " \"block_download\": {\n" + + " \"unscanned\": true,\n" + + " \"active\": true\n" + + " }\n" + + " }\n" + + " }\n" + + " ]\n" + + "}") + .when() + .put("/v1/policies/${policyName}") + .then() + .extract().response() + } + + def getPolicy(policyName, username, 
password) { + return given() + .auth() + .preemptive() + .basic("${username}", "${password}") + .header("Cache-Control", "no-cache") + .header("content-Type", "application/json") + .when() + .get("/v1/policies/${policyName}") + .then() + .extract().response() + } + + def getPolicies(username, password) { + return given() + .auth() + .preemptive() + .basic("${username}", "${password}") + .header("Cache-Control", "no-cache") + .header("content-Type", "application/json") + .when() + .get("/v1/policies") + .then() + .extract().response() + } + + def deletePolicy(policyName, username, password) { + return given() + .auth() + .preemptive() + .basic("${username}", "${password}") + .header("Cache-Control", "no-cache") + .header("content-Type", "application/json") + .when() + .delete("/v1/policies/${policyName}") + .then() + .extract().response() + } + + def getIssueEvent(issueID, username, password) { + return given() + .auth() + .preemptive() + .basic("${username}", "${password}") + .header("Cache-Control", "no-cache") + .header("content-Type", "application/json") + .when() + .get("/v1/events/${issueID}") + .then() + .extract().response() + } + + def createWatchEvent(watchName, policyName, username, password) { + return given() + .auth() + .preemptive() + .basic("${username}", "${password}") + .header("Cache-Control", "no-cache") + .header("content-Type", "application/json") + .body("{\n" + + " \"general_data\": {\n" + + " \"name\": \"${watchName}\",\n" + + " \"description\": \"This is a new watch created using API V2\",\n" + + " \"active\": true\n" + + " },\n" + + " \"project_resources\": {\n" + + " \"resources\": [\n" + + " {\n" + + " \"type\": \"all-repos\",\n" + + " \"filters\": [\n" + + " {\n" + + " \"type\": \"package-type\",\n" + + " \"value\": \"Docker\"\n" + + " },\n" + + " {\n" + + " \"type\": \"package-type\",\n" + + " \"value\": \"Debian\"\n" + + " }\n" + + " ]\n" + + " }\n" + + " ]\n" + + " },\n" + + " \"assigned_policies\": [\n" + + " {\n" + + " \"name\": 
\"${policyName}\",\n" + + " \"type\": \"security\"\n" + + " }\n" + + " ]\n" + + "}") + .when() + .post("/v2/watches") + .then() + .extract().response() + } + + def updateWatchEvent(watchName, description, policyName, username, password) { + return given() + .auth() + .preemptive() + .basic("${username}", "${password}") + .header("Cache-Control", "no-cache") + .header("content-Type", "application/json") + .body("{\n" + + " \"general_data\": {\n" + + " \"name\": \"${watchName}\",\n" + + " \"description\": \"${description}\",\n" + + " \"active\": true\n" + + " },\n" + + " \"project_resources\": {\n" + + " \"resources\": [\n" + + " {\n" + + " \"type\": \"all-repos\",\n" + + " \"filters\": [\n" + + " {\n" + + " \"type\": \"package-type\",\n" + + " \"value\": \"Docker\"\n" + + " },\n" + + " {\n" + + " \"type\": \"package-type\",\n" + + " \"value\": \"Debian\"\n" + + " }\n" + + " ]\n" + + " }\n" + + " ]\n" + + " },\n" + + " \"assigned_policies\": [\n" + + " {\n" + + " \"name\": \"${policyName}\",\n" + + " \"type\": \"security\"\n" + + " }\n" + + " ]\n" + + "}") + .when() + .put("/v2/watches/${watchName}") + .then() + .extract().response() + } + + def getWatchEvent(watchName, username, password) { + return given() + .auth() + .preemptive() + .basic("${username}", "${password}") + .header("Cache-Control", "no-cache") + .header("content-Type", "application/json") + .when() + .get("/v2/watches/${watchName}") + .then() + .extract().response() + } + + def deleteWatchEvent(watchName, username, password) { + return given() + .auth() + .preemptive() + .basic("${username}", "${password}") + .header("Cache-Control", "no-cache") + .header("content-Type", "application/json") + .when() + .delete("/v2/watches/${watchName}") + .then() + .extract().response() + } + + def assignPolicy(watchName, policyName, username, password) { + return given() + .auth() + .preemptive() + .basic("${username}", "${password}") + .header("Cache-Control", "no-cache") + .header("content-Type", 
"application/json") + .body("{\n" + + " \"watches\": [\n" + + " \"${watchName}\"\n" + + " ]\n" + + "}") + .when() + .post("/v1/policies/${policyName}/assign") + .then() + .extract().response() + } + + def getIntegrationConfiguration(username, password) { + return given() + .auth() + .preemptive() + .basic("${username}", "${password}") + .header("Cache-Control", "no-cache") + .header("content-Type", "application/json") + .when() + .get("/v1/integration") + .then() + .extract().response() + } + + def addtIntegrationConfiguration(username, password, vendorName) { + return given() + .auth() + .preemptive() + .basic("${username}", "${password}") + .header("Cache-Control", "no-cache") + .header("content-Type", "application/json") + .body("{\n" + + " \"vendor\": \"${vendorName}\",\n" + + " \"api_key\": \"12345\",\n" + + " \"enabled\": true,\n" + + " \"context\": \"project_id\",\n" + + " \"url\": \"https://saas.whitesourcesoftware.com/xray\",\n" + + " \"description\": \"WhiteSource provides a simple yet powerful open source security and licenses management solution. 
More details at http://www.whitesourcesoftware.com.\",\n" + + " \"test_url\": \"https://saas.whitesourcesoftware.com/xray/api/checkauth\"\n" + + "}") + .when() + .post("/v1/integration") + .then() + .extract().response() + } + + def postSystemParameters(username, password, body) { + return given() + .auth() + .preemptive() + .basic("${username}", "${password}") + .header("Cache-Control", "no-cache") + .header("content-Type", "application/json") + .body(body) + .when() + .put("/v1/configuration/systemParameters") + .then() + .extract().response() + } + + def getSystemParameters(username, password) { + return given() + .auth() + .preemptive() + .basic("${username}", "${password}") + .header("Cache-Control", "no-cache") + .header("content-Type", "application/json") + .when() + .get("/v1/configuration/systemParameters") + .then() + .extract().response() + } + + def getBinaryManager(username, password) { + return given() + .auth() + .preemptive() + .basic("${username}", "${password}") + .header("Cache-Control", "no-cache") + .header("content-Type", "application/json") + .when() + .get("/v1/binMgr/default") + .then() + .extract().response() + } + + + def getIndexingConfiguration(username, password) { + return given() + .auth() + .preemptive() + .basic("${username}", "${password}") + .header("Cache-Control", "no-cache") + .header("content-Type", "application/json") + .when() + .get("/v1/binMgr/default/repos") + .then() + .extract().response() + } + + def updateIndexingConfiguration(username, password) { + return given() + .auth() + .preemptive() + .basic("${username}", "${password}") + .header("Cache-Control", "no-cache") + .header("content-Type", "application/json") + .body("{\n" + + " \"indexed_repos\": [\n" + + " {\n" + + " \"name\": \"docker-local\",\n" + + " \"type\": \"local\",\n" + + " \"pkg_type\": \"Docker\"\n" + + " },\n" + + " {\n" + + " \"name\": \"generic-dev-local\",\n" + + " \"type\": \"local\",\n" + + " \"pkg_type\": \"Generic\"\n" + + " }\n" + + " ],\n" + 
+ " \"non_indexed_repos\": []\n" + + "}") + .when() + .put("/v1/binMgr/default/repos") + .then() + .extract().response() + } + + + + def startScan(username, password, componentID) { + return given() + .auth() + .preemptive() + .basic("${username}", "${password}") + .header("Cache-Control", "no-cache") + .header("Content-Type", "application/json") + .body("{\n" + + " \"componentID\": \"${componentID}\"\n" + + "}") + .when() + .post("/v1/scanArtifact") + .then() + .extract().response() + } + + + def artifactSummary(username, password, artifactPath) { + return given() + .auth() + .preemptive() + .basic("${username}", "${password}") + .header("Cache-Control", "no-cache") + .header("content-Type", "application/json") + .body("{\n" + + " \"checksums\": [\n" + + " \"\"\n" + + " ],\n" + + " \"paths\": [\n" + + " \"${artifactPath}\"\n" + + " ]\n" + + "}") + .when() + .post("/v1/summary/artifact") + .then() + .extract().response() + } + + def createSupportBundle(username, password, name, startDate, endDate) { + return given() + .auth() + .preemptive() + .basic("${username}", "${password}") + .header("Cache-Control", "no-cache") + .header("Content-Type", "application/json") + .body("{ \n" + + " \"name\":\"${name}\",\n" + + " \"description\":\"desc\",\n" + + " \"parameters\":{ \n" + + " \"configuration\": true,\n" + + " \"system\": true, \n" + + " \"logs\":{ \n" + + " \"include\": true, \n" + + " \"start_date\":\"${startDate}\",\n" + + " \"end_date\":\"${endDate}\"\n" + + " },\n" + + " \"thread_dump\":{ \n" + + " \"count\": 1,\n" + + " \"interval\": 0\n" + + " }\n" + + " }\n" + + "}") + .when() + .post("/v1/system/support/bundle") + .then() + .extract().response() + + } + + def getSystemMonitoringStatus(username, password) { + return given() + .auth() + .preemptive() + .basic("${username}", "${password}") + .header("Cache-Control", "no-cache") + .header("Content-Type", "application/json") + .when() + .get("/v1/monitor") + .then() + .extract().response() + } + + def 
xrayPingRequest(username, password) { + return given() + .auth() + .preemptive() + .basic("${username}", "${password}") + .header("Cache-Control", "no-cache") + .header("Content-Type", "application/json") + .when() + .get("/v1/system/ping") + .then() + .extract().response() + } + + def xrayGetVersion(username, password) { + return given() + .auth() + .preemptive() + .basic("${username}", "${password}") + .header("Cache-Control", "no-cache") + .header("Content-Type", "application/json") + .when() + .get("/v1/system/version") + .then() + .extract().response() + } + + // Data providers + + @DataProvider(name = "issueEvents") + public Object[][] issueEvents() { + return new Object[][]{ + ["XRAY-", "CVE-2017-2000386", "A very important custom issue", "A very important custom issue"] + + } + } + +} \ No newline at end of file diff --git a/Ansible/test/tests/src/test/groovy/testng.xml b/Ansible/test/tests/src/test/groovy/testng.xml new file mode 100644 index 0000000..5342660 --- /dev/null +++ b/Ansible/test/tests/src/test/groovy/testng.xml @@ -0,0 +1,25 @@ + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/Ansible/test/tests/src/test/groovy/tests/HealthCheckTest.groovy b/Ansible/test/tests/src/test/groovy/tests/HealthCheckTest.groovy new file mode 100644 index 0000000..4e7fcce --- /dev/null +++ b/Ansible/test/tests/src/test/groovy/tests/HealthCheckTest.groovy @@ -0,0 +1,57 @@ +package tests + +import io.restassured.RestAssured +import io.restassured.path.json.JsonPath +import io.restassured.response.Response +import org.hamcrest.Matchers +import org.testng.Reporter +import org.testng.annotations.BeforeSuite +import org.testng.annotations.Test +import steps.RepositorySteps +import org.yaml.snakeyaml.Yaml +import utils.Shell + +class HealthCheckTest extends RepositorySteps{ + Yaml yaml = new Yaml() + def configFile = new File("./src/test/resources/testenv.yaml") + def config = yaml.load(configFile.text) + def artifactoryURL + + + @BeforeSuite(alwaysRun = true) + 
def setUp() { + artifactoryURL = config.artifactory.external_ip + RestAssured.baseURI = "http://${artifactoryURL}/artifactory" + } + + + @Test(priority=0, groups="common", testName = "Health check for all 4 services") + void healthCheckTest(){ + Response response = getHealthCheckResponse(artifactoryURL) + response.then().assertThat().statusCode(200). + body("router.state", Matchers.equalTo("HEALTHY")) + + int bodySize = response.body().jsonPath().getList("services").size() + for (int i = 0; i < bodySize; i++) { + JsonPath jsonPathEvaluator = response.jsonPath() + String serviceID = jsonPathEvaluator.getString("services[" + i + "].service_id") + String nodeID = jsonPathEvaluator.getString("services[" + i + "].node_id") + response.then(). + body("services[" + i + "].state", Matchers.equalTo("HEALTHY")) + + Reporter.log("- Health check. Service \"" + serviceID + "\" on node \"" + nodeID + "\" is healthy", true) + } + + } + + @Test(priority=1, groups=["ping","common"], testName = "Ping (In HA 200 only when licences were added)") + void pingTest() { + Response response = ping() + response.then().assertThat().statusCode(200). + body(Matchers.hasToString("OK")) + Reporter.log("- Ping test. 
Service is OK", true) + } + + + +} diff --git a/Ansible/test/tests/src/test/groovy/tests/RepositoryTest.groovy b/Ansible/test/tests/src/test/groovy/tests/RepositoryTest.groovy new file mode 100644 index 0000000..3deb72a --- /dev/null +++ b/Ansible/test/tests/src/test/groovy/tests/RepositoryTest.groovy @@ -0,0 +1,302 @@ +package tests + +import io.restassured.RestAssured +import io.restassured.path.json.JsonPath +import io.restassured.response.Response +import org.hamcrest.Matchers +import org.testng.Reporter +import org.testng.annotations.BeforeTest +import org.testng.annotations.Test +import org.yaml.snakeyaml.Yaml +import steps.RepositorySteps + +import java.time.LocalDate + +import static org.hamcrest.Matchers.containsString +import static org.hamcrest.Matchers.containsStringIgnoringCase +import static org.hamcrest.Matchers.equalTo +import static org.hamcrest.Matchers.equalToIgnoringCase +import static org.hamcrest.Matchers.greaterThanOrEqualTo + + +class RepositoryTest extends RepositorySteps{ + Yaml yaml = new Yaml() + def configFile = new File("./src/test/resources/testenv.yaml") + def repoListHA = new File("./src/test/resources/repositories/CreateDefault.yaml") + def repoListJCR = new File("./src/test/resources/repositories/CreateJCR.yaml") + def artifact = new File("./src/test/resources/repositories/artifact.zip") + def config = yaml.load(configFile.text) + def artifactoryURL + def username + def password + + @BeforeTest(groups=["jcr", "pro"]) + def setUp() { + artifactoryURL = config.artifactory.external_ip + username = config.artifactory.rt_username + password = config.artifactory.rt_password + RestAssured.baseURI = "http://${artifactoryURL}/artifactory" + RestAssured.authentication = RestAssured.basic(username, password); + RestAssured.useRelaxedHTTPSValidation(); + } + + + @Test(priority=1, groups=["pro"], testName = "Delete sample repositories") + void deleteReposTest(){ + Response getRepoResponse = getRepos() + JsonPath jsonPathEvaluator = 
getRepoResponse.jsonPath() + List repoNames = jsonPathEvaluator.getList("key", String.class) + for (int i = 0; i < repoNames.size(); i ++){ + Response delete = deleteRepository(repoNames[i], username, password) + delete.then().statusCode(200) + } + + Reporter.log("- Delete sample HA repositories. All repositories were successfully deleted", true) + } + + @Test(priority=1, groups=["jcr",], testName = "Delete sample repositories JCR") + void deleteDefaultJCRReposTest(){ + Response getRepoResponse = getRepos() + JsonPath jsonPathEvaluator = getRepoResponse.jsonPath() + List repoNames = jsonPathEvaluator.getList("key", String.class) + for (int i = 0; i < repoNames.size(); i ++){ + Response delete = deleteRepository(repoNames[i], username, password) + delete.then().statusCode(400).body("errors[0].message", + containsStringIgnoringCase("This REST API is available only in Artifactory Pro")) + } + + Reporter.log("- Delete sample JCR repositories. " + + "Verified - this REST API is available only in Artifactory Pro", true) + } + + @Test(priority=2, groups=["pro"], testName = "Create a list of repositories for HA, specified in YAML file") + void createDefaultHAReposTest(){ + def body + def expectedMessage + body = repoListHA + expectedMessage = "383 changes to config merged successfully" + Response response = createRepositories(body, username, password) + response.then().assertThat().statusCode(200) + .body(Matchers.hasToString(expectedMessage)) + .log().body() + + Reporter.log("- Create repositories for HA distribution. 
Successfully created", true) + } + + @Test(priority=2, groups=["jcr"], testName = "Create a list of repositories for JCR, specified in YAML file") + void createDefaultJCRReposTest(){ + def body + def expectedMessage + body = repoListJCR + expectedMessage = "82 changes to config merged successfully" + Response response = createRepositories(body, username, password) + response.then().assertThat().statusCode(200) + .body(Matchers.hasToString(expectedMessage)) + .log().body() + + Reporter.log("- Create repositories for JCR. Successfully created", true) + } + + @Test(priority=3, groups=["pro"], testName = "Verify HA repositories were created successfully") + void checkDefaultHAReposTest(){ + Response response = getRepos() + def numberOfRepos = response.then().extract().path("size()") + def expectedReposNumber = 84 + println("Number of created repositories is ${numberOfRepos}") + response.then().assertThat().statusCode(200) + .body("size()", greaterThanOrEqualTo(expectedReposNumber)) + + Reporter.log("- Verify HA repos were created. ${numberOfRepos} repositories were created", true) + } + + @Test(priority=3, groups=["jcr"], testName = "Verify JCR repositories were created successfully") + void checkDefaultJCRReposTest(){ + Response response = getRepos() + def numberOfRepos = response.then().extract().path("size()") + def expectedReposNumber = 17 + response.then().assertThat().statusCode(200) + .body("size()", greaterThanOrEqualTo(expectedReposNumber)) + + Reporter.log("- Verify JCR repos were created. 
${numberOfRepos} repositories were created", true) + } + + /*@Test(priority=4, groups=["jcr","pro"], testName = "Create a directory in generic repo") + void createDirectoryTest(){ + def repoName = "generic-dev-local" + def directoryName = "test-directory/" + Response response = createDirectory(repoName, directoryName) + response.then().assertThat().statusCode(201) + .body("repo", equalTo(repoName)) + .body("path", equalTo("/" + directoryName)) + .body("uri", equalTo("http://" + artifactoryURL + ":80/artifactory/" + repoName + "/" + directoryName)) + + Reporter.log("- Create folder. Folder successfully created", true) + } + + @Test(priority=5, groups=["jcr","pro"], testName = "Deploy file to generic repo") + void deployArtifactToGenericTest(){ + def repoName = "generic-dev-local" + def directoryName = "test-directory" + def filename = "artifact.zip" + Response response = deployArtifact(repoName, directoryName, artifact, filename) + response.then().assertThat().statusCode(201) + .body("repo", equalTo(repoName)) + .body("path", equalTo("/" + directoryName + "/" + filename)) + .body("downloadUri", equalTo("http://" + artifactoryURL + ":80/artifactory/" + repoName + "/" + + directoryName + "/" + filename)) + + Reporter.log("- Deploy artifact. Artifact successfully deployed", true) + } + + @Test(priority=6, groups=["jcr", "pro"], testName = "Get the artifact info") + void getArtifactinfoTest(){ + def repoName = "generic-dev-local" + def directoryName = "test-directory" + def filename = "artifact.zip" + Response response = getInfo(repoName, directoryName, artifact, filename) + response.then().assertThat().statusCode(200) + .body("repo", equalTo(repoName)) + .body("path", equalTo("/" + directoryName + "/" + filename)) + .body("downloadUri", equalTo("http://" + artifactoryURL + ":80/artifactory/" + repoName + "/" + + directoryName + "/" + filename)) + + Reporter.log("- Get the artifact info. 
Artifact info is successfully returned", true) + }*/ + +/* @Test(priority=7, groups=["jcr", "pro"], testName = "Delete item") + void deleteJCRItemTest(){ + def repoName = "generic-dev-local" + def directoryName = "test-directory" + def filename = "artifact.zip" + Response response = deleteItem(repoName, directoryName, artifact, filename) + response.then().assertThat().statusCode(204) + + Response verification = getInfo(repoName, directoryName, artifact, filename) + verification.then().statusCode(404) + .body("errors[0].message", equalToIgnoringCase("Unable to find item")) + + Reporter.log("- Delete item. File has been deleted successfully", true) + }*/ + + /*@Test(priority=8, groups=["pro"], testName = "Create support bundle") + void createSupportBundleHATest(){ + def name = "Support Bundle" + LocalDate startDate = LocalDate.now().minusDays(5) + LocalDate endDate = LocalDate.now() + Response response = createSupportBundle(name, startDate, endDate) + response.then().assertThat().statusCode(200) + .body("artifactory.bundle_url", containsString(artifactoryURL)) + + Reporter.log("- Create support bundle. Successfully created", true) + }*/ + + @Test(priority=8, groups=["jcr"], testName = "Create support bundle") + void createSupportBundleJCATest(){ + def name = "Support Bundle" + LocalDate startDate = LocalDate.now().minusDays(5) + LocalDate endDate = LocalDate.now() + Response response = createSupportBundle(name, startDate, endDate) + response.then().assertThat().statusCode(400) + .body("errors[0].message", + containsStringIgnoringCase("This REST API is available only in Artifactory Pro")) + + Reporter.log("- Create support bundle, JCR. 
" + + "Call is not supported in JCR version, error message is correct", true) + } + + @Test(priority=9, groups=["pro"], testName = "Delete created repositories") + void deleteDefaultReposTest(){ + Response getRepoResponse = getRepos() + JsonPath jsonPathEvaluator = getRepoResponse.jsonPath() + List repoNames = jsonPathEvaluator.getList("key", String.class) + for (int i = 0; i < repoNames.size(); i ++){ + Response delete = deleteRepository(repoNames[i], username, password) + delete.then().statusCode(200) + } + + Reporter.log("- Delete HA repositories. All repositories were successfully deleted", true) + } + + @Test(priority=9, groups=["jcr",], testName = "Delete sample repositories JCR") + void deleteJCRReposTest(){ + Response getRepoResponse = getRepos() + JsonPath jsonPathEvaluator = getRepoResponse.jsonPath() + List repoNames = jsonPathEvaluator.getList("key", String.class) + for (int i = 0; i < repoNames.size(); i ++){ + Response delete = deleteRepository(repoNames[i], username, password) + delete.then().statusCode(400).body("errors[0].message", + containsStringIgnoringCase("This REST API is available only in Artifactory Pro")) + } + + Reporter.log("- Delete sample JCR repositories. All repositories were successfully deleted", true) + } + + @Test(priority=10, groups=["pro"], testName = "Verify repositories were deleted successfully") + void checkReposAreDeleted(){ + Response response = getRepos() + def numberOfRepos = response.then().extract().path("size()") + def expectedReposNumber = 0 + response.then().assertThat().statusCode(200) + .body("size()", equalTo(expectedReposNumber)) + + Reporter.log("- Verify repo were deleted. 
${numberOfRepos} repositories remain", true) + } + + @Test(priority=11, groups=["pro"], testName = "Re-Create a list of repositories, for the next tests") + void reCreateDefaultHAReposTest(){ + def body + def expectedMessage + body = repoListHA + expectedMessage = "383 changes to config merged successfully" + Response response = createRepositories(body, username, password) + response.then().assertThat().statusCode(200) + .body(Matchers.hasToString(expectedMessage)) + .log().body() + + Reporter.log("- Re-create repositories for HA distribution. Successfully created", true) + } + + @Test(priority=12, groups=["jcr"], testName = "Re-Create a list of repositories, for the next tests") + void reCreateDefaultJCRReposTest(){ + def body + def expectedMessage + body = repoListJCR + expectedMessage = "82 changes to config merged successfully" + Response response = createRepositories(body, username, password) + response.then().assertThat().statusCode(200) + .body(Matchers.hasToString(expectedMessage)) + .log().body() + + Reporter.log("- Re-create repositories for JCR distribution. Successfully created", true) + } + +/* @Test(priority=13, groups=["jcr","pro"], testName = "Create a directory in generic repo") + void reCreateDirectoryTest(){ + def repoName = "generic-dev-local" + def directoryName = "test-directory/" + Response response = createDirectory(repoName, directoryName) + response.then().assertThat().statusCode(201) + .body("repo", equalTo(repoName)) + .body("path", equalTo("/" + directoryName)) + .body("uri", equalTo("http://" + artifactoryURL + ":80/artifactory/" + repoName + "/" + directoryName)) + + Reporter.log("- Create folder. 
Folder successfully created", true) + }*/ + +/* @Test(priority=14, groups=["jcr","pro"], testName = "Deploy file to generic repo") + void reDeployArtifactToGenericTest(){ + def repoName = "generic-dev-local" + def directoryName = "test-directory" + def filename = "artifact.zip" + Response response = deployArtifact(repoName, directoryName, artifact, filename) + response.then().assertThat().statusCode(201) + .body("repo", equalTo(repoName)) + .body("path", equalTo("/" + directoryName + "/" + filename)) + .body("downloadUri", equalTo("http://" + artifactoryURL + ":80/artifactory/" + repoName + "/" + + directoryName + "/" + filename)) + + Reporter.log("- Deploy artifact. Artifact successfully deployed", true) + }*/ + + +} diff --git a/Ansible/test/tests/src/test/groovy/tests/SecurityTest.groovy b/Ansible/test/tests/src/test/groovy/tests/SecurityTest.groovy new file mode 100644 index 0000000..a166cd0 --- /dev/null +++ b/Ansible/test/tests/src/test/groovy/tests/SecurityTest.groovy @@ -0,0 +1,153 @@ +package tests + +import io.restassured.RestAssured +import io.restassured.response.Response +import org.hamcrest.Matchers +import org.junit.Assert +import org.testng.Reporter +import org.testng.annotations.BeforeSuite +import org.testng.annotations.Test +import org.yaml.snakeyaml.Yaml +import steps.SecuritytSteps + + +class SecurityTest extends SecuritytSteps{ + + Yaml yaml = new Yaml() + def configFile = new File("./src/test/resources/testenv.yaml") + def config = yaml.load(configFile.text) + def artifactoryURL + def distribution + def username + def password + + @BeforeSuite(groups=["jcr","pro"]) + def setUp() { + artifactoryURL = config.artifactory.external_ip + distribution = config.artifactory.distribution + username = config.artifactory.rt_username + password = config.artifactory.rt_password + RestAssured.baseURI = "http://${artifactoryURL}/artifactory" + RestAssured.authentication = RestAssured.basic(username, password); + RestAssured.useRelaxedHTTPSValidation(); + } + 
+ @Test(priority=1, groups=["pro"], dataProvider = "users", testName = "Create users") + void createUsersTest(usernameRt, emailRt, passwordRt){ + Response response = createUser(usernameRt, emailRt, passwordRt) + response.then().statusCode(201) + + Reporter.log("- Create users. User ${usernameRt} created successfully", true) + } + + @Test(priority=2, groups=["pro"], dataProvider = "users", testName = "Verify users were created successfully") + void verifyUsersTest(usernameRt, emailRt, passwordRt){ + Response response = getUserDetails(usernameRt) + response.then().statusCode(200). + body("name", Matchers.equalTo(usernameRt)). + body("email", Matchers.equalTo(emailRt)). + body("admin", Matchers.equalTo(false)). + body("groups[0]", Matchers.equalTo("readers")) + + Reporter.log("- Verify created users. User ${usernameRt} was successfully verified", true) + } + + @Test(priority=3, groups=["pro"], dataProvider = "users", testName = "Generate API keys") + void generateAPIKeysTest(usernameRt, emailRt, passwordRt) { + Response createKey = generateAPIKey(usernameRt, passwordRt) + def errorMessage = createKey.then().extract().path("error") + if (errorMessage == null) { + def key = createKey.then().extract().path("apiKey") + Response getKey = getAPIKey(usernameRt, passwordRt) + def keyVerification = getKey.then().extract().path("apiKey") + Assert.assertTrue(key == keyVerification) + Reporter.log("- Generate API keys. Key for ${usernameRt} created successfully", true) + } else if (errorMessage.toString().contains("Api key already exists for user:")){ + Reporter.log("- Generate API keys. 
Key for ${usernameRt} already exists, skipped", true) + } + } + + @Test(priority=4, groups=["pro"], dataProvider = "users", testName = "Re-generate API keys") + void regenerateAPIKeysTest(usernameRt, emailRt, passwordRt){ + Response regenerated = regenerateAPIKey(usernameRt, passwordRt) + regenerated.then().statusCode(200) + def key = regenerated.then().extract().path("apiKey") + Response getKey = getAPIKey(usernameRt, passwordRt) + getKey.then().statusCode(200) + def keyVerification = getKey.then().extract().path("apiKey") + Assert.assertTrue(key == keyVerification) + + Reporter.log("- Re-generate API keys. Key for ${usernameRt} re-generated successfully", true) + } + + @Test(priority=5, groups=["pro"], dataProvider = "groups", testName = "Create a group") + void createGroupTest(groupName){ + Response create = createGroup(groupName) + create.then().statusCode(201) + + Response get = getGroup(groupName) + get.then().statusCode(200) + def name = get.then().extract().path("name") + def adminPrivileges = get.then().extract().path("adminPrivileges") + Assert.assertTrue(name == groupName) + Assert.assertTrue(adminPrivileges == false) + + Reporter.log("- Create group. 
Group ${groupName} was successfully created", true) + } + + @Test(priority=6, groups=["pro"], testName = "Create and verify permissions") + void createPermissionsTest(){ + def permissionName = "testPermission" + def repository = "ANY" + def user1 = "testuser0" + def user2 = "testuser1" + def group1 = "test-group-0" + def group2 = "test-group-1" + def action1 = "read" + def action2 = "write" + def action3 = "manage" + Response create = createPermissions(permissionName, repository, user1, user2, + group1, group2, action1, action2, action3) + create.then().statusCode(200) + + Response get = getPermissions(permissionName) + get.then().statusCode(200) + Assert.assertTrue(permissionName == get.then().extract().path("name")) + Assert.assertTrue(repository == get.then().extract().path("repo.repositories[0]")) + Assert.assertTrue(action1 == get.then().extract().path("repo.actions.users.${user1}[0]")) + Assert.assertTrue(action2 == get.then().extract().path("repo.actions.users.${user1}[1]")) + Assert.assertTrue(action3 == get.then().extract().path("repo.actions.users.${user1}[2]")) + Assert.assertTrue(action1 == get.then().extract().path("repo.actions.users.${user2}[0]")) + Assert.assertTrue(action2 == get.then().extract().path("repo.actions.users.${user2}[1]")) + Assert.assertTrue(action3 == get.then().extract().path("repo.actions.users.${user2}[2]")) + Assert.assertTrue(action1 == get.then().extract().path("repo.actions.groups.${group1}[0]")) + Assert.assertTrue(action2 == get.then().extract().path("repo.actions.groups.${group2}[1]")) + Assert.assertTrue(action1 == get.then().extract().path("repo.actions.groups.${group2}[0]")) + Assert.assertTrue(action2 == get.then().extract().path("repo.actions.groups.${group2}[1]")) + + Reporter.log("- Create permissions. 
Permissions successfully created and verified", true) + } + + @Test(priority=7, groups=["pro"], testName = "Delete permissions") + void deletePermissionsTest(){ + def permissionName = "testPermission" + Response delete = deletePermissions(permissionName) + delete.then().statusCode(200) + Reporter.log("- Delete permissions. User ${permissionName} has been removed successfully", true) + } + + @Test(priority=8, groups=["pro"], dataProvider = "users", testName = "Delete non-default users") + void deleteUserTest(usernameRt, email, passwordRt){ + Response delete = deleteUser(usernameRt) + delete.then().statusCode(200).body(Matchers.containsString("${usernameRt}")) + Reporter.log("- Delete user. User ${usernameRt} has been removed successfully", true) + } + + @Test(priority=9, groups=["pro"], dataProvider = "groups", testName = "Delete non-default groups") + void deleteGroupTest(groupName){ + Response delete = deleteGroup(groupName) + delete.then().statusCode(200).body(Matchers.containsString("${groupName}")) + Reporter.log("- Delete group. 
Group ${groupName} has been removed successfully", true) + } + +} diff --git a/Ansible/test/tests/src/test/groovy/tests/XrayTest.groovy b/Ansible/test/tests/src/test/groovy/tests/XrayTest.groovy new file mode 100644 index 0000000..ef6e4c4 --- /dev/null +++ b/Ansible/test/tests/src/test/groovy/tests/XrayTest.groovy @@ -0,0 +1,332 @@ +package tests + +import io.restassured.RestAssured +import io.restassured.response.Response +import org.testng.Assert +import org.testng.Reporter +import org.testng.annotations.BeforeSuite +import org.testng.annotations.BeforeTest +import org.testng.annotations.Test +import org.yaml.snakeyaml.Yaml +import steps.XraySteps + +import java.time.LocalDate + +import static org.hamcrest.Matchers.containsStringIgnoringCase +import static org.hamcrest.Matchers.emptyArray +import static org.hamcrest.Matchers.equalTo +import static org.hamcrest.Matchers.hasItem +import static org.hamcrest.Matchers.is +import static org.hamcrest.Matchers.isA +import static org.hamcrest.Matchers.not +import static org.hamcrest.Matchers.notNullValue +import static org.hamcrest.Matchers.nullValue + + +class XrayTest extends XraySteps{ + + Yaml yaml = new Yaml() + def configFile = new File("./src/test/resources/testenv.yaml") + def config = yaml.load(configFile.text) + def artifactoryURL + def distribution + def username + def password + def randomIndex + def policyName + def watchName + + @BeforeSuite(groups=["xray"]) + def setUp() { + artifactoryURL = config.artifactory.external_ip + distribution = config.artifactory.distribution + username = config.artifactory.rt_username + password = config.artifactory.rt_password + RestAssured.authentication = RestAssured.basic(username, password) + RestAssured.useRelaxedHTTPSValidation() + Random random = new Random() + randomIndex = random.nextInt(10000000) + policyName = "security_policy_${randomIndex}" + watchName = "all-repositories_${randomIndex}" + } + @BeforeTest(groups=["xray"]) + def testSetUp() { + RestAssured.baseURI = 
"http://${artifactoryURL}/xray/api" + } + + @Test(priority=1, groups=["xray"], dataProvider = "issueEvents", testName = "Create Issue Event") + void createIssueEventTest(issueID, cve, summary, description){ + Response create = createIssueEvent(issueID+randomIndex, cve, summary, description, username, password) + create.then().statusCode(201) + Response get = getIssueEvent(issueID+randomIndex, username, password) + get.then().statusCode(200) + def issueIDverification = get.then().extract().path("id") + def cveVerification = get.then().extract().path("source_id") + def summaryVerification = get.then().extract().path("summary") + def descriptionVerification = get.then().extract().path("description") + Assert.assertTrue(issueID+randomIndex == issueIDverification) + Assert.assertTrue(cve == cveVerification) + Assert.assertTrue(summary == summaryVerification) + Assert.assertTrue(description == descriptionVerification) + + Reporter.log("- Create issue event. Issue event with ID ${issueID+randomIndex} created and verified successfully", true) + } + + @Test(priority=2, groups=["xray"], dataProvider = "issueEvents", testName = "Update Issue Event", + dependsOnMethods = "createIssueEventTest") + void updateIssueEventTest(issueID, cve, summary, description){ + cve = "CVE-2017-0000000" + summary = "Updated" + description = "Updated" + Response update = updateIssueEvent(issueID+randomIndex, cve, summary, description, username, password) + update.then().statusCode(200) + Response get = getIssueEvent(issueID+randomIndex, username, password) + get.then().statusCode(200) + def cveVerification = get.then().extract().path("source_id") + def summaryVerification = get.then().extract().path("summary") + def descriptionVerification = get.then().extract().path("description") + Assert.assertTrue(cve == cveVerification) + Assert.assertTrue(summary == summaryVerification) + Assert.assertTrue(description == descriptionVerification) + + Reporter.log("- Update issue event. 
Issue event with ID ${issueID+randomIndex} updated and verified successfully", true) + } + + @Test(priority=3, groups=["xray"], testName = "Create policy") + void createPolicyTest(){ + Response create = createPolicy(policyName, username, password) + create.then().statusCode(201) + + Response get = getPolicy(policyName, username, password) + get.then().statusCode(200) + def policyNameVerification = get.then().extract().path("name") + Assert.assertTrue(policyName == policyNameVerification) + + Reporter.log("- Create policy. Policy with name ${policyName} created and verified successfully", true) + } + + @Test(priority=4, groups=["xray"], testName = "Update policy", dependsOnMethods = "createPolicyTest") + void updatePolicyTest(){ + def description = "Updated description" + Response update = updatePolicy(policyName, description, username, password) + update.then().statusCode(200) + + Response get = getPolicy(policyName, username, password) + get.then().statusCode(200) + def descriptionVerification = get.then().extract().path("description") + Assert.assertTrue(description == descriptionVerification) + + Reporter.log("- Update policy. Policy with name ${policyName} updated and verified successfully", true) + } + + @Test(priority=4, groups=["xray"], testName = "Get policies") + void getPoliciesTest(){ + Response response = getPolicies(username, password) + response.then().statusCode(200) + .body("name", notNullValue()) + .body("type", notNullValue()) + .body("description", notNullValue()) + .body("author", notNullValue()) + .body("rules", notNullValue()) + .body("created", notNullValue()) + .body("modified", notNullValue()) + def policies = response.then().extract().path("name") + Reporter.log("- Get policies. Policies list is returned successfully. 
" + + "Policies returned: ${policies}", true) + } + + @Test(priority=5, groups=["xray"], testName = "Create watch for the repositories", dependsOnMethods = "createPolicyTest") + void createWatchTest(){ + Response create = createWatchEvent(watchName, policyName, username, password) + create.then().statusCode(201) + .body("info", + equalTo("Watch has been successfully created")) + + Response get = getWatchEvent(watchName, username, password) + get.then().statusCode(200) + .body("general_data.name", equalTo((watchName).toString())) + + Reporter.log("- Create watch. Watch with name ${watchName} has been created and verified successfully", true) + } + + @Test(priority=6, groups=["xray"], testName = "Update watch for the repositories", dependsOnMethods = "createWatchTest") + void updateWatchTest(){ + def description = "Updated watch" + Response create = updateWatchEvent(watchName, description, policyName, username, password) + create.then().statusCode(200) + .body("info", + equalTo("Watch was successfully updated")) + + Response get = getWatchEvent(watchName, username, password) + get.then().statusCode(200) + .body("general_data.description", equalTo(description)) + + Reporter.log("- Update watch. Watch with name ${watchName} has been updated and verified successfully", true) + } + + @Test(priority=7, groups=["xray"], testName = "Assign policy to watches") + void assignPolicyToWatchTest(){ + Response response = assignPolicy(watchName, policyName, username, password) + response.then().statusCode(200) + .body("result.${watchName}", + equalTo("Policy assigned successfully to Watch")) + + Reporter.log("- Assign policy to watch. Policy assigned successfully to Watch", true) + } + + @Test(priority=8, groups=["xray"], testName = "Delete watch") + void deleteWatchTest(){ + Response response = deleteWatchEvent(watchName, username, password) + response.then().statusCode(200) + .body("info", + equalTo("Watch was deleted successfully")) + + Reporter.log("- Delete watch. 
Watch ${watchName} has been successfully deleted", true) + } + + @Test(priority=9, groups=["xray"], testName = "Delete policy") + void deletePolicyTest(){ + + Response response = deletePolicy(policyName, username, password) + response.then().statusCode(200) + .body("info", + equalTo(("Policy ${policyName} was deleted successfully").toString())) + + Reporter.log("- Delete policy. Policy ${policyName} has been successfully deleted", true) + } + +/* @Test(priority=10, groups=["xray"], testName = "Start scan") + void startScanTest(){ + def artifactPath = "default/generic-dev-local/test-directory/artifact.zip" + Response getSha = artifactSummary(username, password, artifactPath) + def componentID = getSha.then().extract().path("artifacts[0].licenses[0].components[0]") + + Response scan = startScan(username, password, componentID) + scan.then().statusCode(200) + .body("info", + equalTo(("Scan of artifact is in progress").toString())) + + Reporter.log("- Start scan. Scan of ${componentID} has been started successfully", true) + }*/ + + @Test(priority=11, groups=["xray"], testName = "Create and get integration configuration") + void integrationConfigurationTest(){ + def vendorName = "vendor_${randomIndex}" + Response post = addtIntegrationConfiguration(username, password, vendorName) + post.then().statusCode(200) + + Response get = getIntegrationConfiguration(username, password) + int bodySize = get.body().jsonPath().getList(".").size() + get.then().statusCode(200) + .body("[" + (bodySize-1) + "].vendor", equalTo(vendorName.toString())) + + Reporter.log("- Integration configuration. 
" + + "Configuration for vendor ${vendorName} has been successfully added and verified", true) + } + + @Test(priority=12, groups=["xray"], testName = "Enable TLS for RabbitMQ") + void enableTLSRabbitMQTest(){ + File body = new File("./src/test/resources/enableRabbitMQ.json") + Response post = postSystemParameters(username, password, body) + post.then().statusCode(200) + + Response get = getSystemParameters(username, password) + get.then().statusCode(200).body("enableTlsConnectionToRabbitMQ", equalTo(true)) + + Reporter.log("- Enable TLS for RabbitMQ. TLS for RabbitMQ has been successfully enabled and verified", true) + } + + @Test(priority=13, groups=["xray"], testName = "Get binary manager") + void getBinaryManagerTest(){ + Response response = getBinaryManager(username, password) + response.then().statusCode(200) + .body("binMgrId", equalTo("default")) + .body("license_valid", equalTo(true)) + .body("binMgrId", equalTo("default")) + def version = response.then().extract().path("version") + + Reporter.log("- Get binary manager. Binary manager is verified, connected RT version: ${version}", true) + } + + @Test(priority=14, groups=["xray"], testName = "Get repo indexing configuration") + void getIndexingConfigurationTest(){ + Response response = getIndexingConfiguration(username, password) + response.then().statusCode(200) + .body("bin_mgr_id", equalTo("default")) + .body("indexed_repos.name", hasItem("generic-dev-local")) + + Reporter.log("- Get repo indexing configuration.", true) + } + + @Test(priority=15, groups=["xray"], testName = "Update repo indexing configuration") + void updateIndexingConfigurationTest(){ + Response response = updateIndexingConfiguration(username, password) + response.then().statusCode(200) + .body("info", equalTo("Repositories list has been successfully sent to Artifactory")) + + Reporter.log("- Update repo indexing configuration. 
Successfully updated", true) + } + + // Force reindex test, add in latest versions of X-ray + +/* @Test(priority=16, groups=["xray"], testName = "Get artifact summary") + void artifactSummaryTest(){ + def artifactPath = "default/generic-dev-local/test-directory/artifact.zip" + Response post = artifactSummary(username, password, artifactPath) + post.then().statusCode(200) + .body("artifacts[0].general.path", equalTo(artifactPath)) + + Reporter.log("- Get artifact summary. Artifact summary has been returned successfully", true) + }*/ + + @Test(priority=17, groups=["xray"], testName = "Create support bundle") + void createSupportBundleTest(){ + def name = "Support Bundle" + LocalDate startDate = LocalDate.now().minusDays(5) + LocalDate endDate = LocalDate.now() + Response response = createSupportBundle(username, password, name, startDate, endDate) + def bundle_url = response.then().extract().path("artifactory.bundle_url") + if ((bundle_url.toString()).contains(artifactoryURL)) { + Reporter.log("- Create support bundle. Successfully created using X-ray API", true) + } else if (((bundle_url.toString()).contains("localhost"))){ + Reporter.log("- Create support bundle. Created with a bug, localhost instead of the hostname", true) + } + } + + @Test(priority=18, groups=["xray"], testName = "Get system monitoring status") + void getSystemMonitoringTest(){ + Response response = getSystemMonitoringStatus(username, password) + response.then().statusCode(200) + + Reporter.log("- Get system monitoring status. Data returned successfully", true) + } + + @Test(priority=19, groups=["xray"], testName = "X-ray ping request") + void xrayPingRequestTest(){ + Response response = xrayPingRequest(username, password) + response.then().statusCode(200) + .body("status", equalTo("pong")) + + Reporter.log("- Get system monitoring status. 
Data returned successfully", true) + } + + @Test(priority=20, groups=["xray"], testName = "X-ray version") + void xrayGetVersionTest(){ + Response response = xrayGetVersion(username, password) + response.then().statusCode(200) + .body("xray_version", notNullValue()) + .body("xray_revision", notNullValue()) + def version = response.then().extract().path("xray_version") + def revision = response.then().extract().path("xray_revision") + + Reporter.log("- Get X-ray version. Version: ${version}, revision: ${revision}", true) + } + + + + +} + + + + diff --git a/Ansible/test/tests/src/test/groovy/utils/ConfigurationUtil.groovy b/Ansible/test/tests/src/test/groovy/utils/ConfigurationUtil.groovy new file mode 100644 index 0000000..208bf29 --- /dev/null +++ b/Ansible/test/tests/src/test/groovy/utils/ConfigurationUtil.groovy @@ -0,0 +1,19 @@ +package utils + +class ConfigurationUtil { + + static def getEnvironmentVariableValue(def name) { + def value = System.getProperty(name) + if (value == null) { + value = System.getenv(name) + if (value == null) { + throw new Exception("Environment variable $name not set!"); + } + } + return value + } + + + + +} diff --git a/Ansible/test/tests/src/test/groovy/utils/DSL.groovy b/Ansible/test/tests/src/test/groovy/utils/DSL.groovy new file mode 100644 index 0000000..6a14f48 --- /dev/null +++ b/Ansible/test/tests/src/test/groovy/utils/DSL.groovy @@ -0,0 +1,81 @@ +package utils + +import utils.ProcessOutputStream + +/** + * Created by eliom on 6/19/18. 
+ */ +class DSL { + + /** + * Run shell command + */ + static def sh = { command, outputBuffer = null, folder = null, silent = false, customEnvVariables = null, errorBuffer = null -> + //def workdir = ConfigurationUtil.getEnvironmentVariableValue("KERMIT_WORKSPACE_DIR") + def workdir = "/Users/danielmi/projects/soldev/.kermit-workspace" + def commandFolder + if (folder != null) { + commandFolder = new File(folder, workdir) + } else { + commandFolder = workdir + } + + if (!silent) { + println "Running command at ${commandFolder}: $command" + } + def proc = null + try { + def env = System.getenv().collect { k, v -> "$k=$v" } + + if (customEnvVariables != null) { + env.addAll( customEnvVariables.collect { k, v -> "$k=$v" }) + } + + if (command instanceof List && command.size() > 0 && command[0] instanceof List) { + //Pipe Commands + command.each { + if (proc != null) { + proc = proc | it.execute(env, commandFolder) + } else { + proc = it.execute(env, commandFolder) + } + } + } else { + proc = command.execute(env, commandFolder) + } + } catch (IOException e) { + println "Failed to execute command: ${e.getMessage()}" + return -1 + } + def processOutput = new ProcessOutputStream(silent, outputBuffer == null) + def errorOutput = processOutput + if (errorBuffer != null) { + errorOutput = new ProcessOutputStream(silent, errorBuffer == null) + } + + proc.consumeProcessOutput(processOutput, errorOutput) + def exitStatus = proc.waitFor() + if (!silent) { + println "Exit: $exitStatus" + } + if (outputBuffer != null) { + outputBuffer.append(processOutput.toString()) + } + + processOutput.close() + + if (errorBuffer != null) { + errorBuffer.append(errorOutput.toString()) + errorOutput.close() + } + + return exitStatus + } + + + //... 
+ + + + +} diff --git a/Ansible/test/tests/src/test/groovy/utils/EnvironmentConfig.groovy b/Ansible/test/tests/src/test/groovy/utils/EnvironmentConfig.groovy new file mode 100644 index 0000000..db7d017 --- /dev/null +++ b/Ansible/test/tests/src/test/groovy/utils/EnvironmentConfig.groovy @@ -0,0 +1,10 @@ +package utils + + + +class EnvironmentConfig { + + + + +} diff --git a/Ansible/test/tests/src/test/groovy/utils/ProcessOutputStream.groovy b/Ansible/test/tests/src/test/groovy/utils/ProcessOutputStream.groovy new file mode 100644 index 0000000..aff11c2 --- /dev/null +++ b/Ansible/test/tests/src/test/groovy/utils/ProcessOutputStream.groovy @@ -0,0 +1,32 @@ +package utils + +public class ProcessOutputStream extends ByteArrayOutputStream{ + + private boolean silent = false; + private boolean discardOutput = false; + + public ProcessOutputStream(boolean silent, boolean discardOutput) { + this.silent = silent; + this.discardOutput = discardOutput; + } + + @Override + public synchronized void write(int b) { + if (!silent) { + System.out.write(b); + } + if (!discardOutput) { + super.write(b); + } + } + + @Override + public synchronized void write(byte[] b, int off, int len) { + if (!silent) { + System.out.write(b, off, len); + } + if (!discardOutput) { + super.write(b, off, len); + } + } +} \ No newline at end of file diff --git a/Ansible/test/tests/src/test/groovy/utils/Shell.groovy b/Ansible/test/tests/src/test/groovy/utils/Shell.groovy new file mode 100644 index 0000000..87d4aaf --- /dev/null +++ b/Ansible/test/tests/src/test/groovy/utils/Shell.groovy @@ -0,0 +1,17 @@ +package utils + +class Shell { + + def executeProc(cmd) { + println(cmd) + def proc = cmd.execute() + + proc.in.eachLine {line -> + println line + } + + println proc.err.text + + proc.exitValue() + } +} diff --git a/Ansible/test/tests/src/test/groovy/utils/WorkSpaceManager.groovy b/Ansible/test/tests/src/test/groovy/utils/WorkSpaceManager.groovy new file mode 100644 index 0000000..f1262bb --- /dev/null 
+++ b/Ansible/test/tests/src/test/groovy/utils/WorkSpaceManager.groovy @@ -0,0 +1,32 @@ +package utils + +/** + * Created by eliom on 6/26/18. + */ +class WorkspaceManager { + + //TODO: Make it Thread safe + def static currentPath = [] + + def static pushPath(path) { + currentPath.push(path) + } + + def static popPath() { + currentPath.pop() + } + + def static getCurrentDir() { + def workspaceRoot = ConfigurationUtil.getWorkspaceDir() + if (currentPath.size() > 0) { + def currentDir = new File(currentPath.join('/'), workspaceRoot) + if (!currentDir.exists()) { + currentDir.mkdirs() + } + return currentDir + } else { + return workspaceRoot + } + } + +} diff --git a/Ansible/test/tests/src/test/resources/enableRabbitMQ.json b/Ansible/test/tests/src/test/resources/enableRabbitMQ.json new file mode 100644 index 0000000..c6e54d4 --- /dev/null +++ b/Ansible/test/tests/src/test/resources/enableRabbitMQ.json @@ -0,0 +1,11 @@ +{ + "sslInsecure": false, + "maxDiskDataUsage": 80, + "monitorSamplingInterval": 300, + "mailNoSsl": false, + "messageMaxTTL": 7, + "jobInterval": 86400, + "allowSendingAnalytics": true, + "httpsPort": 443, + "enableTlsConnectionToRabbitMQ": true +} \ No newline at end of file diff --git a/Ansible/test/tests/src/test/resources/integration.json b/Ansible/test/tests/src/test/resources/integration.json new file mode 100644 index 0000000..08ba303 --- /dev/null +++ b/Ansible/test/tests/src/test/resources/integration.json @@ -0,0 +1,9 @@ +{ + "vendor": "whitesource5", + "api_key": "12345", + "enabled": true, + "context": "project_id", + "url": "https://saas.whitesourcesoftware.com/xray", + "description": "WhiteSource provides a simple yet powerful open source security and licenses management solution. 
More details at http://www.whitesourcesoftware.com.", + "test_url": "https://saas.whitesourcesoftware.com/xray/api/checkauth" +} \ No newline at end of file diff --git a/Ansible/test/tests/src/test/resources/repositories/CreateDefault.yaml b/Ansible/test/tests/src/test/resources/repositories/CreateDefault.yaml new file mode 100644 index 0000000..ef1f5fd --- /dev/null +++ b/Ansible/test/tests/src/test/resources/repositories/CreateDefault.yaml @@ -0,0 +1,554 @@ +localRepositories: + libs-release-local: + type: maven + description: "production deployment" + repoLayout: maven-2-default + xray: + enabled: true + libs-snapshot-local: + type: maven + description: "snapshot deployment" + repoLayout: maven-2-default + xray: + enabled: true + maven-prod-local: + type: maven + description: "production release deployment" + repoLayout: maven-2-default + xray: + enabled: true + maven-dev-local: + type: maven + description: "development release deployment" + repoLayout: maven-2-default + xray: + enabled: true + maven-release-local: + type: maven + description: "development release deployment" + repoLayout: maven-2-default + xray: + enabled: true + maven-snapshot-local: + type: maven + description: "development release deployment" + repoLayout: maven-2-default + xray: + enabled: true + gradle-prod-local: + type: gradle + description: "production deployment" + repoLayout: gradle-default + xray: + enabled: true + gradle-dev-local: + type: gradle + description: "development deployment" + repoLayout: gradle-default + xray: + enabled: true + tomcat-local: + type: generic + description: "used by demo" + repoLayout: simple-default + xray: + enabled: true + generic-prod-local: + type: generic + description: "production deployment" + repoLayout: simple-default + xray: + enabled: true + generic-dev-local: + type: generic + description: "development deployment" + repoLayout: simple-default + xray: + enabled: true + ivy-prod-local: + type: ivy + description: "production deployment" + 
repoLayout: "ivy-default" + xray: + enabled: true + ivy-dev-local: + type: ivy + description: "development deployment" + repoLayout: ivy-default + xray: + enabled: true + helm-prod-local: + type: helm + description: "production deployment" + repoLayout: simple-default + xray: + enabled: true + helm-dev-local: + type: helm + description: "development deployment" + repoLayout: simple-default + xray: + enabled: true + sbt-prod-local: + type: sbt + description: "production deployment" + repoLayout: sbt-default + xray: + enabled: true + sbt-dev-local: + type: sbt + description: "development deployment" + repoLayout: sbt-default + xray: + enabled: true + nuget-prod-local: + type: nuget + description: "production deployment" + repoLayout: nuget-default + xray: + enabled: true + nuget-dev-local: + type: nuget + description: "development deployment" + repoLayout: nuget-default + xray: + enabled: true + gems-prod-local: + type: gems + description: "production deployment" + repoLayout: simple-default + xray: + enabled: true + gems-dev-local: + type: gems + description: "development deployment" + repoLayout: simple-default + xray: + enabled: true + npm-prod-local: + type: npm + description: "production deployment" + repoLayout: npm-default + xray: + enabled: true + npm-dev-local: + type: npm + description: "development deployment" + repoLayout: npm-default + xray: + enabled: true + bower-prod-local: + type: bower + description: "production deployment" + repoLayout: bower-default + xray: + enabled: true + bower-dev-local: + type: bower + description: "development deployment" + repoLayout: bower-default + xray: + enabled: true + debian-prod-local: + type: debian + description: "production deployment" + repoLayout: simple-default + xray: + enabled: true + debian-dev-local: + type: debian + description: "development deployment" + repoLayout: simple-default + xray: + enabled: true + php-prod-local: + type: composer + description: "production deployment" + repoLayout: 
composer-default + xray: + enabled: true + php-dev-local: + type: composer + description: "development deployment" + repoLayout: composer-default + xray: + enabled: true + pypi-prod-local: + type: pypi + description: "production deployment" + repoLayout: simple-default + xray: + enabled: true + pypi-dev-local: + type: pypi + description: "development deployment" + repoLayout: simple-default + xray: + enabled: true + docker-prod-local: + type: docker + description: "production deployment" + repoLayout: simple-default + xray: + enabled: true + docker-stage-local: + type: docker + description: "stage deployment" + repoLayout: simple-default + xray: + enabled: true + docker-dev-local: + type: docker + description: "development deployment" + repoLayout: simple-default + xray: + enabled: true + docker-local: + type: docker + description: "docker deployment" + repoLayout: simple-default + xray: + enabled: true + docker-push: + type: docker + description: "docker push repo for push replication testing" + repoLayout: simple-default + xray: + enabled: true + vagrant-prod-local: + type: vagrant + description: "production deployment" + repoLayout: simple-default + xray: + enabled: true + vagrant-dev-local: + type: vagrant + description: "development deployment" + repoLayout: simple-default + xray: + enabled: true + gitlfs-prod-local: + type: gitlfs + description: "production deployment" + repoLayout: simple-default + xray: + enabled: true + gitlfs-dev-local: + type: gitlfs + description: "development deployment" + repoLayout: simple-default + xray: + enabled: true + rpm-prod-local: + type: yum + description: "production deployment" + repoLayout: simple-default + xray: + enabled: true + rpm-dev-local: + type: yum + description: "development deployment" + repoLayout: simple-default + xray: + enabled: true + conan-prod-local: + type: conan + description: "production deployment" + repoLayout: conan-default + xray: + enabled: true + conan-dev-local: + type: conan + description: 
"development deployment" + repoLayout: conan-default + xray: + enabled: true + chef-prod-local: + type: chef + description: "production deployment" + repoLayout: simple-default + xray: + enabled: true + chef-dev-local: + type: chef + description: "development deployment" + repoLayout: simple-default + xray: + enabled: true + puppet-prod-local: + type: puppet + description: "production deployment" + repoLayout: puppet-default + xray: + enabled: true + puppet-dev-local: + type: puppet + description: "development deployment" + repoLayout: puppet-default + xray: + enabled: true + go-prod-local: + type: go + description: "production deployment" + repoLayout: go-default + xray: + enabled: true + go-staging-local: + type: go + description: "production deployment" + repoLayout: go-default + xray: + enabled: true +remoteRepositories: + docker-remote: + type: docker + url: https://registry-1.docker.io + repoLayout: simple-default + enableTokenAuthentication: true + xray: + enabled: true + helm-remote: + type: helm + url: https://storage.googleapis.com/kubernetes-charts + repoLayout: simple-default + xray: + enabled: true + jcenter: + type: maven + url: https://jcenter.bintray.com + repoLayout: maven-2-default + xray: + enabled: true + npm-remote: + type: npm + url: https://registry.npmjs.org + repoLayout: npm-default + xray: + enabled: true + nuget-remote: + type: nuget + url: https://www.nuget.org/ + repoLayout: nuget-default + xray: + enabled: true + bower-remote: + type: bower + url: https://github.com/ + repoLayout: bower-default + xray: + enabled: true + gems-remote: + type: gems + url: https://rubygems.org/ + repoLayout: simple-default + xray: + enabled: true + debian-remote: + type: debian + url: http://archive.ubuntu.com/ubuntu/ + repoLayout: simple-default + xray: + enabled: true + php-remote: + type: composer + url: https://github.com/ + repoLayout: composer-default + xray: + enabled: true + pypi-remote: + type: pypi + url: https://files.pythonhosted.org + 
repoLayout: simple-default + xray: + enabled: true + rpm-remote: + type: yum + url: http://mirror.centos.org/centos/ + repoLayout: simple-default + xray: + enabled: true + chef-remote: + type: chef + url: https://supermarket.chef.io + repoLayout: simple-default + xray: + enabled: true + puppet-remote: + type: puppet + url: https://forgeapi.puppetlabs.com/ + repoLayout: puppet-default + xray: + enabled: true +virtualRepositories: + maven-release-virtual: + type: maven + repositories: + - maven-prod-local + - jcenter + - maven-release-local + - libs-release-local + description: "maven release virtual repositories" + defaultDeploymentRepo: maven-release-local + maven-snapshot-virtual: + type: maven + repositories: + - maven-snapshot-local + - jcenter + - maven-dev-local + - libs-snapshot-local + description: "maven snapshot virtual repositories" + defaultDeploymentRepo: maven-snapshot-local + gradle-virtual: + type: gradle + repositories: + - gradle-dev-local + - jcenter + - gradle-prod-local + - libs-release-local + description: "gradle virtual repositories" + defaultDeploymentRepo: gradle-dev-local + docker-PLACEHOLDERFORBUILDSTEP: + type: docker + repositories: + - docker-local + - docker-remote + - docker-dev-local + - docker-prod-local + - docker-stage-local + - docker-push + description: "docker virtual" + defaultDeploymentRepo: docker-stage-local + docker-virtual: + type: docker + repositories: + - docker-local + - docker-remote + - docker-dev-local + - docker-prod-local + - docker-stage-local + - docker-push + description: "docker virtual" + defaultDeploymentRepo: docker-stage-local + libs-release: + type: maven + repositories: + - libs-release-local + - jcenter + description: "maven libraries virtual" + defaultDeploymentRepo: libs-release-local + libs-snapshot: + type: maven + repositories: + - libs-snapshot-local + - jcenter + description: "maven libraries virtual" + defaultDeploymentRepo: libs-snapshot-local + ivy-virtual: + type: ivy + repositories: + - 
ivy-prod-local + - ivy-dev-local + - jcenter + description: "ivy virtual" + defaultDeploymentRepo: ivy-dev-local + generic-virtual: + type: generic + repositories: + - generic-prod-local + - generic-dev-local + description: "generic virtual" + defaultDeploymentRepo: generic-dev-local + helm-virtual: + type: helm + repositories: + - helm-prod-local + - helm-dev-local + - helm-remote + description: "helm virtual" + defaultDeploymentRepo: helm-dev-local + nuget-virtual: + type: nuget + repositories: + - nuget-prod-local + - nuget-dev-local + - nuget-remote + description: "nuget virtual" + defaultDeploymentRepo: nuget-dev-local + npm-virtual: + type: npm + repositories: + - npm-dev-local + - npm-remote + - npm-prod-local + description: "npm virtual" + defaultDeploymentRepo: npm-dev-local + chef-virtual: + type: chef + repositories: + - chef-dev-local + - chef-remote + - chef-prod-local + description: "chef virtual" + defaultDeploymentRepo: chef-dev-local + puppet-virtual: + type: puppet + repositories: + - puppet-dev-local + - puppet-remote + - puppet-prod-local + description: "puppet virtual" + defaultDeploymentRepo: puppet-dev-local + rpm-virtual: + type: yum + repositories: + - rpm-dev-local + - rpm-remote + - rpm-prod-local + description: "rpm virtual" + defaultDeploymentRepo: rpm-dev-local + gitlfs-virtual: + type: gitlfs + repositories: + - gitlfs-dev-local + - gitlfs-prod-local + description: "gitlfs virtual" + defaultDeploymentRepo: gitlfs-dev-local + pypi-virtual: + type: pypi + repositories: + - pypi-dev-local + - pypi-prod-local + - pypi-remote + description: "pypi virtual" + defaultDeploymentRepo: pypi-dev-local + bower-virtual: + type: bower + repositories: + - bower-dev-local + - bower-prod-local + - bower-remote + description: "bower virtual" + defaultDeploymentRepo: bower-dev-local + gems-virtual: + type: gems + repositories: + - gems-dev-local + - gems-prod-local + - gems-remote + description: "gems virtual" + defaultDeploymentRepo: gems-dev-local + 
sbt-virtual: + type: sbt + repositories: + - sbt-dev-local + - sbt-prod-local + - jcenter + description: "sbt virtual" + defaultDeploymentRepo: sbt-dev-local + go-staging: + type: go + repositories: + - go-staging-local + - go-prod-local + description: "go virtual" + defaultDeploymentRepo: go-staging-local diff --git a/Ansible/test/tests/src/test/resources/repositories/CreateJCR.yaml b/Ansible/test/tests/src/test/resources/repositories/CreateJCR.yaml new file mode 100644 index 0000000..38a5feb --- /dev/null +++ b/Ansible/test/tests/src/test/resources/repositories/CreateJCR.yaml @@ -0,0 +1,119 @@ +localRepositories: + tomcat-local: + type: generic + description: "used by demo" + repoLayout: simple-default + xray: + enabled: true + generic-prod-local: + type: generic + description: "production deployment" + repoLayout: simple-default + xray: + enabled: true + generic-dev-local: + type: generic + description: "development deployment" + repoLayout: simple-default + xray: + enabled: true + helm-prod-local: + type: helm + description: "production deployment" + repoLayout: simple-default + xray: + enabled: true + helm-dev-local: + type: helm + description: "development deployment" + repoLayout: simple-default + xray: + enabled: true + docker-generator: + type: docker + description: "docker generator repo for generation testing" + repoLayout: simple-default + xray: + enabled: true + docker-prod-local: + type: docker + description: "production deployment" + repoLayout: simple-default + xray: + enabled: true + docker-stage-local: + type: docker + description: "stage deployment" + repoLayout: simple-default + xray: + enabled: true + docker-dev-local: + type: docker + description: "development deployment" + repoLayout: simple-default + xray: + enabled: true + docker-local: + type: docker + description: "docker deployment" + repoLayout: simple-default + xray: + enabled: true + docker-push: + type: docker + description: "docker push repo for push replication testing" + 
repoLayout: simple-default + xray: + enabled: true +virtualRepositories: + generic-virtual: + type: generic + repositories: + - generic-prod-local + - generic-dev-local + description: "generic virtual" + defaultDeploymentRepo: generic-dev-local + helm-virtual: + type: helm + repositories: + - helm-prod-local + - helm-dev-local + - helm-remote + description: "helm virtual" + defaultDeploymentRepo: helm-dev-local + docker-PLACEHOLDERFORBUILDSTEP: + type: docker + repositories: + - docker-local + - docker-remote + - docker-dev-local + - docker-prod-local + - docker-stage-local + - docker-push + description: "docker virtual" + defaultDeploymentRepo: docker-stage-local + docker-virtual: + type: docker + repositories: + - docker-local + - docker-remote + - docker-dev-local + - docker-prod-local + - docker-stage-local + - docker-push + description: "docker virtual" + defaultDeploymentRepo: docker-stage-local +remoteRepositories: + helm-remote: + type: helm + url: https://storage.googleapis.com/kubernetes-charts + repoLayout: simple-default + xray: + enabled: true + docker-remote: + type: docker + url: https://registry-1.docker.io + repoLayout: simple-default + enableTokenAuthentication: true + xray: + enabled: true \ No newline at end of file diff --git a/Ansible/test/tests/src/test/resources/repositories/artifact.zip b/Ansible/test/tests/src/test/resources/repositories/artifact.zip new file mode 100644 index 0000000..0e86cb5 Binary files /dev/null and b/Ansible/test/tests/src/test/resources/repositories/artifact.zip differ diff --git a/Ansible/test/tests/src/test/resources/testenv.yaml b/Ansible/test/tests/src/test/resources/testenv.yaml new file mode 100644 index 0000000..55ff648 --- /dev/null +++ b/Ansible/test/tests/src/test/resources/testenv.yaml @@ -0,0 +1,6 @@ +artifactory: + url: urlval + external_ip: ipval + distribution: artifactory_ha + rt_username: admin + rt_password: passval \ No newline at end of file diff --git a/JFrog-Cloud-Installers.iml 
b/JFrog-Cloud-Installers.iml new file mode 100644 index 0000000..bbd4172 --- /dev/null +++ b/JFrog-Cloud-Installers.iml @@ -0,0 +1,11 @@ + + + + + + + + + + + \ No newline at end of file diff --git a/ansible.cfg b/ansible.cfg new file mode 100644 index 0000000..f0524a4 --- /dev/null +++ b/ansible.cfg @@ -0,0 +1,10 @@ +[defaults] +# Installs collections into [current dir]/ansible_collections/namespace/collection_name +collections_paths = ~/.ansible/collections:/usr/share/ansible/collections:collection + +# Installs roles into [current dir]/roles/namespace.rolename +roles_path = Ansible/collection/jfrog/ansible/roles + +host_key_checking = false + +deprecation_warnings=False \ No newline at end of file