[ansible] JFrog Platform 7.18.6 (#110)

This commit is contained in:
Ram Mohan Rao Chukka
2021-05-10 13:08:30 +05:30
committed by GitHub
parent 12d4e96727
commit 224ece535d
132 changed files with 164 additions and 118 deletions

View File

@@ -1,28 +0,0 @@
# artifactory
The artifactory role installs the Artifactory Pro software onto the host. Per the Vars below, it will configure a node as primary or secondary. This role uses secondary roles artifactory_nginx to install nginx.
## Role Variables
* _server_name_: **mandatory** This is the server name. eg. "artifactory.54.175.51.178.xip.io"
* _artifactory_upgrade_only_: Perform a software upgrade only. Default is false.
Additional variables can be found in [defaults/main.yml](./defaults/main.yml).
## Example Playbook
```
---
- hosts: artifactory_servers
roles:
- artifactory
```
## Upgrades
The Artifactory role supports software upgrades. To use a role to perform a software upgrade only, use the _artifactory_upgrade_only_ variable and specify the version. See the following example.
```
- hosts: artifactory_servers
vars:
artifactory_version: "{{ lookup('env', 'artifactory_version_upgrade') }}"
artifactory_upgrade_only: true
roles:
- artifactory
```

View File

@@ -1,57 +0,0 @@
---
# defaults file for artifactory
# indicates where this collection was downloaded from (galaxy, automation_hub, standalone)
ansible_marketplace: standalone
# Set this to true when SSL is enabled (to use artifactory_nginx_ssl role), default to false (implies artifactory uses artifactory_nginx role )
artifactory_nginx_ssl_enabled: false
# Provide single node license
# artifactory_single_license:
# Provide individual (HA) licenses file separated by new line and set artifactory_ha_enabled: true.
# Example:
# artifactory_licenses: |-
# <license_1>
# <license_2>
# <license_3>
# To enable HA, set to true
artifactory_ha_enabled: false
# By default, all nodes are primary (CNHA) - https://www.jfrog.com/confluence/display/JFROG/High+Availability#HighAvailability-Cloud-NativeHighAvailability
artifactory_taskAffinity: any
# The location where Artifactory should install.
jfrog_home_directory: /opt/jfrog
# The location where Artifactory should store data.
artifactory_file_store_dir: /data
# Pick the Artifactory flavour to install, can be also cpp-ce, jcr, pro.
artifactory_flavour: pro
artifactory_extra_java_opts: -server -Xms512m -Xmx2g -Xss256k -XX:+UseG1GC
artifactory_system_yaml_template: system.yaml.j2
artifactory_tar: https://releases.jfrog.io/artifactory/artifactory-pro/org/artifactory/pro/jfrog-artifactory-pro/{{ artifactory_version }}/jfrog-artifactory-pro-{{ artifactory_version }}-linux.tar.gz
artifactory_home: "{{ jfrog_home_directory }}/artifactory"
artifactory_untar_home: "{{ jfrog_home_directory }}/artifactory-{{ artifactory_flavour }}-{{ artifactory_version }}"
postgres_driver_download_url: https://repo1.maven.org/maven2/org/postgresql/postgresql/42.2.20/postgresql-42.2.20.jar
artifactory_user: artifactory
artifactory_group: artifactory
artifactory_daemon: artifactory
artifactory_uid: 1030
artifactory_gid: 1030
# if this is an upgrade
artifactory_upgrade_only: false
#default username and password
artifactory_admin_username: admin
artifactory_admin_password: password

View File

@@ -1,7 +0,0 @@
---
# handlers file for distribution
- name: restart artifactory
become: yes
systemd:
name: "{{ artifactory_daemon }}"
state: restarted

View File

@@ -1,16 +0,0 @@
galaxy_info:
author: "JFrog Maintainers Team <installers@jfrog.com>"
description: "The artifactory role installs the Artifactory Pro software onto the host. Per the Vars below, it will configure a node as primary or secondary. This role uses secondary roles artifactory_nginx to install nginx."
company: JFrog
issue_tracker_url: "https://github.com/jfrog/JFrog-Cloud-Installers/issues"
license: license (Apache-2.0)
min_ansible_version: 2.9
galaxy_tags:
- artifactory
- jfrog
dependencies: []

View File

@@ -1,161 +0,0 @@
---
- debug:
msg: "Performing installation of Artifactory version : {{ artifactory_version }} "
# Install plain nginx unless SSL termination was requested via
# artifactory_nginx_ssl_enabled (defaults to false in defaults/main.yml).
- name: install nginx
  include_role:
    name: artifactory_nginx
  # Bare-variable test instead of '== false' literal comparison (ansible-lint literal-compare).
  when: not artifactory_nginx_ssl_enabled
- name: install nginx with SSL
  include_role:
    name: artifactory_nginx_ssl
  when: artifactory_nginx_ssl_enabled
- name: Ensure group artifactory exist
become: yes
group:
name: "{{ artifactory_group }}"
gid: "{{ artifactory_gid }}"
state: present
- name: Ensure user artifactory exist
become: yes
user:
uid: "{{ artifactory_uid }}"
name: "{{ artifactory_user }}"
group: "{{ artifactory_group }}"
create_home: yes
home: "{{ artifactory_home }}"
shell: /bin/bash
state: present
- name: Download artifactory
become: yes
unarchive:
src: "{{ artifactory_tar }}"
dest: "{{ jfrog_home_directory }}"
remote_src: yes
owner: "{{ artifactory_user }}"
group: "{{ artifactory_group }}"
creates: "{{ artifactory_untar_home }}"
when: artifactory_tar is defined
register: downloadartifactory
until: downloadartifactory is succeeded
retries: 3
- name: Check if app directory exists
become: yes
stat:
path: "{{ artifactory_home }}/app"
register: app_dir_check
- name: Copy untar directory to artifactory home
become: yes
command: "cp -r {{ artifactory_untar_home }}/. {{ artifactory_home }}"
when: not app_dir_check.stat.exists
- name: Create required directories
become: yes
file:
path: "{{ item }}"
state: directory
recurse: yes
owner: "{{ artifactory_user }}"
group: "{{ artifactory_group }}"
loop:
- "{{ artifactory_file_store_dir }}"
- "{{ artifactory_home }}/var/data"
- "{{ artifactory_home }}/var/etc"
- "{{ artifactory_home }}/var/etc/security/"
- "{{ artifactory_home }}/var/etc/artifactory/info/"
- name: Configure systemyaml
become: yes
template:
src: "{{ artifactory_system_yaml_template }}"
dest: "{{ artifactory_home }}/var/etc/system.yaml"
notify: restart artifactory
# Write the JFrog master key used to encrypt/decrypt configuration secrets.
- name: Configure master key
  become: true
  copy:
    dest: "{{ artifactory_home }}/var/etc/security/master.key"
    content: |
      {{ master_key }}
    owner: "{{ artifactory_user }}"
    group: "{{ artifactory_group }}"
    # Quoted: unquoted 0640 is parsed as the octal integer 416 by YAML 1.1
    # (ansible-lint risky-octal).
    mode: "0640"
# Write the join key used by services to register with the JFrog platform.
- name: Configure join key
  become: true
  copy:
    dest: "{{ artifactory_home }}/var/etc/security/join.key"
    content: |
      {{ join_key }}
    owner: "{{ artifactory_user }}"
    group: "{{ artifactory_group }}"
    mode: "0640"
  notify: restart artifactory
- name: Configure installer info
become: yes
template:
src: installer-info.json.j2
dest: "{{ artifactory_home }}/var/etc/artifactory/info/installer-info.json"
notify: restart artifactory
- name: Configure binary store
become: yes
template:
src: binarystore.xml.j2
dest: "{{ artifactory_home }}/var/etc/artifactory/binarystore.xml"
notify: restart artifactory
- name: Configure single license
become: yes
template:
src: artifactory.lic.j2
dest: "{{ artifactory_home }}/var/etc/artifactory/artifactory.lic"
when: artifactory_single_license is defined
notify: restart artifactory
- name: Configure HA licenses
become: yes
template:
src: artifactory.cluster.license.j2
dest: "{{ artifactory_home }}/var/etc/artifactory/artifactory.cluster.license"
when: artifactory_licenses is defined
notify: restart artifactory
- name: Download database driver
become: yes
get_url:
url: "{{ postgres_driver_download_url }}"
dest: "{{ artifactory_home }}/var/bootstrap/artifactory/tomcat/lib"
owner: "{{ artifactory_user }}"
group: "{{ artifactory_group }}"
when: postgres_driver_download_url is defined
notify: restart artifactory
- name: Create artifactory service
become: yes
shell: "{{ artifactory_home }}/app/bin/installService.sh"
- name: Ensure permissions are correct
become: yes
file:
path: "{{ jfrog_home_directory }}"
group: "{{ artifactory_group }}"
owner: "{{ artifactory_user }}"
recurse: yes
- name: Restart artifactory
meta: flush_handlers
# Poll the router health endpoint until Artifactory reports healthy
# (up to 25 * 5s on top of the per-request 130s timeout).
- name: Wait for artifactory to be fully deployed
  uri:
    url: http://127.0.0.1:8082/router/api/v1/system/health
    timeout: 130
  register: result
  until: result.status == 200
  retries: 25
  delay: 5

View File

@@ -1,6 +0,0 @@
- name: perform installation
include_tasks: "install.yml"
when: not artifactory_upgrade_only
- name: perform upgrade
include_tasks: "upgrade.yml"
when: artifactory_upgrade_only

View File

@@ -1,105 +0,0 @@
---
- debug:
msg: "Performing upgrade of Artifactory version to : {{ artifactory_version }} "
- name: Stop artifactory
become: yes
systemd:
name: "{{ artifactory_daemon }}"
state: stopped
- name: Ensure jfrog_home_directory exists
become: yes
file:
path: "{{ jfrog_home_directory }}"
state: directory
- name: Download artifactory for upgrade
become: yes
unarchive:
src: "{{ artifactory_tar }}"
dest: "{{ jfrog_home_directory }}"
remote_src: yes
owner: "{{ artifactory_user }}"
group: "{{ artifactory_group }}"
creates: "{{ artifactory_untar_home }}"
when: artifactory_tar is defined
register: downloadartifactory
until: downloadartifactory is succeeded
retries: 3
- name: Delete artifactory app directory
become: yes
file:
path: "{{ artifactory_home }}/app"
state: absent
- name: Copy new app to artifactory app
become: yes
command: "cp -r {{ artifactory_untar_home }}/app/. {{ artifactory_home }}/app"
# Re-assert the join key after the upgrade copy so the node can
# re-register with the JFrog platform.
- name: Configure join key
  become: true
  copy:
    dest: "{{ artifactory_home }}/var/etc/security/join.key"
    content: |
      {{ join_key }}
    owner: "{{ artifactory_user }}"
    group: "{{ artifactory_group }}"
    # Quoted: unquoted 0640 is parsed as the octal integer 416 by YAML 1.1
    # (ansible-lint risky-octal).
    mode: "0640"
  notify: restart artifactory
- name: Configure single license
become: yes
template:
src: artifactory.lic.j2
dest: "{{ artifactory_home }}/var/etc/artifactory/artifactory.lic"
when: artifactory_single_license is defined
notify: restart artifactory
- name: Configure HA licenses
become: yes
template:
src: artifactory.cluster.license.j2
dest: "{{ artifactory_home }}/var/etc/artifactory/artifactory.cluster.license"
when: artifactory_licenses is defined
notify: restart artifactory
- name: Configure installer info
become: yes
template:
src: installer-info.json.j2
dest: "{{ artifactory_home }}/var/etc/artifactory/info/installer-info.json"
notify: restart artifactory
- name: Configure binary store
become: yes
template:
src: binarystore.xml.j2
dest: "{{ artifactory_home }}/var/etc/artifactory/binarystore.xml"
notify: restart artifactory
- name: Configure systemyaml
become: yes
template:
src: "{{ artifactory_system_yaml_template }}"
dest: "{{ artifactory_home }}/var/etc/system.yaml"
notify: restart artifactory
- name: Ensure permissions are correct
become: yes
file:
path: "{{ jfrog_home_directory }}"
group: "{{ artifactory_group }}"
owner: "{{ artifactory_user }}"
recurse: yes
- name: Restart artifactory
meta: flush_handlers
# Poll the router health endpoint until the upgraded Artifactory reports
# healthy (up to 25 * 5s on top of the per-request 130s timeout).
- name: Wait for artifactory to be fully deployed
  uri:
    url: http://127.0.0.1:8082/router/api/v1/system/health
    timeout: 130
  register: result
  until: result.status == 200
  retries: 25
  delay: 5

View File

@@ -1,3 +0,0 @@
{% if (artifactory_licenses) and (artifactory_licenses|length > 0) %}
{{ artifactory_licenses }}
{% endif %}

View File

@@ -1,3 +0,0 @@
{% if (artifactory_single_license) and (artifactory_single_license|length > 0) %}
{{ artifactory_single_license }}
{% endif %}

View File

@@ -1,4 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<config version="2">
<chain template="cluster-file-system"/>
</config>

View File

@@ -1,9 +0,0 @@
{{ ansible_managed | comment }}
{
"productId": "Ansible_Artifactory/{{ platform_collection_version }}-{{ artifactory_version }}",
"features": [
{
"featureId": "Channel/{{ ansible_marketplace }}"
}
]
}

View File

@@ -1,17 +0,0 @@
configVersion: 1
shared:
extraJavaOpts: "{{ artifactory_extra_java_opts }}"
node:
id: {{ ansible_date_time.iso8601_micro | to_uuid }}
ip: {{ ansible_host }}
taskAffinity: {{ artifactory_taskAffinity }}
haEnabled: {{ artifactory_ha_enabled }}
database:
type: "{{ artifactory_db_type }}"
driver: "{{ artifactory_db_driver }}"
url: "{{ artifactory_db_url }}"
username: "{{ artifactory_db_user }}"
password: "{{ artifactory_db_password }}"
router:
entrypoints:
internalPort: 8046

View File

@@ -1,5 +0,0 @@
# artifactory_nginx
This role installs NGINX for artifactory. This role is automatically called by the artifactory role and isn't intended to be used separately.
## Role Variables
* _server_name_: **mandatory** This is the server name. eg. "artifactory.54.175.51.178.xip.io"

View File

@@ -1,7 +0,0 @@
---
# defaults file for artifactory_nginx
## For production deployments, you SHOULD change it.
server_name: test.artifactory.com
nginx_daemon: nginx

View File

@@ -1,37 +0,0 @@
#user nobody;
worker_processes 1;
error_log /var/log/nginx/error.log info;
#pid logs/nginx.pid;
events {
worker_connections 1024;
}
http {
include mime.types;
variables_hash_max_size 1024;
variables_hash_bucket_size 64;
server_names_hash_max_size 4096;
server_names_hash_bucket_size 128;
types_hash_max_size 2048;
types_hash_bucket_size 64;
proxy_read_timeout 2400s;
client_header_timeout 2400s;
client_body_timeout 2400s;
proxy_connect_timeout 75s;
proxy_send_timeout 2400s;
proxy_buffer_size 32k;
proxy_buffers 40 32k;
proxy_busy_buffers_size 64k;
proxy_temp_file_write_size 250m;
proxy_http_version 1.1;
client_body_buffer_size 128k;
include /etc/nginx/conf.d/*.conf;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
#tcp_nopush on;
#keepalive_timeout 0;
keepalive_timeout 65;
}

View File

@@ -1,8 +0,0 @@
---
# handlers file for artifactory_nginx
- name: restart nginx
become: yes
systemd:
name: "{{ nginx_daemon }}"
state: restarted
enabled: yes

View File

@@ -1,16 +0,0 @@
galaxy_info:
author: "JFrog Maintainers Team <installers@jfrog.com>"
description: "This role installs NGINX for artifactory. This role is automatically called by the artifactory role and isn't intended to be used separately."
company: JFrog
issue_tracker_url: "https://github.com/jfrog/JFrog-Cloud-Installers/issues"
license: license (Apache-2.0)
min_ansible_version: 2.9
galaxy_tags:
- artifactory
- jfrog
dependencies: []

View File

@@ -1,9 +0,0 @@
---
- name: apt-get update
become: yes
apt:
update_cache: yes
register: package_res
retries: 5
delay: 60
until: package_res is success

View File

@@ -1,6 +0,0 @@
---
- name: epel-release
become: yes
yum:
name: epel-release
state: present

View File

@@ -1,35 +0,0 @@
---
- name: Install dependencies
include_tasks: "{{ ansible_os_family }}.yml"
- name: Install nginx after dependency installation
become: yes
package:
name: nginx
state: present
register: package_res
retries: 5
delay: 60
until: package_res is success
- name: Configure main nginx conf file.
become: yes
copy:
src: nginx.conf
dest: /etc/nginx/nginx.conf
owner: root
group: root
mode: '0755'
- name: Configure the artifactory nginx conf
become: yes
template:
src: artifactory.conf.j2
dest: /etc/nginx/conf.d/artifactory.conf
owner: root
group: root
mode: '0755'
notify: restart nginx
- name: Restart nginx
meta: flush_handlers

View File

@@ -1,43 +0,0 @@
###########################################################
## this configuration was generated by JFrog Artifactory ##
###########################################################
## Add HA entries when HA is configured
upstream artifactory {
server 127.0.0.1:8082;
}
upstream artifactory-direct {
server 127.0.0.1:8081;
}
## server configuration
server {
listen 80 ;
server_name {{ server_name }};
if ($http_x_forwarded_proto = '') {
set $http_x_forwarded_proto $scheme;
}
## Application specific logs
access_log /var/log/nginx/artifactory-access.log;
error_log /var/log/nginx/artifactory-error.log;
rewrite ^/$ /ui/ redirect;
rewrite ^/ui$ /ui/ redirect;
chunked_transfer_encoding on;
client_max_body_size 0;
location / {
proxy_read_timeout 2400s;
proxy_pass_header Server;
proxy_cookie_path ~*^/.* /;
proxy_pass "http://artifactory";
proxy_next_upstream error timeout non_idempotent;
proxy_next_upstream_tries 1;
proxy_set_header X-JFrog-Override-Base-Url $http_x_forwarded_proto://$host:$server_port;
proxy_set_header X-Forwarded-Port $server_port;
proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
proxy_set_header Host $http_host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
location ~ ^/artifactory/ {
proxy_pass http://artifactory-direct;
}
}
}

View File

@@ -1,2 +0,0 @@
---
# vars file for artifactory_nginx

View File

@@ -1,7 +0,0 @@
# artifactory_nginx_ssl
The artifactory_nginx_ssl role installs and configures nginx for SSL.
## Role Variables
* _server_name_: This is the server name. eg. "artifactory.54.175.51.178.xip.io"
* _certificate_: This is the SSL cert.
* _certificate_key_: This is the SSL private key.

View File

@@ -1,7 +0,0 @@
---
# defaults file for artifactory_nginx
## For production deployments, you SHOULD change it.
# server_name: test.artifactory.com
nginx_daemon: nginx

View File

@@ -1,8 +0,0 @@
---
# handlers file for artifactory_nginx_ssl
- name: restart nginx
become: yes
systemd:
name: "{{ nginx_daemon }}"
state: restarted
enabled: yes

View File

@@ -1,16 +0,0 @@
galaxy_info:
author: "JFrog Maintainers Team <installers@jfrog.com>"
description: "The artifactory_nginx_ssl role installs and configures nginx for SSL."
company: JFrog
issue_tracker_url: "https://github.com/jfrog/JFrog-Cloud-Installers/issues"
license: license (Apache-2.0)
min_ansible_version: 2.9
galaxy_tags:
- artifactory
- jfrog
dependencies: []

View File

@@ -1,40 +0,0 @@
---
# tasks file for artifactory_nginx
- name: Configure the artifactory nginx conf
become: yes
template:
src: artifactory.conf.j2
dest: /etc/nginx/conf.d/artifactory.conf
owner: root
group: root
mode: '0755'
notify: restart nginx
- name: Ensure nginx dir exists
become: yes
file:
path: "/var/opt/jfrog/nginx/ssl"
state: directory
- name: Configure certificate
become: yes
template:
src: certificate.pem.j2
dest: "/var/opt/jfrog/nginx/ssl/cert.pem"
notify: restart nginx
- name: Ensure pki exists
become: yes
file:
path: "/etc/pki/tls"
state: directory
- name: Configure key
become: yes
template:
src: certificate.key.j2
dest: "/etc/pki/tls/cert.key"
notify: restart nginx
- name: Restart nginx
meta: flush_handlers

View File

@@ -1,48 +0,0 @@
###########################################################
## this configuration was generated by JFrog Artifactory ##
###########################################################
## Add HA entries when HA is configured
upstream artifactory {
server 127.0.0.1:8082;
}
upstream artifactory-direct {
server 127.0.0.1:8081;
}
ssl_protocols TLSv1.1 TLSv1.2;
ssl_certificate /var/opt/jfrog/nginx/ssl/cert.pem;
ssl_certificate_key /etc/pki/tls/cert.key;
ssl_session_cache shared:SSL:1m;
ssl_prefer_server_ciphers on;
## server configuration
server {
listen 443 ssl http2;
server_name {{ server_name }};
if ($http_x_forwarded_proto = '') {
set $http_x_forwarded_proto $scheme;
}
## Application specific logs
access_log /var/log/nginx/artifactory-access.log;
error_log /var/log/nginx/artifactory-error.log;
rewrite ^/$ /ui/ redirect;
rewrite ^/ui$ /ui/ redirect;
chunked_transfer_encoding on;
client_max_body_size 0;
location / {
proxy_read_timeout 2400s;
proxy_pass_header Server;
proxy_cookie_path ~*^/.* /;
proxy_pass "http://artifactory";
proxy_next_upstream error timeout non_idempotent;
proxy_next_upstream_tries 1;
proxy_set_header X-JFrog-Override-Base-Url $http_x_forwarded_proto://$host:$server_port;
proxy_set_header X-Forwarded-Port $server_port;
proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
proxy_set_header Host $http_host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
location ~ ^/artifactory/ {
proxy_pass http://artifactory-direct;
}
}
}

View File

@@ -1,4 +0,0 @@
{% set cert = certificate_key.split('|') %}
{% for line in cert %}
{{ line }}
{% endfor %}

View File

@@ -1,4 +0,0 @@
{% set cert = certificate.split('|') %}
{% for line in cert %}
{{ line }}
{% endfor %}

View File

@@ -1,2 +0,0 @@
---
# vars file for artifactory_nginx

View File

@@ -1,26 +0,0 @@
# Distribution
The Distribution role will install distribution software onto the host. An Artifactory server and a PostgreSQL database are required.
### Role Variables
* _distribution_upgrade_only_: Perform a software upgrade only. Default is false.
Additional variables can be found in [defaults/main.yml](./defaults/main.yml).
## Example Playbook
```
---
- hosts: distribution_servers
roles:
- distribution
```
## Upgrades
The distribution role supports software upgrades. To use a role to perform a software upgrade only, use the _distribution_upgrade_only_ variable and specify the version. See the following example.
```
- hosts: distributionservers
vars:
distribution_version: "{{ lookup('env', 'distribution_version_upgrade') }}"
distribution_upgrade_only: true
roles:
- distribution
```

View File

@@ -1,43 +0,0 @@
---
# defaults file for distribution
# indicates where this collection was downloaded from (galaxy, automation_hub, standalone)
ansible_marketplace: standalone
# whether to enable HA
distribution_ha_enabled: false
distribution_ha_node_type : master
# The location where distribution should install.
jfrog_home_directory: /opt/jfrog
# The remote distribution download file
distribution_tar: https://releases.jfrog.io/artifactory/jfrog-distribution/distribution-linux/{{ distribution_version }}/jfrog-distribution-{{ distribution_version }}-linux.tar.gz
#The distribution install directory
distribution_untar_home: "{{ jfrog_home_directory }}/jfrog-distribution-{{ distribution_version }}-linux"
distribution_home: "{{ jfrog_home_directory }}/distribution"
distribution_install_script_path: "{{ distribution_home }}/app/bin"
distribution_thirdparty_path: "{{ distribution_home }}/app/third-party"
distribution_archive_service_cmd: "{{ distribution_install_script_path }}/installService.sh"
#distribution users and groups
distribution_user: distribution
distribution_group: distribution
distribution_uid: 1040
distribution_gid: 1040
distribution_daemon: distribution
flow_type: archive
# Redis details
distribution_redis_url: "redis://localhost:6379"
distribution_redis_password: password
# if this is an upgrade
distribution_upgrade_only: false
distribution_system_yaml_template: system.yaml.j2

View File

@@ -1,7 +0,0 @@
---
# handlers file for distribution
- name: restart distribution
become: yes
systemd:
name: "{{ distribution_daemon }}"
state: restarted

View File

@@ -1,16 +0,0 @@
galaxy_info:
author: "JFrog Maintainers Team <installers@jfrog.com>"
description: "The distribution role will install distribution software onto the host. An Artifactory server and a PostgreSQL database are required."
company: JFrog
issue_tracker_url: "https://github.com/jfrog/JFrog-Cloud-Installers/issues"
license: license (Apache-2.0)
min_ansible_version: 2.9
galaxy_tags:
- distribution
- jfrog
dependencies: []

View File

@@ -1,44 +0,0 @@
- name: Prepare expect scenario script
set_fact:
expect_scenario: |
set timeout 300
spawn {{ exp_executable_cmd }}
expect_before timeout { exit 1 }
set CYCLE_END 0
set count 0
while { $CYCLE_END == 0 } {
expect {
{% for each_request in exp_scenarios %}
-nocase -re {{ '{' }}{{ each_request.expecting }}.*} {
send "{{ each_request.sending }}\n"
}
{% endfor %}
eof {
set CYCLE_END 1
}
}
set count "[expr $count + 1]"
if { $count > 16} {
exit 128
}
}
expect eof
lassign [wait] pid spawnid os_error_flag value
if {$os_error_flag == 0} {
puts "INSTALLER_EXIT_STATUS-$value"
} else {
puts "INSTALLER_EXIT_STATUS-$value"
}
- name: Interactive with expect
become: yes
ignore_errors: yes
shell: |
{{ expect_scenario }}
args:
executable: /usr/bin/expect
chdir: "{{ exp_dir }}"
register: exp_result

View File

@@ -1,155 +0,0 @@
---
- debug:
msg: "Performing installation of Distribution version - {{ distribution_version }}"
# 'expect' drives the interactive Distribution installer (see expect.yml).
- name: Install expect dependency
  yum:
    name: expect
    state: present
  become: true
  # BUG FIX: the fact value is 'RedHat' (capital H); the previous 'Redhat'
  # never matched, so expect was never installed on RHEL-family hosts.
  when: ansible_os_family == 'RedHat'
- name: Install expect dependency
  apt:
    name: expect
    state: present
    update_cache: true
  become: true
  when: ansible_os_family == 'Debian'
- name: Ensure group jfdistribution exist
become: yes
group:
name: "{{ distribution_group }}"
gid: "{{ distribution_gid }}"
state: present
- name: Ensure user distribution exist
become: yes
user:
uid: "{{ distribution_uid }}"
name: "{{ distribution_user }}"
group: "{{ distribution_group }}"
create_home: yes
home: "{{ distribution_home }}"
shell: /bin/bash
state: present
- name: Download distribution
become: yes
unarchive:
src: "{{ distribution_tar }}"
dest: "{{ jfrog_home_directory }}"
remote_src: yes
owner: "{{ distribution_user }}"
group: "{{ distribution_group }}"
creates: "{{ distribution_untar_home }}"
register: downloaddistribution
until: downloaddistribution is succeeded
retries: 3
- name: Check if app directory exists
become: yes
stat:
path: "{{ distribution_home }}/app"
register: app_dir_check
- name: Copy untar directory to distribution home
become: yes
command: "cp -r {{ distribution_untar_home }}/. {{ distribution_home }}"
when: not app_dir_check.stat.exists
- name: Create required directories
become: yes
file:
path: "{{ item }}"
state: directory
recurse: yes
owner: "{{ distribution_user }}"
group: "{{ distribution_group }}"
loop:
- "{{ distribution_home }}/var/etc"
- "{{ distribution_home }}/var/etc/security/"
- "{{ distribution_home }}/var/etc/info/"
- "{{ distribution_home }}/var/etc/redis/"
- name: Configure master key
become: yes
copy:
dest: "{{ distribution_home }}/var/etc/security/master.key"
content: |
{{ master_key }}
owner: "{{ distribution_user }}"
group: "{{ distribution_group }}"
mode: 0640
- name: Check if install.sh wrapper script exist
become: yes
stat:
path: "{{ distribution_install_script_path }}/install.sh"
register: install_wrapper_script
- name: Include interactive installer scripts
include_vars: script/archive.yml
- name: Install Distribution
include_tasks: expect.yml
vars:
exp_executable_cmd: "./install.sh -u {{ distribution_user }} -g {{ distribution_group }}"
exp_dir: "{{ distribution_install_script_path }}"
exp_scenarios: "{{ distribution_installer_scenario['main'] }}"
args:
apply:
environment:
YQ_PATH: "{{ distribution_thirdparty_path }}/yq"
when: install_wrapper_script.stat.exists
- name: Configure redis config
become: yes
template:
src: "redis.conf.j2"
dest: "{{ distribution_home }}/var/etc/redis/redis.conf"
notify: restart distribution
- name: Configure systemyaml
become: yes
template:
src: "{{ distribution_system_yaml_template }}"
dest: "{{ distribution_home }}/var/etc/system.yaml"
notify: restart distribution
- name: Configure installer info
become: yes
template:
src: installer-info.json.j2
dest: "{{ distribution_home }}/var/etc/info/installer-info.json"
notify: restart distribution
- name: Update distribution permissions
become: yes
file:
path: "{{ distribution_home }}"
state: directory
recurse: yes
owner: "{{ distribution_user }}"
group: "{{ distribution_group }}"
mode: '0755'
- name: Install Distribution as a service
become: yes
shell: |
{{ distribution_archive_service_cmd }}
args:
chdir: "{{ distribution_install_script_path }}"
register: check_service_status_result
ignore_errors: yes
- name: Restart distribution
meta: flush_handlers
# Poll the router health endpoint until Distribution reports healthy
# (up to 25 * 5s on top of the per-request 130s timeout).
- name: Wait for distribution to be fully deployed
  uri:
    url: http://127.0.0.1:8082/router/api/v1/system/health
    timeout: 130
  register: result
  until: result.status == 200
  retries: 25
  delay: 5

View File

@@ -1,6 +0,0 @@
- name: perform installation
include_tasks: "install.yml"
when: not distribution_upgrade_only
- name: perform upgrade
include_tasks: "upgrade.yml"
when: distribution_upgrade_only

View File

@@ -1,111 +0,0 @@
---
- debug:
msg: "Performing upgrade of Distribution version to {{ distribution_version }} "
- name: Stop distribution
become: yes
systemd:
name: "{{ distribution_daemon }}"
state: stopped
- name: Download distribution for upgrade
become: yes
unarchive:
src: "{{ distribution_tar }}"
dest: "{{ jfrog_home_directory }}"
remote_src: yes
owner: "{{ distribution_user }}"
group: "{{ distribution_group }}"
creates: "{{ distribution_untar_home }}"
register: downloaddistribution
until: downloaddistribution is succeeded
retries: 3
- name: Delete distribution app
become: yes
file:
path: "{{ distribution_home }}/app"
state: absent
- name: Copy new app to distribution app
become: yes
command: "cp -r {{ distribution_untar_home }}/app/. {{ distribution_home }}/app"
- name: Check if install.sh wrapper script exist
become: yes
stat:
path: "{{ distribution_install_script_path }}/install.sh"
register: install_wrapper_script
- name: Include interactive installer scripts
include_vars: script/archive.yml
- name: Install Distribution
include_tasks: expect.yml
vars:
exp_executable_cmd: "./install.sh -u {{ distribution_user }} -g {{ distribution_group }}"
exp_dir: "{{ distribution_install_script_path }}"
exp_scenarios: "{{ distribution_installer_scenario['main'] }}"
args:
apply:
environment:
YQ_PATH: "{{ distribution_thirdparty_path }}/yq"
when: install_wrapper_script.stat.exists
- name: Ensure {{ distribution_home }}/var/etc/redis exists
become: yes
file:
path: "{{ distribution_home }}/var/etc/redis/"
state: directory
owner: "{{ distribution_user }}"
group: "{{ distribution_group }}"
- name: Configure redis config
become: yes
template:
src: "redis.conf.j2"
dest: "{{ distribution_home }}/var/etc/redis/redis.conf"
notify: restart distribution
- name: Configure installer info
become: yes
template:
src: installer-info.json.j2
dest: "{{ distribution_home }}/var/etc/info/installer-info.json"
notify: restart distribution
- name: Configure systemyaml
become: yes
template:
src: "{{ distribution_system_yaml_template }}"
dest: "{{ distribution_home }}/var/etc/system.yaml"
notify: restart distribution
- name: Update Distribution base dir owner and group
become: yes
file:
path: "{{ distribution_home }}"
state: directory
recurse: yes
owner: "{{ distribution_user }}"
group: "{{ distribution_group }}"
mode: '0755'
- name: Install Distribution as a service
become: yes
shell: |
{{ distribution_archive_service_cmd }}
args:
chdir: "{{ distribution_install_script_path }}"
register: check_service_status_result
ignore_errors: yes
- name: Restart distribution
meta: flush_handlers
# Poll the router health endpoint until the upgraded Distribution reports
# healthy (up to 25 * 5s on top of the per-request 130s timeout).
- name: Wait for distribution to be fully deployed
  uri:
    url: http://127.0.0.1:8082/router/api/v1/system/health
    timeout: 130
  register: result
  until: result.status == 200
  retries: 25
  delay: 5

View File

@@ -1,9 +0,0 @@
{{ ansible_managed | comment }}
{
"productId": "Ansible_Distribution/{{ platform_collection_version }}-{{ distribution_version }}",
"features": [
{
"featureId": "Channel/{{ ansible_marketplace }}"
}
]
}

View File

@@ -1,15 +0,0 @@
{{ ansible_managed | comment }}
# Redis configuration file
# data directory for redis
dir {{ distribution_home }}/var/data/redis
# log directory for redis
logfile {{ distribution_home }}/var/log/redis/redis.log
# pid file location for redis
pidfile {{ distribution_home }}/app/run/redis.pid
# password for redis
# if changed, the same should be set as value for shared.redis.password in JF_PRODUCT_HOME/var/etc/system.yaml
requirepass {{ distribution_redis_password }}

View File

@@ -1,20 +0,0 @@
configVersion: 1
shared:
jfrogUrl: {{ jfrog_url }}
node:
ip: {{ ansible_host }}
id: {{ ansible_date_time.iso8601_micro | to_uuid }}
database:
type: "{{ distribution_db_type }}"
driver: "{{ distribution_db_driver }}"
url: "{{ distribution_db_url }}"
username: "{{ distribution_db_user }}"
password: "{{ distribution_db_password }}"
redis:
connectionString: "{{ distribution_redis_url }}"
password: "{{ distribution_redis_password }}"
security:
joinKey: {{ join_key }}
router:
entrypoints:
internalPort: 8046

View File

@@ -1,42 +0,0 @@
# Expect-script prompt/response pairs driving the interactive Distribution
# installer. "expecting" values are case-insensitive Tcl regexes matched
# against installer output; "sending" is the reply typed in response.
distribution_installer_scenario:
  main:
    - {
      "expecting": "(data|installation) directory \\(",
      "sending": "{{ distribution_home }}"
    }
    - {
      "expecting": "join key.*:",
      "sending": "{{ join_key }}"
    }
    - {
      "expecting": "jfrog url:",
      "sending": "{{ jfrog_url }}"
    }
    - {
      "expecting": "do you want to continue",
      "sending": "y"
    }
    - {
      # Both the master and non-master branches of the former conditional sent
      # the same value, so the dead if/else around ansible_host was removed.
      "expecting": "please specify the ip address of this machine",
      "sending": "{{ ansible_host }}"
    }
    - {
      "expecting": "are you adding an additional node",
      "sending": "{% if distribution_ha_node_type is defined and distribution_ha_node_type == 'master' %}n{% else %}y{% endif %}"
    }
    - {
      "expecting": "do you want to install postgresql",
      "sending": "n"
    }
    - {
      "expecting": "postgresql url.*example",
      "sending": "{{ distribution_db_url }}"
    }
    - {
      "expecting": "(postgresql|database)?\\s?username.*",
      "sending": "{{ distribution_db_user }}"
    }
    - {
      "expecting": "(confirm\\s?)?(postgresql|database)?\\s?password.*:",
      "sending": "{{ distribution_db_password }}"
    }

View File

@@ -1,26 +0,0 @@
# MissionControl
The missioncontrol role will install missioncontrol software onto the host. An Artifactory server and a PostgreSQL database are required.
### Role Variables
* _mc_upgrade_only_: Perform a software upgrade only. Default is false.
Additional variables can be found in [defaults/main.yml](./defaults/main.yml).
## Example Playbook
```
---
- hosts: missioncontrol_servers
roles:
- missioncontrol
```
## Upgrades
The missioncontrol role supports software upgrades. To use a role to perform a software upgrade only, use the _mc_upgrade_only_ variable and specify the version. See the following example.
```
- hosts: missioncontrol_servers
vars:
missioncontrol_version: "{{ lookup('env', 'missioncontrol_version_upgrade') }}"
mc_upgrade_only: true
roles:
- missioncontrol
```

View File

@@ -1,58 +0,0 @@
---
# defaults file for mc
# indicates where this collection was downloaded from (galaxy, automation_hub, standalone)
ansible_marketplace: standalone
# whether to enable HA
mc_ha_enabled: false
mc_ha_node_type: master
# The location where mc should install.
jfrog_home_directory: /opt/jfrog
# The remote mc download file
mc_tar: https://releases.jfrog.io/artifactory/jfrog-mc/linux/{{ missionControl_version }}/jfrog-mc-{{ missionControl_version }}-linux.tar.gz
# The mc install directory
mc_untar_home: "{{ jfrog_home_directory }}/jfrog-mc-{{ missionControl_version }}-linux"
mc_home: "{{ jfrog_home_directory }}/mc"
mc_install_script_path: "{{ mc_home }}/app/bin"
mc_thirdparty_path: "{{ mc_home }}/app/third-party"
mc_archive_service_cmd: "{{ mc_install_script_path }}/installService.sh"
# mc users and groups
mc_user: jfmc
mc_group: jfmc
mc_uid: 1050
mc_gid: 1050
mc_daemon: mc
# MissionControl ElasticSearch details
es_uid: 1060
es_gid: 1060
mc_es_conf_base: "/etc/elasticsearch"
mc_es_user: admin
mc_es_password: admin
mc_es_url: "http://localhost:8082"
mc_es_base_url: "http://localhost:8082/elasticsearch"
mc_es_transport_port: 9300
mc_es_home: "/usr/share/elasticsearch"
mc_es_data_dir: "/var/lib/elasticsearch"
mc_es_log_dir: "/var/log/elasticsearch"
mc_es_java_home: "/usr/share/elasticsearch/jdk"
mc_es_script_path: "/usr/share/elasticsearch/bin"
# NOTE(review): "searchgaurd" is a misspelling of "searchguard", kept because
# other task files reference this variable name verbatim.
mc_es_searchgaurd_home: "/usr/share/elasticsearch/plugins/search-guard-7"
flow_type: archive
# if this is an upgrade
mc_upgrade_only: false
mc_system_yaml_template: system.yaml.j2

View File

@@ -1,28 +0,0 @@
-----BEGIN PRIVATE KEY-----
MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDY1nDD1cW5ykZV
rTXrAMJeLuZknW9tg+4s8R+XYrzRMTNr9tAXEYNEa+T92HtqrKaVZtdGiQ6NmS95
EYezEgVmyGQEuuVlY8ChcX8XgpBsJPBV4+XIRju+RSyEW+ZNkT3EWTRKab+KSgN2
aZ2OT16UqfJd3JjATZw//xXHRWhCQhchX3nNyzkIgENPtdtSweSLG4NjOHY08U7g
Zee21MCqa/58NVECJXlqK/Tfw/3SPgCmSHLLCyybWfClLmXXIjBuSTtSOLDPj4pw
VrZeR0aePs7ZNJnX/tUICNSZeNzs7+n9QUoAiKYSNKSdDw270Lbo5GQdWuM7nkrc
2txeH8wvAgMBAAECggEAGzbuzZAVp40nlAvlPyrH5PeQmwLXarCq7Uu7Yfir0hA8
Gp9429cALqThXKrAR/yF+9eodTCGebxxejR6X5MyHQWm5/Znts307fjyBoqwgveF
N9fJOIBNce1PT7K+Y5szBrhbbmt59Wqh/J6iKQD1J0YdJoKlTp1vBZPdBoxDhZfN
TgayY4e71ox7Vew+QrxDXzMA3J+EbbBXFL2yOmpNI/FPpEtbCE9arjSa7oZXJAvd
Aenc6GYctkdbtjpX7zHXz5kHzaAEdmorR+q3w6k8cDHBvc+UoRYgLz3fBaVhhQca
rP4PYp04ztIn3qcOpVoisUkpsQcev2cJrWeFW0WgAQKBgQD7ZFsGH8cE84zFzOKk
ee53zjlmIvXqjQWzSkmxy9UmDnYxEOZbn6epK2I5dtCbU9ZZ3f4KM8TTAM5GCOB+
j4cN/rqM7MdhkgGL/Dgw+yxGVlwkSsQMil16vqdCIRhEhqjChc7KaixuaBNtIBV0
+9ZRfoS5fEjrctX4/lULwS6EAQKBgQDcz/C6PV3mXk8u7B48kGAJaKbafh8S3BnF
V0zA7qI/aQHuxmLGIQ7hNfihdZwFgYG4h5bXvBKGsxwu0JGvYDNL44R9zXuztsVX
PEixV572Bx87+mrVEt3bwj3lhbohzorjSF2nnJuFA+FZ0r4sQwudyZ2c8yCqRVhI
mfj36FWQLwKBgHNw1zfNuee1K6zddCpRb8eGZOdZIJJv5fE6KPNDhgLu2ymW+CGV
BDn0GSwIOq1JZ4JnJbRrp3O5x/9zLhwQLtWnZuU2CiztDlbJIMilXuSB3dgwmSyl
EV4/VLFSX0GAkNia96YN8Y9Vra4L8K6Cwx0zOyGuSBIO7uFjcYxvTrwBAoGAWeYn
AgweAL6Ayn/DR7EYCHydAfO7PvhxXZDPZPVDBUIBUW9fo36uCi7pDQNPBEbXw4Mg
fLDLch/V55Fu3tHx0IHO3VEdfet5qKyYg+tCgrQfmVG40QsfXGtWu+2X/E+U6Df8
OVNfVeZghytv1aFuR01gaBfsQqZ87QITBQuIWm0CgYAKdzhETd+jBBLYyOCaS8mh
zQr/ljIkrZIwPUlBkj6TAsmTJTbh7O6lf50CQMEHyE0MNFOHrvkKn89BObXcmwtV
92parLTR7RAeaPMRxCZs4Xd/oABYVGFjMa7TVNA2S6HReDqqTpJrCCkyVuYkr1f2
OflnwX2RlaWl45n0qkwkTw==
-----END PRIVATE KEY-----

View File

@@ -1,51 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIEcjCCA1qgAwIBAgIGAXY81RkkMA0GCSqGSIb3DQEBCwUAMG4xEzARBgoJkiaJ
k/IsZAEZFgNjb20xFTATBgoJkiaJk/IsZAEZFgVqZnJvZzEUMBIGA1UECgwLamZy
b2csIEluYy4xCzAJBgNVBAsMAkNBMR0wGwYDVQQDDBRzaWduaW5nLmNhLmpmcm9n
LmNvbTAeFw0yMDEyMDcxMDUyNDhaFw0zMDEyMDUxMDUyNDhaMGwxEzARBgoJkiaJ
k/IsZAEZFgNjb20xGTAXBgoJkiaJk/IsZAEZFglsb2NhbGhvc3QxGDAWBgNVBAoM
D2xvY2FsaG9zdCwgSW5jLjEMMAoGA1UECwwDT3BzMRIwEAYDVQQDDAlsb2NhbGhv
c3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDY1nDD1cW5ykZVrTXr
AMJeLuZknW9tg+4s8R+XYrzRMTNr9tAXEYNEa+T92HtqrKaVZtdGiQ6NmS95EYez
EgVmyGQEuuVlY8ChcX8XgpBsJPBV4+XIRju+RSyEW+ZNkT3EWTRKab+KSgN2aZ2O
T16UqfJd3JjATZw//xXHRWhCQhchX3nNyzkIgENPtdtSweSLG4NjOHY08U7gZee2
1MCqa/58NVECJXlqK/Tfw/3SPgCmSHLLCyybWfClLmXXIjBuSTtSOLDPj4pwVrZe
R0aePs7ZNJnX/tUICNSZeNzs7+n9QUoAiKYSNKSdDw270Lbo5GQdWuM7nkrc2txe
H8wvAgMBAAGjggEWMIIBEjCBmgYDVR0jBIGSMIGPgBSh7peJvc4Im3WkR6/FaUD/
aYDa8qF0pHIwcDETMBEGCgmSJomT8ixkARkWA2NvbTEaMBgGCgmSJomT8ixkARkW
Cmpmcm9namZyb2cxFDASBgNVBAoMC0pGcm9nLCBJbmMuMQswCQYDVQQLDAJDQTEa
MBgGA1UEAwwRcm9vdC5jYS5qZnJvZy5jb22CAQIwHQYDVR0OBBYEFIuWN8D/hFhl
w0bdSyG+PmymjpVUMAwGA1UdEwEB/wQCMAAwDgYDVR0PAQH/BAQDAgXgMCAGA1Ud
JQEB/wQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAUBgNVHREEDTALgglsb2NhbGhv
c3QwDQYJKoZIhvcNAQELBQADggEBAJQJljyNH/bpvmiYO0+d8El+BdaU7FI2Q2Sq
1xBz/qBQSVmUB0iIeblTdQ58nYW6A/pvh8EnTWE7tRPXw3WQR4it8ldGSDQe2zHt
9U0hcC7DSzYGxlHLm0UI/LNwzdRy0kY8LArE/zGDSQ+6hp2Op21IHtzGfJnILG5G
OZdDWOB/e4cQw2/AcnsrapJU4MJCx28l0N9aSx4wr7SNosHuYOO8CimAdsqQukVt
rcrJZyHNvG5eQUVuQnZRywXDX6tLj8HQHfYLRaMqD57GMU0dg/kvYTYrYR/krbcG
Qf1D/9GCsn081fYblSfSSRRxrbhdYcoI/6xNHIC2y7bO8ZJD9zw=
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIEPTCCAyWgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBwMRMwEQYKCZImiZPyLGQB
GRYDY29tMRowGAYKCZImiZPyLGQBGRYKamZyb2dqZnJvZzEUMBIGA1UECgwLSkZy
b2csIEluYy4xCzAJBgNVBAsMAkNBMRowGAYDVQQDDBFyb290LmNhLmpmcm9nLmNv
bTAeFw0yMDEyMDcxMDUyNDhaFw0zMDEyMDUxMDUyNDhaMG4xEzARBgoJkiaJk/Is
ZAEZFgNjb20xFTATBgoJkiaJk/IsZAEZFgVqZnJvZzEUMBIGA1UECgwLamZyb2cs
IEluYy4xCzAJBgNVBAsMAkNBMR0wGwYDVQQDDBRzaWduaW5nLmNhLmpmcm9nLmNv
bTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALCe74VmqSryFPESO/oq
bgspiOSwGheG/AbUf/2XXPLZNbZJ/hhuI6T+iSW5FYy3jETwwODDlF8GBN6R33+U
gNCjXIMBDUOWkETe1fD2zj1HMTC6angykKJy2Xkw+sWniELbYfTu+SLHsBMPQnVI
jFwDLcbSMbs7ieU/IuQTEnEZxPiKcokOaF7vPntfPwdvRoGwMR0VuX7h+20Af1Il
3ntOuoasoV66K6KuiBRkSBcsV2ercCRQlpXCvIsTJVWASpSTNrpKy8zejjePw/xs
ieMGSo6WIxnIJnOLTJWnrw8sZt0tiNrLbB8npSvP67uUMDGhrZ3Tnro9JtujquOE
zMUCAwEAAaOB4zCB4DASBgNVHRMBAf8ECDAGAQH/AgEAMIGaBgNVHSMEgZIwgY+A
FBX3TQRxJRItQ/hi81MA3eZggFs7oXSkcjBwMRMwEQYKCZImiZPyLGQBGRYDY29t
MRowGAYKCZImiZPyLGQBGRYKamZyb2dqZnJvZzEUMBIGA1UECgwLSkZyb2csIElu
Yy4xCzAJBgNVBAsMAkNBMRowGAYDVQQDDBFyb290LmNhLmpmcm9nLmNvbYIBATAd
BgNVHQ4EFgQUoe6Xib3OCJt1pEevxWlA/2mA2vIwDgYDVR0PAQH/BAQDAgGGMA0G
CSqGSIb3DQEBCwUAA4IBAQAzkcvT1tTjnjguRH4jGPxP1fidiM0DWiWZQlRT9Evt
BkltRwkqOZIdrBLy/KJbOxRSCRaKpxyIYd5bWrCDCWvXArBFDY9j3jGGk8kqXb0/
VajEKDjHXzJM7HXAzyJO2hKVK4/OoPlzhKqR1ZbZF1F8Omzo7+oNwPqf5Y5hnn2M
qrUWxE216mWE8v7gvbfu39w9XKTFH1/RPgAAJet2dunyLbz3W5NgyBbCWGj/qJCz
TUDD9I8az/XX73HLpkXbcEY5/qrPV6EQWzf+ec4EcgrEi0f8gTKzl7OQaqYDxObk
yixmONVlwYD2FpWqJYAfg04u/CRQMXPPCdUQh/eKrHUg
-----END CERTIFICATE-----

View File

@@ -1,23 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIDvjCCAqagAwIBAgIBATANBgkqhkiG9w0BAQsFADBwMRMwEQYKCZImiZPyLGQB
GRYDY29tMRowGAYKCZImiZPyLGQBGRYKamZyb2dqZnJvZzEUMBIGA1UECgwLSkZy
b2csIEluYy4xCzAJBgNVBAsMAkNBMRowGAYDVQQDDBFyb290LmNhLmpmcm9nLmNv
bTAeFw0yMDEyMDcxMDUyNDdaFw0zMDEyMDUxMDUyNDdaMHAxEzARBgoJkiaJk/Is
ZAEZFgNjb20xGjAYBgoJkiaJk/IsZAEZFgpqZnJvZ2pmcm9nMRQwEgYDVQQKDAtK
RnJvZywgSW5jLjELMAkGA1UECwwCQ0ExGjAYBgNVBAMMEXJvb3QuY2EuamZyb2cu
Y29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxyTSYCbGefbdAHgW
zxXhCh7gvOUzyThaC6bcvY7yMqVu3YPxMAV1LEz+J0VMeGvu5HzONyGq89TaIKtr
AyZKxM957Q/TK0NPi0HUIT1wZKPuH89DeH79gfBjyv8XMUhFzKxAaosEa4rhkAMe
B4ukk9twfGotKU1y4j6m1V1gckeDZDRIW4tNzQbEBsL+ZcxDnCeSAAHW3Djb5yzQ
Yj3LPIRN0yu0fL8oN4yVn5tysAfXTum7HIuyKp3gfxhQgSXGVIDHd7Z1HcLrUe2o
2Z7dlsrFCUgHPccOxyFzxGI8bCPFYU75QqbxP699L1chma0It/2D0YxcrXhRkzzg
wzrBFwIDAQABo2MwYTAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFBX3TQRx
JRItQ/hi81MA3eZggFs7MB0GA1UdDgQWBBQV900EcSUSLUP4YvNTAN3mYIBbOzAO
BgNVHQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQELBQADggEBAH5XYiOBvHdd3bRfyHeo
Y2i7+u59VU3HDdOm/FVI0JqkzFAp6DLk6Ow5w/2MXbasga03lJ9SpHvKVne+VOaH
Df7xEqCIZeQVofNyOfsl4NOu6NgPSlQx0FZ6lPToZDBGp7D6ftnJcUujGk0W9y7k
GwxojLnP1f/KyjYTCCK6sDXwSn3fZGF5WmnHlzZEyKlLQoLNoEZ1uTjg2CRsa/RU
QxobwNzHGbrLZw5pfeoiF7G27RGoUA/S6mfVFQJVDP5Y3/xJRii56tMaJPwPh0sN
QPLbNvNgeU1dET1msMBnZvzNUko2fmBc2+pU7PyrL9V2pgfHq981Db1ShkNYtMhD
bMw=
-----END CERTIFICATE-----

View File

@@ -1,7 +0,0 @@
_sg_meta:
type: "roles"
config_version: 2
sg_anonymous:
cluster_permissions:
- cluster:monitor/health

View File

@@ -1,48 +0,0 @@
# In this file users, backendroles and hosts can be mapped to Search Guard roles.
# Permissions for Search Guard roles are configured in sg_roles.yml
_sg_meta:
type: "rolesmapping"
config_version: 2
## Demo roles mapping
SGS_ALL_ACCESS:
description: "Maps admin to SGS_ALL_ACCESS"
reserved: true
backend_roles:
- "admin"
SGS_OWN_INDEX:
description: "Allow full access to an index named like the username"
reserved: false
users:
- "*"
SGS_LOGSTASH:
reserved: false
backend_roles:
- "logstash"
SGS_KIBANA_USER:
description: "Maps kibanauser to SGS_KIBANA_USER"
reserved: false
backend_roles:
- "kibanauser"
SGS_READALL:
reserved: true
backend_roles:
- "readall"
SGS_MANAGE_SNAPSHOTS:
reserved: true
backend_roles:
- "snapshotrestore"
SGS_KIBANA_SERVER:
reserved: true
users:
- "kibanaserver"
sg_anonymous:
backend_roles:
- sg_anonymous_backendrole

View File

@@ -1,28 +0,0 @@
-----BEGIN PRIVATE KEY-----
MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCa3GuNbI30EdRs
S2Dmq87i/4Y7QeOldogzmNYH3m7GMjPFJcJg11Yc2HsAbBYs86fW6gGvO+68bFmY
X5kYvPN+L8KRUCSvmvjHCGf7ULmxiG2Wh7RPzQaAdvqqkMGW1QDwwxA25tP9KfZv
nP/08CPmboP8rcCEhX6HCVh0Im+WT3BBxkikjhVaVru2cLPtKtgtBX7a3HY7XMfp
DRYhXZNf+ZxfWewLQhNNndHwjtuJooLHdtX4WEXUhsrXS7/I+M7BdL/fB0ptwfvg
x1WvC2JnvNnvgdMBoUevlHjugWBVGo4AhOpFqAmQ8MxXZUhPGinDxjFvwrHYwYm0
w7tVAnTbAgMBAAECggEAAr7esZKzD5ilnWx7RkKMikAvFyKUkJXvnq6RXXFZoZKm
/5tPtABEOKbYekoU3SPgeWkLseK568YBbqXM9ySsLerpSIvVIq1T660pHsowP32/
8MoRkmYOPRj6WgcX/UetEan7r66ktfT9AJpM6gDgzFm5Zgz0knvFawJ7w8Yzqmks
8JqjA1E433xEUtc00Qm4z7You1I5eyrz1zKxBPZATVM6ScbDq2WXqwgIGUbnAHG2
6PADvOPP+8Kl0/JNC+SkE8J+KvfCYnJIDZaWTCjdd4cjkFAAHXi16BvF6PY3veel
/LT2nr1/YmcADCt4wuWGn+1HRF+mJgjqTVcfQSJrbQKBgQDJG45Hmku7fnNAn/A9
FPHmo7CpymxXpg12yf7BuKr4irpJpa6WmXB6EsxCy91rffQTDEh8TnpJG6yj5vyJ
b0dEt3u8RtBfx49UhKG/pDYi9mnUuazH0u6BHu+w4fRi3Cju7sY4qM4aj8rnAlU0
2DnXWEKIfhd+1cXDwyI8DyuvfwKBgQDFIV7ZgI1weZv7EnNiIKs65y4NWG4uG7jB
Z+Wx8xx9n5OKVxw21NPt2pZzzW3Y3+pRXypcjH13XPrZxfaUt1Y8ylC3/DHFgsid
iXyfjmit4TWiW9busC09Q8YwFZZbMWj/Wd1PRav3/zDICf3B1QRXEqqpYfUtAbXf
SaanZNGopQKBgQDFwO77weHOkN1MIvndVoc4QKYrj/1Rgtuif6afX7Pfiqr8WIuB
U4iiwXFSDZ3BYa1sPZvZgGIHGct9sFmL23y9OZ/W19t3E4kBlxpmlFcXsi8HGz2n
kOcu2Pjheo8R12P475rDhFqHC/Z9inG28RiPhR6HkVYRRqydf3hejpxqiQKBgEJw
ZM9ZjFIEKpYMOecwq4VGtTa6Pyg7H6HPqpK3JTsRtWBCy7ePM35O1bZh3kvh689R
C631i7PXGpSbK+gjgmUqqtnXnc67rXGrDN2Z2Z4A8VqvKVl490ZWuU0reWly1bh6
SSSWjsceswo4k9XoPXY7TFmaMk/g67M913VDfYYhAoGAXp6HYCZga72N6RdB38TY
i08c/O/xksfkNVo0SuVqr99uQ5TN+d2+o+t5H9Fekl1y9jUSK6q6q6+Vp8zSiyzV
GwAWk9u8dBGoNiWs4cOtQAdyeLbGDIHbIv4jeRqqSl87H6R6wJY4+fWdfm9/KEG7
N957kwur+XYzE0RfG5wgS3o=
-----END PRIVATE KEY-----

View File

@@ -1,50 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIESjCCAzKgAwIBAgIGAXY81RknMA0GCSqGSIb3DQEBCwUAMG4xEzARBgoJkiaJ
k/IsZAEZFgNjb20xFTATBgoJkiaJk/IsZAEZFgVqZnJvZzEUMBIGA1UECgwLamZy
b2csIEluYy4xCzAJBgNVBAsMAkNBMR0wGwYDVQQDDBRzaWduaW5nLmNhLmpmcm9n
LmNvbTAeFw0yMDEyMDcxMDUyNDlaFw0zMDEyMDUxMDUyNDlaMGYxEzARBgoJkiaJ
k/IsZAEZFgNjb20xFzAVBgoJkiaJk/IsZAEZFgdzZ2FkbWluMRYwFAYDVQQKDA1z
Z2FkbWluLCBJbmMuMQwwCgYDVQQLDANPcHMxEDAOBgNVBAMMB3NnYWRtaW4wggEi
MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCa3GuNbI30EdRsS2Dmq87i/4Y7
QeOldogzmNYH3m7GMjPFJcJg11Yc2HsAbBYs86fW6gGvO+68bFmYX5kYvPN+L8KR
UCSvmvjHCGf7ULmxiG2Wh7RPzQaAdvqqkMGW1QDwwxA25tP9KfZvnP/08CPmboP8
rcCEhX6HCVh0Im+WT3BBxkikjhVaVru2cLPtKtgtBX7a3HY7XMfpDRYhXZNf+Zxf
WewLQhNNndHwjtuJooLHdtX4WEXUhsrXS7/I+M7BdL/fB0ptwfvgx1WvC2JnvNnv
gdMBoUevlHjugWBVGo4AhOpFqAmQ8MxXZUhPGinDxjFvwrHYwYm0w7tVAnTbAgMB
AAGjgfUwgfIwgZoGA1UdIwSBkjCBj4AUoe6Xib3OCJt1pEevxWlA/2mA2vKhdKRy
MHAxEzARBgoJkiaJk/IsZAEZFgNjb20xGjAYBgoJkiaJk/IsZAEZFgpqZnJvZ2pm
cm9nMRQwEgYDVQQKDAtKRnJvZywgSW5jLjELMAkGA1UECwwCQ0ExGjAYBgNVBAMM
EXJvb3QuY2EuamZyb2cuY29tggECMB0GA1UdDgQWBBSSIpvK2db0wJf7bw1mhYt8
A0JUQTAMBgNVHRMBAf8EAjAAMA4GA1UdDwEB/wQEAwIF4DAWBgNVHSUBAf8EDDAK
BggrBgEFBQcDAjANBgkqhkiG9w0BAQsFAAOCAQEAn3cM0PDh8vTJS8zZ7HylMpZl
SaZwd3sxshhBKx4JEc85WQPp60nVADqVhnkVa1rfQQURaMP87hqmzf9eOcesnjn6
17eSVpDpZ0B1qV46hJd15yYKqFLavqtFpy0ePpk4EoanwJUikphT3yuIB6v3gqfY
h20t7/XmkjEwfGkmgmXOZNb9uOpKjkotWRR/IslSMxoozsdWYQLaqA0De/7Tqpmi
mortmVTOtZCX/ZChuN2XzqUnWZT+xIJomAj4ZCOlw03Yd9eUhrDZBmrYHiUmS4VO
wWFDER3zhwncjg0X2rOqL6N5P8TIfqpVgf1VuDhTAj/GY1ZKrXol28WwQQCA9w==
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIEPTCCAyWgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBwMRMwEQYKCZImiZPyLGQB
GRYDY29tMRowGAYKCZImiZPyLGQBGRYKamZyb2dqZnJvZzEUMBIGA1UECgwLSkZy
b2csIEluYy4xCzAJBgNVBAsMAkNBMRowGAYDVQQDDBFyb290LmNhLmpmcm9nLmNv
bTAeFw0yMDEyMDcxMDUyNDhaFw0zMDEyMDUxMDUyNDhaMG4xEzARBgoJkiaJk/Is
ZAEZFgNjb20xFTATBgoJkiaJk/IsZAEZFgVqZnJvZzEUMBIGA1UECgwLamZyb2cs
IEluYy4xCzAJBgNVBAsMAkNBMR0wGwYDVQQDDBRzaWduaW5nLmNhLmpmcm9nLmNv
bTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALCe74VmqSryFPESO/oq
bgspiOSwGheG/AbUf/2XXPLZNbZJ/hhuI6T+iSW5FYy3jETwwODDlF8GBN6R33+U
gNCjXIMBDUOWkETe1fD2zj1HMTC6angykKJy2Xkw+sWniELbYfTu+SLHsBMPQnVI
jFwDLcbSMbs7ieU/IuQTEnEZxPiKcokOaF7vPntfPwdvRoGwMR0VuX7h+20Af1Il
3ntOuoasoV66K6KuiBRkSBcsV2ercCRQlpXCvIsTJVWASpSTNrpKy8zejjePw/xs
ieMGSo6WIxnIJnOLTJWnrw8sZt0tiNrLbB8npSvP67uUMDGhrZ3Tnro9JtujquOE
zMUCAwEAAaOB4zCB4DASBgNVHRMBAf8ECDAGAQH/AgEAMIGaBgNVHSMEgZIwgY+A
FBX3TQRxJRItQ/hi81MA3eZggFs7oXSkcjBwMRMwEQYKCZImiZPyLGQBGRYDY29t
MRowGAYKCZImiZPyLGQBGRYKamZyb2dqZnJvZzEUMBIGA1UECgwLSkZyb2csIElu
Yy4xCzAJBgNVBAsMAkNBMRowGAYDVQQDDBFyb290LmNhLmpmcm9nLmNvbYIBATAd
BgNVHQ4EFgQUoe6Xib3OCJt1pEevxWlA/2mA2vIwDgYDVR0PAQH/BAQDAgGGMA0G
CSqGSIb3DQEBCwUAA4IBAQAzkcvT1tTjnjguRH4jGPxP1fidiM0DWiWZQlRT9Evt
BkltRwkqOZIdrBLy/KJbOxRSCRaKpxyIYd5bWrCDCWvXArBFDY9j3jGGk8kqXb0/
VajEKDjHXzJM7HXAzyJO2hKVK4/OoPlzhKqR1ZbZF1F8Omzo7+oNwPqf5Y5hnn2M
qrUWxE216mWE8v7gvbfu39w9XKTFH1/RPgAAJet2dunyLbz3W5NgyBbCWGj/qJCz
TUDD9I8az/XX73HLpkXbcEY5/qrPV6EQWzf+ec4EcgrEi0f8gTKzl7OQaqYDxObk
yixmONVlwYD2FpWqJYAfg04u/CRQMXPPCdUQh/eKrHUg
-----END CERTIFICATE-----

View File

@@ -1,7 +0,0 @@
---
# handlers file for missioncontrol
- name: restart missioncontrol
become: yes
systemd:
name: "{{ mc_daemon }}"
state: restarted

View File

@@ -1,16 +0,0 @@
galaxy_info:
  author: "JFrog Maintainers Team <installers@jfrog.com>"
  description: "The missionControl role will install missionControl software onto the host. An Artifactory server and a PostgreSQL database are required."
  company: JFrog
  issue_tracker_url: "https://github.com/jfrog/JFrog-Cloud-Installers/issues"
  # Galaxy expects a plain SPDX license identifier; "license (Apache-2.0)" was
  # the unedited ansible-galaxy init placeholder.
  license: Apache-2.0
  min_ansible_version: 2.9
  galaxy_tags:
    - missionControl
    - jfrog
dependencies: []

View File

@@ -1,44 +0,0 @@
- name: Prepare expect scenario script
set_fact:
expect_scenario: |
set timeout 300
spawn {{ exp_executable_cmd }}
expect_before timeout { exit 1 }
set CYCLE_END 0
set count 0
while { $CYCLE_END == 0 } {
expect {
{% for each_request in exp_scenarios %}
-nocase -re {{ '{' }}{{ each_request.expecting }}.*} {
send "{{ each_request.sending }}\n"
}
{% endfor %}
eof {
set CYCLE_END 1
}
}
set count "[expr $count + 1]"
if { $count > 16} {
exit 128
}
}
expect eof
lassign [wait] pid spawnid os_error_flag value
if {$os_error_flag == 0} {
puts "INSTALLER_EXIT_STATUS-$value"
} else {
puts "INSTALLER_EXIT_STATUS-$value"
}
- name: Interactive with expect
become: yes
ignore_errors: yes
shell: |
{{ expect_scenario }}
args:
executable: /usr/bin/expect
chdir: "{{ exp_dir }}"
register: exp_result

View File

@@ -1,150 +0,0 @@
---
- debug:
msg: "Performing installation of missionControl version - {{ missioncontrol_version }}"
# Install the "expect" tool used to drive the interactive installer.
- name: Install expect dependency
  become: yes
  yum:
    name: expect
    state: present
  # Bug fix: the Ansible fact value is "RedHat" (capital H). The previous
  # comparison against 'Redhat' never matched, so expect was silently never
  # installed on RHEL/CentOS hosts.
  when: ansible_os_family == 'RedHat'
- name: Install expect dependency
  become: yes
  apt:
    name: expect
    state: present
    update_cache: yes
  when: ansible_os_family == 'Debian'
- name: Ensure group jfmc exist
become: yes
group:
name: "{{ mc_group }}"
gid: "{{ mc_gid }}"
state: present
- name: Ensure user jfmc exist
become: yes
user:
uid: "{{ mc_uid }}"
name: "{{ mc_user }}"
group: "{{ mc_group }}"
create_home: yes
home: "{{ mc_home }}"
shell: /bin/bash
state: present
- name: Download mc
become: yes
unarchive:
src: "{{ mc_tar }}"
dest: "{{ jfrog_home_directory }}"
remote_src: yes
owner: "{{ mc_user }}"
group: "{{ mc_group }}"
creates: "{{ mc_untar_home }}"
register: downloadmc
until: downloadmc is succeeded
retries: 3
- name: Check if app directory exists
become: yes
stat:
path: "{{ mc_home }}/app"
register: app_dir_check
- name: Copy untar directory to mc home
become: yes
command: "cp -r {{ mc_untar_home }}/. {{ mc_home }}"
when: not app_dir_check.stat.exists
- name: Create required directories
become: yes
file:
path: "{{ item }}"
state: directory
recurse: yes
owner: "{{ mc_user }}"
group: "{{ mc_group }}"
loop:
- "{{ mc_home }}/var/etc"
- "{{ mc_home }}/var/etc/security/"
- "{{ mc_home }}/var/etc/info/"
- name: Configure master key
become: yes
copy:
dest: "{{ mc_home }}/var/etc/security/master.key"
content: |
{{ master_key }}
owner: "{{ mc_user }}"
group: "{{ mc_group }}"
mode: 0640
- name: Setup elasticsearch
import_tasks: setup-elasticsearch.yml
- name: Check if install.sh wrapper script exist
become: yes
stat:
path: "{{ mc_install_script_path }}/install.sh"
register: install_wrapper_script
- name: Include interactive installer scripts
include_vars: script/archive.yml
- name: Install JFMC
include_tasks: expect.yml
vars:
exp_executable_cmd: "./install.sh -u {{ mc_user }} -g {{ mc_group }}"
exp_dir: "{{ mc_install_script_path }}"
exp_scenarios: "{{ mc_installer_scenario['main'] }}"
args:
apply:
environment:
YQ_PATH: "{{ mc_thirdparty_path }}/yq"
when: install_wrapper_script.stat.exists
- name: Configure installer info
become: yes
template:
src: installer-info.json.j2
dest: "{{ mc_home }}/var/etc/info/installer-info.json"
notify: restart missioncontrol
- name: Configure systemyaml
become: yes
template:
src: "{{ mc_system_yaml_template }}"
dest: "{{ mc_home }}/var/etc/system.yaml"
notify: restart missioncontrol
- name: Update correct permissions
become: yes
file:
path: "{{ mc_home }}"
state: directory
recurse: yes
owner: "{{ mc_user }}"
group: "{{ mc_group }}"
mode: '0755'
- name: Install mc as a service
become: yes
shell: |
{{ mc_archive_service_cmd }}
args:
chdir: "{{ mc_install_script_path }}"
register: check_service_status_result
ignore_errors: yes
- name: Restart missioncontrol
meta: flush_handlers
# Poll the JFrog router health endpoint until Mission Control reports healthy.
# Native YAML module args (not legacy key=value inline form); also fixes the
# "name :" space-before-colon yamllint violation.
- name: Wait for missionControl to be fully deployed
  uri:
    url: http://127.0.0.1:8082/router/api/v1/system/health
    timeout: 130
  register: result
  until: result.status == 200
  retries: 25
  delay: 5

View File

@@ -1,6 +0,0 @@
- name: perform installation
include_tasks: "install.yml"
when: not mc_upgrade_only
- name: perform upgrade
include_tasks: "upgrade.yml"
when: mc_upgrade_only

View File

@@ -1,179 +0,0 @@
- name: Ensure group elasticsearch exists
become: yes
group:
name: elasticsearch
gid: "{{ es_gid }}"
state: present
- name: Ensure user elasticsearch exists
become: yes
user:
name: elasticsearch
uid: "{{ es_uid }}"
group: elasticsearch
create_home: yes
home: "{{ mc_es_home }}"
shell: /bin/bash
state: present
- name: Create required directories
become: yes
file:
path: "{{ item }}"
state: directory
mode: 0755
recurse: yes
owner: elasticsearch
group: elasticsearch
loop:
- "{{ mc_es_conf_base }}"
- "{{ mc_es_data_dir }}"
- "{{ mc_es_log_dir }}"
- "{{ mc_es_home }}"
- name: Set max file descriptors limit
become: yes
pam_limits:
domain: 'elasticsearch'
limit_type: '-'
limit_item: nofile
value: '65536'
- name: Update nproc limit
become: yes
pam_limits:
domain: 'elasticsearch'
limit_type: '-'
limit_item: nproc
value: '4096'
# Elasticsearch requires vm.max_map_count >= 262144; apply it persistently.
# Native YAML module args replace the legacy key=value inline form.
- name: Setting sysctl values
  become: yes
  sysctl:
    name: "{{ item.name }}"
    value: "{{ item.value }}"
    sysctl_set: yes
  loop:
    - { name: "vm.max_map_count", value: 262144 }
  # Best-effort: sysctl can fail in containers/unprivileged environments.
  ignore_errors: yes
- name: Find elasticsearch package
become: yes
find:
paths: "{{ mc_home }}/app/third-party/elasticsearch"
patterns: "^elasticsearch-oss-.+\\.tar.gz$"
use_regex: yes
file_type: file
register: check_elasticsearch_package_result
- name: Set elasticsearch package file name
set_fact:
mc_elasticsearch_package: "{{ check_elasticsearch_package_result.files[0].path }}"
when: check_elasticsearch_package_result.matched > 0
- name: Ensure /usr/share/elasticsearch exists
file:
path: "{{ mc_es_home }}"
state: directory
owner: elasticsearch
group: elasticsearch
become: yes
- name: Extract elasticsearch package
become: yes
become_user: elasticsearch
ignore_errors: yes
unarchive:
src: "{{ mc_elasticsearch_package }}"
dest: "{{ mc_es_home }}"
remote_src: yes
extra_opts:
- --strip-components=1
owner: elasticsearch
group: elasticsearch
register: unarchive_result
when: check_elasticsearch_package_result.matched > 0
- name: Copy elasticsearch config files to ES_PATH_CONF dir
become: yes
command: "cp -r {{ mc_es_home }}/config/. {{ mc_es_conf_base }}/"
- name: Remove elasticsearch config dir
become: yes
file:
path: "{{ mc_es_home }}/config"
state: absent
- name: Generate HA elasticsearch.yml template file
become: yes
ignore_errors: yes
template:
src: templates/ha/{{ mc_ha_node_type }}.elasticsearch.yml.j2
dest: "{{ mc_es_conf_base }}/elasticsearch.yml"
owner: elasticsearch
group: elasticsearch
when:
- unarchive_result.extract_results.rc | default(128) == 0
- flow_type in ["ha-cluster", "ha-upgrade"]
- name: Generate elasticsearch.yml template file
become: yes
template:
src: templates/elasticsearch.yml.j2
dest: "{{ mc_es_conf_base }}/elasticsearch.yml"
owner: elasticsearch
group: elasticsearch
when:
- unarchive_result.extract_results.rc | default(128) == 0
- flow_type in ["archive", "upgrade"]
- name: Create empty unicast_hosts.txt file
become: yes
file:
path: "{{ mc_es_conf_base }}/unicast_hosts.txt"
state: touch
mode: 0664
owner: elasticsearch
group: elasticsearch
- name: Setup searchguard plugin
import_tasks: setup-searchguard.yml
- name: Update directories permissions
become: yes
file:
path: "{{ item }}"
state: directory
mode: 0755
recurse: yes
owner: elasticsearch
group: elasticsearch
loop:
- "{{ mc_es_conf_base }}"
- "{{ mc_es_data_dir }}"
- "{{ mc_es_log_dir }}"
- "{{ mc_es_home }}"
- name: Start elasticsearch
become: yes
become_user: elasticsearch
shell: "{{ mc_es_script_path }}/elasticsearch -d"
environment:
JAVA_HOME: "{{ mc_es_java_home }}"
ES_PATH_CONF: "{{ mc_es_conf_base }}/"
register: start_elasticsearch_result
when: unarchive_result.extract_results.rc | default(128) == 0
- name: Wait for elasticsearch to start
pause:
seconds: 15
- name: Init searchguard plugin
become: yes
become_user: elasticsearch
shell: |
./sgadmin.sh -p {{ mc_es_transport_port }} -cacert root-ca.pem \
-cert sgadmin.pem -key sgadmin.key -cd {{ mc_es_searchgaurd_home }}/sgconfig/ -nhnv -icl
args:
chdir: "{{ mc_es_searchgaurd_home }}/tools/"
environment:
JAVA_HOME: "{{ mc_es_java_home }}"
register: install_searchguard_result
when: check_searchguard_bundle_result.matched == 1

View File

@@ -1,100 +0,0 @@
- name: Copy elasticsearch certificate
become: yes
copy:
mode: 0600
src: files/searchguard/localhost.pem
dest: "{{ mc_es_conf_base }}/localhost.pem"
owner: elasticsearch
group: elasticsearch
- name: Copy elasticsearch private key
become: yes
copy:
mode: 0600
src: files/searchguard/localhost.key
dest: "{{ mc_es_conf_base }}/localhost.key"
owner: elasticsearch
group: elasticsearch
- name: Copy searchguard root ca
become: yes
copy:
mode: 0600
src: files/searchguard/root-ca.pem
dest: "{{ mc_es_conf_base }}/root-ca.pem"
owner: elasticsearch
group: elasticsearch
- name: Find searchguard bundle
find:
paths: "{{ mc_home }}/app/third-party/elasticsearch/"
patterns: "^search-guard-.+\\.zip$"
use_regex: yes
file_type: file
register: check_searchguard_bundle_result
- name: Install searchguard plugin
become: yes
become_user: elasticsearch
ignore_errors: yes
shell: |
{{ mc_es_script_path }}/elasticsearch-plugin install \
-b file://{{ check_searchguard_bundle_result.files[0].path }}
environment:
JAVA_HOME: "{{ mc_es_java_home }}"
ES_PATH_CONF: "{{ mc_es_conf_base }}/"
register: install_searchguard_result
when: check_searchguard_bundle_result.matched == 1
- name: Copy searchguard admin certificate
become: yes
copy:
mode: 0600
src: files/searchguard/sgadmin.pem
dest: "{{ mc_es_searchgaurd_home }}/tools/sgadmin.pem"
owner: elasticsearch
group: elasticsearch
- name: Copy searchguard admin private key
become: yes
copy:
mode: 0600
src: files/searchguard/sgadmin.key
dest: "{{ mc_es_searchgaurd_home }}/tools/sgadmin.key"
owner: elasticsearch
group: elasticsearch
- name: Copy searchguard root ca
become: yes
copy:
mode: 0600
src: files/searchguard/root-ca.pem
dest: "{{ mc_es_searchgaurd_home }}/tools/root-ca.pem"
owner: elasticsearch
group: elasticsearch
- name: Copy roles template
become: yes
copy:
mode: 0600
src: files/searchguard/sg_roles.yml
dest: "{{ mc_es_searchgaurd_home }}/sgconfig/sg_roles.yml"
owner: elasticsearch
group: elasticsearch
- name: Copy roles template
become: yes
copy:
mode: 0600
src: files/searchguard/sg_roles_mapping.yml
dest: "{{ mc_es_searchgaurd_home }}/sgconfig/sg_roles_mapping.yml"
owner: elasticsearch
group: elasticsearch
- name: Check execution bit
become: yes
file:
path: "{{ mc_es_searchgaurd_home }}/tools/sgadmin.sh"
owner: elasticsearch
group: elasticsearch
mode: 0700

View File

@@ -1,113 +0,0 @@
- name: Get elasticsearch pid
shell: "ps -ef | grep -v grep | grep -w elasticsearch | awk '{print $2}'"
register: elasticsearch_pid
- name: Stop elasticsearch before upgrade
become: yes
shell: kill -9 {{ elasticsearch_pid.stdout }}
when: elasticsearch_pid.stdout | length > 0
- name: Waiting until all running processes are killed
wait_for:
path: "/proc/{{ elasticsearch_pid.stdout }}/status"
state: absent
when: elasticsearch_pid.stdout | length > 0
- name: Find searchguard bundle for removal
become: yes
find:
paths: "{{ mc_home }}/app/third-party/elasticsearch/"
patterns: "^search-guard-.+\\.zip$"
use_regex: yes
file_type: file
register: check_searchguard_bundle_result
- name: Remove searchguard plugin
become: yes
become_user: elasticsearch
ignore_errors: yes
shell: |
{{ mc_es_script_path }}/elasticsearch-plugin remove {{ check_searchguard_bundle_result.files[0].path }}
environment:
JAVA_HOME: "{{ mc_es_java_home }}"
ES_PATH_CONF: "{{ mc_es_conf_base }}/config"
register: remove_searchguard_result
when: check_searchguard_bundle_result.matched == 1
- name: Delete elasticsearch home dir
become: yes
file:
path: "{{ mc_es_home }}"
state: absent
- name: Create elasticsearch home dir
become: yes
file:
path: "{{ mc_es_home }}"
state: directory
mode: 0755
owner: elasticsearch
group: elasticsearch
- name: Find elasticsearch package
become: yes
find:
paths: "{{ mc_home }}/app/third-party/elasticsearch"
patterns: "^elasticsearch-oss-.+\\.tar.gz$"
use_regex: yes
file_type: file
register: check_elasticsearch_package_result
- name: Set elasticsearch package file name
set_fact:
mc_elasticsearch_package: "{{ check_elasticsearch_package_result.files[0].path }}"
when: check_elasticsearch_package_result.matched > 0
- name: Extract elasticsearch package
become: yes
become_user: elasticsearch
ignore_errors: yes
unarchive:
src: "{{ mc_elasticsearch_package }}"
dest: "{{ mc_es_home }}"
remote_src: yes
extra_opts:
- --strip-components=1
- --exclude=config
owner: elasticsearch
group: elasticsearch
register: unarchive_result
when: check_elasticsearch_package_result.matched > 0
- name: Generate HA elasticsearch.yml template file
become: yes
ignore_errors: yes
template:
src: templates/ha/{{ mc_ha_node_type }}.elasticsearch.yml.j2
dest: "{{ mc_es_conf_base }}/elasticsearch.yml"
owner: elasticsearch
group: elasticsearch
when: unarchive_result.extract_results.rc | default(128) == 0
- name: Create empty unicast_hosts.txt file
become: yes
file:
path: "{{ mc_es_conf_base }}/unicast_hosts.txt"
state: touch
mode: 0644
owner: elasticsearch
group: elasticsearch
- name: Upgrade searchguard plugin
import_tasks: upgrade-searchguard.yml
- name: Start elasticsearch
become: yes
become_user: elasticsearch
ignore_errors: yes
shell: "{{ mc_es_script_path }}/elasticsearch -d"
environment:
JAVA_HOME: "{{ mc_es_java_home }}"
ES_PATH_CONF: "{{ mc_es_conf_base }}/"
when: unarchive_result.extract_results.rc | default(128) == 0
register: start_elastcsearch_upgraded

View File

@@ -1,100 +0,0 @@
- name: Create elasticsearch config path folder
become: yes
file:
path: "{{ mc_es_conf_base }}/searchguard"
state: directory
mode: 0755
owner: elasticsearch
group: elasticsearch
- name: Copy elasticsearch certificate
become: yes
copy:
mode: 0600
src: files/searchguard/localhost.pem
dest: "{{ mc_es_conf_base }}/localhost.pem"
owner: elasticsearch
group: elasticsearch
- name: Copy elasticsearch private key
become: yes
copy:
mode: 0600
src: files/searchguard/localhost.key
dest: "{{ mc_es_conf_base }}/localhost.key"
owner: elasticsearch
group: elasticsearch
- name: Copy searchguard admin certificate
become: yes
copy:
mode: 0600
src: files/searchguard/sgadmin.pem
dest: "{{ mc_es_conf_base }}/searchguard/sgadmin.pem"
owner: elasticsearch
group: elasticsearch
- name: Copy searchguard admin private key
become: yes
copy:
mode: 0600
src: files/searchguard/sgadmin.key
dest: "{{ mc_es_conf_base }}/searchguard/sgadmin.key"
owner: elasticsearch
group: elasticsearch
- name: Copy searchguard root ca
become: yes
copy:
mode: 0600
src: files/searchguard/root-ca.pem
dest: "{{ mc_es_conf_base }}/root-ca.pem"
owner: elasticsearch
group: elasticsearch
- name: Find searchguard bundle
find:
paths: "{{ mc_home }}/app/third-party/elasticsearch/"
patterns: "^search-guard-.+\\.zip$"
use_regex: yes
file_type: file
register: check_searchguard_bundle_result
- name: Install searchguard plugin
become: yes
become_user: elasticsearch
ignore_errors: yes
shell: |
{{ mc_es_script_path }}/elasticsearch-plugin install \
-b file://{{ check_searchguard_bundle_result.files[0].path }}
environment:
JAVA_HOME: "{{ mc_es_java_home }}"
ES_PATH_CONF: "{{ mc_es_conf_base }}/"
register: install_searchguard_result
when: check_searchguard_bundle_result.matched == 1
- name: Copy roles template
become: yes
copy:
mode: 0600
src: files/searchguard/sg_roles.yml
dest: "{{ mc_es_home }}/plugins/search-guard-7/sgconfig/sg_roles.yml"
owner: elasticsearch
group: elasticsearch
- name: Copy roles template
become: yes
copy:
mode: 0600
src: files/searchguard/sg_roles_mapping.yml
dest: "{{ mc_es_home }}/plugins/search-guard-7/sgconfig/sg_roles_mapping.yml"
owner: elasticsearch
group: elasticsearch
- name: Check execution bit
become: yes
file:
path: "{{ mc_es_home }}/plugins/search-guard-7/tools/sgadmin.sh"
owner: elasticsearch
group: elasticsearch
mode: 0700

View File

@@ -1,96 +0,0 @@
---
# Upgrade flow for JFrog Mission Control: stop the service, unpack the new
# distribution over the old app directory, upgrade Elasticsearch, run the
# vendor installer via expect, re-render configuration, then wait until the
# router reports the stack healthy.
- debug:
    msg: "Performing Upgrade of missionControl version - {{ missioncontrol_version }}"
- name: Stop mc service
  become: true
  systemd:
    name: "{{ mc_daemon }}"
    state: stopped
- name: Download mc for upgrade
  unarchive:
    src: "{{ mc_tar }}"
    dest: "{{ jfrog_home_directory }}"
    remote_src: true
    owner: "{{ mc_user }}"
    group: "{{ mc_group }}"
    creates: "{{ mc_untar_home }}"
  become: true
  register: downloadmc
  until: downloadmc is succeeded
  retries: 3
- name: Delete current app folder
  become: true
  file:
    path: "{{ mc_home }}/app"
    state: absent
- name: Copy new app to mc app
  command: "cp -r {{ mc_untar_home }}/app/. {{ mc_home }}/app"
  become: true
- name: Delete untar directory
  file:
    path: "{{ mc_untar_home }}"
    state: absent
  become: true
- name: Upgrade elasticsearch
  import_tasks: upgrade-elasticsearch.yml
- name: Check if install.sh wrapper script exist
  become: true
  stat:
    path: "{{ mc_install_script_path }}/install.sh"
  register: upgrade_wrapper_script
- name: Include interactive installer scripts
  include_vars: script/archive.yml
# Drive the interactive installer with the scripted prompt/answer pairs.
- name: Upgrade JFMC
  include_tasks: expect.yml
  vars:
    exp_executable_cmd: "./install.sh -u {{ mc_user }} -g {{ mc_group }}"
    exp_dir: "{{ mc_install_script_path }}"
    exp_scenarios: "{{ mc_installer_scenario['main'] }}"
  args:
    apply:
      environment:
        YQ_PATH: "{{ mc_thirdparty_path }}/yq"
  when: upgrade_wrapper_script.stat.exists
- name: Configure installer info
  become: true
  template:
    src: installer-info.json.j2
    dest: "{{ mc_home }}/var/etc/info/installer-info.json"
  notify: restart missioncontrol
- name: Configure systemyaml
  template:
    src: "{{ mc_system_yaml_template }}"
    dest: "{{ mc_home }}/var/etc/system.yaml"
  become: true
  notify: restart missioncontrol
- name: Update correct permissions
  become: true
  file:
    path: "{{ mc_home }}"
    state: directory
    recurse: true
    owner: "{{ mc_user }}"
    group: "{{ mc_group }}"
    mode: "0755"
- name: Restart missioncontrol
  meta: flush_handlers
# Block YAML form of the uri call (was legacy key=value shorthand).
- name: Wait for missionControl to be fully deployed
  uri:
    url: http://127.0.0.1:8082/router/api/v1/system/health
    timeout: 130
  register: result
  until: result.status == 200
  retries: 25
  delay: 5

View File

@@ -1,21 +0,0 @@
# Elasticsearch node configuration rendered by Ansible for a
# SearchGuard-secured Mission Control cluster (TLS on the transport layer).
discovery.seed_providers: file
transport.port: {{ mc_es_transport_port }}
transport.host: 0.0.0.0
transport.publish_host: {{ ansible_host }}
network.host: 0.0.0.0
node.name: {{ ansible_host }}
cluster.initial_master_nodes: {{ ansible_host }}
bootstrap.memory_lock: false
path.data: {{ mc_es_data_dir }}
path.logs: {{ mc_es_log_dir }}
# SearchGuard transport TLS material; paths are relative to the
# Elasticsearch config directory where the role copies them.
searchguard.ssl.transport.pemcert_filepath: localhost.pem
searchguard.ssl.transport.pemkey_filepath: localhost.key
searchguard.ssl.transport.pemtrustedcas_filepath: root-ca.pem
searchguard.ssl.transport.enforce_hostname_verification: false
searchguard.ssl.transport.resolve_hostname: false
searchguard.nodes_dn:
- CN=localhost,OU=Ops,O=localhost\, Inc.,DC=localhost,DC=com
searchguard.authcz.admin_dn:
- CN=sgadmin,OU=Ops,O=sgadmin\, Inc.,DC=sgadmin,DC=com
searchguard.enterprise_modules_enabled: false

View File

@@ -1,14 +0,0 @@
# Elasticsearch node configuration rendered by Ansible for the plain
# (no SearchGuard) Mission Control setup.
discovery.seed_providers: file
{% if mc_elasticsearch_package | regex_search(".*oss-7.*") %}
{# Only the OSS 7.x package needs the initial master node pinned here. #}
cluster.initial_master_nodes: {{ ansible_host }}
{% endif %}
path.data: {{ mc_es_home }}/data
path.logs: {{ mc_es_home }}/logs
network.host: 0.0.0.0
node.name: {{ ansible_host }}
transport.host: 0.0.0.0
transport.port: 9300
transport.publish_host: {{ ansible_host }}

View File

@@ -1,21 +0,0 @@
# JFrog Mission Control system.yaml rendered by Ansible.
# jfrogUrl/joinKey attach this node to the JFrog platform instance.
configVersion: 1
shared:
  jfrogUrl: {{ jfrog_url }}
  node:
    ip: {{ ansible_host }}
  database:
    type: "{{ mc_db_type }}"
    driver: "{{ mc_db_driver }}"
    url: "{{ mc_db_url }}"
    username: "{{ mc_db_user }}"
    password: "{{ mc_db_password }}"
  elasticsearch:
    unicastFile: {{ mc_es_conf_base }}/unicast_hosts.txt
    password: {{ mc_es_password }}
    url: {{ mc_es_url }}
    username: {{ mc_es_user }}
  security:
    joinKey: {{ join_key }}
router:
  entrypoints:
    internalPort: 8046

View File

@@ -1,11 +0,0 @@
# Minimal Elasticsearch node configuration rendered by Ansible for
# Mission Control (single-node defaults, transport on 9300).
#bootstrap.memory_lock: true
discovery.seed_providers: file
path.data: {{ mc_es_home }}/data
path.logs: {{ mc_es_home }}/logs
network.host: 0.0.0.0
node.name: {{ ansible_host }}
transport.host: 0.0.0.0
transport.port: 9300
transport.publish_host: {{ ansible_host }}

View File

@@ -1,22 +0,0 @@
# JFrog Mission Control system.yaml rendered by Ansible for a node joining
# an existing Elasticsearch cluster (clusterSetup enabled).
configVersion: 1
shared:
  jfrogUrl: {{ jfrog_url }}
  node:
    ip: {{ ansible_host }}
  database:
    type: "{{ mc_db_type }}"
    driver: "{{ mc_db_driver }}"
    url: "{{ mc_db_url }}"
    username: "{{ mc_db_user }}"
    password: "{{ mc_db_password }}"
  elasticsearch:
    unicastFile: {{ mc_es_conf_base }}/unicast_hosts.txt
    # NOTE(review): unquoted YES is parsed as boolean true by YAML 1.1
    # loaders; confirm whether the consumer expects the literal string "YES".
    clusterSetup: YES
    password: {{ mc_es_password }}
    url: {{ mc_es_url }}
    username: {{ mc_es_user }}
  security:
    joinKey: {{ join_key }}
router:
  entrypoints:
    internalPort: 8046

View File

@@ -1,9 +0,0 @@
{{ ansible_managed | comment }}
{
"productId": "Ansible_MissionControl/{{ platform_collection_version }}-{{ missionControl_version }}",
"features": [
{
"featureId": "Channel/{{ ansible_marketplace }}"
}
]
}

View File

@@ -1,35 +0,0 @@
# JFrog Mission Control system.yaml rendered by Ansible for the primary node:
# generates a node id and configures the per-service database schemas.
configVersion: 1
shared:
  jfrogUrl: {{ jfrog_url }}
  node:
    ip: {{ mc_primary_ip }}
    # Fresh UUID derived from the current timestamp on every render.
    id: {{ ansible_date_time.iso8601_micro | to_uuid }}
  database:
    type: "{{ mc_db_type }}"
    driver: "{{ mc_db_driver }}"
    url: "{{ mc_db_url }}"
  elasticsearch:
    # NOTE(review): the other system.yaml templates in this role use
    # {{ mc_es_conf_base }}/unicast_hosts.txt (no /config segment) --
    # confirm which path is correct.
    unicastFile: {{ mc_es_conf_base }}/config/unicast_hosts.txt
    password: {{ mc_es_password }}
    url: {{ mc_es_url }}
    username: {{ mc_es_user }}
  security:
    joinKey: {{ join_key }}
mc:
  database:
    username: "{{ mc_db_user }}"
    password: "{{ mc_db_password }}"
    schema: "jfmc_server"
insight-scheduler:
  database:
    username: "{{ mc_db_user }}"
    password: "{{ mc_db_password }}"
    schema: "insight_scheduler"
insight-server:
  database:
    username: "{{ mc_db_user }}"
    password: "{{ mc_db_password }}"
    schema: "insight_server"
router:
  entrypoints:
    internalPort: 8046

View File

@@ -1,58 +0,0 @@
# Prompt/response script fed to the expect wrapper that drives the JFMC
# interactive installer: "expecting" is a regex matched against installer
# output, "sending" is the reply typed in response.
mc_installer_scenario:
  main:
    - {
        "expecting": "(data|installation) directory \\(",
        "sending": "{{ mc_home }}"
      }
    - {
        "expecting": "jfrog url( \\(.+\\))?:(?!.*Skipping prompt)",
        "sending": "{{ jfrog_url }}"
      }
    - {
        "expecting": "join key:(?!.*Skipping prompt)",
        "sending": "{{ join_key }}"
      }
    # NOTE(review): both branches of this conditional send ansible_host, so
    # the if/else is redundant; confirm whether the master branch was meant
    # to send a different value (e.g. mc_primary_ip).
    - {
        "expecting": "please specify the ip address of this machine(?!.*Skipping prompt)",
        "sending": "{% if mc_ha_node_type is defined and mc_ha_node_type == 'master' %}{{ ansible_host }}{% else %}{{ ansible_host }}{% endif %}"
      }
    - {
        "expecting": "are you adding an additional node",
        "sending": "{% if mc_ha_node_type is defined and mc_ha_node_type == 'master' %}n{% else %}y{% endif %}"
      }
    # External PostgreSQL and Elasticsearch are provisioned by other roles.
    - {
        "expecting": "do you want to install postgresql",
        "sending": "n"
      }
    - {
        "expecting": "do you want to install elasticsearch",
        "sending": "n"
      }
    - {
        "expecting": "(postgresql|database) url.+\\[jdbc:postgresql.+\\]:",
        "sending": "{{ mc_db_url }}"
      }
    - {
        "expecting": "(postgresql|database) password",
        "sending": "{{ mc_db_password }}"
      }
    - {
        "expecting": "(postgresql|database) username",
        "sending": "{{ mc_db_user }}"
      }
    - {
        "expecting": "confirm database password",
        "sending": "{{ mc_db_password }}"
      }
    - {
        "expecting": "elasticsearch url:(?!.*Skipping prompt)",
        "sending": "{{ mc_es_url }}"
      }
    - {
        "expecting": "elasticsearch username:",
        "sending": "{{ mc_es_user }}"
      }
    - {
        "expecting": "elasticsearch password:",
        "sending": "{{ mc_es_password }}"
      }

View File

@@ -1,23 +0,0 @@
# postgres
The postgres role will install Postgresql software and configure a database and user to support an Artifactory or Xray server.
### Role Variables
By default, the [_pg_hba.conf_](https://www.postgresql.org/docs/13/auth-pg-hba-conf.html) client authentication file is configured for open access for development purposes through the _postgres_allowed_hosts_ variable:
```
postgres_allowed_hosts:
- { type: "host", database: "all", user: "all", address: "0.0.0.0/0", method: "trust"}
```
**THIS SHOULD NOT BE USED FOR PRODUCTION.**
**Update this variable to only allow access from Artifactory, Distribution, MissionControl and Xray.**
## Example Playbook
```
---
- hosts: postgres_servers
roles:
- postgres
```

View File

@@ -1,95 +0,0 @@
---
# Defaults for the postgres role; the postgres_server_* values map directly
# onto same-named settings in the rendered postgresql.conf.
# Default version of Postgres server to install.
postgres_version: 13
# Default listen_addresses of Postgres server
postgres_listen_addresses: 0.0.0.0
# Default port of Postgres server
postgres_port: 5432
# Server version in package:
postgres_server_pkg_version: "{{ postgres_version|replace('.', '') }}"
# Whether or not the files are on ZFS.
postgres_server_volume_is_zfs: false
# Postgres setting max_connections.
postgres_server_max_connections: 1000
# Postgres setting shared_buffers.
postgres_server_shared_buffers: 128MB
# Postgres setting maintenance_work_mem.
postgres_server_maintenance_work_mem: 64MB
# Postgres setting effective_io_concurrency.
postgres_server_effective_io_concurrency: 1
# Postgres setting max_worker_processes.
postgres_server_max_worker_processes: 8
# Postgres setting max_parallel_maintenance_workers.
postgres_server_max_parallel_maintenance_workers: 2
# Postgres setting max_parallel_workers_per_gather.
postgres_server_max_parallel_workers_per_gather: 2
# Postgres setting parallel_leader_participation.
postgres_server_parallel_leader_participation: true
# Postgres setting max_parallel_workers.
postgres_server_max_parallel_workers: 8
# Postgres setting max_locks_per_transaction.
postgres_server_max_locks_per_transaction: 64
# Configuration for "random access" cost.
postgres_server_random_page_cost: "4.0"
# User name that the postgres user runs as.
postgres_user: postgres
# Locale used for initdb and database collation/ctype.
postgres_locale: "en_US.UTF-8"
# Whether or not to log checkpoints.
postgres_server_log_checkpoints: false
# Whether or not to log connections.
postgres_server_log_connections: false
# Whether or not to log disconnections.
postgres_server_log_disconnections: false
# Whether or not to log duration
postgres_server_log_duration: false
# Error logging verbosity.
postgres_server_log_error_verbosity: "default"
# Whether or not to log the host name.
postgres_server_log_hostname: false
# Whether or not to log lock waits.
postgres_server_log_lock_waits: false
# Which statements to log.
postgres_server_log_statements: "none"
# Whether or not to enable the auto_explain module.
postgres_server_auto_explain_enabled: false
# Minimal duration to log auto explain for.
postgres_server_auto_explain_log_min_duration: -1
# Whether or not to use EXPLAIN ANALYZE.
postgres_server_auto_explain_log_analyze: true
# Sets the hosts that can access the database
# (open trust access by default -- NOT for production; see the role README).
postgres_allowed_hosts:
  - {
      type: "host",
      database: "all",
      user: "all",
      address: "0.0.0.0/0",
      method: "trust",
    }

View File

@@ -1,6 +0,0 @@
---
# Handlers for the postgres role: notified by configuration-file changes.
- name: restart postgresql
  become: true
  systemd:
    state: restarted
    name: "{{ postgresql_daemon }}"

View File

@@ -1,16 +0,0 @@
# Galaxy metadata for the postgres role.
galaxy_info:
  author: "JFrog Maintainers Team <installers@jfrog.com>"
  description: "The postgres role will install Postgresql software and configure a database and user to support an Artifactory or Xray server."
  company: JFrog
  issue_tracker_url: "https://github.com/jfrog/JFrog-Cloud-Installers/issues"
  # Galaxy expects a plain SPDX identifier; "license (Apache-2.0)" was the
  # unedited role-skeleton placeholder.
  license: Apache-2.0
  # Quoted so the version is not parsed as a float (2.10 vs 2.1 trap).
  min_ansible_version: "2.9"
  galaxy_tags:
    - postgres
    - jfrog
dependencies: []

View File

@@ -1,33 +0,0 @@
---
# Debian/Ubuntu: register the PGDG apt repository and install PostgreSQL.
- name: install acl, python3-psycopg2
  become: true
  apt:
    name:
      - acl
      - python3-psycopg2
    state: present
    update_cache: true
  # Best-effort: keep going even if these helper packages cannot be installed.
  ignore_errors: true
- name: add postgres apt key
  become: true
  apt_key:
    url: https://www.postgresql.org/media/keys/ACCC4CF8.asc
    id: "0x7FCC7D46ACCC4CF8"
    # NOTE(review): certificate validation is disabled while fetching the
    # repository signing key, which weakens trust in the repo -- confirm
    # whether this workaround is still required.
    validate_certs: false
    state: present
- name: register APT repository
  become: true
  apt_repository:
    repo: deb http://apt.postgresql.org/pub/repos/apt/ {{ ansible_distribution_release }}-pgdg main
    state: present
    filename: pgdg
- name: install postgres packages
  become: true
  apt:
    name:
      - postgresql-{{ postgres_version }}
      - postgresql-contrib-{{ postgres_version }}
    state: present

View File

@@ -1,81 +0,0 @@
---
# RHEL/CentOS/Fedora: configure the PGDG yum repository and install PostgreSQL.
- name: install EPEL repository
  become: true
  # Block YAML form (was legacy key=value shorthand).
  yum:
    name: epel-release
    state: present
  when: > # not for Fedora
    ansible_distribution == 'CentOS' or
    ansible_distribution == 'Red Hat Enterprise Linux'
- name: install acl
  become: true
  yum:
    name:
      - acl
      - sudo
      - wget
      - perl
    state: present
  # Best-effort: helper tools; a failure here is not fatal to the role.
  ignore_errors: true
- name: install python3-psycopg2
  become: true
  yum:
    name:
      - python3-psycopg2
    state: present
  when: ansible_distribution_major_version == '8'
- name: install python2-psycopg2
  become: true
  yum:
    name:
      - python-psycopg2
    state: present
  when: ansible_distribution_major_version == '7'
- name: fixup some locale issues
  become: true
  lineinfile:
    dest: /etc/default/locale
    line: 'LANGUAGE="{{ item }}"'
    state: present
    create: true
  # NOTE(review): 'en_us.UTF-8' looks like a typo for 'en_US.UTF-8' -- kept
  # as-is to preserve behavior; confirm the intended locale name.
  loop:
    - 'en_US:en'
    - 'en_us.UTF-8'
# Scrape the PGDG repo index for the newest pgdg-redhat-repo-latest RPM name.
- name: get latest version
  vars:
    base: http://download.postgresql.org/pub/repos/yum
    ver: "{{ ansible_distribution_major_version }}"
  shell: |
    set -eo pipefail
    wget -O - {{ base }}/reporpms/EL-{{ ver }}-x86_64/ 2>/dev/null | \
    grep 'pgdg-redhat-repo-latest' | \
    perl -pe 's/^.*rpm">//g' | \
    perl -pe 's/<\/a>.*//g' | \
    tail -n 1
  args:
    executable: /bin/bash
  changed_when: false
  check_mode: false
  register: latest_version
  tags: [skip_ansible_lint]
- name: config postgres repository
  become: true
  vars:
    base: http://download.postgresql.org/pub/repos/yum
    ver: "{{ ansible_distribution_major_version }}"
  yum:
    name: "{{ base }}/reporpms/EL-{{ ver }}-x86_64/{{ latest_version.stdout }}"
    state: present
- name: install postgres packages
  become: true
  yum:
    name:
      - postgresql{{ postgres_server_pkg_version }}-server
      - postgresql{{ postgres_server_pkg_version }}-contrib
    state: present

View File

@@ -1,118 +0,0 @@
---
# Install PostgreSQL, initialise the cluster, render its configuration and
# create the databases, users and Mission Control schemas.
- name: define OS-specific variables
  include_vars: "{{ ansible_os_family }}.yml"
- name: perform installation
  include_tasks: "{{ ansible_os_family }}.yml"
- name: Set PostgreSQL environment variables.
  become: true
  template:
    src: postgres.sh.j2
    dest: /etc/profile.d/postgres.sh
    mode: "0644"
  notify: restart postgresql
- name: Ensure PostgreSQL data directory exists.
  become: true
  become_user: postgres
  file:
    path: "{{ postgresql_data_dir }}"
    owner: postgres
    group: postgres
    state: directory
    mode: "0700"
- name: Initialize PostgreSQL database cluster
  become: true
  become_user: postgres
  command: "{{ postgresql_bin_path }}/initdb -D {{ postgresql_data_dir }}"
  args:
    # Idempotence guard: PG_VERSION exists once the cluster is initialised.
    creates: "{{ postgresql_data_dir }}/PG_VERSION"
  environment:
    LC_ALL: "{{ postgres_locale }}"
- name: Setup postgres configuration files
  become: true
  become_user: postgres
  template:
    src: "{{ item }}.j2"
    dest: "{{ postgresql_config_path }}/{{ item }}"
    owner: postgres
    group: postgres
    mode: u=rw,go=r
  loop:
    - pg_hba.conf
    - postgresql.conf
  notify: restart postgresql
- name: Ensure PostgreSQL is started and enabled on boot
  become: true
  systemd:
    name: "{{ postgresql_daemon }}"
    state: started
    enabled: true
- name: Hold until Postgresql is up and running
  wait_for:
    port: "{{ postgres_port }}"
- name: Create users
  become: true
  become_user: postgres
  postgresql_user:
    name: "{{ item.db_user }}"
    password: "{{ item.db_password }}"
    conn_limit: "-1"
  loop: "{{ db_users | default([]) }}"
  no_log: true  # secret passwords
- name: Create a database
  become: true
  become_user: postgres
  postgresql_db:
    name: "{{ item.db_name }}"
    owner: "{{ item.db_owner }}"
    encoding: UTF-8
    lc_collate: "{{ postgres_locale }}"
    lc_ctype: "{{ postgres_locale }}"
    template: template0
  loop: "{{ dbs | default([]) }}"
- name: Check if MC schemas already exists
  become: true
  become_user: postgres
  command: psql -d {{ mc_db_name }} -t -c "\dn"
  register: mc_schemas_loaded
  # Read-only query: never report it as a change.
  changed_when: false
- name: Create schemas for mission-control
  become: true
  become_user: postgres
  command: psql -d {{ mc_db_name }} -c 'CREATE SCHEMA {{ item }} authorization {{ mc_db_user }}'
  loop: "{{ mc_schemas | default([]) }}"
  # Plain expressions instead of the templated '{{ item }}' string -- bare
  # Jinja inside `when` is deprecated and error-prone.
  when:
    - mc_schemas_loaded.stdout is defined
    - item not in mc_schemas_loaded.stdout
- name: Grant all privileges to mc user on its schema
  become: true
  become_user: postgres
  postgresql_privs:
    database: "{{ mc_db_name }}"
    privs: ALL
    type: schema
    roles: "{{ mc_db_user }}"
    objs: "{{ item }}"
  loop: "{{ mc_schemas | default([]) }}"
- name: Grant privs on db
  become: true
  become_user: postgres
  postgresql_privs:
    database: "{{ item.db_name }}"
    role: "{{ item.db_owner }}"
    state: present
    privs: ALL
    type: database
  loop: "{{ dbs | default([]) }}"
- debug:
    msg: "Restarted postgres systemd {{ postgresql_daemon }}"

View File

@@ -1,18 +0,0 @@
{{ ansible_managed | comment }}
# PostgreSQL Client Authentication Configuration File
# ===================================================
#
# See: https://www.postgresql.org/docs/current/static/auth-pg-hba-conf.html
# TYPE DATABASE USER ADDRESS METHOD
## localhost connections through Unix port (user name), IPv4, IPv6 (MD5 pw).
local all all peer
host all all 127.0.0.1/32 md5
host all all ::1/128 md5
## remote connections IPv4
{% if postgres_allowed_hosts and postgres_allowed_hosts is iterable %}
{% for host in postgres_allowed_hosts %}
{# Fix: the loop variable is `host` and the entry key is `method` (see the
   role defaults); the previous `item.auth` referenced an undefined variable
   and a key that does not exist, so the method column always fell back. #}
{{ host.type | default('host') }} {{ host.database | default('all') }} {{ host.user | default('all') }} {{ host.address | default('0.0.0.0/0') }} {{ host.method | default('trust') }}
{% endfor %}
{% endif %}

View File

@@ -1,4 +0,0 @@
{{ ansible_managed | comment }}
# Expose the PostgreSQL data directory, locale and client tools to login shells.
export PGDATA={{ postgresql_data_dir }}
export LC_ALL={{ postgres_locale }}
export PATH=$PATH:{{ postgresql_bin_path }}

View File

@@ -1,718 +0,0 @@
{{ ansible_managed | comment }}
# -----------------------------
# PostgreSQL configuration file
# -----------------------------
#
# This file consists of lines of the form:
#
# name = value
#
# (The "=" is optional.) Whitespace may be used. Comments are introduced with
# "#" anywhere on a line. The complete list of parameter names and allowed
# values can be found in the PostgreSQL documentation.
#
# The commented-out settings shown in this file represent the default values.
# Re-commenting a setting is NOT sufficient to revert it to the default value;
# you need to reload the server.
#
# This file is read on server startup and when the server receives a SIGHUP
# signal. If you edit the file on a running system, you have to SIGHUP the
# server for the changes to take effect, run "pg_ctl reload", or execute
# "SELECT pg_reload_conf()". Some parameters, which are marked below,
# require a server shutdown and restart to take effect.
#
# Any parameter can also be given as a command-line option to the server, e.g.,
# "postgres -c log_connections=on". Some parameters can be changed at run time
# with the "SET" SQL command.
#
# Memory units: kB = kilobytes Time units: ms = milliseconds
# MB = megabytes s = seconds
# GB = gigabytes min = minutes
# TB = terabytes h = hours
# d = days
#------------------------------------------------------------------------------
# FILE LOCATIONS
#------------------------------------------------------------------------------
# The default values of these variables are driven from the -D command-line
# option or PGDATA environment variable, represented here as ConfigDir.
data_directory = '{{ postgresql_data_dir }}' # use data in another directory
# (change requires restart)
hba_file = '{{ postgresql_config_path }}/pg_hba.conf' # host-based authentication file
# (change requires restart)
ident_file = '{{ postgresql_config_path }}/pg_ident.conf' # ident configuration file
# (change requires restart)
# If external_pid_file is not explicitly set, no extra PID file is written.
external_pid_file = '{{ postgresql_external_pid_file }}' # write an extra PID file
# (change requires restart)
#------------------------------------------------------------------------------
# CONNECTIONS AND AUTHENTICATION
#------------------------------------------------------------------------------
# - Connection Settings -
listen_addresses = '{{ postgres_listen_addresses }}' # what IP address(es) to listen on;
# comma-separated list of addresses;
# defaults to 'localhost'; use '*' for all
# (change requires restart)
port = {{ postgres_port }} # (change requires restart)
max_connections = {{ postgres_server_max_connections }} # (change requires restart)
#superuser_reserved_connections = 3 # (change requires restart)
#unix_socket_directories = '/var/run/postgresql' # comma-separated list of directories
# (change requires restart)
#unix_socket_group = '' # (change requires restart)
#unix_socket_permissions = 0777 # begin with 0 to use octal notation
# (change requires restart)
#bonjour = off # advertise server via Bonjour
# (change requires restart)
#bonjour_name = '' # defaults to the computer name
# (change requires restart)
# - TCP settings -
# see "man 7 tcp" for details
#tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds;
# 0 selects the system default
#tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds;
# 0 selects the system default
#tcp_keepalives_count = 0 # TCP_KEEPCNT;
# 0 selects the system default
#tcp_user_timeout = 0 # TCP_USER_TIMEOUT, in milliseconds;
# 0 selects the system default
# - Authentication -
#authentication_timeout = 1min # 1s-600s
#ssl = off # (change requires restart)
#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers
# (change requires restart)
#ssl_prefer_server_ciphers = on # (change requires restart)
#ssl_ecdh_curve = 'prime256v1' # (change requires restart)
#ssl_cert_file = 'server.crt' # (change requires restart)
#ssl_key_file = 'server.key' # (change requires restart)
#ssl_ca_file = '' # (change requires restart)
#ssl_crl_file = '' # (change requires restart)
#password_encryption = on
#db_user_namespace = off
#row_security = on
# GSSAPI using Kerberos
#krb_server_keyfile = ''
#krb_caseins_users = off
#------------------------------------------------------------------------------
# RESOURCE USAGE (except WAL)
#------------------------------------------------------------------------------
# - Memory -
shared_buffers = {{ postgres_server_shared_buffers }} # min 128kB
# (change requires restart)
#huge_pages = try # on, off, or try
# (change requires restart)
#temp_buffers = 8MB # min 800kB
#max_prepared_transactions = 0 # zero disables the feature
# (change requires restart)
# Caution: it is not advisable to set max_prepared_transactions nonzero unless
# you actively intend to use prepared transactions.
#work_mem = 4MB # min 64kB
maintenance_work_mem = {{ postgres_server_maintenance_work_mem }} # min 1MB
#replacement_sort_tuples = 150000 # limits use of replacement selection sort
#autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem
#max_stack_depth = 2MB # min 100kB
dynamic_shared_memory_type = posix # the default is the first option
# supported by the operating system:
# posix
# sysv
# windows
# mmap
# use none to disable dynamic shared memory
# (change requires restart)
# - Disk -
#temp_file_limit = -1 # limits per-process temp file space
# in kB, or -1 for no limit
# - Kernel Resource Usage -
#max_files_per_process = 1000 # min 25
# (change requires restart)
shared_preload_libraries = 'pg_stat_statements' # restart on change
# - Cost-Based Vacuum Delay -
#vacuum_cost_delay = 0 # 0-100 milliseconds
#vacuum_cost_page_hit = 1 # 0-10000 credits
#vacuum_cost_page_miss = 10 # 0-10000 credits
#vacuum_cost_page_dirty = 20 # 0-10000 credits
#vacuum_cost_limit = 200 # 1-10000 credits
# - Background Writer -
#bgwriter_delay = 200ms # 10-10000ms between rounds
#bgwriter_lru_maxpages = 100 # 0-1000 max buffers written/round
#bgwriter_lru_multiplier = 2.0 # 0-10.0 multiplier on buffers scanned/round
#bgwriter_flush_after = 512kB # measured in pages, 0 disables
# - Asynchronous Behavior -
effective_io_concurrency = {{ postgres_server_effective_io_concurrency }}
max_worker_processes = {{ postgres_server_max_worker_processes }}
# Fix: this line was a duplicate `max_parallel_workers_per_gather` entry
# (PostgreSQL keeps the last duplicate, so the first value was silently
# ignored) and the otherwise-unused postgres_server_max_parallel_workers
# default was never applied.
max_parallel_workers = {{ postgres_server_max_parallel_workers }}
max_parallel_workers_per_gather = {{ postgres_server_max_parallel_workers_per_gather }}
#old_snapshot_threshold = -1 # 1min-60d; -1 disables; 0 is immediate
# (change requires restart)
#backend_flush_after = 0 # measured in pages, 0 disables
{% if postgres_version|string != "9.6" %}
parallel_leader_participation = {{ "on" if postgres_server_parallel_leader_participation else "off" }}
max_parallel_maintenance_workers = {{ postgres_server_max_parallel_maintenance_workers }}
{% endif %}
#------------------------------------------------------------------------------
# WRITE AHEAD LOG
#------------------------------------------------------------------------------
# - Settings -
#wal_level = minimal # minimal, replica, or logical
# (change requires restart)
#fsync = on # flush data to disk for crash safety
# (turning this off can cause
# unrecoverable data corruption)
#synchronous_commit = on # synchronization level;
# off, local, remote_write, remote_apply, or on
#wal_sync_method = fsync # the default is the first option
# supported by the operating system:
# open_datasync
# fdatasync (default on Linux)
# fsync
# fsync_writethrough
# open_sync
full_page_writes = {{ "off" if postgres_server_volume_is_zfs else "on" }} # off OK on ZFS # recover from partial page writes
wal_compression = off # enable compression of full-page writes
#wal_log_hints = off # also do full page writes of non-critical updates
# (change requires restart)
#wal_buffers = -1 # min 32kB, -1 sets based on shared_buffers
# (change requires restart)
#wal_writer_delay = 200ms # 1-10000 milliseconds
#wal_writer_flush_after = 1MB # measured in pages, 0 disables
commit_delay = 100000 # range 0-100000, in microseconds
#commit_siblings = 5 # range 1-1000
# - Checkpoints -
checkpoint_timeout = 4h # range 30s-1d
max_wal_size = 100GB
min_wal_size = 1GB
checkpoint_completion_target = 0.8 # checkpoint target duration, 0.0 - 1.0
#checkpoint_flush_after = 256kB # measured in pages, 0 disables
#checkpoint_warning = 30s # 0 disables
# - Archiving -
#archive_mode = off # enables archiving; off, on, or always
# (change requires restart)
#archive_command = '' # command to use to archive a logfile segment
# placeholders: %p = path of file to archive
# %f = file name only
# e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f'
#archive_timeout = 0 # force a logfile segment switch after this
# number of seconds; 0 disables
# - Archive Recovery -
# These are only used in recovery mode.
#restore_command = '' # command to use to restore an archived logfile segment
# placeholders: %p = path of file to restore
# %f = file name only
# e.g. 'cp /mnt/server/archivedir/%f %p'
# (change requires restart)
#archive_cleanup_command = '' # command to execute at every restartpoint
#recovery_end_command = '' # command to execute at completion of recovery
# - Recovery Target -
# Set these only when performing a targeted recovery.
#recovery_target = '' # 'immediate' to end recovery as soon as a
# consistent state is reached
# (change requires restart)
#recovery_target_name = '' # the named restore point to which recovery will proceed
# (change requires restart)
#recovery_target_time = '' # the time stamp up to which recovery will proceed
# (change requires restart)
#recovery_target_xid = '' # the transaction ID up to which recovery will proceed
# (change requires restart)
#recovery_target_lsn = '' # the WAL LSN up to which recovery will proceed
# (change requires restart)
#recovery_target_inclusive = on # Specifies whether to stop:
# just after the specified recovery target (on)
# just before the recovery target (off)
# (change requires restart)
#recovery_target_timeline = 'latest' # 'current', 'latest', or timeline ID
# (change requires restart)
#recovery_target_action = 'pause' # 'pause', 'promote', 'shutdown'
# (change requires restart)
#------------------------------------------------------------------------------
# REPLICATION
#------------------------------------------------------------------------------
# - Sending Server(s) -
# Set these on the master and on any standby that will send replication data.
#max_wal_senders = 0 # max number of walsender processes
# (change requires restart)
#wal_keep_segments = 0 # in logfile segments, 16MB each; 0 disables
#wal_sender_timeout = 60s # in milliseconds; 0 disables
#max_replication_slots = 0 # max number of replication slots
# (change requires restart)
#track_commit_timestamp = off # collect timestamp of transaction commit
# (change requires restart)
# - Master Server -
# These settings are ignored on a standby server.
#synchronous_standby_names = '' # standby servers that provide sync rep
# number of sync standbys and comma-separated list of application_name
# from standby(s); '*' = all
#vacuum_defer_cleanup_age = 0 # number of xacts by which cleanup is delayed
# - Standby Servers -
# These settings are ignored on a master server.
#hot_standby = off # "on" allows queries during recovery
# (change requires restart)
#max_standby_archive_delay = 30s # max delay before canceling queries
# when reading WAL from archive;
# -1 allows indefinite delay
#max_standby_streaming_delay = 30s # max delay before canceling queries
# when reading streaming WAL;
# -1 allows indefinite delay
#wal_receiver_status_interval = 10s # send replies at least this often
# 0 disables
#hot_standby_feedback = off # send info from standby to prevent
# query conflicts
#wal_receiver_timeout = 60s # time that receiver waits for
# communication from master
# in milliseconds; 0 disables
#wal_retrieve_retry_interval = 5s # time to wait before retrying to
# retrieve WAL after a failed attempt
#------------------------------------------------------------------------------
# QUERY TUNING
#------------------------------------------------------------------------------
# - Planner Method Configuration -
#enable_bitmapscan = on
#enable_hashagg = on
#enable_hashjoin = on
#enable_indexscan = on
#enable_indexonlyscan = on
#enable_material = on
#enable_mergejoin = on
#enable_nestloop = on
#enable_parallel_append = on
#enable_seqscan = on
#enable_sort = on
#enable_tidscan = on
#enable_partitionwise_join = off
#enable_partitionwise_aggregate = off
#enable_parallel_hash = on
#enable_partition_pruning = on
# - Planner Cost Constants -
#seq_page_cost = 1.0 # measured on an arbitrary scale
random_page_cost = {{ postgres_server_random_page_cost }}
#cpu_tuple_cost = 0.01 # same scale as above
#cpu_index_tuple_cost = 0.005 # same scale as above
#cpu_operator_cost = 0.0025 # same scale as above
#parallel_tuple_cost = 0.1 # same scale as above
#parallel_setup_cost = 1000.0 # same scale as above
#jit_above_cost = 100000 # perform JIT compilation if available
# and query more expensive than this;
# -1 disables
#jit_inline_above_cost = 500000 # inline small functions if query is
# more expensive than this; -1 disables
#jit_optimize_above_cost = 500000 # use expensive JIT optimizations if
# query is more expensive than this;
# -1 disables
#min_parallel_table_scan_size = 8MB
#min_parallel_index_scan_size = 512kB
#effective_cache_size = 4GB
# - Genetic Query Optimizer -
#geqo = on
#geqo_threshold = 12
#geqo_effort = 5 # range 1-10
#geqo_pool_size = 0 # selects default based on effort
#geqo_generations = 0 # selects default based on effort
#geqo_selection_bias = 2.0 # range 1.5-2.0
#geqo_seed = 0.0 # range 0.0-1.0
# - Other Planner Options -
#default_statistics_target = 100 # range 1-10000
#constraint_exclusion = partition # on, off, or partition
#cursor_tuple_fraction = 0.1 # range 0.0-1.0
#from_collapse_limit = 8
#join_collapse_limit = 8 # 1 disables collapsing of explicit
# JOIN clauses
#force_parallel_mode = off
#jit = on # allow JIT compilation
#plan_cache_mode = auto # auto, force_generic_plan or
# force_custom_plan
#------------------------------------------------------------------------------
# ERROR REPORTING AND LOGGING
#------------------------------------------------------------------------------
# - Where to Log -
log_destination = 'stderr' # Valid values are combinations of
# stderr, csvlog, syslog, and eventlog,
# depending on platform. csvlog
# requires logging_collector to be on.
# This is used when logging to stderr:
logging_collector = on # Enable capturing of stderr and csvlog
# into log files. Required to be on for
# csvlogs.
# (change requires restart)
# These are only used if logging_collector is on:
log_directory = 'pg_log' # directory where log files are written,
# can be absolute or relative to PGDATA
log_filename = 'postgresql-%a.log' # log file name pattern,
# can include strftime() escapes
#log_file_mode = 0600 # creation mode for log files,
# begin with 0 to use octal notation
log_truncate_on_rotation = on # If on, an existing log file with the
# same name as the new log file will be
# truncated rather than appended to.
# But such truncation only occurs on
# time-driven rotation, not on restarts
# or size-driven rotation. Default is
# off, meaning append to existing files
# in all cases.
log_rotation_age = 1d # Automatic rotation of logfiles will
# happen after that time. 0 disables.
log_rotation_size = 0 # Automatic rotation of logfiles will
# happen after that much log output.
# 0 disables.
# These are relevant when logging to syslog:
#syslog_facility = 'LOCAL0'
#syslog_ident = 'postgres'
#syslog_sequence_numbers = on
#syslog_split_messages = on
# This is only relevant when logging to eventlog (win32):
# (change requires restart)
#event_source = 'PostgreSQL'
# - When to Log -
#client_min_messages = notice # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# log
# notice
# warning
# error
#log_min_messages = warning # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# info
# notice
# warning
# error
# log
# fatal
# panic
#log_min_error_statement = error # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# info
# notice
# warning
# error
# log
# fatal
# panic (effectively off)
#log_min_duration_statement = -1 # -1 is disabled, 0 logs all statements
# and their durations, > 0 logs only
# statements running at least this number
# of milliseconds
# - What to Log -
#debug_print_parse = off
#debug_print_rewritten = off
#debug_print_plan = off
#debug_pretty_print = on
log_checkpoints = {{ "on" if postgres_server_log_checkpoints else "off" }}
log_connections = {{ "on" if postgres_server_log_connections else "off" }}
log_disconnections = {{ "on" if postgres_server_log_disconnections else "off" }}
log_duration = {{ "on" if postgres_server_log_duration else "off" }}
log_error_verbosity = {{ postgres_server_log_error_verbosity }} # terse, default, or verbose messages
log_hostname = {{ "on" if postgres_server_log_hostname else "off" }}
log_line_prefix = '< %m > ' # special values:
# %a = application name
# %u = user name
# %d = database name
# %r = remote host and port
# %h = remote host
# %p = process ID
# %t = timestamp without milliseconds
# %m = timestamp with milliseconds
# %n = timestamp with milliseconds (as a Unix epoch)
# %i = command tag
# %e = SQL state
# %c = session ID
# %l = session line number
# %s = session start timestamp
# %v = virtual transaction ID
# %x = transaction ID (0 if none)
# %q = stop here in non-session
# processes
# %% = '%'
# e.g. '<%u%%%d> '
log_lock_waits = {{ "on" if postgres_server_log_lock_waits else "off" }} # log lock waits >= deadlock_timeout
log_statement = '{{ postgres_server_log_statements }}' # none, ddl, mod, all
#log_replication_commands = off
#log_temp_files = -1 # log temporary files equal or larger
# than the specified size in kilobytes;
# -1 disables, 0 logs all temp files
log_timezone = 'Etc/UTC'
# - Process Title -
#cluster_name = '' # added to process titles if nonempty
# (change requires restart)
#update_process_title = on
#------------------------------------------------------------------------------
# RUNTIME STATISTICS
#------------------------------------------------------------------------------
# - Query/Index Statistics Collector -
#track_activities = on
#track_counts = on
#track_io_timing = off
#track_functions = none # none, pl, all
track_activity_query_size = 102400 # (change requires restart)
#stats_temp_directory = 'pg_stat_tmp'
# Track statements generated by stored procedures as well
pg_stat_statements.track = all
# - Statistics Monitoring -
#log_parser_stats = off
#log_planner_stats = off
#log_executor_stats = off
#log_statement_stats = off
#------------------------------------------------------------------------------
# AUTOVACUUM PARAMETERS
#------------------------------------------------------------------------------
#autovacuum = on # Enable autovacuum subprocess? 'on'
# requires track_counts to also be on.
#log_autovacuum_min_duration = -1 # -1 disables, 0 logs all actions and
# their durations, > 0 logs only
# actions running at least this number
# of milliseconds.
#autovacuum_max_workers = 3 # max number of autovacuum subprocesses
# (change requires restart)
#autovacuum_naptime = 1min # time between autovacuum runs
#autovacuum_vacuum_threshold = 50 # min number of row updates before
# vacuum
#autovacuum_analyze_threshold = 50 # min number of row updates before
# analyze
#autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum
#autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze
#autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum
# (change requires restart)
#autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age
# before forced vacuum
# (change requires restart)
#autovacuum_vacuum_cost_delay = 20ms # default vacuum cost delay for
# autovacuum, in milliseconds;
# -1 means use vacuum_cost_delay
#autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for
# autovacuum, -1 means use
# vacuum_cost_limit
#------------------------------------------------------------------------------
# CLIENT CONNECTION DEFAULTS
#------------------------------------------------------------------------------
# - Statement Behavior -
#search_path = '"$user", public' # schema names
#default_tablespace = '' # a tablespace name, '' uses the default
#temp_tablespaces = '' # a list of tablespace names, '' uses
# only default tablespace
#check_function_bodies = on
#default_transaction_isolation = 'read committed'
#default_transaction_read_only = off
#default_transaction_deferrable = off
#session_replication_role = 'origin'
#statement_timeout = 0 # in milliseconds, 0 is disabled
#lock_timeout = 0 # in milliseconds, 0 is disabled
#idle_in_transaction_session_timeout = 0 # in milliseconds, 0 is disabled
#vacuum_freeze_min_age = 50000000
#vacuum_freeze_table_age = 150000000
#vacuum_multixact_freeze_min_age = 5000000
#vacuum_multixact_freeze_table_age = 150000000
#bytea_output = 'hex' # hex, escape
#xmlbinary = 'base64'
#xmloption = 'content'
#gin_fuzzy_search_limit = 0
#gin_pending_list_limit = 4MB
# - Locale and Formatting -
datestyle = 'iso, mdy'
#intervalstyle = 'postgres'
timezone = 'Europe/Berlin'
#timezone_abbreviations = 'Default' # Select the set of available time zone
# abbreviations. Currently, there are
# Default
# Australia (historical usage)
# India
# You can create your own file in
# share/timezonesets/.
#extra_float_digits = 0 # min -15, max 3
#client_encoding = sql_ascii # actually, defaults to database
# encoding
# These settings are initialized by initdb, but they can be changed.
lc_messages = 'en_US.UTF-8' # locale for system error message
# strings
lc_monetary = 'en_US.UTF-8' # locale for monetary formatting
lc_numeric = 'en_US.UTF-8' # locale for number formatting
lc_time = 'en_US.UTF-8' # locale for time formatting
# default configuration for text search
default_text_search_config = 'pg_catalog.english'
# - Other Defaults -
#dynamic_library_path = '$libdir'
#local_preload_libraries = ''
{# Build the list of libraries to preload into every session. #}
{% set preload_libraries = [] %}
{% if postgres_server_auto_explain_enabled %}
{# Use `set _ = ...` to swallow append()'s None return value: the previous
   `{{ preload_libraries.append(...) }}` printed a literal "None" line into
   the rendered postgresql.conf, which PostgreSQL rejects. #}
{% set _ = preload_libraries.append("auto_explain") %}
{% endif %}
session_preload_libraries = '{{ ",".join(preload_libraries) }}'
#------------------------------------------------------------------------------
# auto_explain SETTINGS
#------------------------------------------------------------------------------
# auto_explain.log_min_duration is a duration in milliseconds, not a boolean:
# 0 logs the plan of every statement, -1 disables auto_explain logging, and
# "on"/"off" are rejected by PostgreSQL at startup. The role variable is used
# as a flag (see the `if` test), so map it to 0 / -1 accordingly.
auto_explain.log_min_duration = {{ "0" if postgres_server_auto_explain_log_min_duration else "-1" }}
auto_explain.log_analyze = {{ "on" if postgres_server_auto_explain_log_analyze else "off" }}
#------------------------------------------------------------------------------
# LOCK MANAGEMENT
#------------------------------------------------------------------------------
#deadlock_timeout = 1s
max_locks_per_transaction = {{ postgres_server_max_locks_per_transaction }} # min 10
# (change requires restart)
#max_pred_locks_per_transaction = 64 # min 10
# (change requires restart)
#------------------------------------------------------------------------------
# VERSION/PLATFORM COMPATIBILITY
#------------------------------------------------------------------------------
# - Previous PostgreSQL Versions -
#array_nulls = on
#backslash_quote = safe_encoding # on, off, or safe_encoding
#default_with_oids = off
#escape_string_warning = on
#lo_compat_privileges = off
#operator_precedence_warning = off
#quote_all_identifiers = off
#sql_inheritance = on
#standard_conforming_strings = on
#synchronize_seqscans = on
# - Other Platforms and Clients -
#transform_null_equals = off
#------------------------------------------------------------------------------
# ERROR HANDLING
#------------------------------------------------------------------------------
#exit_on_error = off # terminate session on any error?
#restart_after_crash = on # reinitialize after backend crash?
#------------------------------------------------------------------------------
# CONFIG FILE INCLUDES
#------------------------------------------------------------------------------
# These options allow settings to be loaded from files other than the
# default postgresql.conf.
#include_dir = 'conf.d' # include files ending in '.conf' from
# directory 'conf.d'
#include_if_exists = 'exists.conf' # include file only if it exists
#include = 'special.conf' # include file
#------------------------------------------------------------------------------
# CUSTOMIZED OPTIONS
#------------------------------------------------------------------------------
# Add settings for extensions here

View File

@@ -1,6 +0,0 @@
---
# Debian/Ubuntu filesystem layout for a versioned PostgreSQL install.
postgresql_data_dir: "/var/lib/postgresql/{{ postgres_version }}/main"
postgresql_bin_path: "/usr/lib/postgresql/{{ postgres_version }}/bin"
postgresql_config_path: "/etc/postgresql/{{ postgres_version }}/main"
# systemd unit name; quoted and with consistent `{{ var }}` spacing.
postgresql_daemon: "postgresql@{{ postgres_version }}-main"
postgresql_external_pid_file: "/var/run/postgresql/{{ postgres_version }}-main.pid"

View File

@@ -1,6 +0,0 @@
---
# RHEL/CentOS (PGDG packaging) filesystem layout for PostgreSQL.
postgresql_bin_path: "/usr/pgsql-{{ postgres_version }}/bin"
postgresql_data_dir: "/var/lib/pgsql/{{ postgres_version }}/data"
# On PGDG installs the configuration lives inside the data directory.
postgresql_config_path: "/var/lib/pgsql/{{ postgres_version }}/data"
postgresql_daemon: "postgresql-{{ postgres_version }}.service"
postgresql_external_pid_file: "/var/run/postgresql/{{ postgres_version }}-main.pid"

View File

@@ -1,26 +0,0 @@
# Xray
The xray role will install Xray software onto the host. An Artifactory server and PostgreSQL database are required.
### Role Variables
* _xray_upgrade_only_: Perform a software upgrade only. Default is false.
Additional variables can be found in [defaults/main.yml](./defaults/main.yml).
## Example Playbook
```
---
- hosts: xray_servers
roles:
- xray
```
## Upgrades
The Xray role supports software upgrades. To use a role to perform a software upgrade only, use the _xray_upgrade_only_ variable and specify the version. See the following example.
```
- hosts: xray_servers
vars:
xray_version: "{{ lookup('env', 'xray_version_upgrade') }}"
xray_upgrade_only: true
roles:
- xray
```

View File

@@ -1,77 +0,0 @@
---
# defaults file for xray
# indicates where this collection was downloaded from (galaxy, automation_hub, standalone)
ansible_marketplace: standalone
# whether to enable HA
xray_ha_enabled: false
# role of this node in the HA cluster (master or member); no space before ':'
xray_ha_node_type: master
# The location where xray should install.
jfrog_home_directory: /opt/jfrog
# The remote xray download file
xray_tar: https://releases.jfrog.io/artifactory/jfrog-xray/xray-linux/{{ xray_version }}/jfrog-xray-{{ xray_version }}-linux.tar.gz
#The xray install directory
xray_untar_home: "{{ jfrog_home_directory }}/jfrog-xray-{{ xray_version }}-linux"
xray_home: "{{ jfrog_home_directory }}/xray"
xray_install_script_path: "{{ xray_home }}/app/bin"
xray_thirdparty_path: "{{ xray_home }}/app/third-party"
xray_archive_service_cmd: "{{ xray_install_script_path }}/installService.sh"
#xray users and groups
xray_user: xray
xray_group: xray
xray_uid: 1035
xray_gid: 1035
xray_daemon: xray
flow_type: archive
#rabbitmq user
xray_rabbitmq_user: guest
xray_rabbitmq_password: guest
xray_rabbitmq_url: "amqp://localhost:5672/"
xray_rabbitmq_default_cookie: "XRAY_RABBITMQ_COOKIE"
# if this is an upgrade
xray_upgrade_only: false
xray_system_yaml_template: system.yaml.j2
linux_distro: "{{ ansible_distribution | lower }}{{ansible_distribution_major_version}}"
xray_db_util_search_filter:
ubuntu16:
db5: 'db5.3-util.*ubuntu.*amd64\.deb'
db: 'db-util.*ubuntu.*all.deb'
ubuntu18:
db5: 'db5.3-util.*ubuntu.*amd64\.deb'
db: 'db-util.*ubuntu.*all.deb'
ubuntu20:
db5: 'db5.3-util.*ubuntu.*amd64\.deb'
db: 'db-util.*ubuntu.*all.deb'
debian8:
db5: 'db5.3-util.*deb8.*amd64\.deb'
db: 'db-util_([0-9]{1,3}\.?){3}_all\.deb'
debian9:
db5: 'db5.3-util.*deb9.*amd64\.deb'
db: 'db-util_([0-9]{1,3}\.?){3}_all\.deb'
debian10:
db5: 'TBD'
db: 'db-util_([0-9]{1,3}\.?){3}.*nmu1_all\.deb'
yum_python_interpreter: >-
{%- if linux_distro is not defined -%}
/usr/bin/python3
{%- elif linux_distro in ['centos7', 'rhel7'] -%}
/usr/bin/python
{%- else -%}
/usr/bin/python3
{%- endif -%}

View File

@@ -1,7 +0,0 @@
---
# handlers file for xray
# Restart the Xray systemd service; notified by configuration-changing tasks.
- name: restart xray
  become: true
  systemd:
    name: "{{ xray_daemon }}"
    state: restarted

View File

@@ -1,16 +0,0 @@
galaxy_info:
  author: "JFrog Maintainers Team <installers@jfrog.com>"
  description: "The xray role will install Xray software onto the host. An Artifactory server and PostgreSQL database are required."
  company: JFrog
  issue_tracker_url: "https://github.com/jfrog/JFrog-Cloud-Installers/issues"
  # SPDX identifier only; the skeleton placeholder "license (Apache-2.0)" was left in.
  license: Apache-2.0
  # Quoted so YAML does not parse the version as the float 2.9.
  min_ansible_version: "2.9"
  galaxy_tags:
    - xray
    - jfrog
dependencies: []

View File

@@ -1,44 +0,0 @@
# Renders an expect(1)/Tcl script (as a string fact) that answers the
# interactive installer prompts listed in exp_scenarios; it is executed
# later via /usr/bin/expect. The loop bails out with exit code 128 after
# 16 prompt cycles to avoid hanging forever.
# NOTE(review): both branches of the final os_error_flag check emit the
# identical "INSTALLER_EXIT_STATUS-$value" message — confirm whether the
# error branch was meant to report something different.
- name: Prepare expect scenario script
set_fact:
expect_scenario: |
set timeout 300
spawn {{ exp_executable_cmd }}
expect_before timeout { exit 1 }
set CYCLE_END 0
set count 0
while { $CYCLE_END == 0 } {
expect {
{% for each_request in exp_scenarios %}
-nocase -re {{ '{' }}{{ each_request.expecting }}.*} {
send "{{ each_request.sending }}\n"
}
{% endfor %}
eof {
set CYCLE_END 1
}
}
set count "[expr $count + 1]"
if { $count > 16} {
exit 128
}
}
expect eof
lassign [wait] pid spawnid os_error_flag value
if {$os_error_flag == 0} {
puts "INSTALLER_EXIT_STATUS-$value"
} else {
puts "INSTALLER_EXIT_STATUS-$value"
}
- name: Interactive with expect
become: yes
ignore_errors: yes
shell: |
{{ expect_scenario }}
args:
executable: /usr/bin/expect
chdir: "{{ exp_dir }}"
register: exp_result

View File

@@ -1,165 +0,0 @@
---
- debug:
msg: "Performing installation of Xray version : {{ xray_version }}"
- debug:
msg: "ansible_os_family: {{ ansible_os_family }}"
- name: Install expect dependency
become: yes
yum:
name: expect
state: present
when: ansible_os_family == 'RedHat'
- name: Install expect dependency
become: yes
apt:
name: expect
state: present
update_cache: yes
when: ansible_os_family == 'Debian'
- name: Ensure group xray exist
become: yes
group:
name: "{{ xray_group }}"
gid: "{{ xray_gid }}"
state: present
- name: Ensure user xray exist
become: yes
user:
uid: "{{ xray_uid }}"
name: "{{ xray_user }}"
group: "{{ xray_group }}"
create_home: yes
home: "{{ xray_home }}"
shell: /bin/bash
state: present
- name: Download xray
become: yes
unarchive:
src: "{{ xray_tar }}"
dest: "{{ jfrog_home_directory }}"
remote_src: yes
owner: "{{ xray_user }}"
group: "{{ xray_group }}"
creates: "{{ xray_untar_home }}"
register: downloadxray
until: downloadxray is succeeded
retries: 3
- name: Check if app directory exists
become: yes
stat:
path: "{{ xray_home }}/app"
register: app_dir_check
- name: Copy untar directory to xray home
become: yes
command: "cp -r {{ xray_untar_home }}/. {{ xray_home }}"
when: not app_dir_check.stat.exists
- name: Create required directories
become: yes
file:
path: "{{ item }}"
state: directory
recurse: yes
owner: "{{ xray_user }}"
group: "{{ xray_group }}"
loop:
- "{{ xray_home }}/var/etc"
- "{{ xray_home }}/var/etc/info/"
- "{{ xray_home }}/var/etc/security/"
- name: Configure master key
become: yes
copy:
dest: "{{ xray_home }}/var/etc/security/master.key"
content: |
{{ master_key }}
owner: "{{ xray_user }}"
group: "{{ xray_group }}"
mode: 0640
- name: Setup rabbitmq
import_tasks: rabbitmq/setup/RedHat.yml
when: ansible_os_family == 'RedHat'
- name: Setup rabbitmq
import_tasks: rabbitmq/setup/Debian.yml
when: ansible_os_family == 'Debian'
- name: Check if install.sh wrapper script exist
become: yes
stat:
path: "{{ xray_install_script_path }}/install.sh"
register: install_wrapper_script
- name: Include interactive installer scripts
include_vars: script/archive.yml
- name: Install xray
include_tasks: expect.yml
vars:
exp_executable_cmd: "./install.sh -u {{ xray_user }} -g {{ xray_group }}"
exp_dir: "{{ xray_install_script_path }}"
exp_scenarios: "{{ xray_installer_scenario['main'] }}"
args:
apply:
environment:
YQ_PATH: "{{ xray_thirdparty_path }}/yq"
when: install_wrapper_script.stat.exists
ignore_errors: yes
- name: Configure rabbitmq config
become: yes
template:
src: "rabbitmq.conf.j2"
dest: "{{ xray_home }}/app/bin/rabbitmq/rabbitmq.conf"
notify: restart xray
- name: Configure systemyaml
become: yes
template:
src: "{{ xray_system_yaml_template }}"
dest: "{{ xray_home }}/var/etc/system.yaml"
notify: restart xray
- name: Configure installer info
become: yes
template:
src: installer-info.json.j2
dest: "{{ xray_home }}/var/etc/info/installer-info.json"
notify: restart xray
- name: Ensure permissions are correct
become: yes
file:
path: "{{ jfrog_home_directory }}"
state: directory
owner: "{{ xray_user }}"
group: "{{ xray_group }}"
recurse: yes
- name: Install xray as a service
become: yes
shell: |
{{ xray_archive_service_cmd }}
args:
chdir: "{{ xray_install_script_path }}"
register: check_service_status_result
ignore_errors: yes
# Flush queued handlers now so Xray is restarted before the health check runs.
- name: Restart xray
  meta: flush_handlers
# Poll the router health endpoint until Xray reports healthy
# (25 retries x 5s delay, 130s per-request timeout).
- name: Wait for xray to be fully deployed
  uri:
    url: http://127.0.0.1:8082/router/api/v1/system/health
    timeout: 130
  register: result
  until: result.status == 200
  retries: 25
  delay: 5

View File

@@ -1,6 +0,0 @@
- name: perform installation
include_tasks: "install.yml"
when: not xray_upgrade_only
- name: perform upgrade
include_tasks: "upgrade.yml"
when: xray_upgrade_only

View File

@@ -1,63 +0,0 @@
- name: Check rabbitmq cluster_keepalive_interval option
become: yes
ignore_errors: yes
shell: |
./rabbitmqctl --erlang-cookie {{ xray_rabbitmq_default_cookie }} eval \
'application:get_env(rabbit, cluster_keepalive_interval).' \
| tr -d '}{' | cut -d ',' -f2
args:
chdir: "{{ xray_home }}/app/third-party/rabbitmq/sbin/"
environment:
LC_ALL: en_US.UTF-8
LC_CTYPE: en_US.UTF-8
register: cluster_keepalive_interval_value
- name: Check rabbitmq handshake_timeout option
become: yes
ignore_errors: yes
shell: |
./rabbitmqctl --erlang-cookie {{ xray_rabbitmq_default_cookie }} eval \
'application:get_env(rabbit, handshake_timeout).' \
| tr -d '}{' | cut -d ',' -f2
args:
chdir: "{{ xray_home }}/app/third-party/rabbitmq/sbin/"
environment:
LC_ALL: en_US.UTF-8
LC_CTYPE: en_US.UTF-8
register: handshake_timeout_value
- name: Check rabbitmq vm_memory_high_watermark.relative option
become: yes
ignore_errors: yes
shell: |
./rabbitmqctl --erlang-cookie {{ xray_rabbitmq_default_cookie }} eval \
'application:get_env(rabbit, vm_memory_high_watermark).' \
| tr -d '}{' | cut -d ',' -f2
args:
chdir: "{{ xray_home }}/app/third-party/rabbitmq/sbin/"
environment:
LC_ALL: en_US.UTF-8
LC_CTYPE: en_US.UTF-8
register: vm_memory_high_watermark_relative_value
- name: Store result
include_role:
name: report
vars:
stop_testing_if_fail: false
test_description: "{{ test_ext_description }}Check rabbitmq custom options values. INST-775"
test_host: "{{ inventory_hostname }}"
test_result: >-
{{
vm_memory_high_watermark_relative_value.stdout == rabbitmq_custom_values['vm_memory_high_watermark']
and cluster_keepalive_interval_value.stdout == rabbitmq_custom_values['cluster_keepalive_interval']
and handshake_timeout_value.stdout == rabbitmq_custom_values['handshake_timeout']
}}
report_action: "store-result"
log_result: >-
{{
{}
| combine({'handshake_timeout': {'real': handshake_timeout_value.stdout, 'expected': rabbitmq_custom_values.handshake_timeout}})
| combine({'vm_memory_high_watermark': {'real': vm_memory_high_watermark_relative_value.stdout, 'expected': rabbitmq_custom_values.vm_memory_high_watermark}})
| combine({'cluster_keepalive_interval': {'real': cluster_keepalive_interval_value.stdout, 'expected': rabbitmq_custom_values.cluster_keepalive_interval}})
}}

View File

@@ -1,102 +0,0 @@
- name: Find libssl package
find:
paths: "{{ xray_home }}/app/third-party/rabbitmq/"
patterns: "^libssl.+\\.deb$"
use_regex: yes
file_type: file
register: check_libssl_package_result
- name: Set libssl package file name
set_fact:
xray_libssl_package: "{{ check_libssl_package_result.files[0].path }}"
when: check_libssl_package_result.matched > 0
- name: Install libssl package
become: yes
apt:
deb: "{{ xray_libssl_package }}"
register: install_libssl_package_result
when:
- ansible_distribution_release == 'xenial'
- check_libssl_package_result.matched > 0
- name: Find socat package
find:
paths: "{{ xray_home }}/app/third-party/rabbitmq/"
patterns: "^socat.+\\.deb$"
use_regex: yes
file_type: file
register: check_socat_package_result
- name: Set socat package file name
set_fact:
xray_socat_package: "{{ check_socat_package_result.files[0].path }}"
when: check_socat_package_result.matched > 0
- name: Install socat package
become: yes
ignore_errors: yes
apt:
deb: "{{ xray_socat_package }}"
register: install_socat_package_result
when: check_socat_package_result.matched > 0
- name: Find erlang package
find:
paths: "{{ xray_home }}/app/third-party/rabbitmq/"
patterns: "^(esl-)?erlang.+{{ ansible_distribution_release }}.+\\.deb$"
use_regex: yes
file_type: file
register: check_erlang_package_result
- name: Set erlang package file name
set_fact:
xray_erlang_package: "{{ check_erlang_package_result.files[0].path }}"
when: check_erlang_package_result.matched > 0
- name: Install erlang package
become: yes
apt:
deb: "{{ xray_erlang_package }}"
register: install_erlang_package_result
when: check_erlang_package_result.matched > 0
- name: Find db5-util package
find:
paths: "{{ xray_home }}/app/third-party/misc/"
patterns: ["{{ xray_db_util_search_filter[linux_distro]['db5'] }}"]
use_regex: yes
file_type: file
register: check_db5_util_package_result
- name: Set db5-util package file name
set_fact:
xray_db5_util_package: "{{ check_db5_util_package_result.files[0].path }}"
when: check_db5_util_package_result.matched > 0
- name: Install db5-util package
become: yes
apt:
deb: "{{ xray_db5_util_package }}"
register: install_db5_util_package_result
when: check_db5_util_package_result.matched > 0
- name: Find db-util package
find:
paths: "{{ xray_home }}/app/third-party/misc/"
patterns: ["{{ xray_db_util_search_filter[linux_distro]['db'] }}"]
use_regex: yes
file_type: file
register: check_db_util_package_result
- name: Set db-util package file name
set_fact:
xray_db_util_package: "{{ check_db_util_package_result.files[0].path }}"
when: check_db_util_package_result.matched > 0
- name: Install db-util package
become: yes
apt:
deb: "{{ xray_db_util_package }}"
register: install_db_util_package_result
when: check_db_util_package_result.matched > 0

View File

@@ -1,59 +0,0 @@
- name: Set package prefix
set_fact:
rhel_package_prefix: >-
{%- if linux_distro in ['centos7','rhel7'] -%}
el7
{%- elif linux_distro in ['centos8','rhel8'] -%}
el8
{%- endif -%}
- debug:
msg: "rhel_package_prefix: {{ rhel_package_prefix }}"
- name: Find socat package
become: yes
find:
paths: "{{ xray_home }}/app/third-party/rabbitmq/"
patterns: "^socat.+{{ rhel_package_prefix }}.+\\.rpm$"
use_regex: yes
file_type: file
register: check_socat_package_result
- name: Set socat package file name
set_fact:
xray_socat_package: "{{ check_socat_package_result.files[0].path }}"
when: check_socat_package_result.matched > 0
- name: Install socat package
become: yes
yum:
name: "{{ xray_socat_package }}"
state: present
vars:
ansible_python_interpreter: "{{ yum_python_interpreter }}"
register: install_socat_package_result
when: check_socat_package_result.matched > 0
- name: Find erlang package
become: yes
find:
paths: "{{ xray_home }}/app/third-party/rabbitmq/"
patterns: "^(esl-)?erlang.+{{ rhel_package_prefix }}.+\\.rpm$"
use_regex: yes
file_type: file
register: check_erlang_package_result
- name: Set erlang package file name
set_fact:
xray_erlang_package: "{{ check_erlang_package_result.files[0].path }}"
when: check_erlang_package_result.matched > 0
- name: Install erlang package
become: yes
yum:
name: "{{ xray_erlang_package }}"
state: present
vars:
ansible_python_interpreter: "{{ yum_python_interpreter }}"
register: install_erlang_package_result
when: check_erlang_package_result.matched > 0

View File

@@ -1,12 +0,0 @@
- name: Get rabbitmq ha cluster status
become: yes
ignore_errors: yes
shell: |
./rabbitmqctl --erlang-cookie {{ xray_rabbitmq_default_cookie }} \
--formatter json cluster_status | jq .
args:
chdir: "{{ xray_home }}/app/third-party/rabbitmq/sbin/"
environment:
LC_ALL: en_US.UTF-8
LC_CTYPE: en_US.UTF-8
register: ha_rabbitmq_cluster_status

View File

@@ -1,20 +0,0 @@
- name: Find erlang package
find:
paths: "{{ xray_home }}/app/third-party/rabbitmq/"
patterns: "^(esl-)?erlang.+{{ ansible_distribution_release }}.+\\.deb$"
use_regex: yes
file_type: file
register: check_erlang_package_result
- name: Set erlang package file name
set_fact:
xray_erlang_package: "{{ check_erlang_package_result.files[0].path }}"
when: check_erlang_package_result.matched > 0
# Install the bundled erlang .deb found above. `deb:` installs a local
# package and only supports the "present" state, so `state:` is omitted —
# matching the equivalent task in rabbitmq/setup/Debian.yml.
- name: Install erlang package
  become: true
  apt:
    deb: "{{ xray_erlang_package }}"
  register: install_erlang_package_result
  when: check_erlang_package_result.matched > 0

View File

@@ -1,32 +0,0 @@
- name: Set package prefix
set_fact:
rhel_package_prefix: >-
{%- if linux_distro in ['centos7','rhel7'] -%}
el7
{%- elif linux_distro in ['centos8','rhel8'] -%}
el8
{%- endif -%}
- name: Find erlang package
become: yes
find:
paths: "{{ xray_home }}/app/third-party/rabbitmq/"
patterns: "^(esl-)?erlang.+{{ rhel_package_prefix }}.+\\.rpm$"
use_regex: yes
file_type: file
register: check_erlang_package_result
- name: Set erlang package file name
set_fact:
xray_erlang_package: "{{ check_erlang_package_result.files[0].path }}"
when: check_erlang_package_result.matched > 0
- name: Install erlang package
become: yes
yum:
name: "{{ xray_erlang_package }}"
state: present
vars:
ansible_python_interpreter: "{{ yum_python_interpreter }}"
register: install_erlang_package_result
when: check_erlang_package_result.matched > 0

View File

@@ -1,112 +0,0 @@
---
- debug:
msg: "Performing upgrade of Xray version to {{ xray_version }}..."
- name: stop xray
become: yes
systemd:
name: "{{ xray_daemon }}"
state: stopped
- name: download xray for upgrade
become: yes
unarchive:
src: "{{ xray_tar }}"
dest: "{{ jfrog_home_directory }}"
remote_src: yes
owner: "{{ xray_user }}"
group: "{{ xray_group }}"
creates: "{{ xray_untar_home }}"
register: downloadxray
until: downloadxray is succeeded
retries: 3
- name: Delete xray app
become: yes
file:
path: "{{ xray_home }}/app"
state: absent
- name: Copy new app to xray app
become: yes
command: "cp -r {{ xray_untar_home }}/app/. {{ xray_home }}/app"
- name: Upgrade rabbitmq
import_tasks: rabbitmq/upgrade/RedHat.yml
when: ansible_os_family == 'RedHat'
- name: Upgrade rabbitmq
import_tasks: rabbitmq/upgrade/Debian.yml
when: ansible_os_family == 'Debian'
- name: Check if install.sh wrapper script exist
become: yes
stat:
path: "{{ xray_install_script_path }}/install.sh"
register: install_wrapper_script
- name: Include interactive installer scripts
include_vars: script/archive.yml
- name: Install xray
include_tasks: expect.yml
vars:
exp_executable_cmd: "./install.sh -u {{ xray_user }} -g {{ xray_group }}"
exp_dir: "{{ xray_install_script_path }}"
exp_scenarios: "{{ xray_installer_scenario['main'] }}"
args:
apply:
environment:
YQ_PATH: "{{ xray_thirdparty_path }}/yq"
when: install_wrapper_script.stat.exists
ignore_errors: yes
- name: Configure rabbitmq config
become: yes
template:
src: "rabbitmq.conf.j2"
dest: "{{ xray_home }}/app/bin/rabbitmq/rabbitmq.conf"
notify: restart xray
- name: Configure systemyaml
become: yes
template:
src: "{{ xray_system_yaml_template }}"
dest: "{{ xray_home }}/var/etc/system.yaml"
notify: restart xray
- name: configure installer info
become: yes
template:
src: installer-info.json.j2
dest: "{{ xray_home }}/var/etc/info/installer-info.json"
notify: restart xray
- name: Ensure permissions are correct
become: yes
file:
path: "{{ jfrog_home_directory }}"
state: directory
owner: "{{ xray_user }}"
group: "{{ xray_group }}"
recurse: yes
- name: Install xray as a service
become: yes
shell: |
{{ xray_archive_service_cmd }}
args:
chdir: "{{ xray_install_script_path }}"
register: check_service_status_result
ignore_errors: yes
# Flush queued handlers now so Xray is restarted before the health check runs.
- name: Restart xray
  meta: flush_handlers
# Poll the router health endpoint until Xray reports healthy
# (25 retries x 5s delay, 130s per-request timeout).
- name: Wait for xray to be fully deployed
  uri:
    url: http://127.0.0.1:8082/router/api/v1/system/health
    timeout: 130
  register: result
  until: result.status == 200
  retries: 25
  delay: 5

View File

@@ -1,9 +0,0 @@
{{ ansible_managed | comment }}
{
"productId": "Ansible_Xray/{{ platform_collection_version }}-{{ xray_version }}",
"features": [
{
"featureId": "Channel/{{ ansible_marketplace }}"
}
]
}

View File

@@ -1,9 +0,0 @@
loopback_users.guest = false
listeners.tcp.default = 5672
hipe_compile = false
management.listener.port = 15672
management.listener.ssl = false
cluster_partition_handling = autoheal
default_user = {{ xray_rabbitmq_user }}
default_pass = {{ xray_rabbitmq_password }}

Some files were not shown because too many files have changed in this diff Show More