[Ansible] JFrog Platform 10.0.1 release (#166)

Ram Mohan Rao Chukka
2021-10-22 13:13:22 +05:30
committed by GitHub
parent 8d5ff07819
commit 37bab36884
78 changed files with 876 additions and 731 deletions

View File

@@ -1,7 +1,7 @@
# defaults file for artifactory
# The version of artifactory to install
artifactory_version: 7.25.7
artifactory_version: 7.27.6
# Set this to true when SSL is enabled (to use the artifactory_nginx_ssl role); defaults to false (implies artifactory uses the artifactory_nginx role)
artifactory_nginx_ssl_enabled: false
@@ -9,10 +9,7 @@ artifactory_nginx_ssl_enabled: false
# Set this to false when nginx is disabled; defaults to true (implies artifactory uses the artifactory_nginx role)
artifactory_nginx_enabled: true
# Provide single node license
# artifactory_single_license:
# Provide individual (HA) licenses file separated by new line and 2-space indentation and set artifactory_ha_enabled: true.
# Provide a single license, or individual HA licenses separated by new lines with 2-space indentation; for HA, also set artifactory_ha_enabled: true.
# Example: Replace <license_1> , <license_2> , <license_3> with original licenses
# artifactory_licenses: |-
# <license_1>
@@ -27,6 +24,9 @@ artifactory_ha_enabled: false
# By default, all nodes are primary (CNHA) - https://www.jfrog.com/confluence/display/JFROG/High+Availability#HighAvailability-Cloud-NativeHighAvailability
artifactory_taskaffinity: any
# To enable mission-control in artifactory (>= 7.27.x); applicable only with an E+ license
artifactory_mc_enabled: true
# The location where Artifactory should install
jfrog_home_directory: /opt/jfrog
@@ -43,7 +43,7 @@ artifactory_untar_home: "{{ jfrog_home_directory }}/artifactory-{{ artifactory_f
# Timeout in seconds for URL request
artifactory_download_timeout: 10
postgres_driver_version: 42.2.23
postgres_driver_version: 42.2.24
postgres_driver_download_url: https://repo1.maven.org/maven2/org/postgresql/postgresql/{{ postgres_driver_version }}/postgresql-{{ postgres_driver_version }}.jar
artifactory_user: artifactory
@@ -88,6 +88,8 @@ artifactory_systemyaml: |-
url: "{{ artifactory_db_url }}"
username: "{{ artifactory_db_user }}"
password: "{{ artifactory_db_password }}"
mc:
enabled: {{ artifactory_mc_enabled }}
router:
entrypoints:
internalPort: 8046
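With the single-license variable folded into `artifactory_licenses`, one block now covers both standalone and HA installs. A minimal group_vars sketch, assuming an E+ license for the new `artifactory_mc_enabled` flag (the `<license_n>` entries are placeholders, exactly as in the comment above):

```
artifactory_ha_enabled: true           # leave false for a single node with one license entry
artifactory_mc_enabled: true           # new in 7.27.x, requires an E+ license
artifactory_licenses: |-
  <license_1>
  <license_2>
  <license_3>
```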

View File

@@ -1,5 +1,13 @@
- name: Install prerequisite packages
become: yes
yum:
name: net-tools
name: ['net-tools', '{{ selinux_policy_package }}']
state: present
- name: Configure SELinux context
become: yes
sefcontext:
target: "{{ jfrog_home_directory }}/artifactory/app/bin(/.*)?"
setype: bin_t
state: present
when: ansible_selinux.status == 'enabled'
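The `sefcontext` module (hence the new `selinux_policy_package` dependency above) only records the file-context rule, the equivalent of `semanage fcontext -a -t bin_t`; it does not relabel files already on disk, which is why tasks/main.yml later runs `restorecon`. A minimal sketch of the pair, with the caveat that gating both steps on `ansible_selinux.status` is an assumption here (the role itself gates the restorecon task on `ansible_distribution == 'RedHat'`):

```
- name: Persist the bin_t context rule for the Artifactory app bin directory
  become: yes
  sefcontext:
    target: "{{ jfrog_home_directory }}/artifactory/app/bin(/.*)?"
    setype: bin_t
    state: present
  when: ansible_selinux.status == 'enabled'

- name: Apply the recorded context to files already on disk
  become: yes
  command: restorecon -R -v "{{ jfrog_home_directory }}/artifactory/app/bin"
  when: ansible_selinux.status == 'enabled'
```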

View File

@@ -1,3 +1,14 @@
- name: Include distro specific variables
include_vars: "{{ distro_vars_file }}"
vars:
distro_vars_file: "{{ lookup('first_found', distro_vars, errors='ignore') }}"
distro_vars:
files:
- "vars/distro/{{ ansible_distribution ~ ansible_distribution_major_version }}.yml"
- "vars/distro/{{ ansible_distribution }}.yml"
- "vars/distro/{{ ansible_os_family }}.yml"
- "vars/distro/default.yml"
- name: Install prerequisite packages
include_tasks: "{{ ansible_os_family }}.yml"
@@ -53,10 +64,10 @@
unarchive:
src: "{{ jfrog_home_directory }}/{{ artifactory_tar_file_name }}"
dest: "{{ jfrog_home_directory }}"
remote_src: true
owner: "{{ artifactory_user }}"
group: "{{ artifactory_group }}"
creates: "{{ artifactory_untar_home }}"
remote_src: true
when: download_artifactory is succeeded
- name: Check if app directory exists
@@ -70,6 +81,9 @@
copy:
src: "{{ artifactory_untar_home }}/"
dest: "{{ artifactory_home }}"
owner: "{{ artifactory_user }}"
group: "{{ artifactory_group }}"
mode: 0755
remote_src: yes
when: not app_dir_check.stat.exists
@@ -101,7 +115,7 @@
mode: 0644
when:
- artifactory_systemyaml is defined
- artifactory_systemyaml|length > 0
- artifactory_systemyaml | length > 0
- artifactory_systemyaml_override or (not systemyaml.stat.exists)
notify: restart artifactory
@@ -133,18 +147,7 @@
- artifactory_binarystore | length > 0
notify: restart artifactory
- name: Configure single license
become: yes
template:
src: artifactory.lic.j2
dest: "{{ artifactory_home }}/var/etc/artifactory/artifactory.lic"
mode: 0644
when:
- artifactory_single_license is defined
- artifactory_single_license|length > 0
notify: restart artifactory
- name: Configure HA licenses
- name: Configure artifactory license(s)
become: yes
template:
src: artifactory.cluster.license.j2
@@ -152,7 +155,7 @@
mode: 0644
when:
- artifactory_licenses is defined
- artifactory_licenses|length > 0
- artifactory_licenses | length > 0
notify: restart artifactory
- name: Check if database driver exists
@@ -173,6 +176,12 @@
- not database_driver.stat.exists
notify: restart artifactory
- name: Run restore context to reload selinux
become: yes
shell: |
restorecon -R -v "{{ jfrog_home_directory }}/artifactory/app/bin"
when: ansible_distribution == 'RedHat'
- name: Create artifactory service
become: yes
command: "{{ artifactory_home }}/app/bin/installService.sh"

View File

@@ -1,4 +1,3 @@
---
- name: Check if artifactory tar already exists
become: yes
stat:
@@ -27,12 +26,13 @@
creates: "{{ artifactory_untar_home }}"
when: download_artifactory is succeeded
- name: stop artifactory
- name: Stop artifactory
meta: flush_handlers
- name: Ensure jfrog_home_directory exists
become: yes
file:
mode: 0755
path: "{{ jfrog_home_directory }}"
state: directory
@@ -56,7 +56,7 @@
mode: 0644
when:
- artifactory_single_license is defined
- artifactory_single_license|length > 0
- artifactory_single_license | length > 0
notify: restart artifactory
- name: Configure HA licenses
@@ -70,13 +70,13 @@
- artifactory_licenses | length > 0
notify: restart artifactory
- name: Check if database driver exists
- name: Check if jdbc driver exists
become: yes
stat:
path: "{{ artifactory_home }}/app/artifactory/tomcat/lib/jf_postgresql-{{ postgres_driver_version }}.jar"
register: database_driver
- name: Download database driver
- name: Download jdbc driver
become: yes
get_url:
url: "{{ postgres_driver_download_url }}"
@@ -104,16 +104,16 @@
mode: 0644
when:
- artifactory_binarystore is defined
- artifactory_binarystore|length > 0
- artifactory_binarystore | length > 0
notify: restart artifactory
- name: Check if systemyaml exists
- name: Check if system.yaml exists
become: yes
stat:
path: "{{ artifactory_home }}/var/etc/system.yaml"
register: systemyaml
- name: Configure systemyaml
- name: Configure system.yaml
become: yes
template:
src: "{{ artifactory_system_yaml_template }}"
@@ -121,7 +121,7 @@
mode: 0644
when:
- artifactory_systemyaml is defined
- artifactory_systemyaml|length > 0
- artifactory_systemyaml | length > 0
- artifactory_systemyaml_override or (not systemyaml.stat.exists)
notify: restart artifactory

View File

@@ -1 +0,0 @@
{{ artifactory_single_license }}

View File

@@ -0,0 +1 @@
selinux_policy_package: policycoreutils-python

View File

@@ -0,0 +1 @@
selinux_policy_package: policycoreutils-python

View File

@@ -0,0 +1 @@
selinux_policy_package: python3-policycoreutils

View File

@@ -1,5 +1,5 @@
# platform collection version
platform_collection_version: 7.25.7
platform_collection_version: 10.0.1
# indicates where this collection was downloaded from (galaxy, automation_hub, standalone)
ansible_marketplace: galaxy

View File

@@ -1,7 +1,7 @@
# defaults file for distribution
# The version of distribution to install
distribution_version: 2.9.2
distribution_version: 2.9.3
# whether to enable HA
distribution_ha_enabled: false

View File

@@ -39,10 +39,10 @@
unarchive:
src: "{{ jfrog_home_directory }}/{{ distribution_tar_file_name }}"
dest: "{{ jfrog_home_directory }}"
remote_src: true
owner: "{{ distribution_user }}"
group: "{{ distribution_group }}"
creates: "{{ distribution_untar_home }}"
remote_src: true
when: download_distribution is succeeded
- name: Check if app directory exists
@@ -56,6 +56,9 @@
copy:
src: "{{ distribution_untar_home }}/"
dest: "{{ distribution_home }}"
owner: "{{ distribution_user }}"
group: "{{ distribution_group }}"
mode: 0755
remote_src: yes
when: not app_dir_check.stat.exists
@@ -125,7 +128,7 @@
mode: 0644
when:
- distribution_systemyaml is defined
- distribution_systemyaml|length > 0
- distribution_systemyaml | length > 0
- distribution_systemyaml_override or (not systemyaml.stat.exists)
notify: restart distribution

View File

@@ -55,7 +55,7 @@
mode: 0644
when:
- distribution_systemyaml is defined
- distribution_systemyaml|length > 0
- distribution_systemyaml | length > 0
- distribution_systemyaml_override or (not systemyaml.stat.exists)
notify: restart distribution

View File

@@ -1,5 +1,5 @@
# platform collection version
platform_collection_version: 7.25.7
platform_collection_version: 10.0.1
# indicates where this collection was downloaded from (galaxy, automation_hub, standalone)
ansible_marketplace: galaxy

View File

@@ -0,0 +1,26 @@
# Insight
The insight role will install the Insight software onto the host. An Artifactory server and a PostgreSQL database are required.
### Role Variables
* _insight_upgrade_only_: Perform a software upgrade only. Default is false.
Additional variables can be found in [defaults/main.yml](./defaults/main.yml).
## Example Playbook
```
---
- hosts: insight_servers
roles:
- insight
```
## Upgrades
The insight role supports software upgrades. To use the role to perform a software upgrade only, set the _insight_upgrade_only_ variable and specify the version. See the following example.
```
- hosts: insight_servers
vars:
insight_version: "{{ lookup('env', 'insight_version_upgrade') }}"
insight_upgrade_only: true
roles:
- insight
```
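Beyond the defaults, the role references a few variables that have no default value (`jfrog_url`, `master_key`, `join_key`, and the `insight_db_*` connection settings). A hedged sketch of supplying them; all values below are placeholders, and the `postgresql`/`org.postgresql.Driver` pair is an assumption for a PostgreSQL backend:

```
- hosts: insight_servers
  vars:
    jfrog_url: https://artifactory.example.com
    master_key: "{{ vault_master_key }}"
    join_key: "{{ vault_join_key }}"
    insight_db_type: postgresql
    insight_db_driver: org.postgresql.Driver
    insight_db_url: jdbc:postgresql://db.example.com:5432/insight
    insight_db_user: insight
    insight_db_password: "{{ vault_insight_db_password }}"
  roles:
    - insight
```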

View File

@@ -0,0 +1,87 @@
# defaults file for insight
# The version of insight to install
insight_version: 1.0.1
# whether to enable HA
insight_ha_enabled: false
insight_ha_node_type: master
# The location where insight should install
jfrog_home_directory: /opt/jfrog
# The remote insight download file
insight_tar_file_name: jfrog-insight-{{ insight_version }}-linux.tar.gz
insight_tar: https://releases.jfrog.io/artifactory/jfrog-insight/linux/{{ insight_version }}/{{ insight_tar_file_name }}
# Timeout in seconds for URL request
insight_download_timeout: 10
#The insight install directory
insight_untar_home: "{{ jfrog_home_directory }}/jfrog-insight-{{ insight_version }}-linux"
insight_home: "{{ jfrog_home_directory }}/insight"
insight_install_script_path: "{{ insight_home }}/app/bin"
insight_thirdparty_path: "{{ insight_home }}/app/third-party"
insight_archive_service_cmd: "{{ insight_install_script_path }}/installService.sh"
insight_service_file: /lib/systemd/system/insight.service
#insight users and groups
insight_user: insight
insight_group: insight
insight_uid: 1040
insight_gid: 1040
insight_daemon: insight
# Insight ElasticSearch Details
es_uid: 1060
es_gid: 1060
insight_es_conf_base: "/etc/elasticsearch"
insight_es_user: admin
insight_es_password: admin
insight_es_url: "http://localhost:9200"
insight_es_transport_port: 9300
insight_es_home: "/usr/share/elasticsearch"
insight_es_data_dir: "/var/lib/elasticsearch"
insight_es_log_dir: "/var/log/elasticsearch"
insight_es_java_home: "/usr/share/elasticsearch/jdk"
insight_es_script_path: "/usr/share/elasticsearch/bin"
insight_es_searchgaurd_home: "/usr/share/elasticsearch/plugins/search-guard-7"
# if this is an upgrade
insight_upgrade_only: false
insight_system_yaml_template: system.yaml.j2
# Provide systemyaml content below with 2-space indentation
insight_systemyaml: |-
configVersion: 1
shared:
jfrogUrl: {{ jfrog_url }}
node:
ip: {{ ansible_host }}
id: {{ ansible_hostname }}
database:
type: "{{ insight_db_type }}"
driver: "{{ insight_db_driver }}"
url: "{{ insight_db_url }}"
username: "{{ insight_db_user }}"
elasticsearch:
unicastFile: {{ insight_es_conf_base }}/config/unicast_hosts.txt
password: {{ insight_es_password }}
url: {{ insight_es_url }}
username: {{ insight_es_user }}
external: true
security:
joinKey: {{ join_key }}
router:
entrypoints:
internalPort: 8046
# Note: insight_systemyaml_override is false by default; set it to true if you want to override the default insight_systemyaml
insight_systemyaml_override: false

View File

@@ -0,0 +1,13 @@
---
# handlers file for insight
- name: restart insight
become: yes
systemd:
name: "{{ insight_daemon }}"
state: restarted
- name: stop insight
become: yes
systemd:
name: "{{ insight_daemon }}"
state: stopped

View File

@@ -3,7 +3,7 @@ dependencies: []
galaxy_info:
author: "JFrog Maintainers Team <installers@jfrog.com>"
description: "The missioncontrol role will install missioncontrol software onto the host. An Artifactory server and Postgress database is required."
description: "The insight role will install insight software onto the host. An Artifactory server and Postgress database are required."
company: JFrog
issue_tracker_url: "https://github.com/jfrog/JFrog-Cloud-Installers/issues"
license: license (Apache-2.0)
@@ -23,5 +23,5 @@ galaxy_info:
- stretch
- buster
galaxy_tags:
- missioncontrol
- insight
- jfrog

View File

@@ -1,7 +1,7 @@
- name: Install prerequisite packages
become: yes
apt:
name: ["expect", "locales"]
name: ["expect", "locales", "acl"]
state: present
update_cache: yes
cache_valid_time: 3600

View File

@@ -1,5 +1,5 @@
- name: Install prerequisite packages
become: yes
yum:
name: expect
name: ["expect", "acl"]
state: present

View File

@@ -0,0 +1,170 @@
- name: Install prerequisite packages
include_tasks: "{{ ansible_os_family }}.yml"
- name: Ensure group insight exist
become: yes
group:
name: "{{ insight_group }}"
state: present
- name: Ensure user insight exist
become: yes
user:
name: "{{ insight_user }}"
group: "{{ insight_group }}"
create_home: yes
home: "{{ insight_home }}"
shell: /bin/bash
state: present
- name: Check if insight tar already exists
become: yes
stat:
path: "{{ jfrog_home_directory }}/{{ insight_tar_file_name }}"
register: insight_tar_check
- name: Download insight
become: yes
get_url:
url: "{{ insight_tar }}"
timeout: "{{ insight_download_timeout }}"
dest: "{{ jfrog_home_directory }}"
register: download_insight
until: download_insight is succeeded
retries: 3
when: not insight_tar_check.stat.exists
- name: Extract insight tar
become: yes
unarchive:
src: "{{ jfrog_home_directory }}/{{ insight_tar_file_name }}"
dest: "{{ jfrog_home_directory }}"
owner: "{{ insight_user }}"
group: "{{ insight_group }}"
creates: "{{ insight_untar_home }}"
remote_src: true
when: download_insight is succeeded
- name: Check if app directory exists
become: yes
stat:
path: "{{ insight_home }}/app"
register: app_dir_check
- name: Copy untar directory to insight home
become: yes
copy:
src: "{{ insight_untar_home }}/"
dest: "{{ insight_home }}"
owner: "{{ insight_user }}"
group: "{{ insight_group }}"
mode: 0755
remote_src: yes
when: not app_dir_check.stat.exists
- name: Create required directories
become: yes
file:
path: "{{ item }}"
state: directory
recurse: yes
owner: "{{ insight_user }}"
group: "{{ insight_group }}"
loop:
- "{{ insight_home }}/var/etc"
- "{{ insight_home }}/var/etc/security/"
- "{{ insight_home }}/var/etc/info/"
- name: Configure master key
become: yes
copy:
dest: "{{ insight_home }}/var/etc/security/master.key"
content: "{{ master_key }}"
owner: "{{ insight_user }}"
group: "{{ insight_group }}"
mode: 0640
- name: Setup elasticsearch
import_tasks: setup-elasticsearch.yml
- name: Check if install.sh wrapper script exist
become: yes
stat:
path: "{{ insight_install_script_path }}/install.sh"
register: install_wrapper_script
- name: Include interactive installer scripts
include_vars: script/archive.yml
- name: Install Insight
include_tasks: expect.yml
vars:
exp_executable_cmd: "./install.sh -u {{ insight_user }} -g {{ insight_group }}"
exp_dir: "{{ insight_install_script_path }}"
exp_scenarios: "{{ insight_installer_scenario['main'] }}"
args:
apply:
environment:
YQ_PATH: "{{ insight_thirdparty_path }}/yq"
when: install_wrapper_script.stat.exists
- name: Configure installer info
become: yes
template:
src: installer-info.json.j2
dest: "{{ insight_home }}/var/etc/info/installer-info.json"
owner: "{{ insight_user }}"
group: "{{ insight_group }}"
mode: 0644
notify: restart insight
- name: Check if system.yaml exists
become: yes
stat:
path: "{{ insight_home }}/var/etc/system.yaml"
register: systemyaml
- name: Configure system.yaml
become: yes
template:
src: "{{ insight_system_yaml_template }}"
dest: "{{ insight_home }}/var/etc/system.yaml"
owner: "{{ insight_user }}"
group: "{{ insight_group }}"
mode: 0644
when:
- insight_systemyaml is defined
- insight_systemyaml | length > 0
- insight_systemyaml_override or (not systemyaml.stat.exists)
notify: restart insight
- name: Update correct permissions
become: yes
file:
path: "{{ insight_home }}"
state: directory
recurse: yes
owner: "{{ insight_user }}"
group: "{{ insight_group }}"
- name: Install insight as a service
become: yes
command: "{{ insight_archive_service_cmd }}"
args:
chdir: "{{ insight_install_script_path }}"
creates: "{{ insight_service_file }}"
register: check_service_status_result
- name: Restart insight
meta: flush_handlers
- name: Make sure insight is up and running
uri:
url: http://127.0.0.1:8082/router/api/v1/system/health
timeout: 130
status_code: 200
register: result
until: result is succeeded
retries: 25
delay: 5
when: not ansible_check_mode
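The `expect.yml` include that drives the interactive installer is not part of this diff, so the following is only a sketch of the idea: the `exp_scenarios` list of `expecting`/`sending` pairs can be collapsed into the prompt-to-answer map that `ansible.builtin.expect` wants. That module needs pexpect on the target, and since the role installs the `expect` package the real wrapper may instead drive a TCL expect script:

```
- name: Run the interactive installer non-interactively
  become: yes
  ansible.builtin.expect:
    command: "{{ exp_executable_cmd }}"
    chdir: "{{ exp_dir }}"
    timeout: 600
    responses: "{{ exp_scenarios | items2dict(key_name='expecting', value_name='sending') }}"
```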

View File

@@ -1,11 +1,11 @@
- name: Perform installation
include_tasks: "install.yml"
when:
- mc_enabled
- not mc_upgrade_only
- insight_enabled
- not insight_upgrade_only
- name: Perform upgrade
include_tasks: "upgrade.yml"
when:
- mc_enabled
- mc_upgrade_only
- insight_enabled
- insight_upgrade_only
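The gate variables renamed here are plain booleans; a hypothetical group_vars entry would look like:

```
# group_vars/insight_servers.yml (illustrative)
insight_enabled: true         # run the role at all
insight_upgrade_only: false   # false -> install.yml, true -> upgrade.yml
```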

View File

@@ -10,7 +10,7 @@
name: elasticsearch
group: elasticsearch
create_home: yes
home: "{{ mc_es_home }}"
home: "{{ insight_es_home }}"
shell: /bin/bash
state: present
@@ -22,11 +22,12 @@
recurse: yes
owner: elasticsearch
group: elasticsearch
mode: 0644
loop:
- "{{ mc_es_conf_base }}"
- "{{ mc_es_data_dir }}"
- "{{ mc_es_log_dir }}"
- "{{ mc_es_home }}"
- "{{ insight_es_conf_base }}"
- "{{ insight_es_data_dir }}"
- "{{ insight_es_log_dir }}"
- "{{ insight_es_home }}"
- name: Set max file descriptors limit
become: yes
@@ -46,7 +47,6 @@
- name: Set vm.max_map_count in /etc/sysctl.conf
become: yes
ignore_errors: yes
sysctl:
name: vm.max_map_count
value: '262144'
@@ -55,7 +55,7 @@
- name: Find elasticsearch package
become: yes
find:
paths: "{{ mc_home }}/app/third-party/elasticsearch"
paths: "{{ insight_home }}/app/third-party/elasticsearch"
patterns: "^elasticsearch-.+\\.tar.gz$"
use_regex: yes
file_type: file
@@ -63,53 +63,49 @@
- name: Set elasticsearch package file name
set_fact:
mc_elasticsearch_package: "{{ check_elasticsearch_package_result.files[0].path }}"
insight_elasticsearch_package: "{{ check_elasticsearch_package_result.files[0].path }}"
when: check_elasticsearch_package_result.matched > 0
- name: Ensure elasticsearch home exists
become: yes
file:
path: "{{ mc_es_home }}"
path: "{{ insight_es_home }}"
state: directory
owner: elasticsearch
group: elasticsearch
mode: 0644
- name: Extract elasticsearch package
become: yes
ignore_errors: yes
unarchive:
src: "{{ mc_elasticsearch_package }}"
dest: "{{ mc_es_home }}"
src: "{{ insight_elasticsearch_package }}"
dest: "{{ insight_es_home }}"
remote_src: yes
extra_opts:
- --strip-components=1
owner: elasticsearch
group: elasticsearch
creates: "{{ mc_es_java_home }}"
creates: "{{ insight_es_java_home }}"
register: unarchive_result
when: check_elasticsearch_package_result.matched > 0
- name: Copy elasticsearch config files to ES_PATH_CONF dir
become: yes
copy:
src: "{{ mc_es_home }}/config/"
dest: "{{ mc_es_conf_base }}"
remote_src: yes
command: "cp -r {{ insight_es_home }}/config/. {{ insight_es_conf_base }}/"
when: unarchive_result.changed
- name: Remove elasticsearch config dir
become: yes
file:
path: "{{ mc_es_home }}/config"
path: "{{ insight_es_home }}/config"
state: absent
when: unarchive_result.changed
- name: Generate HA elasticsearch.yml template file
become: yes
ignore_errors: yes
template:
src: templates/ha/{{ mc_ha_node_type }}.elasticsearch.yml.j2
dest: "{{ mc_es_conf_base }}/elasticsearch.yml"
src: templates/ha/{{ insight_ha_node_type }}.elasticsearch.yml.j2
dest: "{{ insight_es_conf_base }}/elasticsearch.yml"
owner: elasticsearch
group: elasticsearch
mode: 0644
@@ -119,7 +115,7 @@
become: yes
template:
src: templates/elasticsearch.yml.j2
dest: "{{ mc_es_conf_base }}/elasticsearch.yml"
dest: "{{ insight_es_conf_base }}/elasticsearch.yml"
owner: elasticsearch
group: elasticsearch
mode: 0644
@@ -128,11 +124,11 @@
- name: Create empty unicast_hosts.txt file
become: yes
file:
path: "{{ mc_es_conf_base }}/unicast_hosts.txt"
path: "{{ insight_es_conf_base }}/unicast_hosts.txt"
state: touch
mode: 0664
owner: elasticsearch
group: elasticsearch
mode: 0664
- name: Setup searchguard plugin
import_tasks: setup-searchguard.yml
@@ -145,18 +141,21 @@
recurse: yes
owner: elasticsearch
group: elasticsearch
mode: 0755
loop:
- "{{ mc_es_conf_base }}"
- "{{ mc_es_data_dir }}"
- "{{ mc_es_log_dir }}"
- "{{ mc_es_home }}"
- "{{ insight_es_conf_base }}"
- "{{ insight_es_data_dir }}"
- "{{ insight_es_log_dir }}"
- "{{ insight_es_home }}"
- name: Start elasticsearch
become: yes
command: "su -c '{{ mc_es_script_path }}/elasticsearch -d' elasticsearch"
become_user: elasticsearch
shell: |
nohup {{ insight_es_script_path }}/elasticsearch -d
environment:
ES_JAVA_HOME: "{{ mc_es_java_home }}"
ES_PATH_CONF: "{{ mc_es_conf_base }}/"
ES_JAVA_HOME: "{{ insight_es_java_home }}"
ES_PATH_CONF: "{{ insight_es_conf_base }}/"
register: start_elasticsearch
when: unarchive_result.extract_results.rc | default(128) == 0
@@ -166,20 +165,21 @@
when: start_elasticsearch.changed
- name: Check if elasticsearch is running
wait_for:
wait_for:
host: localhost
port: "{{ mc_es_transport_port }}"
port: "{{ insight_es_transport_port }}"
delay: 5
connect_timeout: 1
- name: Init searchguard plugin
become: yes
become_user: elasticsearch
shell: |
./sgadmin.sh -p {{ mc_es_transport_port }} -cacert root-ca.pem \
-cert sgadmin.pem -key sgadmin.key -cd {{ mc_es_searchgaurd_home }}/sgconfig/ -nhnv -icl
./sgadmin.sh -p {{ insight_es_transport_port }} -cacert root-ca.pem \
-cert sgadmin.pem -key sgadmin.key -cd {{ insight_es_searchgaurd_home }}/sgconfig/ -nhnv -icl
args:
chdir: "{{ mc_es_searchgaurd_home }}/tools/"
chdir: "{{ insight_es_searchgaurd_home }}/tools/"
environment:
JAVA_HOME: "{{ mc_es_java_home }}"
JAVA_HOME: "{{ insight_es_java_home }}"
register: install_searchguard_result
when: check_searchguard_bundle_result.matched == 1
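After sgadmin has pushed the Search Guard config, the role only waits on the transport port; a hedged smoke test against the HTTP API, reusing the defaults from defaults/main.yml (admin/admin is just the shipped default), could look like:

```
- name: Verify elasticsearch answers over HTTP
  uri:
    url: "{{ insight_es_url }}"
    user: "{{ insight_es_user }}"
    password: "{{ insight_es_password }}"
    force_basic_auth: yes
    status_code: 200
  register: es_ping
  until: es_ping is succeeded
  retries: 10
  delay: 5
```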

View File

@@ -1,11 +1,11 @@
- name: Copy elasticsearch cert files
become: yes
copy:
mode: 0600
src: "files/searchguard/{{ item }}"
dest: "{{ mc_es_conf_base }}/{{ item }}"
dest: "{{ insight_es_conf_base }}/{{ item }}"
owner: elasticsearch
group: elasticsearch
mode: 0600
loop:
- "localhost.pem"
- "localhost.key"
@@ -14,7 +14,7 @@
- name: Find searchguard bundle
become: yes
find:
paths: "{{ mc_home }}/app/third-party/elasticsearch/"
paths: "{{ insight_home }}/app/third-party/elasticsearch/"
patterns: "^search-guard-.+\\.zip$"
use_regex: yes
file_type: file
@@ -24,22 +24,22 @@
become: yes
ignore_errors: yes
shell: |
{{ mc_es_script_path }}/elasticsearch-plugin install \
{{ insight_es_script_path }}/elasticsearch-plugin install \
-b file://{{ check_searchguard_bundle_result.files[0].path }}
environment:
ES_JAVA_HOME: "{{ mc_es_java_home }}"
ES_PATH_CONF: "{{ mc_es_conf_base }}/"
ES_JAVA_HOME: "{{ insight_es_java_home }}"
ES_PATH_CONF: "{{ insight_es_conf_base }}/"
register: install_searchguard_result
when: check_searchguard_bundle_result.matched == 1
- name: Copy searchguard certificate files
become: yes
copy:
mode: 0600
src: "files/searchguard/{{ item }}"
dest: "{{ mc_es_searchgaurd_home }}/tools/{{ item }}"
dest: "{{ insight_es_searchgaurd_home }}/tools/{{ item }}"
owner: elasticsearch
group: elasticsearch
mode: 0600
loop:
- "sgadmin.pem"
- "sgadmin.key"
@@ -48,11 +48,11 @@
- name: Copy SG roles files
become: yes
copy:
mode: 0600
src: "files/searchguard/{{ item }}"
dest: "{{ mc_es_searchgaurd_home }}/sgconfig/{{ item }}"
dest: "{{ insight_es_searchgaurd_home }}/sgconfig/{{ item }}"
owner: elasticsearch
group: elasticsearch
mode: 0600
loop:
- "sg_roles.yml"
- "sg_roles_mapping.yml"
@@ -61,7 +61,7 @@
- name: Check execution bit
become: yes
file:
path: "{{ mc_es_searchgaurd_home }}/tools/sgadmin.sh"
path: "{{ insight_es_searchgaurd_home }}/tools/sgadmin.sh"
owner: elasticsearch
group: elasticsearch
mode: 0700

View File

@@ -1,19 +1,21 @@
- name: Kill elasticsearch process
become: yes
ignore_errors: yes
shell: |
set -o pipefail
ps -ef | grep -v grep | grep -w elasticsearch | awk '{print $2}' | while read curr_ps_id
do
echo "process ${curr_ps_id} still running"
echo "$(ps -ef | grep -v grep | grep ${curr_ps_id})"
kill -9 ${curr_ps_id}
done
args:
executable: /bin/bash
changed_when: false
- name: Find searchguard bundle for removal
become: yes
find:
paths: "{{ mc_home }}/app/third-party/elasticsearch/"
paths: "{{ insight_home }}/app/third-party/elasticsearch/"
patterns: "^search-guard-.+\\.zip$"
use_regex: yes
file_type: file
@@ -24,23 +26,23 @@
become_user: elasticsearch
ignore_errors: yes
shell: |
{{ mc_es_script_path }}/elasticsearch-plugin remove {{ check_searchguard_bundle_result.files[0].path }}
{{ insight_es_script_path }}/elasticsearch-plugin remove {{ check_searchguard_bundle_result.files[0].path }}
environment:
ES_JAVA_HOME: "{{ mc_es_java_home }}"
ES_PATH_CONF: "{{ mc_es_conf_base }}/config"
ES_JAVA_HOME: "{{ insight_es_java_home }}"
ES_PATH_CONF: "{{ insight_es_conf_base }}/config"
register: remove_searchguard_result
when: check_searchguard_bundle_result.matched == 1
- name: Delete elasticsearch home dir
become: yes
file:
path: "{{ mc_es_home }}"
path: "{{ insight_es_home }}"
state: absent
- name: Create elasticsearch home dir
become: yes
file:
path: "{{ mc_es_home }}"
path: "{{ insight_es_home }}"
state: directory
owner: elasticsearch
group: elasticsearch
@@ -49,7 +51,7 @@
- name: Find elasticsearch package
become: yes
find:
paths: "{{ mc_home }}/app/third-party/elasticsearch"
paths: "{{ insight_home }}/app/third-party/elasticsearch"
patterns: "^elasticsearch-.+\\.tar.gz$"
use_regex: yes
file_type: file
@@ -57,30 +59,29 @@
- name: Set elasticsearch package file name
set_fact:
mc_elasticsearch_package: "{{ check_elasticsearch_package_result.files[0].path }}"
insight_elasticsearch_package: "{{ check_elasticsearch_package_result.files[0].path }}"
when: check_elasticsearch_package_result.matched > 0
- name: Extract elasticsearch package
become: yes
unarchive:
src: "{{ mc_elasticsearch_package }}"
dest: "{{ mc_es_home }}"
src: "{{ insight_elasticsearch_package }}"
dest: "{{ insight_es_home }}"
remote_src: yes
extra_opts:
- --strip-components=1
- --exclude=config
owner: elasticsearch
group: elasticsearch
creates: "{{ mc_es_java_home }}"
creates: "{{ insight_es_java_home }}"
register: unarchive_result
when: check_elasticsearch_package_result.matched > 0
- name: Generate HA elasticsearch.yml template file
become: yes
ignore_errors: yes
template:
src: templates/ha/{{ mc_ha_node_type }}.elasticsearch.yml.j2
dest: "{{ mc_es_conf_base }}/elasticsearch.yml"
src: templates/ha/{{ insight_ha_node_type }}.elasticsearch.yml.j2
dest: "{{ insight_es_conf_base }}/elasticsearch.yml"
owner: elasticsearch
group: elasticsearch
mode: 0644
@@ -89,7 +90,7 @@
- name: Create empty unicast_hosts.txt file
become: yes
file:
path: "{{ mc_es_conf_base }}/unicast_hosts.txt"
path: "{{ insight_es_conf_base }}/unicast_hosts.txt"
state: touch
owner: elasticsearch
group: elasticsearch
@@ -100,12 +101,14 @@
- name: Start elasticsearch
become: yes
command: "su -c '{{ mc_es_script_path }}/elasticsearch -d' elasticsearch"
become_user: elasticsearch
shell: |
nohup {{ insight_es_script_path }}/elasticsearch -d
environment:
ES_JAVA_HOME: "{{ mc_es_java_home }}"
ES_PATH_CONF: "{{ mc_es_conf_base }}/"
register: start_elastcsearch
ES_JAVA_HOME: "{{ insight_es_java_home }}"
ES_PATH_CONF: "{{ insight_es_conf_base }}/"
when: unarchive_result.extract_results.rc | default(128) == 0
register: start_elastcsearch
- name: Wait for elasticsearch to start
pause:
@@ -115,7 +118,7 @@
- name: Check if elasticsearch is running
wait_for:
host: localhost
port: "{{ mc_es_transport_port }}"
port: "{{ insight_es_transport_port }}"
delay: 5
connect_timeout: 1
@@ -123,11 +126,11 @@
become: yes
become_user: elasticsearch
shell: |
./sgadmin.sh -p {{ mc_es_transport_port }} -cacert root-ca.pem \
-cert sgadmin.pem -key sgadmin.key -cd {{ mc_es_searchgaurd_home }}/sgconfig/ -nhnv -icl
./sgadmin.sh -p {{ insight_es_transport_port }} -cacert root-ca.pem \
-cert sgadmin.pem -key sgadmin.key -cd {{ insight_es_searchgaurd_home }}/sgconfig/ -nhnv -icl
args:
chdir: "{{ mc_es_searchgaurd_home }}/tools/"
chdir: "{{ insight_es_searchgaurd_home }}/tools/"
environment:
JAVA_HOME: "{{ mc_es_java_home }}"
JAVA_HOME: "{{ insight_es_java_home }}"
register: install_searchguard_result
when: check_searchguard_bundle_result.matched == 1

View File

@@ -1,20 +1,20 @@
- name: Create elasticsearch config path folder
become: yes
file:
path: "{{ mc_es_conf_base }}"
path: "{{ insight_es_conf_base }}"
state: directory
mode: 0755
owner: elasticsearch
group: elasticsearch
mode: 0755
- name: Copy elasticsearch cert files
become: yes
copy:
mode: 0600
src: "files/searchguard/{{ item }}"
dest: "{{ mc_es_conf_base }}/{{ item }}"
dest: "{{ insight_es_conf_base }}/{{ item }}"
owner: elasticsearch
group: elasticsearch
mode: 0600
loop:
- "localhost.pem"
- "localhost.key"
@@ -23,7 +23,7 @@
- name: Find searchguard bundle
become: yes
find:
paths: "{{ mc_home }}/app/third-party/elasticsearch/"
paths: "{{ insight_home }}/app/third-party/elasticsearch/"
patterns: "^search-guard-.+\\.zip$"
use_regex: yes
file_type: file
@@ -33,22 +33,22 @@
become: yes
ignore_errors: yes
shell: |
{{ mc_es_script_path }}/elasticsearch-plugin install \
{{ insight_es_script_path }}/elasticsearch-plugin install \
-b file://{{ check_searchguard_bundle_result.files[0].path }}
environment:
ES_JAVA_HOME: "{{ mc_es_java_home }}"
ES_PATH_CONF: "{{ mc_es_conf_base }}/"
ES_JAVA_HOME: "{{ insight_es_java_home }}"
ES_PATH_CONF: "{{ insight_es_conf_base }}/"
register: install_searchguard_result
when: check_searchguard_bundle_result.matched == 1
- name: Copy searchguard cert files
become: yes
copy:
mode: 0600
src: "files/searchguard/{{ item }}"
dest: "{{ mc_es_searchgaurd_home }}/tools/{{ item }}"
dest: "{{ insight_es_searchgaurd_home }}/tools/{{ item }}"
owner: elasticsearch
group: elasticsearch
mode: 0600
loop:
- "sgadmin.pem"
- "sgadmin.key"
@@ -57,11 +57,11 @@
- name: Copy SG roles files
become: yes
copy:
mode: 0600
src: "files/searchguard/{{ item }}"
dest: "{{ mc_es_searchgaurd_home }}/sgconfig/{{ item }}"
dest: "{{ insight_es_searchgaurd_home }}/sgconfig/{{ item }}"
owner: elasticsearch
group: elasticsearch
mode: 0600
loop:
- "sg_roles.yml"
- "sg_roles_mapping.yml"
@@ -70,7 +70,7 @@
- name: Check execution bit
become: yes
file:
path: "{{ mc_es_searchgaurd_home }}/tools/sgadmin.sh"
path: "{{ insight_es_searchgaurd_home }}/tools/sgadmin.sh"
owner: elasticsearch
group: elasticsearch
mode: 0700

View File

@@ -0,0 +1,128 @@
- name: Check if insight tar exists
become: yes
stat:
path: "{{ jfrog_home_directory }}/{{ insight_tar_file_name }}"
register: insight_tar_check
- name: Download insight for upgrade
become: yes
get_url:
url: "{{ insight_tar }}"
timeout: "{{ insight_download_timeout }}"
dest: "{{ jfrog_home_directory }}"
register: download_insight
until: download_insight is succeeded
retries: 3
when: not insight_tar_check.stat.exists
- name: Extract insight tar
become: yes
unarchive:
src: "{{ jfrog_home_directory }}/{{ insight_tar_file_name }}"
dest: "{{ jfrog_home_directory }}"
remote_src: true
owner: "{{ insight_user }}"
group: "{{ insight_group }}"
creates: "{{ insight_untar_home }}"
when: download_insight is succeeded
- name: Stop insight
meta: flush_handlers
- name: Delete current app folder
become: yes
file:
path: "{{ insight_home }}/app"
state: absent
when: download_insight.changed
- name: Copy new app to insight app
command: "cp -r {{ insight_untar_home }}/app/. {{ insight_home }}/app"
become: yes
when: download_insight.changed
- name: Delete untar directory
become: yes
file:
path: "{{ insight_untar_home }}"
state: absent
when: download_insight.changed
- name: Upgrade elasticsearch
import_tasks: upgrade-elasticsearch.yml
when: download_insight.changed
- name: Check if system.yaml exists
become: yes
stat:
path: "{{ insight_home }}/var/etc/system.yaml"
register: systemyaml
- name: Configure system.yaml
become: yes
template:
src: "{{ insight_system_yaml_template }}"
dest: "{{ insight_home }}/var/etc/system.yaml"
owner: "{{ insight_user }}"
group: "{{ insight_group }}"
mode: 0644
when:
- insight_systemyaml is defined
- insight_systemyaml | length > 0
- insight_systemyaml_override or (not systemyaml.stat.exists)
notify: restart insight
- name: Check if install.sh wrapper script exist
become: yes
stat:
path: "{{ insight_install_script_path }}/install.sh"
register: upgrade_wrapper_script
when: download_insight.changed
- name: Include interactive installer scripts
include_vars: script/archive.yml
- name: Upgrade Insight
include_tasks: expect.yml
vars:
exp_executable_cmd: "./install.sh -u {{ insight_user }} -g {{ insight_group }}"
exp_dir: "{{ insight_install_script_path }}"
exp_scenarios: "{{ insight_installer_scenario['main'] }}"
args:
apply:
environment:
YQ_PATH: "{{ insight_thirdparty_path }}/yq"
when:
- upgrade_wrapper_script.stat.exists
- download_insight.changed
- name: Configure installer info
become: yes
template:
src: installer-info.json.j2
dest: "{{ insight_home }}/var/etc/info/installer-info.json"
mode: 0644
notify: restart insight
- name: Update correct permissions
become: yes
file:
path: "{{ insight_home }}"
state: directory
recurse: yes
owner: "{{ insight_user }}"
group: "{{ insight_group }}"
- name: Restart insight
meta: flush_handlers
- name: Make sure insight is up and running
uri:
url: http://127.0.0.1:8082/router/api/v1/system/health
timeout: 130
status_code: 200
register: result
until: result is succeeded
retries: 25
delay: 5
when: not ansible_check_mode

View File

@@ -1,13 +1,13 @@
discovery.seed_providers: file
transport.port: {{ mc_es_transport_port }}
transport.port: {{ insight_es_transport_port }}
transport.host: 0.0.0.0
transport.publish_host: {{ ansible_host }}
network.host: 0.0.0.0
node.name: {{ ansible_host }}
cluster.initial_master_nodes: {{ ansible_host }}
bootstrap.memory_lock: false
path.data: {{ mc_es_data_dir }}
path.logs: {{ mc_es_log_dir }}
path.data: {{ insight_es_data_dir }}
path.logs: {{ insight_es_log_dir }}
xpack.security.enabled: false
searchguard.ssl.transport.pemcert_filepath: localhost.pem

View File

@@ -1,12 +1,12 @@
discovery.seed_providers: file
{% if mc_elasticsearch_package | regex_search(".*-7.*") %}
{% if insight_elasticsearch_package | regex_search(".*oss-7.*") %}
cluster.initial_master_nodes: {{ ansible_host }}
{% endif %}
xpack.security.enabled: false
path.data: {{ mc_es_home }}/data
path.logs: {{ mc_es_home }}/logs
path.data: {{ insight_es_home }}/data
path.logs: {{ insight_es_home }}/logs
network.host: 0.0.0.0
node.name: {{ ansible_host }}

View File

@@ -0,0 +1,22 @@
configVersion: 1
shared:
jfrogUrl: {{ jfrog_url }}
node:
ip: {{ ansible_host }}
database:
type: "{{ insight_db_type }}"
driver: "{{ insight_db_driver }}"
url: "{{ insight_db_url }}"
username: "{{ insight_db_user }}"
password: "{{ insight_db_password }}"
elasticsearch:
unicastFile: {{ insight_es_conf_base }}/unicast_hosts.txt
password: {{ insight_es_password }}
url: {{ insight_es_url }}
username: {{ insight_es_user }}
external: true
security:
joinKey: {{ join_key }}
router:
entrypoints:
internalPort: 8046

View File

@@ -2,8 +2,8 @@
discovery.seed_providers: file
xpack.security.enabled: false
path.data: {{ mc_es_home }}/data
path.logs: {{ mc_es_home }}/logs
path.data: {{ insight_es_home }}/data
path.logs: {{ insight_es_home }}/logs
network.host: 0.0.0.0
node.name: {{ ansible_host }}

View File

@@ -0,0 +1,23 @@
configVersion: 1
shared:
jfrogUrl: {{ jfrog_url }}
node:
ip: {{ ansible_host }}
database:
type: "{{ insight_db_type }}"
driver: "{{ insight_db_driver }}"
url: "{{ insight_db_url }}"
username: "{{ insight_db_user }}"
password: "{{ insight_db_password }}"
elasticsearch:
unicastFile: {{ insight_es_conf_base }}/unicast_hosts.txt
clusterSetup: YES
password: {{ insight_es_password }}
url: {{ insight_es_url }}
username: {{ insight_es_user }}
external: true
security:
joinKey: {{ join_key }}
router:
entrypoints:
internalPort: 8046

View File

@@ -1,6 +1,6 @@
{{ ansible_managed | comment }}
{
"productId": "Ansible_Missioncontrol/{{ platform_collection_version }}-{{ missioncontrol_version }}",
"productId": "Ansible_Insight/{{ platform_collection_version }}-{{ insight_version }}",
"features": [
{
"featureId": "Channel/{{ ansible_marketplace }}"

View File

@@ -0,0 +1 @@
{{ insight_systemyaml }}

View File

@@ -1,5 +1,5 @@
# platform collection version
platform_collection_version: 7.25.7
platform_collection_version: 10.0.1
# indicates where this collection was downloaded from (galaxy, automation_hub, standalone)
ansible_marketplace: galaxy

View File

@@ -1,8 +1,8 @@
mc_installer_scenario:
insight_installer_scenario:
main:
- {
"expecting": "(data|installation) directory \\(",
"sending": "{{ mc_home }}"
"sending": "{{ insight_home }}"
}
- {
"expecting": "jfrog url( \\(.+\\))?:(?!.*Skipping prompt)",
@@ -18,7 +18,7 @@ mc_installer_scenario:
}
- {
"expecting": "are you adding an additional node",
"sending": "{% if mc_ha_node_type is defined and mc_ha_node_type == 'master' %}n{% else %}y{% endif %}"
"sending": "{% if insight_ha_node_type is defined and insight_ha_node_type == 'master' %}n{% else %}y{% endif %}"
}
- {
"expecting": "do you want to install postgresql",
@@ -30,29 +30,29 @@ mc_installer_scenario:
}
- {
"expecting": "(postgresql|database) url.+\\[jdbc:postgresql.+\\]:",
"sending": "{{ mc_db_url }}"
"sending": "{{ insight_db_url }}"
}
- {
"expecting": "(postgresql|database) password",
"sending": "{{ mc_db_password }}"
"sending": "{{ insight_db_password }}"
}
- {
"expecting": "(postgresql|database) username",
"sending": "{{ mc_db_user }}"
"sending": "{{ insight_db_user }}"
}
- {
"expecting": "confirm database password",
"sending": "{{ mc_db_password }}"
"sending": "{{ insight_db_password }}"
}
- {
"expecting": "elasticsearch url:(?!.*Skipping prompt)",
"sending": "{{ mc_es_url }}"
"sending": "{{ insight_es_url }}"
}
- {
"expecting": "elasticsearch username:",
"sending": "{{ mc_es_user }}"
"sending": "{{ insight_es_user }}"
}
- {
"expecting": "elasticsearch password:",
"sending": "{{ mc_es_password }}"
"sending": "{{ insight_es_password }}"
}

View File

@@ -1,26 +0,0 @@
# Missioncontrol
The missioncontrol role will install missioncontrol software onto the host. An Artifactory server and Postgress database is required.
### Role Variables
* _mc_upgrade_only_: Perform an software upgrade only. Default is false.
Additional variables can be found in [defaults/main.yml](./defaults/main.yml).
## Example Playbook
```
---
- hosts: missioncontrol_servers
roles:
- missioncontrol
```
## Upgrades
The missioncontrol role supports software upgrades. To use a role to perform a software upgrade only, use the _xray_upgrade_only_ variables and specify the version. See the following example.
```
- hosts: missioncontrol_servers
vars:
missioncontrol_version: "{{ lookup('env', 'missioncontrol_version_upgrade') }}"
mc_upgrade_only: true
roles:
- missioncontrol
```

View File

@@ -1,101 +0,0 @@
# defaults file for mc
# The version of missioncontrol to install
missioncontrol_version: 4.7.14
# whether to enable HA
mc_ha_enabled: false
mc_ha_node_type: master
# The location where mc should install
jfrog_home_directory: /opt/jfrog
# The remote mc download file
mc_tar_file_name: jfrog-mc-{{ missioncontrol_version }}-linux.tar.gz
mc_tar: https://releases.jfrog.io/artifactory/jfrog-mc/linux/{{ missioncontrol_version }}/{{ mc_tar_file_name }}
# Timeout in seconds for URL request
mc_download_timeout: 10
#The mc install directory
mc_untar_home: "{{ jfrog_home_directory }}/jfrog-mc-{{ missioncontrol_version }}-linux"
mc_home: "{{ jfrog_home_directory }}/mc"
mc_install_script_path: "{{ mc_home }}/app/bin"
mc_thirdparty_path: "{{ mc_home }}/app/third-party"
mc_archive_service_cmd: "{{ mc_install_script_path }}/installService.sh"
mc_service_file: /lib/systemd/system/mc.service
#mc users and groups
mc_user: jfmc
mc_group: jfmc
mc_uid: 1050
mc_gid: 1050
mc_daemon: mc
# MissionContol ElasticSearch Details
es_uid: 1060
es_gid: 1060
mc_es_conf_base: "/etc/elasticsearch"
mc_es_user: admin
mc_es_password: admin
mc_es_url: "http://localhost:9200"
mc_es_transport_port: 9300
mc_es_home: "/usr/share/elasticsearch"
mc_es_data_dir: "/var/lib/elasticsearch"
mc_es_log_dir: "/var/log/elasticsearch"
mc_es_java_home: "/usr/share/elasticsearch/jdk"
mc_es_script_path: "/usr/share/elasticsearch/bin"
mc_es_searchgaurd_home: "/usr/share/elasticsearch/plugins/search-guard-7"
# if this is an upgrade
mc_upgrade_only: false
mc_system_yaml_template: system.yaml.j2
# Provide systemyaml content below with 2-space indentation
mc_systemyaml: |-
configVersion: 1
shared:
jfrogUrl: {{ jfrog_url }}
node:
ip: {{ ansible_host }}
id: {{ ansible_hostname }}
database:
type: "{{ mc_db_type }}"
driver: "{{ mc_db_driver }}"
url: "{{ mc_db_url }}"
elasticsearch:
unicastFile: {{ mc_es_conf_base }}/config/unicast_hosts.txt
password: {{ mc_es_password }}
url: {{ mc_es_url }}
username: {{ mc_es_user }}
external: true
security:
joinKey: {{ join_key }}
mc:
database:
username: "{{ mc_db_user }}"
password: "{{ mc_db_password }}"
schema: "jfmc_server"
insight-scheduler:
database:
username: "{{ mc_db_user }}"
password: "{{ mc_db_password }}"
schema: "insight_scheduler"
insight-server:
database:
username: "{{ mc_db_user }}"
password: "{{ mc_db_password }}"
schema: "insight_server"
router:
entrypoints:
internalPort: 8046
# Note: mc_systemyaml_override is by default false, if you want to change default mc_systemyaml
mc_systemyaml_override: false

View File

@@ -1,13 +0,0 @@
---
# handlers file for missioncontrol
- name: restart missioncontrol
become: yes
systemd:
name: "{{ mc_daemon }}"
state: restarted
- name: stop missioncontrol
become: yes
systemd:
name: "{{ mc_daemon }}"
state: stopped

View File

@@ -1,161 +0,0 @@
- name: Install prerequisite packages
include_tasks: "{{ ansible_os_family }}.yml"
- name: Ensure group jfmc exist
become: yes
group:
name: "{{ mc_group }}"
state: present
- name: Ensure user jfmc exist
become: yes
user:
name: "{{ mc_user }}"
group: "{{ mc_group }}"
create_home: yes
home: "{{ mc_home }}"
shell: /bin/bash
state: present
- name: Check if mc tar exists
become: yes
stat:
path: "{{ jfrog_home_directory }}/{{ mc_tar_file_name }}"
register: mc_tar_check
- name: Download mc
become: yes
get_url:
url: "{{ mc_tar }}"
timeout: "{{ mc_download_timeout }}"
dest: "{{ jfrog_home_directory }}"
register: download_mc
until: download_mc is succeeded
retries: 3
when: not mc_tar_check.stat.exists
- name: Extract mc tar
become: yes
unarchive:
src: "{{ jfrog_home_directory }}/{{ mc_tar_file_name }}"
dest: "{{ jfrog_home_directory }}"
remote_src: true
owner: "{{ mc_user }}"
group: "{{ mc_group }}"
creates: "{{ mc_untar_home }}"
when: download_mc is succeeded
- name: Check if app directory exists
become: yes
stat:
path: "{{ mc_home }}/app"
register: app_dir_check
- name: Copy untar directory to mc home
become: yes
copy:
src: "{{ mc_untar_home }}/"
dest: "{{ mc_home }}"
remote_src: yes
when: not app_dir_check.stat.exists
- name: Create required directories
become: yes
file:
path: "{{ item }}"
state: directory
recurse: yes
owner: "{{ mc_user }}"
group: "{{ mc_group }}"
loop:
- "{{ mc_home }}/var/etc"
- "{{ mc_home }}/var/etc/security/"
- "{{ mc_home }}/var/etc/info/"
- name: Configure master key
become: yes
copy:
dest: "{{ mc_home }}/var/etc/security/master.key"
content: "{{ master_key }}"
owner: "{{ mc_user }}"
group: "{{ mc_group }}"
mode: 0640
- name: Setup elasticsearch
import_tasks: setup-elasticsearch.yml
- name: Check if install.sh wrapper script exist
become: yes
stat:
path: "{{ mc_install_script_path }}/install.sh"
register: install_wrapper_script
- name: Include interactive installer scripts
include_vars: script/archive.yml
- name: Install JFMC
include_tasks: expect.yml
vars:
exp_executable_cmd: "./install.sh -u {{ mc_user }} -g {{ mc_group }}"
exp_dir: "{{ mc_install_script_path }}"
exp_scenarios: "{{ mc_installer_scenario['main'] }}"
args:
apply:
environment:
YQ_PATH: "{{ mc_thirdparty_path }}/yq"
when: install_wrapper_script.stat.exists
- name: Configure installer info
become: yes
template:
src: installer-info.json.j2
dest: "{{ mc_home }}/var/etc/info/installer-info.json"
notify: restart missioncontrol
- name: Check if systemyaml exists
become: yes
stat:
path: "{{ mc_home }}/var/etc/system.yaml"
register: systemyaml
- name: Configure systemyaml
become: yes
template:
src: "{{ mc_system_yaml_template }}"
dest: "{{ mc_home }}/var/etc/system.yaml"
when:
- mc_systemyaml is defined
- mc_systemyaml|length > 0
- mc_systemyaml_override or (not systemyaml.stat.exists)
notify: restart missioncontrol
- name: Update correct permissions
become: yes
file:
path: "{{ mc_home }}"
state: directory
recurse: yes
owner: "{{ mc_user }}"
group: "{{ mc_group }}"
- name: Install mc as a service
become: yes
command: "{{ mc_archive_service_cmd }}"
args:
chdir: "{{ mc_install_script_path }}"
creates: "{{ mc_service_file }}"
register: check_service_status_result
- name: Restart missioncontrol
meta: flush_handlers
- name: Make sure missionControl is up and running
uri:
url: http://127.0.0.1:8082/router/api/v1/system/health
timeout: 130
status_code: 200
register: result
until: result is succeeded
retries: 25
delay: 5
when: not ansible_check_mode

View File

@@ -1,126 +0,0 @@
---
- name: Check if mc tar exists
become: yes
stat:
path: "{{ jfrog_home_directory }}/{{ mc_tar_file_name }}"
register: mc_tar_check
- name: Download mc for upgrade
become: yes
get_url:
url: "{{ mc_tar }}"
timeout: "{{ mc_download_timeout }}"
dest: "{{ jfrog_home_directory }}"
register: download_mc
until: download_mc is succeeded
retries: 3
when: not mc_tar_check.stat.exists
- name: Extract mc tar
become: yes
unarchive:
src: "{{ jfrog_home_directory }}/{{ mc_tar_file_name }}"
dest: "{{ jfrog_home_directory }}"
remote_src: true
owner: "{{ mc_user }}"
group: "{{ mc_group }}"
creates: "{{ mc_untar_home }}"
when: download_mc is succeeded
- name: stop missioncontrol
meta: flush_handlers
- name: Delete current app folder
become: yes
file:
path: "{{ mc_home }}/app"
state: absent
when: download_mc.changed
- name: Copy new app to mc app
command: "cp -r {{ mc_untar_home }}/app/. {{ mc_home }}/app"
become: yes
when: download_mc.changed
- name: Delete untar directory
file:
path: "{{ mc_untar_home }}"
state: absent
become: yes
when: download_mc.changed
- name: Upgrade elasticsearch
import_tasks: upgrade-elasticsearch.yml
when: download_mc.changed
- name: Check if systemyaml exists
become: yes
stat:
path: "{{ mc_home }}/var/etc/system.yaml"
register: systemyaml
- name: Configure systemyaml
become: yes
template:
src: "{{ mc_system_yaml_template }}"
dest: "{{ mc_home }}/var/etc/system.yaml"
when:
- mc_systemyaml is defined
- mc_systemyaml|length > 0
- mc_systemyaml_override or (not systemyaml.stat.exists)
notify: restart missioncontrol
- name: Check if install.sh wrapper script exist
become: yes
stat:
path: "{{ mc_install_script_path }}/install.sh"
register: upgrade_wrapper_script
when: download_mc.changed
- name: Include interactive installer scripts
include_vars: script/archive.yml
- name: Upgrade JFMC
include_tasks: expect.yml
vars:
exp_executable_cmd: "./install.sh -u {{ mc_user }} -g {{ mc_group }}"
exp_dir: "{{ mc_install_script_path }}"
exp_scenarios: "{{ mc_installer_scenario['main'] }}"
args:
apply:
environment:
YQ_PATH: "{{ mc_thirdparty_path }}/yq"
when:
- upgrade_wrapper_script.stat.exists
- download_mc.changed
- name: Configure installer info
become: yes
template:
src: installer-info.json.j2
dest: "{{ mc_home }}/var/etc/info/installer-info.json"
mode: 0644
notify: restart missioncontrol
- name: Update correct permissions
become: yes
file:
path: "{{ mc_home }}"
state: directory
recurse: yes
owner: "{{ mc_user }}"
group: "{{ mc_group }}"
- name: Restart missioncontrol
meta: flush_handlers
- name: Make sure missionControl is up and running
uri:
url: http://127.0.0.1:8082/router/api/v1/system/health
timeout: 130
status_code: 200
register: result
until: result is succeeded
retries: 25
delay: 5
when: not ansible_check_mode

View File

@@ -1,22 +0,0 @@
configVersion: 1
shared:
jfrogUrl: {{ jfrog_url }}
node:
ip: {{ ansible_host }}
database:
type: "{{ mc_db_type }}"
driver: "{{ mc_db_driver }}"
url: "{{ mc_db_url }}"
username: "{{ mc_db_user }}"
password: "{{ mc_db_password }}"
elasticsearch:
unicastFile: {{ mc_es_conf_base }}/unicast_hosts.txt
password: {{ mc_es_password }}
url: {{ mc_es_url }}
username: {{ mc_es_user }}
external: true
security:
joinKey: {{ join_key }}
router:
entrypoints:
internalPort: 8046

View File

@@ -1,23 +0,0 @@
configVersion: 1
shared:
jfrogUrl: {{ jfrog_url }}
node:
ip: {{ ansible_host }}
database:
type: "{{ mc_db_type }}"
driver: "{{ mc_db_driver }}"
url: "{{ mc_db_url }}"
username: "{{ mc_db_user }}"
password: "{{ mc_db_password }}"
elasticsearch:
unicastFile: {{ mc_es_conf_base }}/unicast_hosts.txt
clusterSetup: YES
password: {{ mc_es_password }}
url: {{ mc_es_url }}
username: {{ mc_es_user }}
external: true
security:
joinKey: {{ join_key }}
router:
entrypoints:
internalPort: 8046

View File

@@ -17,7 +17,6 @@
- name: Install prerequisite packages
become: yes
ignore_errors: yes
yum:
name:
- acl
@@ -52,7 +51,13 @@
- LANG=en_us.UTF-8
- LANGUAGE=en_us.UTF-8
- name: Install postgres repository
- name: Import PostgreSQL GPG public key
become: yes
rpm_key:
key: https://download.postgresql.org/pub/repos/yum/RPM-GPG-KEY-PGDG
state: present
- name: Install PostgreSQL repository
become: yes
yum:
name: "{{ base }}/reporpms/EL-{{ version }}-x86_64/{{ repo_file_name }}"
@@ -62,7 +67,24 @@
version: "{{ ansible_distribution_major_version }}"
repo_file_name: pgdg-redhat-repo-latest.noarch.rpm
- name: Install postgres packages
- name: Disable PostgreSQL module
become: yes
copy:
dest: /etc/dnf/modules.d/postgresql.module
owner: root
group: root
mode: 0644
content: |
[postgresql]
name=postgresql
stream=
profiles=
state=disabled
when:
- ansible_os_family == 'RedHat'
- ansible_distribution_major_version | int == 8
- name: Install PostgreSQL packages
become: yes
yum:
name:
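The modules.d file written above keeps the change idempotent; the same effect can be had with the dnf CLI, which is what the PGDG install instructions use. A hedged equivalent task (command-based, so Ansible will always report it as changed):

```
- name: Disable the built-in PostgreSQL AppStream module on EL8
  become: yes
  command: dnf -qy module disable postgresql
  when:
    - ansible_os_family == 'RedHat'
    - ansible_distribution_major_version | int == 8
```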

View File

@@ -58,65 +58,46 @@
timeout: 120
sleep: 10
- name: Create users
- name: Create user
become: yes
become_user: postgres
postgresql_user:
name: "{{ item.db_user }}"
password: "{{ item.db_password }}"
name: "{{ curr_user.username }}"
password: "{{ curr_user.password }}"
conn_limit: "-1"
loop: "{{ db_users | default([]) }}"
loop: "{{ database | dict2items | map(attribute='value') | list }}"
loop_control:
loop_var: curr_user
when: curr_user.enabled | bool
no_log: true # secret passwords
- name: Create a database
- name: Create database
become: yes
become_user: postgres
postgresql_db:
name: "{{ item.db_name }}"
owner: "{{ item.db_owner }}"
name: "{{ curr_db.name }}"
owner: "{{ curr_db.owner }}"
encoding: UTF-8
lc_collate: "{{ postgres_locale }}"
lc_ctype: "{{ postgres_locale }}"
template: template0
loop: "{{ dbs | default([]) }}"
- name: Check if MC schemas already exists
become: yes
become_user: postgres
command: psql -d {{ mc_db_name }} -t -c "\dn"
register: mc_schemas_loaded
when: mc_enabled
changed_when: false
- name: Create schemas for mission-control
become: yes
become_user: postgres
command: psql -d {{ mc_db_name }} -c 'CREATE SCHEMA {{ item }} authorization {{ mc_db_user }}'
loop: "{{ mc_schemas | default([]) }}"
when:
- mc_enabled
- mc_schemas_loaded.stdout is defined
- item not in mc_schemas_loaded.stdout
- name: Grant all privileges to mc user on its schema
become: yes
become_user: postgres
postgresql_privs:
database: "{{ mc_db_name }}"
privs: ALL
type: schema
roles: "{{ mc_db_user }}"
objs: "{{ item }}"
loop: "{{ mc_schemas | default([]) }}"
when: mc_enabled
loop: "{{ database | dict2items | map(attribute='value') | list }}"
loop_control:
loop_var: curr_db
when: curr_db.enabled | bool
no_log: true # secret passwords
- name: Grant privs on db
become: yes
become_user: postgres
postgresql_privs:
database: "{{ item.db_name }}"
role: "{{ item.db_owner }}"
database: "{{ curr_db.name }}"
role: "{{ curr_db.owner }}"
state: present
privs: ALL
type: database
loop: "{{ dbs | default([]) }}"
loop: "{{ database | dict2items | map(attribute='value') | list }}"
loop_control:
loop_var: curr_db
when: curr_db.enabled | bool
no_log: true # secret passwords
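Both refactored loops now iterate over the values of a single `database` dictionary instead of the old `db_users`/`dbs` lists. The per-entry key names below are inferred from the loop bodies (`enabled`, `name`, `owner`, `username`, `password`); the top-level keys and values are placeholders:

```
database:
  artifactory:
    enabled: true
    name: artifactory
    owner: artifactory
    username: artifactory
    password: example-password
  insight:
    enabled: false
    name: insight
    owner: insight
    username: insight
    password: example-password
```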

View File

@@ -1,7 +1,7 @@
# defaults file for xray
# The version of xray to install
xray_version: 3.32.2
xray_version: 3.34.1
# whether to enable HA
xray_ha_enabled: false
@@ -51,7 +51,7 @@ linux_distro: "{{ ansible_distribution | lower }}{{ ansible_distribution_major_v
xray_db_util_search_filter:
ubuntu16:
db5: 'db5.3-util.*ubuntu.*amd64\.deb'
db5: 'db5.3-util.*ubuntu0.*amd64\.deb'
db: 'db-util.*ubuntu.*all.deb'
ubuntu18:
db5: 'db5.3-util.*ubuntu1.1.*amd64\.deb'

View File

@@ -39,10 +39,10 @@
unarchive:
src: "{{ jfrog_home_directory }}/{{ xray_tar_file_name }}"
dest: "{{ jfrog_home_directory }}"
remote_src: true
owner: "{{ xray_user }}"
group: "{{ xray_group }}"
creates: "{{ xray_untar_home }}"
remote_src: true
when: download_xray is succeeded
- name: Check if app directory exists
@@ -56,6 +56,9 @@
copy:
src: "{{ xray_untar_home }}/"
dest: "{{ xray_home }}"
owner: "{{ xray_user }}"
group: "{{ xray_group }}"
mode: 0755
remote_src: yes
when: not app_dir_check.stat.exists

View File

@@ -21,6 +21,12 @@
xray_socat_package: "{{ check_socat_package_result.files[0].path }}"
when: check_socat_package_result.matched > 0
- name: Import CentOS GPG public key
become: yes
rpm_key:
key: https://www.centos.org/keys/RPM-GPG-KEY-CentOS-Official
state: present
- name: Install socat package
become: yes
yum:

View File

@@ -1,4 +1,3 @@
---
- name: Check if xray tar exists
become: yes
stat:
@@ -101,7 +100,7 @@
mode: 0644
when:
- xray_systemyaml is defined
- xray_systemyaml|length > 0
- xray_systemyaml | length > 0
- xray_systemyaml_override or (not systemyaml.stat.exists)
notify: restart xray

View File

@@ -1,3 +1,3 @@
{% if (xray_systemyaml) and (xray_systemyaml|length > 0) %}
{% if (xray_systemyaml) and (xray_systemyaml | length > 0) %}
{{ xray_systemyaml }}
{% endif %}

View File

@@ -1,5 +1,5 @@
# platform collection version
platform_collection_version: 7.25.7
platform_collection_version: 10.0.1
# indicates where this collection was downloaded from (galaxy, automation_hub, standalone)
ansible_marketplace: galaxy