[Ansible] JFrog Platform 10.0.1 release (#166)

This commit is contained in:
Ram Mohan Rao Chukka
2021-10-22 13:13:22 +05:30
committed by GitHub
parent 8d5ff07819
commit 37bab36884
78 changed files with 876 additions and 731 deletions

View File

@@ -0,0 +1,53 @@
##########################
##########################
## Ansible Linter rules ##
##########################
##########################
#############################
# Exclude paths from linter #
#############################
# exclude_paths:
# - .cache/
########################
# Make output parsable #
########################
parseable: false
#######################
# Set output to quiet #
#######################
quiet: true
#####################
# Path to rules dir #
#####################
#rulesdir:
################
# Tags to skip #
################
skip_list:
# - '602' # Allow compare to empty string
# - '106' # Role name
# - '204' # Allow string length greater than 160 chars
# - '301' # False positives for running command shells
# - '303' # Allow git commands for push, add, etc...
# - '305' # Allow use of shell when you want
- '503' # Allow step to run like handler
##################
# Tags to follow #
##################
#tags:
#############
# Use rules #
#############
use_default_rules: true
#################
# Set verbosity #
#################
verbosity: 1

View File

@@ -1,6 +1,15 @@
# JFrog Platform Ansible Collection Changelog # JFrog Platform Ansible Collection Changelog
All changes to this collection will be documented in this file. All changes to this collection will be documented in this file.
## [10.0.1] - Oct 22, 2021
* Version bump to align with all JFrog Platform installers
* Added the Insight (new product) role
* Mission Control (`artifactory_mc_enabled: true`) is now part of Artifactory (>= 7.27.x) - [Migrating from Mission Control to Insight for existing installations](https://www.jfrog.com/confluence/display/JFROG/Migrating+from+Mission+Control+to+Insight)
* Removed the `artifactory_single_license` variable. From Artifactory version >= 7.27.6, `artifactory_licenses` can be used for both single-node and HA modes
* Added SELinux support for RHEL systems [GH-161](https://github.com/jfrog/JFrog-Cloud-Installers/pull/161)
* Added rolling upgrade support for Artifactory HA installations (using the `serial` approach; see the sketch after this list)
* Updated the Artifactory PostgreSQL driver to `42.2.24`
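A minimal sketch of the rolling-upgrade play, mirroring the `serial` batches this release adds to `artifactory.yml`:
```
- hosts: artifactory_servers
  serial:
    - 1      # upgrade a single node first
    - 100%   # then roll through the remaining nodes
  roles:
    - artifactory
```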
## [7.25.7] - Sep 16, 2021 ## [7.25.7] - Sep 16, 2021
* Bug Fixes * Bug Fixes

View File

@@ -1,4 +1,6 @@
---
- hosts: artifactory_servers - hosts: artifactory_servers
serial:
- 1
- 100%
roles: roles:
- artifactory - artifactory

View File

@@ -1,4 +1,3 @@
---
- hosts: distribution_servers - hosts: distribution_servers
roles: roles:
- distribution - distribution

View File

@@ -9,7 +9,7 @@ namespace: "jfrog"
name: "platform" name: "platform"
# The version of the collection. Must be compatible with semantic versioning # The version of the collection. Must be compatible with semantic versioning
version: "7.25.7" version: "10.0.1"
# The path to the Markdown (.md) readme file. This path is relative to the root of the collection # The path to the Markdown (.md) readme file. This path is relative to the root of the collection
readme: "README.md" readme: "README.md"
@@ -44,6 +44,7 @@ tags:
- artifactory - artifactory
- distribution - distribution
- missioncontrol - missioncontrol
- insight
- xray - xray
# Collections that this collection requires to be installed for it to be usable. The key of the dict is the # Collections that this collection requires to be installed for it to be usable. The key of the dict is the

View File

@@ -1,4 +1,3 @@
# Defaults
## Note: These values are global and have precedence over role/<product>/defaults/main.yaml ## Note: These values are global and have precedence over role/<product>/defaults/main.yaml
## For production deployments, you may want to generate your master and join keys and apply them to all the nodes. ## For production deployments, you may want to generate your master and join keys and apply them to all the nodes.
master_key: ee69d96880726d3abf6b42b97d2ae589111ea95c2a8bd5876ec5cd9e8ee34f86 master_key: ee69d96880726d3abf6b42b97d2ae589111ea95c2a8bd5876ec5cd9e8ee34f86
@@ -11,7 +10,7 @@ jfrog_url: >-
artifactory_enabled: true artifactory_enabled: true
xray_enabled: true xray_enabled: true
distribution_enabled: true distribution_enabled: true
mc_enabled: true insight_enabled: true
postgres_enabled: true postgres_enabled: true
# Artifactory DB details # Artifactory DB details
@@ -41,30 +40,41 @@ distribution_db_password: password
distribution_db_url: >- distribution_db_url: >-
jdbc:postgresql://{{ hostvars[groups['postgres_servers'][0]]['ansible_host'] }}:5432/{{ distribution_db_name }}?sslmode=disable jdbc:postgresql://{{ hostvars[groups['postgres_servers'][0]]['ansible_host'] }}:5432/{{ distribution_db_name }}?sslmode=disable
# MissionControl DB details # Insight DB details
mc_db_type: postgresql insight_db_type: postgresql
mc_db_driver: org.postgresql.Driver insight_db_driver: org.postgresql.Driver
mc_db_name: mc insight_db_name: insight
mc_db_user: mc insight_db_user: insight
mc_db_password: password insight_db_password: password
mc_db_url: >- insight_db_url: >-
jdbc:postgresql://{{ hostvars[groups['postgres_servers'][0]]['ansible_host'] }}:5432/{{ mc_db_name }}?sslmode=disable jdbc:postgresql://{{ hostvars[groups['postgres_servers'][0]]['ansible_host'] }}:5432/{{ insight_db_name }}?sslmode=disable
# Postgresql users and databases/schemas # Postgresql users and databases/schemas
db_users: database:
- { db_user: "{{ artifactory_db_user }}", db_password: "{{ artifactory_db_password }}" } artifactory:
- { db_user: "{{ xray_db_user }}", db_password: "{{ xray_db_password }}" } name: "{{ artifactory_db_name }}"
- { db_user: "{{ distribution_db_user }}", db_password: "{{ distribution_db_password }}" } owner: "{{ artifactory_db_user }}"
- { db_user: "{{ mc_db_user }}", db_password: "{{ mc_db_password }}" } username: "{{ artifactory_db_user }}"
dbs: password: "{{ artifactory_db_password }}"
- { db_name: "{{ artifactory_db_name }}", db_owner: "{{ artifactory_db_user }}" } enabled: "{{ artifactory_enabled }}"
- { db_name: "{{ xray_db_name }}", db_owner: "{{ xray_db_user }}" } xray:
- { db_name: "{{ distribution_db_name }}", db_owner: "{{ distribution_db_user }}" } name: "{{ xray_db_name }}"
- { db_name: "{{ mc_db_name }}", db_owner: "{{ mc_db_user }}" } owner: "{{ xray_db_user }}"
mc_schemas: username: "{{ xray_db_user }}"
- jfmc_server password: "{{ xray_db_password }}"
- insight_server enabled: "{{ xray_enabled }}"
- insight_scheduler distribution:
name: "{{ distribution_db_name }}"
owner: "{{ distribution_db_user }}"
username: "{{ distribution_db_user }}"
password: "{{ distribution_db_password }}"
enabled: "{{ distribution_enabled }}"
insight:
name: "{{ insight_db_name }}"
owner: "{{ insight_db_user }}"
username: "{{ insight_db_user }}"
password: "{{ insight_db_password }}"
enabled: "{{ insight_enabled }}"
# For Centos/RHEL-7, Set this to "/usr/bin/python" # For Centos/RHEL-7, Set this to "/usr/bin/python"
ansible_python_interpreter: "/usr/bin/python3" ansible_python_interpreter: "/usr/bin/python3"
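The former `db_users`/`dbs` lists above are folded into a single `database` dict keyed by product. As a rough illustration of how a role might consume it (a hypothetical sketch only; the `community.postgresql.postgresql_db` call and the loop below are assumptions, not the actual postgres role tasks):
```
- name: Create product databases (illustrative only)
  become: yes
  community.postgresql.postgresql_db:
    name: "{{ item.value.name }}"    # resolved from <product>_db_name
    owner: "{{ item.value.owner }}"  # resolved from <product>_db_user
  loop: "{{ database | dict2items }}"
  when: item.value.enabled | bool
```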

View File

@@ -0,0 +1,3 @@
- hosts: insight_servers
roles:
- insight

View File

@@ -1,4 +0,0 @@
---
- hosts: missioncontrol_servers
roles:
- missioncontrol

View File

@@ -1,16 +1,27 @@
---
- hosts: postgres_servers - hosts: postgres_servers
roles: roles:
- postgres - role: postgres
when: postgres_enabled | bool
- hosts: artifactory_servers - hosts: artifactory_servers
serial:
- 1
- 100%
roles: roles:
- artifactory - role: artifactory
when: artifactory_enabled | bool
- hosts: xray_servers - hosts: xray_servers
roles: roles:
- xray - role: xray
when: xray_enabled | bool
- hosts: distribution_servers - hosts: distribution_servers
roles: roles:
- distribution - role: distribution
- hosts: missioncontrol_servers when: distribution_enabled | bool
- hosts: insight_servers
roles: roles:
- missioncontrol - role: insight
when: insight_enabled | bool
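For orientation, an illustrative YAML inventory sketch with the host groups the updated site playbook targets (host names and addresses are placeholders, not part of this commit):
```
all:
  children:
    postgres_servers:
      hosts:
        postgres-1:
          ansible_host: 10.0.0.10
    artifactory_servers:
      hosts:
        artifactory-1:
          ansible_host: 10.0.0.11
        artifactory-2:
          ansible_host: 10.0.0.12
    xray_servers:
      hosts:
        xray-1:
          ansible_host: 10.0.0.13
    distribution_servers:
      hosts:
        distribution-1:
          ansible_host: 10.0.0.14
    insight_servers:
      hosts:
        insight-1:
          ansible_host: 10.0.0.15
```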

View File

@@ -1,4 +1,3 @@
---
- hosts: postgres - hosts: postgres
roles: roles:
- postgres - postgres

View File

@@ -1,7 +1,7 @@
# defaults file for artifactory # defaults file for artifactory
# The version of artifactory to install # The version of artifactory to install
artifactory_version: 7.25.7 artifactory_version: 7.27.6
# Set this to true when SSL is enabled (to use the artifactory_nginx_ssl role); defaults to false (implies artifactory uses the artifactory_nginx role) # Set this to true when SSL is enabled (to use the artifactory_nginx_ssl role); defaults to false (implies artifactory uses the artifactory_nginx role)
artifactory_nginx_ssl_enabled: false artifactory_nginx_ssl_enabled: false
@@ -9,10 +9,7 @@ artifactory_nginx_ssl_enabled: false
# Set this to false when nginx is disabled, defaults to true (implies artifactory uses the artifactory_nginx role) # Set this to false when nginx is disabled, defaults to true (implies artifactory uses the artifactory_nginx role)
artifactory_nginx_enabled: true artifactory_nginx_enabled: true
# Provide single node license # Provide single-node or HA individual licenses separated by a new line with 2-space indentation; for HA, also set artifactory_ha_enabled: true.
# artifactory_single_license:
# Provide individual (HA) licenses file separated by new line and 2-space indentation and set artifactory_ha_enabled: true.
# Example: Replace <license_1> , <license_2> , <license_3> with original licenses # Example: Replace <license_1> , <license_2> , <license_3> with original licenses
# artifactory_licenses: |- # artifactory_licenses: |-
# <license_1> # <license_1>
@@ -27,6 +24,9 @@ artifactory_ha_enabled: false
# By default, all nodes are primary (CNHA) - https://www.jfrog.com/confluence/display/JFROG/High+Availability#HighAvailability-Cloud-NativeHighAvailability # By default, all nodes are primary (CNHA) - https://www.jfrog.com/confluence/display/JFROG/High+Availability#HighAvailability-Cloud-NativeHighAvailability
artifactory_taskaffinity: any artifactory_taskaffinity: any
# To enable mission-control in artifactory (>= 7.27.x); applicable only with an E+ license
artifactory_mc_enabled: true
# The location where Artifactory should install # The location where Artifactory should install
jfrog_home_directory: /opt/jfrog jfrog_home_directory: /opt/jfrog
@@ -43,7 +43,7 @@ artifactory_untar_home: "{{ jfrog_home_directory }}/artifactory-{{ artifactory_f
# Timeout in seconds for URL request # Timeout in seconds for URL request
artifactory_download_timeout: 10 artifactory_download_timeout: 10
postgres_driver_version: 42.2.23 postgres_driver_version: 42.2.24
postgres_driver_download_url: https://repo1.maven.org/maven2/org/postgresql/postgresql/{{ postgres_driver_version }}/postgresql-{{ postgres_driver_version }}.jar postgres_driver_download_url: https://repo1.maven.org/maven2/org/postgresql/postgresql/{{ postgres_driver_version }}/postgresql-{{ postgres_driver_version }}.jar
artifactory_user: artifactory artifactory_user: artifactory
@@ -88,6 +88,8 @@ artifactory_systemyaml: |-
url: "{{ artifactory_db_url }}" url: "{{ artifactory_db_url }}"
username: "{{ artifactory_db_user }}" username: "{{ artifactory_db_user }}"
password: "{{ artifactory_db_password }}" password: "{{ artifactory_db_password }}"
mc:
enabled: {{ artifactory_mc_enabled }}
router: router:
entrypoints: entrypoints:
internalPort: 8046 internalPort: 8046

View File

@@ -1,5 +1,13 @@
- name: Install prerequisite packages - name: Install prerequisite packages
become: yes become: yes
yum: yum:
name: net-tools name: ['net-tools', '{{ selinux_policy_package }}']
state: present state: present
- name: Configure SELinux context
become: yes
sefcontext:
target: "{{ jfrog_home_directory }}/artifactory/app/bin(/.*)?"
setype: bin_t
state: present
when: ansible_selinux.status == 'enabled'

View File

@@ -1,3 +1,14 @@
- name: Include distro specific variables
include_vars: "{{ distro_vars_file }}"
vars:
distro_vars_file: "{{ lookup('first_found', distro_vars, errors='ignore') }}"
distro_vars:
files:
- "vars/distro/{{ ansible_distribution ~ ansible_distribution_major_version }}.yml"
- "vars/distro/{{ ansible_distribution }}.yml"
- "vars/distro/{{ ansible_os_family }}.yml"
- "vars/distro/default.yml"
- name: Install prerequisite packages - name: Install prerequisite packages
include_tasks: "{{ ansible_os_family }}.yml" include_tasks: "{{ ansible_os_family }}.yml"
@@ -53,10 +64,10 @@
unarchive: unarchive:
src: "{{ jfrog_home_directory }}/{{ artifactory_tar_file_name }}" src: "{{ jfrog_home_directory }}/{{ artifactory_tar_file_name }}"
dest: "{{ jfrog_home_directory }}" dest: "{{ jfrog_home_directory }}"
remote_src: true
owner: "{{ artifactory_user }}" owner: "{{ artifactory_user }}"
group: "{{ artifactory_group }}" group: "{{ artifactory_group }}"
creates: "{{ artifactory_untar_home }}" creates: "{{ artifactory_untar_home }}"
remote_src: true
when: download_artifactory is succeeded when: download_artifactory is succeeded
- name: Check if app directory exists - name: Check if app directory exists
@@ -70,6 +81,9 @@
copy: copy:
src: "{{ artifactory_untar_home }}/" src: "{{ artifactory_untar_home }}/"
dest: "{{ artifactory_home }}" dest: "{{ artifactory_home }}"
owner: "{{ artifactory_user }}"
group: "{{ artifactory_group }}"
mode: 0755
remote_src: yes remote_src: yes
when: not app_dir_check.stat.exists when: not app_dir_check.stat.exists
@@ -133,18 +147,7 @@
- artifactory_binarystore | length > 0 - artifactory_binarystore | length > 0
notify: restart artifactory notify: restart artifactory
- name: Configure single license - name: Configure artifactory license(s)
become: yes
template:
src: artifactory.lic.j2
dest: "{{ artifactory_home }}/var/etc/artifactory/artifactory.lic"
mode: 0644
when:
- artifactory_single_license is defined
- artifactory_single_license|length > 0
notify: restart artifactory
- name: Configure HA licenses
become: yes become: yes
template: template:
src: artifactory.cluster.license.j2 src: artifactory.cluster.license.j2
@@ -173,6 +176,12 @@
- not database_driver.stat.exists - not database_driver.stat.exists
notify: restart artifactory notify: restart artifactory
- name: Run restore context to reload selinux
become: yes
shell: |
restorecon -R -v "{{ jfrog_home_directory }}/artifactory/app/bin"
when: ansible_distribution == 'RedHat'
- name: Create artifactory service - name: Create artifactory service
become: yes become: yes
command: "{{ artifactory_home }}/app/bin/installService.sh" command: "{{ artifactory_home }}/app/bin/installService.sh"

View File

@@ -1,4 +1,3 @@
---
- name: Check if artifactory tar already exists - name: Check if artifactory tar already exists
become: yes become: yes
stat: stat:
@@ -27,12 +26,13 @@
creates: "{{ artifactory_untar_home }}" creates: "{{ artifactory_untar_home }}"
when: download_artifactory is succeeded when: download_artifactory is succeeded
- name: stop artifactory - name: Stop artifactory
meta: flush_handlers meta: flush_handlers
- name: Ensure jfrog_home_directory exists - name: Ensure jfrog_home_directory exists
become: yes become: yes
file: file:
mode: 0755
path: "{{ jfrog_home_directory }}" path: "{{ jfrog_home_directory }}"
state: directory state: directory
@@ -70,13 +70,13 @@
- artifactory_licenses | length > 0 - artifactory_licenses | length > 0
notify: restart artifactory notify: restart artifactory
- name: Check if database driver exists - name: Check if jdbc driver exists
become: yes become: yes
stat: stat:
path: "{{ artifactory_home }}/app/artifactory/tomcat/lib/jf_postgresql-{{ postgres_driver_version }}.jar" path: "{{ artifactory_home }}/app/artifactory/tomcat/lib/jf_postgresql-{{ postgres_driver_version }}.jar"
register: database_driver register: database_driver
- name: Download database driver - name: Download jdbc driver
become: yes become: yes
get_url: get_url:
url: "{{ postgres_driver_download_url }}" url: "{{ postgres_driver_download_url }}"
@@ -107,13 +107,13 @@
- artifactory_binarystore | length > 0 - artifactory_binarystore | length > 0
notify: restart artifactory notify: restart artifactory
- name: Check if systemyaml exists - name: Check if system.yaml exists
become: yes become: yes
stat: stat:
path: "{{ artifactory_home }}/var/etc/system.yaml" path: "{{ artifactory_home }}/var/etc/system.yaml"
register: systemyaml register: systemyaml
- name: Configure systemyaml - name: Configure system.yaml
become: yes become: yes
template: template:
src: "{{ artifactory_system_yaml_template }}" src: "{{ artifactory_system_yaml_template }}"

View File

@@ -1 +0,0 @@
{{ artifactory_single_license }}

View File

@@ -0,0 +1 @@
selinux_policy_package: policycoreutils-python

View File

@@ -0,0 +1 @@
selinux_policy_package: policycoreutils-python

View File

@@ -0,0 +1 @@
selinux_policy_package: python3-policycoreutils

View File

@@ -1,5 +1,5 @@
# platform collection version # platform collection version
platform_collection_version: 7.25.7 platform_collection_version: 10.0.1
# indicates where this collection was downloaded from (galaxy, automation_hub, standalone) # indicates where this collection was downloaded from (galaxy, automation_hub, standalone)
ansible_marketplace: galaxy ansible_marketplace: galaxy

View File

@@ -1,7 +1,7 @@
# defaults file for distribution # defaults file for distribution
# The version of distribution to install # The version of distribution to install
distribution_version: 2.9.2 distribution_version: 2.9.3
# whether to enable HA # whether to enable HA
distribution_ha_enabled: false distribution_ha_enabled: false

View File

@@ -39,10 +39,10 @@
unarchive: unarchive:
src: "{{ jfrog_home_directory }}/{{ distribution_tar_file_name }}" src: "{{ jfrog_home_directory }}/{{ distribution_tar_file_name }}"
dest: "{{ jfrog_home_directory }}" dest: "{{ jfrog_home_directory }}"
remote_src: true
owner: "{{ distribution_user }}" owner: "{{ distribution_user }}"
group: "{{ distribution_group }}" group: "{{ distribution_group }}"
creates: "{{ distribution_untar_home }}" creates: "{{ distribution_untar_home }}"
remote_src: true
when: download_distribution is succeeded when: download_distribution is succeeded
- name: Check if app directory exists - name: Check if app directory exists
@@ -56,6 +56,9 @@
copy: copy:
src: "{{ distribution_untar_home }}/" src: "{{ distribution_untar_home }}/"
dest: "{{ distribution_home }}" dest: "{{ distribution_home }}"
owner: "{{ distribution_user }}"
group: "{{ distribution_group }}"
mode: 0755
remote_src: yes remote_src: yes
when: not app_dir_check.stat.exists when: not app_dir_check.stat.exists

View File

@@ -1,5 +1,5 @@
# platform collection version # platform collection version
platform_collection_version: 7.25.7 platform_collection_version: 10.0.1
# indicates where this collection was downloaded from (galaxy, automation_hub, standalone) # indicates where this collection was downloaded from (galaxy, automation_hub, standalone)
ansible_marketplace: galaxy ansible_marketplace: galaxy

View File

@@ -0,0 +1,26 @@
# Insight
The insight role will install the Insight software onto the host. An Artifactory server and a PostgreSQL database are required.
### Role Variables
* _insight_upgrade_only_: Perform a software upgrade only. Default is false.
Additional variables can be found in [defaults/main.yml](./defaults/main.yml).
## Example Playbook
```
---
- hosts: insight_servers
roles:
- insight
```
## Upgrades
The insight role supports software upgrades. To perform a software upgrade only, set the _insight_upgrade_only_ variable to true and specify the version, as in the following example.
```
- hosts: insight_servers
vars:
insight_version: "{{ lookup('env', 'insight_version_upgrade') }}"
insight_upgrade_only: true
roles:
- insight
```

View File

@@ -0,0 +1,87 @@
# defaults file for insight
# The version of insight to install
insight_version: 1.0.1
# whether to enable HA
insight_ha_enabled: false
insight_ha_node_type: master
# The location where insight should install
jfrog_home_directory: /opt/jfrog
# The remote insight download file
insight_tar_file_name: jfrog-insight-{{ insight_version }}-linux.tar.gz
insight_tar: https://releases.jfrog.io/artifactory/jfrog-insight/linux/{{ insight_version }}/{{ insight_tar_file_name }}
# Timeout in seconds for URL request
insight_download_timeout: 10
#The insight install directory
insight_untar_home: "{{ jfrog_home_directory }}/jfrog-insight-{{ insight_version }}-linux"
insight_home: "{{ jfrog_home_directory }}/insight"
insight_install_script_path: "{{ insight_home }}/app/bin"
insight_thirdparty_path: "{{ insight_home }}/app/third-party"
insight_archive_service_cmd: "{{ insight_install_script_path }}/installService.sh"
insight_service_file: /lib/systemd/system/insight.service
#insight users and groups
insight_user: insight
insight_group: insight
insight_uid: 1040
insight_gid: 1040
insight_daemon: insight
# Insight ElasticSearch Details
es_uid: 1060
es_gid: 1060
insight_es_conf_base: "/etc/elasticsearch"
insight_es_user: admin
insight_es_password: admin
insight_es_url: "http://localhost:9200"
insight_es_transport_port: 9300
insight_es_home: "/usr/share/elasticsearch"
insight_es_data_dir: "/var/lib/elasticsearch"
insight_es_log_dir: "/var/log/elasticsearch"
insight_es_java_home: "/usr/share/elasticsearch/jdk"
insight_es_script_path: "/usr/share/elasticsearch/bin"
insight_es_searchgaurd_home: "/usr/share/elasticsearch/plugins/search-guard-7"
# if this is an upgrade
insight_upgrade_only: false
insight_system_yaml_template: system.yaml.j2
# Provide systemyaml content below with 2-space indentation
insight_systemyaml: |-
configVersion: 1
shared:
jfrogUrl: {{ jfrog_url }}
node:
ip: {{ ansible_host }}
id: {{ ansible_hostname }}
database:
type: "{{ insight_db_type }}"
driver: "{{ insight_db_driver }}"
url: "{{ insight_db_url }}"
username: "{{ insight_db_user }}"
elasticsearch:
unicastFile: {{ insight_es_conf_base }}/config/unicast_hosts.txt
password: {{ insight_es_password }}
url: {{ insight_es_url }}
username: {{ insight_es_user }}
external: true
security:
joinKey: {{ join_key }}
router:
entrypoints:
internalPort: 8046
# Note: insight_systemyaml_override is false by default; set it to true to overwrite an existing system.yaml with the insight_systemyaml content above
insight_systemyaml_override: false
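A hypothetical group_vars sketch showing how the override flag could be used to replace an existing system.yaml (the content below is illustrative only, not a recommended configuration):
```
insight_systemyaml_override: true
insight_systemyaml: |-
  configVersion: 1
  shared:
    jfrogUrl: {{ jfrog_url }}
    security:
      joinKey: {{ join_key }}
```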

View File

@@ -0,0 +1,13 @@
---
# handlers file for insight
- name: restart insight
become: yes
systemd:
name: "{{ insight_daemon }}"
state: restarted
- name: stop insight
become: yes
systemd:
name: "{{ insight_daemon }}"
state: stopped

View File

@@ -3,7 +3,7 @@ dependencies: []
galaxy_info: galaxy_info:
author: "JFrog Maintainers Team <installers@jfrog.com>" author: "JFrog Maintainers Team <installers@jfrog.com>"
description: "The missioncontrol role will install missioncontrol software onto the host. An Artifactory server and Postgress database is required." description: "The insight role will install insight software onto the host. An Artifactory server and Postgress database are required."
company: JFrog company: JFrog
issue_tracker_url: "https://github.com/jfrog/JFrog-Cloud-Installers/issues" issue_tracker_url: "https://github.com/jfrog/JFrog-Cloud-Installers/issues"
license: license (Apache-2.0) license: license (Apache-2.0)
@@ -23,5 +23,5 @@ galaxy_info:
- stretch - stretch
- buster - buster
galaxy_tags: galaxy_tags:
- missioncontrol - insight
- jfrog - jfrog

View File

@@ -1,7 +1,7 @@
- name: Install prerequisite packages - name: Install prerequisite packages
become: yes become: yes
apt: apt:
name: ["expect", "locales"] name: ["expect", "locales", "acl"]
state: present state: present
update_cache: yes update_cache: yes
cache_valid_time: 3600 cache_valid_time: 3600

View File

@@ -1,5 +1,5 @@
- name: Install prerequisite packages - name: Install prerequisite packages
become: yes become: yes
yum: yum:
name: expect name: ["expect", "acl"]
state: present state: present

View File

@@ -0,0 +1,170 @@
- name: Install prerequisite packages
include_tasks: "{{ ansible_os_family }}.yml"
- name: Ensure group insight exist
become: yes
group:
name: "{{ insight_group }}"
state: present
- name: Ensure user insight exist
become: yes
user:
name: "{{ insight_user }}"
group: "{{ insight_group }}"
create_home: yes
home: "{{ insight_home }}"
shell: /bin/bash
state: present
- name: Check if insight tar already exists
become: yes
stat:
path: "{{ jfrog_home_directory }}/{{ insight_tar_file_name }}"
register: insight_tar_check
- name: Download insight
become: yes
get_url:
url: "{{ insight_tar }}"
timeout: "{{ insight_download_timeout }}"
dest: "{{ jfrog_home_directory }}"
register: download_insight
until: download_insight is succeeded
retries: 3
when: not insight_tar_check.stat.exists
- name: Extract insight tar
become: yes
unarchive:
src: "{{ jfrog_home_directory }}/{{ insight_tar_file_name }}"
dest: "{{ jfrog_home_directory }}"
owner: "{{ insight_user }}"
group: "{{ insight_group }}"
creates: "{{ insight_untar_home }}"
remote_src: true
when: download_insight is succeeded
- name: Check if app directory exists
become: yes
stat:
path: "{{ insight_home }}/app"
register: app_dir_check
- name: Copy untar directory to insight home
become: yes
copy:
src: "{{ insight_untar_home }}/"
dest: "{{ insight_home }}"
owner: "{{ insight_user }}"
group: "{{ insight_group }}"
mode: 0755
remote_src: yes
when: not app_dir_check.stat.exists
- name: Create required directories
become: yes
file:
path: "{{ item }}"
state: directory
recurse: yes
owner: "{{ insight_user }}"
group: "{{ insight_group }}"
loop:
- "{{ insight_home }}/var/etc"
- "{{ insight_home }}/var/etc/security/"
- "{{ insight_home }}/var/etc/info/"
- name: Configure master key
become: yes
copy:
dest: "{{ insight_home }}/var/etc/security/master.key"
content: "{{ master_key }}"
owner: "{{ insight_user }}"
group: "{{ insight_group }}"
mode: 0640
- name: Setup elasticsearch
import_tasks: setup-elasticsearch.yml
- name: Check if install.sh wrapper script exist
become: yes
stat:
path: "{{ insight_install_script_path }}/install.sh"
register: install_wrapper_script
- name: Include interactive installer scripts
include_vars: script/archive.yml
- name: Install Insight
include_tasks: expect.yml
vars:
exp_executable_cmd: "./install.sh -u {{ insight_user }} -g {{ insight_group }}"
exp_dir: "{{ insight_install_script_path }}"
exp_scenarios: "{{ insight_installer_scenario['main'] }}"
args:
apply:
environment:
YQ_PATH: "{{ insight_thirdparty_path }}/yq"
when: install_wrapper_script.stat.exists
- name: Configure installer info
become: yes
template:
src: installer-info.json.j2
dest: "{{ insight_home }}/var/etc/info/installer-info.json"
owner: "{{ insight_user }}"
group: "{{ insight_group }}"
mode: 0644
notify: restart insight
- name: Check if system.yaml exists
become: yes
stat:
path: "{{ insight_home }}/var/etc/system.yaml"
register: systemyaml
- name: Configure system.yaml
become: yes
template:
src: "{{ insight_system_yaml_template }}"
dest: "{{ insight_home }}/var/etc/system.yaml"
owner: "{{ insight_user }}"
group: "{{ insight_group }}"
mode: 0644
when:
- insight_systemyaml is defined
- insight_systemyaml | length > 0
- insight_systemyaml_override or (not systemyaml.stat.exists)
notify: restart insight
- name: Update correct permissions
become: yes
file:
path: "{{ insight_home }}"
state: directory
recurse: yes
owner: "{{ insight_user }}"
group: "{{ insight_group }}"
- name: Install insight as a service
become: yes
command: "{{ insight_archive_service_cmd }}"
args:
chdir: "{{ insight_install_script_path }}"
creates: "{{ insight_service_file }}"
register: check_service_status_result
- name: Restart insight
meta: flush_handlers
- name: Make sure insight is up and running
uri:
url: http://127.0.0.1:8082/router/api/v1/system/health
timeout: 130
status_code: 200
register: result
until: result is succeeded
retries: 25
delay: 5
when: not ansible_check_mode

View File

@@ -1,11 +1,11 @@
- name: Perform installation - name: Perform installation
include_tasks: "install.yml" include_tasks: "install.yml"
when: when:
- mc_enabled - insight_enabled
- not mc_upgrade_only - not insight_upgrade_only
- name: Perform upgrade - name: Perform upgrade
include_tasks: "upgrade.yml" include_tasks: "upgrade.yml"
when: when:
- mc_enabled - insight_enabled
- mc_upgrade_only - insight_upgrade_only

View File

@@ -10,7 +10,7 @@
name: elasticsearch name: elasticsearch
group: elasticsearch group: elasticsearch
create_home: yes create_home: yes
home: "{{ mc_es_home }}" home: "{{ insight_es_home }}"
shell: /bin/bash shell: /bin/bash
state: present state: present
@@ -22,11 +22,12 @@
recurse: yes recurse: yes
owner: elasticsearch owner: elasticsearch
group: elasticsearch group: elasticsearch
mode: 0644
loop: loop:
- "{{ mc_es_conf_base }}" - "{{ insight_es_conf_base }}"
- "{{ mc_es_data_dir }}" - "{{ insight_es_data_dir }}"
- "{{ mc_es_log_dir }}" - "{{ insight_es_log_dir }}"
- "{{ mc_es_home }}" - "{{ insight_es_home }}"
- name: Set max file descriptors limit - name: Set max file descriptors limit
become: yes become: yes
@@ -46,7 +47,6 @@
- name: Set vm.max_map_count in /etc/sysctl.conf - name: Set vm.max_map_count in /etc/sysctl.conf
become: yes become: yes
ignore_errors: yes
sysctl: sysctl:
name: vm.max_map_count name: vm.max_map_count
value: '262144' value: '262144'
@@ -55,7 +55,7 @@
- name: Find elasticsearch package - name: Find elasticsearch package
become: yes become: yes
find: find:
paths: "{{ mc_home }}/app/third-party/elasticsearch" paths: "{{ insight_home }}/app/third-party/elasticsearch"
patterns: "^elasticsearch-.+\\.tar.gz$" patterns: "^elasticsearch-.+\\.tar.gz$"
use_regex: yes use_regex: yes
file_type: file file_type: file
@@ -63,53 +63,49 @@
- name: Set elasticsearch package file name - name: Set elasticsearch package file name
set_fact: set_fact:
mc_elasticsearch_package: "{{ check_elasticsearch_package_result.files[0].path }}" insight_elasticsearch_package: "{{ check_elasticsearch_package_result.files[0].path }}"
when: check_elasticsearch_package_result.matched > 0 when: check_elasticsearch_package_result.matched > 0
- name: Ensure elasticsearch home exists - name: Ensure elasticsearch home exists
become: yes become: yes
file: file:
path: "{{ mc_es_home }}" path: "{{ insight_es_home }}"
state: directory state: directory
owner: elasticsearch owner: elasticsearch
group: elasticsearch group: elasticsearch
mode: 0644
- name: Extract elasticsearch package - name: Extract elasticsearch package
become: yes become: yes
ignore_errors: yes
unarchive: unarchive:
src: "{{ mc_elasticsearch_package }}" src: "{{ insight_elasticsearch_package }}"
dest: "{{ mc_es_home }}" dest: "{{ insight_es_home }}"
remote_src: yes remote_src: yes
extra_opts: extra_opts:
- --strip-components=1 - --strip-components=1
owner: elasticsearch owner: elasticsearch
group: elasticsearch group: elasticsearch
creates: "{{ mc_es_java_home }}" creates: "{{ insight_es_java_home }}"
register: unarchive_result register: unarchive_result
when: check_elasticsearch_package_result.matched > 0 when: check_elasticsearch_package_result.matched > 0
- name: Copy elasticsearch config files to ES_PATH_CONF dir - name: Copy elasticsearch config files to ES_PATH_CONF dir
become: yes become: yes
copy: command: "cp -r {{ insight_es_home }}/config/. {{ insight_es_conf_base }}/"
src: "{{ mc_es_home }}/config/"
dest: "{{ mc_es_conf_base }}"
remote_src: yes
when: unarchive_result.changed when: unarchive_result.changed
- name: Remove elasticsearch config dir - name: Remove elasticsearch config dir
become: yes become: yes
file: file:
path: "{{ mc_es_home }}/config" path: "{{ insight_es_home }}/config"
state: absent state: absent
when: unarchive_result.changed when: unarchive_result.changed
- name: Generate HA elasticsearch.yml template file - name: Generate HA elasticsearch.yml template file
become: yes become: yes
ignore_errors: yes
template: template:
src: templates/ha/{{ mc_ha_node_type }}.elasticsearch.yml.j2 src: templates/ha/{{ insight_ha_node_type }}.elasticsearch.yml.j2
dest: "{{ mc_es_conf_base }}/elasticsearch.yml" dest: "{{ insight_es_conf_base }}/elasticsearch.yml"
owner: elasticsearch owner: elasticsearch
group: elasticsearch group: elasticsearch
mode: 0644 mode: 0644
@@ -119,7 +115,7 @@
become: yes become: yes
template: template:
src: templates/elasticsearch.yml.j2 src: templates/elasticsearch.yml.j2
dest: "{{ mc_es_conf_base }}/elasticsearch.yml" dest: "{{ insight_es_conf_base }}/elasticsearch.yml"
owner: elasticsearch owner: elasticsearch
group: elasticsearch group: elasticsearch
mode: 0644 mode: 0644
@@ -128,11 +124,11 @@
- name: Create empty unicast_hosts.txt file - name: Create empty unicast_hosts.txt file
become: yes become: yes
file: file:
path: "{{ mc_es_conf_base }}/unicast_hosts.txt" path: "{{ insight_es_conf_base }}/unicast_hosts.txt"
state: touch state: touch
mode: 0664
owner: elasticsearch owner: elasticsearch
group: elasticsearch group: elasticsearch
mode: 0664
- name: Setup searchguard plugin - name: Setup searchguard plugin
import_tasks: setup-searchguard.yml import_tasks: setup-searchguard.yml
@@ -145,18 +141,21 @@
recurse: yes recurse: yes
owner: elasticsearch owner: elasticsearch
group: elasticsearch group: elasticsearch
mode: 0755
loop: loop:
- "{{ mc_es_conf_base }}" - "{{ insight_es_conf_base }}"
- "{{ mc_es_data_dir }}" - "{{ insight_es_data_dir }}"
- "{{ mc_es_log_dir }}" - "{{ insight_es_log_dir }}"
- "{{ mc_es_home }}" - "{{ insight_es_home }}"
- name: Start elasticsearch - name: Start elasticsearch
become: yes become: yes
command: "su -c '{{ mc_es_script_path }}/elasticsearch -d' elasticsearch" become_user: elasticsearch
shell: |
nohup {{ insight_es_script_path }}/elasticsearch -d
environment: environment:
ES_JAVA_HOME: "{{ mc_es_java_home }}" ES_JAVA_HOME: "{{ insight_es_java_home }}"
ES_PATH_CONF: "{{ mc_es_conf_base }}/" ES_PATH_CONF: "{{ insight_es_conf_base }}/"
register: start_elasticsearch register: start_elasticsearch
when: unarchive_result.extract_results.rc | default(128) == 0 when: unarchive_result.extract_results.rc | default(128) == 0
@@ -168,18 +167,19 @@
- name: Check if elasticsearch is running - name: Check if elasticsearch is running
wait_for: wait_for:
host: localhost host: localhost
port: "{{ mc_es_transport_port }}" port: "{{ insight_es_transport_port }}"
delay: 5 delay: 5
connect_timeout: 1 connect_timeout: 1
- name: Init searchguard plugin - name: Init searchguard plugin
become: yes become: yes
become_user: elasticsearch
shell: | shell: |
./sgadmin.sh -p {{ mc_es_transport_port }} -cacert root-ca.pem \ ./sgadmin.sh -p {{ insight_es_transport_port }} -cacert root-ca.pem \
-cert sgadmin.pem -key sgadmin.key -cd {{ mc_es_searchgaurd_home }}/sgconfig/ -nhnv -icl -cert sgadmin.pem -key sgadmin.key -cd {{ insight_es_searchgaurd_home }}/sgconfig/ -nhnv -icl
args: args:
chdir: "{{ mc_es_searchgaurd_home }}/tools/" chdir: "{{ insight_es_searchgaurd_home }}/tools/"
environment: environment:
JAVA_HOME: "{{ mc_es_java_home }}" JAVA_HOME: "{{ insight_es_java_home }}"
register: install_searchguard_result register: install_searchguard_result
when: check_searchguard_bundle_result.matched == 1 when: check_searchguard_bundle_result.matched == 1

View File

@@ -1,11 +1,11 @@
- name: Copy elasticsearch cert files - name: Copy elasticsearch cert files
become: yes become: yes
copy: copy:
mode: 0600
src: "files/searchguard/{{ item }}" src: "files/searchguard/{{ item }}"
dest: "{{ mc_es_conf_base }}/{{ item }}" dest: "{{ insight_es_conf_base }}/{{ item }}"
owner: elasticsearch owner: elasticsearch
group: elasticsearch group: elasticsearch
mode: 0600
loop: loop:
- "localhost.pem" - "localhost.pem"
- "localhost.key" - "localhost.key"
@@ -14,7 +14,7 @@
- name: Find searchguard bundle - name: Find searchguard bundle
become: yes become: yes
find: find:
paths: "{{ mc_home }}/app/third-party/elasticsearch/" paths: "{{ insight_home }}/app/third-party/elasticsearch/"
patterns: "^search-guard-.+\\.zip$" patterns: "^search-guard-.+\\.zip$"
use_regex: yes use_regex: yes
file_type: file file_type: file
@@ -24,22 +24,22 @@
become: yes become: yes
ignore_errors: yes ignore_errors: yes
shell: | shell: |
{{ mc_es_script_path }}/elasticsearch-plugin install \ {{ insight_es_script_path }}/elasticsearch-plugin install \
-b file://{{ check_searchguard_bundle_result.files[0].path }} -b file://{{ check_searchguard_bundle_result.files[0].path }}
environment: environment:
ES_JAVA_HOME: "{{ mc_es_java_home }}" ES_JAVA_HOME: "{{ insight_es_java_home }}"
ES_PATH_CONF: "{{ mc_es_conf_base }}/" ES_PATH_CONF: "{{ insight_es_conf_base }}/"
register: install_searchguard_result register: install_searchguard_result
when: check_searchguard_bundle_result.matched == 1 when: check_searchguard_bundle_result.matched == 1
- name: Copy searchguard certificate files - name: Copy searchguard certificate files
become: yes become: yes
copy: copy:
mode: 0600
src: "files/searchguard/{{ item }}" src: "files/searchguard/{{ item }}"
dest: "{{ mc_es_searchgaurd_home }}/tools/{{ item }}" dest: "{{ insight_es_searchgaurd_home }}/tools/{{ item }}"
owner: elasticsearch owner: elasticsearch
group: elasticsearch group: elasticsearch
mode: 0600
loop: loop:
- "sgadmin.pem" - "sgadmin.pem"
- "sgadmin.key" - "sgadmin.key"
@@ -48,11 +48,11 @@
- name: Copy SG roles files - name: Copy SG roles files
become: yes become: yes
copy: copy:
mode: 0600
src: "files/searchguard/{{ item }}" src: "files/searchguard/{{ item }}"
dest: "{{ mc_es_searchgaurd_home }}/sgconfig/{{ item }}" dest: "{{ insight_es_searchgaurd_home }}/sgconfig/{{ item }}"
owner: elasticsearch owner: elasticsearch
group: elasticsearch group: elasticsearch
mode: 0600
loop: loop:
- "sg_roles.yml" - "sg_roles.yml"
- "sg_roles_mapping.yml" - "sg_roles_mapping.yml"
@@ -61,7 +61,7 @@
- name: Check execution bit - name: Check execution bit
become: yes become: yes
file: file:
path: "{{ mc_es_searchgaurd_home }}/tools/sgadmin.sh" path: "{{ insight_es_searchgaurd_home }}/tools/sgadmin.sh"
owner: elasticsearch owner: elasticsearch
group: elasticsearch group: elasticsearch
mode: 0700 mode: 0700

View File

@@ -1,19 +1,21 @@
- name: Kill elasticsearch process - name: Kill elasticsearch process
become: yes become: yes
ignore_errors: yes
shell: | shell: |
set -o pipefail
ps -ef | grep -v grep | grep -w elasticsearch | awk '{print $2}' | while read curr_ps_id ps -ef | grep -v grep | grep -w elasticsearch | awk '{print $2}' | while read curr_ps_id
do do
echo "process ${curr_ps_id} still running" echo "process ${curr_ps_id} still running"
echo "$(ps -ef | grep -v grep | grep ${curr_ps_id})" echo "$(ps -ef | grep -v grep | grep ${curr_ps_id})"
kill -9 ${curr_ps_id} kill -9 ${curr_ps_id}
done done
args:
executable: /bin/bash
changed_when: false changed_when: false
- name: Find searchguard bundle for removal - name: Find searchguard bundle for removal
become: yes become: yes
find: find:
paths: "{{ mc_home }}/app/third-party/elasticsearch/" paths: "{{ insight_home }}/app/third-party/elasticsearch/"
patterns: "^search-guard-.+\\.zip$" patterns: "^search-guard-.+\\.zip$"
use_regex: yes use_regex: yes
file_type: file file_type: file
@@ -24,23 +26,23 @@
become_user: elasticsearch become_user: elasticsearch
ignore_errors: yes ignore_errors: yes
shell: | shell: |
{{ mc_es_script_path }}/elasticsearch-plugin remove {{ check_searchguard_bundle_result.files[0].path }} {{ insight_es_script_path }}/elasticsearch-plugin remove {{ check_searchguard_bundle_result.files[0].path }}
environment: environment:
ES_JAVA_HOME: "{{ mc_es_java_home }}" ES_JAVA_HOME: "{{ insight_es_java_home }}"
ES_PATH_CONF: "{{ mc_es_conf_base }}/config" ES_PATH_CONF: "{{ insight_es_conf_base }}/config"
register: remove_searchguard_result register: remove_searchguard_result
when: check_searchguard_bundle_result.matched == 1 when: check_searchguard_bundle_result.matched == 1
- name: Delete elasticsearch home dir - name: Delete elasticsearch home dir
become: yes become: yes
file: file:
path: "{{ mc_es_home }}" path: "{{ insight_es_home }}"
state: absent state: absent
- name: Create elasticsearch home dir - name: Create elasticsearch home dir
become: yes become: yes
file: file:
path: "{{ mc_es_home }}" path: "{{ insight_es_home }}"
state: directory state: directory
owner: elasticsearch owner: elasticsearch
group: elasticsearch group: elasticsearch
@@ -49,7 +51,7 @@
- name: Find elasticsearch package - name: Find elasticsearch package
become: yes become: yes
find: find:
paths: "{{ mc_home }}/app/third-party/elasticsearch" paths: "{{ insight_home }}/app/third-party/elasticsearch"
patterns: "^elasticsearch-.+\\.tar.gz$" patterns: "^elasticsearch-.+\\.tar.gz$"
use_regex: yes use_regex: yes
file_type: file file_type: file
@@ -57,30 +59,29 @@
- name: Set elasticsearch package file name - name: Set elasticsearch package file name
set_fact: set_fact:
mc_elasticsearch_package: "{{ check_elasticsearch_package_result.files[0].path }}" insight_elasticsearch_package: "{{ check_elasticsearch_package_result.files[0].path }}"
when: check_elasticsearch_package_result.matched > 0 when: check_elasticsearch_package_result.matched > 0
- name: Extract elasticsearch package - name: Extract elasticsearch package
become: yes become: yes
unarchive: unarchive:
src: "{{ mc_elasticsearch_package }}" src: "{{ insight_elasticsearch_package }}"
dest: "{{ mc_es_home }}" dest: "{{ insight_es_home }}"
remote_src: yes remote_src: yes
extra_opts: extra_opts:
- --strip-components=1 - --strip-components=1
- --exclude=config - --exclude=config
owner: elasticsearch owner: elasticsearch
group: elasticsearch group: elasticsearch
creates: "{{ mc_es_java_home }}" creates: "{{ insight_es_java_home }}"
register: unarchive_result register: unarchive_result
when: check_elasticsearch_package_result.matched > 0 when: check_elasticsearch_package_result.matched > 0
- name: Generate HA elasticsearch.yml template file - name: Generate HA elasticsearch.yml template file
become: yes become: yes
ignore_errors: yes
template: template:
src: templates/ha/{{ mc_ha_node_type }}.elasticsearch.yml.j2 src: templates/ha/{{ insight_ha_node_type }}.elasticsearch.yml.j2
dest: "{{ mc_es_conf_base }}/elasticsearch.yml" dest: "{{ insight_es_conf_base }}/elasticsearch.yml"
owner: elasticsearch owner: elasticsearch
group: elasticsearch group: elasticsearch
mode: 0644 mode: 0644
@@ -89,7 +90,7 @@
- name: Create empty unicast_hosts.txt file - name: Create empty unicast_hosts.txt file
become: yes become: yes
file: file:
path: "{{ mc_es_conf_base }}/unicast_hosts.txt" path: "{{ insight_es_conf_base }}/unicast_hosts.txt"
state: touch state: touch
owner: elasticsearch owner: elasticsearch
group: elasticsearch group: elasticsearch
@@ -100,12 +101,14 @@
- name: Start elasticsearch - name: Start elasticsearch
become: yes become: yes
command: "su -c '{{ mc_es_script_path }}/elasticsearch -d' elasticsearch" become_user: elasticsearch
shell: |
nohup {{ insight_es_script_path }}/elasticsearch -d
environment: environment:
ES_JAVA_HOME: "{{ mc_es_java_home }}" ES_JAVA_HOME: "{{ insight_es_java_home }}"
ES_PATH_CONF: "{{ mc_es_conf_base }}/" ES_PATH_CONF: "{{ insight_es_conf_base }}/"
register: start_elastcsearch
when: unarchive_result.extract_results.rc | default(128) == 0 when: unarchive_result.extract_results.rc | default(128) == 0
register: start_elastcsearch
- name: Wait for elasticsearch to start - name: Wait for elasticsearch to start
pause: pause:
@@ -115,7 +118,7 @@
- name: Check if elasticsearch is running - name: Check if elasticsearch is running
wait_for: wait_for:
host: localhost host: localhost
port: "{{ mc_es_transport_port }}" port: "{{ insight_es_transport_port }}"
delay: 5 delay: 5
connect_timeout: 1 connect_timeout: 1
@@ -123,11 +126,11 @@
become: yes become: yes
become_user: elasticsearch become_user: elasticsearch
shell: | shell: |
./sgadmin.sh -p {{ mc_es_transport_port }} -cacert root-ca.pem \ ./sgadmin.sh -p {{ insight_es_transport_port }} -cacert root-ca.pem \
-cert sgadmin.pem -key sgadmin.key -cd {{ mc_es_searchgaurd_home }}/sgconfig/ -nhnv -icl -cert sgadmin.pem -key sgadmin.key -cd {{ insight_es_searchgaurd_home }}/sgconfig/ -nhnv -icl
args: args:
chdir: "{{ mc_es_searchgaurd_home }}/tools/" chdir: "{{ insight_es_searchgaurd_home }}/tools/"
environment: environment:
JAVA_HOME: "{{ mc_es_java_home }}" JAVA_HOME: "{{ insight_es_java_home }}"
register: install_searchguard_result register: install_searchguard_result
when: check_searchguard_bundle_result.matched == 1 when: check_searchguard_bundle_result.matched == 1

View File

@@ -1,20 +1,20 @@
- name: Create elasticsearch config path folder - name: Create elasticsearch config path folder
become: yes become: yes
file: file:
path: "{{ mc_es_conf_base }}" path: "{{ insight_es_conf_base }}"
state: directory state: directory
mode: 0755
owner: elasticsearch owner: elasticsearch
group: elasticsearch group: elasticsearch
mode: 0755
- name: Copy elasticsearch cert files - name: Copy elasticsearch cert files
become: yes become: yes
copy: copy:
mode: 0600
src: "files/searchguard/{{ item }}" src: "files/searchguard/{{ item }}"
dest: "{{ mc_es_conf_base }}/{{ item }}" dest: "{{ insight_es_conf_base }}/{{ item }}"
owner: elasticsearch owner: elasticsearch
group: elasticsearch group: elasticsearch
mode: 0600
loop: loop:
- "localhost.pem" - "localhost.pem"
- "localhost.key" - "localhost.key"
@@ -23,7 +23,7 @@
- name: Find searchguard bundle - name: Find searchguard bundle
become: yes become: yes
find: find:
paths: "{{ mc_home }}/app/third-party/elasticsearch/" paths: "{{ insight_home }}/app/third-party/elasticsearch/"
patterns: "^search-guard-.+\\.zip$" patterns: "^search-guard-.+\\.zip$"
use_regex: yes use_regex: yes
file_type: file file_type: file
@@ -33,22 +33,22 @@
become: yes become: yes
ignore_errors: yes ignore_errors: yes
shell: | shell: |
{{ mc_es_script_path }}/elasticsearch-plugin install \ {{ insight_es_script_path }}/elasticsearch-plugin install \
-b file://{{ check_searchguard_bundle_result.files[0].path }} -b file://{{ check_searchguard_bundle_result.files[0].path }}
environment: environment:
ES_JAVA_HOME: "{{ mc_es_java_home }}" ES_JAVA_HOME: "{{ insight_es_java_home }}"
ES_PATH_CONF: "{{ mc_es_conf_base }}/" ES_PATH_CONF: "{{ insight_es_conf_base }}/"
register: install_searchguard_result register: install_searchguard_result
when: check_searchguard_bundle_result.matched == 1 when: check_searchguard_bundle_result.matched == 1
- name: Copy searchguard cert files - name: Copy searchguard cert files
become: yes become: yes
copy: copy:
mode: 0600
src: "files/searchguard/{{ item }}" src: "files/searchguard/{{ item }}"
dest: "{{ mc_es_searchgaurd_home }}/tools/{{ item }}" dest: "{{ insight_es_searchgaurd_home }}/tools/{{ item }}"
owner: elasticsearch owner: elasticsearch
group: elasticsearch group: elasticsearch
mode: 0600
loop: loop:
- "sgadmin.pem" - "sgadmin.pem"
- "sgadmin.key" - "sgadmin.key"
@@ -57,11 +57,11 @@
- name: Copy SG roles files - name: Copy SG roles files
become: yes become: yes
copy: copy:
mode: 0600
src: "files/searchguard/{{ item }}" src: "files/searchguard/{{ item }}"
dest: "{{ mc_es_searchgaurd_home }}/sgconfig/{{ item }}" dest: "{{ insight_es_searchgaurd_home }}/sgconfig/{{ item }}"
owner: elasticsearch owner: elasticsearch
group: elasticsearch group: elasticsearch
mode: 0600
loop: loop:
- "sg_roles.yml" - "sg_roles.yml"
- "sg_roles_mapping.yml" - "sg_roles_mapping.yml"
@@ -70,7 +70,7 @@
- name: Check execution bit - name: Check execution bit
become: yes become: yes
file: file:
path: "{{ mc_es_searchgaurd_home }}/tools/sgadmin.sh" path: "{{ insight_es_searchgaurd_home }}/tools/sgadmin.sh"
owner: elasticsearch owner: elasticsearch
group: elasticsearch group: elasticsearch
mode: 0700 mode: 0700

View File

@@ -0,0 +1,128 @@
- name: Check if insight tar exists
become: yes
stat:
path: "{{ jfrog_home_directory }}/{{ insight_tar_file_name }}"
register: insight_tar_check
- name: Download insight for upgrade
become: yes
get_url:
url: "{{ insight_tar }}"
timeout: "{{ insight_download_timeout }}"
dest: "{{ jfrog_home_directory }}"
register: download_insight
until: download_insight is succeeded
retries: 3
when: not insight_tar_check.stat.exists
- name: Extract insight tar
become: yes
unarchive:
src: "{{ jfrog_home_directory }}/{{ insight_tar_file_name }}"
dest: "{{ jfrog_home_directory }}"
remote_src: true
owner: "{{ insight_user }}"
group: "{{ insight_group }}"
creates: "{{ insight_untar_home }}"
when: download_insight is succeeded
- name: Stop insight
meta: flush_handlers
- name: Delete current app folder
become: yes
file:
path: "{{ insight_home }}/app"
state: absent
when: download_insight.changed
- name: Copy new app to insight app
command: "cp -r {{ insight_untar_home }}/app/. {{ insight_home }}/app"
become: yes
when: download_insight.changed
- name: Delete untar directory
become: yes
file:
path: "{{ insight_untar_home }}"
state: absent
when: download_insight.changed
- name: Upgrade elasticsearch
import_tasks: upgrade-elasticsearch.yml
when: download_insight.changed
- name: Check if system.yaml exists
become: yes
stat:
path: "{{ insight_home }}/var/etc/system.yaml"
register: systemyaml
- name: Configure system.yaml
become: yes
template:
src: "{{ insight_system_yaml_template }}"
dest: "{{ insight_home }}/var/etc/system.yaml"
owner: "{{ insight_user }}"
group: "{{ insight_group }}"
mode: 0644
when:
- insight_systemyaml is defined
- insight_systemyaml | length > 0
- insight_systemyaml_override or (not systemyaml.stat.exists)
notify: restart insight
- name: Check if install.sh wrapper script exist
become: yes
stat:
path: "{{ insight_install_script_path }}/install.sh"
register: upgrade_wrapper_script
when: download_insight.changed
- name: Include interactive installer scripts
include_vars: script/archive.yml
- name: Upgrade Insight
include_tasks: expect.yml
vars:
exp_executable_cmd: "./install.sh -u {{ insight_user }} -g {{ insight_group }}"
exp_dir: "{{ insight_install_script_path }}"
exp_scenarios: "{{ insight_installer_scenario['main'] }}"
args:
apply:
environment:
YQ_PATH: "{{ insight_thirdparty_path }}/yq"
when:
- upgrade_wrapper_script.stat.exists
- download_insight.changed
- name: Configure installer info
become: yes
template:
src: installer-info.json.j2
dest: "{{ insight_home }}/var/etc/info/installer-info.json"
mode: 0644
notify: restart insight
- name: Update correct permissions
become: yes
file:
path: "{{ insight_home }}"
state: directory
recurse: yes
owner: "{{ insight_user }}"
group: "{{ insight_group }}"
- name: Restart insight
meta: flush_handlers
- name: Make sure insight is up and running
uri:
url: http://127.0.0.1:8082/router/api/v1/system/health
timeout: 130
status_code: 200
register: result
until: result is succeeded
retries: 25
delay: 5
when: not ansible_check_mode

View File

@@ -1,13 +1,13 @@
discovery.seed_providers: file discovery.seed_providers: file
transport.port: {{ mc_es_transport_port }} transport.port: {{ insight_es_transport_port }}
transport.host: 0.0.0.0 transport.host: 0.0.0.0
transport.publish_host: {{ ansible_host }} transport.publish_host: {{ ansible_host }}
network.host: 0.0.0.0 network.host: 0.0.0.0
node.name: {{ ansible_host }} node.name: {{ ansible_host }}
cluster.initial_master_nodes: {{ ansible_host }} cluster.initial_master_nodes: {{ ansible_host }}
bootstrap.memory_lock: false bootstrap.memory_lock: false
path.data: {{ mc_es_data_dir }} path.data: {{ insight_es_data_dir }}
path.logs: {{ mc_es_log_dir }} path.logs: {{ insight_es_log_dir }}
xpack.security.enabled: false xpack.security.enabled: false
searchguard.ssl.transport.pemcert_filepath: localhost.pem searchguard.ssl.transport.pemcert_filepath: localhost.pem

View File

@@ -1,12 +1,12 @@
discovery.seed_providers: file discovery.seed_providers: file
{% if mc_elasticsearch_package | regex_search(".*-7.*") %} {% if insight_elasticsearch_package | regex_search(".*oss-7.*") %}
cluster.initial_master_nodes: {{ ansible_host }} cluster.initial_master_nodes: {{ ansible_host }}
{% endif %} {% endif %}
xpack.security.enabled: false xpack.security.enabled: false
path.data: {{ mc_es_home }}/data path.data: {{ insight_es_home }}/data
path.logs: {{ mc_es_home }}/logs path.logs: {{ insight_es_home }}/logs
network.host: 0.0.0.0 network.host: 0.0.0.0
node.name: {{ ansible_host }} node.name: {{ ansible_host }}

View File

@@ -0,0 +1,22 @@
configVersion: 1
shared:
jfrogUrl: {{ jfrog_url }}
node:
ip: {{ ansible_host }}
database:
type: "{{ insight_db_type }}"
driver: "{{ insight_db_driver }}"
url: "{{ insight_db_url }}"
username: "{{ insight_db_user }}"
password: "{{ insight_db_password }}"
elasticsearch:
unicastFile: {{ insight_es_conf_base }}/unicast_hosts.txt
password: {{ insight_es_password }}
url: {{ insight_es_url }}
username: {{ insight_es_user }}
external: true
security:
joinKey: {{ join_key }}
router:
entrypoints:
internalPort: 8046

View File

@@ -2,8 +2,8 @@
discovery.seed_providers: file discovery.seed_providers: file
xpack.security.enabled: false xpack.security.enabled: false
path.data: {{ mc_es_home }}/data path.data: {{ insight_es_home }}/data
path.logs: {{ mc_es_home }}/logs path.logs: {{ insight_es_home }}/logs
network.host: 0.0.0.0 network.host: 0.0.0.0
node.name: {{ ansible_host }} node.name: {{ ansible_host }}

View File

@@ -0,0 +1,23 @@
configVersion: 1
shared:
jfrogUrl: {{ jfrog_url }}
node:
ip: {{ ansible_host }}
database:
type: "{{ insight_db_type }}"
driver: "{{ insight_db_driver }}"
url: "{{ insight_db_url }}"
username: "{{ insight_db_user }}"
password: "{{ insight_db_password }}"
elasticsearch:
unicastFile: {{ insight_es_conf_base }}/unicast_hosts.txt
clusterSetup: YES
password: {{ insight_es_password }}
url: {{ insight_es_url }}
username: {{ insight_es_user }}
external: true
security:
joinKey: {{ join_key }}
router:
entrypoints:
internalPort: 8046

View File

@@ -1,6 +1,6 @@
{{ ansible_managed | comment }} {{ ansible_managed | comment }}
{ {
"productId": "Ansible_Missioncontrol/{{ platform_collection_version }}-{{ missioncontrol_version }}", "productId": "Ansible_Insight/{{ platform_collection_version }}-{{ insight_version }}",
"features": [ "features": [
{ {
"featureId": "Channel/{{ ansible_marketplace }}" "featureId": "Channel/{{ ansible_marketplace }}"

View File

@@ -0,0 +1 @@
{{ insight_systemyaml }}

View File

@@ -1,5 +1,5 @@
# platform collection version # platform collection version
platform_collection_version: 7.25.7 platform_collection_version: 10.0.1
# indicates where this collection was downloaded from (galaxy, automation_hub, standalone) # indicates where this collection was downloaded from (galaxy, automation_hub, standalone)
ansible_marketplace: galaxy ansible_marketplace: galaxy

View File

@@ -1,8 +1,8 @@
mc_installer_scenario: insight_installer_scenario:
main: main:
- { - {
"expecting": "(data|installation) directory \\(", "expecting": "(data|installation) directory \\(",
"sending": "{{ mc_home }}" "sending": "{{ insight_home }}"
} }
- { - {
"expecting": "jfrog url( \\(.+\\))?:(?!.*Skipping prompt)", "expecting": "jfrog url( \\(.+\\))?:(?!.*Skipping prompt)",
@@ -18,7 +18,7 @@ mc_installer_scenario:
} }
- { - {
"expecting": "are you adding an additional node", "expecting": "are you adding an additional node",
"sending": "{% if mc_ha_node_type is defined and mc_ha_node_type == 'master' %}n{% else %}y{% endif %}" "sending": "{% if insight_ha_node_type is defined and insight_ha_node_type == 'master' %}n{% else %}y{% endif %}"
} }
- { - {
"expecting": "do you want to install postgresql", "expecting": "do you want to install postgresql",
@@ -30,29 +30,29 @@ mc_installer_scenario:
} }
- { - {
"expecting": "(postgresql|database) url.+\\[jdbc:postgresql.+\\]:", "expecting": "(postgresql|database) url.+\\[jdbc:postgresql.+\\]:",
"sending": "{{ mc_db_url }}" "sending": "{{ insight_db_url }}"
} }
- { - {
"expecting": "(postgresql|database) password", "expecting": "(postgresql|database) password",
"sending": "{{ mc_db_password }}" "sending": "{{ insight_db_password }}"
} }
- { - {
"expecting": "(postgresql|database) username", "expecting": "(postgresql|database) username",
"sending": "{{ mc_db_user }}" "sending": "{{ insight_db_user }}"
} }
- { - {
"expecting": "confirm database password", "expecting": "confirm database password",
"sending": "{{ mc_db_password }}" "sending": "{{ insight_db_password }}"
} }
- { - {
"expecting": "elasticsearch url:(?!.*Skipping prompt)", "expecting": "elasticsearch url:(?!.*Skipping prompt)",
"sending": "{{ mc_es_url }}" "sending": "{{ insight_es_url }}"
} }
- { - {
"expecting": "elasticsearch username:", "expecting": "elasticsearch username:",
"sending": "{{ mc_es_user }}" "sending": "{{ insight_es_user }}"
} }
- { - {
"expecting": "elasticsearch password:", "expecting": "elasticsearch password:",
"sending": "{{ mc_es_password }}" "sending": "{{ insight_es_password }}"
} }

View File

@@ -1,26 +0,0 @@
# Missioncontrol
The missioncontrol role will install missioncontrol software onto the host. An Artifactory server and Postgress database is required.
### Role Variables
* _mc_upgrade_only_: Perform a software upgrade only. Default is false.
Additional variables can be found in [defaults/main.yml](./defaults/main.yml).
## Example Playbook
```
---
- hosts: missioncontrol_servers
roles:
- missioncontrol
```
## Upgrades
The missioncontrol role supports software upgrades. To use the role for a software upgrade only, set the _mc_upgrade_only_ variable and specify the version. See the following example.
```
- hosts: missioncontrol_servers
vars:
missioncontrol_version: "{{ lookup('env', 'missioncontrol_version_upgrade') }}"
mc_upgrade_only: true
roles:
- missioncontrol
```

View File

@@ -1,101 +0,0 @@
# defaults file for mc
# The version of missioncontrol to install
missioncontrol_version: 4.7.14
# whether to enable HA
mc_ha_enabled: false
mc_ha_node_type: master
# The location where mc should install
jfrog_home_directory: /opt/jfrog
# The remote mc download file
mc_tar_file_name: jfrog-mc-{{ missioncontrol_version }}-linux.tar.gz
mc_tar: https://releases.jfrog.io/artifactory/jfrog-mc/linux/{{ missioncontrol_version }}/{{ mc_tar_file_name }}
# Timeout in seconds for URL request
mc_download_timeout: 10
#The mc install directory
mc_untar_home: "{{ jfrog_home_directory }}/jfrog-mc-{{ missioncontrol_version }}-linux"
mc_home: "{{ jfrog_home_directory }}/mc"
mc_install_script_path: "{{ mc_home }}/app/bin"
mc_thirdparty_path: "{{ mc_home }}/app/third-party"
mc_archive_service_cmd: "{{ mc_install_script_path }}/installService.sh"
mc_service_file: /lib/systemd/system/mc.service
#mc users and groups
mc_user: jfmc
mc_group: jfmc
mc_uid: 1050
mc_gid: 1050
mc_daemon: mc
# MissionControl ElasticSearch Details
es_uid: 1060
es_gid: 1060
mc_es_conf_base: "/etc/elasticsearch"
mc_es_user: admin
mc_es_password: admin
mc_es_url: "http://localhost:9200"
mc_es_transport_port: 9300
mc_es_home: "/usr/share/elasticsearch"
mc_es_data_dir: "/var/lib/elasticsearch"
mc_es_log_dir: "/var/log/elasticsearch"
mc_es_java_home: "/usr/share/elasticsearch/jdk"
mc_es_script_path: "/usr/share/elasticsearch/bin"
mc_es_searchgaurd_home: "/usr/share/elasticsearch/plugins/search-guard-7"
# if this is an upgrade
mc_upgrade_only: false
mc_system_yaml_template: system.yaml.j2
# Provide systemyaml content below with 2-space indentation
mc_systemyaml: |-
configVersion: 1
shared:
jfrogUrl: {{ jfrog_url }}
node:
ip: {{ ansible_host }}
id: {{ ansible_hostname }}
database:
type: "{{ mc_db_type }}"
driver: "{{ mc_db_driver }}"
url: "{{ mc_db_url }}"
elasticsearch:
unicastFile: {{ mc_es_conf_base }}/config/unicast_hosts.txt
password: {{ mc_es_password }}
url: {{ mc_es_url }}
username: {{ mc_es_user }}
external: true
security:
joinKey: {{ join_key }}
mc:
database:
username: "{{ mc_db_user }}"
password: "{{ mc_db_password }}"
schema: "jfmc_server"
insight-scheduler:
database:
username: "{{ mc_db_user }}"
password: "{{ mc_db_password }}"
schema: "insight_scheduler"
insight-server:
database:
username: "{{ mc_db_user }}"
password: "{{ mc_db_password }}"
schema: "insight_server"
router:
entrypoints:
internalPort: 8046
# Note: mc_systemyaml_override is false by default; set it to true to have the role overwrite an existing system.yaml with mc_systemyaml
mc_systemyaml_override: false

View File

@@ -1,13 +0,0 @@
---
# handlers file for missioncontrol
- name: restart missioncontrol
become: yes
systemd:
name: "{{ mc_daemon }}"
state: restarted
- name: stop missioncontrol
become: yes
systemd:
name: "{{ mc_daemon }}"
state: stopped

View File

@@ -1,161 +0,0 @@
- name: Install prerequisite packages
include_tasks: "{{ ansible_os_family }}.yml"
- name: Ensure group jfmc exists
become: yes
group:
name: "{{ mc_group }}"
state: present
- name: Ensure user jfmc exists
become: yes
user:
name: "{{ mc_user }}"
group: "{{ mc_group }}"
create_home: yes
home: "{{ mc_home }}"
shell: /bin/bash
state: present
- name: Check if mc tar exists
become: yes
stat:
path: "{{ jfrog_home_directory }}/{{ mc_tar_file_name }}"
register: mc_tar_check
- name: Download mc
become: yes
get_url:
url: "{{ mc_tar }}"
timeout: "{{ mc_download_timeout }}"
dest: "{{ jfrog_home_directory }}"
register: download_mc
until: download_mc is succeeded
retries: 3
when: not mc_tar_check.stat.exists
- name: Extract mc tar
become: yes
unarchive:
src: "{{ jfrog_home_directory }}/{{ mc_tar_file_name }}"
dest: "{{ jfrog_home_directory }}"
remote_src: true
owner: "{{ mc_user }}"
group: "{{ mc_group }}"
creates: "{{ mc_untar_home }}"
when: download_mc is succeeded
- name: Check if app directory exists
become: yes
stat:
path: "{{ mc_home }}/app"
register: app_dir_check
- name: Copy untar directory to mc home
become: yes
copy:
src: "{{ mc_untar_home }}/"
dest: "{{ mc_home }}"
remote_src: yes
when: not app_dir_check.stat.exists
- name: Create required directories
become: yes
file:
path: "{{ item }}"
state: directory
recurse: yes
owner: "{{ mc_user }}"
group: "{{ mc_group }}"
loop:
- "{{ mc_home }}/var/etc"
- "{{ mc_home }}/var/etc/security/"
- "{{ mc_home }}/var/etc/info/"
- name: Configure master key
become: yes
copy:
dest: "{{ mc_home }}/var/etc/security/master.key"
content: "{{ master_key }}"
owner: "{{ mc_user }}"
group: "{{ mc_group }}"
mode: 0640
- name: Setup elasticsearch
import_tasks: setup-elasticsearch.yml
- name: Check if install.sh wrapper script exist
become: yes
stat:
path: "{{ mc_install_script_path }}/install.sh"
register: install_wrapper_script
- name: Include interactive installer scripts
include_vars: script/archive.yml
- name: Install JFMC
include_tasks: expect.yml
vars:
exp_executable_cmd: "./install.sh -u {{ mc_user }} -g {{ mc_group }}"
exp_dir: "{{ mc_install_script_path }}"
exp_scenarios: "{{ mc_installer_scenario['main'] }}"
args:
apply:
environment:
YQ_PATH: "{{ mc_thirdparty_path }}/yq"
when: install_wrapper_script.stat.exists
- name: Configure installer info
become: yes
template:
src: installer-info.json.j2
dest: "{{ mc_home }}/var/etc/info/installer-info.json"
notify: restart missioncontrol
- name: Check if systemyaml exists
become: yes
stat:
path: "{{ mc_home }}/var/etc/system.yaml"
register: systemyaml
- name: Configure systemyaml
become: yes
template:
src: "{{ mc_system_yaml_template }}"
dest: "{{ mc_home }}/var/etc/system.yaml"
when:
- mc_systemyaml is defined
- mc_systemyaml|length > 0
- mc_systemyaml_override or (not systemyaml.stat.exists)
notify: restart missioncontrol
- name: Update correct permissions
become: yes
file:
path: "{{ mc_home }}"
state: directory
recurse: yes
owner: "{{ mc_user }}"
group: "{{ mc_group }}"
- name: Install mc as a service
become: yes
command: "{{ mc_archive_service_cmd }}"
args:
chdir: "{{ mc_install_script_path }}"
creates: "{{ mc_service_file }}"
register: check_service_status_result
- name: Restart missioncontrol
meta: flush_handlers
- name: Make sure missionControl is up and running
uri:
url: http://127.0.0.1:8082/router/api/v1/system/health
timeout: 130
status_code: 200
register: result
until: result is succeeded
retries: 25
delay: 5
when: not ansible_check_mode

View File

@@ -1,126 +0,0 @@
---
- name: Check if mc tar exists
become: yes
stat:
path: "{{ jfrog_home_directory }}/{{ mc_tar_file_name }}"
register: mc_tar_check
- name: Download mc for upgrade
become: yes
get_url:
url: "{{ mc_tar }}"
timeout: "{{ mc_download_timeout }}"
dest: "{{ jfrog_home_directory }}"
register: download_mc
until: download_mc is succeeded
retries: 3
when: not mc_tar_check.stat.exists
- name: Extract mc tar
become: yes
unarchive:
src: "{{ jfrog_home_directory }}/{{ mc_tar_file_name }}"
dest: "{{ jfrog_home_directory }}"
remote_src: true
owner: "{{ mc_user }}"
group: "{{ mc_group }}"
creates: "{{ mc_untar_home }}"
when: download_mc is succeeded
- name: stop missioncontrol
meta: flush_handlers
- name: Delete current app folder
become: yes
file:
path: "{{ mc_home }}/app"
state: absent
when: download_mc.changed
- name: Copy new app to mc app
command: "cp -r {{ mc_untar_home }}/app/. {{ mc_home }}/app"
become: yes
when: download_mc.changed
- name: Delete untar directory
file:
path: "{{ mc_untar_home }}"
state: absent
become: yes
when: download_mc.changed
- name: Upgrade elasticsearch
import_tasks: upgrade-elasticsearch.yml
when: download_mc.changed
- name: Check if systemyaml exists
become: yes
stat:
path: "{{ mc_home }}/var/etc/system.yaml"
register: systemyaml
- name: Configure systemyaml
become: yes
template:
src: "{{ mc_system_yaml_template }}"
dest: "{{ mc_home }}/var/etc/system.yaml"
when:
- mc_systemyaml is defined
- mc_systemyaml|length > 0
- mc_systemyaml_override or (not systemyaml.stat.exists)
notify: restart missioncontrol
- name: Check if install.sh wrapper script exist
become: yes
stat:
path: "{{ mc_install_script_path }}/install.sh"
register: upgrade_wrapper_script
when: download_mc.changed
- name: Include interactive installer scripts
include_vars: script/archive.yml
- name: Upgrade JFMC
include_tasks: expect.yml
vars:
exp_executable_cmd: "./install.sh -u {{ mc_user }} -g {{ mc_group }}"
exp_dir: "{{ mc_install_script_path }}"
exp_scenarios: "{{ mc_installer_scenario['main'] }}"
args:
apply:
environment:
YQ_PATH: "{{ mc_thirdparty_path }}/yq"
when:
- upgrade_wrapper_script.stat.exists
- download_mc.changed
- name: Configure installer info
become: yes
template:
src: installer-info.json.j2
dest: "{{ mc_home }}/var/etc/info/installer-info.json"
mode: 0644
notify: restart missioncontrol
- name: Update correct permissions
become: yes
file:
path: "{{ mc_home }}"
state: directory
recurse: yes
owner: "{{ mc_user }}"
group: "{{ mc_group }}"
- name: Restart missioncontrol
meta: flush_handlers
- name: Make sure missionControl is up and running
uri:
url: http://127.0.0.1:8082/router/api/v1/system/health
timeout: 130
status_code: 200
register: result
until: result is succeeded
retries: 25
delay: 5
when: not ansible_check_mode

View File

@@ -1,22 +0,0 @@
configVersion: 1
shared:
jfrogUrl: {{ jfrog_url }}
node:
ip: {{ ansible_host }}
database:
type: "{{ mc_db_type }}"
driver: "{{ mc_db_driver }}"
url: "{{ mc_db_url }}"
username: "{{ mc_db_user }}"
password: "{{ mc_db_password }}"
elasticsearch:
unicastFile: {{ mc_es_conf_base }}/unicast_hosts.txt
password: {{ mc_es_password }}
url: {{ mc_es_url }}
username: {{ mc_es_user }}
external: true
security:
joinKey: {{ join_key }}
router:
entrypoints:
internalPort: 8046

View File

@@ -1,23 +0,0 @@
configVersion: 1
shared:
jfrogUrl: {{ jfrog_url }}
node:
ip: {{ ansible_host }}
database:
type: "{{ mc_db_type }}"
driver: "{{ mc_db_driver }}"
url: "{{ mc_db_url }}"
username: "{{ mc_db_user }}"
password: "{{ mc_db_password }}"
elasticsearch:
unicastFile: {{ mc_es_conf_base }}/unicast_hosts.txt
clusterSetup: YES
password: {{ mc_es_password }}
url: {{ mc_es_url }}
username: {{ mc_es_user }}
external: true
security:
joinKey: {{ join_key }}
router:
entrypoints:
internalPort: 8046

View File

@@ -17,7 +17,6 @@
- name: Install prerequisite packages - name: Install prerequisite packages
become: yes become: yes
ignore_errors: yes
yum: yum:
name: name:
- acl - acl
@@ -52,7 +51,13 @@
- LANG=en_us.UTF-8 - LANG=en_us.UTF-8
- LANGUAGE=en_us.UTF-8 - LANGUAGE=en_us.UTF-8
- name: Install postgres repository - name: Import PostgreSQL GPG public key
become: yes
rpm_key:
key: https://download.postgresql.org/pub/repos/yum/RPM-GPG-KEY-PGDG
state: present
- name: Install PostgreSQL repository
become: yes become: yes
yum: yum:
name: "{{ base }}/reporpms/EL-{{ version }}-x86_64/{{ repo_file_name }}" name: "{{ base }}/reporpms/EL-{{ version }}-x86_64/{{ repo_file_name }}"
@@ -62,7 +67,24 @@
version: "{{ ansible_distribution_major_version }}" version: "{{ ansible_distribution_major_version }}"
repo_file_name: pgdg-redhat-repo-latest.noarch.rpm repo_file_name: pgdg-redhat-repo-latest.noarch.rpm
- name: Install postgres packages - name: Disable PostgreSQL module
become: yes
copy:
dest: /etc/dnf/modules.d/postgresql.module
owner: root
group: root
mode: 0644
content: |
[postgresql]
name=postgresql
stream=
profiles=
state=disabled
when:
- ansible_os_family == 'RedHat'
- ansible_distribution_major_version | int == 8
- name: Install PostgreSQL packages
become: yes become: yes
yum: yum:
name: name:
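
The new Disable PostgreSQL module task above writes `/etc/dnf/modules.d/postgresql.module` with `state=disabled`, which is the same file `dnf module disable postgresql` maintains on RHEL/CentOS 8; managing it with `copy` keeps the step idempotent. For reference only, a hedged sketch of the equivalent imperative task (not what the role does):
```
# Reference only - the role uses the copy task above instead, so repeated runs
# report "ok" rather than "changed".
- name: Disable the postgresql dnf module (alternative sketch)
  become: yes
  command: dnf -y module disable postgresql
  when:
    - ansible_os_family == 'RedHat'
    - ansible_distribution_major_version | int == 8
```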

View File

@@ -58,65 +58,46 @@
timeout: 120 timeout: 120
sleep: 10 sleep: 10
- name: Create users - name: Create user
become: yes become: yes
become_user: postgres become_user: postgres
postgresql_user: postgresql_user:
name: "{{ item.db_user }}" name: "{{ curr_user.username }}"
password: "{{ item.db_password }}" password: "{{ curr_user.password }}"
conn_limit: "-1" conn_limit: "-1"
loop: "{{ db_users | default([]) }}" loop: "{{ database | dict2items | map(attribute='value') | list }}"
loop_control:
loop_var: curr_user
when: curr_user.enabled | bool
no_log: true # secret passwords no_log: true # secret passwords
- name: Create a database - name: Create database
become: yes become: yes
become_user: postgres become_user: postgres
postgresql_db: postgresql_db:
name: "{{ item.db_name }}" name: "{{ curr_db.name }}"
owner: "{{ item.db_owner }}" owner: "{{ curr_db.owner }}"
encoding: UTF-8 encoding: UTF-8
lc_collate: "{{ postgres_locale }}" lc_collate: "{{ postgres_locale }}"
lc_ctype: "{{ postgres_locale }}" lc_ctype: "{{ postgres_locale }}"
template: template0 template: template0
loop: "{{ dbs | default([]) }}" loop: "{{ database | dict2items | map(attribute='value') | list }}"
loop_control:
- name: Check if MC schemas already exists loop_var: curr_db
become: yes when: curr_db.enabled | bool
become_user: postgres no_log: true # secret passwords
command: psql -d {{ mc_db_name }} -t -c "\dn"
register: mc_schemas_loaded
when: mc_enabled
changed_when: false
- name: Create schemas for mission-control
become: yes
become_user: postgres
command: psql -d {{ mc_db_name }} -c 'CREATE SCHEMA {{ item }} authorization {{ mc_db_user }}'
loop: "{{ mc_schemas | default([]) }}"
when:
- mc_enabled
- mc_schemas_loaded.stdout is defined
- item not in mc_schemas_loaded.stdout
- name: Grant all privileges to mc user on its schema
become: yes
become_user: postgres
postgresql_privs:
database: "{{ mc_db_name }}"
privs: ALL
type: schema
roles: "{{ mc_db_user }}"
objs: "{{ item }}"
loop: "{{ mc_schemas | default([]) }}"
when: mc_enabled
- name: Grant privs on db - name: Grant privs on db
become: yes become: yes
become_user: postgres become_user: postgres
postgresql_privs: postgresql_privs:
database: "{{ item.db_name }}" database: "{{ curr_db.name }}"
role: "{{ item.db_owner }}" role: "{{ curr_db.owner }}"
state: present state: present
privs: ALL privs: ALL
type: database type: database
loop: "{{ dbs | default([]) }}" loop: "{{ database | dict2items | map(attribute='value') | list }}"
loop_control:
loop_var: curr_db
when: curr_db.enabled | bool
no_log: true # secret passwords
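
The rewritten tasks loop over the values of a single `database` dictionary instead of the old `db_users`/`dbs` lists, skipping entries whose `enabled` flag is false. A minimal sketch of what that variable could look like in group_vars (only the keys the loops above actually reference — `enabled`, `name`, `owner`, `username`, `password` — are required; entry names and the vault_* variables are assumptions):
```
# Illustrative group_vars sketch; entry names and vault_* variables are placeholders.
database:
  artifactory:
    enabled: true
    name: artifactory
    owner: artifactory
    username: artifactory
    password: "{{ vault_artifactory_db_password }}"
  xray:
    enabled: false
    name: xraydb
    owner: xray
    username: xray
    password: "{{ vault_xray_db_password }}"
```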

View File

@@ -1,7 +1,7 @@
# defaults file for xray # defaults file for xray
# The version of xray to install # The version of xray to install
xray_version: 3.32.2 xray_version: 3.34.1
# whether to enable HA # whether to enable HA
xray_ha_enabled: false xray_ha_enabled: false
@@ -51,7 +51,7 @@ linux_distro: "{{ ansible_distribution | lower }}{{ ansible_distribution_major_v
xray_db_util_search_filter: xray_db_util_search_filter:
ubuntu16: ubuntu16:
db5: 'db5.3-util.*ubuntu.*amd64\.deb' db5: 'db5.3-util.*ubuntu0.*amd64\.deb'
db: 'db-util.*ubuntu.*all.deb' db: 'db-util.*ubuntu.*all.deb'
ubuntu18: ubuntu18:
db5: 'db5.3-util.*ubuntu1.1.*amd64\.deb' db5: 'db5.3-util.*ubuntu1.1.*amd64\.deb'

View File

@@ -39,10 +39,10 @@
unarchive: unarchive:
src: "{{ jfrog_home_directory }}/{{ xray_tar_file_name }}" src: "{{ jfrog_home_directory }}/{{ xray_tar_file_name }}"
dest: "{{ jfrog_home_directory }}" dest: "{{ jfrog_home_directory }}"
remote_src: true
owner: "{{ xray_user }}" owner: "{{ xray_user }}"
group: "{{ xray_group }}" group: "{{ xray_group }}"
creates: "{{ xray_untar_home }}" creates: "{{ xray_untar_home }}"
remote_src: true
when: download_xray is succeeded when: download_xray is succeeded
- name: Check if app directory exists - name: Check if app directory exists
@@ -56,6 +56,9 @@
copy: copy:
src: "{{ xray_untar_home }}/" src: "{{ xray_untar_home }}/"
dest: "{{ xray_home }}" dest: "{{ xray_home }}"
owner: "{{ xray_user }}"
group: "{{ xray_group }}"
mode: 0755
remote_src: yes remote_src: yes
when: not app_dir_check.stat.exists when: not app_dir_check.stat.exists

View File

@@ -21,6 +21,12 @@
xray_socat_package: "{{ check_socat_package_result.files[0].path }}" xray_socat_package: "{{ check_socat_package_result.files[0].path }}"
when: check_socat_package_result.matched > 0 when: check_socat_package_result.matched > 0
- name: Import CentOS GPG public key
become: yes
rpm_key:
key: https://www.centos.org/keys/RPM-GPG-KEY-CentOS-Official
state: present
- name: Install socat package - name: Install socat package
become: yes become: yes
yum: yum:

View File

@@ -1,4 +1,3 @@
---
- name: Check if xray tar exists - name: Check if xray tar exists
become: yes become: yes
stat: stat:

View File

@@ -1,5 +1,5 @@
# platform collection version # platform collection version
platform_collection_version: 7.25.7 platform_collection_version: 10.0.1
# indicates where this collection was downloaded from (galaxy, automation_hub, standalone) # indicates where this collection was downloaded from (galaxy, automation_hub, standalone)
ansible_marketplace: galaxy ansible_marketplace: galaxy

View File

@@ -1,4 +1,3 @@
---
- hosts: xray_servers - hosts: xray_servers
roles: roles:
- xray - xray

View File

@@ -11,5 +11,5 @@ xray-1 ansible_host=x.x.x.x
[distribution_servers] [distribution_servers]
distribution-1 ansible_host=x.x.x.x distribution-1 ansible_host=x.x.x.x
[missionControl_servers] [insight_servers]
missionControl-1 ansible_host=x.x.x.x insight-1 ansible_host=x.x.x.x

View File

@@ -0,0 +1,8 @@
# Replace x.x.x.x with public IPs of servers
[postgres_servers]
postgres-1 ansible_host=x.x.x.x
[artifactory_servers]
artifactory-1 ansible_host=x.x.x.x
artifactory-2 ansible_host=x.x.x.x
artifactory-3 ansible_host=x.x.x.x

View File

@@ -3,28 +3,36 @@
collections: collections:
- jfrog.platform - jfrog.platform
roles: roles:
- postgres - role: postgres
when: postgres_enabled | bool
- hosts: artifactory_servers - hosts: artifactory_servers
collections: collections:
- jfrog.platform - jfrog.platform
serial:
- 1
- 100%
roles: roles:
- artifactory - role: artifactory
when: artifactory_enabled | bool
- hosts: xray_servers - hosts: xray_servers
collections: collections:
- jfrog.platform - jfrog.platform
roles: roles:
- xray - role: xray
when: xray_enabled | bool
- hosts: distribution_servers - hosts: distribution_servers
collections: collections:
- jfrog.platform - jfrog.platform
roles: roles:
- distribution - role: distribution
when: distribution_enabled | bool
- hosts: missionControl_servers - hosts: insight_servers
collections: collections:
- jfrog.platform - jfrog.platform
roles: roles:
- missionControl - role: insight
when: insight_enabled | bool
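
Every play in the platform playbook is now gated on a per-product `*_enabled` flag, so one inventory and one playbook can install any subset of the products. A minimal sketch of the corresponding switches in group_vars (variable names are taken from the `when:` conditions above; the values shown are assumptions):
```
# Illustrative only: enable or disable whole products for the platform playbook.
postgres_enabled: true
artifactory_enabled: true
xray_enabled: true
distribution_enabled: false
insight_enabled: false
```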

View File

@@ -8,11 +8,16 @@
- hosts: artifactory_servers - hosts: artifactory_servers
collections: collections:
- jfrog.platform - jfrog.platform
serial:
- 1
- 100%
roles: roles:
- artifactory - role: artifactory
when: artifactory_enabled | bool
- hosts: xray_servers - hosts: xray_servers
collections: collections:
- jfrog.platform - jfrog.platform
roles: roles:
- xray - role: xray
when: xray_enabled | bool

View File

@@ -3,10 +3,15 @@
collections: collections:
- jfrog.platform - jfrog.platform
roles: roles:
- postgres - role: postgres
when: postgres_enabled | bool
- hosts: artifactory_servers - hosts: artifactory_servers
collections: collections:
- jfrog.platform - jfrog.platform
serial:
- 1
- 100%
roles: roles:
- artifactory - role: artifactory
when: artifactory_enabled | bool
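
The `serial` keyword added to the Artifactory plays is what provides the rolling upgrade for HA installations: with a list value, Ansible runs the play in successive batches of the given sizes. A short sketch with the batch behaviour spelled out (hosts and role name as in the playbooks above):
```
# serial as a list defines successive batch sizes: the first batch is a single
# node; "100%" then covers all remaining nodes in one batch. If the first node
# fails, later batches never run, so a broken upgrade stops after one node.
- hosts: artifactory_servers
  serial:
    - 1
    - "100%"
  roles:
    - role: artifactory
      when: artifactory_enabled | bool
```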

View File

@@ -3,10 +3,12 @@
collections: collections:
- jfrog.platform - jfrog.platform
roles: roles:
- postgres - role: postgres
when: postgres_enabled | bool
- hosts: xray_servers - hosts: xray_servers
collections: collections:
- jfrog.platform - jfrog.platform
roles: roles:
- xray - role: xray
when: xray_enabled | bool