mirror of
https://github.com/ZwareBear/awx.git
synced 2026-03-20 07:43:35 -05:00
Common Inventory slicing method for job slices
- Extract how slicing is done from Inventory#get_script_data and pull it into a new method, Inventory#get_sliced_hosts.
- Make use of this method in Inventory#get_script_data.
- Make use of this method in Job#_get_inventory_hosts (used by Job#start_job_fact_cache and Job#finish_job_fact_cache).

This fixes an issue (namely in Tower 4.1) where job slicing with fact caching enabled doesn't save facts for all hosts.

Signed-off-by: Rick Elrod <rick@elrod.me>
This commit is contained in:
@@ -236,6 +236,12 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
|
|||||||
raise ParseError(_('Slice number must be 1 or higher.'))
|
raise ParseError(_('Slice number must be 1 or higher.'))
|
||||||
return (number, step)
|
return (number, step)
|
||||||
|
|
||||||
|
def get_sliced_hosts(self, host_queryset, slice_number, slice_count):
    """Return the subset of *host_queryset* that belongs to one job slice.

    Hosts are partitioned round-robin across slices: slice N (1-based)
    takes every ``slice_count``-th host starting at offset ``N - 1``.
    When slicing is not in effect (``slice_count`` <= 1) or the slice
    number is not positive, the queryset is returned unchanged.
    """
    is_sliced = slice_count > 1 and slice_number > 0
    if not is_sliced:
        return host_queryset
    # 1-based slice number -> 0-based starting offset for the step slice.
    start = slice_number - 1
    return host_queryset[start::slice_count]
|
||||||
|
|
||||||
def get_script_data(self, hostvars=False, towervars=False, show_all=False, slice_number=1, slice_count=1):
|
def get_script_data(self, hostvars=False, towervars=False, show_all=False, slice_number=1, slice_count=1):
|
||||||
hosts_kw = dict()
|
hosts_kw = dict()
|
||||||
if not show_all:
|
if not show_all:
|
||||||
@@ -243,10 +249,8 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
|
|||||||
fetch_fields = ['name', 'id', 'variables', 'inventory_id']
|
fetch_fields = ['name', 'id', 'variables', 'inventory_id']
|
||||||
if towervars:
|
if towervars:
|
||||||
fetch_fields.append('enabled')
|
fetch_fields.append('enabled')
|
||||||
hosts = self.hosts.filter(**hosts_kw).order_by('name').only(*fetch_fields)
|
host_queryset = self.hosts.filter(**hosts_kw).order_by('name').only(*fetch_fields)
|
||||||
if slice_count > 1 and slice_number > 0:
|
hosts = self.get_sliced_hosts(host_queryset, slice_number, slice_count)
|
||||||
offset = slice_number - 1
|
|
||||||
hosts = hosts[offset::slice_count]
|
|
||||||
|
|
||||||
data = dict()
|
data = dict()
|
||||||
all_group = data.setdefault('all', dict())
|
all_group = data.setdefault('all', dict())
|
||||||
|
|||||||
@@ -814,7 +814,8 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
|
|||||||
def _get_inventory_hosts(self, only=['name', 'ansible_facts', 'ansible_facts_modified', 'modified', 'inventory_id']):
|
def _get_inventory_hosts(self, only=['name', 'ansible_facts', 'ansible_facts_modified', 'modified', 'inventory_id']):
|
||||||
if not self.inventory:
|
if not self.inventory:
|
||||||
return []
|
return []
|
||||||
return self.inventory.hosts.only(*only)
|
host_queryset = self.inventory.hosts.only(*only)
|
||||||
|
return self.inventory.get_sliced_hosts(host_queryset, self.job_slice_number, self.job_slice_count)
|
||||||
|
|
||||||
def start_job_fact_cache(self, destination, modification_times, timeout=None):
|
def start_job_fact_cache(self, destination, modification_times, timeout=None):
|
||||||
self.log_lifecycle("start_job_fact_cache")
|
self.log_lifecycle("start_job_fact_cache")
|
||||||
|
|||||||
Reference in New Issue
Block a user