Merge branch 'release_3.1.0' into devel

* release_3.1.0: (633 commits)
  query for failed projects to include canceled
  Style Audit of Host Event Modal
  Remove npm hacks in RPM builder
  Update po files
  Update pot files
  Make sure to delete any nodes that need to be deleted before attempting to associate
  super-user requests to HostDetail go through rbac
  Launch job with prompts fixes
  add notifications to cleanup_jobs
  allow can_add to be called for permission info
  Elaborate on system job default
  i accidentally removed the "-1" from this check, adding it back in
  remove job_event text filters, tweaked RBAC see issue 4958 for the RBAC details
  fixing jshint
  Adding tags was creating this nested array structure which was causing buggy behavior.  This should fix that.
  bump vmware inventory script
  Fix up some unit tests
  moving appendToBottom function elsewhere
  rearranging logic to match integrity of existing logic
  Fixed location of sub-category dropdowns
  ...
This commit is contained in:
Matthew Jones
2017-01-27 20:52:59 -05:00
517 changed files with 39528 additions and 9457 deletions
+43 -3
View File
@@ -19,6 +19,7 @@ from rest_framework.filters import BaseFilterBackend
# Ansible Tower
from awx.main.utils import get_type_for_model, to_python_boolean
from awx.main.models.rbac import RoleAncestorEntry
class MongoFilterBackend(BaseFilterBackend):
@@ -76,7 +77,7 @@ class FieldLookupBackend(BaseFilterBackend):
SUPPORTED_LOOKUPS = ('exact', 'iexact', 'contains', 'icontains',
'startswith', 'istartswith', 'endswith', 'iendswith',
'regex', 'iregex', 'gt', 'gte', 'lt', 'lte', 'in',
'isnull')
'isnull', 'search')
def get_field_from_lookup(self, model, lookup):
field = None
@@ -147,6 +148,15 @@ class FieldLookupBackend(BaseFilterBackend):
re.compile(value)
except re.error as e:
raise ValueError(e.args[0])
elif new_lookup.endswith('__search'):
related_model = getattr(field, 'related_model', None)
if not related_model:
raise ValueError('%s is not searchable' % new_lookup[:-8])
new_lookups = []
for rm_field in related_model._meta.fields:
if rm_field.name in ('username', 'first_name', 'last_name', 'email', 'name', 'description'):
new_lookups.append('{}__{}__icontains'.format(new_lookup[:-8], rm_field.name))
return value, new_lookups
else:
value = self.value_to_python_for_field(field, value)
return value, new_lookup
@@ -158,6 +168,8 @@ class FieldLookupBackend(BaseFilterBackend):
and_filters = []
or_filters = []
chain_filters = []
role_filters = []
search_filters = []
for key, values in request.query_params.lists():
if key in self.RESERVED_NAMES:
continue
@@ -174,6 +186,21 @@ class FieldLookupBackend(BaseFilterBackend):
key = key[:-5]
q_int = True
# RBAC filtering
if key == 'role_level':
role_filters.append(values[0])
continue
# Search across related objects.
if key.endswith('__search'):
for value in values:
for search_term in force_text(value).replace(',', ' ').split():
search_value, new_keys = self.value_to_python(queryset.model, key, search_term)
assert isinstance(new_keys, list)
for new_key in new_keys:
search_filters.append((new_key, search_value))
continue
# Custom chain__ and or__ filters, mutually exclusive (both can
# precede not__).
q_chain = False
@@ -204,13 +231,21 @@ class FieldLookupBackend(BaseFilterBackend):
and_filters.append((q_not, new_key, value))
# Now build Q objects for database query filter.
if and_filters or or_filters or chain_filters:
if and_filters or or_filters or chain_filters or role_filters or search_filters:
args = []
for n, k, v in and_filters:
if n:
args.append(~Q(**{k:v}))
else:
args.append(Q(**{k:v}))
for role_name in role_filters:
args.append(
Q(pk__in=RoleAncestorEntry.objects.filter(
ancestor__in=request.user.roles.all(),
content_type_id=ContentType.objects.get_for_model(queryset.model).id,
role_field=role_name
).values_list('object_id').distinct())
)
if or_filters:
q = Q()
for n,k,v in or_filters:
@@ -219,6 +254,11 @@ class FieldLookupBackend(BaseFilterBackend):
else:
q |= Q(**{k:v})
args.append(q)
if search_filters:
q = Q()
for k,v in search_filters:
q |= Q(**{k:v})
args.append(q)
for n,k,v in chain_filters:
if n:
q = ~Q(**{k:v})
@@ -227,7 +267,7 @@ class FieldLookupBackend(BaseFilterBackend):
queryset = queryset.filter(q)
queryset = queryset.filter(*args).distinct()
return queryset
except (FieldError, FieldDoesNotExist, ValueError) as e:
except (FieldError, FieldDoesNotExist, ValueError, TypeError) as e:
raise ParseError(e.args[0])
except ValidationError as e:
raise ParseError(e.messages)
+19 -5
View File
@@ -156,6 +156,7 @@ class APIView(views.APIView):
'new_in_240': getattr(self, 'new_in_240', False),
'new_in_300': getattr(self, 'new_in_300', False),
'new_in_310': getattr(self, 'new_in_310', False),
'deprecated': getattr(self, 'deprecated', False),
}
def get_description(self, html=False):
@@ -267,10 +268,25 @@ class ListAPIView(generics.ListAPIView, GenericAPIView):
fields = []
for field in self.model._meta.fields:
if field.name in ('username', 'first_name', 'last_name', 'email',
'name', 'description', 'email'):
'name', 'description'):
fields.append(field.name)
return fields
@property
def related_search_fields(self):
fields = []
for field in self.model._meta.fields:
if field.name.endswith('_role'):
continue
if getattr(field, 'related_model', None):
fields.append('{}__search'.format(field.name))
for rel in self.model._meta.related_objects:
name = rel.get_accessor_name()
if name.endswith('_set'):
continue
fields.append('{}__search'.format(name))
return fields
class ListCreateAPIView(ListAPIView, generics.ListCreateAPIView):
# Base class for a list view that allows creating new objects.
@@ -543,14 +559,12 @@ class DestroyAPIView(GenericAPIView, generics.DestroyAPIView):
pass
class ResourceAccessList(ListAPIView):
class ResourceAccessList(ParentMixin, ListAPIView):
serializer_class = ResourceAccessListElementSerializer
def get_queryset(self):
self.object_id = self.kwargs['pk']
resource_model = getattr(self, 'resource_model')
obj = get_object_or_404(resource_model, pk=self.object_id)
obj = self.get_parent_object()
content_type = ContentType.objects.get_for_model(obj)
roles = set(Role.objects.filter(content_type=content_type, object_id=obj.id))
-58
View File
@@ -1,58 +0,0 @@
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved
import sys
from optparse import make_option
from django.core.management.base import BaseCommand
from awx.main.ha import is_ha_environment
from awx.main.task_engine import TaskEnhancer
class Command(BaseCommand):
"""Return a exit status of 0 if MongoDB should be active, and an
exit status of 1 otherwise.
This script is intended to be used by bash and init scripts to
conditionally start MongoDB, so its focus is on being bash-friendly.
"""
def __init__(self):
super(Command, self).__init__()
BaseCommand.option_list += (make_option('--local',
dest='local',
default=False,
action="store_true",
help="Only check if mongo should be running locally"),)
def handle(self, *args, **kwargs):
# Get the license data.
license_data = TaskEnhancer().validate_enhancements()
# Does the license have features, at all?
# If there is no license yet, then all features are clearly off.
if 'features' not in license_data:
print('No license available.')
sys.exit(2)
# Does the license contain the system tracking feature?
# If and only if it does, MongoDB should run.
system_tracking = license_data['features']['system_tracking']
# Okay, do we need MongoDB to be turned on?
# This is a silly variable assignment right now, but I expect the
# rules here will grow more complicated over time.
uses_mongo = system_tracking # noqa
if is_ha_environment() and kwargs['local'] and uses_mongo:
print("HA Configuration detected. Database should be remote")
uses_mongo = False
# If we do not need Mongo, return a non-zero exit status.
if not uses_mongo:
print('MongoDB NOT required')
sys.exit(1)
# We do need Mongo, return zero.
print('MongoDB required')
sys.exit(0)
+6 -2
View File
@@ -13,7 +13,7 @@ from django.utils.translation import ugettext_lazy as _
from rest_framework import exceptions
from rest_framework import metadata
from rest_framework import serializers
from rest_framework.relations import RelatedField
from rest_framework.relations import RelatedField, ManyRelatedField
from rest_framework.request import clone_request
# Ansible Tower
@@ -75,7 +75,7 @@ class Metadata(metadata.SimpleMetadata):
elif getattr(field, 'fields', None):
field_info['children'] = self.get_serializer_info(field)
if hasattr(field, 'choices') and not isinstance(field, RelatedField):
if not isinstance(field, (RelatedField, ManyRelatedField)) and hasattr(field, 'choices'):
field_info['choices'] = [(choice_value, choice_name) for choice_value, choice_name in field.choices.items()]
# Indicate if a field is write-only.
@@ -183,6 +183,10 @@ class Metadata(metadata.SimpleMetadata):
if getattr(view, 'search_fields', None):
metadata['search_fields'] = view.search_fields
# Add related search fields if available from the view.
if getattr(view, 'related_search_fields', None):
metadata['related_search_fields'] = view.related_search_fields
return metadata
+4
View File
@@ -2,6 +2,7 @@
# All Rights Reserved.
# Django REST Framework
from django.conf import settings
from rest_framework import pagination
from rest_framework.utils.urls import replace_query_param
@@ -9,11 +10,13 @@ from rest_framework.utils.urls import replace_query_param
class Pagination(pagination.PageNumberPagination):
page_size_query_param = 'page_size'
max_page_size = settings.MAX_PAGE_SIZE
def get_next_link(self):
if not self.page.has_next():
return None
url = self.request and self.request.get_full_path() or ''
url = url.encode('utf-8')
page_number = self.page.next_page_number()
return replace_query_param(url, self.page_query_param, page_number)
@@ -21,5 +24,6 @@ class Pagination(pagination.PageNumberPagination):
if not self.page.has_previous():
return None
url = self.request and self.request.get_full_path() or ''
url = url.encode('utf-8')
page_number = self.page.previous_page_number()
return replace_query_param(url, self.page_query_param, page_number)
+3 -11
View File
@@ -4,9 +4,6 @@
# Python
import logging
# Django
from django.http import Http404
# Django REST Framework
from rest_framework.exceptions import MethodNotAllowed, PermissionDenied
from rest_framework import permissions
@@ -19,7 +16,7 @@ from awx.main.utils import get_object_or_400
logger = logging.getLogger('awx.api.permissions')
__all__ = ['ModelAccessPermission', 'JobTemplateCallbackPermission',
'TaskPermission', 'ProjectUpdatePermission', 'UserPermission']
'TaskPermission', 'ProjectUpdatePermission', 'UserPermission',]
class ModelAccessPermission(permissions.BasePermission):
@@ -96,13 +93,6 @@ class ModelAccessPermission(permissions.BasePermission):
method based on the request method.
'''
# Check that obj (if given) is active, otherwise raise a 404.
active = getattr(obj, 'active', getattr(obj, 'is_active', True))
if callable(active):
active = active()
if not active:
raise Http404()
# Don't allow anonymous users. 401, not 403, hence no raised exception.
if not request.user or request.user.is_anonymous():
return False
@@ -216,3 +206,5 @@ class UserPermission(ModelAccessPermission):
elif request.user.is_superuser:
return True
raise PermissionDenied()
+5
View File
@@ -80,3 +80,8 @@ class AnsiTextRenderer(PlainTextRenderer):
media_type = 'text/plain'
format = 'ansi'
class AnsiDownloadRenderer(PlainTextRenderer):
format = "ansi_download"
+34 -24
View File
@@ -76,13 +76,15 @@ SUMMARIZABLE_FK_FIELDS = {
'total_groups',
'groups_with_active_failures',
'has_inventory_sources'),
'project': DEFAULT_SUMMARY_FIELDS + ('status',),
'project': DEFAULT_SUMMARY_FIELDS + ('status', 'scm_type'),
'project_update': DEFAULT_SUMMARY_FIELDS + ('status', 'failed',),
'credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud'),
'cloud_credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud'),
'network_credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'net'),
'job': DEFAULT_SUMMARY_FIELDS + ('status', 'failed',),
'job': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'elapsed'),
'job_template': DEFAULT_SUMMARY_FIELDS,
'workflow_job_template': DEFAULT_SUMMARY_FIELDS,
'workflow_job': DEFAULT_SUMMARY_FIELDS,
'schedule': DEFAULT_SUMMARY_FIELDS + ('next_run',),
'unified_job_template': DEFAULT_SUMMARY_FIELDS + ('unified_job_type',),
'last_job': DEFAULT_SUMMARY_FIELDS + ('finished', 'status', 'failed', 'license_error'),
@@ -250,6 +252,8 @@ class BaseSerializer(serializers.ModelSerializer):
'project_update': _('SCM Update'),
'inventory_update': _('Inventory Sync'),
'system_job': _('Management Job'),
'workflow_job': _('Workflow Job'),
'workflow_job_template': _('Workflow Template'),
}
choices = []
for t in self.get_types():
@@ -518,7 +522,7 @@ class UnifiedJobTemplateSerializer(BaseSerializer):
class Meta:
model = UnifiedJobTemplate
fields = ('*', 'last_job_run', 'last_job_failed', 'has_schedules',
fields = ('*', 'last_job_run', 'last_job_failed',
'next_job_run', 'status')
def get_related(self, obj):
@@ -607,7 +611,11 @@ class UnifiedJobSerializer(BaseSerializer):
summary_fields = super(UnifiedJobSerializer, self).get_summary_fields(obj)
if obj.spawned_by_workflow:
summary_fields['source_workflow_job'] = {}
summary_obj = obj.unified_job_node.workflow_job
try:
summary_obj = obj.unified_job_node.workflow_job
except UnifiedJob.unified_job_node.RelatedObjectDoesNotExist:
return summary_fields
for field in SUMMARIZABLE_FK_FIELDS['job']:
val = getattr(summary_obj, field, None)
if val is not None:
@@ -666,7 +674,7 @@ class UnifiedJobListSerializer(UnifiedJobSerializer):
def get_types(self):
if type(self) is UnifiedJobListSerializer:
return ['project_update', 'inventory_update', 'job', 'ad_hoc_command', 'system_job']
return ['project_update', 'inventory_update', 'job', 'ad_hoc_command', 'system_job', 'workflow_job']
else:
return super(UnifiedJobListSerializer, self).get_types()
@@ -1581,8 +1589,7 @@ class ResourceAccessListElementSerializer(UserSerializer):
the resource.
'''
ret = super(ResourceAccessListElementSerializer, self).to_representation(user)
object_id = self.context['view'].object_id
obj = self.context['view'].resource_model.objects.get(pk=object_id)
obj = self.context['view'].get_parent_object()
if self.context['view'].request is not None:
requesting_user = self.context['view'].request.user
else:
@@ -1615,7 +1622,8 @@ class ResourceAccessListElementSerializer(UserSerializer):
'name': role.name,
'description': role.description,
'team_id': team_role.object_id,
'team_name': team_role.content_object.name
'team_name': team_role.content_object.name,
'team_organization_name': team_role.content_object.organization.name,
}
if role.content_type is not None:
role_dict['resource_name'] = role.content_object.name
@@ -1757,9 +1765,9 @@ class CredentialSerializerCreate(CredentialSerializer):
'do not give either user or organization. Only valid for creation.'))
organization = serializers.PrimaryKeyRelatedField(
queryset=Organization.objects.all(),
required=False, default=None, write_only=True, allow_null=True,
help_text=_('Write-only field used to add organization to owner role. If provided, '
'do not give either team or team. Only valid for creation.'))
required=False, default=None, allow_null=True,
help_text=_('Inherit permissions from organization roles. If provided on creation, '
'do not give either user or team.'))
class Meta:
model = Credential
@@ -1985,8 +1993,6 @@ class JobSerializer(UnifiedJobSerializer, JobOptionsSerializer):
res = super(JobSerializer, self).get_related(obj)
res.update(dict(
job_events = reverse('api:job_job_events_list', args=(obj.pk,)),
job_plays = reverse('api:job_job_plays_list', args=(obj.pk,)),
job_tasks = reverse('api:job_job_tasks_list', args=(obj.pk,)),
job_host_summaries = reverse('api:job_job_host_summaries_list', args=(obj.pk,)),
activity_stream = reverse('api:job_activity_stream_list', args=(obj.pk,)),
notifications = reverse('api:job_notifications_list', args=(obj.pk,)),
@@ -2365,7 +2371,7 @@ class WorkflowJobTemplateNodeSerializer(WorkflowNodeBaseSerializer):
if view and view.request:
request_method = view.request.method
if request_method in ['PATCH']:
obj = view.get_object()
obj = self.instance
char_prompts = copy.copy(obj.char_prompts)
char_prompts.update(self.extract_char_prompts(data))
else:
@@ -2415,7 +2421,7 @@ class WorkflowJobNodeSerializer(WorkflowNodeBaseSerializer):
res['failure_nodes'] = reverse('api:workflow_job_node_failure_nodes_list', args=(obj.pk,))
res['always_nodes'] = reverse('api:workflow_job_node_always_nodes_list', args=(obj.pk,))
if obj.job:
res['job'] = reverse('api:job_detail', args=(obj.job.pk,))
res['job'] = obj.job.get_absolute_url()
if obj.workflow_job:
res['workflow_job'] = reverse('api:workflow_job_detail', args=(obj.workflow_job.pk,))
return res
@@ -2497,8 +2503,8 @@ class JobEventSerializer(BaseSerializer):
model = JobEvent
fields = ('*', '-name', '-description', 'job', 'event', 'counter',
'event_display', 'event_data', 'event_level', 'failed',
'changed', 'uuid', 'host', 'host_name', 'parent', 'playbook',
'play', 'task', 'role', 'stdout', 'start_line', 'end_line',
'changed', 'uuid', 'parent_uuid', 'host', 'host_name', 'parent',
'playbook', 'play', 'task', 'role', 'stdout', 'start_line', 'end_line',
'verbosity')
def get_related(self, obj):
@@ -2704,18 +2710,15 @@ class WorkflowJobLaunchSerializer(BaseSerializer):
variables_needed_to_start = serializers.ReadOnlyField()
survey_enabled = serializers.SerializerMethodField()
extra_vars = VerbatimField(required=False, write_only=True)
warnings = serializers.SerializerMethodField()
workflow_job_template_data = serializers.SerializerMethodField()
class Meta:
model = WorkflowJobTemplate
fields = ('can_start_without_user_input', 'extra_vars', 'warnings',
fields = ('can_start_without_user_input', 'extra_vars',
'survey_enabled', 'variables_needed_to_start',
'node_templates_missing', 'node_prompts_rejected',
'workflow_job_template_data')
def get_warnings(self, obj):
return obj.get_warnings()
def get_survey_enabled(self, obj):
if obj:
return obj.survey_enabled and 'spec' in obj.survey_spec
@@ -2999,10 +3002,14 @@ class ActivityStreamSerializer(BaseSerializer):
for fk, __ in SUMMARIZABLE_FK_FIELDS.items():
if not hasattr(obj, fk):
continue
allm2m = getattr(obj, fk).distinct()
allm2m = getattr(obj, fk).all()
if getattr(obj, fk).exists():
rel[fk] = []
id_list = []
for thisItem in allm2m:
if getattr(thisItem, 'id', None) in id_list:
continue
id_list.append(getattr(thisItem, 'id', None))
if fk == 'custom_inventory_script':
rel[fk].append(reverse('api:inventory_script_detail', args=(thisItem.id,)))
else:
@@ -3018,7 +3025,7 @@ class ActivityStreamSerializer(BaseSerializer):
try:
if not hasattr(obj, fk):
continue
allm2m = getattr(obj, fk).distinct()
allm2m = getattr(obj, fk).all()
if getattr(obj, fk).exists():
summary_fields[fk] = []
for thisItem in allm2m:
@@ -3047,6 +3054,9 @@ class ActivityStreamSerializer(BaseSerializer):
thisItemDict[field] = fval
if fk == 'group':
thisItemDict['inventory_id'] = getattr(thisItem, 'inventory_id', None)
if thisItemDict.get('id', None):
if thisItemDict.get('id', None) in [obj_dict.get('id', None) for obj_dict in summary_fields[fk]]:
continue
summary_fields[fk].append(thisItemDict)
except ObjectDoesNotExist:
pass
+9
View File
@@ -56,6 +56,10 @@ within all designated text fields of a model.
_Added in AWX 1.4_
(_Added in Ansible Tower 3.1.0_) Search across related fields:
?related__search=findme
## Filtering
Any additional query string parameters may be used to filter the list of
@@ -132,3 +136,8 @@ values.
Lists (for the `in` lookup) may be specified as a comma-separated list of
values.
(_Added in Ansible Tower 3.1.0_) Filtering based on the requesting user's
level of access by query string parameter.
* `role_level`: Level of role to filter on, such as `admin_role`
+6 -5
View File
@@ -3,10 +3,11 @@
{% if new_in_14 %}> _Added in AWX 1.4_{% endif %}
{% if new_in_145 %}> _Added in Ansible Tower 1.4.5_{% endif %}
{% if new_in_148 %}> _Added in Ansible Tower 1.4.8_{% endif %}
{% if new_in_200 %}> _New in Ansible Tower 2.0.0_{% endif %}
{% if new_in_220 %}> _New in Ansible Tower 2.2.0_{% endif %}
{% if new_in_230 %}> _New in Ansible Tower 2.3.0_{% endif %}
{% if new_in_240 %}> _New in Ansible Tower 2.4.0_{% endif %}
{% if new_in_300 %}> _New in Ansible Tower 3.0.0_{% endif %}
{% if new_in_200 %}> _Added in Ansible Tower 2.0.0_{% endif %}
{% if new_in_220 %}> _Added in Ansible Tower 2.2.0_{% endif %}
{% if new_in_230 %}> _Added in Ansible Tower 2.3.0_{% endif %}
{% if new_in_240 %}> _Added in Ansible Tower 2.4.0_{% endif %}
{% if new_in_300 %}> _Added in Ansible Tower 3.0.0_{% endif %}
{% if new_in_310 %}> _New in Ansible Tower 3.1.0_{% endif %}
{% if deprecated %}> _This resource has been deprecated and will be removed in a future release_{% endif %}
{% endif %}
@@ -2,8 +2,9 @@ Launch a Job Template:
Make a POST request to this resource to launch the system job template.
An extra parameter `extra_vars` is suggested in order to pass extra parameters
to the system job task.
Variables specified inside of the parameter `extra_vars` are passed to the
system job task as command line parameters. These tasks can be run manually
on the host system via the `tower-manage` command.
For example on `cleanup_jobs` and `cleanup_activitystream`:
@@ -13,9 +14,17 @@ Which will act on data older than 30 days.
For `cleanup_facts`:
`{"older_than": "4w", `granularity`: "3d"}`
`{"older_than": "4w", "granularity": "3d"}`
Which will reduce the granularity of scan data to one scan per 3 days when the data is older than 4w.
Each individual system job task has its own default values, which are
applicable either when running it from the command line or launching its
system job template with empty `extra_vars`.
- Defaults for `cleanup_activitystream`: days=90
- Defaults for `cleanup_facts`: older_than="30d", granularity="1w"
- Defaults for `cleanup_jobs`: days=90
If successful, the response status code will be 202. If the job cannot be
launched, a 405 status code will be returned.
+4 -2
View File
@@ -13,6 +13,7 @@ Use the `format` query string parameter to specify the output format.
* Plain Text with ANSI color codes: `?format=ansi`
* JSON structure: `?format=json`
* Downloaded Plain Text: `?format=txt_download`
* Downloaded Plain Text with ANSI color codes: `?format=ansi_download`
(_New in Ansible Tower 2.0.0_) When using the Browsable API, HTML and JSON
formats, the `start_line` and `end_line` query string parameters can be used
@@ -21,7 +22,8 @@ to specify a range of line numbers to retrieve.
Use `dark=1` or `dark=0` as a query string parameter to force or disable a
dark background.
+Files over {{ settings.STDOUT_MAX_BYTES_DISPLAY|filesizeformat }} (configurable) will not display in the browser. Use the `txt_download`
+format to download the file directly to view it.
Files over {{ settings.STDOUT_MAX_BYTES_DISPLAY|filesizeformat }} (configurable)
will not display in the browser. Use the `txt_download` or `ansi_download`
formats to download the file directly to view it.
{% include "api/_new_in_awx.md" %}
@@ -0,0 +1,12 @@
# Cancel Workflow Job
Make a GET request to this resource to determine if the workflow job can be
canceled. The response will include the following field:
* `can_cancel`: Indicates whether this workflow job is in a state that can
be canceled (boolean, read-only)
Make a POST request to this endpoint to submit a request to cancel a pending
or running workflow job. The response status code will be 202 if the
request to cancel was successfully submitted, or 405 if the workflow job
cannot be canceled.
@@ -0,0 +1,5 @@
Relaunch a workflow job:
Make a POST request to this endpoint to launch a workflow job identical to the parent workflow job. This will spawn jobs, project updates, or inventory updates based on the unified job templates referenced in the workflow nodes in the workflow job. No POST data is accepted for this action.
If successful, the response status code will be 201 and serialized data of the new workflow job will be returned.
@@ -0,0 +1,34 @@
Copy a Workflow Job Template:
Make a GET request to this resource to determine if the current user has
permission to copy the workflow_job_template and whether any linked
templates or prompted fields will be ignored due to permissions problems.
The response will include the following fields:
* `can_copy`: Flag indicating whether the active user has permission to make
a copy of this workflow_job_template; provides the same content as the
workflow_job_template detail view summary_fields.user_capabilities.copy
(boolean, read-only)
* `can_copy_without_user_input`: Flag indicating if the user should be
prompted for confirmation before the copy is executed (boolean, read-only)
* `templates_unable_to_copy`: List of node ids of nodes that have a related
job template, project, or inventory that the current user lacks permission
to use and will be missing in workflow nodes of the copy (array, read-only)
* `inventories_unable_to_copy`: List of node ids of nodes that have a related
prompted inventory that the current user lacks permission
to use and will be missing in workflow nodes of the copy (array, read-only)
* `credentials_unable_to_copy`: List of node ids of nodes that have a related
prompted credential that the current user lacks permission
to use and will be missing in workflow nodes of the copy (array, read-only)
Make a POST request to this endpoint to save a copy of this
workflow_job_template. No POST data is accepted for this action.
If successful, the response status code will be 201. The response body will
contain serialized data about the new workflow_job_template, which will be
similar to the original workflow_job_template, but with an additional `@`
and a timestamp in the name.
All workflow nodes and connections in the original will also exist in the
copy. The nodes will be missing related resources if the user did not have
access to use them.
@@ -12,8 +12,13 @@ workflow_job_template. The response will include the following fields:
enabled survey (boolean, read-only)
* `extra_vars`: Text which is the `extra_vars` field of this workflow_job_template
(text, read-only)
* `warnings`: JSON object listing warnings of all workflow_job_template_nodes
contained in this workflow_job_template (JSON object, read-only)
* `node_templates_missing`: List of node ids of all nodes that have a
null `unified_job_template`, which will cause their branches to stop
execution (list, read-only)
* `node_prompts_rejected`: List of node ids of all nodes that have
specified a field that will be rejected because its `unified_job_template`
does not allow prompting for this field, this will not halt execution of
the branch but the field will be ignored (list, read-only)
* `workflow_job_template_data`: JSON object listing general information of
this workflow_job_template (JSON object, read-only)
-2
View File
@@ -205,8 +205,6 @@ job_urls = patterns('awx.api.views',
url(r'^(?P<pk>[0-9]+)/relaunch/$', 'job_relaunch'),
url(r'^(?P<pk>[0-9]+)/job_host_summaries/$', 'job_job_host_summaries_list'),
url(r'^(?P<pk>[0-9]+)/job_events/$', 'job_job_events_list'),
url(r'^(?P<pk>[0-9]+)/job_plays/$', 'job_job_plays_list'),
url(r'^(?P<pk>[0-9]+)/job_tasks/$', 'job_job_tasks_list'),
url(r'^(?P<pk>[0-9]+)/activity_stream/$', 'job_activity_stream_list'),
url(r'^(?P<pk>[0-9]+)/stdout/$', 'job_stdout'),
url(r'^(?P<pk>[0-9]+)/notifications/$', 'job_notifications_list'),
View File
-82
View File
@@ -1,82 +0,0 @@
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
from collections import OrderedDict
import copy
import functools
from rest_framework.response import Response
from rest_framework.settings import api_settings
from rest_framework import status
def paginated(method):
"""Given an method with a Django REST Framework API method signature
(e.g. `def get(self, request, ...):`), abstract out boilerplate pagination
duties.
This causes the method to receive two additional keyword arguments:
`limit`, and `offset`. The method expects a two-tuple to be
returned, with a result list as the first item, and the total number
of results (across all pages) as the second item.
"""
@functools.wraps(method)
def func(self, request, *args, **kwargs):
# Manually spin up pagination.
# How many results do we show?
paginator_class = api_settings.DEFAULT_PAGINATION_CLASS
limit = paginator_class.page_size
if request.query_params.get(paginator_class.page_size_query_param, False):
limit = request.query_params[paginator_class.page_size_query_param]
if paginator_class.max_page_size:
limit = min(paginator_class.max_page_size, limit)
limit = int(limit)
# Get the order parameter if it's given
if request.query_params.get("ordering", False):
ordering = request.query_params["ordering"]
else:
ordering = None
# What page are we on?
page = int(request.query_params.get('page', 1))
offset = (page - 1) * limit
# Add the limit, offset, page, and order variables to the keyword arguments
# being sent to the underlying method.
kwargs['limit'] = limit
kwargs['offset'] = offset
kwargs['ordering'] = ordering
# Okay, call the underlying method.
results, count, stat = method(self, request, *args, **kwargs)
if stat is None:
stat = status.HTTP_200_OK
if stat == status.HTTP_200_OK:
# Determine the next and previous pages, if any.
prev, next_ = None, None
if page > 1:
get_copy = copy.copy(request.GET)
get_copy['page'] = page - 1
prev = '%s?%s' % (request.path, get_copy.urlencode())
if count > offset + limit:
get_copy = copy.copy(request.GET)
get_copy['page'] = page + 1
next_ = '%s?%s' % (request.path, get_copy.urlencode())
# Compile the results into a dictionary with pagination
# information.
answer = OrderedDict((
('count', count),
('next', next_),
('previous', prev),
('results', results),
))
else:
answer = results
# Okay, we're done; return response data.
return Response(answer, status=stat)
return func
+204 -274
View File
File diff suppressed because it is too large Load Diff