Define a DEFAULT_QUEUE_NAME

This commit is contained in:
Yanis Guenane
2021-05-17 18:06:00 +02:00
committed by Shane McDonald
parent d3b20e6585
commit 82c4f6bb88
10 changed files with 39 additions and 34 deletions

View File

@@ -81,7 +81,7 @@ def instance_group_factory():
@pytest.fixture
def default_instance_group(instance_factory, instance_group_factory):
return create_instance_group("tower", instances=[create_instance("hostA")])
return create_instance_group("default", instances=[create_instance("hostA")])
@pytest.fixture

View File

@@ -13,7 +13,7 @@ from awx.main.utils import camelcase_to_underscore
@pytest.fixture
def tower_instance_group():
ig = InstanceGroup(name='tower')
ig = InstanceGroup(name='default')
ig.save()
return ig
@@ -117,8 +117,8 @@ def test_delete_rename_tower_instance_group_prevented(delete, options, tower_ins
assert 'GET' in resp.data['actions']
assert 'PUT' in resp.data['actions']
# Rename 'tower' instance group denied
patch(url, {'name': 'tower_prime'}, super_user, expect=400)
# Rename 'default' instance group denied
patch(url, {'name': 'default_prime'}, super_user, expect=400)
# Rename, other instance group OK
url = reverse("api:instance_group_detail", kwargs={'pk': instance_group.pk})

View File

@@ -104,7 +104,7 @@ class TestActiveCount:
def test_active_count_minus_tower(self, inventory):
inventory.hosts.create(name='locally-managed-host')
source = inventory.inventory_sources.create(name='tower-source', source='tower')
source = inventory.inventory_sources.create(name='tower-source', source='default')
source.hosts.create(name='remotely-managed-host', inventory=inventory)
assert Host.objects.active_count() == 1

View File

@@ -10,21 +10,21 @@ class TestCapacityMapping(TransactionTestCase):
def sample_cluster(self):
ig_small = InstanceGroup.objects.create(name='ig_small')
ig_large = InstanceGroup.objects.create(name='ig_large')
tower = InstanceGroup.objects.create(name='tower')
default = InstanceGroup.objects.create(name='default')
i1 = Instance.objects.create(hostname='i1', capacity=200)
i2 = Instance.objects.create(hostname='i2', capacity=200)
i3 = Instance.objects.create(hostname='i3', capacity=200)
ig_small.instances.add(i1)
ig_large.instances.add(i2, i3)
tower.instances.add(i2)
return [tower, ig_large, ig_small]
default.instances.add(i2)
return [default, ig_large, ig_small]
def test_mapping(self):
self.sample_cluster()
with self.assertNumQueries(2):
inst_map, ig_map = InstanceGroup.objects.capacity_mapping()
assert inst_map['i1'] == set(['ig_small'])
assert inst_map['i2'] == set(['ig_large', 'tower'])
assert inst_map['i2'] == set(['ig_large', 'default'])
assert ig_map['ig_small'] == set(['ig_small'])
assert ig_map['ig_large'] == set(['ig_large', 'tower'])
assert ig_map['tower'] == set(['ig_large', 'tower'])
assert ig_map['ig_large'] == set(['ig_large', 'default'])
assert ig_map['default'] == set(['ig_large', 'default'])

View File

@@ -43,34 +43,34 @@ def sample_cluster():
ig_small = InstanceGroup(name='ig_small')
ig_large = InstanceGroup(name='ig_large')
tower = InstanceGroup(name='tower')
default = InstanceGroup(name='default')
i1 = Instance(hostname='i1', capacity=200)
i2 = Instance(hostname='i2', capacity=200)
i3 = Instance(hostname='i3', capacity=200)
ig_small.instances.add(i1)
ig_large.instances.add(i2, i3)
tower.instances.add(i2)
return [tower, ig_large, ig_small]
default.instances.add(i2)
return [default, ig_large, ig_small]
return stand_up_cluster
def test_committed_capacity(sample_cluster):
tower, ig_large, ig_small = sample_cluster()
tasks = [Job(status='waiting', instance_group=tower), Job(status='waiting', instance_group=ig_large), Job(status='waiting', instance_group=ig_small)]
capacities = InstanceGroup.objects.capacity_values(qs=[tower, ig_large, ig_small], tasks=tasks, breakdown=True)
default, ig_large, ig_small = sample_cluster()
tasks = [Job(status='waiting', instance_group=default), Job(status='waiting', instance_group=ig_large), Job(status='waiting', instance_group=ig_small)]
capacities = InstanceGroup.objects.capacity_values(qs=[default, ig_large, ig_small], tasks=tasks, breakdown=True)
# Jobs submitted to either tower or ig_large must count toward both
assert capacities['tower']['committed_capacity'] == 43 * 2
assert capacities['default']['committed_capacity'] == 43 * 2
assert capacities['ig_large']['committed_capacity'] == 43 * 2
assert capacities['ig_small']['committed_capacity'] == 43
def test_running_capacity(sample_cluster):
tower, ig_large, ig_small = sample_cluster()
default, ig_large, ig_small = sample_cluster()
tasks = [Job(status='running', execution_node='i1'), Job(status='running', execution_node='i2'), Job(status='running', execution_node='i3')]
capacities = InstanceGroup.objects.capacity_values(qs=[tower, ig_large, ig_small], tasks=tasks, breakdown=True)
capacities = InstanceGroup.objects.capacity_values(qs=[default, ig_large, ig_small], tasks=tasks, breakdown=True)
# Tower is only given 1 instance
assert capacities['tower']['running_capacity'] == 43
assert capacities['default']['running_capacity'] == 43
# Large IG has 2 instances
assert capacities['ig_large']['running_capacity'] == 43 * 2
assert capacities['ig_small']['running_capacity'] == 43
@@ -81,10 +81,10 @@ def test_offline_node_running(sample_cluster):
Assure that algorithm doesn't explode if a job is marked running
in an offline node
"""
tower, ig_large, ig_small = sample_cluster()
default, ig_large, ig_small = sample_cluster()
ig_small.instance_list[0].capacity = 0
tasks = [Job(status='running', execution_node='i1', instance_group=ig_small)]
capacities = InstanceGroup.objects.capacity_values(qs=[tower, ig_large, ig_small], tasks=tasks)
capacities = InstanceGroup.objects.capacity_values(qs=[default, ig_large, ig_small], tasks=tasks)
assert capacities['ig_small']['consumed_capacity'] == 43
@@ -92,10 +92,10 @@ def test_offline_node_waiting(sample_cluster):
"""
Same but for a waiting job
"""
tower, ig_large, ig_small = sample_cluster()
default, ig_large, ig_small = sample_cluster()
ig_small.instance_list[0].capacity = 0
tasks = [Job(status='waiting', instance_group=ig_small)]
capacities = InstanceGroup.objects.capacity_values(qs=[tower, ig_large, ig_small], tasks=tasks)
capacities = InstanceGroup.objects.capacity_values(qs=[default, ig_large, ig_small], tasks=tasks)
assert capacities['ig_small']['consumed_capacity'] == 43
@@ -105,9 +105,9 @@ def test_RBAC_reduced_filter(sample_cluster):
but user does not have permission to see those actual instance groups.
Verify that this does not blow everything up.
"""
tower, ig_large, ig_small = sample_cluster()
tasks = [Job(status='waiting', instance_group=tower), Job(status='waiting', instance_group=ig_large), Job(status='waiting', instance_group=ig_small)]
capacities = InstanceGroup.objects.capacity_values(qs=[tower], tasks=tasks, breakdown=True)
default, ig_large, ig_small = sample_cluster()
tasks = [Job(status='waiting', instance_group=default), Job(status='waiting', instance_group=ig_large), Job(status='waiting', instance_group=ig_small)]
capacities = InstanceGroup.objects.capacity_values(qs=[default], tasks=tasks, breakdown=True)
# Cross-links between groups not visible to current user,
# so a naive accounting of capacities is returned instead
assert capacities['tower']['committed_capacity'] == 43
assert capacities['default']['committed_capacity'] == 43