mirror of
https://github.com/ZwareBear/awx.git
synced 2026-04-11 18:41:48 -05:00
* Clean up added work_type processing for mesh_code branch * track both execution and control capacity * Remove unused execution_capacity property * Count all forms of capacity to make test pass * Force jobs to be on execution nodes, updates on control nodes * Introduce capacity_type property to abstract some details out * Update test to cover all job types at same time * Register OpenShift nodes as control types * Remove unqualified consumed_capacity from task manager and make unit tests work * Remove unqualified consumed_capacity from task manager and make unit tests work * Update unit test to execution vs control TM logic changes * Fix bug, else handling for work_type method
120 lines
4.4 KiB
Python
120 lines
4.4 KiB
Python
import pytest
|
|
|
|
from awx.main.models import InstanceGroup
|
|
|
|
|
|
class FakeMeta(object):
    """Minimal stand-in for a Django model's ``_meta`` options object.

    Only the attribute that the capacity code inspects is provided.
    """

    model_name = 'job'
|
|
|
|
|
|
class FakeObject(object):
    """Generic attribute bag that mimics a Django model instance.

    Every keyword argument becomes an attribute on the instance, and a
    ``FakeMeta`` is attached so code that inspects ``obj._meta`` works.
    """

    def __init__(self, **kwargs):
        for field_name, field_value in kwargs.items():
            setattr(self, field_name, field_value)
        # Fake out Django model metadata; point concrete_model back at
        # the instance itself so _meta lookups resolve locally.
        self._meta = FakeMeta()
        self._meta.concrete_model = self
|
|
|
|
|
|
class Job(FakeObject):
    """Fake job standing in for the real awx Job model in these tests."""

    # These fakes never run inside a container group.
    is_container_group_task = False
    # Every fake job consumes the same, fixed amount of capacity.
    task_impact = 43

    def log_format(self):
        """Return the short display string used in log output."""
        return 'job 382 (fake)'
|
|
|
|
|
|
@pytest.fixture
def sample_cluster():
    """Return a factory that stands up a small fake cluster.

    The factory builds three fake instance groups sharing three fake
    instances (each with capacity 200):

      * ``ig_small`` - i1
      * ``ig_large`` - i2, i3
      * ``default``  - i2 (overlaps with ig_large)

    and returns them as ``[default, ig_large, ig_small]``.
    """

    def stand_up_cluster():
        class Instances(FakeObject):
            # Minimal stand-in for a Django related-objects manager.
            def add(self, *args):
                self.obj.instance_list.extend(args)

            def all(self):
                return self.obj.instance_list

        class InstanceGroup(FakeObject):
            # Deliberately shadows the imported InstanceGroup; tracks its
            # member instances in a plain list.
            def __init__(self, **kwargs):
                super(InstanceGroup, self).__init__(**kwargs)
                self.instance_list = []

            @property
            def instances(self):
                # A fresh manager each access, bound to this group.
                return Instances(obj=self)

        class Instance(FakeObject):
            pass

        groups = {name: InstanceGroup(name=name) for name in ('ig_small', 'ig_large', 'default')}
        i1, i2, i3 = [Instance(hostname='i%d' % idx, capacity=200) for idx in (1, 2, 3)]
        groups['ig_small'].instances.add(i1)
        groups['ig_large'].instances.add(i2, i3)
        groups['default'].instances.add(i2)
        return [groups['default'], groups['ig_large'], groups['ig_small']]

    return stand_up_cluster
|
|
|
|
|
|
def test_committed_capacity(sample_cluster):
    """Waiting jobs commit capacity to every group sharing their instances."""
    default, ig_large, ig_small = sample_cluster()
    tasks = [Job(status='waiting', instance_group=group) for group in (default, ig_large, ig_small)]
    capacities = InstanceGroup.objects.capacity_values(qs=[default, ig_large, ig_small], tasks=tasks, breakdown=True)
    # Jobs submitted to either default or ig_large must count toward both,
    # because those groups share instance i2.
    expected = {'default': 43 * 2, 'ig_large': 43 * 2, 'ig_small': 43}
    for group_name, committed in expected.items():
        assert capacities[group_name]['committed_capacity'] == committed
|
|
|
|
|
|
def test_running_capacity(sample_cluster):
    """Running jobs are tallied against each group holding their execution node."""
    default, ig_large, ig_small = sample_cluster()
    tasks = [Job(status='running', execution_node=node) for node in ('i1', 'i2', 'i3')]
    capacities = InstanceGroup.objects.capacity_values(qs=[default, ig_large, ig_small], tasks=tasks, breakdown=True)
    # The default group holds only one instance, so only one job lands there
    assert capacities['default']['running_capacity'] == 43
    # The large IG spans two instances and picks up two running jobs
    assert capacities['ig_large']['running_capacity'] == 43 * 2
    assert capacities['ig_small']['running_capacity'] == 43
|
|
|
|
|
|
def test_offline_node_running(sample_cluster):
    """
    The accounting must not explode when a job is marked running
    on a node that has since gone offline (capacity forced to 0).
    """
    default, ig_large, ig_small = sample_cluster()
    # Simulate i1 dropping offline after the job started.
    ig_small.instance_list[0].capacity = 0
    tasks = [Job(status='running', execution_node='i1', instance_group=ig_small)]
    capacities = InstanceGroup.objects.capacity_values(qs=[default, ig_large, ig_small], tasks=tasks)
    # The job's impact is still charged to its instance group.
    assert capacities['ig_small']['consumed_execution_capacity'] == 43
|
|
|
|
|
|
def test_offline_node_waiting(sample_cluster):
    """
    Same as the offline-running case, but for a job still in
    the waiting state.
    """
    default, ig_large, ig_small = sample_cluster()
    # Knock the group's only instance offline before accounting runs.
    ig_small.instance_list[0].capacity = 0
    tasks = [Job(status='waiting', instance_group=ig_small)]
    capacities = InstanceGroup.objects.capacity_values(qs=[default, ig_large, ig_small], tasks=tasks)
    assert capacities['ig_small']['consumed_execution_capacity'] == 43
|
|
|
|
|
|
def test_RBAC_reduced_filter(sample_cluster):
    """
    A user can see jobs that are running in the `ig_small` and `ig_large`
    groups without having permission to see those instance groups
    themselves. Verify that this does not blow everything up.
    """
    default, ig_large, ig_small = sample_cluster()
    tasks = [Job(status='waiting', instance_group=group) for group in (default, ig_large, ig_small)]
    # Only `default` is visible to the requesting user.
    capacities = InstanceGroup.objects.capacity_values(qs=[default], tasks=tasks, breakdown=True)
    # Cross-links between groups are not visible to the current user,
    # so a naive per-group accounting is returned instead.
    assert capacities['default']['committed_capacity'] == 43
|