def test_multi_group_basic_job_launch(instance_factory, default_instance_group, mocker, instance_group_factory, job_template_factory):
    """Two pending jobs pinned to two different instance groups should each be
    started on their own group, even when the (mocked) task_impact of 500
    saturates a single group's capacity."""
    i1 = instance_factory("i1")
    i2 = instance_factory("i2")
    ig1 = instance_group_factory("ig1", instances=[i1])
    ig2 = instance_group_factory("ig2", instances=[i2])
    objects1 = job_template_factory('jt1', organization='org1', project='proj1',
                                    inventory='inv1', credential='cred1',
                                    jobs=["job_should_start"])
    objects1.job_template.instance_groups.add(ig1)
    j1 = objects1.jobs['job_should_start']
    j1.status = 'pending'
    j1.save()
    objects2 = job_template_factory('jt2', organization='org2', project='proj2',
                                    inventory='inv2', credential='cred2',
                                    jobs=["job_should_still_start"])
    objects2.job_template.instance_groups.add(ig2)
    j2 = objects2.jobs['job_should_still_start']
    j2.status = 'pending'
    j2.save()
    with mock.patch('awx.main.models.Job.task_impact', new_callable=mock.PropertyMock) as mock_task_impact:
        mock_task_impact.return_value = 500
        # BUG FIX: ``mocker.patch`` is not a context manager -- it patches
        # immediately and returns the mock, so ``with`` merely entered the
        # MagicMock. Use ``mock.patch`` for properly scoped patching.
        with mock.patch("awx.main.scheduler.TaskManager.start_task"):
            TaskManager().schedule()
            TaskManager.start_task.assert_has_calls(
                [mock.call(j1, ig1, []), mock.call(j2, ig2, [])])
def test_single_jt_multi_job_launch_allow_simul_allowed(default_instance_group, job_template_factory, mocker):
    """Both jobs of one template are started together when each job has
    allow_simultaneous set."""
    instance = default_instance_group.instances.all()[0]
    objects = job_template_factory('jt', organization='org1', project='proj',
                                   inventory='inv', credential='cred',
                                   jobs=["job_should_start", "job_should_not_start"])
    objects.job_template.save()
    first_job = objects.jobs["job_should_start"]
    second_job = objects.jobs["job_should_not_start"]
    for pending_job in (first_job, second_job):
        pending_job.allow_simultaneous = True
        pending_job.status = 'pending'
        pending_job.save()
    with mock.patch("awx.main.scheduler.TaskManager.start_task"):
        TaskManager().schedule()
        expected = [mock.call(first_job, default_instance_group, [], instance),
                    mock.call(second_job, default_instance_group, [], instance)]
        TaskManager.start_task.assert_has_calls(expected)
def test_multi_group_with_shared_dependency(instance_factory, controlplane_instance_group, mocker, instance_group_factory, job_template_factory):
    """Two jobs sharing one SCM project dependency: a single project update
    runs first on the control plane (carrying both jobs as dependents), then
    each job starts on its own instance group."""
    i1 = instance_factory("i1")
    i2 = instance_factory("i2")
    ig1 = instance_group_factory("ig1", instances=[i1])
    ig2 = instance_group_factory("ig2", instances=[i2])
    objects1 = job_template_factory(
        'jt1',
        organization='org1',
        project='proj1',
        inventory='inv1',
        credential='cred1',
    )
    objects1.job_template.instance_groups.add(ig1)
    j1 = create_job(objects1.job_template, dependencies_processed=False)
    p = objects1.project
    p.scm_update_on_launch = True
    # cache timeout 0 -> an update is always considered required
    p.scm_update_cache_timeout = 0
    p.scm_type = "git"
    p.scm_url = "http://github.com/ansible/ansible.git"
    p.save()
    objects2 = job_template_factory('jt2', organization=objects1.organization,
                                    project=p, inventory='inv2', credential='cred2')
    objects2.job_template.instance_groups.add(ig2)
    j2 = create_job(objects2.job_template, dependencies_processed=False)
    # BUG FIX: ``mocker.patch`` is not a context manager (it patches
    # immediately and returns the mock); use ``mock.patch`` so the patch is
    # scoped to this block, matching the second block below.
    with mock.patch("awx.main.scheduler.TaskManager.start_task"):
        DependencyManager().schedule()
        TaskManager().schedule()
        pu = p.project_updates.first()
        TaskManager.start_task.assert_called_once_with(
            pu, controlplane_instance_group, [j1, j2],
            controlplane_instance_group.instances.all()[0])
        pu.finished = pu.created + timedelta(seconds=1)
        pu.status = "successful"
        pu.save()
    with mock.patch("awx.main.scheduler.TaskManager.start_task"):
        DependencyManager().schedule()
        TaskManager().schedule()
        TaskManager.start_task.assert_any_call(j1, ig1, [], i1)
        TaskManager.start_task.assert_any_call(j2, ig2, [], i2)
        assert TaskManager.start_task.call_count == 2
def test_save_failed(self, logger_mock, get_running_tasks, *args):
    """A DatabaseError raised while marking a reaped job as failed must be
    swallowed and logged -- the job row may have been deleted out from under
    the task manager."""
    logger_mock.error = mock.MagicMock()
    job = Job(id=2, modified=tz_now(), status='running',
              celery_task_id='blah', execution_node='host1')
    job.websocket_emit_status = mock.MagicMock()
    # Report the job as running on host1, with nothing in the waiting list.
    get_running_tasks.return_value = ({'host1': [job]}, [])
    tm = TaskManager()
    with mock.patch.object(job, 'save', side_effect=DatabaseError):
        tm.cleanup_inconsistent_celery_tasks()
        # Assertions stay inside the patch scope: on exit ``job.save`` is
        # restored to the real model method and loses its mock attributes.
        job.save.assert_called_once()
        logger_mock.error.assert_called_once_with(
            "Task job 2 (failed) DB error in marking failed. Job possibly deleted."
        )
def test_single_jt_multi_job_launch_blocks_last(default_instance_group, job_template_factory, mocker):
    """Two pending jobs from the same (non-simultaneous) template: only the
    first may start; the second starts only after the first succeeds."""
    objects = job_template_factory('jt', organization='org1', project='proj',
                                   inventory='inv', credential='cred',
                                   jobs=["job_should_start", "job_should_not_start"])
    j1 = objects.jobs["job_should_start"]
    j1.status = 'pending'
    j1.save()
    j2 = objects.jobs["job_should_not_start"]
    j2.status = 'pending'
    j2.save()
    with mock.patch("awx.main.scheduler.TaskManager.start_task"):
        TaskManager().schedule()
        TaskManager.start_task.assert_called_once_with(j1, default_instance_group, [])
    j1.status = "successful"
    j1.save()
    # BUG FIX: ``mocker.patch`` is not a context manager -- it patches
    # immediately and returns the mock; ``with`` only entered the MagicMock.
    # Use ``mock.patch`` so this block matches the first one above.
    with mock.patch("awx.main.scheduler.TaskManager.start_task"):
        TaskManager().schedule()
        TaskManager.start_task.assert_called_once_with(j2, default_instance_group, [])
def test_single_job_scheduler_launch(default_instance_group, job_template_factory, mocker):
    """A single pending job is started on the default instance group."""
    objects = job_template_factory('jt', organization='org1', project='proj',
                                   inventory='inv', credential='cred',
                                   jobs=["job_should_start"])
    j = objects.jobs["job_should_start"]
    j.status = 'pending'
    j.save()
    # BUG FIX: ``mocker.patch`` is not a context manager (it patches
    # immediately and returns the mock); use ``mock.patch`` for a properly
    # scoped patch.
    with mock.patch("awx.main.scheduler.TaskManager.start_task"):
        TaskManager().schedule()
        TaskManager.start_task.assert_called_once_with(j, default_instance_group, [])
def test_approval_node_approve(self, post, admin_user, job_template):
    # This test ensures that a user (with permissions to do so) can APPROVE
    # workflow approvals. Also asserts that trying to APPROVE approvals
    # that have already been dealt with will throw an error.
    wfjt = WorkflowJobTemplate.objects.create(name='foobar')
    node = wfjt.workflow_nodes.create(unified_job_template=job_template)
    url = reverse('api:workflow_job_template_node_create_approval',
                  kwargs={
                      'pk': node.pk,
                      'version': 'v2'
                  })
    post(url, {
        'name': 'Approve Test',
        'description': '',
        'timeout': 0
    }, user=admin_user, expect=200)
    post(reverse('api:workflow_job_template_launch', kwargs={'pk': wfjt.pk}),
         user=admin_user, expect=201)
    wf_job = WorkflowJob.objects.first()
    # Two scheduling passes -- presumably one to start the workflow job and
    # one to spawn the approval node's job (TODO confirm against TaskManager).
    TaskManager().schedule()
    TaskManager().schedule()
    wfj_node = wf_job.workflow_nodes.first()
    approval = wfj_node.job
    assert approval.name == 'Approve Test'
    post(reverse('api:workflow_approval_approve', kwargs={'pk': approval.pk}),
         user=admin_user, expect=204)
    # Test that there is an activity stream entry that was created for the "approve" action.
    qs = ActivityStream.objects.order_by('-timestamp').first()
    assert qs.object1 == 'workflow_approval'
    assert qs.changes == '{"status": ["pending", "successful"]}'
    assert WorkflowApproval.objects.get(
        pk=approval.pk).status == 'successful'
    assert qs.operation == 'update'
    # Approving an already-approved approval must be rejected.
    post(reverse('api:workflow_approval_approve', kwargs={'pk': approval.pk}),
         user=admin_user, expect=400)
def test_workflow_job_no_instancegroup(workflow_job_template_factory, controlplane_instance_group, mocker):
    """Workflow jobs are not dispatched to an instance group: start_task
    receives None for both the group and the instance."""
    wfjt = workflow_job_template_factory(
        'anicedayforawalk').workflow_job_template
    wfj = WorkflowJob.objects.create(workflow_job_template=wfjt)
    wfj.status = "pending"
    wfj.save()
    # BUG FIX: ``mocker.patch`` is not a context manager (it patches
    # immediately and returns the mock); use ``mock.patch`` for a properly
    # scoped patch.
    with mock.patch("awx.main.scheduler.TaskManager.start_task"):
        TaskManager().schedule()
        TaskManager.start_task.assert_called_once_with(wfj, None, [], None)
        assert wfj.instance_group is None
def test_failover_group_run(instance_factory, controlplane_instance_group, mocker, instance_group_factory, job_template_factory):
    """When the first instance group on a template is saturated, the job
    fails over to the next group listed on the template."""
    node_a = instance_factory("i1")
    node_b = instance_factory("i2")
    group_a = instance_group_factory("ig1", instances=[node_a])
    group_b = instance_group_factory("ig2", instances=[node_b])
    objects1 = job_template_factory('jt1', organization='org1', project='proj1',
                                    inventory='inv1', credential='cred1',
                                    jobs=["job_should_start"])
    objects1.job_template.instance_groups.add(group_a)
    first_job = objects1.jobs['job_should_start']
    first_job.status = 'pending'
    first_job.save()
    objects2 = job_template_factory('jt2', organization=objects1.organization,
                                    project='proj2', inventory='inv2', credential='cred2',
                                    jobs=["job_should_start", "job_should_also_start"])
    for group in (group_a, group_b):
        objects2.job_template.instance_groups.add(group)
    failover_job = objects2.jobs['job_should_also_start']
    failover_job.status = 'pending'
    failover_job.save()
    scheduler = TaskManager()
    with mock.patch('awx.main.models.Job.task_impact', new_callable=mock.PropertyMock) as impact:
        # Each job consumes the whole capacity of a single node.
        impact.return_value = 500
        with mock.patch.object(TaskManager, "start_task", wraps=scheduler.start_task) as started:
            scheduler.schedule()
            started.assert_has_calls([mock.call(first_job, group_a, [], node_a),
                                      mock.call(failover_job, group_b, [], node_b)])
            assert started.call_count == 2
def test_cleanup_inconsistent_task(self, notify, active_tasks, considered_jobs, reapable_jobs, running_tasks, waiting_tasks, mocker):
    """Jobs tracked as running but absent from the active task list are
    reaped: marked failed, a 'failed' notification is sent, and the status
    change is emitted over the websocket. All other considered jobs must be
    left untouched (never saved)."""
    tm = TaskManager()
    # Feed the manager canned running/waiting/active views of the cluster.
    tm.get_running_tasks = mocker.Mock(return_value=(running_tasks, waiting_tasks))
    tm.get_active_tasks = mocker.Mock(return_value=active_tasks)
    tm.cleanup_inconsistent_celery_tasks()
    # Jobs that are considered but not reapable must not be modified.
    for j in considered_jobs:
        if j not in reapable_jobs:
            j.save.assert_not_called()
    assert notify.call_count == 4
    notify.assert_has_calls(
        [mock.call(j, 'failed') for j in reapable_jobs], any_order=True)
    for j in reapable_jobs:
        j.websocket_emit_status.assert_called_once_with('failed')
        assert j.status == 'failed'
        assert j.job_explanation == (
            'Task was marked as running in Tower but was not present in Celery, so it has been marked as failed.'
        )
def test_multi_jt_capacity_blocking(hybrid_instance, job_template_factory, mocker):
    """With task_impact inflated past the node's capacity, the second job is
    held back until the first one completes."""
    node = hybrid_instance
    control_group = node.rampart_groups.first()
    objects1 = job_template_factory('jt1', organization='org1', project='proj1',
                                    inventory='inv1', credential='cred1')
    objects2 = job_template_factory('jt2', organization='org2', project='proj2',
                                    inventory='inv2', credential='cred2')
    first_job = create_job(objects1.job_template)
    second_job = create_job(objects2.job_template)
    scheduler = TaskManager()
    with mock.patch('awx.main.models.Job.task_impact', new_callable=mock.PropertyMock) as impact:
        impact.return_value = 505
        with mock.patch.object(TaskManager, "start_task", wraps=scheduler.start_task) as started:
            scheduler.schedule()
            started.assert_called_once_with(first_job, control_group, [], node)
        # Finish the first job so the second is no longer capacity-blocked.
        first_job.status = "successful"
        first_job.save()
        with mock.patch.object(TaskManager, "start_task", wraps=scheduler.start_task) as started:
            scheduler.schedule()
            started.assert_called_once_with(second_job, control_group, [], node)
def test_generate_dependencies_only_once(job_template_factory):
    """A job goes through generate_dependencies() exactly once; on the next
    scheduling cycle it must no longer appear in the argument list."""
    objects = job_template_factory('jt', organization='org1')
    pending_job = objects.job_template.create_job()
    pending_job.status = "pending"
    pending_job.name = "job_gen_dep"
    pending_job.save()
    with mock.patch("awx.main.scheduler.TaskManager.start_task"):
        # A brand new job starts with dependencies_processed False.
        assert not pending_job.dependencies_processed
        # One cycle of ._schedule() generates the dependencies...
        TaskManager()._schedule()
        # ...after which the flag is persisted as True.
        refreshed = Job.objects.filter(name="job_gen_dep")[0]
        assert refreshed.dependencies_processed
        # A second cycle must call .generate_dependencies() without the job.
        manager = TaskManager()
        manager.generate_dependencies = mock.MagicMock()
        manager._schedule()
        # call_args is (positional_args, kwargs); [0][0] is the first
        # positional argument of .generate_dependencies().
        assert manager.generate_dependencies.call_args[0][0] == []
def test_multi_jt_capacity_blocking(default_instance_group, job_template_factory, mocker):
    """When one job's (mocked) task_impact fills the instance, the second
    pending job is blocked until the first finishes."""
    instance = default_instance_group.instances.all()[0]
    objects1 = job_template_factory('jt1', organization='org1', project='proj1',
                                    inventory='inv1', credential='cred1',
                                    jobs=["job_should_start"])
    objects2 = job_template_factory('jt2', organization='org2', project='proj2',
                                    inventory='inv2', credential='cred2',
                                    jobs=["job_should_not_start"])
    runnable = objects1.jobs["job_should_start"]
    blocked = objects2.jobs["job_should_not_start"]
    for pending in (runnable, blocked):
        pending.status = 'pending'
        pending.save()
    scheduler = TaskManager()
    with mock.patch('awx.main.models.Job.task_impact', new_callable=mock.PropertyMock) as impact:
        impact.return_value = 500
        with mock.patch.object(TaskManager, "start_task", wraps=scheduler.start_task) as started:
            scheduler.schedule()
            started.assert_called_once_with(runnable, default_instance_group, [], instance)
        # Completing the first job frees the capacity for the second.
        runnable.status = "successful"
        runnable.save()
        with mock.patch.object(TaskManager, "start_task", wraps=scheduler.start_task) as started:
            scheduler.schedule()
            started.assert_called_once_with(blocked, default_instance_group, [], instance)
def test_task_manager_workflow_rescheduling(self, job_template_factory, inventory, project, controlplane_instance_group):
    """Walk a workflow job through an entire TaskManager/WorkflowManager/
    DependencyManager cycle and assert the expected rescheduling at each
    step. The exact sequence of run_tm calls is the contract under test --
    do not reorder."""
    jt = JobTemplate.objects.create(allow_simultaneous=True,
                                    inventory=inventory,
                                    project=project,
                                    playbook='helloworld.yml')
    wfjt = WorkflowJobTemplate.objects.create(name='foo')
    for i in range(2):
        wfjt.workflow_nodes.create(unified_job_template=jt)
    wj = wfjt.create_unified_job()
    assert wj.workflow_nodes.count() == 2
    wj.signal_start()
    # Transitions workflow job to running
    # needs to re-schedule so it spawns jobs next round
    self.run_tm(TaskManager(), [mock.call('running')])
    # Spawns jobs
    # needs re-schedule to submit jobs next round
    self.run_tm(
        WorkflowManager(), [mock.call('pending'), mock.call('pending')])
    assert jt.jobs.count() == 2  # task manager spawned jobs
    # Submits jobs
    # intermission - jobs will run and reschedule TM when finished
    self.run_tm(DependencyManager())  # flip dependencies_processed to True
    self.run_tm(
        TaskManager(), [mock.call('waiting'), mock.call('waiting')])
    # I am the job runner
    for job in jt.jobs.all():
        job.status = 'successful'
        job.save()
    # Finishes workflow
    # no further action is necessary, so rescheduling should not happen
    self.run_tm(WorkflowManager(), [mock.call('successful')])
def test_single_jt_multi_job_launch_blocks_last(job_template_factory):
    """The second job of a non-simultaneous template stays pending until the
    first job leaves the running set."""
    objects = job_template_factory('jt', organization='org1', project='proj',
                                   inventory='inv', credential='cred')
    blocker = create_job(objects.job_template)
    blocked = create_job(objects.job_template)
    TaskManager().schedule()
    for unified_job in (blocker, blocked):
        unified_job.refresh_from_db()
    assert blocker.status == "waiting"
    assert blocked.status == "pending"
    # mimic running the first job to completion to unblock the second
    blocker.status = "successful"
    blocker.save()
    TaskManager().schedule()
    blocked.refresh_from_db()
    assert blocked.status == "waiting"
def test_single_job_dependencies_inventory_update_launch(
        default_instance_group, job_template_factory, mocker,
        inventory_source_factory):
    """A pending job whose inventory source has update_on_launch set must
    first spawn an inventory update as a dependency; the job itself starts
    only after that update succeeds."""
    objects = job_template_factory('jt', organization='org1', project='proj',
                                   inventory='inv', credential='cred',
                                   jobs=["job_should_start"])
    instance = default_instance_group.instances.all()[0]
    j = objects.jobs["job_should_start"]
    j.status = 'pending'
    j.save()
    i = objects.inventory
    ii = inventory_source_factory("ec2")
    ii.source = "ec2"
    ii.update_on_launch = True
    # cache timeout 0 -> a fresh update is always required
    ii.update_cache_timeout = 0
    ii.save()
    i.inventory_sources.add(ii)
    with mock.patch("awx.main.scheduler.TaskManager.start_task"):
        tm = TaskManager()
        with mock.patch.object(TaskManager, "create_inventory_update",
                               wraps=tm.create_inventory_update) as mock_iu:
            tm.schedule()
            mock_iu.assert_called_once_with(j, ii)
        iu = [x for x in ii.inventory_updates.all()]
        assert len(iu) == 1
        # The dependency (inventory update) starts first, carrying the job
        # in its dependent-tasks list.
        TaskManager.start_task.assert_called_once_with(
            iu[0], default_instance_group, [j], instance)
        iu[0].status = "successful"
        iu[0].save()
    with mock.patch("awx.main.scheduler.TaskManager.start_task"):
        TaskManager().schedule()
        TaskManager.start_task.assert_called_once_with(j, default_instance_group,
                                                       [], instance)
def test_single_job_scheduler_launch(hybrid_instance, controlplane_instance_group, job_template_factory, mocker):
    """A single pending job is started on the control plane instance group's
    only instance."""
    instance = controlplane_instance_group.instances.all()[0]
    objects = job_template_factory('jt', organization='org1', project='proj',
                                   inventory='inv', credential='cred')
    j = create_job(objects.job_template)
    # BUG FIX: ``mocker.patch`` is not a context manager (it patches
    # immediately and returns the mock); use ``mock.patch`` for a properly
    # scoped patch.
    with mock.patch("awx.main.scheduler.TaskManager.start_task"):
        TaskManager().schedule()
        TaskManager.start_task.assert_called_once_with(
            j, controlplane_instance_group, [], instance)
def test_job_not_blocking_project_update(default_instance_group, job_template_factory):
    """A running job must not block a pending project update, neither via the
    task manager's blocking check nor via the dependency graph directly."""
    objects = job_template_factory('jt', organization='org1', project='proj',
                                   inventory='inv', credential='cred',
                                   jobs=["job"])
    running_job = objects.jobs["job"]
    running_job.instance_group = default_instance_group
    running_job.status = "running"
    running_job.save()
    with mock.patch("awx.main.scheduler.TaskManager.start_task"):
        task_manager = TaskManager()
        task_manager._schedule()
        update = objects.project.create_project_update()
        update.instance_group = default_instance_group
        update.status = "pending"
        update.save()
        assert not task_manager.is_job_blocked(update)
        # Same check against a dependency graph seeded with just the job.
        graph = DependencyGraph(None)
        graph.add_job(running_job)
        assert not graph.is_job_blocked(update)
def test_get_running_tasks(self, all_jobs):
    """get_running_tasks() buckets running jobs by execution node and returns
    waiting jobs as a separate collection."""
    manager = TaskManager()
    # Ensure the query grabs the expected jobs
    by_node, waiting = manager.get_running_tasks()
    for hostname in ('host1', 'host2', 'host3_split'):
        assert hostname in by_node
    assert all_jobs[3] in by_node['host1']
    assert all_jobs[6] in by_node['host2']
    assert all_jobs[7] in by_node['host2']
    assert all_jobs[9] in by_node['host3_split']
    assert all_jobs[10] in by_node['host4_offline']
    assert all_jobs[11] not in by_node['host1']
    for index in (2, 4, 5, 8):
        assert all_jobs[index] in waiting
def test_job_dependency_with_already_updated(default_instance_group, job_template_factory, mocker, inventory_source_factory):
    """If a job's start_args mark the inventory source as already updated, no
    inventory update dependency may be created; the job starts directly."""
    objects = job_template_factory('jt', organization='org1', project='proj',
                                   inventory='inv', credential='cred',
                                   jobs=["job_should_start"])
    instance = default_instance_group.instances.all()[0]
    j = objects.jobs["job_should_start"]
    j.status = 'pending'
    j.save()
    i = objects.inventory
    ii = inventory_source_factory("ec2")
    ii.source = "ec2"
    ii.update_on_launch = True
    ii.update_cache_timeout = 0
    ii.save()
    i.inventory_sources.add(ii)
    # Record the source as already updated in start_args, then encrypt the
    # field. NOTE(review): the save/encrypt/save sequence presumably mirrors
    # how the launch machinery stores start_args -- keep the order as-is.
    j.start_args = json.dumps(dict(inventory_sources_already_updated=[ii.id]))
    j.save()
    j.start_args = encrypt_field(j, field_name="start_args")
    j.save()
    with mock.patch("awx.main.scheduler.TaskManager.start_task"):
        tm = TaskManager()
        with mock.patch.object(TaskManager, "create_inventory_update",
                               wraps=tm.create_inventory_update) as mock_iu:
            tm.schedule()
            # No dependency: the source counts as freshly updated.
            mock_iu.assert_not_called()
    with mock.patch("awx.main.scheduler.TaskManager.start_task"):
        TaskManager().schedule()
        TaskManager.start_task.assert_called_once_with(j, default_instance_group,
                                                       [], instance)
def test_single_job_dependencies_project_launch(default_instance_group, job_template_factory, mocker):
    """A pending job whose project has scm_update_on_launch set must first
    spawn a project update dependency; the job starts only after the update
    succeeds."""
    objects = job_template_factory('jt', organization='org1', project='proj',
                                   inventory='inv', credential='cred',
                                   jobs=["job_should_start"])
    instance = default_instance_group.instances.all()[0]
    j = objects.jobs["job_should_start"]
    j.status = 'pending'
    j.save()
    p = objects.project
    p.scm_update_on_launch = True
    # cache timeout 0 -> an update is always considered required
    p.scm_update_cache_timeout = 0
    p.scm_type = "git"
    p.scm_url = "http://github.com/ansible/ansible.git"
    # NOTE(review): skip_update presumably suppresses the SCM update normally
    # triggered by saving the project -- confirm against Project.save()
    p.save(skip_update=True)
    with mock.patch("awx.main.scheduler.TaskManager.start_task"):
        tm = TaskManager()
        with mock.patch.object(TaskManager, "create_project_update",
                               wraps=tm.create_project_update) as mock_pu:
            tm.schedule()
            mock_pu.assert_called_once_with(j)
        pu = [x for x in p.project_updates.all()]
        assert len(pu) == 1
        # The dependency (project update) starts first, carrying the job in
        # its dependent-tasks list.
        TaskManager.start_task.assert_called_once_with(
            pu[0], default_instance_group, [j], instance)
        pu[0].status = "successful"
        pu[0].save()
    with mock.patch("awx.main.scheduler.TaskManager.start_task"):
        TaskManager().schedule()
        TaskManager.start_task.assert_called_once_with(j, default_instance_group,
                                                       [], instance)
def test_single_jt_multi_job_launch_allow_simul_allowed(job_template_factory):
    """With allow_simultaneous on the template, both of its jobs move to
    waiting in a single scheduling pass."""
    objects = job_template_factory('jt', organization='org1', project='proj',
                                   inventory='inv', credential='cred')
    template = objects.job_template
    template.allow_simultaneous = True
    template.save()
    jobs = [create_job(objects.job_template) for _ in range(2)]
    TaskManager().schedule()
    for unified_job in jobs:
        unified_job.refresh_from_db()
        assert unified_job.status == "waiting"
def test_task_manager_workflow_workflow_rescheduling(
        self, controlplane_instance_group):
    """A chain of five nested workflows finishes within a bounded number of
    TaskManager/WorkflowManager cycles."""
    templates = [WorkflowJobTemplate.objects.create(name='foo')]
    for idx in range(5):
        child = WorkflowJobTemplate.objects.create(name='foo{}'.format(idx))
        templates[-1].workflow_nodes.create(unified_job_template=child)
        templates.append(child)
    root_job = templates[0].create_unified_job()
    root_job.signal_start()
    remaining = 10
    # Alternate the two managers until the outermost workflow succeeds or
    # the attempt budget is exhausted.
    while templates[0].status != 'successful' and remaining > 0:
        self.run_tm(TaskManager())
        self.run_tm(WorkflowManager())
        templates[0].refresh_from_db()
        remaining -= 1
def test_task_manager_workflow_workflow_rescheduling(self):
    """Nested workflows keep rescheduling the task manager until the
    outermost workflow job finishes; the final cycle schedules nothing."""
    wfjts = [WorkflowJobTemplate.objects.create(name='foo')]
    for i in range(5):
        wfjt = WorkflowJobTemplate.objects.create(name='foo{}'.format(i))
        wfjts[-1].workflow_nodes.create(unified_job_template=wfjt)
        wfjts.append(wfjt)
    wj = wfjts[0].create_unified_job()
    wj.signal_start()
    tm = TaskManager()
    # ROBUSTNESS: bound the loop so a scheduling regression fails the test
    # instead of hanging CI forever (the sibling variant of this test uses
    # the same ``attempts`` guard).
    attempts = 10
    while wfjts[0].status != 'successful' and attempts > 0:
        attempts -= 1
        wfjts[1].refresh_from_db()
        if wfjts[1].status == 'successful':
            # final run, no more work to do
            self.run_tm(tm, expect_schedule=[])
        else:
            self.run_tm(tm, expect_schedule=[mock.call()])
        wfjts[0].refresh_from_db()
    assert wfjts[0].status == 'successful'
def test_approval_node_deny(self, post, admin_user, job_template, controlplane_instance_group):
    # This test ensures that a user (with permissions to do so) can DENY
    # workflow approvals. Also asserts that trying to DENY approvals
    # that have already been dealt with will throw an error.
    wfjt = WorkflowJobTemplate.objects.create(name='foobar')
    node = wfjt.workflow_nodes.create(unified_job_template=job_template)
    url = reverse('api:workflow_job_template_node_create_approval',
                  kwargs={
                      'pk': node.pk,
                      'version': 'v2'
                  })
    post(url, {
        'name': 'Deny Test',
        'description': '',
        'timeout': 0
    }, user=admin_user, expect=201)
    post(reverse('api:workflow_job_template_launch', kwargs={'pk': wfjt.pk}),
         user=admin_user, expect=201)
    wf_job = WorkflowJob.objects.first()
    DependencyManager().schedule(
    )  # TODO: exclude workflows from this and delete line
    TaskManager().schedule()
    WorkflowManager().schedule()
    wfj_node = wf_job.workflow_nodes.first()
    approval = wfj_node.job
    assert approval.name == 'Deny Test'
    post(reverse('api:workflow_approval_deny', kwargs={'pk': approval.pk}),
         user=admin_user, expect=204)
    # Test that there is an activity stream entry that was created for the "deny" action.
    qs = ActivityStream.objects.order_by('-timestamp').first()
    assert qs.object1 == 'workflow_approval'
    assert qs.changes == '{"status": ["pending", "failed"]}'
    assert WorkflowApproval.objects.get(pk=approval.pk).status == 'failed'
    assert qs.operation == 'update'
    # Denying an approval that was already acted on must be rejected.
    post(reverse('api:workflow_approval_deny', kwargs={'pk': approval.pk}),
         user=admin_user, expect=400)
def test_job_not_blocking_project_update(controlplane_instance_group, job_template_factory):
    """A running job must not block a pending project update for the same
    project; the update is started alongside it."""
    instance = controlplane_instance_group.instances.all()[0]
    objects = job_template_factory('jt', organization='org1', project='proj',
                                   inventory='inv', credential='cred')
    job = objects.job_template.create_unified_job()
    job.instance_group = controlplane_instance_group
    # BUG FIX: the attribute was misspelled ``dependencies_process``, which
    # silently set an unrelated instance attribute and left the real
    # ``dependencies_processed`` flag False.
    job.dependencies_processed = True
    job.status = "running"
    job.save()
    with mock.patch("awx.main.scheduler.TaskManager.start_task"):
        proj = objects.project
        project_update = proj.create_project_update()
        project_update.instance_group = controlplane_instance_group
        project_update.status = "pending"
        project_update.save()
        TaskManager().schedule()
        TaskManager.start_task.assert_called_once_with(
            project_update, controlplane_instance_group, [], instance)
def test_job_fails_to_launch_when_no_control_capacity(
        self, job_template, control_instance_low_capacity, execution_instance):
    """With control capacity for only one job, the first goes to waiting on
    the execution node and the second stays pending."""
    runnable = job_template.create_unified_job()
    starved = job_template.create_unified_job()
    jobs = [runnable, starved]
    for unified_job in jobs:
        unified_job.signal_start()
    # There is only enough control capacity to run one of the jobs so one
    # should end up in pending and the other in waiting.
    self.run_tm(TaskManager())
    for unified_job in jobs:
        unified_job.refresh_from_db()
    assert runnable.status == 'waiting'
    assert starved.status == 'pending'
    assert [runnable.execution_node, runnable.controller_node] == [
        execution_instance.hostname,
        control_instance_low_capacity.hostname,
    ], runnable
def test_hybrid_capacity(self, job_template, hybrid_instance):
    """A hybrid node runs and controls one job (consuming task_impact plus
    the control-node overhead); a second job stays pending."""
    runnable = job_template.create_unified_job()
    starved = job_template.create_unified_job()
    anticipated_impact = runnable.task_impact + settings.AWX_CONTROL_NODE_TASK_IMPACT
    for unified_job in (runnable, starved):
        unified_job.signal_start()
    # There is only enough control capacity to run one of the jobs so one
    # should end up in pending and the other in waiting.
    self.run_tm(TaskManager())
    for unified_job in (runnable, starved):
        unified_job.refresh_from_db()
    assert runnable.status == 'waiting'
    assert starved.status == 'pending'
    assert [runnable.execution_node, runnable.controller_node] == [
        hybrid_instance.hostname,
        hybrid_instance.hostname,
    ], runnable
    assert anticipated_impact == hybrid_instance.consumed_capacity
def test_project_update_capacity(self, project, hybrid_instance, instance_group_factory, controlplane_instance_group):
    """A project update consumes its task_impact plus the control-node
    overhead, and always runs on the control plane queue even when its node
    belongs to another instance group too."""
    update = project.create_unified_job()
    instance_group_factory(name='second_ig', instances=[hybrid_instance])
    anticipated_impact = update.task_impact + settings.AWX_CONTROL_NODE_TASK_IMPACT
    update.signal_start()
    self.run_tm(TaskManager())
    update.refresh_from_db()
    assert update.status == 'waiting'
    assert [update.execution_node, update.controller_node] == [
        hybrid_instance.hostname,
        hybrid_instance.hostname,
    ], update
    assert anticipated_impact == hybrid_instance.consumed_capacity
    # The hybrid node is in both instance groups, but the project update
    # should always get assigned to the controlplane.
    assert update.instance_group.name == settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME
    update.status = 'successful'
    update.save()
    # Finishing the update releases the capacity again.
    assert hybrid_instance.consumed_capacity == 0
def test_generate_dependencies_only_once(job_template_factory):
    """A job passes through generate_dependencies() only on its first cycle;
    subsequent cycles call it with an empty list on both passes."""
    objects = job_template_factory('jt', organization='org1')
    pending_job = objects.job_template.create_job()
    pending_job.status = "pending"
    pending_job.name = "job_gen_dep"
    pending_job.save()
    with mock.patch("awx.main.scheduler.TaskManager.start_task"):
        # A brand new job starts with dependencies_processed False.
        assert not pending_job.dependencies_processed
        # One ._schedule() cycle generates the dependencies...
        TaskManager()._schedule()
        # ...after which the flag is persisted as True.
        refreshed = Job.objects.filter(name="job_gen_dep")[0]
        assert refreshed.dependencies_processed
        # A second cycle must call .generate_dependencies() with empty lists.
        manager = TaskManager()
        manager.generate_dependencies = mock.MagicMock(return_value=[])
        manager._schedule()
        manager.generate_dependencies.assert_has_calls(
            [mock.call([]), mock.call([])])