def test_host_summary_generation_with_deleted_hosts():
    """Hosts deleted during a run still get summaries, but with a null host_id."""
    names_in_play = [f'Host {i}' for i in range(10)]
    inventory = Inventory()
    inventory.save()
    Host.objects.bulk_create([
        Host(created=now(), modified=now(), name=name, inventory_id=inventory.id)
        for name in names_in_play
    ])
    job = Job(inventory=inventory)
    job.save()
    # snapshot the name -> id mapping *before* any deletions occur
    host_map = {host.name: host.id for host in inventory.hosts.all()}
    # delete half of the hosts during the playbook run
    for victim in inventory.hosts.all()[:5]:
        victim.delete()
    JobEvent.create_from_data(
        job_id=job.pk,
        parent_uuid='abc123',
        event='playbook_on_stats',
        event_data={
            'ok': {name: len(name) for name in names_in_play},
            'changed': {},
            'dark': {},
            'failures': {},
            'ignored': {},
            'processed': {},
            'rescued': {},
            'skipped': {},
        },
        host_map=host_map,
    ).save()
    # deleted hosts yield summaries with host_id None; map those to -1 for sorting
    ids = sorted(summary.host_id or -1 for summary in job.job_host_summaries.order_by('id').all())
    names = sorted(summary.host_name for summary in job.job_host_summaries.all())
    assert ids == [-1, -1, -1, -1, -1, 6, 7, 8, 9, 10]
    assert names == ['Host 0', 'Host 1', 'Host 2', 'Host 3', 'Host 4', 'Host 5', 'Host 6', 'Host 7', 'Host 8', 'Host 9']
def test_parent_changed(emit):
    """A runner_on_ok event with changed hosts marks its parent task as changed."""
    job = Job()
    job.save()
    JobEvent.create_from_data(job_id=job.pk, uuid='abc123', event='playbook_on_task_start').save()
    assert JobEvent.objects.count() == 1
    for recorded in JobEvent.objects.all():
        assert recorded.changed is False
    JobEvent.create_from_data(
        job_id=job.pk,
        parent_uuid='abc123',
        event='runner_on_ok',
        event_data={'res': {'changed': ['localhost']}},
    ).save()
    # the `playbook_on_stats` event is where we update the parent changed linkage
    JobEvent.create_from_data(job_id=job.pk, parent_uuid='abc123', event='playbook_on_stats').save()
    linked = JobEvent.objects.filter(event__in=['playbook_on_task_start', 'runner_on_ok'])
    assert linked.count() == 2
    for recorded in linked.all():
        assert recorded.changed is True
def test_awx_custom_virtualenv_without_jt(project):
    """A job without a job template inherits the project's custom virtualenv."""
    project.custom_virtualenv = '/venv/fancy-proj'
    project.save()
    job = Job(project=project)
    job.save()
    # re-fetch so the property is computed from persisted state
    job = Job.objects.get(pk=job.id)
    assert job.ansible_virtualenv_path == '/venv/fancy-proj'
def test_job_event_websocket_notifications(emit):
    """Creating a job event emits exactly one websocket message on the job topic."""
    job = Job(id=123)
    job.save()
    JobEvent.create_from_data(job_id=job.pk)
    assert len(emit.call_args_list) == 1
    topic, payload = emit.call_args_list[0][0]
    assert topic == 'job_events-123'
    assert payload['job'] == 123
def mk_job(job_type='run', status='new', job_template=None, inventory=None, credential=None, project=None, extra_vars=None, persisted=True):
    """Factory helper that builds (and optionally persists) a Job.

    Fixes over the previous version:
    - ``extra_vars`` used a mutable default (``{}``) which is shared across
      calls; it now defaults to ``None`` and is normalized to ``{}``.
    - ``credential`` is only attached when one is supplied, so the default
      call no longer passes ``None`` to the m2m ``add()``.

    :param job_type: value for ``Job.job_type``
    :param status: value for ``Job.status``
    :param job_template: optional JobTemplate to link
    :param inventory: optional Inventory to link
    :param credential: optional Credential to attach (requires persisted=True)
    :param project: optional Project (assigned after save — see NOTE)
    :param extra_vars: dict serialized into ``Job.extra_vars``
    :param persisted: when True, save the job and attach the credential
    :return: the constructed Job
    """
    if extra_vars is None:
        extra_vars = {}
    job = Job(job_type=job_type, status=status, extra_vars=json.dumps(extra_vars))
    job.job_template = job_template
    job.inventory = inventory
    if persisted:
        job.save()
        # m2m rows require a saved row, hence inside the persisted branch
        if credential is not None:
            job.credentials.add(credential)
    # NOTE(review): project is assigned after save(), so it is not persisted
    # unless the caller saves again — preserved from the original helper.
    job.project = project
    return job
def test_host_summary_generation_with_limit():
    """Only hosts referenced by playbook_on_stats get summaries / last_job updates.

    Make an inventory with 10 hosts, run a playbook with a --limit pointed at
    *one* host, and verify that *only* that host has an associated
    JobHostSummary and an updated value for .last_job.
    """
    names_in_play = [f'Host {i}' for i in range(10)]
    inventory = Inventory()
    inventory.save()
    Host.objects.bulk_create([
        Host(created=now(), modified=now(), name=name, inventory_id=inventory.id)
        for name in names_in_play
    ])
    job = Job(inventory=inventory)
    job.save()
    # host map tracks name --> ID for the inventory, _regardless_ of whether
    # or not there's a limit applied to the actual playbook run
    host_map = {host.name: host.id for host in inventory.hosts.all()}
    # by making the playbook_on_stats *only* include Host 1, we're emulating
    # the behavior of a `--limit=Host 1`
    limited_host = Host.objects.get(name='Host 1')
    JobEvent.create_from_data(
        job_id=job.pk,
        parent_uuid='abc123',
        event='playbook_on_stats',
        event_data={
            'ok': {limited_host.name: len(limited_host.name)},  # effectively, limit=Host 1
            'changed': {},
            'dark': {},
            'failures': {},
            'ignored': {},
            'processed': {},
            'rescued': {},
            'skipped': {},
        },
        host_map=host_map,
    ).save()
    # since the playbook_on_stats only references one host, there should
    # *only* be one JobHostSummary record (related to the appropriate Host)
    assert JobHostSummary.objects.count() == 1
    for host in Host.objects.all():
        if host.name == 'Host 1':
            assert host.last_job_id == job.id
            assert host.last_job_host_summary_id == JobHostSummary.objects.first().id
        else:
            # all other hosts in the inventory should remain untouched
            assert host.last_job_id is None
            assert host.last_job_host_summary_id is None
def spawn_bulk_jobs_simple(num):
    """Create ``num`` canceled jobs against the module-level ``jt`` template.

    Jobs are built in memory first, then saved inside a single transaction;
    progress is printed every 100 saves.
    """
    pending = []
    for _ in range(num):
        job = Job()
        job.job_template = jt
        job.status = "canceled"
        pending.append(job)
    with transaction.atomic():
        for index, job in enumerate(pending):
            if index % 100 == 0:
                print(index)
            job.save()
def test_unicode_with_base64_ansi(sqlite_copy_expert, get, admin):
    """Multi-byte stdout round-trips through base64/ansi content encoding."""
    job = Job()
    job.save()
    for line_no in range(3):
        JobEvent(job=job, stdout=u'オ{}\n'.format(line_no), start_line=line_no).save()
    url = reverse('api:job_stdout', kwargs={'pk': job.pk}) + '?format=json&content_encoding=base64&content_format=ansi'
    response = get(url, user=admin, expect=200)
    decoded = base64.b64decode(json.loads(response.content)['content'])
    assert decoded.splitlines() == ['オ%d' % i for i in range(3)]
def test_job_relaunch_copy_vars(self, machine_credential, inventory, deploy_jobtemplate, post, mocker, net_credential):
    """Relaunching copies credentials, inventory, and limit from the original job."""
    original = Job(name='existing-job', inventory=inventory)
    original.job_template = deploy_jobtemplate
    original.limit = "my_server"
    original.save()
    original.credentials.add(machine_credential)
    original.credentials.add(net_credential)
    relaunched = original.copy_unified_job()

    # Check that job data matches the original variables
    assert [c.pk for c in relaunched.credentials.all()] == [machine_credential.pk, net_credential.pk]
    assert relaunched.inventory == original.inventory
    assert relaunched.limit == 'my_server'
    assert net_credential in relaunched.credentials.all()
def test_parent_failed(emit, event):
    """A failing child event marks its parent task event as failed."""
    job = Job()
    job.save()
    JobEvent.create_from_data(job_id=job.pk, uuid='abc123', event='playbook_on_task_start')
    assert JobEvent.objects.count() == 1
    for recorded in JobEvent.objects.all():
        assert recorded.failed is False
    JobEvent.create_from_data(job_id=job.pk, parent_uuid='abc123', event=event)
    assert JobEvent.objects.count() == 2
    for recorded in JobEvent.objects.all():
        assert recorded.failed is True
def test_parent_failed(emit, event):
    """After playbook_on_stats, a failing child marks its parent task as failed."""
    job = Job()
    job.save()
    JobEvent.create_from_data(job_id=job.pk, uuid='abc123', event='playbook_on_task_start').save()
    assert JobEvent.objects.count() == 1
    for recorded in JobEvent.objects.all():
        assert recorded.failed is False
    JobEvent.create_from_data(job_id=job.pk, parent_uuid='abc123', event=event).save()
    # the `playbook_on_stats` event is where we update the parent failed linkage
    JobEvent.create_from_data(job_id=job.pk, parent_uuid='abc123', event='playbook_on_stats').save()
    linked = JobEvent.objects.filter(event__in=['playbook_on_task_start', event])
    assert linked.count() == 2
    for recorded in linked.all():
        assert recorded.failed is True
def test_host_summary_generation():
    """playbook_on_stats produces one correctly-tallied JobHostSummary per host."""
    hostnames = [f'Host {i}' for i in range(100)]
    inventory = Inventory()
    inventory.save()
    Host.objects.bulk_create([
        Host(created=now(), modified=now(), name=name, inventory_id=inventory.id)
        for name in hostnames
    ])
    job = Job(inventory=inventory)
    job.save()
    host_map = {host.name: host.id for host in inventory.hosts.all()}
    JobEvent.create_from_data(
        job_id=job.pk,
        parent_uuid='abc123',
        event='playbook_on_stats',
        event_data={
            'ok': {name: len(name) for name in hostnames},
            'changed': {},
            'dark': {},
            'failures': {},
            'ignored': {},
            'processed': {},
            'rescued': {},
            'skipped': {},
        },
        host_map=host_map,
    ).save()
    assert job.job_host_summaries.count() == len(hostnames)
    assert sorted(s.host_name for s in job.job_host_summaries.all()) == sorted(hostnames)
    for summary in job.job_host_summaries.all():
        assert host_map[summary.host_name] == summary.host_id
        # the 'ok' tally was seeded above with len(hostname)
        assert summary.ok == len(summary.host_name)
        assert summary.changed == 0
        assert summary.dark == 0
        assert summary.failures == 0
        assert summary.ignored == 0
        assert summary.processed == 0
        assert summary.rescued == 0
        assert summary.skipped == 0
    for host in Host.objects.all():
        assert host.last_job_id == job.id
        assert host.last_job_host_summary.host == host
def test_unicode_with_base64_ansi(sqlite_copy_expert, get, admin):
    """Multi-byte stdout round-trips through base64 content encoding."""
    created = datetime.utcnow()
    job = Job(created=created)
    job.save()
    for line_no in range(3):
        JobEvent(job=job, stdout='オ{}\n'.format(line_no), start_line=line_no, job_created=created).save()
    url = reverse('api:job_stdout', kwargs={'pk': job.pk}) + '?format=json&content_encoding=base64'
    response = get(url, user=admin, expect=200)
    decoded = base64.b64decode(json.loads(smart_str(response.content))['content'])
    assert smart_str(decoded).splitlines() == ['オ%d' % i for i in range(3)]
def test_openstack_create_fail_required_fields(post, organization, admin):
    """An openstack credential with empty inputs is created but blocks job start."""
    openstack = CredentialType.defaults['openstack']()
    openstack.save()
    payload = {
        'credential_type': 1,
        'inputs': {},
        'kind': 'openstack',
        'name': 'Best credential ever',
        'organization': organization.id,
    }
    response = post(reverse('api:credential_list'), payload, admin)
    assert response.status_code == 201
    # username, password, host, and project must be specified by launch time
    job = Job()
    job.save()
    job.credentials.add(Credential.objects.first())
    assert job.pre_start() == (False, None)
    assert 'required fields (host, password, project, username)' in job.job_explanation
def test_vmware_create_fail_required_fields(post, organization, admin):
    """A vmware credential with empty inputs is created but blocks job start."""
    vmware = CredentialType.defaults['vmware']()
    vmware.save()
    payload = {
        'credential_type': 1,
        'name': 'Best credential ever',
        'inputs': {},
        'organization': organization.id,
    }
    response = post(reverse('api:credential_list'), payload, admin)
    assert response.status_code == 201
    assert Credential.objects.count() == 1
    # username, password, and host must be specified by launch time
    job = Job()
    job.save()
    job.credentials.add(Credential.objects.first())
    assert job.pre_start() == (False, None)
    assert 'required fields (host, password, username)' in job.job_explanation
def test_vault_password_required(post, organization, admin):
    """A vault credential with empty inputs is created but blocks job start."""
    vault = CredentialType.defaults['vault']()
    vault.save()
    response = post(
        reverse('api:credential_list'),
        {
            'credential_type': vault.pk,
            'organization': organization.id,
            'name': 'Best credential ever',
            'inputs': {},
        },
        admin)
    assert response.status_code == 201
    assert Credential.objects.count() == 1
    # vault_password must be specified by launch time
    job = Job()
    job.save()
    job.credentials.add(Credential.objects.first())
    assert job.pre_start() == (False, None)
    assert 'required fields (vault_password)' in job.job_explanation
def test_parent_changed(emit):
    """A runner_on_ok event with changed hosts marks its parent task as changed."""
    job = Job()
    job.save()
    JobEvent.create_from_data(job_id=job.pk, uuid='abc123', event='playbook_on_task_start')
    assert JobEvent.objects.count() == 1
    for recorded in JobEvent.objects.all():
        assert recorded.changed is False
    JobEvent.create_from_data(
        job_id=job.pk,
        parent_uuid='abc123',
        event='runner_on_ok',
        event_data={'res': {'changed': ['localhost']}},
    )
    assert JobEvent.objects.count() == 2
    for recorded in JobEvent.objects.all():
        assert recorded.changed is True
def test_job_relaunch_copy_vars(self, machine_credential, inventory, deploy_jobtemplate, post, mocker, net_credential):
    """Relaunching copies inventory, credential, and limit from the original job.

    Fix: the previous version used ``with mocker.patch(...):``. pytest-mock's
    ``mocker.patch`` starts the patch immediately and undoes it at test
    teardown; it returns the started MagicMock, so the ``with`` block only
    entered/exited that mock and was misleading about the patch's scope.
    Calling it as a plain statement has the same effect without the
    misleading context manager.
    """
    job_with_links = Job(name='existing-job', inventory=inventory)
    job_with_links.job_template = deploy_jobtemplate
    job_with_links.limit = "my_server"
    job_with_links.save()
    job_with_links.credentials.add(machine_credential)
    job_with_links.credentials.add(net_credential)
    # patch is active for the remainder of the test (undone at teardown)
    mocker.patch(
        'awx.main.models.unified_jobs.UnifiedJobTemplate._get_unified_job_field_names',
        return_value=['inventory', 'credential', 'limit'])
    second_job = job_with_links.copy_unified_job()

    # Check that job data matches the original variables
    assert second_job.credential == job_with_links.credential
    assert second_job.inventory == job_with_links.inventory
    assert second_job.limit == 'my_server'
    assert net_credential in second_job.credentials.all()
def spawn_bulk_jobs(num):
    """Create ``num`` fully-populated canceled jobs against the module globals.

    Uses the module-level ``jt``, ``project``, ``inv``, and ``cred`` objects.
    Jobs are built in memory first, then saved inside one transaction with a
    progress print and short throttle every 100 saves.
    """
    pending = []
    for seq in range(num):
        job = Job()
        job.job_template = jt
        job.project = project
        job.playbook = jt.playbook
        job.inventory = inv
        job.name = "bulk_{0}".format(seq)
        job.status = "canceled"
        job.extra_vars = '{"sleeptime": 60}'
        job.allow_simultaneous = False
        pending.append(job)
    with transaction.atomic():
        for index, job in enumerate(pending):
            if index % 100 == 0:
                print(index)
                time.sleep(.5)
            job.save()
            job.credentials.add(cred)
def test_do_not_reap_excluded_uuids(self, excluded_uuids, fail):
    """Jobs whose celery task UUID is in the exclusion list must not be reaped."""
    instance = Instance(hostname='awx')
    instance.save()
    running_job = Job(
        status='running',
        execution_node='awx',
        controller_node='',
        start_args='SENSITIVE',
        celery_task_id='abc123',
    )
    running_job.save()
    # if the UUID is excluded, don't reap it
    reaper.reap(instance, excluded_uuids=excluded_uuids)
    reloaded = Job.objects.first()
    if fail:
        assert reloaded.status == 'failed'
        assert 'marked as failed' in reloaded.job_explanation
        # start args are scrubbed when the job is reaped
        assert reloaded.start_args == ''
    else:
        assert reloaded.status == 'running'
def test_should_reap(self, status, fail, execution_node, controller_node, modified):
    """Parametrized check that the reaper fails only the appropriate jobs."""
    instance = Instance(hostname='awx')
    instance.save()
    candidate = Job(
        status=status,
        execution_node=execution_node,
        controller_node=controller_node,
        start_args='SENSITIVE',
    )
    candidate.save()
    if modified:
        # we have to edit the modification time _without_ calling save()
        # (because .save() overwrites it to _now_)
        Job.objects.filter(id=candidate.id).update(modified=modified)
    reaper.reap(instance)
    reloaded = Job.objects.first()
    if fail:
        assert reloaded.status == 'failed'
        assert 'marked as failed' in reloaded.job_explanation
        # start args are scrubbed when the job is reaped
        assert reloaded.start_args == ''
    else:
        assert reloaded.status == status