def test_successful_diff(self):
    """Test the diff command.

    `job diff` should query the active tasks, populate a job config from the
    local file, and invoke an external diff tool via subprocess.
    """
    (mock_api, mock_scheduler_proxy) = self.create_mock_api()
    with contextlib.nested(
        patch("apache.aurora.client.api.SchedulerProxy", return_value=mock_scheduler_proxy),
        patch("subprocess.call", return_value=0),
        patch("json.loads", return_value=Mock()),
    ) as (_, subprocess_patch, _):
        mock_scheduler_proxy.getTasksStatus.return_value = self.create_status_response()
        self.setup_populate_job_config(mock_scheduler_proxy)
        with temporary_file() as fp:
            fp.write(self.get_valid_config())
            fp.flush()
            cmd = AuroraCommandLine()
            cmd.execute(["job", "diff", "west/bozo/test/hello", fp.name])

            # Diff should get the task status, populate a config, and run diff.
            mock_scheduler_proxy.getTasksStatus.assert_called_with(
                TaskQuery(jobKeys=[JobKey(role="bozo", environment="test", name="hello")],
                          statuses=ACTIVE_STATES)
            )
            assert mock_scheduler_proxy.populateJobConfig.call_count == 1
            assert isinstance(mock_scheduler_proxy.populateJobConfig.call_args[0][0],
                              JobConfiguration)
            assert mock_scheduler_proxy.populateJobConfig.call_args[0][0].key == JobKey(
                environment=u"test", role=u"bozo", name=u"hello"
            )
            # Subprocess should have been used to invoke diff with two parameters.
            # The viewer binary defaults to "diff" unless DIFF_VIEWER is set.
            assert subprocess_patch.call_count == 1
            assert len(subprocess_patch.call_args[0][0]) == 3
            assert subprocess_patch.call_args[0][0][0] == os.environ.get("DIFF_VIEWER", "diff")
def test_simple_successful_create_job_output(self):
    """Run a test of the "create" command against a mocked-out API:
    Verifies that the creation command generates the correct output.
    """
    mock_context = FakeAuroraCommandContext()
    with contextlib.nested(
            patch('threading._Event.wait'),
            patch('apache.aurora.client.cli.jobs.Job.create_context',
                  return_value=mock_context)):
        # Job goes PENDING -> RUNNING; --wait-until=RUNNING polls until RUNNING.
        mock_context.add_expected_status_query_result(
            self.create_mock_status_query_result(ScheduleStatus.PENDING))
        mock_context.add_expected_status_query_result(
            self.create_mock_status_query_result(ScheduleStatus.RUNNING))
        mock_context.add_expected_status_query_result(
            self.create_mock_status_query_result(ScheduleStatus.RUNNING))
        api = mock_context.get_api('west')
        api.create_job.return_value = self.get_createjob_response()
        with temporary_file() as fp:
            fp.write(self.get_valid_config())
            fp.flush()
            cmd = AuroraCommandLine()
            result = cmd.execute(['job', 'create', '--wait-until=RUNNING',
                                  'west/bozo/test/hello', fp.name])
            assert result == EXIT_OK
            # Success message with the job URL goes to stdout; stderr stays empty.
            assert mock_context.get_out() == [
                "Job create succeeded: job url=http://something_or_other/scheduler/bozo/test/hello"]
            assert mock_context.get_err() == []
def test_kill_job_with_instances_batched_large(self):
    """Test kill client-side API logic: a large instance range is killed in
    batches (three kill_job calls, the last covering [12, 13])."""
    mock_context = FakeAuroraCommandContext()
    with contextlib.nested(
            patch('threading._Event.wait'),
            patch('apache.aurora.client.cli.jobs.Job.create_context',
                  return_value=mock_context),
            patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS)):
        api = mock_context.get_api('west')
        status_result = self.create_status_call_result()
        mock_context.add_expected_status_query_result(status_result)
        api.kill_job.return_value = self.get_kill_job_response()
        mock_context.add_expected_status_query_result(self.create_status_call_result(
            self.create_mock_task(ScheduleStatus.KILLED)))
        with temporary_file() as fp:
            fp.write(self.get_valid_config())
            fp.flush()
            cmd = AuroraCommandLine()
            cmd.execute(['job', 'kill', '--config=%s' % fp.name,
                         'west/bozo/test/hello/0,2,4-13'])

            # Now check that the right API calls got made.
            assert api.kill_job.call_count == 3
            api.kill_job.assert_called_with(AuroraJobKey.from_path('west/bozo/test/hello'),
                                            [12, 13])
            # Expect total 5 calls (3 from JobMonitor).
            self.assert_scheduler_called(api, self.get_expected_task_query([12, 13]), 5)
def test_successful_ssh(self):
    """Test the ssh command.

    `task ssh` should query the task's status to find its slave host, then
    exec ssh (with any --ssh-options) into the task sandbox and run --command.
    """
    (mock_api, mock_scheduler_proxy) = self.create_mock_api()
    mock_scheduler_proxy.getTasksStatus.return_value = self.create_status_response()
    sandbox_args = {'slave_root': '/slaveroot', 'slave_run_directory': 'slaverun'}
    with contextlib.nested(
            patch('apache.aurora.client.api.SchedulerProxy',
                  return_value=mock_scheduler_proxy),
            patch('apache.aurora.client.api.command_runner.DistributedCommandRunner.sandbox_args',
                  return_value=sandbox_args),
            patch('subprocess.call', return_value=0)) as (
                mock_scheduler_proxy_class, mock_runner_args_patch, mock_subprocess):
        cmd = AuroraCommandLine()
        cmd.execute(['task', 'ssh', '--ssh-options=-v', 'west/bozo/test/hello/1',
                     '--command=ls'])

        # The status command sends a getTasksStatus query to the scheduler,
        # and then prints the result.
        mock_scheduler_proxy.getTasksStatus.assert_called_with(TaskQuery(
            jobKeys=[JobKey(role='bozo', environment='test', name='hello')],
            instanceIds=set([1]),
            statuses=set([ScheduleStatus.RUNNING, ScheduleStatus.KILLING,
                          ScheduleStatus.RESTARTING, ScheduleStatus.PREEMPTING,
                          ScheduleStatus.DRAINING])))
        # ssh is invoked with the user's -v option and a cd into the sandbox.
        mock_subprocess.assert_called_with(['ssh', '-t', '-v', 'bozo@slavehost',
            'cd /slaveroot/slaves/*/frameworks/*/executors/thermos-1287391823/runs/'
            'slaverun/sandbox;ls'])
def test_failed_create_job_with_incomplete_bindings(self):
    """Run a test of the "create" command against a mocked-out API:
    Verifies that creating from a config with an unbound reference fails with
    EXIT_INVALID_CONFIGURATION and reports the type-check error on stderr."""
    mock_context = FakeAuroraCommandContext()
    with contextlib.nested(
            patch('threading._Event.wait'),
            patch('apache.aurora.client.cli.jobs.Job.create_context',
                  return_value=mock_context)):
        # This is the real test: invoke create as if it had been called by the command line.
        # Only cluster_binding is bound; TEST_BATCH stays unbound in the config.
        with temporary_file() as fp:
            fp.write(self.get_unbound_test_config())
            fp.flush()
            cmd = AuroraCommandLine()
            result = cmd.execute(['job', 'create', '--wait-until=RUNNING',
                                  '--bind', 'cluster_binding=west',
                                  'west/bozo/test/hello', fp.name])
            assert result == EXIT_INVALID_CONFIGURATION
            assert mock_context.get_out() == []
            assert mock_context.get_err() == [
                "Error loading configuration: "
                "TypeCheck(FAILED): MesosJob[update_config] failed: "
                "UpdateConfig[batch_size] failed: u'{{TEST_BATCH}}' not an integer"]
def test_create_job_failed_output(self):
    """Test that a failed create generates the correct error messages"""
    mock_context = FakeAuroraCommandContext()
    with patch('apache.aurora.client.cli.jobs.Job.create_context',
               return_value=mock_context):
        mock_context.add_expected_status_query_result(
            self.create_mock_status_query_result(ScheduleStatus.INIT))
        api = mock_context.get_api('west')
        api.create_job.return_value = self.get_failed_createjob_response()
        api.get_tier_configs.return_value = self.get_mock_tier_configurations()
        with temporary_file() as fp:
            fp.write(self.get_valid_config())
            fp.flush()
            cmd = AuroraCommandLine()
            result = cmd.execute([
                'job', 'create', '--wait-until=RUNNING', 'west/bozo/test/hello', fp.name
            ])
            assert result == EXIT_COMMAND_FAILURE
            # The failure reason should be reported on stderr only.
            assert mock_context.get_out() == []
            assert mock_context.get_err() == [
                'Job creation failed due to error:', '\tWhoops'
            ]
def test_status_wildcard(self):
    """A wildcard `job status *` first expands jobs via api.get_jobs and then
    issues one check_status call per matching job."""
    mock_context = FakeAuroraCommandContext()
    mock_api = mock_context.get_api('west')
    mock_api.check_status.return_value = self.create_status_response()
    mock_api.get_jobs.return_value = self.create_getjobs_response()
    with patch('apache.aurora.client.cli.jobs.Job.create_context',
               return_value=mock_context):
        cmd = AuroraCommandLine()
        cmd.execute(['job', 'status', '*'])

        # Wildcard should have expanded to two jobs, so there should be two calls
        # to check_status — one per expanded job key.
        assert mock_api.check_status.call_count == 2
        expected_job_keys = [
            ('west', 'RoleA', 'test', 'hithere'),
            ('west', 'bozo', 'test', 'hello'),
        ]
        for recorded, (cluster, role, env, name) in zip(
                mock_api.check_status.call_args_list, expected_job_keys):
            checked_key = recorded[0][0]
            assert checked_key.cluster == cluster
            assert checked_key.role == role
            assert checked_key.env == env
            assert checked_key.name == name
def test_cron_status_multiple_jobs(self):
    """`cron show west/bozo/test/hello` should print only the requested job's
    schedule even when getJobs returns multiple cron jobs for the role."""
    _, mock_scheduler_proxy = self.create_mock_api()
    with contextlib.nested(
            patch('time.sleep'),
            patch('apache.aurora.client.api.SchedulerProxy',
                  return_value=mock_scheduler_proxy),
            patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS),
            patch('apache.aurora.client.cli.context.AuroraCommandContext.print_out')) as (
                _, _, _, mock_print):
        # Scheduler returns two cron jobs for role "bozo": hello and hello2.
        response = self.create_simple_success_response()
        response.result = Result(getJobsResult=GetJobsResult(configs=[
            JobConfiguration(
                key=JobKey(role='bozo', environment='test', name='hello'),
                cronSchedule='* * * * *'),
            JobConfiguration(
                key=JobKey(role='bozo', environment='test', name='hello2'),
                cronSchedule='* * * * *')
        ]))
        mock_scheduler_proxy.getJobs.return_value = response
        cmd = AuroraCommandLine()
        result = cmd.execute(['cron', 'show', 'west/bozo/test/hello'])

        assert result == EXIT_OK
        # The query is by role; filtering down to "hello" happens client-side.
        mock_scheduler_proxy.getJobs.assert_called_once_with("bozo")
        mock_print.assert_called_with("west/bozo/test/hello\t * * * * *")
def test_restart_failed_restart_output(self):
    """When restartShards returns an error response, `job restart` should print
    nothing to stdout and an error message to stderr."""
    self.reset_mock_io()
    (mock_api, mock_scheduler_proxy) = self.create_mock_api()
    mock_health_check = self.setup_health_checks(mock_api)
    self.setup_mock_scheduler_for_simple_restart(mock_api)
    mock_scheduler_proxy.restartShards.return_value = self.create_error_response()
    with contextlib.nested(
            patch('apache.aurora.client.api.SchedulerProxy',
                  return_value=mock_scheduler_proxy),
            patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS),
            patch('apache.aurora.client.api.instance_watcher.StatusHealthCheck',
                  return_value=mock_health_check),
            patch('time.time', side_effect=functools.partial(self.fake_time, self)),
            patch('apache.aurora.client.cli.context.AuroraCommandContext.print_out',
                  side_effect=self.mock_print_out),
            patch('apache.aurora.client.cli.context.AuroraCommandContext.print_err',
                  side_effect=self.mock_print_err),
            patch('threading._Event.wait')):
        with temporary_file() as fp:
            fp.write(self.get_valid_config())
            fp.flush()
            cmd = AuroraCommandLine()
            cmd.execute(['job', 'restart', '--batch-size=5', 'west/bozo/test/hello',
                         '--config', fp.name])
            assert self.MOCK_OUT == []
            assert "Error restarting job west/bozo/test/hello:" in self.MOCK_ERR
def test_kill_job_with_instances_batched_output(self):
    """Test kill client-side API logic: verifies the stdout messages produced by
    a batched kill of specific instances."""
    mock_context = FakeAuroraCommandContext()
    with contextlib.nested(
            patch('apache.aurora.client.cli.jobs.Job.create_context',
                  return_value=mock_context),
            patch('apache.aurora.client.cli.jobs.JobMonitor',
                  return_value=self.get_monitor_mock())):
        api = mock_context.get_api('west')
        mock_context.add_expected_query_result(
            self.create_query_call_result(), job_key=self.TEST_JOBKEY)
        mock_context.add_expected_query_result(self.create_query_call_result())
        api.kill_job.return_value = self.create_simple_success_response()
        cmd = AuroraCommandLine()
        cmd.execute(['job', 'kill', '--batch-size=5', self.get_instance_spec('0,2,4-6')])

        # All five requested instances fit in one batch of 5.
        assert mock_context.get_out() == [
            'Successfully killed instances [0, 2, 4, 5, 6]', 'Job kill succeeded'
        ]
        assert mock_context.get_err() == []
        self.assert_query(api, times=2)
def test_kill_job_with_instances_batched_maxerrors_output(self):
    """Test kill client-side API logic: verifies the stderr messages produced
    when batched kills time out and the max-failures limit is exceeded."""
    mock_context = FakeAuroraCommandContext()
    # Monitor reports failure, so every batch "was not killed in time".
    mock_monitor = self.get_monitor_mock(result=False)
    with contextlib.nested(
            patch('apache.aurora.client.cli.jobs.Job.create_context',
                  return_value=mock_context),
            patch('apache.aurora.client.cli.jobs.JobMonitor',
                  return_value=mock_monitor)):
        api = mock_context.get_api('west')
        mock_context.add_expected_query_result(
            self.create_query_call_result(), job_key=self.TEST_JOBKEY)
        mock_context.add_expected_query_result(self.create_query_call_result())
        api.kill_job.return_value = self.create_simple_success_response()
        cmd = AuroraCommandLine()
        cmd.execute(['job', 'kill', '--max-total-failures=1', '--batch-size=5',
                     self.get_instance_spec('0,2,4-13')])

        assert mock_context.get_out() == []
        # Two failed batches exceed --max-total-failures=1, so the kill aborts.
        assert mock_context.get_err() == [
            'Instances [0, 2, 4, 5, 6] were not killed in time',
            'Instances [7, 8, 9, 10, 11] were not killed in time',
            'Exceeded maximum number of errors while killing instances'
        ]
        self.assert_query(api, times=2)
def test_kill_job_with_instances_batched_maxerrors(self):
    """Test kill client-side API logic: batched kill stops issuing batches once
    --max-total-failures is exceeded."""
    mock_context = FakeAuroraCommandContext()
    # Monitor reports failure for every batch.
    mock_monitor = self.get_monitor_mock(result=False)
    with contextlib.nested(
            patch('apache.aurora.client.cli.jobs.Job.create_context',
                  return_value=mock_context),
            patch('apache.aurora.client.cli.jobs.JobMonitor',
                  return_value=mock_monitor)):
        api = mock_context.get_api('west')
        mock_context.add_expected_query_result(
            self.create_query_call_result(), job_key=self.TEST_JOBKEY)
        mock_context.add_expected_query_result(
            self.create_query_call_result(), job_key=self.TEST_JOBKEY)
        api.kill_job.return_value = self.create_simple_success_response()
        cmd = AuroraCommandLine()
        cmd.execute(['job', 'kill', '--max-total-failures=1',
                     self.get_instance_spec('0-4')])

        # We should have aborted after the second batch.
        self.assert_kill_calls(api, instance_range=range(2), message=None)
        self.assert_query(api, times=2)
def test_kill_job_with_instances_nobatching(self):
    """Test kill client-side API logic: with --no-batching all requested
    instances are killed in a single kill_job call."""
    mock_context = FakeAuroraCommandContext()
    mock_monitor = self.get_monitor_mock()
    with contextlib.nested(
            patch('apache.aurora.client.cli.jobs.Job.create_context',
                  return_value=mock_context),
            patch('apache.aurora.client.cli.jobs.JobMonitor',
                  return_value=mock_monitor)) as (_, m):
        api = mock_context.get_api('west')
        mock_context.add_expected_query_result(
            self.create_query_call_result(), job_key=self.TEST_JOBKEY)
        api.kill_job.return_value = self.create_simple_success_response()
        cmd = AuroraCommandLine()
        cmd.execute(['job', 'kill', '--no-batching', self.get_instance_spec('0,2,4-6')])

        # Instance spec '0,2,4-6' expands to these five instances.
        instances = [0, 2, 4, 5, 6]
        self.assert_kill_calls(api, instances=instances, message=None)
        self.assert_wait_calls(mock_monitor, m.terminal, instances=instances)
        self.assert_query(api)
def test_successful_status_output_no_metadata(self):
    """Test the status command more deeply: in a request with a fully specified
    job, it should end up doing a query using getTasksWithoutConfigs."""
    mock_context = FakeAuroraCommandContext()
    mock_context.add_expected_status_query_result(self.create_status_null_metadata())
    with patch("apache.aurora.client.cli.jobs.Job.create_context",
               return_value=mock_context):
        cmd = AuroraCommandLine()
        cmd.execute(["job", "status", "west/bozo/test/hello"])
        # Timestamps vary from run to run, so mask them before comparing.
        actual = re.sub("\\d\\d:\\d\\d:\\d\\d", "##:##:##",
                        "\n".join(mock_context.get_out()))
        expected = textwrap.dedent(
            """\
            Active tasks (3):
            \tTask role: bozo, env: test, name: woops, instance: 1, status: RUNNING on slavehost
            \t CPU: 2 core(s), RAM: 2 MB, Disk: 2 MB
            \t events:
            \t 1970-11-23 ##:##:## RUNNING: Hi there
            \tTask role: bozo, env: test, name: woops, instance: 2, status: RUNNING on slavehost
            \t CPU: 2 core(s), RAM: 2 MB, Disk: 2 MB
            \t events:
            \t 1970-11-23 ##:##:## RUNNING: Hi there
            \tTask role: bozo, env: test, name: woops, instance: 3, status: RUNNING on slavehost
            \t CPU: 2 core(s), RAM: 2 MB, Disk: 2 MB
            \t events:
            \t 1970-11-23 ##:##:## RUNNING: Hi there
            Inactive tasks (0):
            """
        )
        assert actual == expected
def test_simple_successful_create_job_open_page(self):
    """A successful `job create --open-browser` should open the scheduler page
    for the created job in the (mocked) web browser."""
    mock_context = FakeAuroraCommandContext()
    with contextlib.nested(
            # TODO(maxim): Patching threading.Event with all possible namespace/patch/mock
            # combinations did not produce the desired effect. Investigate why (AURORA-510)
            patch('threading._Event.wait'),
            patch('apache.aurora.client.cli.jobs.Job.create_context',
                  return_value=mock_context)):
        mock_query = self.create_query()
        mock_context.add_expected_status_query_result(
            self.create_mock_status_query_result(ScheduleStatus.PENDING))
        mock_context.add_expected_status_query_result(
            self.create_mock_status_query_result(ScheduleStatus.RUNNING))
        api = mock_context.get_api('west')
        api.create_job.return_value = self.get_createjob_response()
        api.get_tier_configs.return_value = self.get_mock_tier_configurations()
        with temporary_file() as fp:
            fp.write(self.get_valid_config())
            fp.flush()
            cmd = AuroraCommandLine()
            result = cmd.execute([
                'job', 'create', '--wait-until=RUNNING', '--open-browser',
                'west/bozo/test/hello', fp.name
            ])
            assert result == EXIT_OK
            self.assert_create_job_called(api)
            self.assert_scheduler_called(api, mock_query, 2)
            # The browser should have been pointed at the job's scheduler page.
            assert self.mock_webbrowser.mock_calls == [
                call("http://something_or_other/scheduler/bozo/test/hello")
            ]
def test_restart_simple_output(self): self.reset_mock_io() # Test the client-side restart logic in its simplest case: everything succeeds (mock_api, mock_scheduler_proxy) = self.create_mock_api() mock_health_check = self.setup_health_checks(mock_api) self.setup_mock_scheduler_for_simple_restart(mock_api) with contextlib.nested( patch('apache.aurora.client.api.SchedulerProxy', return_value=mock_scheduler_proxy), patch('apache.aurora.client.api.instance_watcher.StatusHealthCheck', return_value=mock_health_check), patch('time.time', side_effect=functools.partial(self.fake_time, self)), patch('threading._Event.wait'), patch('apache.aurora.client.cli.context.AuroraCommandContext.print_out', side_effect=self.mock_print_out), patch('apache.aurora.client.cli.context.AuroraCommandContext.print_err', side_effect=self.mock_print_err) ): with temporary_file() as fp: fp.write(self.get_valid_config()) fp.flush() cmd = AuroraCommandLine() cmd.execute(['job', 'restart', '--batch-size=5', 'west/bozo/test/hello', '--config', fp.name]) assert self.MOCK_OUT == ['Job west/bozo/test/hello restarted successfully'] assert self.MOCK_ERR == []
def test_create_job_failed(self):
    """Run a test of the "create" command against a mocked-out API:
    verifies that when the create_job RPC fails, the command exits with
    EXIT_COMMAND_FAILURE.
    """
    mock_context = FakeAuroraCommandContext()
    with patch('apache.aurora.client.cli.jobs.Job.create_context',
               return_value=mock_context):
        mock_context.add_expected_status_query_result(
            self.create_mock_status_query_result(ScheduleStatus.INIT))
        api = mock_context.get_api('west')
        api.create_job.return_value = self.get_failed_createjob_response()
        api.get_tier_configs.return_value = self.get_mock_tier_configurations()
        with temporary_file() as fp:
            fp.write(self.get_valid_config())
            fp.flush()
            cmd = AuroraCommandLine()
            result = cmd.execute([
                'job', 'create', '--wait-until=RUNNING', 'west/bozo/test/hello', fp.name
            ])
            assert result == EXIT_COMMAND_FAILURE
            # Check that create_job was called exactly once, with an AuroraConfig parameter.
            self.assert_create_job_called(api)
def test_create_job_startup_fails(self):
    """When the post-create status check errors, `job create` should fail with
    EXIT_COMMAND_FAILURE and report the error on stderr."""
    mock_context = FakeAuroraCommandContext()
    with contextlib.nested(
            patch('threading._Event.wait'),
            patch('apache.aurora.client.cli.jobs.Job.create_context',
                  return_value=mock_context)):
        mock_context.add_expected_status_query_result(
            self.create_mock_status_query_result(ScheduleStatus.PENDING))
        mock_context.add_expected_status_query_result(
            self.create_mock_status_query_result(ScheduleStatus.RUNNING))

        # We need to override the side_effect behavior of check_status in the context.
        def check_status_side_effect(*args):
            return self.create_error_response()

        mock_context.get_api("west").check_status.side_effect = check_status_side_effect
        api = mock_context.get_api('west')
        api.create_job.return_value = self.get_createjob_response()
        with temporary_file() as fp:
            fp.write(self.get_valid_config())
            fp.flush()
            cmd = AuroraCommandLine()
            result = cmd.execute([
                'job', 'create', '--wait-until=RUNNING', 'west/bozo/test/hello', fp.name
            ])
            assert result == EXIT_COMMAND_FAILURE
            assert mock_context.get_out() == []
            assert mock_context.get_err() == [
                "Error occurred while creating job west/bozo/test/hello"
            ]
def test_diff_server_error(self):
    """Test the diff command when the scheduler's status query fails: the
    command should exit with EXIT_INVALID_PARAMETER and make no further calls."""
    mock_options = self.setup_mock_options()
    (mock_api, mock_scheduler_proxy) = self.create_mock_api()
    mock_scheduler_proxy.getTasksStatus.return_value = self.create_failed_status_response()
    self.setup_populate_job_config(mock_scheduler_proxy)
    with contextlib.nested(
            patch('apache.aurora.client.api.SchedulerProxy',
                  return_value=mock_scheduler_proxy),
            patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS),
            patch('twitter.common.app.get_options', return_value=mock_options),
            patch('subprocess.call', return_value=0),
            patch('json.loads', return_value=Mock())) as (
                mock_scheduler_proxy_class, mock_clusters, options, subprocess_patch,
                json_patch):
        with temporary_file() as fp:
            fp.write(self.get_valid_config())
            fp.flush()
            cmd = AuroraCommandLine()
            result = cmd.execute(['job', 'diff', 'west/bozo/test/hello', fp.name])
            assert result == EXIT_INVALID_PARAMETER
            # In this error case, we should have called the server getTasksStatus;
            # but since it fails, we shouldn't call populateJobConfig or subprocess.
            mock_scheduler_proxy.getTasksStatus.assert_called_with(
                TaskQuery(jobName='hello', environment='test',
                          owner=Identity(role='bozo'), statuses=ACTIVE_STATES))
            assert mock_scheduler_proxy.populateJobConfig.call_count == 0
            assert subprocess_patch.call_count == 0
def test_simple_successful_create_job_with_bindings(self):
    """Run a test of the "create" command against a mocked-out API:
    Verifies that the creation command sends the right API RPCs, and performs the
    correct tests on the result.

    NOTE(review): a method with this name also appears earlier in this chunk —
    if both live in the same class, this definition shadows the other; confirm.
    """
    mock_context = FakeAuroraCommandContext()
    with contextlib.nested(
            patch('threading._Event.wait'),
            patch('apache.aurora.client.cli.jobs.Job.create_context',
                  return_value=mock_context)):
        mock_query = self.create_query()
        mock_context.add_expected_status_query_result(
            self.create_mock_status_query_result(ScheduleStatus.PENDING))
        mock_context.add_expected_status_query_result(
            self.create_mock_status_query_result(ScheduleStatus.RUNNING))
        api = mock_context.get_api('west')
        api.create_job.return_value = self.get_createjob_response()

        # This is the real test: invoke create as if it had been called by the command line.
        with temporary_file() as fp:
            fp.write(self.get_unbound_test_config())
            fp.flush()
            cmd = AuroraCommandLine()
            cmd.execute([
                'job', 'create', '--wait-until=RUNNING',
                '--bind', 'cluster_binding=west',
                '--bind', 'instances_binding=20', '--bind', 'TEST_BATCH=1',
                '--bind', 'flags_binding=-some_flag=value',
                'west/bozo/test/hello', fp.name
            ])
            # Now check that the right API calls got made.
            # Check that create_job was called exactly once, with an AuroraConfig parameter.
            self.assert_create_job_called(api)
            self.assert_scheduler_called(api, mock_query, 2)
def test_diff_invalid_config(self):
    """Test the diff command if the user passes a config with an error in it."""
    mock_options = self.setup_mock_options()
    (mock_api, mock_scheduler_proxy) = self.create_mock_api()
    mock_scheduler_proxy.getTasksStatus.return_value = self.create_status_response()
    self.setup_populate_job_config(mock_scheduler_proxy)
    with contextlib.nested(
            patch('apache.aurora.client.api.SchedulerProxy',
                  return_value=mock_scheduler_proxy),
            patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS),
            patch('twitter.common.app.get_options', return_value=mock_options),
            patch('subprocess.call', return_value=0),
            patch('json.loads', return_value=Mock())) as (
                mock_scheduler_proxy_class, mock_clusters, options, subprocess_patch,
                json_patch):
        with temporary_file() as fp:
            fp.write(self.get_invalid_config('stupid="me"',))
            fp.flush()
            cmd = AuroraCommandLine()
            result = cmd.execute(['job', 'diff', 'west/bozo/test/hello', fp.name])
            assert result == EXIT_INVALID_CONFIGURATION
            # The config error is caught before any scheduler or subprocess call.
            assert mock_scheduler_proxy.getTasksStatus.call_count == 0
            assert mock_scheduler_proxy.populateJobConfig.call_count == 0
            assert subprocess_patch.call_count == 0
def test_updater_simple_with_instances(self):
    """A simple `job update` restricted to one instance should acquire the job
    lock, add/kill instances once each, and release the lock."""
    (mock_api, mock_scheduler_proxy) = self.create_mock_api()
    mock_health_check = self.setup_health_checks(mock_api)
    mock_quota_check = self.setup_quota_check()
    mock_job_monitor = self.setup_job_monitor()
    fake_mux = self.FakeSchedulerMux()
    self.setup_mock_scheduler_for_simple_update(mock_api)
    with contextlib.nested(
            patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS),
            patch('apache.aurora.client.api.SchedulerProxy',
                  return_value=mock_scheduler_proxy),
            patch('apache.aurora.client.api.instance_watcher.StatusHealthCheck',
                  return_value=mock_health_check),
            patch('apache.aurora.client.api.updater.JobMonitor',
                  return_value=mock_job_monitor),
            patch('apache.aurora.client.api.updater.QuotaCheck',
                  return_value=mock_quota_check),
            patch('apache.aurora.client.api.updater.SchedulerMux',
                  return_value=fake_mux),
            patch('time.time', side_effect=functools.partial(self.fake_time, self)),
            patch('time.sleep', return_value=None),
            patch('threading._Event.wait')):
        with temporary_file() as fp:
            fp.write(self.get_valid_config())
            fp.flush()
            cmd = AuroraCommandLine()
            cmd.execute(['job', 'update', 'west/bozo/test/hello/1', fp.name])

            mock_scheduler_proxy = mock_api.scheduler_proxy
            assert mock_scheduler_proxy.acquireLock.call_count == 1
            assert mock_scheduler_proxy.addInstances.call_count == 1
            assert mock_scheduler_proxy.killTasks.call_count == 1
            self.assert_correct_status_calls(mock_scheduler_proxy)
            assert mock_scheduler_proxy.releaseLock.call_count == 1
def test_create_job_with_failed_hook(self):
    """A failing pre-command hook should abort `job create` before the
    create_job API call is made, and the post hook should not run."""
    GlobalCommandHookRegistry.reset()
    command_hook = HookForTesting(False)
    GlobalCommandHookRegistry.register_command_hook(command_hook)
    mock_context = FakeAuroraCommandContext()
    with patch("apache.aurora.client.cli.jobs.Job.create_context",
               return_value=mock_context):
        mock_context.add_expected_status_query_result(
            self.create_mock_status_query_result(ScheduleStatus.INIT))
        mock_context.add_expected_status_query_result(
            self.create_mock_status_query_result(ScheduleStatus.RUNNING))
        api = mock_context.get_api("west")
        api.create_job.return_value = self.get_createjob_response()
        with temporary_file() as fp:
            fp.write(self.get_valid_config())
            fp.flush()
            cmd = AuroraCommandLine()
            result = cmd.execute([
                "job", "create", "--wait-until=RUNNING", "west/bozo/test/hello", fp.name
            ])
            # NOTE(review): literal 1 — presumably the generic error exit code;
            # confirm whether an EXIT_* constant should be used here instead.
            assert result == 1
            assert api.create_job.call_count == 0
            assert command_hook.ran_pre
            assert not command_hook.ran_post
def test_large_with_instances_doesnt_warn(self):
    """Updating only specific instances should not trigger the large-update
    warn_and_pause prompt, even when the config declares many instances."""
    (mock_api, mock_scheduler_proxy) = self.create_mock_api()
    mock_health_check = self.setup_health_checks(mock_api)
    mock_quota_check = self.setup_quota_check()
    mock_job_monitor = self.setup_job_monitor()
    fake_mux = self.FakeSchedulerMux()
    self.setup_mock_scheduler_for_simple_update(mock_api, count=20)
    # Inflate the config's instance count to a "large" value.
    config = self.get_valid_config()
    config = config.replace("instances = 20", "instances = 200")
    with contextlib.nested(
            patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS),
            patch('apache.aurora.client.api.SchedulerProxy',
                  return_value=mock_scheduler_proxy),
            patch('apache.aurora.client.api.instance_watcher.StatusHealthCheck',
                  return_value=mock_health_check),
            patch('apache.aurora.client.api.updater.JobMonitor',
                  return_value=mock_job_monitor),
            patch('apache.aurora.client.api.updater.QuotaCheck',
                  return_value=mock_quota_check),
            patch('apache.aurora.client.api.updater.SchedulerMux',
                  return_value=fake_mux),
            patch('time.time', side_effect=functools.partial(self.fake_time, self)),
            patch('threading._Event.wait')):
        with patch(
                'apache.aurora.client.cli.context.AuroraCommandContext.warn_and_pause') as pause:
            with temporary_file() as fp:
                fp.write(config)
                fp.flush()
                cmd = AuroraCommandLine()
                cmd.execute(['job', 'update', 'west/bozo/test/hello/1,3', fp.name])
                assert pause.call_count == 0
def test_updater_simple_small_doesnt_warn(self):
    """A small update should complete without any warning prompt; stdout gets
    the success message and stderr stays empty."""
    mock_out = IOMock()
    mock_err = IOMock()
    (mock_api, mock_scheduler_proxy) = self.create_mock_api()
    mock_health_check = self.setup_health_checks(mock_api)
    mock_quota_check = self.setup_quota_check()
    mock_job_monitor = self.setup_job_monitor()
    fake_mux = self.FakeSchedulerMux()
    self.setup_mock_scheduler_for_simple_update(mock_api)
    # This doesn't work, because:
    # - The mock_context stubs out the API.
    # - the test relies on using live code in the API.
    with contextlib.nested(
            patch('apache.aurora.client.cli.jobs.AuroraCommandContext.print_out',
                  side_effect=mock_out.put),
            patch('apache.aurora.client.cli.jobs.AuroraCommandContext.print_err',
                  side_effect=mock_err.put),
            patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS),
            patch('apache.aurora.client.api.SchedulerProxy',
                  return_value=mock_scheduler_proxy),
            patch('apache.aurora.client.api.instance_watcher.StatusHealthCheck',
                  return_value=mock_health_check),
            patch('apache.aurora.client.api.updater.JobMonitor',
                  return_value=mock_job_monitor),
            patch('apache.aurora.client.api.updater.QuotaCheck',
                  return_value=mock_quota_check),
            patch('apache.aurora.client.api.updater.SchedulerMux',
                  return_value=fake_mux),
            patch('time.time', side_effect=functools.partial(self.fake_time, self)),
            patch('time.sleep', return_value=None),
            patch('threading._Event.wait')):
        with temporary_file() as fp:
            fp.write(self.get_valid_config())
            fp.flush()
            cmd = AuroraCommandLine()
            cmd.execute(['job', 'update', 'west/bozo/test/hello', fp.name])

            assert mock_out.get() == ['Update completed successfully']
            assert mock_err.get() == []
def test_inspect_job_raw(self):
    """`job inspect --raw` should succeed and emit the thrift representation of
    the job's config."""
    captured_lines = []

    def record_output(msg, indent=0):
        # Mirror print_out's indentation behavior while capturing the text.
        captured_lines.append("%s%s" % (" " * indent, msg))

    job_config = self.get_job_config()
    with contextlib.nested(
            patch(
                'apache.aurora.client.cli.context.AuroraCommandContext.print_out',
                side_effect=record_output),
            patch(
                'apache.aurora.client.cli.context.AuroraCommandContext.get_job_config',
                return_value=job_config)):
        cmd = AuroraCommandLine()
        exit_code = cmd.execute([
            'job', 'inspect', '--raw', 'west/bozo/test/hello', 'config.aurora'
        ])
        assert exit_code == 0
        combined = '\n'.join(captured_lines)
        # It's impossible to assert string equivalence of two objects with nested un-hashable types.
        # Given that the only product of --raw flag is the thrift representation of AuroraConfig
        # it's enough to do a spot check here and let thrift.py tests validate the structure.
        assert 'TaskConfig' in combined
def test_simple_successful_create_job_with_bindings(self):
    """Run a test of the "create" command against a mocked-out API:
    Verifies that the creation command sends the right API RPCs, and performs the
    correct tests on the result.

    NOTE(review): a method with this name also appears earlier in this chunk —
    if both live in the same class, this definition shadows the other; confirm.
    """
    mock_context = FakeAuroraCommandContext()
    with contextlib.nested(
            patch('threading._Event.wait'),
            patch('apache.aurora.client.cli.jobs.Job.create_context',
                  return_value=mock_context)):
        mock_query = self.create_query()
        mock_context.add_expected_status_query_result(
            self.create_mock_status_query_result(ScheduleStatus.PENDING))
        mock_context.add_expected_status_query_result(
            self.create_mock_status_query_result(ScheduleStatus.RUNNING))
        api = mock_context.get_api('west')
        api.create_job.return_value = self.get_createjob_response()

        # This is the real test: invoke create as if it had been called by the command line.
        with temporary_file() as fp:
            fp.write(self.get_unbound_test_config())
            fp.flush()
            cmd = AuroraCommandLine()
            cmd.execute(['job', 'create', '--wait-until=RUNNING',
                         '--bind', 'cluster_binding=west',
                         '--bind', 'instances_binding=20', '--bind', 'TEST_BATCH=1',
                         'west/bozo/test/hello', fp.name])
            # Now check that the right API calls got made.
            # Check that create_job was called exactly once, with an AuroraConfig parameter.
            self.assert_create_job_called(api)
            self.assert_scheduler_called(api, mock_query, 2)
def test_killall_job(self):
    """Test killall client-side API logic: with --no-batching, kill is issued
    for all instances (instances=None) and waited on once."""
    mock_context = FakeAuroraCommandContext()
    mock_monitor = self.get_monitor_mock()
    with contextlib.nested(
            patch('apache.aurora.client.cli.jobs.Job.create_context',
                  return_value=mock_context),
            patch('apache.aurora.client.cli.jobs.JobMonitor',
                  return_value=mock_monitor),
            patch('apache.aurora.client.factory.CLUSTERS',
                  new=self.TEST_CLUSTERS)) as (_, m, _):
        api = mock_context.get_api('west')
        api.kill_job.return_value = self.create_simple_success_response()
        with temporary_file() as fp:
            fp.write(self.get_valid_config())
            fp.flush()
            cmd = AuroraCommandLine()
            cmd.execute([
                'job', 'killall', '--no-batching', '--config=%s' % fp.name,
                self.TEST_JOBSPEC
            ])
            self.assert_kill_call_no_instances(api)
            assert mock_monitor.wait_until.mock_calls == [
                call(m.terminal, instances=None, with_timeout=True)
            ]
def test_successful_status_output_no_metadata(self):
  """Exercise "job status" with a fully specified job key and check the
  formatted output when the tasks carry no metadata."""
  fake_context = FakeAuroraCommandContext()
  fake_context.add_expected_status_query_result(self.create_status_null_metadata())
  with contextlib.nested(
      patch('apache.aurora.client.cli.jobs.Job.create_context', return_value=fake_context),
      patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS)):
    cmd = AuroraCommandLine()
    cmd.execute(['job', 'status', 'west/bozo/test/hello'])
    # Timestamps vary by timezone, so mask the HH:MM:SS portion before comparing.
    actual = re.sub("\\d\\d:\\d\\d:\\d\\d", "##:##:##", '\n'.join(fake_context.get_out()))
    expected = textwrap.dedent("""\
        Active tasks (3):
        \tTask role: bozo, env: test, name: woops, instance: 1, status: RUNNING on slavehost
        \t cpus: 2, ram: 2 MB, disk: 2 MB
        \t events:
        \t 1970-11-23 ##:##:## RUNNING: Hi there
        \tTask role: bozo, env: test, name: woops, instance: 2, status: RUNNING on slavehost
        \t cpus: 2, ram: 2 MB, disk: 2 MB
        \t events:
        \t 1970-11-23 ##:##:## RUNNING: Hi there
        \tTask role: bozo, env: test, name: woops, instance: 3, status: RUNNING on slavehost
        \t cpus: 2, ram: 2 MB, disk: 2 MB
        \t events:
        \t 1970-11-23 ##:##:## RUNNING: Hi there
        Inactive tasks (0):
        """)
    assert actual == expected
def test_killall_job_batched(self):
  """Verify the client-side logic of "job killall" when batching is enabled."""
  fake_context = FakeAuroraCommandContext()
  monitor = self.get_monitor_mock()
  with contextlib.nested(
      patch('apache.aurora.client.cli.jobs.Job.create_context', return_value=fake_context),
      patch('apache.aurora.client.cli.jobs.JobMonitor', return_value=monitor),
      patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS)) as (
          _, monitor_patch, _):
    api = fake_context.get_api('west')
    api.kill_job.return_value = self.create_simple_success_response()
    # Batched killall first queries for the job's active instances.
    fake_context.add_expected_query_result(
        self.create_query_call_result(), job_key=self.TEST_JOBKEY)
    with temporary_file() as cfg:
      cfg.write(self.get_valid_config())
      cfg.flush()
      cmd = AuroraCommandLine()
      cmd.execute(['job', 'killall', '--config=%s' % cfg.name, self.TEST_JOBSPEC])
    # All 20 instances should have been killed and waited on, batch by batch.
    self.assert_kill_calls(api, instance_range=range(20))
    self.assert_wait_calls(monitor, monitor_patch.terminal, instance_range=range(20))
    self.assert_query(api)
def test_restart_simple(self):
  # Simplest client-side restart scenario: every step succeeds.
  (mock_api, scheduler_proxy) = self.create_mock_api()
  health_check = self.setup_health_checks(mock_api)
  self.setup_mock_scheduler_for_simple_restart(mock_api)
  with contextlib.nested(
      patch('apache.aurora.client.api.SchedulerProxy', return_value=scheduler_proxy),
      patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS),
      patch('apache.aurora.client.api.instance_watcher.StatusHealthCheck',
          return_value=health_check),
      patch('time.time', side_effect=functools.partial(self.fake_time, self)),
      patch('threading._Event.wait')):
    with temporary_file() as cfg:
      cfg.write(self.get_valid_config())
      cfg.flush()
      cmd = AuroraCommandLine()
      cmd.execute(['job', 'restart', '--batch-size=5', 'west/bozo/test/hello', cfg.name])
      # As in the update test, the precise call count is unimportant; what
      # matters is one status fetch up front plus at least one per batch,
      # with 20 instances in batches of 5 giving 4 batches.
      assert scheduler_proxy.getTasksWithoutConfigs.call_count >= 4
      assert scheduler_proxy.restartShards.call_count == 4
      # Every restartShards call is built by the same code path, so
      # inspecting the final one is representative.
      scheduler_proxy.restartShards.assert_called_with(
          JobKey(environment=self.TEST_ENV, role=self.TEST_ROLE, name=self.TEST_JOB),
          [15, 16, 17, 18, 19], None)
def test_kill_job_with_invalid_instances_nonstrict(self):
  """Verify "job kill" accepts out-of-range instance ids when not in strict mode."""
  fake_context = FakeAuroraCommandContext()
  monitor = self.get_monitor_mock()
  with contextlib.nested(
      patch('apache.aurora.client.cli.jobs.Job.create_context', return_value=fake_context),
      patch('apache.aurora.client.cli.jobs.JobMonitor', return_value=monitor),
      patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS)) as (
          _, monitor_patch, _):
    api = fake_context.get_api('west')
    api.kill_job.return_value = self.create_simple_success_response()
    with temporary_file() as cfg:
      cfg.write(self.get_valid_config())
      cfg.flush()
      cmd = AuroraCommandLine()
      cmd.execute(
          ['job', 'kill', '--config=%s' % cfg.name, '--no-batching',
           self.get_instance_spec('0,2,4-6,11-13')])
    # The ranged spec expands to this explicit instance list.
    expected_instances = [0, 2, 4, 5, 6, 11, 12, 13]
    self.assert_kill_calls(api, instances=expected_instances)
    self.assert_wait_calls(monitor, monitor_patch.terminal, instances=expected_instances)
def test_create_job_delayed(self):
  """Run "job create" against a mocked-out API where the monitor has to
  poll several times before the job finally reaches RUNNING."""
  fake_context = FakeAuroraCommandContext()
  with contextlib.nested(
      patch('threading._Event.wait'),
      patch('apache.aurora.client.cli.jobs.Job.create_context', return_value=fake_context)):
    expected_query = self.create_query()
    # Two PENDING polls before the job comes up.
    for status in (ScheduleStatus.PENDING, ScheduleStatus.PENDING, ScheduleStatus.RUNNING):
      fake_context.add_expected_status_query_result(
          self.create_mock_status_query_result(status))
    api = fake_context.get_api('west')
    api.create_job.return_value = self.get_createjob_response()
    api.get_tier_configs.return_value = self.get_mock_tier_configurations()
    with temporary_file() as cfg:
      cfg.write(self.get_valid_config())
      cfg.flush()
      cmd = AuroraCommandLine()
      cmd.execute(['job', 'create', '--wait-until=RUNNING', 'west/bozo/test/hello', cfg.name])
    # One create RPC, three status polls.
    self.assert_create_job_called(api)
    self.assert_scheduler_called(api, expected_query, 3)
def test_kill_job_with_instances_batched_maxerrors(self):
  """Verify batched "job kill" stops once --max-total-failures is exceeded."""
  fake_context = FakeAuroraCommandContext()
  # Monitor reports failure for every batch.
  failing_monitor = self.get_monitor_mock(result=False)
  with contextlib.nested(
      patch('apache.aurora.client.cli.jobs.Job.create_context', return_value=fake_context),
      patch('apache.aurora.client.cli.jobs.JobMonitor', return_value=failing_monitor),
      patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS)):
    api = fake_context.get_api('west')
    fake_context.add_expected_query_result(
        self.create_query_call_result(), job_key=self.TEST_JOBKEY)
    api.kill_job.return_value = self.create_simple_success_response()
    with temporary_file() as cfg:
      cfg.write(self.get_valid_config())
      cfg.flush()
      cmd = AuroraCommandLine()
      cmd.execute(
          ['job', 'kill', '--max-total-failures=1', '--config=%s' % cfg.name,
           self.get_instance_spec('0-4')])
    # With every batch failing and a failure budget of 1, the command
    # should abort after the second batch.
    self.assert_kill_calls(api, instance_range=range(2))
    self.assert_query(api)
def test_simple_successful_create_job_output(self):
  """Run "job create" against a mocked-out API and verify that a
  successful creation produces exactly the expected console output."""
  fake_context = FakeAuroraCommandContext()
  with contextlib.nested(
      patch('threading._Event.wait'),
      patch('apache.aurora.client.cli.jobs.Job.create_context', return_value=fake_context)):
    # One PENDING poll, then RUNNING twice.
    for status in (ScheduleStatus.PENDING, ScheduleStatus.RUNNING, ScheduleStatus.RUNNING):
      fake_context.add_expected_status_query_result(
          self.create_mock_status_query_result(status))
    api = fake_context.get_api('west')
    api.create_job.return_value = self.get_createjob_response()
    api.get_tier_configs.return_value = self.get_mock_tier_configurations()
    with temporary_file() as cfg:
      cfg.write(self.get_valid_config())
      cfg.flush()
      cmd = AuroraCommandLine()
      result = cmd.execute(
          ['job', 'create', '--wait-until=RUNNING', 'west/bozo/test/hello', cfg.name])
      assert result == EXIT_OK
    # Success message on stdout, nothing on stderr.
    assert fake_context.get_out() == [
        "Job create succeeded: job url=http://something_or_other/scheduler/bozo/test/hello"]
    assert fake_context.get_err() == []
def test_kill_job_with_empty_instances_batched(self):
  """Verify batched "job kill" issues no kill RPC when the job has no active tasks."""
  fake_context = FakeAuroraCommandContext()
  with contextlib.nested(
      patch('apache.aurora.client.cli.jobs.Job.create_context', return_value=fake_context),
      patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS)):
    api = fake_context.get_api('west')
    # Make the getTasksWithoutConfigs response carry an empty task list.
    empty_status = self.create_simple_success_response()
    empty_status.result = Result(scheduleStatusResult=ScheduleStatusResult(tasks=[]))
    fake_context.add_expected_query_result(empty_status)
    api.kill_job.return_value = self.create_simple_success_response()
    with temporary_file() as cfg:
      cfg.write(self.get_valid_config())
      cfg.flush()
      cmd = AuroraCommandLine()
      cmd.execute(
          ['job', 'kill', '--config=%s' % cfg.name, self.get_instance_spec('0,2,4-13')])
    # Nothing to kill, so kill_job must never have been invoked.
    assert api.kill_job.call_count == 0
def test_create_job_fail_and_write_log(self):
  """When an unknown error escapes command execution, the framework should
  catch it and write an error log file containing the command-line
  arguments passed to aurora and the stack trace of the failure."""
  fake_context = FakeAuroraCommandContext()
  with contextlib.nested(
      # Freeze time so the log file name ("aurora-23.error-log") is predictable.
      patch('time.time', return_value=23),
      patch('apache.aurora.client.cli.jobs.Job.create_context', return_value=fake_context)):
    api = fake_context.get_api('west')
    api.create_job.side_effect = UnknownException()
    with temporary_file() as cfg:
      cfg.write(self.get_valid_config())
      cfg.flush()
      cmd = AuroraCommandLine()
      result = cmd.execute(
          ['job', 'create', '--wait-until=RUNNING', '--error-log-dir=./logged-errors',
           'west/bozo/test/hello', cfg.name])
      assert result == EXIT_UNKNOWN_ERROR
      # The log must record the argv that triggered the failure plus a traceback.
      with open("./logged-errors/aurora-23.error-log", "r") as logfile:
        error_log = logfile.read()
      assert error_log.startswith("ERROR LOG: command arguments = %s" %
          ['job', 'create', '--wait-until=RUNNING', '--error-log-dir=./logged-errors',
           'west/bozo/test/hello', cfg.name])
      assert "Traceback" in error_log
  # Clean up the log directory the failing command created.
  if os.path.exists("./logged-errors"):
    shutil.rmtree("./logged-errors")
def test_kill_job_with_instances_batched_output(self):
  """Verify the console output of a successful batched "job kill"."""
  fake_context = FakeAuroraCommandContext()
  with contextlib.nested(
      patch('apache.aurora.client.cli.jobs.Job.create_context', return_value=fake_context),
      patch('apache.aurora.client.cli.jobs.JobMonitor', return_value=self.get_monitor_mock()),
      patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS)):
    api = fake_context.get_api('west')
    fake_context.add_expected_query_result(self.create_query_call_result())
    api.kill_job.return_value = self.create_simple_success_response()
    with temporary_file() as cfg:
      cfg.write(self.get_valid_config())
      cfg.flush()
      cmd = AuroraCommandLine()
      cmd.execute(
          ['job', 'kill', '--config=%s' % cfg.name, '--batch-size=5',
           self.get_instance_spec('0,2,4-6')])
    # One per-batch success line plus the final summary; no errors.
    assert fake_context.get_out() == [
        'Successfully killed instances [0, 2, 4, 5, 6]', 'Job kill succeeded']
    assert fake_context.get_err() == []
def test_successful_status_output_no_metadata(self):
  """Exercise "job status" with a fully specified job key and check the
  formatted output when the tasks carry no metadata."""
  fake_context = FakeAuroraCommandContext()
  fake_context.add_expected_status_query_result(self.create_status_null_metadata())
  with patch('apache.aurora.client.cli.jobs.Job.create_context', return_value=fake_context):
    cmd = AuroraCommandLine()
    cmd.execute(['job', 'status', 'west/bozo/test/hello'])
    # Timestamps vary by timezone, so mask the HH:MM:SS portion before comparing.
    actual = re.sub("\\d\\d:\\d\\d:\\d\\d", "##:##:##", '\n'.join(fake_context.get_out()))
    expected = textwrap.dedent("""\
        Active tasks (3):
        \tTask role: bozo, env: test, name: woops, instance: 1, status: RUNNING on slavehost
        \t CPU: 2 core(s), RAM: 2 MB, Disk: 2 MB
        \t events:
        \t 1970-11-23 ##:##:## RUNNING: Hi there
        \tTask role: bozo, env: test, name: woops, instance: 2, status: RUNNING on slavehost
        \t CPU: 2 core(s), RAM: 2 MB, Disk: 2 MB
        \t events:
        \t 1970-11-23 ##:##:## RUNNING: Hi there
        \tTask role: bozo, env: test, name: woops, instance: 3, status: RUNNING on slavehost
        \t CPU: 2 core(s), RAM: 2 MB, Disk: 2 MB
        \t events:
        \t 1970-11-23 ##:##:## RUNNING: Hi there
        Inactive tasks (0):
        """)
    assert actual == expected
def test_kill_job_with_instances_batched_maxerrors_output(self):
  """Verify the console output when batched "job kill" aborts on too many failures."""
  fake_context = FakeAuroraCommandContext()
  # Monitor reports failure for every batch.
  failing_monitor = self.get_monitor_mock(result=False)
  with contextlib.nested(
      patch('apache.aurora.client.cli.jobs.Job.create_context', return_value=fake_context),
      patch('apache.aurora.client.cli.jobs.JobMonitor', return_value=failing_monitor),
      patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS)):
    api = fake_context.get_api('west')
    fake_context.add_expected_query_result(self.create_query_call_result())
    api.kill_job.return_value = self.create_simple_success_response()
    with temporary_file() as cfg:
      cfg.write(self.get_valid_config())
      cfg.flush()
      cmd = AuroraCommandLine()
      cmd.execute(
          ['job', 'kill', '--max-total-failures=1', '--config=%s' % cfg.name,
           '--batch-size=5', self.get_instance_spec('0,2,4-13')])
    # Nothing on stdout; two batch failures then the abort message on stderr.
    assert fake_context.get_out() == []
    assert fake_context.get_err() == [
        'Instances [0, 2, 4, 5, 6] were not killed in time',
        'Instances [7, 8, 9, 10, 11] were not killed in time',
        'Error executing command: Exceeded maximum number of errors while killing instances']
def test_successful_diff(self):
  """Verify the happy path of the "job diff" command."""
  (mock_api, scheduler_proxy) = self.create_mock_api()
  with contextlib.nested(
      patch('apache.aurora.client.api.SchedulerProxy', return_value=scheduler_proxy),
      patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS),
      patch('subprocess.call', return_value=0),
      patch('json.loads', return_value=Mock())) as (_, _, subprocess_patch, _):
    scheduler_proxy.getTasksStatus.return_value = self.create_status_response()
    self.setup_populate_job_config(scheduler_proxy)
    with temporary_file() as cfg:
      cfg.write(self.get_valid_config())
      cfg.flush()
      cmd = AuroraCommandLine()
      cmd.execute(['job', 'diff', 'west/bozo/test/hello', cfg.name])
      # diff must fetch the live task status, populate a config from the
      # local file, and shell out to a diff program.
      scheduler_proxy.getTasksStatus.assert_called_with(
          TaskQuery(jobName='hello', environment='test', owner=Identity(role='bozo'),
              statuses=ACTIVE_STATES))
      assert scheduler_proxy.populateJobConfig.call_count == 1
      populate_arg = scheduler_proxy.populateJobConfig.call_args[0][0]
      assert isinstance(populate_arg, JobConfiguration)
      assert populate_arg.key == JobKey(environment=u'test', role=u'bozo', name=u'hello')
      # The external diff viewer is invoked exactly once with two file arguments.
      assert subprocess_patch.call_count == 1
      assert len(subprocess_patch.call_args[0][0]) == 3
      assert subprocess_patch.call_args[0][0][0] == os.environ.get('DIFF_VIEWER', 'diff')
def test_successful_run(self):
  """Verify the happy path of "task run": it queries the scheduler for the
  job's tasks and then ssh-es into each slave to run the command."""
  (mock_api, scheduler_proxy) = self.create_mock_api()
  scheduler_proxy.getTasksStatus.return_value = self.create_status_response()
  sandbox_args = {'slave_root': '/slaveroot', 'slave_run_directory': 'slaverun'}
  with contextlib.nested(
      patch('apache.aurora.client.api.SchedulerProxy', return_value=scheduler_proxy),
      patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS),
      patch('apache.aurora.client.cli.task.CLUSTERS', new=self.TEST_CLUSTERS),
      patch('apache.aurora.client.api.command_runner.DistributedCommandRunner.sandbox_args',
          return_value=sandbox_args),
      patch('subprocess.Popen', return_value=self.create_mock_process())) as (
          _, _, _, _, popen_patch):
    cmd = AuroraCommandLine()
    cmd.execute(['task', 'run', 'west/bozo/test/hello', 'ls'])
    # run sends a getTasksStatus query for the active tasks of the job.
    scheduler_proxy.getTasksStatus.assert_called_with(
        TaskQuery(jobName='hello', environment='test', owner=Identity(role='bozo'),
            statuses=set([ScheduleStatus.RUNNING, ScheduleStatus.KILLING,
                ScheduleStatus.RESTARTING, ScheduleStatus.PREEMPTING,
                ScheduleStatus.DRAINING])))
    # The mocked status response contains three ScheduledTasks, so the
    # command should have been run (via ssh) three times.
    assert popen_patch.call_count == 3
    popen_patch.assert_called_with(
        ['ssh', '-n', '-q', 'bozo@slavehost',
         'cd /slaveroot/slaves/*/frameworks/*/executors/thermos-1287391823/runs/'
         'slaverun/sandbox;ls'],
        stderr=-2, stdout=-1)
def test_diff_server_error(self):
  """Verify "job diff" behavior when the scheduler's status call fails."""
  mock_options = self.setup_mock_options()
  (mock_api, scheduler_proxy) = self.create_mock_api()
  scheduler_proxy.getTasksStatus.return_value = self.create_failed_status_response()
  self.setup_populate_job_config(scheduler_proxy)
  with contextlib.nested(
      patch('apache.aurora.client.api.SchedulerProxy', return_value=scheduler_proxy),
      patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS),
      patch('twitter.common.app.get_options', return_value=mock_options),
      patch('subprocess.call', return_value=0),
      patch('json.loads', return_value=Mock())) as (_, _, _, subprocess_patch, _):
    with temporary_file() as cfg:
      cfg.write(self.get_valid_config())
      cfg.flush()
      cmd = AuroraCommandLine()
      result = cmd.execute(['job', 'diff', 'west/bozo/test/hello', cfg.name])
      assert result == EXIT_INVALID_PARAMETER
      # getTasksStatus is attempted, but because it fails neither
      # populateJobConfig nor the external diff should be reached.
      scheduler_proxy.getTasksStatus.assert_called_with(
          TaskQuery(jobName='hello', environment='test', owner=Identity(role='bozo'),
              statuses=ACTIVE_STATES))
      assert scheduler_proxy.populateJobConfig.call_count == 0
      assert subprocess_patch.call_count == 0
def test_large_with_instances_doesnt_warn(self):
  # Updating only a few instances of a very large job should not trigger
  # the "large update" warning pause (time.sleep must never be called).
  (mock_api, scheduler_proxy) = self.create_mock_api()
  health_check = self.setup_health_checks(mock_api)
  quota_check = self.setup_quota_check()
  job_monitor = self.setup_job_monitor()
  fake_mux = self.FakeSchedulerMux()
  self.setup_mock_scheduler_for_simple_update(mock_api, count=20)
  # Inflate the config from 20 to 200 instances to make the job "large".
  config = self.get_valid_config().replace("instances = 20", "instances = 200")
  with contextlib.nested(
      patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS),
      patch('apache.aurora.client.api.SchedulerProxy', return_value=scheduler_proxy),
      patch('apache.aurora.client.api.instance_watcher.StatusHealthCheck',
          return_value=health_check),
      patch('apache.aurora.client.api.updater.JobMonitor', return_value=job_monitor),
      patch('apache.aurora.client.api.updater.QuotaCheck', return_value=quota_check),
      patch('apache.aurora.client.api.updater.SchedulerMux', return_value=fake_mux),
      patch('time.time', side_effect=functools.partial(self.fake_time, self)),
      patch('threading._Event.wait')):
    with patch('time.sleep') as sleep_patch:
      with temporary_file() as cfg:
        cfg.write(config)
        cfg.flush()
        cmd = AuroraCommandLine()
        # Only instances 1 and 3 are updated, so no warning delay applies.
        cmd.execute(['job', 'update', 'west/bozo/test/hello/1,3', cfg.name])
        assert sleep_patch.call_count == 0
def test_create_job_with_successful_hook(self):
  # A registered command hook that accepts the command should run both its
  # pre- and post-execution phases around a successful "job create".
  GlobalCommandHookRegistry.reset()
  hook = HookForTesting(True)
  GlobalCommandHookRegistry.register_command_hook(hook)
  fake_context = FakeAuroraCommandContext()
  with patch("apache.aurora.client.cli.jobs.Job.create_context", return_value=fake_context):
    expected_query = self.create_query()
    for status in (ScheduleStatus.INIT, ScheduleStatus.RUNNING, ScheduleStatus.RUNNING):
      fake_context.add_expected_status_query_result(
          self.create_mock_status_query_result(status))
    fake_context.get_api("west").check_status.side_effect = (
        lambda x: self.create_mock_status_query_result(ScheduleStatus.RUNNING))
    api = fake_context.get_api("west")
    api.create_job.return_value = self.get_createjob_response()
    api.get_tier_configs.return_value = self.get_mock_tier_configurations()
    with temporary_file() as cfg:
      cfg.write(self.get_valid_config())
      cfg.flush()
      cmd = AuroraCommandLine()
      cmd.execute(
          ["job", "create", "--wait-until=RUNNING", "west/bozo/test/hello", cfg.name])
    self.assert_create_job_called(api)
    self.assert_scheduler_called(api, expected_query, 1)
    # Both hook phases must have fired.
    assert hook.ran_pre
    assert hook.ran_post
def test_start_update_command_line_succeeds(self):
  # Happy path for "beta-update start": the API call succeeds and the
  # update URL is printed.
  fake_context = FakeAuroraCommandContext()
  resp = self.create_simple_success_response()
  resp.result = Result(startJobUpdateResult=StartJobUpdateResult(updateId="id"))
  with contextlib.nested(
      patch('apache.aurora.client.cli.update.Update.create_context',
          return_value=fake_context),
      patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS)):
    mock_api = fake_context.get_api('west')
    mock_api.start_job_update.return_value = resp
    with temporary_file() as cfg:
      cfg.write(self.get_valid_config())
      cfg.flush()
      cmd = AuroraCommandLine()
      result = cmd.execute(['beta-update', 'start', self.TEST_JOBSPEC, cfg.name])
      assert result == EXIT_OK
    update_url_msg = StartUpdate.UPDATE_MSG_TEMPLATE % (
        fake_context.get_update_page(
            mock_api, AuroraJobKey.from_path(self.TEST_JOBSPEC), "id"))
    # Exactly one start_job_update RPC, with a parsed AuroraConfig and no
    # explicit instance selection.
    assert mock_api.start_job_update.call_count == 1
    args, kwargs = mock_api.start_job_update.call_args
    assert isinstance(args[0], AuroraConfig)
    assert args[1] is None
    assert fake_context.get_out() == [update_url_msg]
    assert fake_context.get_err() == []
def test_diff_invalid_config(self):
  """Verify "job diff" rejects a config file containing an error before
  making any scheduler calls."""
  mock_options = self.setup_mock_options()
  (mock_api, scheduler_proxy) = self.create_mock_api()
  scheduler_proxy.getTasksStatus.return_value = self.create_status_response()
  self.setup_populate_job_config(scheduler_proxy)
  with contextlib.nested(
      patch('apache.aurora.client.api.SchedulerProxy', return_value=scheduler_proxy),
      patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS),
      patch('twitter.common.app.get_options', return_value=mock_options),
      patch('subprocess.call', return_value=0),
      patch('json.loads', return_value=Mock())) as (_, _, _, subprocess_patch, _):
    with temporary_file() as cfg:
      cfg.write(self.get_invalid_config('stupid="me"',))
      cfg.flush()
      cmd = AuroraCommandLine()
      result = cmd.execute(['job', 'diff', 'west/bozo/test/hello', cfg.name])
      assert result == EXIT_INVALID_CONFIGURATION
      # The config fails to parse, so no scheduler RPCs and no diff process.
      assert scheduler_proxy.getTasksStatus.call_count == 0
      assert scheduler_proxy.populateJobConfig.call_count == 0
      assert subprocess_patch.call_count == 0
def test_update_status(self):
  # "beta-update status" should render the update's events and per-instance
  # events, and query by the parsed job key.
  fake_context = FakeAuroraCommandContext()
  api = fake_context.get_api('west')
  api.query_job_updates.return_value = self.get_status_query_response()
  api.get_job_update_details.return_value = self.get_update_details_response()
  with contextlib.nested(
      patch('apache.aurora.client.cli.update.Update.create_context',
          return_value=fake_context),
      patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS)):
    cmd = AuroraCommandLine()
    result = cmd.execute(["beta-update", "status", "west/mcc/test/hello"])
    assert result == EXIT_OK
    assert fake_context.get_out() == [
        "Job: west/mcc/test/hello, UpdateID: fake-update-identifier",
        "Started YYYY-MM-DD HH:MM:SS, last updated: YYYY-MM-DD HH:MM:SS",
        "Current status: ROLLING_FORWARD",
        "Update events:",
        "  Status: ROLLING_FORWARD at YYYY-MM-DD HH:MM:SS",
        "  Status: ROLL_FORWARD_PAUSED at YYYY-MM-DD HH:MM:SS",
        "  Status: ROLLING_FORWARD at YYYY-MM-DD HH:MM:SS",
        "Instance events:",
        "  Instance 1 at YYYY-MM-DD HH:MM:SS: INSTANCE_UPDATING",
        "  Instance 2 at YYYY-MM-DD HH:MM:SS: INSTANCE_UPDATING",
        "  Instance 1 at YYYY-MM-DD HH:MM:SS: INSTANCE_UPDATED",
        "  Instance 2 at YYYY-MM-DD HH:MM:SS: INSTANCE_UPDATED"]
    fake_context.get_api("west").query_job_updates.assert_called_with(
        job_key=AuroraJobKey('west', 'mcc', 'test', 'hello'))
def test_plugin_runs_in_create_job(self):
  """Run "job create" with a registered plugin against a mocked-out API
  and verify both the creation RPCs and the plugin's effect."""
  # Patching create_context yields a fake context through which every
  # dependency of the command can be stubbed.
  fake_context = FakeAuroraCommandContext()
  with patch('apache.aurora.client.cli.jobs.Job.create_context', return_value=fake_context):
    # The job monitor issues a TaskQuery at least twice — once before the
    # job exists and once after — so queue up both status results.
    expected_query = self.create_mock_query()
    fake_context.add_expected_status_query_result(
        self.create_mock_status_query_result(ScheduleStatus.INIT))
    fake_context.add_expected_status_query_result(
        self.create_mock_status_query_result(ScheduleStatus.RUNNING))
    api = fake_context.get_api('west')
    api.create_job.return_value = self.get_createjob_response()
    # Invoke create as if from the command line, with the plugin's option.
    with temporary_file() as cfg:
      cfg.write(self.get_valid_config())
      cfg.flush()
      cmd = AuroraCommandLine()
      cmd.register_plugin(BogusPlugin())
      cmd.execute(['job', 'create', '--bogosity=maximum', '--wait_until=RUNNING',
          'west/bozo/test/hello', cfg.name])
    # create_job called exactly once with an AuroraConfig parameter.
    self.assert_create_job_called(api)
    self.assert_scheduler_called(api, expected_query, 2)
    # The plugin must have propagated its option value into the context.
    assert fake_context.bogosity == "maximum"
def test_restart_simple(self):
  # Simplest client-side restart scenario: every step succeeds.
  (mock_api, scheduler_proxy) = self.create_mock_api()
  health_check = self.setup_health_checks(mock_api)
  self.setup_mock_scheduler_for_simple_restart(mock_api)
  with contextlib.nested(
      patch('apache.aurora.client.api.SchedulerProxy', return_value=scheduler_proxy),
      patch('apache.aurora.client.api.instance_watcher.StatusHealthCheck',
          return_value=health_check),
      patch('time.time', side_effect=functools.partial(self.fake_time, self)),
      patch('threading._Event.wait')):
    with temporary_file() as cfg:
      cfg.write(self.get_valid_config())
      cfg.flush()
      cmd = AuroraCommandLine()
      cmd.execute(['job', 'restart', '--batch-size=5', 'west/bozo/test/hello',
          '--config', cfg.name])
      # As in the update test, the precise call count is unimportant; what
      # matters is one status fetch up front plus at least one per batch,
      # with 4 batches in total.
      assert scheduler_proxy.getTasksWithoutConfigs.call_count >= 4
      assert scheduler_proxy.restartShards.call_count == 4
      # Every restartShards call is built by the same code path, so
      # inspecting the final one is representative.
      scheduler_proxy.restartShards.assert_called_with(
          JobKey(environment=self.TEST_ENV, role=self.TEST_ROLE, name=self.TEST_JOB),
          [15, 16, 17, 18, 19], None)
def test_simple_successful_create_job_open_page(self):
  # "job create --open-browser" should open the scheduler page for the new job.
  fake_context = FakeAuroraCommandContext()
  with contextlib.nested(
      # TODO(maxim): Patching threading.Event with all possible namespace/patch/mock
      # combinations did not produce the desired effect. Investigate why (AURORA-510)
      patch('threading._Event.wait'),
      patch('apache.aurora.client.cli.jobs.Job.create_context', return_value=fake_context)):
    expected_query = self.create_query()
    for status in (ScheduleStatus.PENDING, ScheduleStatus.RUNNING):
      fake_context.add_expected_status_query_result(
          self.create_mock_status_query_result(status))
    api = fake_context.get_api('west')
    api.create_job.return_value = self.get_createjob_response()
    with temporary_file() as cfg:
      cfg.write(self.get_valid_config())
      cfg.flush()
      cmd = AuroraCommandLine()
      result = cmd.execute(['job', 'create', '--wait-until=RUNNING', '--open-browser',
          'west/bozo/test/hello', cfg.name])
      assert result == EXIT_OK
    self.assert_create_job_called(api)
    self.assert_scheduler_called(api, expected_query, 2)
    # The browser should have been pointed at the job's scheduler page.
    assert fake_context.showed_urls == [
        "http://something_or_other/scheduler/bozo/test/hello"]
def test_restart_invalid_shards(self):
  # Client-side restart in strict mode with an invalid option value
  # (--max-total-failures=-1) must bail out before touching the scheduler.
  (mock_api, scheduler_proxy) = self.create_mock_api()
  health_check = self.setup_health_checks(mock_api)
  self.setup_mock_scheduler_for_simple_restart(mock_api)
  with contextlib.nested(
      patch('apache.aurora.client.api.SchedulerProxy', return_value=scheduler_proxy),
      patch('apache.aurora.client.api.instance_watcher.StatusHealthCheck',
          return_value=health_check),
      patch('time.time', side_effect=functools.partial(self.fake_time, self)),
      patch('threading._Event.wait')):
    with temporary_file() as cfg:
      cfg.write(self.get_valid_config())
      cfg.flush()
      cmd = AuroraCommandLine()
      result = cmd.execute(['job', 'restart', '--batch-size=5',
          '--max-total-failures=-1', 'west/bozo/test/hello', '--config', cfg.name])
      assert result == EXIT_INVALID_PARAMETER
      # Validation failed up front, so no scheduler RPCs were made.
      assert scheduler_proxy.getTasksWithoutConfigs.call_count == 0
      assert scheduler_proxy.restartShards.call_count == 0
def test_create_job_startup_fails(self):
  # If the job is created but its status check reports an error while
  # waiting for RUNNING, "job create" should fail with a clear message.
  fake_context = FakeAuroraCommandContext()
  with contextlib.nested(
      patch('threading._Event.wait'),
      patch('apache.aurora.client.cli.jobs.Job.create_context', return_value=fake_context)):
    fake_context.add_expected_status_query_result(
        self.create_mock_status_query_result(ScheduleStatus.PENDING))
    fake_context.add_expected_status_query_result(
        self.create_mock_status_query_result(ScheduleStatus.RUNNING))

    # Override check_status in the context so it always reports an error.
    def check_status_side_effect(*args):
      return self.create_error_response()

    fake_context.get_api("west").check_status.side_effect = check_status_side_effect
    api = fake_context.get_api('west')
    api.create_job.return_value = self.get_createjob_response()
    with temporary_file() as cfg:
      cfg.write(self.get_valid_config())
      cfg.flush()
      cmd = AuroraCommandLine()
      result = cmd.execute(
          ['job', 'create', '--wait-until=RUNNING', 'west/bozo/test/hello', cfg.name])
      assert result == EXIT_COMMAND_FAILURE
    assert fake_context.get_out() == []
    assert fake_context.get_err() == [
        "Error occurred while creating job west/bozo/test/hello"]
def test_restart_failed_restart(self):
  # A restartShards RPC that returns an error response should abort the
  # restart after the first batch and exit with an API error.
  (mock_api, scheduler_proxy) = self.create_mock_api()
  health_check = self.setup_health_checks(mock_api)
  self.setup_mock_scheduler_for_simple_restart(mock_api)
  scheduler_proxy.restartShards.return_value = self.create_error_response()
  with contextlib.nested(
      patch('apache.aurora.client.api.SchedulerProxy', return_value=scheduler_proxy),
      patch('apache.aurora.client.api.instance_watcher.StatusHealthCheck',
          return_value=health_check),
      patch('time.time', side_effect=functools.partial(self.fake_time, self)),
      patch('threading._Event.wait')):
    with temporary_file() as cfg:
      cfg.write(self.get_valid_config())
      cfg.flush()
      cmd = AuroraCommandLine()
      result = cmd.execute(['job', 'restart', '--batch-size=5', 'west/bozo/test/hello',
          '--config', cfg.name])
      # Exactly one status fetch and one (failed) restart batch.
      assert scheduler_proxy.getTasksWithoutConfigs.call_count == 1
      assert scheduler_proxy.restartShards.call_count == 1
      scheduler_proxy.restartShards.assert_called_with(
          JobKey(environment=self.TEST_ENV, role=self.TEST_ROLE, name=self.TEST_JOB),
          [0, 1, 2, 3, 4], None)
      assert result == EXIT_API_ERROR
def test_killall_job_something_else(self):
  """Verify the batched killall path: repeated kill_job calls, one per batch."""
  fake_context = FakeAuroraCommandContext()
  scheduler_proxy = create_autospec(spec=SchedulerThriftApiSpec, instance=True)
  with contextlib.nested(
      patch('threading._Event.wait'),
      patch('apache.aurora.client.cli.jobs.Job.create_context', return_value=fake_context),
      patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS)):
    api = fake_context.get_api('west')
    api.kill_job.return_value = self.get_kill_job_response()
    fake_context.add_expected_status_query_result(self.create_status_call_result())
    scheduler_proxy.killTasks.return_value = self.get_kill_job_response()
    fake_context.add_expected_status_query_result(
        self.create_status_call_result(self.create_mock_task(ScheduleStatus.KILLED)))
    with temporary_file() as cfg:
      cfg.write(self.get_valid_config())
      cfg.flush()
      cmd = AuroraCommandLine()
      cmd.execute(['job', 'killall', '--config=%s' % cfg.name, 'west/bozo/test/hello'])
    # 20 instances in batches of 5 means 4 kill_job calls; the final one
    # covers the last batch of instances.
    assert api.kill_job.call_count == 4
    last_batch = [15, 16, 17, 18, 19]
    api.kill_job.assert_called_with(
        AuroraJobKey.from_path('west/bozo/test/hello'), last_batch)
    self.assert_scheduler_called(api, self.get_expected_task_query(last_batch), 6)
def test_restart_failed_restart_output(self):
    """A failed restart should write nothing to stdout and an error message to stderr."""
    self.reset_mock_io()
    api, scheduler_proxy = self.create_mock_api()
    health_check = self.setup_health_checks(api)
    self.setup_mock_scheduler_for_simple_restart(api)
    # Force the restart RPC itself to fail.
    scheduler_proxy.restartShards.return_value = self.create_error_response()
    patches = [
        patch('apache.aurora.client.api.SchedulerProxy', return_value=scheduler_proxy),
        patch('apache.aurora.client.api.instance_watcher.StatusHealthCheck',
              return_value=health_check),
        patch('time.time', side_effect=functools.partial(self.fake_time, self)),
        # Capture client output so the assertions below can inspect it.
        patch('apache.aurora.client.cli.context.AuroraCommandContext.print_out',
              side_effect=self.mock_print_out),
        patch('apache.aurora.client.cli.context.AuroraCommandContext.print_err',
              side_effect=self.mock_print_err),
        patch('threading._Event.wait'),
    ]
    with contextlib.nested(*patches):
      with temporary_file() as config_file:
        config_file.write(self.get_valid_config())
        config_file.flush()
        command_line = AuroraCommandLine()
        command_line.execute(
            ['job', 'restart', '--batch-size=5', 'west/bozo/test/hello', '--config',
             config_file.name])
        # Nothing on stdout; the restart failure is reported on stderr.
        assert self.MOCK_OUT == []
        assert "Error restarting job west/bozo/test/hello:" in self.MOCK_ERR
def test_killall_job_wait_until_timeout(self):
    """Test kill client-side API logic."""
    context = FakeAuroraCommandContext()
    scheduler_proxy = create_autospec(spec=SchedulerThriftApiSpec, instance=True)
    with contextlib.nested(
        patch('threading._Event.wait'),
        patch('apache.aurora.client.cli.jobs.Job.create_context', return_value=context),
        patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS)):
      api = context.get_api('west')
      scheduler_proxy.getTasksWithoutConfigs.return_value = self.create_status_call_result()
      api.kill_job.return_value = self.get_kill_job_response()
      scheduler_proxy.killTasks.return_value = self.get_kill_job_response()
      # Every status poll reports tasks still RUNNING, so the wait must time out.
      for _ in range(8):
        context.add_expected_status_query_result(
            self.create_status_call_result(self.create_mock_task(ScheduleStatus.RUNNING)))
      with temporary_file() as config_file:
        config_file.write(self.get_valid_config())
        config_file.flush()
        command_line = AuroraCommandLine()
        assert EXIT_TIMEOUT == command_line.execute(
            ['job', 'killall', '--no-batching', '--config=%s' % config_file.name,
             'west/bozo/test/hello'])
        # Now check that the right API calls got made: a single whole-job kill,
        # followed by status polls until the timeout.
        assert api.kill_job.call_count == 1
        api.kill_job.assert_called_with(AuroraJobKey.from_path('west/bozo/test/hello'), None)
        self.assert_scheduler_called(api, self.get_expected_task_query(), 8)
def test_restart_no_such_job_with_instances(self):
    """Restarting specific instances of a nonexistent job should fail fast with an API error."""
    (mock_api, mock_scheduler_proxy) = self.create_mock_api()
    mock_health_check = self.setup_health_checks(mock_api)
    self.setup_mock_scheduler_for_simple_restart(mock_api)
    # Make getTasksWithoutConfigs return an error, which is what happens when a job is not found.
    mock_scheduler_proxy.getTasksWithoutConfigs.return_value = self.create_error_response()
    with contextlib.nested(
        patch('apache.aurora.client.cli.print_aurora_log'),
        patch('apache.aurora.client.api.SchedulerProxy', return_value=mock_scheduler_proxy),
        patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS),
        patch('apache.aurora.client.api.instance_watcher.StatusHealthCheck',
            return_value=mock_health_check),
        patch('time.time', side_effect=functools.partial(self.fake_time, self)),
        patch('threading._Event.wait')) as (mock_log, _, _, _, _, _):
      with temporary_file() as fp:
        fp.write(self.get_valid_config())
        fp.flush()
        cmd = AuroraCommandLine()
        result = cmd.execute(['job', 'restart', '--batch-size=5', 'west/bozo/test/hello/1-3',
            fp.name])
        # We need to check that getTasksWithoutConfigs was called, but that restartShards wasn't.
        # In older versions of the client, if shards were specified, but the job didn't
        # exist, the error wouldn't be detected until restartShards was called, which generated
        # the wrong error message.
        assert mock_scheduler_proxy.getTasksWithoutConfigs.call_count == 1
        assert mock_scheduler_proxy.restartShards.call_count == 0
        assert result == EXIT_API_ERROR
        # Error message should be written to log, and it should be what was returned
        # by the getTasksWithoutConfigs call.
        mock_log.assert_called_with(20, 'Error executing command: %s', 'Damn')
def test_status_wildcard(self):
    """A wildcard status query should expand via api.get_jobs and then run
    a status check on each matched job."""
    context = FakeAuroraCommandContext()
    api = context.get_api('west')
    api.check_status.return_value = self.create_status_response()
    api.get_jobs.return_value = self.create_getjobs_response()
    with contextlib.nested(
        patch('apache.aurora.client.cli.jobs.Job.create_context', return_value=context),
        patch('apache.aurora.client.cli.context.CLUSTERS', new=self.TEST_CLUSTERS)):
      command_line = AuroraCommandLine()
      command_line.execute(['job', 'status', '*'])
    # Wildcard should have expanded to two jobs, so there should be two calls
    # to check_status, one per expected job key.
    assert api.check_status.call_count == 2
    expected_keys = [
        ('west', 'RoleA', 'test', 'hithere'),
        ('west', 'bozo', 'test', 'hello'),
    ]
    for call, (cluster, role, env, name) in zip(api.check_status.call_args_list, expected_keys):
      job_key = call[0][0]
      assert job_key.cluster == cluster
      assert job_key.role == role
      assert job_key.env == env
      assert job_key.name == name
def test_diff_server_error(self):
    """Test the diff command if the user passes a config with an error in it."""
    options = self.setup_mock_options()
    api, scheduler_proxy = self.create_mock_api()
    # getTasksStatus fails server-side, so diff should stop before populating a config.
    scheduler_proxy.getTasksStatus.return_value = self.create_failed_status_response()
    self.setup_populate_job_config(scheduler_proxy)
    with contextlib.nested(
        patch("apache.aurora.client.api.SchedulerProxy", return_value=scheduler_proxy),
        patch("twitter.common.app.get_options", return_value=options),
        patch("subprocess.call", return_value=0),
        patch("json.loads", return_value=Mock()),
    ) as (_, _, subprocess_patch, _):
      with temporary_file() as config_file:
        config_file.write(self.get_valid_config())
        config_file.flush()
        command_line = AuroraCommandLine()
        exit_code = command_line.execute(
            ["job", "diff", "west/bozo/test/hello", config_file.name])
        assert exit_code == EXIT_INVALID_PARAMETER
        # In this error case, we should have called the server getTasksStatus;
        # but since it fails, we shouldn't call populateJobConfig or subprocess.
        scheduler_proxy.getTasksStatus.assert_called_with(
            TaskQuery(jobKeys=[JobKey(role="bozo", environment="test", name="hello")],
                      statuses=ACTIVE_STATES))
        assert scheduler_proxy.populateJobConfig.call_count == 0
        assert subprocess_patch.call_count == 0