def generic_test_successful_hook(self, command_hook):
  """Shared driver for hook tests: run "job create" with the given hook registered,
  and verify both the expected API interactions and that the hook's pre- and
  post-command callbacks actually fired.
  """
  mock_context = FakeAuroraCommandContext()
  with patch("apache.aurora.client.cli.jobs.Job.create_context", return_value=mock_context):
    mock_query = self.create_mock_query()
    # Queue two status results: one pre-create (INIT) and one post-create (RUNNING),
    # so the job monitor sees the job reach the --wait-until state.
    mock_context.add_expected_status_query_result(
        self.create_mock_status_query_result(ScheduleStatus.INIT))
    mock_context.add_expected_status_query_result(
        self.create_mock_status_query_result(ScheduleStatus.RUNNING))
    api = mock_context.get_api("west")
    api.create_job.return_value = self.get_createjob_response()
    with temporary_file() as fp:
      fp.write(self.get_valid_config())
      fp.flush()
      cmd = AuroraCommandLine()
      cmd.execute([
          "job",
          "create",
          "--wait-until=RUNNING",
          "west/bozo/test/hello",
          fp.name
      ])
      self.assert_create_job_called(api)
      self.assert_scheduler_called(api, mock_query, 1)
      # The hook must have run on both sides of the command.
      assert command_hook.ran_pre
      assert command_hook.ran_post
def test_kill_job_with_instances_batched_large(self):
  """Test kill client-side API logic."""
  mock_context = FakeAuroraCommandContext()
  with contextlib.nested(
      patch('threading._Event.wait'),
      patch('apache.aurora.client.cli.jobs.Job.create_context', return_value=mock_context),
      patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS)):
    api = mock_context.get_api('west')
    status_result = self.create_status_call_result()
    mock_context.add_expected_status_query_result(status_result)
    api.kill_job.return_value = self.get_kill_job_response()
    # Final status result reports KILLED so the kill monitor can terminate.
    mock_context.add_expected_status_query_result(self.create_status_call_result(
        self.create_mock_task(ScheduleStatus.KILLED)))
    with temporary_file() as fp:
      fp.write(self.get_valid_config())
      fp.flush()
      cmd = AuroraCommandLine()
      cmd.execute(['job', 'kill', '--config=%s' % fp.name, 'west/bozo/test/hello/0,2,4-13'])
      # Now check that the right API calls got made.
      # The 12 requested instances are killed in batches; the final batch is [12, 13].
      assert api.kill_job.call_count == 3
      api.kill_job.assert_called_with(AuroraJobKey.from_path('west/bozo/test/hello'),
          [12, 13])
      # Expect total 5 calls (3 from JobMonitor).
      self.assert_scheduler_called(api, self.get_expected_task_query([12, 13]), 5)
def test_successful_status_output_no_metadata(self):
  """Test the status command more deeply: in a request with a fully specified
  job, it should end up doing a query using getTasksWithoutConfigs."""
  mock_context = FakeAuroraCommandContext()
  mock_context.add_expected_status_query_result(
      self.create_status_null_metadata())
  with contextlib.nested(
      patch('apache.aurora.client.cli.jobs.Job.create_context', return_value=mock_context),
      patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS)):
    cmd = AuroraCommandLine()
    cmd.execute(['job', 'status', 'west/bozo/test/hello'])
    # Timestamps vary run to run, so mask them out before comparing.
    actual = re.sub("\\d\\d:\\d\\d:\\d\\d", "##:##:##", '\n'.join(mock_context.get_out()))
    expected = textwrap.dedent("""\
        Active tasks (3):
        \tTask:
        \t cpus: 2, ram: 2 MB, disk: 2 MB
        \t events:
        \t 1970-11-23 ##:##:## RUNNING: Hi there
        \tTask:
        \t cpus: 2, ram: 2 MB, disk: 2 MB
        \t events:
        \t 1970-11-23 ##:##:## RUNNING: Hi there
        \tTask:
        \t cpus: 2, ram: 2 MB, disk: 2 MB
        \t events:
        \t 1970-11-23 ##:##:## RUNNING: Hi there
        Inactive tasks (0):
        """)
    assert actual == expected
def test_simple_successful_create_job_with_bindings(self):
  """Run a test of the "create" command against a mocked-out API:
  Verifies that the creation command sends the right API RPCs, and performs the correct
  tests on the result."""
  mock_context = FakeAuroraCommandContext()
  with contextlib.nested(
      patch('threading._Event.wait'),
      patch('apache.aurora.client.cli.jobs.Job.create_context', return_value=mock_context)):
    mock_query = self.create_mock_query()
    # Two status results: one pre-create poll (PENDING), one post-create (RUNNING).
    mock_context.add_expected_status_query_result(
        self.create_mock_status_query_result(ScheduleStatus.PENDING))
    mock_context.add_expected_status_query_result(
        self.create_mock_status_query_result(ScheduleStatus.RUNNING))
    api = mock_context.get_api('west')
    api.create_job.return_value = self.get_createjob_response()
    # This is the real test: invoke create as if it had been called by the command line.
    with temporary_file() as fp:
      fp.write(self.get_unbound_test_config())
      fp.flush()
      cmd = AuroraCommandLine()
      # The config is unbound; every binding it needs is supplied via --bind.
      cmd.execute(['job', 'create', '--wait-until=RUNNING',
          '--bind', 'cluster_binding=west',
          '--bind', 'instances_binding=20', '--bind', 'TEST_BATCH=1',
          'west/bozo/test/hello', fp.name])
      # Now check that the right API calls got made.
      # Check that create_job was called exactly once, with an AuroraConfig parameter.
      self.assert_create_job_called(api)
      self.assert_scheduler_called(api, mock_query, 2)
def test_plugin_runs_in_create_job(self): """Run a test of the "create" command against a mocked-out API: Verifies that the creation command sends the right API RPCs, and performs the correct tests on the result.""" # We'll patch out create_context, which will give us a fake context # object, and everything can be stubbed through that. mock_context = FakeAuroraCommandContext() with patch('apache.aurora.client.cli.jobs.Job.create_context', return_value=mock_context): # After making the client, create sets up a job monitor. # The monitor uses TaskQuery to get the tasks. It's called at least twice:once before # the job is created, and once after. So we need to set up mocks for the query results. mock_query = self.create_mock_query() mock_context.add_expected_status_query_result( self.create_mock_status_query_result(ScheduleStatus.INIT)) mock_context.add_expected_status_query_result( self.create_mock_status_query_result(ScheduleStatus.RUNNING)) api = mock_context.get_api('west') api.create_job.return_value = self.get_createjob_response() # This is the real test: invoke create as if it had been called by the command line. with temporary_file() as fp: fp.write(self.get_valid_config()) fp.flush() cmd = AuroraCommandLine() cmd.register_plugin(BogusPlugin()) cmd.execute(['job', 'create', '--bogosity=maximum', '--wait_until=RUNNING', 'west/bozo/test/hello', fp.name]) # Now check that the right API calls got made. # Check that create_job was called exactly once, with an AuroraConfig parameter. self.assert_create_job_called(api) self.assert_scheduler_called(api, mock_query, 2) # Check that the plugin did its job. assert mock_context.bogosity == "maximum"
def test_create_job_failed(self):
  """Verify that "job create" exits with EXIT_COMMAND_FAILURE when the scheduler
  rejects the create RPC, and that only the single pre-create status query is made.
  """
  fake_context = FakeAuroraCommandContext()
  with patch('apache.aurora.client.cli.jobs.Job.create_context', return_value=fake_context):
    # Only the pre-create status poll happens, since create itself fails.
    fake_context.add_expected_status_query_result(
        self.create_mock_status_query_result(ScheduleStatus.INIT))
    mock_api = fake_context.get_api('west')
    mock_api.create_job.return_value = self.get_failed_createjob_response()
    # Drive the command exactly as the command line would.
    with temporary_file() as config_file:
      config_file.write(self.get_valid_config())
      config_file.flush()
      exit_code = AuroraCommandLine().execute(
          ['job', 'create', '--wait-until=RUNNING', 'west/bozo/test/hello', config_file.name])
      assert exit_code == EXIT_COMMAND_FAILURE
      # create_job went out exactly once, with an AuroraConfig parameter...
      self.assert_create_job_called(mock_api)
      # ...and getTasksStatus was called once, before the create_job.
      assert mock_api.scheduler_proxy.getTasksStatus.call_count == 1
def test_killall_job_wait_until_timeout(self):
  """Test kill client-side API logic."""
  mock_context = FakeAuroraCommandContext()
  mock_scheduler_proxy = Mock()
  with contextlib.nested(
      patch('threading._Event.wait'),
      patch('apache.aurora.client.cli.jobs.Job.create_context', return_value=mock_context),
      patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS)):
    api = mock_context.get_api('west')
    mock_scheduler_proxy.getTasksWithoutConfigs.return_value = self.create_status_call_result()
    api.kill_job.return_value = self.get_kill_job_response()
    mock_scheduler_proxy.killTasks.return_value = self.get_kill_job_response()
    # Tasks never leave RUNNING, so the kill monitor exhausts all 8 polls and times out.
    for _ in range(8):
      mock_context.add_expected_status_query_result(self.create_status_call_result(
          self.create_mock_task(ScheduleStatus.RUNNING)))
    with temporary_file() as fp:
      fp.write(self.get_valid_config())
      fp.flush()
      cmd = AuroraCommandLine()
      assert EXIT_TIMEOUT == cmd.execute(
          ['job', 'killall', '--no-batching', '--config=%s' % fp.name, 'west/bozo/test/hello'])
      # Now check that the right API calls got made.
      assert api.kill_job.call_count == 1
      # --no-batching kills all instances in one call (instances=None).
      api.kill_job.assert_called_with(AuroraJobKey.from_path('west/bozo/test/hello'), None)
      self.assert_scheduler_called(api, self.get_expected_task_query(), 8)
def test_create_job_startup_fails(self):
  """Verify that "job create" fails with an error message when the create RPC
  succeeds but the subsequent status check returns an error response."""
  mock_context = FakeAuroraCommandContext()
  with contextlib.nested(
      patch('threading._Event.wait'),
      patch('apache.aurora.client.cli.jobs.Job.create_context', return_value=mock_context)):
    mock_context.add_expected_status_query_result(
        self.create_mock_status_query_result(ScheduleStatus.PENDING))
    mock_context.add_expected_status_query_result(
        self.create_mock_status_query_result(ScheduleStatus.RUNNING))

    # We need to override the side_effect behavior of check_status in the context.
    def check_status_side_effect(*args):
      return self.create_error_response()

    mock_context.get_api("west").check_status.side_effect = check_status_side_effect
    api = mock_context.get_api('west')
    api.create_job.return_value = self.get_createjob_response()
    with temporary_file() as fp:
      fp.write(self.get_valid_config())
      fp.flush()
      cmd = AuroraCommandLine()
      result = cmd.execute(['job', 'create', '--wait-until=RUNNING',
          'west/bozo/test/hello', fp.name])
      assert result == EXIT_COMMAND_FAILURE
      # Nothing on stdout; the failure is reported on stderr.
      assert mock_context.get_out() == []
      assert mock_context.get_err() == ["Error occurred while creating job west/bozo/test/hello"]
def test_successful_status_output_no_metadata(self):
  """Test the status command more deeply: in a request with a fully specified
  job, it should end up doing a query using getTasksWithoutConfigs."""
  mock_context = FakeAuroraCommandContext()
  mock_context.add_expected_status_query_result(self.create_status_null_metadata())
  with contextlib.nested(
      patch('apache.aurora.client.cli.jobs.Job.create_context', return_value=mock_context),
      patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS)):
    cmd = AuroraCommandLine()
    cmd.execute(['job', 'status', 'west/bozo/test/hello'])
    # Timestamps vary run to run, so mask them out before comparing.
    actual = re.sub("\\d\\d:\\d\\d:\\d\\d", "##:##:##", '\n'.join(mock_context.get_out()))
    expected = textwrap.dedent("""\
        Active tasks (3):
        \tTask role: bozo, env: test, name: woops, instance: 1, status: RUNNING on slavehost
        \t cpus: 2, ram: 2 MB, disk: 2 MB
        \t events:
        \t 1970-11-23 ##:##:## RUNNING: Hi there
        \tTask role: bozo, env: test, name: woops, instance: 2, status: RUNNING on slavehost
        \t cpus: 2, ram: 2 MB, disk: 2 MB
        \t events:
        \t 1970-11-23 ##:##:## RUNNING: Hi there
        \tTask role: bozo, env: test, name: woops, instance: 3, status: RUNNING on slavehost
        \t cpus: 2, ram: 2 MB, disk: 2 MB
        \t events:
        \t 1970-11-23 ##:##:## RUNNING: Hi there
        Inactive tasks (0):
        """)
    assert actual == expected
def test_killall_job_something_else(self):
  """Test kill client-side API logic."""
  mock_context = FakeAuroraCommandContext()
  mock_scheduler_proxy = Mock()
  with contextlib.nested(
      patch('threading._Event.wait'),
      patch('apache.aurora.client.cli.jobs.Job.create_context', return_value=mock_context),
      patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS)):
    api = mock_context.get_api('west')
    api.kill_job.return_value = self.get_kill_job_response()
    mock_context.add_expected_status_query_result(self.create_status_call_result())
    mock_scheduler_proxy.killTasks.return_value = self.get_kill_job_response()
    # Final status result reports KILLED so the kill monitor can terminate.
    mock_context.add_expected_status_query_result(self.create_status_call_result(
        self.create_mock_task(ScheduleStatus.KILLED)))
    with temporary_file() as fp:
      fp.write(self.get_valid_config())
      fp.flush()
      cmd = AuroraCommandLine()
      cmd.execute(['job', 'killall', '--config=%s' % fp.name, 'west/bozo/test/hello'])
      # Now check that the right API calls got made.
      # Default (batched) killall: 4 batches, the last covering instances 15-19.
      assert api.kill_job.call_count == 4
      instances = [15, 16, 17, 18, 19]
      api.kill_job.assert_called_with(AuroraJobKey.from_path('west/bozo/test/hello'),
          instances)
      self.assert_scheduler_called(api, self.get_expected_task_query(instances), 6)
def test_skip_hooks_in_create(self):
  """Run a test of create, with a hook that should forbid running create, but
  with a user who's covered by one of the hook skip exception rules - so it
  should succeed.
  """
  GlobalCommandHookRegistry.reset()
  command_hook = HookForTesting(True)
  GlobalCommandHookRegistry.register_command_hook(command_hook)
  # The second hook would block the command, but it's listed in the skip rules.
  command_hook_two = SecondHookForTesting(False)
  GlobalCommandHookRegistry.register_command_hook(command_hook_two)
  mock_response = Mock()
  mock_context = FakeAuroraCommandContext()
  with contextlib.nested(
      patch("apache.aurora.client.cli.jobs.Job.create_context", return_value=mock_context),
      patch("requests.get", return_value=mock_response),
      patch("getpass.getuser", return_value="bozo")):
    # Skip-rules JSON fetched by the registry: rule "a" lets users bozo/clown skip
    # hooks test_hook/second for "job killall" and "job create".
    mock_response.json.return_value = {
      "a": {
        "users": ["bozo", "clown"],
        "commands": {"job": ["killall", "create"]},
        "hooks": ["test_hook", "second"]
      },
      "b": {
        "commands": {"user": ["kick"]},
      }
    }
    mock_query = self.create_mock_query()
    mock_context.add_expected_status_query_result(
        self.create_mock_status_query_result(ScheduleStatus.INIT))
    mock_context.add_expected_status_query_result(
        self.create_mock_status_query_result(ScheduleStatus.RUNNING))
    mock_context.get_api("west").check_status.side_effect = (
        lambda x: self.create_mock_status_query_result(ScheduleStatus.RUNNING))
    api = mock_context.get_api("west")
    api.create_job.return_value = self.get_createjob_response()
    GlobalCommandHookRegistry.setup("http://foo.bar")
    with temporary_file() as fp:
      fp.write(self.get_valid_config())
      fp.flush()
      cmd = AuroraCommandLine()
      result = cmd.execute(["job", "create", "--skip-hooks=second", "--wait-until=RUNNING",
          "west/bozo/test/hello", fp.name])
      assert result == 0
      self.assert_create_job_called(api)
      self.assert_scheduler_called(api, mock_query, 1)
      # The non-skipped hook still ran on both sides of the command.
      assert command_hook.ran_pre
      assert command_hook.ran_post
def test_cannot_skip_hooks_in_create(self):
  """This time, the hook shouldn't be skippable, because we use a username
  who isn't allowed by the hook exception rule.
  """
  GlobalCommandHookRegistry.reset()
  command_hook = HookForTesting(True)
  GlobalCommandHookRegistry.register_command_hook(command_hook)
  # This hook blocks the command, and the user below has no right to skip it.
  command_hook_two = SecondHookForTesting(False)
  GlobalCommandHookRegistry.register_command_hook(command_hook_two)
  mock_response = Mock()
  mock_context = FakeAuroraCommandContext()
  with contextlib.nested(
      patch("apache.aurora.client.cli.jobs.Job.create_context", return_value=mock_context),
      patch("requests.get", return_value=mock_response),
      # "beezus" is not in the "users" list of any skip rule below.
      patch("getpass.getuser", return_value="beezus")):
    mock_response.json.return_value = {
      "a": {
        "users": ["bozo", "clown"],
        "commands": {"job": ["killall", "create"]},
        "hooks": ["test_hook", "second"]
      },
      "b": {
        "commands": {"user": ["kick"]},
      }
    }
    mock_context.add_expected_status_query_result(
        self.create_mock_status_query_result(ScheduleStatus.INIT))
    mock_context.add_expected_status_query_result(
        self.create_mock_status_query_result(ScheduleStatus.RUNNING))
    api = mock_context.get_api("west")
    api.create_job.return_value = self.get_createjob_response()
    GlobalCommandHookRegistry.setup("http://foo.bar")
    with temporary_file() as fp:
      fp.write(self.get_valid_config())
      fp.flush()
      cmd = AuroraCommandLine()
      result = cmd.execute(["job", "create", "--skip-hooks=second", "--wait-until=RUNNING",
          "west/bozo/test/hello", fp.name])
      # Check that it returns the right error code, and that create_job didn't get called.
      assert result == EXIT_PERMISSION_VIOLATION
      assert api.create_job.call_count == 0
def test_create_job_failed_output(self):
  """Test that a failed create generates the correct error messages"""
  mock_context = FakeAuroraCommandContext()
  with patch('apache.aurora.client.cli.jobs.Job.create_context', return_value=mock_context):
    # Only the pre-create status poll happens, since create itself fails.
    mock_context.add_expected_status_query_result(
        self.create_mock_status_query_result(ScheduleStatus.INIT))
    api = mock_context.get_api('west')
    # The scheduler rejects the create; the command should surface the error.
    api.create_job.return_value = self.get_failed_createjob_response()
    with temporary_file() as fp:
      fp.write(self.get_valid_config())
      fp.flush()
      cmd = AuroraCommandLine()
      result = cmd.execute(['job', 'create', '--wait-until=RUNNING',
          'west/bozo/test/hello', fp.name])
      assert result == EXIT_COMMAND_FAILURE
      # Nothing on stdout; the scheduler error is reported on stderr.
      # (A leftover debug print of the captured streams was removed here.)
      assert mock_context.get_out() == []
      assert mock_context.get_err() == ["job create failed because of scheduler error"]
def test_kill_job_with_instances_batched(self):
  """Verify that killing an explicit instance range results in one batched kill RPC
  covering exactly the expanded instance list."""
  fake_context = FakeAuroraCommandContext()
  with contextlib.nested(
      patch('apache.aurora.client.cli.jobs.Job.create_context', return_value=fake_context),
      patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS)):
    mock_api = fake_context.get_api('west')
    fake_context.add_expected_status_query_result(self.create_status_call_result())
    mock_api.kill_job.return_value = self.get_kill_job_response()
    with temporary_file() as config_file:
      config_file.write(self.get_valid_config())
      config_file.flush()
      AuroraCommandLine().execute(
          ['job', 'kill', '--config=%s' % config_file.name, 'west/bozo/test/hello/0,2,4-6'])
      # Five instances fit in a single batch, so exactly one kill RPC goes out,
      # with the range 4-6 expanded.
      assert mock_api.kill_job.call_count == 1
      mock_api.kill_job.assert_called_with(
          AuroraJobKey.from_path('west/bozo/test/hello'), [0, 2, 4, 5, 6])
def test_kill_job_with_instances_batched_maxerrors(self):
  """Verify that a batched kill aborts once --max-total-failures is exceeded."""
  fake_context = FakeAuroraCommandContext()
  with contextlib.nested(
      patch('apache.aurora.client.cli.jobs.Job.create_context', return_value=fake_context),
      patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS)):
    mock_api = fake_context.get_api('west')
    fake_context.add_expected_status_query_result(self.create_status_call_result())
    # Every kill RPC fails, driving the error count over the limit.
    mock_api.kill_job.return_value = self.create_error_response()
    with temporary_file() as config_file:
      config_file.write(self.get_valid_config())
      config_file.flush()
      AuroraCommandLine().execute(
          ['job', 'kill', '--max-total-failures=1', '--config=%s' % config_file.name,
           'west/bozo/test/hello/0,2,4-13'])
      # We should have aborted after the second failing batch...
      assert mock_api.kill_job.call_count == 2
      # ...without ever polling task status.
      assert mock_api.scheduler_proxy.getTasksWithoutConfigs.call_count == 0
def test_create_job_delayed(self):
  """Run a test of the "create" command against a mocked-out API:
  this time, make the monitor check status several times before successful completion.
  """
  mock_context = FakeAuroraCommandContext()
  with contextlib.nested(
      patch('threading._Event.wait'),
      patch('apache.aurora.client.cli.jobs.Job.create_context', return_value=mock_context)):
    mock_query = self.create_mock_query()
    # Two PENDING polls before RUNNING: the monitor has to retry before it succeeds.
    for result in [ScheduleStatus.PENDING, ScheduleStatus.PENDING, ScheduleStatus.RUNNING]:
      mock_context.add_expected_status_query_result(self.create_mock_status_query_result(result))
    api = mock_context.get_api('west')
    api.create_job.return_value = self.get_createjob_response()
    with temporary_file() as fp:
      fp.write(self.get_valid_config())
      fp.flush()
      cmd = AuroraCommandLine()
      cmd.execute(['job', 'create', '--wait-until=RUNNING', 'west/bozo/test/hello', fp.name])
      self.assert_create_job_called(api)
      self.assert_scheduler_called(api, mock_query, 3)
def test_kill_job_with_invalid_instances_strict(self):
  """Test kill client-side API logic."""
  mock_context = FakeAuroraCommandContext()
  with contextlib.nested(
      patch('threading._Event.wait'),
      patch('apache.aurora.client.cli.jobs.Job.create_context', return_value=mock_context),
      patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS)):
    api = mock_context.get_api('west')
    self.setup_get_tasks_status_calls(api.scheduler_proxy)
    api.kill_job.return_value = self.get_kill_job_response()
    mock_context.add_expected_status_query_result(self.create_status_call_result(
        self.create_mock_task(ScheduleStatus.KILLED)))
    with temporary_file() as fp:
      fp.write(self.get_valid_config())
      fp.flush()
      cmd = AuroraCommandLine()
      # The range 11-20 names instances the job doesn't have; under --strict that's
      # an error, so the kill must be rejected before any RPC is made.
      cmd.execute(['job', 'kill', '--config=%s' % fp.name, '--no-batching', '--strict',
          'west/bozo/test/hello/0,2,4-6,11-20'])
      # Now check that the right API calls got made.
      assert api.kill_job.call_count == 0
def test_simple_successful_create_job_output(self):
  """Run a test of the "create" command against a mocked-out API:
  Verifies that the creation command generates the correct output.
  """
  mock_context = FakeAuroraCommandContext()
  with contextlib.nested(
      patch('threading._Event.wait'),
      patch('apache.aurora.client.cli.jobs.Job.create_context', return_value=mock_context)):
    # Three status results: pre-create PENDING, then RUNNING twice for the monitor.
    mock_context.add_expected_status_query_result(
        self.create_mock_status_query_result(ScheduleStatus.PENDING))
    mock_context.add_expected_status_query_result(
        self.create_mock_status_query_result(ScheduleStatus.RUNNING))
    mock_context.add_expected_status_query_result(
        self.create_mock_status_query_result(ScheduleStatus.RUNNING))
    api = mock_context.get_api('west')
    api.create_job.return_value = self.get_createjob_response()
    with temporary_file() as fp:
      fp.write(self.get_valid_config())
      fp.flush()
      cmd = AuroraCommandLine()
      result = cmd.execute(['job', 'create', '--wait-until=RUNNING',
          'west/bozo/test/hello', fp.name])
      assert result == EXIT_OK
      # Success message (with the job URL) goes to stdout; stderr stays empty.
      assert mock_context.get_out() == [
          "job create succeeded: job url=http://something_or_other/scheduler/bozo/test/hello"]
      assert mock_context.get_err() == []
def generic_test_successful_hook(self, command_hook):
  """Shared driver for hook tests: run "job create" with the given hook registered,
  and verify both the expected API interactions and that the hook's pre- and
  post-command callbacks actually fired.
  """
  mock_context = FakeAuroraCommandContext()
  with patch("apache.aurora.client.cli.jobs.Job.create_context", return_value=mock_context):
    mock_query = self.create_mock_query()
    # Three status results: pre-create INIT, then RUNNING twice for the monitor.
    mock_context.add_expected_status_query_result(
        self.create_mock_status_query_result(ScheduleStatus.INIT))
    mock_context.add_expected_status_query_result(
        self.create_mock_status_query_result(ScheduleStatus.RUNNING))
    mock_context.add_expected_status_query_result(
        self.create_mock_status_query_result(ScheduleStatus.RUNNING))
    # check_status always reports RUNNING, so the post-create check succeeds.
    mock_context.get_api("west").check_status.side_effect = (
        lambda x: self.create_mock_status_query_result(ScheduleStatus.RUNNING))
    api = mock_context.get_api("west")
    api.create_job.return_value = self.get_createjob_response()
    with temporary_file() as fp:
      fp.write(self.get_valid_config())
      fp.flush()
      cmd = AuroraCommandLine()
      cmd.execute(["job", "create", "--wait-until=RUNNING", "west/bozo/test/hello", fp.name])
      self.assert_create_job_called(api)
      self.assert_scheduler_called(api, mock_query, 1)
      # The hook must have run on both sides of the command.
      assert command_hook.ran_pre
      assert command_hook.ran_post
def test_kill_job_with_instances_batched_maxerrors_output(self):
  """Test kill client-side API logic."""
  mock_context = FakeAuroraCommandContext()
  with contextlib.nested(
      patch('apache.aurora.client.cli.jobs.Job.create_context', return_value=mock_context),
      patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS)):
    api = mock_context.get_api('west')
    status_result = self.create_status_call_result()
    mock_context.add_expected_status_query_result(status_result)
    # Every kill batch fails, so errors accumulate until the maximum is exceeded.
    api.kill_job.return_value = self.create_error_response()
    with temporary_file() as fp:
      fp.write(self.get_valid_config())
      fp.flush()
      cmd = AuroraCommandLine()
      cmd.execute(['job', 'kill', '--max-total-failures=1', '--config=%s' % fp.name,
          'west/bozo/test/hello/0,2,4-13'])
      # One error line per failed batch, then the abort message; stdout stays empty.
      assert mock_context.get_out() == []
      assert mock_context.get_err() == [
          'Kill of shards [0, 2, 4, 5, 6] failed with error:', '\tDamn',
          'Kill of shards [7, 8, 9, 10, 11] failed with error:', '\tDamn',
          'Exceeded maximum number of errors while killing instances']
def test_kill_job_with_instances_batched_output(self):
  """Test kill client-side API logic."""
  mock_context = FakeAuroraCommandContext()
  with contextlib.nested(
      patch('threading._Event.wait'),
      patch('apache.aurora.client.cli.jobs.Job.create_context', return_value=mock_context),
      patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS)):
    api = mock_context.get_api('west')
    status_result = self.create_status_call_result()
    mock_context.add_expected_status_query_result(status_result)
    api.kill_job.return_value = self.get_kill_job_response()
    # Final status result reports KILLED so the kill monitor can terminate.
    mock_context.add_expected_status_query_result(self.create_status_call_result(
        self.create_mock_task(ScheduleStatus.KILLED)))
    with temporary_file() as fp:
      fp.write(self.get_valid_config())
      fp.flush()
      cmd = AuroraCommandLine()
      cmd.execute(['job', 'kill', '--config=%s' % fp.name, 'west/bozo/test/hello/0,2,4-6'])
      # The per-batch success message and final summary go to stdout; stderr is empty.
      assert mock_context.get_out() == ['Successfully killed shards [0, 2, 4, 5, 6]',
          'job kill succeeded']
      assert mock_context.get_err() == []
def test_killall_job_output(self):
  """Test kill output."""
  mock_context = FakeAuroraCommandContext()
  mock_scheduler_proxy = Mock()
  with contextlib.nested(
      patch('threading._Event.wait'),
      patch('apache.aurora.client.cli.jobs.Job.create_context', return_value=mock_context),
      patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS)):
    api = mock_context.get_api('west')
    mock_scheduler_proxy.getTasksWithoutConfigs.return_value = self.create_status_call_result()
    api.kill_job.return_value = self.get_kill_job_response()
    mock_scheduler_proxy.killTasks.return_value = self.get_kill_job_response()
    # Final status result reports KILLED so the kill monitor can terminate.
    mock_context.add_expected_status_query_result(self.create_status_call_result(
        self.create_mock_task(ScheduleStatus.KILLED)))
    with temporary_file() as fp:
      fp.write(self.get_valid_config())
      fp.flush()
      cmd = AuroraCommandLine()
      cmd.execute(['job', 'killall', '--no-batching', '--config=%s' % fp.name,
          'west/bozo/test/hello'])
      # A single success line on stdout, nothing on stderr.
      assert mock_context.get_out() == ['job killall succeeded']
      assert mock_context.get_err() == []
def test_kill_job_with_empty_instances_batched(self):
  """Verify that a batched kill issues no RPCs when the status query reports
  no matching live instances."""
  fake_context = FakeAuroraCommandContext()
  with contextlib.nested(
      patch('threading._Event.wait'),
      patch('apache.aurora.client.cli.jobs.Job.create_context', return_value=fake_context),
      patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS)):
    mock_api = fake_context.get_api('west')
    # Build a getTasksWithoutConfigs response whose task list is empty.
    empty_response = self.create_simple_success_response()
    empty_response.result.scheduleStatusResult = Mock(spec=ScheduleStatusResult)
    empty_response.result.scheduleStatusResult.tasks = []
    fake_context.add_expected_status_query_result(empty_response)
    mock_api.kill_job.return_value = self.get_kill_job_response()
    with temporary_file() as config_file:
      config_file.write(self.get_valid_config())
      config_file.flush()
      AuroraCommandLine().execute(
          ['job', 'kill', '--config=%s' % config_file.name, 'west/bozo/test/hello/0,2,4-13'])
      # With no live instances to kill, no kill RPC should ever be issued.
      assert mock_api.kill_job.call_count == 0
def test_configuration_logging(self):
  """Sets up a log handler, registers it with the logger, and then verifies that
  calls to the client logging methods correctly get captured in the logs.
  """
  mock_log_handler = MockHandler()
  logger = logging.getLogger('aurora_client')
  logger.setLevel(logging.DEBUG)
  logger.addHandler(mock_log_handler)
  mock_context = FakeAuroraCommandContext()
  with patch('apache.aurora.client.cli.jobs.Job.create_context', return_value=mock_context):
    mock_context.add_expected_status_query_result(
        self.create_mock_status_query_result(ScheduleStatus.INIT))
    mock_context.add_expected_status_query_result(
        self.create_mock_status_query_result(ScheduleStatus.RUNNING))
    api = mock_context.get_api('west')
    api.create_job.return_value = self.get_createjob_response()
    with temporary_file() as fp:
      fp.write(self.get_valid_config())
      fp.flush()
      # Snapshot the config file's contents so we can match them in the log below.
      with open(fp.name, "r") as rp:
        lines = rp.readlines()
      cmd = AuroraCommandLine()
      cmd.execute(['job', 'create', 'west/bozo/test/hello', fp.name])
      # Check that the contents of the config file were logged, as expected.
      expected_config_msg = "Config: %s" % lines
      assert any(expected_config_msg == r.getMessage() for r in mock_log_handler.logs)
      # Check that things were logged correctly:
      # there should be at least two entries, with the clientid and username;
      # and one entry should log the command being invoked.
      assert any(("'job', 'create', 'west/bozo/test/hello'" in r.getMessage())
          for r in mock_log_handler.logs)
      assert mock_log_handler.logs[0].clientid == mock_log_handler.logs[1].clientid
      assert mock_log_handler.logs[0].user == mock_log_handler.logs[1].user
def test_create_job_with_failed_hook(self):
  """A hook whose pre-command check fails must block the create entirely:
  no create RPC, and no post-command hook invocation."""
  GlobalCommandHookRegistry.reset()
  # HookForTesting(False) => the hook's pre-command check reports failure.
  command_hook = HookForTesting(False)
  GlobalCommandHookRegistry.register_command_hook(command_hook)
  mock_context = FakeAuroraCommandContext()
  with patch("apache.aurora.client.cli.jobs.Job.create_context", return_value=mock_context):
    mock_context.add_expected_status_query_result(
        self.create_mock_status_query_result(ScheduleStatus.INIT))
    mock_context.add_expected_status_query_result(
        self.create_mock_status_query_result(ScheduleStatus.RUNNING))
    api = mock_context.get_api("west")
    api.create_job.return_value = self.get_createjob_response()
    with temporary_file() as fp:
      fp.write(self.get_valid_config())
      fp.flush()
      cmd = AuroraCommandLine()
      result = cmd.execute(["job", "create", "--wait-until=RUNNING",
          "west/bozo/test/hello", fp.name])
      # NOTE(review): bare 1 here while sibling tests use named EXIT_* constants —
      # presumably the hook-failure exit code; confirm whether a constant exists.
      assert result == 1
      # The failed pre-hook prevented the create RPC...
      assert api.create_job.call_count == 0
      # ...and the post-hook never ran.
      assert command_hook.ran_pre
      assert not command_hook.ran_post
def test_create_job_delayed(self):
  """Run a test of the "create" command against a mocked-out API:
  this time, make the monitor check status several times before successful completion.
  """
  mock_context = FakeAuroraCommandContext()
  with contextlib.nested(
      patch('time.sleep'),
      patch('apache.aurora.client.cli.jobs.Job.create_context', return_value=mock_context)):
    mock_query = self.create_mock_query()
    # The monitor must poll through INIT and two PENDINGs before seeing RUNNING.
    for result in [ScheduleStatus.INIT, ScheduleStatus.PENDING, ScheduleStatus.PENDING,
        ScheduleStatus.RUNNING, ScheduleStatus.FINISHED]:
      mock_context.add_expected_status_query_result(self.create_mock_status_query_result(result))
    api = mock_context.get_api('west')
    api.create_job.return_value = self.get_createjob_response()
    with temporary_file() as fp:
      fp.write(self.get_valid_config())
      fp.flush()
      cmd = AuroraCommandLine()
      # NOTE(review): '--wait_until' (underscore) differs from the '--wait-until'
      # spelling used by most other tests here — confirm the CLI accepts this form.
      cmd.execute(['job', 'create', '--wait_until=RUNNING',
          'west/mchucarroll/test/hello', fp.name])
      # Now check that the right API calls got made.
      # Check that create_job was called exactly once, with an AuroraConfig parameter.
      self.assert_create_job_called(api)
      self.assert_scheduler_called(api, mock_query, 4)
def test_simple_successful_create_job(self): """Run a test of the "create" command against a mocked-out API: Verifies that the creation command sends the right API RPCs, and performs the correct tests on the result.""" # We'll patch out create_context, which will give us a fake context # object, and everything can be stubbed through that. mock_context = FakeAuroraCommandContext() with contextlib.nested( # TODO(maxim): Patching threading.Event with all possible namespace/patch/mock # combinations did not produce the desired effect. Investigate why (AURORA-510) patch('threading._Event.wait'), patch('apache.aurora.client.cli.jobs.Job.create_context', return_value=mock_context)): # After making the client, create sets up a job monitor. # The monitor uses TaskQuery to get the tasks. It's called at least twice:once before # the job is created, and once after. So we need to set up mocks for the query results. mock_query = self.create_mock_query() mock_context.add_expected_status_query_result( self.create_mock_status_query_result(ScheduleStatus.PENDING)) mock_context.add_expected_status_query_result( self.create_mock_status_query_result(ScheduleStatus.RUNNING)) api = mock_context.get_api('west') api.create_job.return_value = self.get_createjob_response() # This is the real test: invoke create as if it had been called by the command line. with temporary_file() as fp: fp.write(self.get_valid_config()) fp.flush() cmd = AuroraCommandLine() cmd.execute(['job', 'create', '--wait-until=RUNNING', 'west/bozo/test/hello', fp.name]) # Now check that the right API calls got made. # Check that create_job was called exactly once, with an AuroraConfig parameter. self.assert_create_job_called(api) self.assert_scheduler_called(api, mock_query, 2)
def test_empty_plugins_in_create_job(self):
  """Installs a plugin that doesn't implement any of the plugin methods.
  Prior to AURORA-362, this would cause the client to crash with an empty
  argument list.
  """
  mock_context = FakeAuroraCommandContext()
  with patch('apache.aurora.client.cli.jobs.Job.create_context', return_value=mock_context):
    mock_query = self.create_mock_query()
    mock_context.add_expected_status_query_result(
        self.create_mock_status_query_result(ScheduleStatus.INIT))
    mock_context.add_expected_status_query_result(
        self.create_mock_status_query_result(ScheduleStatus.RUNNING))
    api = mock_context.get_api('west')
    api.create_job.return_value = self.get_createjob_response()
    with temporary_file() as fp:
      fp.write(self.get_valid_config())
      fp.flush()
      cmd = AuroraCommandLine()
      # The no-op plugin must not disturb normal command processing.
      cmd.register_plugin(EmptyPlugin())
      cmd.execute(['job', 'create', '--wait-until=RUNNING', 'west/bozo/test/hello', fp.name])
      self.assert_create_job_called(api)
      self.assert_scheduler_called(api, mock_query, 1)
def test_command_invocation_logging(self):
  """Sets up a log handler, registers it with the logger, and then verifies
  that calls to the client logging methods correctly get captured in the logs.
  """
  handler = MockHandler()
  logger = logging.getLogger('aurora_client')
  logger.setLevel(logging.DEBUG)
  logger.addHandler(handler)
  # Patch out create_context so everything is stubbed through a fake context.
  fake_context = FakeAuroraCommandContext()
  with patch('apache.aurora.client.cli.jobs.Job.create_context',
             return_value=fake_context):
    # The job monitor created by "create" queries task status at least twice
    # (before and after the job exists), so queue two status results.
    for status in (ScheduleStatus.INIT, ScheduleStatus.RUNNING):
      fake_context.add_expected_status_query_result(
          self.create_mock_status_query_result(status))
    api = fake_context.get_api('west')
    api.create_job.return_value = self.get_createjob_response()
    with temporary_file() as fp:
      fp.write(self.get_valid_config())
      fp.flush()
      cmd = AuroraCommandLine()
      cmd.execute(
          ['job', 'create', '--wait-until=RUNNING', 'west/bozo/test/hello', fp.name])
  # Check that things were logged correctly: there should be at least two
  # entries carrying a matching clientid and username, and one entry should
  # record the command line being invoked.
  assert any(("'job', 'create', '--wait-until=RUNNING', 'west/bozo/test/hello'"
              in record.getMessage()) for record in handler.logs)
  assert handler.logs[0].clientid == handler.logs[1].clientid
  assert handler.logs[0].user == handler.logs[1].user
# NOTE(review): this class defines a SECOND method with this exact name later in
# the file; under Python class-body semantics the later definition silently
# replaces this one, so this test never runs until the duplicate is renamed.
# (This version's expected output includes the per-task "job" field and
# instanceId 1 for task_1, unlike the later copy.)
def test_successful_status_json_output_no_metadata(self): """Test the status command more deeply: in a request with a fully specified job, it should end up doing a query using getTasksWithoutConfigs.""" mock_context = FakeAuroraCommandContext() mock_context.add_expected_status_query_result(self.get_task_status_json()) with contextlib.nested( patch('apache.aurora.client.cli.jobs.Job.create_context', return_value=mock_context), patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS)): cmd = AuroraCommandLine() cmd.execute(['job', 'status', '--write-json', 'west/bozo/test/hello']) actual = re.sub("\\d\\d:\\d\\d:\\d\\d", "##:##:##", '\n'.join(mock_context.get_out())) expected = textwrap.dedent("""\ [ { "active": [ { "status": "RUNNING", "assignedTask": { "task": { "isService": false, "environment": "prod", "requestedPorts": [ "http" ], "jobName": "flibber", "priority": 7, "owner": { "role": "nobody" }, "job": { "environment": "prod", "role": "nobody", "name": "flibber" }, "production": false, "diskMb": 4096, "ramMb": 2048, "maxTaskFailures": 3, "numCpus": 2 }, "taskId": "task_0", "instanceId": 0, "assignedPorts": { "http": 1001 }, "slaveHost": "junk.nothing", "slaveId": "random_machine_id" }, "ancestorId": "random_task_ancestor0", "taskEvents": [ { "status": "PENDING", "timestamp": 123456, "message": "looking for a host" }, { "status": "ASSIGNED", "timestamp": 123466, "message": "found a host" }, { "status": "RUNNING", "timestamp": 123476, "message": "running" } ], "failureCount": 4 }, { "status": "RUNNING", "assignedTask": { "task": { "isService": false, "environment": "prod", "requestedPorts": [ "http" ], "jobName": "flibber", "priority": 7, "owner": { "role": "nobody" }, "job": { "environment": "prod", "role": "nobody", "name": "flibber" }, "production": false, "diskMb": 4096, "ramMb": 2048, "maxTaskFailures": 3, "numCpus": 2 }, "taskId": "task_1", "instanceId": 1, "assignedPorts": { "http": 1001 }, "slaveHost": "junk.nothing", "slaveId": 
"random_machine_id" }, "ancestorId": "random_task_ancestor1", "taskEvents": [ { "status": "PENDING", "timestamp": 234567, "message": "looking for a host" }, { "status": "ASSIGNED", "timestamp": 234577, "message": "found a host" }, { "status": "RUNNING", "timestamp": 234587, "message": "running" } ], "failureCount": 5 } ], "job": "west/bozo/test/hello", "inactive": [] } ]""") assert actual == expected
def test_successful_status_json_output_no_metadata(self): """Test the status command more deeply: in a request with a fully specified job, it should end up doing a query using getTasksWithoutConfigs.""" mock_context = FakeAuroraCommandContext() mock_context.add_expected_status_query_result( self.get_task_status_json()) with contextlib.nested( patch('apache.aurora.client.cli.jobs.Job.create_context', return_value=mock_context), patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS)): cmd = AuroraCommandLine() cmd.execute( ['job', 'status', '--write-json', 'west/bozo/test/hello']) actual = re.sub("\\d\\d:\\d\\d:\\d\\d", "##:##:##", '\n'.join(mock_context.get_out())) expected = textwrap.dedent("""\ [ { "active": [ { "status": "RUNNING", "assignedTask": { "task": { "isService": false, "environment": "prod", "requestedPorts": [ "http" ], "jobName": "flibber", "priority": 7, "owner": { "role": "nobody" }, "production": false, "diskMb": 4096, "ramMb": 2048, "maxTaskFailures": 3, "numCpus": 2 }, "taskId": "task_0", "instanceId": 0, "assignedPorts": { "http": 1001 }, "slaveHost": "junk.nothing", "slaveId": "random_machine_id" }, "ancestorId": "random_task_ancestor0", "taskEvents": [ { "status": "PENDING", "timestamp": 123456, "message": "looking for a host" }, { "status": "ASSIGNED", "timestamp": 123466, "message": "found a host" }, { "status": "RUNNING", "timestamp": 123476, "message": "running" } ], "failureCount": 4 }, { "status": "RUNNING", "assignedTask": { "task": { "isService": false, "environment": "prod", "requestedPorts": [ "http" ], "jobName": "flibber", "priority": 7, "owner": { "role": "nobody" }, "production": false, "diskMb": 4096, "ramMb": 2048, "maxTaskFailures": 3, "numCpus": 2 }, "taskId": "task_1", "instanceId": 0, "assignedPorts": { "http": 1001 }, "slaveHost": "junk.nothing", "slaveId": "random_machine_id" }, "ancestorId": "random_task_ancestor1", "taskEvents": [ { "status": "PENDING", "timestamp": 234567, "message": "looking for a 
host" }, { "status": "ASSIGNED", "timestamp": 234577, "message": "found a host" }, { "status": "RUNNING", "timestamp": 234587, "message": "running" } ], "failureCount": 5 } ], "job": "west/bozo/test/hello", "inactive": [] } ]""") assert actual == expected