def test_consume_results_sla_failure_continue(self, mock_sla):
    """SLA failures must not abort the runner when abort_on_sla_failure=False."""
    sla_checker = mock.MagicMock()
    mock_sla.return_value = sla_checker
    # Two passing iterations followed by two SLA violations.
    sla_checker.add_iteration.side_effect = [True, True, False, False]

    key = {"kw": {"fake": 2}, "name": "fake", "pos": 0}
    task = mock.MagicMock()
    config = {
        "a.benchmark": [{"context": {"context_a": {"a": 1}}}],
    }

    runner = mock.MagicMock()
    runner.result_queue = collections.deque([1, 2, 3, 4])
    is_done = mock.MagicMock()
    is_done.isSet.side_effect = [False, False, False, False, True]

    eng = engine.BenchmarkEngine(config, task, abort_on_sla_failure=False)
    eng.duration = 123
    eng.full_duration = 456
    eng.consume_results(key, task, is_done, {}, runner)

    mock_sla.assert_called_once_with({"fake": 2})
    # Despite the SLA failures, the runner is never aborted.
    self.assertEqual(0, runner.abort.call_count)
def test__prepare_context(self, mock_meta):
    """_prepare_context merges the scenario context over the defaults."""
    default_context = {"a": 1, "b": 2}
    mock_meta.return_value = default_context
    task = mock.MagicMock()
    name = "a.benchmark"
    context = {"b": 3, "c": 4}
    endpoint = mock.MagicMock()
    config = {
        "a.benchmark": [{"context": {"context_a": {"a": 1}}}],
    }
    eng = engine.BenchmarkEngine(config, task)

    result = eng._prepare_context(context, name, endpoint)

    # Expected config: defaults + implicit "users" entry + scenario overrides.
    merged = copy.deepcopy(default_context)
    merged.setdefault("users", {})
    merged.update(context)
    expected_result = {
        "task": task,
        "admin": {"endpoint": endpoint},
        "scenario_name": name,
        "config": merged,
    }
    self.assertEqual(result, expected_result)
    mock_meta.assert_called_once_with(name, "context")
def test__prepare_context_with_existing_users(self, mock_meta):
    """With pre-created users, the context carries them instead of defaults."""
    mock_meta.return_value = {}
    task = mock.MagicMock()
    name = "a.benchmark"
    context = {"b": 3, "c": 4}
    endpoint = mock.MagicMock()
    config = {
        "a.benchmark": [{"context": {"context_a": {"a": 1}}}],
    }
    existing_users = [mock.MagicMock()]
    eng = engine.BenchmarkEngine(config, task, users=existing_users)

    result = eng._prepare_context(context, name, endpoint)

    merged = {"existing_users": existing_users}
    merged.update(context)
    expected_result = {
        "task": task,
        "admin": {"endpoint": endpoint},
        "scenario_name": name,
        "config": merged,
    }
    self.assertEqual(result, expected_result)
    mock_meta.assert_called_once_with(name, "context")
def test_consume_results(self, mock_sla):
    """Every queued result is fed to the SLA checker in order."""
    sla_checker = mock.MagicMock()
    mock_sla.return_value = sla_checker
    key = {"kw": {"fake": 2}, "name": "fake", "pos": 0}
    task = mock.MagicMock()
    config = {
        "a.benchmark": [{"context": {"context_a": {"a": 1}}}],
    }
    runner = mock.MagicMock()
    runner.result_queue = collections.deque([1, 2])
    is_done = mock.MagicMock()
    is_done.isSet.side_effect = [False, False, True]

    eng = engine.BenchmarkEngine(config, task)
    eng.duration = 123
    eng.full_duration = 456
    eng.consume_results(key, task, is_done, {}, runner)

    mock_sla.assert_called_once_with({"fake": 2})
    self.assertEqual([mock.call(1), mock.call(2)],
                     sla_checker.add_iteration.mock_calls)
def test_run_exception_is_logged(self, mock_ctx_setup, mock_ctx_cleanup,
                                 mock_runner, mock_scenario,
                                 mock_consume, mock_log):
    """A context setup failure is logged once per benchmark."""
    mock_ctx_setup.side_effect = Exception
    config = {
        "a.benchmark": [{"context": {"context_a": {"a": 1}}}],
        "b.benchmark": [{"context": {"context_b": {"b": 2}}}],
    }
    task = mock.MagicMock()

    engine.BenchmarkEngine(config, task).run()

    # One logged exception for each of the two configured benchmarks.
    self.assertEqual(2, mock_log.exception.call_count)
def test_schema_is_valid(self,
                         mock_benchmark_engine__validate_config_semantic):
    """All sample task files must pass engine validation.

    Walks the samples tree, validates every YAML/JSON task file, and
    checks that every rally-shipped benchmark scenario has at least one
    sample.
    """
    scenarios = set()

    for dirname, dirnames, filenames in os.walk(self.samples_path):
        for filename in filenames:
            full_path = os.path.join(dirname, filename)

            # NOTE(hughsaunders): Skip non config files
            # (bug https://bugs.launchpad.net/rally/+bug/1314369)
            # Raw string so "\." is a literal dot, not an invalid
            # string escape sequence.
            if not re.search(r"\.(ya?ml|json)$", filename, flags=re.I):
                continue

            with open(full_path) as task_file:
                try:
                    task_config = yaml.safe_load(task_file.read())
                    eng = engine.BenchmarkEngine(task_config,
                                                 mock.MagicMock())
                    eng.validate()
                except Exception:
                    print(traceback.format_exc())
                    # self.fail() expresses unconditional failure more
                    # clearly than assertTrue(False).
                    self.fail("Wrong task config %s" % full_path)
                else:
                    scenarios.update(task_config.keys())

    # TODO(boris-42): We should refactor scenarios framework add "_" to
    # all non-benchmark methods.. Then this test will pass.
    missing = set(base.Scenario.list_benchmark_scenarios()) - scenarios
    # check missing scenario is not from plugin
    missing = [scenario for scenario in list(missing)
               if base.Scenario.get_by_name(scenario.split(".")[0]).
               __module__.startswith("rally")]
    self.assertEqual(missing, [],
                     "These scenarios don't have samples: %s" % missing)
def test__validate_config_semantic(self, mock_deployment_get, mock_helper,
                                   mock_userctx, mock_osclients):
    """Semantic validation builds clients and checks every scenario run."""
    mock_userctx.UserGenerator = fakes.FakeUserContext
    mock_osclients.return_value = mock.MagicMock()
    config = {
        "a": [mock.MagicMock(), mock.MagicMock()],
        "b": [mock.MagicMock()],
    }
    fake_task = mock.MagicMock()
    eng = engine.BenchmarkEngine(config, fake_task)
    eng.admin = "admin"

    eng._validate_config_semantic(config)

    # Clients are created for the admin and for the fake user endpoint.
    mock_osclients.assert_has_calls([
        mock.call("admin"),
        mock.call(fakes.FakeUserContext.user["endpoint"]),
    ])
    mock_deployment_get.assert_called_once_with(fake_task["uuid"])

    admin = user = mock_osclients.return_value
    fake_deployment = mock_deployment_get.return_value
    # One helper call per (scenario name, position) pair.
    expected = [
        mock.call(admin, user, name, pos, fake_deployment, kw)
        for name in ("a", "b")
        for pos, kw in enumerate(config[name])
    ]
    mock_helper.assert_has_calls(expected, any_order=True)
def start_task(deploy_uuid, config, task=None):
    """Start a task.

    Task is a list of benchmarks that will be called one by one, results of
    execution will be stored in DB.

    :param deploy_uuid: UUID of the deployment
    :param config: a dict with a task configuration
    :param task: Task object; if None, a new Task is created for the
                 deployment
    """
    deployment = objects.Deployment.get(deploy_uuid)
    task = task or objects.Task(deployment_uuid=deploy_uuid)
    LOG.info("Benchmark Task %s on Deployment %s" % (task['uuid'],
                                                     deployment['uuid']))
    benchmark_engine = engine.BenchmarkEngine(config, task)
    endpoint = deployment['endpoints']
    try:
        benchmark_engine.bind(endpoint)
        benchmark_engine.validate()
        benchmark_engine.run()
    except exceptions.InvalidTaskException:
        # NOTE(boris-42): We don't log anything, because it's normal situation
        #                 that user put wrong config.
        pass
    except Exception:
        # Any other failure may have left the deployment half-modified:
        # mark it inconsistent before re-raising.
        deployment.update_status(consts.DeployStatus.DEPLOY_INCONSISTENT)
        raise
def test_schema_is_valid(self, mock_semantic):
    """Every sample task under doc/samples/tasks must validate cleanly."""
    samples_path = os.path.join(os.path.dirname(__file__), "..", "..",
                                "doc", "samples", "tasks")
    scenarios = set()

    for dirname, dirnames, filenames in os.walk(samples_path):
        for filename in filenames:
            full_path = os.path.join(dirname, filename)
            with open(full_path) as task_file:
                try:
                    task_config = yaml.safe_load(task_file.read())
                    eng = engine.BenchmarkEngine(task_config,
                                                 mock.MagicMock())
                    eng.validate()
                except Exception:
                    print(traceback.format_exc())
                    # self.fail() expresses unconditional failure more
                    # clearly than assertTrue(False).
                    self.fail("Wrong task config %s" % full_path)
                else:
                    scenarios.update(task_config.keys())

    # TODO(boris-42): We should refactor scenarios framework add "_" to
    # all non-benchmark methods.. Then this test will pass.
    missing = set(base.Scenario.list_benchmark_scenarios()) - scenarios
    # set() literal instead of the redundant set([]).
    self.assertEqual(missing, set(),
                     "These scenarios don't have samples: %s" % missing)
def test_schema_is_valid(
        self, mock_benchmark_engine__validate_config_semantic):
    """Gate job task files (with optional args files) must validate."""
    discover.load_plugins(os.path.join(self.rally_jobs_path, "plugins"))

    for filename in ["rally.yaml", "rally-neutron.yaml",
                     "rally-zaqar.yaml", "rally-designate.yaml"]:
        full_path = os.path.join(self.rally_jobs_path, filename)
        with open(full_path) as task_file:
            try:
                args_file = os.path.join(
                    self.rally_jobs_path,
                    filename.rsplit(".", 1)[0] + "_args.yaml")

                args = {}
                if os.path.exists(args_file):
                    # Close the args file deterministically instead of
                    # leaking the handle via yaml.safe_load(open(...)).
                    with open(args_file) as args_fh:
                        args = yaml.safe_load(args_fh.read())
                    if not isinstance(args, dict):
                        raise TypeError(
                            "args file %s must be dict in yaml or json "
                            "presentation" % args_file)

                task = api.Task.render_template(task_file.read(), **args)
                task = yaml.safe_load(task)

                eng = engine.BenchmarkEngine(task, mock.MagicMock())
                eng.validate()
            except Exception:
                print(traceback.format_exc())
                self.fail("Wrong task input file: %s" % full_path)
def start(cls, deployment, config, task=None, abort_on_sla_failure=False):
    """Start a task.

    Task is a list of benchmarks that will be called one by one, results of
    execution will be stored in DB.

    :param deployment: UUID or name of the deployment
    :param config: a dict with a task configuration
    :param task: Task object. If None, it will be created
    :param abort_on_sla_failure: if True, the execution of a benchmark
                                 scenario will stop when any SLA check
                                 for it fails
    """
    deployment = objects.Deployment.get(deployment)
    task = task or objects.Task(deployment_uuid=deployment["uuid"])
    LOG.info("Benchmark Task %s on Deployment %s" % (task["uuid"],
                                                     deployment["uuid"]))
    benchmark_engine = engine.BenchmarkEngine(
        config, task, admin=deployment["admin"], users=deployment["users"],
        abort_on_sla_failure=abort_on_sla_failure)
    try:
        benchmark_engine.validate()
        benchmark_engine.run()
    except exceptions.InvalidTaskException:
        # NOTE(boris-42): We don't log anything, because it's a normal
        #                 situation when a user puts a wrong config.
        pass
    except Exception:
        # Any other failure may have left the deployment half-modified:
        # mark it inconsistent before re-raising.
        deployment.update_status(consts.DeployStatus.DEPLOY_INCONSISTENT)
        raise
def test__validate_config_semanitc_helper_invalid_arg(self, mock_validate):
    """A bad scenario argument surfaces as InvalidBenchmarkConfig."""
    mock_validate.side_effect = exceptions.InvalidScenarioArgument()
    eng = engine.BenchmarkEngine(mock.MagicMock(), mock.MagicMock())
    helper_args = ("a", "u", "n", "p", mock.MagicMock(), {})
    self.assertRaises(exceptions.InvalidBenchmarkConfig,
                      eng._validate_config_semantic_helper, *helper_args)
def test_validate__wrong_scenarios_name(self, mock_validate):
    """validate() wraps unknown scenario names and marks the task failed.

    The injected mock parameter was previously typo'd as "mova_validate";
    it is injected positionally by the patch decorator, so renaming it to
    the conventional "mock_validate" is safe.
    """
    task = mock.MagicMock()
    eng = engine.BenchmarkEngine(mock.MagicMock(), task)
    eng._validate_config_scenarios_name = mock.MagicMock(
        side_effect=exceptions.NotFoundScenarios)

    self.assertRaises(exceptions.InvalidTaskException, eng.validate)
    # The task must be flagged as failed when validation blows up.
    self.assertTrue(task.set_failed.called)
def test_run__update_status(self, mock_consume):
    """run() moves the task through RUNNING to FINISHED."""
    task = mock.MagicMock()
    engine.BenchmarkEngine([], task).run()
    expected_transitions = [
        mock.call(consts.TaskStatus.RUNNING),
        mock.call(consts.TaskStatus.FINISHED),
    ]
    task.update_status.assert_has_calls(expected_transitions)
def test__validate_config_syntax(self, mock_context, mock_runner):
    """Runner and context sections are both syntax-checked per scenario."""
    config = {"sca": [{"context": "a"}], "scb": [{"runner": "b"}]}
    eng = engine.BenchmarkEngine(mock.MagicMock(), mock.MagicMock())

    eng._validate_config_syntax(config)

    # "sca" has no runner, so {} is validated; "scb" declares runner "b".
    mock_runner.assert_has_calls([mock.call({}), mock.call("b")])
    # Contexts are validated as non-hidden; a missing one defaults to {}.
    mock_context.assert_has_calls([mock.call("a", non_hidden=True),
                                   mock.call({}, non_hidden=True)])
def test__validate_config_scenarios_name_non_exsisting(
        self, mock_scenario):
    """Unknown scenario names in the config raise NotFoundScenarios."""
    config = {"exist": [], "nonexist1": [], "nonexist2": []}
    available = ["exist", "aaa"]
    mock_scenario.list_benchmark_scenarios.return_value = available
    eng = engine.BenchmarkEngine(config, mock.MagicMock())
    self.assertRaises(exceptions.NotFoundScenarios,
                      eng._validate_config_scenarios_name, config)
def test_run__update_status(self):
    """run() on an empty config returns {} and finishes the task."""
    task = mock.MagicMock()
    benchmark = engine.BenchmarkEngine([], task)

    results = benchmark.run()

    self.assertEqual(results, {})
    task.update_status.assert_has_calls([
        mock.call(consts.TaskStatus.RUNNING),
        mock.call(consts.TaskStatus.FINISHED),
    ])
def test__validate_config_semantic_helper(self, mock_validate):
    """The helper forwards args plus credentials to scenario validation."""
    deployment = mock.MagicMock()
    eng = engine.BenchmarkEngine(mock.MagicMock(), mock.MagicMock())

    eng._validate_config_semantic_helper("admin", "user", "name", "pos",
                                         deployment, {"args": "args"})

    # A single user is wrapped into a list for the validator.
    mock_validate.assert_called_once_with(
        "name", {"args": "args"}, admin="admin", users=["user"],
        deployment=deployment)
def test__validate_config_syntax__wrong_context(self, mock_context,
                                                mock_runner):
    """A context schema violation is wrapped in InvalidBenchmarkConfig."""
    config = {"sca": [{"context": "a"}], "scb": [{"runner": "b"}]}
    eng = engine.BenchmarkEngine(mock.MagicMock(), mock.MagicMock())
    # Make the context validator reject everything.
    mock_context.validate = mock.MagicMock(
        side_effect=jsonschema.ValidationError("a"))
    self.assertRaises(exceptions.InvalidBenchmarkConfig,
                      eng._validate_config_syntax, config)
def test_run__update_status(self, mock_endpoint, mock_osclients,
                            mock_runner, mock_scenario, mock_setup,
                            mock_cleanup, mock_consume):
    """run() drives the task status from RUNNING to FINISHED."""
    task = mock.MagicMock()
    engine.BenchmarkEngine([], task).run()
    task.update_status.assert_has_calls([
        mock.call(consts.TaskStatus.RUNNING),
        mock.call(consts.TaskStatus.FINISHED),
    ])
def task_validate(deploy_uuid, config):
    """Validate a task config against specified deployment.

    :param deploy_uuid: UUID of the deployment
    :param config: a dict with a task configuration
    """
    deployment = objects.Deployment.get(deploy_uuid)
    # A throwaway Task is needed only so the engine can be constructed;
    # nothing is executed here.
    task = objects.Task(deployment_uuid=deploy_uuid)
    benchmark_engine = engine.BenchmarkEngine(config, task)
    # Bind the deployment credentials so semantic validation can reach
    # the cloud.
    benchmark_engine.bind(admin=deployment["admin"],
                          users=deployment["users"])
    # NOTE(review): validate() appears to raise on a bad config (other
    # callers in this codebase catch InvalidTaskException) — confirm.
    benchmark_engine.validate()
def test_run__update_status(self, mock_scenario_runner, mock_scenario,
                            mock_context_manager_setup,
                            mock_context_manager_cleanup,
                            mock_consume_results):
    """run() transitions the task RUNNING -> FINISHED."""
    task = mock.MagicMock()
    engine.BenchmarkEngine([], task).run()
    expected = [mock.call(consts.TaskStatus.RUNNING),
                mock.call(consts.TaskStatus.FINISHED)]
    task.update_status.assert_has_calls(expected)
def test__validate_config_syntax(self, mock_context_manager_validate,
                                 mock_scenario_runner_validate):
    """Both the runner and the context of each scenario get syntax-checked."""
    config = {"sca": [{"context": "a"}], "scb": [{"runner": "b"}]}
    eng = engine.BenchmarkEngine(mock.MagicMock(), mock.MagicMock())

    eng._validate_config_syntax(config)

    runner_calls = [mock.call({}), mock.call("b")]
    mock_scenario_runner_validate.assert_has_calls(runner_calls,
                                                   any_order=True)
    context_calls = [mock.call("a", non_hidden=True),
                     mock.call({}, non_hidden=True)]
    mock_context_manager_validate.assert_has_calls(context_calls,
                                                   any_order=True)
def validate(cls, deployment, config):
    """Validate a task config against specified deployment.

    :param deployment: UUID or name of the deployment
    :param config: a dict with a task configuration
    """
    deployment = objects.Deployment.get(deployment)
    # fake=True — presumably marks this Task as validation-only (never
    # executed); confirm against objects.Task.
    task = objects.Task(deployment_uuid=deployment["uuid"], fake=True)
    benchmark_engine = engine.BenchmarkEngine(config, task,
                                              admin=deployment["admin"],
                                              users=deployment["users"])
    benchmark_engine.validate()
def test_schema_is_valid(self, mock_validate):
    """Gate scenario files must pass engine validation."""
    rutils.load_plugins(os.path.join(self.rally_scenarios_path, "plugins"))

    for filename in ("rally.yaml", "rally-neutron.yaml"):
        full_path = os.path.join(self.rally_scenarios_path, filename)
        with open(full_path) as task_file:
            try:
                task_config = yaml.safe_load(task_file.read())
                engine.BenchmarkEngine(task_config,
                                       mock.MagicMock()).validate()
            except Exception:
                print(traceback.format_exc())
                self.fail("Wrong scenario config %s" % full_path)
def test_get_user_ctx_for_validation_existing_users(self, mock_users_ctx):
    """With pre-created users, the validation context reuses them."""
    context = {"a": 10}
    users = [mock.MagicMock(), mock.MagicMock()]
    eng = engine.BenchmarkEngine(mock.MagicMock(), mock.MagicMock(),
                                 users=users)

    result = eng._get_user_ctx_for_validation(context)

    # The context is mutated to carry the existing users...
    self.assertEqual(context["config"]["existing_users"], users)
    # ...and then wrapped by the users context manager.
    mock_users_ctx.assert_called_once_with(context)
    self.assertEqual(mock_users_ctx.return_value, result)
def test_validate(self, mock_json_validate):
    """validate() runs the name, syntax and semantic checks in order."""
    config = mock.MagicMock()
    eng = engine.BenchmarkEngine(config, mock.MagicMock())

    # One parent mock records the relative order of the three stages.
    stage = mock.MagicMock()
    eng._validate_config_scenarios_name = stage.names
    eng._validate_config_syntax = stage.syntax
    eng._validate_config_semantic = stage.semantic

    eng.validate()

    stage.assert_has_calls([mock.call.names(config),
                            mock.call.syntax(config),
                            mock.call.semantic(config)])
def test_consume_results(self, mock_check_all):
    """All queued results are handed to the SLA checker in one call."""
    key = {"kw": {"fake": 2}, "name": "fake", "pos": 0}
    task = mock.MagicMock()
    config = {
        "a.benchmark": [{"context": {"context_a": {"a": 1}}}],
    }
    is_done = mock.MagicMock()
    is_done.isSet.side_effect = [False, False, True]

    eng = engine.BenchmarkEngine(config, task)
    eng.duration = 1
    eng.consume_results(key, task, collections.deque([1, 2]), is_done)

    mock_check_all.assert_called_once_with({"fake": 2}, [1, 2])
def test_run__config_has_runner(self, mock_endpoint, mock_osclients,
                                mock_runner):
    """run() handles configs where benchmarks declare their own runner."""
    config = {
        "a.args": [{"runner": {"type": "a", "b": 1}}],
        "b.args": [{"runner": {"a": 1}}],
    }
    task = mock.MagicMock()
    eng = engine.BenchmarkEngine(config, task).bind([{}])
    eng.run()
def test_run__config_has_runner(self, mock_setup, mock_cleanup,
                                mock_runner, mock_scenario, mock_consume):
    """run() copes with per-benchmark runner configuration."""
    config = {
        "a.benchmark": [{"runner": {"type": "a", "b": 1}}],
        "b.benchmark": [{"runner": {"a": 1}}],
    }
    task = mock.MagicMock()
    engine.BenchmarkEngine(config, task).run()