def test_schema_is_valid(self):
    """Every sample task file must render, parse and pass syntax validation.

    Also checks that every in-tree ("rally"-module) scenario is covered
    by at least one sample file.
    """
    # Names of all scenarios that appear in at least one sample file.
    scenarios = set()
    for path in self.iterate_samples():
        with open(path) as task_file:
            try:
                try:
                    # Render the task template first, then parse the
                    # result as YAML (safe_load also accepts JSON).
                    task_config = yaml.safe_load(
                        self.rapi.task.render_template(
                            task_template=task_file.read()))
                except Exception:
                    print(traceback.format_exc())
                    # NOTE(review): self.fail raises AssertionError, which
                    # the outer "except Exception" below re-catches, so the
                    # reported message ends up being the generic "Invalid
                    # task file" one — confirm this is intended.
                    self.fail("Invalid JSON file: %s" % path)
                eng = engine.TaskEngine(task_cfg.TaskConfig(task_config),
                                        mock.MagicMock(), mock.Mock())
                # Syntax-only validation: no platform/credentials needed.
                eng.validate(only_syntax=True)
            except Exception:
                print(traceback.format_exc())
                self.fail("Invalid task file: %s" % path)
            else:
                # In the v1 task format, top-level keys are scenario names.
                scenarios.update(task_config.keys())
    # Scenarios for which no sample file was found.
    missing = set(s.get_name() for s in scenario.Scenario.get_all())
    missing -= scenarios
    # check missing scenario is not from plugin
    missing = [
        s for s in list(missing)
        if scenario.Scenario.get(s).__module__.startswith("rally")
    ]
    self.assertEqual(missing, [],
                     "These scenarios don't have samples: %s" % missing)
def test_schema_is_valid(self):
    """Every sample task file must render, parse and pass syntax validation.

    Also checks that every scenario from the ``xrally_kubernetes``
    package is covered by at least one sample file.
    """
    # Names of all scenarios that appear in at least one sample file.
    scenarios = set()
    for path in self.iterate_samples():
        with open(path) as task_file:
            try:
                try:
                    # Render the template, parse as YAML, then wrap in
                    # TaskConfig (which normalizes v1 input to v2).
                    task_template = self.rapi.task.render_template(
                        task_template=task_file.read())
                    task_config = yaml.safe_load(task_template)
                    task_config = task_cfg.TaskConfig(task_config)
                except Exception:
                    print(traceback.format_exc())
                    # NOTE(review): self.fail raises AssertionError, which
                    # the outer "except Exception" below re-catches, so the
                    # reported message ends up being the generic "Invalid
                    # task file" one — confirm this is intended.
                    self.fail("Invalid JSON file: %s" % path)
                eng = engine.TaskEngine(task_config,
                                        mock.MagicMock(), mock.Mock())
                # Syntax-only validation: no platform/credentials needed.
                eng.validate(only_syntax=True)
            except Exception:
                print(traceback.format_exc())
                self.fail("Invalid task file: %s" % path)
            else:
                # Collect the scenario name of every workload in every
                # subtask of the normalized config.
                workloads = itertools.chain(
                    *[s["workloads"] for s in task_config.subtasks])
                scenarios.update(w["name"] for w in workloads)
    # Scenarios from this package that have no sample at all.
    missing = set(s.get_name() for s in scenario.Scenario.get_all()
                  if s.__module__.startswith("xrally_kubernetes"))
    missing -= scenarios
    if missing:
        self.fail("These scenarios don't have samples: %s" % missing)
def test_hook_config_compatibility(self):
    """Old-style hook dicts are adopted into (name, args) tuple form."""
    # v1-style hook description: separate "name"/"args" keys plus a
    # nested trigger with the same shape.
    hook_cfg = {
        "description": "descr",
        "name": "hook_action",
        "args": {"k1": "v1"},
        "trigger": {
            "name": "hook_trigger",
            "args": {"k2": "v2"}
        }
    }
    cfg = {
        "xxx": [{
            "args": {},
            "runner": {"type": "yyy"},
            "hooks": [hook_cfg]
        }]
    }
    task = task_cfg.TaskConfig(cfg)
    workload = task.subtasks[0]["workloads"][0]
    # The adopted hook exposes action/trigger as (plugin_name, args) pairs.
    expected = {
        "description": "descr",
        "action": ("hook_action", {"k1": "v1"}),
        "trigger": ("hook_trigger", {"k2": "v2"})
    }
    self.assertEqual(expected, workload["hooks"][0])
def test_run__task_soft_aborted(self, mock_scenario_runner, mock_scenario,
                                mock_context_manager_setup,
                                mock_context_manager_cleanup,
                                mock_result_consumer):
    """A soft abort mid-run finishes started workloads, then aborts.

    The aborting-status poll returns False twice and True on the third
    check, so the engine runs two workloads and aborts before the third.
    """
    scenario_cls = mock_scenario.get.return_value
    scenario_cls.get_platform.return_value = "openstack"
    scenario_cls.get_info.return_value = {"title": ""}
    task = mock.MagicMock()
    # Third poll reports the task as aborting.
    mock_result_consumer.is_task_in_aborting_status.side_effect = [
        False, False, True
    ]
    # Three single-workload subtasks in the v1 task format.
    config = task_cfg.TaskConfig({
        "a.task": [{
            "runner": {
                "type": "a", "b": 1
            },
            "description": "foo"
        }],
        "b.task": [{
            "runner": {
                "type": "a", "b": 1
            },
            "description": "bar"
        }],
        "c.task": [{
            "runner": {
                "type": "a", "b": 1
            },
            "description": "xxx"
        }]
    })
    fake_runner_cls = mock.MagicMock()
    fake_runner = mock.MagicMock()
    fake_runner_cls.return_value = fake_runner
    mock_scenario_runner.get.return_value = fake_runner_cls
    eng = engine.TaskEngine(config, task, mock.MagicMock())
    eng.run()
    # Only the first two workloads actually ran.
    self.assertEqual(2, fake_runner.run.call_count)
    # The final task status transition must be ABORTED.
    self.assertEqual(mock.call(consts.TaskStatus.ABORTED),
                     task.update_status.mock_calls[-1])
    subtask_obj = task.add_subtask.return_value
    # First two subtasks finished; the third was aborted.
    subtask_obj.update_status.assert_has_calls((
        mock.call(consts.SubtaskStatus.FINISHED),
        mock.call(consts.SubtaskStatus.FINISHED),
        mock.call(consts.SubtaskStatus.ABORTED),
    ))
def test_schema_is_valid(self):
    """Every rally-jobs task file must render, parse and pass validation.

    For each ``*.yaml`` job file (excluding the ``*_args.yaml``
    companions) this renders the task template with its optional args
    file, parses the result and runs syntax-only validation.
    """
    # Make plugins shipped next to the job files discoverable.
    discover.load_plugins(os.path.join(self.rally_jobs_path, "plugins"))
    files = {
        f for f in os.listdir(self.rally_jobs_path)
        if (os.path.isfile(os.path.join(self.rally_jobs_path, f))
            and f.endswith(".yaml") and not f.endswith("_args.yaml"))
    }
    # TODO(andreykurilin): figure out why it fails
    files -= {"rally-mos.yaml", "sahara-clusters.yaml"}
    for filename in files:
        full_path = os.path.join(self.rally_jobs_path, filename)
        with open(full_path) as task_file:
            try:
                # Template arguments live in "<name>_args.yaml" next to
                # the task file; default to no arguments.
                args_file = os.path.join(
                    self.rally_jobs_path,
                    filename.rsplit(".", 1)[0] + "_args.yaml")
                args = {}
                if os.path.exists(args_file):
                    # FIX: read via a context manager so the args file
                    # handle is closed deterministically (the original
                    # `open(args_file).read()` leaked it).
                    with open(args_file) as f:
                        args = yaml.safe_load(f.read())
                    if not isinstance(args, dict):
                        raise TypeError(
                            "args file %s must be dict in yaml or json "
                            "presentation" % args_file)
                task_inst = api._Task(api.API(skip_db_check=True))
                task = task_inst.render_template(
                    task_template=task_file.read(), **args)
                task = task_cfg.TaskConfig(yaml.safe_load(task))
                task_obj = fakes.FakeTask({"uuid": full_path})
                eng = engine.TaskEngine(task, task_obj, mock.Mock())
                # Syntax-only validation: no real deployment required.
                eng.validate(only_syntax=True)
            except Exception:
                print(traceback.format_exc())
                self.fail("Wrong task input file: %s" % full_path)
def test_run__subtask_crashed(self, mock_scenario_runner, mock_scenario,
                              mock_context_manager_setup,
                              mock_context_manager_cleanup,
                              mock_result_consumer, mock_task_get_status):
    """An exception while adding a workload crashes subtask and task.

    ``add_workload`` raises, so ``eng.run`` propagates MyException and
    the engine records CRASHED statuses on its way out.
    """
    task = mock.MagicMock(spec=objects.Task)
    subtask_obj = task.add_subtask.return_value
    # Force a crash at workload-creation time.
    subtask_obj.add_workload.side_effect = MyException()
    mock_result_consumer.is_task_in_aborting_status.return_value = False
    # Three single-workload subtasks in the v1 task format.
    config = task_cfg.TaskConfig({
        "a.task": [{
            "runner": {
                "type": "a", "b": 1
            }
        }],
        "b.task": [{
            "runner": {
                "type": "a", "b": 1
            }
        }],
        "c.task": [{
            "runner": {
                "type": "a", "b": 1
            }
        }]
    })
    fake_runner_cls = mock.MagicMock()
    fake_runner = mock.MagicMock()
    fake_runner_cls.return_value = fake_runner
    mock_scenario_runner.get.return_value = fake_runner_cls
    eng = engine.TaskEngine(config, task, mock.Mock())
    # The exception must propagate out of the engine.
    self.assertRaises(MyException, eng.run)
    # The task went RUNNING first, then was marked CRASHED.
    task.update_status.assert_has_calls((
        mock.call(consts.TaskStatus.RUNNING),
        mock.call(consts.TaskStatus.CRASHED),
    ))
    subtask_obj.update_status.assert_called_once_with(
        consts.SubtaskStatus.CRASHED)
def test_run__task_aborted(self, mock_scenario_runner, mock_scenario,
                           mock_context_manager_setup,
                           mock_context_manager_cleanup,
                           mock_result_consumer, mock_task_get_status):
    """When the task status reads SOFT_ABORTING the engine aborts it."""
    task = mock.MagicMock(spec=objects.Task)
    # Three identical single-workload subtasks in the v1 task format.
    config = task_cfg.TaskConfig({
        name: [{"runner": {"type": "a", "b": 1}}]
        for name in ("a.task", "b.task", "c.task")
    })
    runner_cls = mock.MagicMock()
    runner_inst = mock.MagicMock()
    runner_cls.return_value = runner_inst
    # Every status poll reports the task as being soft-aborted.
    mock_task_get_status.return_value = consts.TaskStatus.SOFT_ABORTING
    mock_scenario_runner.get.return_value = runner_cls
    eng = engine.TaskEngine(config, task, mock.Mock())
    eng.run()
    # The final task status transition must be ABORTED.
    last_status_update = task.update_status.mock_calls[-1]
    self.assertEqual(mock.call(consts.TaskStatus.ABORTED),
                     last_status_update)
    task.add_subtask.return_value.update_status.assert_called_once_with(
        consts.SubtaskStatus.ABORTED)
def test__process_1(self):
    """v1 (flat) task input is adopted into the v2 task format."""

    def _workload(name, **overrides):
        # Defaults the v1 -> v2 adoption is expected to fill in.
        wl = {"scenario": {name: {}},
              "contexts": {},
              "hooks": [],
              "sla": {"failure_rate": {"max": 0}},
              "runner": {"serial": {}}}
        wl.update(overrides)
        return wl

    def _subtask(title, workloads):
        # Each v1 scenario key becomes a subtask titled after it.
        return {"title": title, "description": "", "tags": [],
                "workloads": workloads}

    config = collections.OrderedDict()
    config["a.task"] = [{"context": {"foo": "bar"}}, {}]
    config["b.task"] = [{"sla": {"key": "value"}}]
    config["c.task"] = [{"hooks": [{"name": "foo",
                                    "args": "bar",
                                    "description": "DESCR!!!",
                                    "trigger": {
                                        "name": "mega-trigger",
                                        "args": {"some": "thing"}
                                    }}]
                         }]

    adopted_hook = {"description": "DESCR!!!",
                    "action": {"foo": "bar"},
                    "trigger": {"mega-trigger": {"some": "thing"}}}
    expected = {
        "title": "Task (adopted from task format v1)",
        "version": 2,
        "description": "",
        "tags": [],
        "subtasks": [
            _subtask("a.task",
                     [_workload("a.task", contexts={"foo": "bar"}),
                      _workload("a.task")]),
            _subtask("b.task",
                     [_workload("b.task", sla={"key": "value"})]),
            _subtask("c.task",
                     [_workload("c.task", hooks=[adopted_hook])])
        ]}
    self.assertEqual(expected, task_cfg.TaskConfig(config).to_dict())
def test__process_2(self):
    """v2 task input is normalized: omitted workload fields get defaults."""

    def _workload(name, **overrides):
        # Defaults the normalization is expected to fill in.
        wl = {"scenario": {name: {}},
              "contexts": {},
              "hooks": [],
              "sla": {"failure_rate": {"max": 0}},
              "runner": {"serial": {}}}
        wl.update(overrides)
        return wl

    config = {
        "version": 2,
        "title": "foo",
        "subtasks": [
            {"title": "subtask1",
             "workloads": [
                 {"scenario": {"workload1": {}},
                  "runner": {"constant": {}},
                  "sla": {"key": "value"}},
                 {"scenario": {"workload2": {}}}
             ]},
            # Single-scenario form: no explicit "workloads" list.
            {"title": "subtask2",
             "scenario": {"workload1": {}}}
        ]
    }

    expected = {
        "title": "foo",
        "version": 2,
        "description": "",
        "tags": [],
        "subtasks": [
            {"title": "subtask1", "description": "", "tags": [],
             "workloads": [
                 _workload("workload1", sla={"key": "value"},
                           runner={"constant": {}}),
                 _workload("workload2")
             ]},
            {"title": "subtask2", "description": "", "tags": [],
             "workloads": [_workload("workload1")]}
        ]}
    self.assertEqual(expected, task_cfg.TaskConfig(config).to_dict())