def test_setup(self, mock_context_get):
    mock_context = mock.MagicMock()
    mock_context.return_value = mock.MagicMock(__lt__=lambda x, y: True)
    mock_context_get.return_value = mock_context
    ctx_object = {"config": {"a": [], "b": []}}

    manager = context.ContextManager(ctx_object)
    result = manager.setup()

    self.assertEqual(result, ctx_object)
    mock_context_get.assert_has_calls(
        [mock.call("a"), mock.call("b")], any_order=True)
    mock_context.assert_has_calls(
        [mock.call(ctx_object), mock.call(ctx_object)], any_order=True)
    self.assertEqual([mock_context(), mock_context()], manager._visited)
    mock_context.return_value.assert_has_calls(
        [mock.call.setup(), mock.call.setup()], any_order=True)
def test_cleanup_exception(self, mock_log_exception):
    mock_obj = mock.MagicMock()

    @context.configure("a", platform="foo", order=1)
    class A(context.Context):
        def setup(self):
            pass

        def cleanup(self):
            mock_obj("a@foo")
            raise Exception("So Sad")

    self.addCleanup(A.unregister)

    ctx_object = {"config": {"a@foo": []}, "task": {"uuid": "uuid"}}
    context.ContextManager(ctx_object).cleanup()

    mock_obj.assert_called_once_with("a@foo")
    mock_log_exception.assert_called_once_with(mock.ANY)
def _run_workload(self, subtask_obj, workload):
    if ResultConsumer.is_task_in_aborting_status(self.task["uuid"]):
        raise TaskAborted()

    # register the workload in the database and remember its uuid
    workload_obj = subtask_obj.add_workload(
        name=workload["name"],
        description=workload["description"],
        position=workload["position"],
        runner=workload["runner"],
        runner_type=workload["runner_type"],
        hooks=workload["hooks"],
        contexts=workload["contexts"],
        sla=workload["sla"],
        args=workload["args"])
    workload["uuid"] = workload_obj["uuid"]

    workload_cfg = objects.Workload.to_task(workload)
    LOG.info("Running workload: \n"
             " position = %(position)s\n"
             " config = %(cfg)s"
             % {"position": workload["position"],
                "cfg": json.dumps(workload_cfg, indent=3)})

    runner_cls = runner.ScenarioRunner.get(workload["runner_type"])
    runner_obj = runner_cls(self.task, workload["runner"])
    context_obj = self._prepare_context(workload["contexts"],
                                        workload["name"],
                                        workload_obj["uuid"])
    try:
        # the result consumer records iteration results while the context
        # manager handles setup/cleanup of all contexts of the workload
        ctx_manager = context.ContextManager(context_obj)
        with ResultConsumer(workload, task=self.task,
                            subtask=subtask_obj,
                            workload=workload_obj,
                            runner=runner_obj,
                            abort_on_sla_failure=self.abort_on_sla_failure,
                            ctx_manager=ctx_manager):
            with ctx_manager:
                runner_obj.run(
                    workload["name"], context_obj, workload["args"])
    except Exception:
        LOG.exception("Unexpected exception during the workload execution")
def test_setup(self, mock__get_sorted_context_lst):
    foo_context = mock.MagicMock()
    bar_context = mock.MagicMock()
    mock__get_sorted_context_lst.return_value = [foo_context, bar_context]

    ctx_object = {"config": {"a": [], "b": []},
                  "scenario_namespace": "foo"}

    manager = context.ContextManager(ctx_object)
    result = manager.setup()

    self.assertEqual(result, ctx_object)
    foo_context.setup.assert_called_once_with()
    bar_context.setup.assert_called_once_with()
def _run_workload(self, subtask_obj, workload):
    if ResultConsumer.is_task_in_aborting_status(self.task["uuid"]):
        raise TaskAborted()

    key = workload.make_key()
    workload_obj = subtask_obj.add_workload(key)
    LOG.info("Running benchmark with key: \n%s"
             % json.dumps(key, indent=2))
    runner_obj = self._get_runner(workload.runner)
    context_obj = self._prepare_context(workload.context,
                                        workload.name,
                                        workload_obj["uuid"])
    try:
        with ResultConsumer(key, self.task, subtask_obj, workload_obj,
                            runner_obj, self.abort_on_sla_failure):
            with context.ContextManager(context_obj):
                runner_obj.run(workload.name, context_obj, workload.args)
    except Exception as e:
        LOG.debug(traceback.format_exc())
        LOG.exception(e)
def test_cleanup_exception(self, mock_context_get):
    mock_context = mock.MagicMock()
    mock_context.return_value = mock.MagicMock(__lt__=lambda x, y: True)
    mock_context.cleanup.side_effect = Exception()
    mock_context_get.return_value = mock_context
    ctx_object = {"config": {"a@foo": [], "b@foo": []}}

    manager = context.ContextManager(ctx_object)
    manager.cleanup()

    mock_context_get.assert_has_calls(
        [mock.call("a", platform="foo", allow_hidden=True,
                   fallback_to_default=False),
         mock.call("b", platform="foo", allow_hidden=True,
                   fallback_to_default=False)],
        any_order=True)
    mock_context.assert_has_calls(
        [mock.call(ctx_object), mock.call(ctx_object)], any_order=True)
    mock_context.return_value.assert_has_calls(
        [mock.call.cleanup(), mock.call.cleanup()], any_order=True)
def run(self):
    """Run the benchmark according to the test configuration.

    Test configuration is specified on engine initialization.

    :returns: List of dicts, each dict containing the results of all
              the corresponding benchmark test launches
    """
    self.task.update_status(consts.TaskStatus.RUNNING)

    for subtask in self.config.subtasks:
        subtask_obj = self.task.add_subtask(**subtask.to_dict())
        for workload in subtask.workloads:
            if ResultConsumer.is_task_in_aborting_status(
                    self.task["uuid"]):
                LOG.info("Received aborting signal.")
                self.task.update_status(consts.TaskStatus.ABORTED)
                return

            key = workload.make_key()
            workload_obj = subtask_obj.add_workload(key)
            LOG.info("Running benchmark with key: \n%s"
                     % json.dumps(key, indent=2))
            runner_obj = self._get_runner(workload.runner)
            context_obj = self._prepare_context(workload.context,
                                                workload.name)
            try:
                with ResultConsumer(key, self.task, subtask_obj,
                                    workload_obj, runner_obj,
                                    self.abort_on_sla_failure):
                    with context.ContextManager(context_obj):
                        runner_obj.run(workload.name, context_obj,
                                       workload.args)
            except Exception as e:
                LOG.debug(traceback.format_exc())
                LOG.exception(e)

    if objects.Task.get_status(
            self.task["uuid"]) != consts.TaskStatus.ABORTED:
        self.task.update_status(consts.TaskStatus.FINISHED)
def test_cleanup_exception(self, mock_log_exception, mock_format_exc):
    mock_obj = mock.MagicMock()
    exc = Exception("So Sad")

    @context.configure("a", platform="foo", order=1)
    class A(context.Context):
        def setup(self):
            pass

        def cleanup(self):
            mock_obj("a@foo")
            raise exc

    self.addCleanup(A.unregister)

    ctx_object = {"config": {"a@foo": []}, "task": {"uuid": "uuid"}}
    ctx_manager = context.ContextManager(ctx_object)
    ctx_manager._data[A.get_fullname()] = {
        "cleanup": {"atomic_actions": None,
                    "started_at": None,
                    "finished_at": None,
                    "error": None}}
    ctx_manager.cleanup()

    mock_obj.assert_called_once_with("a@foo")
    mock_log_exception.assert_called_once_with(mock.ANY)
    mock_format_exc.assert_called_once_with(exc)
    self.assertEqual(
        [{"cleanup": {"atomic_actions": [],
                      "error": mock_format_exc.return_value,
                      "started_at": mock.ANY,
                      "finished_at": mock.ANY}}],
        ctx_manager.contexts_results())
def run(self):
    """Run the benchmark according to the test configuration.

    Test configuration is specified on engine initialization.

    :returns: List of dicts, each dict containing the results of all
              the corresponding benchmark test launches
    """
    self.task.update_status(consts.TaskStatus.RUNNING)

    for subtask in self.config.subtasks:
        for pos, scenario_obj in enumerate(subtask.scenarios):
            if ResultConsumer.is_task_in_aborting_status(
                    self.task["uuid"]):
                LOG.info("Received aborting signal.")
                self.task.update_status(consts.TaskStatus.ABORTED)
                return

            name = scenario_obj["name"]
            key = {"name": name, "pos": pos, "kw": scenario_obj}
            LOG.info("Running benchmark with key: \n%s"
                     % json.dumps(key, indent=2))
            runner_obj = self._get_runner(scenario_obj)
            context_obj = self._prepare_context(
                scenario_obj.get("context", {}), name, self.admin)
            try:
                with ResultConsumer(key, self.task, runner_obj,
                                    self.abort_on_sla_failure):
                    with context.ContextManager(context_obj):
                        runner_obj.run(name, context_obj,
                                       scenario_obj.get("args", {}))
            except Exception as e:
                LOG.exception(e)

    if objects.Task.get_status(
            self.task["uuid"]) != consts.TaskStatus.ABORTED:
        self.task.update_status(consts.TaskStatus.FINISHED)
def _run_workload(self, subtask_obj, workload):
    if ResultConsumer.is_task_in_aborting_status(self.task["uuid"]):
        raise TaskAborted()

    workload_obj = subtask_obj.add_workload(
        name=workload["name"],
        description=workload["description"],
        position=workload["position"],
        runner=workload["runner"],
        hooks=workload["hooks"],
        context=workload["context"],
        sla=workload["sla"],
        args=workload["args"])

    workload_cfg = objects.Workload.format_workload_config(workload)
    LOG.info("Running benchmark with key: \n"
             " name = %(name)s\n"
             " description = %(description)s\n"
             " position = %(position)s\n"
             " config = %(cfg)s",
             {"name": workload["name"],
              "description": workload["description"],
              "position": workload["position"],
              "cfg": json.dumps(workload_cfg, indent=3)})

    runner_obj = self._get_runner(workload["runner"])
    context_obj = self._prepare_context(workload["context"],
                                        workload["name"],
                                        workload_obj["uuid"])
    try:
        with ResultConsumer(workload, self.task, subtask_obj, workload_obj,
                            runner_obj, self.abort_on_sla_failure):
            with context.ContextManager(context_obj):
                runner_obj.run(workload["name"], context_obj,
                               workload["args"])
    except Exception as e:
        LOG.debug(traceback.format_exc())
        LOG.exception(e)
def _get_scenario_context(iteration, context_obj):
    # each iteration works on its own copy of the context object with
    # the iteration number injected into it
    context_obj = copy.deepcopy(context_obj)
    context_obj["iteration"] = iteration
    return context.ContextManager(context_obj).map_for_scenario()
def _get_scenario_context(context_obj):
    return context.ContextManager(context_obj).map_for_scenario()
def test_get_sorted_context_lst(self, mock_context_get,
                                mock_context_get_all):
    # use an OrderedDict to predict the order of calls
    ctx_object = {
        "config": collections.OrderedDict([
            ("a@foo", []), ("b", []), ("c", []), ("d", [])]),
        "scenario_namespace": "foo"
    }

    def OrderableMock(**kwargs):
        return mock.Mock(__lt__=(lambda x, y: x), **kwargs)

    a_ctx = mock.Mock(return_value=OrderableMock())
    mock_context_get.return_value = a_ctx
    b_ctx = mock.Mock(return_value=OrderableMock())
    c_ctx = mock.Mock(get_namespace=lambda: "foo",
                      return_value=OrderableMock())
    d_ctx = mock.Mock(get_namespace=lambda: "default",
                      return_value=OrderableMock())

    all_plugins = {
        # the case when the search is performed across all namespaces and
        # only one possible match is found
        "b": [b_ctx],
        # the case when the plugin is filtered by the scenario namespace
        "c": [mock.Mock(get_namespace=lambda: "default"), c_ctx],
        # the case when no plugin matches the scenario namespace, so the
        # plugin from the default namespace is used
        "d": [mock.Mock(get_namespace=lambda: "bar"), d_ctx]
    }

    def fake_get_all(name, allow_hidden=True):
        # use pop to ensure that get_all is called only once per ctx
        result = all_plugins.pop(name, None)
        if result is None:
            self.fail("Unexpected call of Context.get_all for %s plugin"
                      % name)
        return result

    mock_context_get_all.side_effect = fake_get_all

    manager = context.ContextManager(ctx_object)

    self.assertEqual({a_ctx.return_value, b_ctx.return_value,
                      c_ctx.return_value, d_ctx.return_value},
                     set(manager._get_sorted_context_lst()))

    mock_context_get.assert_called_once_with("a",
                                             namespace="foo",
                                             fallback_to_default=False,
                                             allow_hidden=True)
    a_ctx.assert_called_once_with(ctx_object)
    self.assertEqual(
        [mock.call(name=name, allow_hidden=True)
         for name in ("b", "c", "d")],
        mock_context_get_all.call_args_list)