def _run_scenario_once(cls, method_name, context_obj, scenario_kwargs):
    """Instantiate a scenario plugin and run one iteration of it.

    :param cls: scenario plugin class to instantiate
    :param method_name: name of the scenario method to call
    :param context_obj: per-iteration context dict; must contain
        "iteration" and "task" -> "uuid"
    :param scenario_kwargs: keyword arguments for the scenario method
    :returns: dict describing the iteration (duration, timestamp,
        idle_duration, error, output, atomic_actions)
    """
    iteration = context_obj["iteration"]
    task_uuid = context_obj["task"]["uuid"]

    # Deep-copy the arguments so that mutations made by one iteration
    # cannot leak into the next one (arguments isolation).
    call_kwargs = copy.deepcopy(scenario_kwargs)

    LOG.info("Task %(task)s | ITER: %(iteration)s START"
             % {"task": task_uuid, "iteration": iteration})

    scenario_inst = cls(context_obj)
    error = []
    try:
        with rutils.Timer() as timer:
            getattr(scenario_inst, method_name)(**call_kwargs)
    except Exception as e:
        error = utils.format_exc(e)
        if logging.is_debug():
            LOG.exception(e)
    finally:
        if error:
            status = "Error %s: %s" % tuple(error[0:2])
        else:
            status = "OK"
        LOG.info("Task %(task)s | ITER: %(iteration)s END: %(status)s"
                 % {"task": task_uuid, "iteration": iteration,
                    "status": status})

    return {"duration": timer.duration() - scenario_inst.idle_duration(),
            "timestamp": timer.timestamp(),
            "idle_duration": scenario_inst.idle_duration(),
            "error": error,
            "output": scenario_inst._output,
            "atomic_actions": scenario_inst.atomic_actions()}
def _run_scenario_once(args):
    """Run one scenario iteration described by a packed argument tuple.

    :param args: tuple of (iteration, cls, method_name, context, kwargs)
    :returns: dict describing the iteration (duration, timestamp,
        idle_duration, error, scenario_output, atomic_actions)
    """
    iteration, cls, method_name, context, kwargs = args
    task_uuid = context["task"]["uuid"]

    LOG.info("Task %(task)s | ITER: %(iteration)s START"
             % {"task": task_uuid, "iteration": iteration})

    context["iteration"] = iteration
    scenario = cls(context=context)

    error = []
    # Default output used when the scenario returns nothing (falsy).
    scenario_output = {"errors": "", "data": {}}
    try:
        with rutils.Timer() as timer:
            returned = getattr(scenario, method_name)(**kwargs)
            if returned:
                scenario_output = returned
    except Exception as e:
        error = utils.format_exc(e)
        if logging.is_debug():
            LOG.exception(e)
    finally:
        if error:
            status = "Error %s: %s" % tuple(error[0:2])
        else:
            status = "OK"
        LOG.info("Task %(task)s | ITER: %(iteration)s END: %(status)s"
                 % {"task": task_uuid, "iteration": iteration,
                    "status": status})

    return {"duration": timer.duration() - scenario.idle_duration(),
            "timestamp": timer.timestamp(),
            "idle_duration": scenario.idle_duration(),
            "error": error,
            "scenario_output": scenario_output,
            "atomic_actions": scenario.atomic_actions()}
def _run_scenario_once(args):
    """Execute a single scenario iteration from a packed argument tuple.

    :param args: tuple of (iteration, cls, method_name, context_obj,
        kwargs)
    :returns: dict describing the iteration (duration, timestamp,
        idle_duration, error, output, atomic_actions)
    """
    iteration, cls, method_name, context_obj, kwargs = args
    task_uuid = context_obj["task"]["uuid"]

    LOG.info("Task %(task)s | ITER: %(iteration)s START"
             % {"task": task_uuid, "iteration": iteration})

    context_obj["iteration"] = iteration
    scenario_inst = cls(context_obj)

    error = []
    try:
        with rutils.Timer() as timer:
            getattr(scenario_inst, method_name)(**kwargs)
    except Exception as e:
        error = utils.format_exc(e)
        if logging.is_debug():
            LOG.exception(e)
    finally:
        if error:
            status = "Error %s: %s" % tuple(error[0:2])
        else:
            status = "OK"
        LOG.info("Task %(task)s | ITER: %(iteration)s END: %(status)s"
                 % {"task": task_uuid, "iteration": iteration,
                    "status": status})

    return {"duration": timer.duration() - scenario_inst.idle_duration(),
            "timestamp": timer.timestamp(),
            "idle_duration": scenario_inst.idle_duration(),
            "error": error,
            "output": scenario_inst._output,
            "atomic_actions": scenario_inst.atomic_actions()}
def _run_scenario_once(cls, method_name, context_obj, scenario_kwargs,
                       event_queue):
    """Run one scenario iteration and publish an iteration event.

    :param cls: scenario plugin class to instantiate
    :param method_name: name of the scenario method to call
    :param context_obj: per-iteration context dict; must contain
        "iteration" and "task" -> "uuid"
    :param scenario_kwargs: keyword arguments for the scenario method
    :param event_queue: queue that receives an "iteration" event before
        the scenario method is executed
    :returns: dict describing the iteration (duration, timestamp,
        idle_duration, error, output, atomic_actions)
    """
    iteration = context_obj["iteration"]
    task_uuid = context_obj["task"]["uuid"]

    event_queue.put({"type": "iteration", "value": iteration})

    # Deep-copy the arguments so that mutations made by one iteration
    # cannot leak into the next one (arguments isolation).
    call_kwargs = copy.deepcopy(scenario_kwargs)

    LOG.info("Task %(task)s | ITER: %(iteration)s START"
             % {"task": task_uuid, "iteration": iteration})

    scenario_inst = cls(context_obj)
    error = []
    try:
        with rutils.Timer() as timer:
            getattr(scenario_inst, method_name)(**call_kwargs)
    except Exception as e:
        error = utils.format_exc(e)
        if logging.is_debug():
            LOG.exception(e)
    finally:
        if error:
            status = "Error %s: %s" % tuple(error[0:2])
        else:
            status = "OK"
        LOG.info("Task %(task)s | ITER: %(iteration)s END: %(status)s"
                 % {"task": task_uuid, "iteration": iteration,
                    "status": status})

    return {"duration": timer.duration() - scenario_inst.idle_duration(),
            "timestamp": timer.timestamp(),
            "idle_duration": scenario_inst.idle_duration(),
            "error": error,
            "output": scenario_inst._output,
            "atomic_actions": scenario_inst.atomic_actions()}
def format_result_on_timeout(exc, timeout):
    """Compose the result record for an iteration aborted by timeout.

    :param exc: exception raised by the timeout machinery
    :param timeout: configured timeout, reported as the iteration
        duration
    :returns: iteration result dict with an empty scenario output
    """
    result = {"duration": timeout, "idle_duration": 0}
    result["scenario_output"] = {"errors": "", "data": {}}
    result["atomic_actions"] = {}
    result["error"] = utils.format_exc(exc)
    return result
def format_result_on_timeout(exc, timeout):
    """Compose the result record for an iteration aborted by timeout.

    :param exc: exception raised by the timeout machinery
    :param timeout: configured timeout, reported as the iteration
        duration
    :returns: iteration result dict with empty output and no atomic
        actions
    """
    result = {"duration": timeout, "idle_duration": 0}
    result["output"] = {"additive": [], "complete": []}
    result["atomic_actions"] = []
    result["error"] = utils.format_exc(exc)
    return result
def format_result_on_timeout(exc, timeout):
    """Compose the result record for an iteration aborted by timeout.

    :param exc: exception raised by the timeout machinery
    :param timeout: configured timeout, reported as the iteration
        duration
    :returns: iteration result dict with empty output and no atomic
        actions
    """
    result = {"duration": timeout, "idle_duration": 0}
    result["output"] = {"additive": [], "complete": []}
    result["atomic_actions"] = {}
    result["error"] = utils.format_exc(exc)
    return result
def _validate_result_schema(self):
    """Validates result format.

    On schema violation the error is logged (not raised) and
    self._result is replaced by a synthetic VALIDATION_FAILED result.
    """
    try:
        jsonschema.validate(self._result, objects.task.HOOK_RESULT_SCHEMA)
    except jsonschema.ValidationError as exc:
        LOG.error(_LE("Hook %s returned result in wrong format.")
                  % self.get_name())
        LOG.exception(exc)
        self._result = self._format_result(
            status=consts.HookStatus.VALIDATION_FAILED,
            error=utils.format_exc(exc),
        )
def run_sync(self):
    """Run hook synchronously.

    Exceptions raised by run() are captured via set_error() instead of
    propagating; start/finish timestamps are recorded in either case.
    """
    try:
        with rutils.Timer() as timer:
            self.run()
    except Exception as exc:
        LOG.exception("Hook %s failed during run." % self.get_name())
        self.set_error(*utils.format_exc(exc))

    self._started_at = timer.timestamp()
    self._finished_at = timer.finish_timestamp()
    self._result["started_at"] = self._started_at
    self._result["finished_at"] = self._finished_at
def _run_scenario_once(args):
    """Run a scenario method once and return the iteration result.

    :param args: single tuple of (iteration, cls, method_name,
        context_obj, kwargs)
    :returns: dict with duration, timestamp, idle_duration, error,
        output and atomic_actions of this iteration
    """
    iteration, cls, method_name, context_obj, kwargs = args
    LOG.info("Task %(task)s | ITER: %(iteration)s START"
             % {"task": context_obj["task"]["uuid"], "iteration": iteration})
    context_obj["iteration"] = iteration
    scenario_inst = cls(context_obj)
    error = []
    # Default (empty) output; may be replaced below.
    output = {"additive": [], "complete": []}
    try:
        with rutils.Timer() as timer:
            # NOTE(amaretskiy): Output as return value is deprecated
            #     but supported for backward compatibility
            deprecated_output = getattr(scenario_inst, method_name)(**kwargs)
        warning = ""
        if deprecated_output:
            warning = ("Returning output data by scenario is deprecated "
                       "in favor of calling add_output().")
        if scenario_inst._output != {"complete": [], "additive": []}:
            # add_output() was used; it takes precedence over the
            # deprecated return value.
            output = scenario_inst._output
            if deprecated_output:
                warning += (" Output data both returned and passed to "
                            "add_output() so returned one is ignored!")
        elif deprecated_output:
            # Only the deprecated return value is present: convert it
            # into the new "additive" output format.
            output["additive"].append(
                {"title": "Scenario output",
                 "description": "",
                 "chart_plugin": "StackedArea",
                 "data": [list(item)
                          for item in deprecated_output["data"].items()]})
        if warning:
            LOG.warning(warning)
    except Exception as e:
        error = utils.format_exc(e)
        if logging.is_debug():
            LOG.exception(e)
    finally:
        # error, when set, is a sequence whose first two items are the
        # exception type and message (see utils.format_exc).
        status = "Error %s: %s" % tuple(error[0:2]) if error else "OK"
        LOG.info("Task %(task)s | ITER: %(iteration)s END: %(status)s"
                 % {"task": context_obj["task"]["uuid"],
                    "iteration": iteration, "status": status})
    return {"duration": timer.duration() - scenario_inst.idle_duration(),
            "timestamp": timer.timestamp(),
            "idle_duration": scenario_inst.idle_duration(),
            "error": error,
            "output": output,
            "atomic_actions": scenario_inst.atomic_actions()}
def _thread_method(self):
    """Thread target: run the hook and validate its result schema."""
    # Run hook synchronously
    self.run_sync()
    try:
        self.validate_result_schema()
    except jsonschema.ValidationError as exc:
        LOG.error(_LE("Hook %s returned result in wrong format.")
                  % self.get_name())
        LOG.exception(exc)
        self._result = self._format_result(
            status=consts.HookStatus.VALIDATION_FAILED,
            error=utils.format_exc(exc),
        )
def setup(self):
    """Creates environment by executing provided context plugins.

    Records per-plugin timing, atomic actions and error information in
    self._data; re-raises the first setup failure.
    """
    self._visited = []
    for ctx in self._get_sorted_context_lst():
        fullname = ctx.get_fullname()
        # Identical empty skeletons for the setup and cleanup records.
        step = {"started_at": None, "finished_at": None,
                "atomic_actions": None, "error": None}
        ctx_data = {"plugin_name": fullname,
                    "plugin_cfg": ctx.config,
                    "setup": dict(step),
                    "cleanup": dict(step)}
        self._data[fullname] = ctx_data
        self._visited.append(ctx)

        msg = ("%(log_prefix)s Context %(name)s setup() "
               % {"log_prefix": self._log_prefix(), "name": fullname})

        timer = utils.Timer()
        try:
            with timer:
                ctx.setup()
        except Exception as exc:
            ctx_data["setup"]["error"] = task_utils.format_exc(exc)
            raise
        finally:
            # Timing/atomic-action bookkeeping happens whether setup
            # succeeded or failed.
            ctx_data["setup"]["atomic_actions"] = ctx.atomic_actions()
            ctx_data["setup"]["started_at"] = timer.timestamp()
            ctx_data["setup"]["finished_at"] = timer.finish_timestamp()
            LOG.info("%(msg)s finished in %(duration)s"
                     % {"msg": msg, "duration": timer.duration(fmt=True)})

    return self.context_obj
def cleanup(self):
    """Cleans up environment by executing provided context plugins.

    Plugins are cleaned up in reverse setup order. Failures are logged
    and recorded but never raised, so one failing plugin does not stop
    the cleanup of the remaining ones.
    """
    ctxlst = self._visited or self._get_sorted_context_lst()
    for ctx in ctxlst[::-1]:
        ctx.reset_atomic_actions()
        msg = ("%(log_prefix)s Context %(name)s cleanup()"
               % {"log_prefix": self._log_prefix(),
                  "name": ctx.get_fullname()})
        # NOTE(andreykurilin): In our own code ctx_data is always
        #   present. The `ctx_data is None` checks below exist only for
        #   "disaster cleanup". That is not an official out-of-the-box
        #   feature, but some folks have their own scripts based on
        #   ContextManager and it would be nice not to break them.
        ctx_data = None
        if ctx.get_fullname() in self._data:
            ctx_data = self._data[ctx.get_fullname()]
        timer = utils.Timer()
        try:
            with timer:
                LOG.info("%s started" % msg)
                ctx.cleanup()
            LOG.info("%(msg)s finished in %(duration)s"
                     % {"msg": msg, "duration": timer.duration(fmt=True)})
        except Exception as exc:
            # Log and record the failure; cleanup must keep going.
            LOG.exception("%(msg)s failed after %(duration)s"
                          % {"msg": msg,
                             "duration": timer.duration(fmt=True)})
            if ctx_data is not None:
                ctx_data["cleanup"]["error"] = task_utils.format_exc(exc)
        finally:
            # Timing/atomic-action bookkeeping happens on both the
            # success and the failure path.
            if ctx_data is not None:
                aa = ctx.atomic_actions()
                ctx_data["cleanup"]["atomic_actions"] = aa
                ctx_data["cleanup"]["started_at"] = timer.timestamp()
                finished_at = timer.finish_timestamp()
                ctx_data["cleanup"]["finished_at"] = finished_at
def setup(self):
    """Creates environment by executing provided context plugins.

    For every plugin a bookkeeping record (timings, atomic actions,
    error) is stored in self._data; the first setup failure is
    re-raised after being recorded.
    """
    self._visited = []
    for ctx in self._get_sorted_context_lst():
        fullname = ctx.get_fullname()
        empty_step = {"started_at": None, "finished_at": None,
                      "atomic_actions": None, "error": None}
        ctx_data = {"plugin_name": fullname,
                    "plugin_cfg": ctx.config,
                    "setup": dict(empty_step),
                    "cleanup": dict(empty_step)}
        self._data[fullname] = ctx_data
        self._visited.append(ctx)

        msg = ("%(log_prefix)s Context %(name)s setup() "
               % {"log_prefix": self._log_prefix(), "name": fullname})

        timer = utils.Timer()
        try:
            with timer:
                ctx.setup()
        except Exception as exc:
            ctx_data["setup"]["error"] = task_utils.format_exc(exc)
            raise
        finally:
            # Record timing data on both success and failure paths.
            ctx_data["setup"]["atomic_actions"] = ctx.atomic_actions()
            ctx_data["setup"]["started_at"] = timer.timestamp()
            ctx_data["setup"]["finished_at"] = timer.finish_timestamp()
            LOG.info("%(msg)s finished in %(duration)s"
                     % {"msg": msg, "duration": timer.duration(fmt=True)})

    return self.context_obj
def cleanup(self):
    """Cleans up environment by executing provided context plugins.

    Runs plugin cleanup() in reverse setup order; failures are logged
    and recorded, never raised, so every plugin gets its chance to
    clean up.
    """
    ctxlst = self._visited or self._get_sorted_context_lst()
    for ctx in ctxlst[::-1]:
        ctx.reset_atomic_actions()
        msg = ("%(log_prefix)s Context %(name)s cleanup()"
               % {"log_prefix": self._log_prefix(),
                  "name": ctx.get_fullname()})
        # NOTE(andreykurilin): In our own code ctx_data is always
        #   present. The `ctx_data is None` checks below exist only for
        #   "disaster cleanup". That is not an official out-of-the-box
        #   feature, but some folks have their own scripts based on
        #   ContextManager and it would be nice not to break them.
        ctx_data = None
        if ctx.get_fullname() in self._data:
            ctx_data = self._data[ctx.get_fullname()]
        timer = utils.Timer()
        try:
            with timer:
                LOG.info("%s started" % msg)
                ctx.cleanup()
            LOG.info("%(msg)s finished in %(duration)s"
                     % {"msg": msg, "duration": timer.duration(fmt=True)})
        except Exception as exc:
            # Log and record the failure; keep cleaning the rest.
            LOG.exception(
                "%(msg)s failed after %(duration)s"
                % {"msg": msg, "duration": timer.duration(fmt=True)})
            if ctx_data is not None:
                ctx_data["cleanup"]["error"] = task_utils.format_exc(exc)
        finally:
            # Timing/atomic-action bookkeeping happens on both the
            # success and the failure path.
            if ctx_data is not None:
                aa = ctx.atomic_actions()
                ctx_data["cleanup"]["atomic_actions"] = aa
                ctx_data["cleanup"]["started_at"] = timer.timestamp()
                finished_at = timer.finish_timestamp()
                ctx_data["cleanup"]["finished_at"] = finished_at
def _run_scenario_once(args):
    """Run a scenario method once and return the iteration result.

    :param args: single tuple of (iteration, cls, method_name,
        context_obj, kwargs)
    :returns: dict with duration, timestamp, idle_duration, error,
        output and atomic_actions of this iteration
    """
    iteration, cls, method_name, context_obj, kwargs = args
    LOG.info("Task %(task)s | ITER: %(iteration)s START"
             % {"task": context_obj["task"]["uuid"],
                "iteration": iteration})
    context_obj["iteration"] = iteration
    scenario_inst = cls(context_obj)
    error = []
    # Default (empty) output; may be replaced below.
    output = {"additive": [], "complete": []}
    try:
        with rutils.Timer() as timer:
            # NOTE(amaretskiy): Output as return value is deprecated
            #     but supported for backward compatibility
            deprecated_output = getattr(scenario_inst, method_name)(**kwargs)
        warning = ""
        if deprecated_output:
            warning = ("Returning output data by scenario is deprecated "
                       "in favor of calling add_output().")
        if scenario_inst._output != {"complete": [], "additive": []}:
            # add_output() was used; it takes precedence over the
            # deprecated return value.
            output = scenario_inst._output
            if deprecated_output:
                warning += (" Output data both returned and passed to "
                            "add_output() so returned one is ignored!")
        elif deprecated_output:
            # Only the deprecated return value is present: convert it
            # into the new "additive" output format.
            output["additive"].append({
                "title": "Scenario output",
                "description": "",
                "chart_plugin": "StackedArea",
                "data": [list(item)
                         for item in deprecated_output["data"].items()]
            })
        if warning:
            LOG.warning(warning)
    except Exception as e:
        error = utils.format_exc(e)
        if logging.is_debug():
            LOG.exception(e)
    finally:
        # error, when set, is a sequence whose first two items are the
        # exception type and message (see utils.format_exc).
        status = "Error %s: %s" % tuple(error[0:2]) if error else "OK"
        LOG.info(
            "Task %(task)s | ITER: %(iteration)s END: %(status)s"
            % {"task": context_obj["task"]["uuid"],
               "iteration": iteration, "status": status})
    return {
        "duration": timer.duration() - scenario_inst.idle_duration(),
        "timestamp": timer.timestamp(),
        "idle_duration": scenario_inst.idle_duration(),
        "error": error,
        "output": output,
        "atomic_actions": scenario_inst.atomic_actions()
    }