def after_task_complete(self, task_ex, task_spec):
    """Called right after task completes.

    :param task_ex: Completed task DB model.
    :param task_spec: Completed task specification.
    """
    wf_ex = task_ex.workflow_execution

    # Re-evaluate this object's expression-based fields against the
    # task's inbound context layered over the workflow context/input.
    data_flow.evaluate_object_fields(
        self,
        data_flow.ContextView(
            task_ex.in_context,
            wf_ex.context,
            wf_ex.input
        )
    )

    self._validate()
def before_task_start(self, task_ex, task_spec):
    """Called right before task start.

    :param task_ex: DB model for task that is about to start.
    :param task_spec: Task specification.
    """
    wf_ex = task_ex.workflow_execution

    # Evaluate expression-based fields of this object in the scope of
    # the task's inbound context plus workflow context and input.
    data_flow.evaluate_object_fields(
        self,
        data_flow.ContextView(
            task_ex.in_context,
            wf_ex.context,
            wf_ex.input
        )
    )

    self._validate()
def all_errors_handled(self):
    """Check whether every failed task has an applicable 'on-error' route.

    :return: True if each task execution in ERROR state resolves to at
        least one next task via its 'on-error' clause, False otherwise.
    """
    def _has_error_route(err_task):
        # Evaluate the clause against the failed task's outbound context
        # layered over the workflow context and input.
        view = data_flow.ContextView(
            data_flow.evaluate_task_outbound_context(err_task),
            self.wf_ex.context,
            self.wf_ex.input
        )

        return bool(
            self._find_next_tasks_for_clause(
                self.wf_spec.get_on_error_clause(err_task.name),
                view
            )
        )

    return all(
        _has_error_route(t)
        for t in lookup_utils.find_error_task_executions(self.wf_ex.id)
    )
def test_context_view_as_nested_json(self):
    """Serialize a dict containing a ContextView and check both layers.

    Both source dicts of the view must be visible in the JSON output.
    """
    ctx = data_flow.ContextView(
        {'k1': 'v1'},
        {'k2': 'v2'},
    )

    d = {'root': ctx}

    json_str = utils.to_json_str(d)

    self.assertIsNotNone(json_str)
    self.assertNotEqual('{"root": {}}', json_str)

    # We can't use regular dict comparison because key order
    # is not defined.
    self.assertIn('"k1": "v1"', json_str)
    # Bug fix: this previously re-checked "k1" twice, so the second
    # source dict of the view was never actually verified.
    self.assertIn('"k2": "v2"', json_str)
    self.assertIn('"root"', json_str)
def _get_action_input(self, ctx=None):
    """Evaluate the task's 'input' section into a concrete action input.

    :param ctx: Optional explicit context; falls back to self.ctx.
    :return: dict of evaluated input with action defaults merged in
        (explicitly provided values win over defaults).
    """
    view = data_flow.ContextView(
        ctx or self.ctx,
        self.wf_ex.context,
        self.wf_ex.input
    )

    evaluated = expr.evaluate_recursively(
        self.task_spec.get_input(),
        view
    )

    # Defaults must not overwrite explicitly evaluated values.
    return utils.merge_dicts(
        evaluated,
        self._get_action_defaults(),
        overwrite=False
    )
def _prepare_input(self, input_dict):
    """Resolve the base input through the chain of adhoc action specs.

    :param input_dict: Input provided by the caller; missing keys are
        filled with defaults declared by the action spec.
    :return: Result of the parent class' _prepare_input applied to the
        fully resolved base input.
    """
    # Fill in spec-declared defaults for keys the caller did not set.
    for name, default in self.action_spec.get_input().items():
        if name not in input_dict or input_dict[name] is utils.NotDefined:
            input_dict[name] = default

    base_input = input_dict

    # Walk the adhoc action chain, re-evaluating 'base-input' at each
    # level against the input produced by the previous level.
    for action_def in self.adhoc_action_defs:
        spec = spec_parser.get_action_spec(action_def.spec)

        base_expr = spec.get_base_input()

        if not base_expr:
            base_input = {}
            continue

        base_input = expr.evaluate_recursively(
            base_expr,
            data_flow.ContextView(base_input, self.task_ctx, self.wf_ctx)
        )

    return super(AdHocAction, self)._prepare_input(base_input)
def get_published_global(task_ex, wf_ex=None):
    """Evaluate a completed task's globally published variables.

    :param task_ex: Task execution DB model.
    :param wf_ex: Optional workflow execution; looked up from task_ex
        when not given.
    :return: dict of evaluated 'global' publish variables, or None if
        the task is not in SUCCESS/ERROR state or publishes nothing.
    """
    if task_ex.state not in (states.SUCCESS, states.ERROR):
        return

    if wf_ex is None:
        wf_ex = task_ex.workflow_execution

    # Context layering: current task dict shadows the task's inbound
    # context, then environment, workflow context and workflow input.
    view = data_flow.ContextView(
        data_flow.get_current_task_dict(task_ex),
        task_ex.in_context,
        data_flow.get_workflow_environment_dict(wf_ex),
        wf_ex.context,
        wf_ex.input
    )

    publish_spec = spec_parser.get_task_spec(task_ex.spec).get_publish(
        task_ex.state
    )

    if not publish_spec:
        return

    return expr.evaluate_recursively(publish_spec.get_global(), view)
def _prepare_input(self, input_dict):
    """Resolve and memoize base input through the adhoc action chain.

    :param input_dict: Caller-provided input for the top-level action.
    :return: Memoized result of the parent class' _prepare_input applied
        to the fully resolved base input.
    """
    # Memoized: prepare only once per action instance.
    if self._prepared_input is not None:
        return self._prepared_input

    base_input_dict = input_dict

    for action_def in self.adhoc_action_defs:
        action_spec = spec_parser.get_action_spec(action_def.spec)

        # Fill in spec-declared defaults for keys not set by the caller.
        for k, v in action_spec.get_input().items():
            if (k not in base_input_dict
                    or base_input_dict[k] is utils.NotDefined):
                base_input_dict[k] = v

        base_input_expr = action_spec.get_base_input()

        if base_input_expr:
            # self.task_ex may be None (e.g. standalone action runs) —
            # the environment dict helper must tolerate that.
            wf_ex = (
                self.task_ex.workflow_execution if self.task_ex else None
            )

            ctx_view = data_flow.ContextView(
                base_input_dict,
                self.task_ctx,
                data_flow.get_workflow_environment_dict(wf_ex),
                self.wf_ctx
            )

            # The evaluated 'base-input' becomes the input of the next
            # (deeper) level of the adhoc chain.
            base_input_dict = expr.evaluate_recursively(
                base_input_expr,
                ctx_view
            )
        else:
            # No 'base-input' section: the nested action takes no input.
            base_input_dict = {}

    self._prepared_input = super(AdHocAction, self)._prepare_input(
        base_input_dict
    )

    return self._prepared_input
def _on_visit(action_desc, prev_res):
    """Visit one level of the adhoc action hierarchy.

    Closure variables (from the enclosing scope): ``self``,
    ``input_dict`` and ``wf_ctx`` — TODO confirm against the enclosing
    function.

    :param action_desc: Descriptor of the action being visited.
    :param prev_res: Tuple produced by the previous visit:
        (base action descriptor, accumulated base input dict).
    :return: Tuple (base action descriptor, resolved base input dict).
    """
    if action_desc is self:
        # Top of the chain: start from the caller-provided input.
        base_action_desc = None
        base_input_dict = input_dict
    else:
        base_action_desc = action_desc
        base_input_dict = prev_res[1]

    # Non-adhoc actions terminate the resolution — pass through as-is.
    if not isinstance(action_desc, AdHocActionDescriptor):
        return base_action_desc, base_input_dict

    # Fill in spec-declared defaults for keys not already set.
    for k, v in action_desc.spec.get_input().items():
        if (k not in base_input_dict
                or base_input_dict[k] is utils.NotDefined):
            base_input_dict[k] = v

    ctx = data_flow.ContextView(base_input_dict, wf_ctx)

    # Evaluate this level's 'base-input' to feed the next level.
    base_input_dict = expr.evaluate_recursively(
        action_desc.spec.get_base_input(), ctx)

    return base_action_desc, base_input_dict
def _find_next_tasks(self, task_ex, ctx=None):
    """Compute tasks triggered by the given task's terminal state.

    :param task_ex: Task execution DB model.
    :param ctx: Optional context; defaults to the task's evaluated
        outbound context.
    :return: List of (task_name, params, event_name) tuples where
        event_name is 'on-success', 'on-error' or 'on-complete'.
    """
    state = task_ex.state
    name = task_ex.name

    view = data_flow.ContextView(
        data_flow.get_current_task_dict(task_ex),
        ctx or data_flow.evaluate_task_outbound_context(task_ex),
        self.wf_ex.context,
        self.wf_ex.input
    )

    # Collect the applicable (clause, event) pairs first, then resolve
    # them uniformly below.
    clauses = []

    if state == states.SUCCESS:
        clauses.append(
            (self.wf_spec.get_on_success_clause(name), 'on-success')
        )
    elif state == states.ERROR:
        clauses.append(
            (self.wf_spec.get_on_error_clause(name), 'on-error')
        )

    # 'on-complete' fires for any completed, non-cancelled state and may
    # apply in addition to 'on-success'/'on-error'.
    if states.is_completed(state) and not states.is_cancelled(state):
        clauses.append(
            (self.wf_spec.get_on_complete_clause(name), 'on-complete')
        )

    result = []

    for clause, event in clauses:
        for t in self._find_next_tasks_for_clause(clause, view):
            result.append((t[0], t[1], event))

    return result
def _find_next_tasks(self, task_ex, ctx=None):
    """Compute tasks triggered by the given task's terminal state.

    :param task_ex: Task execution DB model.
    :param ctx: Optional context; defaults to the task's evaluated
        outbound context.
    :return: List of next task names and parameters resolved from the
        'on-complete', 'on-error' and 'on-success' clauses.
    """
    state = task_ex.state
    name = task_ex.name

    view = data_flow.ContextView(
        ctx or data_flow.evaluate_task_outbound_context(task_ex),
        self.wf_ex.context,
        self.wf_ex.input
    )

    next_tasks = []

    # 'on-complete' applies to any completed, non-cancelled state and is
    # evaluated first, matching the original clause order.
    if states.is_completed(state) and not states.is_cancelled(state):
        next_tasks.extend(
            self._find_next_tasks_for_clause(
                self.wf_spec.get_on_complete_clause(name),
                view
            )
        )

    if state == states.ERROR:
        next_tasks.extend(
            self._find_next_tasks_for_clause(
                self.wf_spec.get_on_error_clause(name),
                view
            )
        )
    elif state == states.SUCCESS:
        next_tasks.extend(
            self._find_next_tasks_for_clause(
                self.wf_spec.get_on_success_clause(name),
                view
            )
        )

    return next_tasks
def _get_with_items_values(self):
    """Return all values evaluated from the 'with-items' expression.

    Example:
      DSL:
        with-items:
          - var1 in <% $.arrayI %>
          - var2 in <% $.arrayJ %>

      With arrayI = [1, 2, 3] and arrayJ = [a, b, c] the result is:
        {'var1': [1, 2, 3], 'var2': [a, b, c]}

    :return: Evaluated 'with-items' expression values.
    """
    view = data_flow.ContextView(
        self.ctx,
        self.wf_ex.context,
        self.wf_ex.input
    )

    return expr.evaluate_recursively(
        self.task_spec.get_with_items(),
        view
    )
def _get_with_items_input(self):
    """Calculate input array for separating each action input.

    Example:
      DSL:
        with_items:
          - itemX in <% $.arrayI %>
          - itemY in <% $.arrayJ %>

      Assume arrayI = [1, 2], arrayJ = ['a', 'b'].
      with_items_input = {
        "itemX": [1, 2],
        "itemY": ['a', 'b']
      }

      Then we get separated input:
      inputs_per_item = [
        {'itemX': 1, 'itemY': 'a'},
        {'itemX': 2, 'itemY': 'b'}
      ]

    :return: the list of tuples containing indexes and the
        corresponding input dict.
    """
    ctx_view = data_flow.ContextView(
        self.ctx,
        self.wf_ex.context,
        self.wf_ex.input
    )

    with_items_inputs = expr.evaluate_recursively(
        self.task_spec.get_with_items(),
        ctx_view
    )

    with_items.validate_input(with_items_inputs)

    # Transpose {var: [values]} into a per-iteration list of
    # {var: value} dicts (one dict per item index).
    inputs_per_item = []

    for key, value in with_items_inputs.items():
        for index, item in enumerate(value):
            iter_context = {key: item}

            if index >= len(inputs_per_item):
                inputs_per_item.append(iter_context)
            else:
                inputs_per_item[index].update(iter_context)

    # Evaluate the full action input for every iteration, letting the
    # per-item variables shadow the task context.
    action_inputs = []

    for item_input in inputs_per_item:
        new_ctx = utils.merge_dicts(item_input, self.ctx)

        action_inputs.append(self._get_action_input(new_ctx))

    with_items.prepare_runtime_context(
        self.task_ex,
        self.task_spec,
        action_inputs
    )

    # NOTE: these calls mutate the task execution's runtime context
    # (capacity accounting for concurrency limits) — order matters.
    indices = with_items.get_indices_for_loop(self.task_ex)

    with_items.decrease_capacity(self.task_ex, len(indices))

    if indices:
        # itemgetter with a single index returns a bare item instead of
        # a tuple, hence the isinstance check below.
        current_inputs = operator.itemgetter(*indices)(action_inputs)

        return zip(
            indices,
            current_inputs if isinstance(current_inputs, tuple)
            else [current_inputs]
        )

    return []
def after_task_complete(self, task_ex, task_spec):
    """Possible Cases:

    1. state = SUCCESS
       if continue_on is not specified, no need to move to next iteration;
       if current:count achieve retry:count then policy breaks the loop
       (regardless on continue-on condition); otherwise - check
       continue_on condition and if it is True - schedule the next
       iteration, otherwise policy breaks the loop.
    2. retry:count = 5, current:count = 2, state = ERROR,
       state = IDLE/DELAYED, current:count = 3
    3. retry:count = 5, current:count = 4, state = ERROR
       Iterations complete therefore state = #{state}, current:count = 4.
    """
    super(RetryPolicy, self).after_task_complete(task_ex, task_spec)

    # There is nothing to repeat
    if self.count == 0:
        return

    # TODO(m4dcoder): If the task_ex.action_executions and
    # task_ex.workflow_executions collection are not called,
    # then the retry_no in the runtime_context of the task_ex will not
    # be updated accurately. To be exact, the retry_no will be one
    # iteration behind.
    ex = task_ex.executions  # noqa

    context_key = 'retry_task_policy'

    runtime_context = _ensure_context_has_key(
        task_ex.runtime_context,
        context_key
    )

    wf_ex = task_ex.workflow_execution

    ctx_view = data_flow.ContextView(
        data_flow.get_current_task_dict(task_ex),
        data_flow.evaluate_task_outbound_context(task_ex),
        wf_ex.context,
        wf_ex.input
    )

    # Evaluate both clauses up front; which one applies depends on the
    # task's terminal state (SUCCESS -> continue-on, ERROR -> break-on).
    continue_on_evaluation = expressions.evaluate(
        self._continue_on_clause,
        ctx_view
    )

    break_on_evaluation = expressions.evaluate(
        self._break_on_clause,
        ctx_view
    )

    task_ex.runtime_context = runtime_context

    state = task_ex.state

    # Retry only applies to completed, non-cancelled tasks.
    if not states.is_completed(state) or states.is_cancelled(state):
        return

    policy_context = runtime_context[context_key]

    retry_no = 0

    if 'retry_no' in policy_context:
        retry_no = policy_context['retry_no']

        del policy_context['retry_no']

    retries_remain = retry_no < self.count

    # Stop when a successful task has no continue-on clause, ...
    stop_continue_flag = (
        task_ex.state == states.SUCCESS and
        not self._continue_on_clause
    )

    # ... or when the continue-on clause evaluated to False, ...
    stop_continue_flag = (
        stop_continue_flag or
        (self._continue_on_clause and not continue_on_evaluation)
    )

    # ... or when inbound tasks are still incomplete (e.g. joins).
    stop_continue_flag = (
        stop_continue_flag or _has_incomplete_inbound_tasks(task_ex)
    )

    break_triggered = (
        task_ex.state == states.ERROR and
        break_on_evaluation
    )

    if not retries_remain or break_triggered or stop_continue_flag:
        return

    _log_task_delay(task_ex, self.delay)

    # Drop the previous attempt's result before re-running.
    data_flow.invalidate_task_execution_result(task_ex)

    task_ex.state = states.RUNNING_DELAYED

    policy_context['retry_no'] = retry_no + 1
    runtime_context[context_key] = policy_context

    # Schedule the delayed continuation of this task execution.
    scheduler.schedule_call(
        None,
        _CONTINUE_TASK_PATH,
        self.delay,
        task_ex_id=task_ex.id,
    )
def after_task_complete(self, task_ex, task_spec):
    """Possible Cases:

    1. state = SUCCESS
       if continue_on is not specified, no need to move to next iteration;
       if current:count achieve retry:count then policy breaks the loop
       (regardless on continue-on condition); otherwise - check
       continue_on condition and if it is True - schedule the next
       iteration, otherwise policy breaks the loop.
    2. retry:count = 5, current:count = 2, state = ERROR,
       state = IDLE/DELAYED, current:count = 3
    3. retry:count = 5, current:count = 4, state = ERROR
       Iterations complete therefore state = #{state}, current:count = 4.
    """
    super(RetryPolicy, self).after_task_complete(task_ex, task_spec)

    # There is nothing to repeat
    if self.count == 0:
        return

    # TODO(m4dcoder): If the task_ex.action_executions and
    # task_ex.workflow_executions collection are not called,
    # then the retry_no in the runtime_context of the task_ex will not
    # be updated accurately. To be exact, the retry_no will be one
    # iteration behind.
    ex = task_ex.executions  # noqa

    context_key = 'retry_task_policy'

    runtime_context = _ensure_context_has_key(
        task_ex.runtime_context,
        context_key
    )

    wf_ex = task_ex.workflow_execution

    ctx_view = data_flow.ContextView(
        data_flow.get_current_task_dict(task_ex),
        data_flow.evaluate_task_outbound_context(task_ex),
        wf_ex.context,
        wf_ex.input
    )

    # Evaluate both clauses up front; which one applies depends on the
    # task's terminal state (SUCCESS -> continue-on, ERROR -> break-on).
    continue_on_evaluation = expressions.evaluate(
        self._continue_on_clause,
        ctx_view
    )

    break_on_evaluation = expressions.evaluate(
        self._break_on_clause,
        ctx_view
    )

    task_ex.runtime_context = runtime_context

    state = task_ex.state

    # Retry only applies to completed, non-cancelled tasks.
    if not states.is_completed(state) or states.is_cancelled(state):
        return

    policy_context = runtime_context[context_key]

    retry_no = 0

    if 'retry_no' in policy_context:
        retry_no = policy_context['retry_no']

        del policy_context['retry_no']

    retries_remain = retry_no < self.count

    # Stop when a successful task has no continue-on clause, or when
    # the continue-on clause evaluated to False.
    stop_continue_flag = (
        task_ex.state == states.SUCCESS and
        not self._continue_on_clause
    )

    stop_continue_flag = (
        stop_continue_flag or
        (self._continue_on_clause and not continue_on_evaluation)
    )

    break_triggered = (
        task_ex.state == states.ERROR and
        break_on_evaluation
    )

    if not retries_remain or break_triggered or stop_continue_flag:
        return

    # Drop the previous attempt's result before re-running.
    data_flow.invalidate_task_execution_result(task_ex)

    policy_context['retry_no'] = retry_no + 1
    runtime_context[context_key] = policy_context

    # NOTE(vgvoleg): join tasks in direct workflows can't be
    # retried as-is, because these tasks can't start without
    # a correct logical state.
    if hasattr(task_spec, "get_join") and task_spec.get_join():
        from mistral.engine import task_handler as t_h

        _log_task_delay(task_ex, self.delay, states.WAITING)

        task_ex.state = states.WAITING

        t_h._schedule_refresh_task_state(task_ex.id, self.delay)

        return

    _log_task_delay(task_ex, self.delay)

    task_ex.state = states.RUNNING_DELAYED

    # Schedule the delayed continuation via the system scheduler.
    sched = sched_base.get_system_scheduler()

    job = sched_base.SchedulerJob(
        run_after=self.delay,
        func_name=_CONTINUE_TASK_PATH,
        func_args={'task_ex_id': task_ex.id}
    )

    sched.schedule(job)
def test_context_view(self):
    """ContextView behaves like a read-only dict with left precedence."""
    ctx = data_flow.ContextView(
        {'k1': 'v1', 'k11': 'v11', 'k3': 'v3'},
        {'k2': 'v2', 'k21': 'v21', 'k3': 'v32'}
    )

    self.assertIsInstance(ctx, dict)
    self.assertEqual(5, len(ctx))

    for key in ('k1', 'k11', 'k3', 'k2', 'k21'):
        self.assertIn(key, ctx)

    # Lookups work via both [] and get(); unique keys resolve from
    # whichever dict holds them.
    for key, value in (
        ('k1', 'v1'),
        ('k11', 'v11'),
        ('k2', 'v2'),
        ('k21', 'v21'),
    ):
        self.assertEqual(value, ctx[key])
        self.assertEqual(value, ctx.get(key))

    # The duplicated key 'k3' resolves from the first (leftmost) dict.
    self.assertEqual('v3', ctx['k3'])

    self.assertIsNone(ctx.get('Not existing key'))

    # The view is immutable: every mutating operation must raise.
    self.assertRaises(exc.MistralError, ctx.update)
    self.assertRaises(exc.MistralError, ctx.clear)
    self.assertRaises(exc.MistralError, ctx.pop, 'k1')
    self.assertRaises(exc.MistralError, ctx.popitem)
    self.assertRaises(exc.MistralError, ctx.__setitem__, 'k5', 'v5')
    self.assertRaises(exc.MistralError, ctx.__delitem__, 'k2')

    # Expressions see the merged view as well.
    self.assertEqual('v1', expr.evaluate('<% $.k1 %>', ctx))
    self.assertEqual('v2', expr.evaluate('<% $.k2 %>', ctx))
    self.assertEqual('v3', expr.evaluate('<% $.k3 %>', ctx))

    # Now change the order of dictionaries and make sure to have
    # a different for key 'k3'.
    ctx = data_flow.ContextView(
        {'k2': 'v2', 'k21': 'v21', 'k3': 'v32'},
        {'k1': 'v1', 'k11': 'v11', 'k3': 'v3'}
    )

    self.assertEqual('v32', expr.evaluate('<% $.k3 %>', ctx))
def _evaluate_expression(self, expression, ctx=None):
    """Evaluate an expression against the task's layered context.

    :param expression: Expression (or structure of expressions).
    :param ctx: Optional context override; defaults to self.ctx.
    :return: Evaluation result.
    """
    # Layering: current task dict, then (override) context, then the
    # workflow context and workflow input.
    view = data_flow.ContextView(
        data_flow.get_current_task_dict(self.task_ex),
        ctx or self.ctx,
        self.wf_ex.context,
        self.wf_ex.input
    )

    return expr.evaluate_recursively(expression, view)
def _get_target(self, input_dict):
    """Evaluate the task's 'target' section.

    :param input_dict: Action input dict, layered on top of the task
        and workflow contexts for the evaluation.
    :return: Evaluated target value.
    """
    view = data_flow.ContextView(
        input_dict,
        self.ctx,
        self.wf_ex.context,
        self.wf_ex.input
    )

    return expr.evaluate_recursively(
        self.task_spec.get_target(),
        view
    )
def _evaluate_expression(self, expression, ctx=None):
    """Evaluate an expression against the task's layered context.

    :param expression: Expression (or structure of expressions).
    :param ctx: Optional context override; defaults to self.ctx.
    :return: Evaluation result.
    """
    view = data_flow.ContextView(
        ctx or self.ctx,
        self.wf_ex.context,
        self.wf_ex.input
    )

    return expr.evaluate_recursively(expression, view)