def publish_variables(task_ex, task_spec):
    if task_ex.state not in [states.SUCCESS, states.ERROR]:
        return

    wf_ex = task_ex.workflow_execution

    expr_ctx = ContextView(
        get_current_task_dict(task_ex),
        task_ex.in_context,
        get_workflow_environment_dict(wf_ex),
        wf_ex.context,
        wf_ex.input
    )

    if task_ex.name in expr_ctx:
        LOG.warning(
            'Shadowing context variable with task name while '
            'publishing: %s',
            task_ex.name
        )

    publish_spec = task_spec.get_publish(task_ex.state)

    if not publish_spec:
        return

    # Publish branch variables.
    branch_vars = publish_spec.get_branch()

    task_ex.published = expr.evaluate_recursively(branch_vars, expr_ctx)

    # Publish global variables.
    global_vars = publish_spec.get_global()

    utils.merge_dicts(
        task_ex.workflow_execution.context,
        expr.evaluate_recursively(global_vars, expr_ctx)
    )
def test_output(self):
    tests = [
        ({'output': None}, False),
        ({'output': False}, False),
        ({'output': 12345}, False),
        ({'output': 0.12345}, False),
        ({'output': 'foobar'}, False),
        ({'output': '<% $.x %>'}, False),
        ({'output': '<% * %>'}, True),
        ({'output': '{{ _.x }}'}, False),
        ({'output': '{{ * }}'}, True),
        ({'output': ['v1']}, False),
        ({'output': {'k1': 'v1'}}, False)
    ]

    actions = {'a1': {'base': 'foobar'}}

    for outputs, expect_error in tests:
        overlay = {'actions': copy.deepcopy(actions)}

        utils.merge_dicts(overlay['actions']['a1'], outputs)

        self._parse_dsl_spec(changes=overlay, expect_error=expect_error)
def _parse_dsl_spec(self, dsl_file=None, add_tasks=False, changes=None,
                    expect_error=False):
    if dsl_file and add_tasks:
        raise Exception(
            'The add_tasks option is not a valid '
            'combination with the dsl_file option.'
        )

    if dsl_file:
        dsl_yaml = base.get_resource(self._resource_path + '/' + dsl_file)

        if changes:
            dsl_dict = safe_yaml.safe_load(dsl_yaml)

            utils.merge_dicts(dsl_dict, changes)

            dsl_yaml = safe_yaml.safe_dump(
                dsl_dict,
                default_flow_style=False
            )
    else:
        dsl_dict = copy.deepcopy(self._dsl_blank)

        if add_tasks:
            dsl_dict['test']['tasks'] = copy.deepcopy(self._dsl_tasks)

        if changes:
            utils.merge_dicts(dsl_dict, changes)

        dsl_yaml = safe_yaml.safe_dump(dsl_dict, default_flow_style=False)

    if not expect_error:
        return self._spec_parser(dsl_yaml)
    else:
        return self.assertRaises(
            exc.DSLParsingException,
            self._spec_parser,
            dsl_yaml
        )
def get_schema(cls, includes=('meta', 'definitions')):
    if cls._full_schema is not None:
        return cls._full_schema

    schema = copy.deepcopy(cls._schema)

    schema['properties'] = utils.merge_dicts(
        schema.get('properties', {}),
        cls._meta_schema.get('properties', {}),
        overwrite=False
    )

    if includes and 'meta' in includes:
        schema['required'] = list(
            set(schema.get('required', []) +
                cls._meta_schema.get('required', []))
        )

    if includes and 'definitions' in includes:
        schema['definitions'] = utils.merge_dicts(
            schema.get('definitions', {}),
            cls._definitions,
            overwrite=False
        )

    cls._full_schema = schema

    return schema
def _get_published_global_from_tasks(task_execs, wf_ex):
    wf_published_global_vars = {}

    for task_ex in task_execs:
        published_global_vars = task.get_published_global(task_ex, wf_ex)

        if published_global_vars:
            merge_dicts(wf_published_global_vars, published_global_vars)

    return wf_published_global_vars
def test_base_input(self):
    tests = [
        ({'base-input': {}}, True),
        ({'base-input': None}, True),
        ({'base-input': {'k1': 'v1', 'k2': '<% $.v2 %>'}}, False),
        ({'base-input': {'k1': 'v1', 'k2': '<% * %>'}}, True),
        ({'base-input': {'k1': 'v1', 'k2': '{{ _.v2 }}'}}, False),
        ({'base-input': {'k1': 'v1', 'k2': '{{ * }}'}}, True)
    ]

    actions = {'a1': {'base': 'foobar'}}

    for base_inputs, expect_error in tests:
        overlay = {'actions': copy.deepcopy(actions)}

        utils.merge_dicts(overlay['actions']['a1'], base_inputs)

        self._parse_dsl_spec(changes=overlay, expect_error=expect_error)
def test_keep_result(self):
    tests = [
        ({'keep-result': ''}, True),
        ({'keep-result': []}, True),
        ({'keep-result': 'asd'}, True),
        ({'keep-result': None}, True),
        ({'keep-result': 12345}, True),
        ({'keep-result': True}, False),
        ({'keep-result': False}, False),
        ({'keep-result': "<% 'a' in $.val %>"}, False),
        ({'keep-result': '<% 1 + 2 %>'}, False),
        ({'keep-result': '<% * %>'}, True),
        ({'keep-result': "{{ 'a' in _.val }}"}, False),
        ({'keep-result': '{{ 1 + 2 }}'}, False),
        ({'keep-result': '{{ * }}'}, True)
    ]

    for keep_result, expect_error in tests:
        overlay = {'test': {'tasks': {}}}

        utils.merge_dicts(overlay['test']['tasks'], {'email': keep_result})

        self._parse_dsl_spec(
            add_tasks=True,
            changes=overlay,
            expect_error=expect_error
        )
def test_direct_workflow_invalid_task(self):
    overlay = {'test': {'type': 'direct', 'tasks': {}}}
    requires = {'requires': ['echo', 'get']}

    utils.merge_dicts(overlay['test']['tasks'], {'email': requires})

    self._parse_dsl_spec(add_tasks=True, changes=overlay, expect_error=True)
def test_direct_workflow_invalid_join(self):
    tests = [
        ({'task3': {'join': 2}}, False),
        ({'task3': {'join': 5}}, True),
        ({'task3': {'join': 1}}, False),
        ({'task3': {'join': 'one'}}, False),
        ({'task3': {'join': 'all'}}, False),
        ({'task4': {'join': 'all'}}, True),
        ({'task4': {'join': 1}}, True),
        ({'task4': {'join': 'one'}}, True)
    ]

    for test in tests:
        overlay = {
            'test': {
                'type': 'direct',
                'tasks': {
                    'task1': {'on-complete': 'task3'},
                    'task2': {'on-complete': 'task3'}
                }
            }
        }

        utils.merge_dicts(overlay['test']['tasks'], test[0])

        self._parse_dsl_spec(
            add_tasks=False,
            changes=overlay,
            expect_error=test[1]
        )
def add_workflow_variables_to_context(wf_ex, wf_spec):
    wf_ex.context = wf_ex.context or {}

    # The context for calculating workflow variables is workflow input
    # and other data already stored in workflow initial context.
    ctx_view = ContextView(
        get_workflow_environment_dict(wf_ex),
        wf_ex.context,
        wf_ex.input
    )

    wf_vars = expr.evaluate_recursively(wf_spec.get_vars(), ctx_view)

    utils.merge_dicts(wf_ex.context, wf_vars)
def _process_action_and_workflow(self):
    params = {}

    if self._action:
        self._action, params = self._parse_cmd_and_input(self._action)
    elif self._workflow:
        self._workflow, params = self._parse_cmd_and_input(self._workflow)
    else:
        self._action = 'std.noop'

    utils.merge_dicts(self._input, params)
def test_reverse_workflow(self):
    overlay = {'test': {'type': 'reverse', 'tasks': {}}}
    require = {'requires': ['echo', 'get']}

    utils.merge_dicts(overlay['test']['tasks'], {'email': require})

    wfs_spec = self._parse_dsl_spec(
        add_tasks=True,
        changes=overlay,
        expect_error=False
    )

    self.assertEqual(1, len(wfs_spec.get_workflows()))
    self.assertEqual('test', wfs_spec.get_workflows()[0].get_name())
    self.assertEqual('reverse', wfs_spec.get_workflows()[0].get_type())
def evaluate_upstream_context(upstream_task_execs):
    published_vars = {}
    ctx = {}

    for t_ex in upstream_task_execs:
        # TODO(rakhmerov): These two merges look confusing. So it's a
        # temporary solution. There's still the bug
        # https://bugs.launchpad.net/mistral/+bug/1424461 that needs to be
        # fixed using context variable versioning.
        published_vars = utils.merge_dicts(published_vars, t_ex.published)

        utils.merge_dicts(ctx, evaluate_task_outbound_context(t_ex))

    return utils.merge_dicts(ctx, published_vars)
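# A minimal, hypothetical sketch (not Mistral's actual utils.merge_dicts) of
# the merge semantics the calls above appear to rely on: the right-hand dict
# is folded recursively into the left-hand dict in place and the left dict is
# returned, so nested dicts are combined rather than replaced and, with the
# default overwrite=True, later merges win on key conflicts.
def _merge_dicts_sketch(left, right, overwrite=True):
    for k, v in right.items():
        if isinstance(left.get(k), dict) and isinstance(v, dict):
            # Both sides hold dicts: merge them recursively.
            _merge_dicts_sketch(left[k], v, overwrite)
        elif overwrite or k not in left:
            # Otherwise copy the value over, unless overwriting is disabled
            # and the key already exists on the left.
            left[k] = v
    return left

# Example: two upstream tasks publishing into the same context.
# _merge_dicts_sketch({'a': {'x': 1}}, {'a': {'y': 2}, 'b': 3})
# -> {'a': {'x': 1, 'y': 2}, 'b': 3}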
def __init__(self, data, validate):
    super(ActionSpec, self).__init__(data, validate)

    self._name = data['name']
    self._description = data.get('description')
    self._tags = data.get('tags', [])
    self._base = data['base']
    self._base_input = data.get('base-input', {})
    self._input = utils.get_dict_from_entries(data.get('input', []))
    self._output = data.get('output')

    self._base, _input = self._parse_cmd_and_input(self._base)

    utils.merge_dicts(self._base_input, _input)
def test_input(self):
    tests = [
        ({'input': ''}, True),
        ({'input': []}, True),
        ({'input': ['']}, True),
        ({'input': None}, True),
        ({'input': ['k1', 'k2']}, False),
        ({'input': ['k1', 12345]}, True),
        ({'input': ['k1', {'k2': 2}]}, False),
        ({'input': [{'k1': 1}, {'k2': 2}]}, False),
        ({'input': [{'k1': None}]}, False),
        ({'input': [{'k1': 1}, {'k1': 1}]}, True),
        ({'input': [{'k1': 1, 'k2': 2}]}, True)
    ]

    actions = {'a1': {'base': 'foobar'}}

    for inputs, expect_error in tests:
        overlay = {'actions': copy.deepcopy(actions)}

        utils.merge_dicts(overlay['actions']['a1'], inputs)

        self._parse_dsl_spec(changes=overlay, expect_error=expect_error)
class ReverseWorkflowTaskSpec(TaskSpec):
    _polymorphic_value = 'reverse'

    _reverse_workflow_schema = {
        "type": "object",
        "properties": {
            "type": {"enum": [_polymorphic_value]},
            "requires": {
                "oneOf": [types.NONEMPTY_STRING, types.UNIQUE_STRING_LIST]
            }
        }
    }

    _schema = utils.merge_dicts(
        copy.deepcopy(TaskSpec._schema),
        _reverse_workflow_schema
    )

    def __init__(self, data, validate):
        super(ReverseWorkflowTaskSpec, self).__init__(data, validate)

        self._requires = data.get('requires', [])

    def get_requires(self):
        if isinstance(self._requires, str):
            return [self._requires]

        return self._requires
def _prepare_runtime_context(self, index, safe_rerun):
    ctx = super(AdHocAction, self)._prepare_runtime_context(
        index,
        safe_rerun
    )

    # Insert a special field into the runtime context so that we track
    # the relationship between the python action and the adhoc action.
    return utils.merge_dicts(
        ctx,
        {'adhoc_action_name': self.adhoc_action_def.name}
    )
def evaluate_workflow_final_context(self):
    ctx = {}

    for batch in self._find_end_task_executions_as_batches():
        for t_ex in batch:
            ctx = utils.merge_dicts(
                ctx,
                data_flow.evaluate_task_outbound_context(t_ex)
            )

    return ctx
def test_merge_dicts_overwrite_false(self):
    left = copy.deepcopy(LEFT)
    right = copy.deepcopy(RIGHT)

    expected = {
        'key1': {
            'key11': "val11",
            'key12': "val12",
            'key13': {'key131': 'val131'}
        },
        'key2': 'val2',
        'key3': 'val3'
    }

    utils.merge_dicts(left, right, overwrite=False)

    self.assertDictEqual(left, expected)
def test_safe_rerun(self):
    tests = [
        ({'safe-rerun': True}, False),
        ({'safe-rerun': False}, False),
        ({'safe-rerun': '<% false %>'}, False),
        ({'safe-rerun': '<% true %>'}, False),
        ({'safe-rerun': '<% * %>'}, True),
        ({'safe-rerun': None}, True)
    ]

    for default, expect_error in tests:
        overlay = {'test': {'task-defaults': {}}}

        utils.merge_dicts(overlay['test']['task-defaults'], default)

        self._parse_dsl_spec(
            add_tasks=True,
            changes=overlay,
            expect_error=expect_error
        )
def update_workflow_execution_env(wf_ex, env):
    if not env:
        return wf_ex

    if wf_ex.state not in [states.IDLE, states.PAUSED, states.ERROR]:
        raise exc.NotAllowedException(
            'Updating the env of a workflow execution is only permitted '
            'if it is in IDLE, PAUSED, or ERROR state.'
        )

    wf_ex.params['env'] = utils.merge_dicts(wf_ex.params['env'], env)

    return wf_ex
def test_join(self):
    tests = [
        ({'join': ''}, True),
        ({'join': None}, True),
        ({'join': 'all'}, False),
        ({'join': 'one'}, False),
        ({'join': 0}, False),
        ({'join': 2}, False),
        ({'join': 3}, True),
        ({'join': '3'}, True),
        ({'join': -3}, True)
    ]

    on_success = {'on-success': ['email']}

    for join, expect_error in tests:
        overlay = {'test': {'tasks': {}}}

        utils.merge_dicts(overlay['test']['tasks'], {'get': on_success})
        utils.merge_dicts(overlay['test']['tasks'], {'echo': on_success})
        utils.merge_dicts(overlay['test']['tasks'], {'email': join})

        self._parse_dsl_spec(
            add_tasks=True,
            changes=overlay,
            expect_error=expect_error
        )
def merge(self, spec_to_merge):
    if spec_to_merge:
        if spec_to_merge.get_branch():
            utils.merge_dicts(self._branch, spec_to_merge.get_branch())

        if spec_to_merge.get_global():
            utils.merge_dicts(self._global, spec_to_merge.get_global())

        if spec_to_merge.get_atomic():
            utils.merge_dicts(self._atomic, spec_to_merge.get_atomic())
def test_requires(self):
    tests = [
        ({'requires': ''}, True),
        ({'requires': []}, True),
        ({'requires': ['']}, True),
        ({'requires': None}, True),
        ({'requires': 12345}, True),
        ({'requires': ['echo']}, False),
        ({'requires': ['echo', 'get']}, False),
        ({'requires': 'echo'}, False)
    ]

    for require, expect_error in tests:
        overlay = {'test': {'tasks': {}}}

        utils.merge_dicts(overlay['test'], {'type': 'reverse'})
        utils.merge_dicts(overlay['test']['tasks'], {'email': require})

        self._parse_dsl_spec(
            add_tasks=True,
            changes=overlay,
            expect_error=expect_error
        )
def _get_action_input(self, ctx=None):
    input_spec = self.task_spec.get_input()

    input_dict = self.evaluate(input_spec, ctx) if input_spec else {}

    if not isinstance(input_dict, dict):
        raise exc.InputException(
            "Wrong dynamic input for task: %s. Dict type is expected. "
            "Actual type: %s. Actual value: %s" %
            (self.task_spec.get_name(), type(input_dict), str(input_dict))
        )

    return utils.merge_dicts(
        input_dict,
        self._get_action_defaults(),
        overwrite=False
    )
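# Hypothetical usage sketch (values invented for illustration, using the
# _merge_dicts_sketch helper defined above): with overwrite=False, the merge
# only fills in keys that are missing from the explicit task input, so action
# defaults never clobber values the task actually provided.
#
# _merge_dicts_sketch({'timeout': 5}, {'timeout': 60, 'retries': 3},
#                     overwrite=False)
# -> {'timeout': 5, 'retries': 3}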
def test_direct_transition(self):
    tests = [
        (['email'], False),
        (['email%'], True),
        ([{'email': '<% 1 %>'}], False),
        ([{'email': '<% 1 %>'}, {'email': '<% 1 %>'}], True),
        ([{'email': '<% 1 %>', 'more_email': '<% 2 %>'}], True),
        (['email'], False),
        ([{'email': '<% 1 %>'}, 'echo'], False),
        ([{'email': '<% $.v1 in $.v2 %>'}], False),
        ([{'email': '<% * %>'}], True),
        ([{'email': '{{ 1 }}'}], False),
        ([{'email': '{{ 1 }}'}, 'echo'], False),
        ([{'email': '{{ _.v1 in _.v2 }}'}], False),
        ([{'email': '{{ * }}'}], True),
        ('email', False),
        ('fail msg="<% task().result %>"', False),
        ('fail(msg=<% task() %>)', False),
        (None, True),
        ([''], True),
        ([], True),
        (['email', 'email'], True),
        (['email', 12345], True)
    ]

    for on_clause_key in ['on-error', 'on-success', 'on-complete']:
        for on_clause_value, expect_error in tests:
            overlay = {'test': {'tasks': {}}}

            utils.merge_dicts(
                overlay['test']['tasks'],
                {'get': {on_clause_key: on_clause_value}}
            )

            self._parse_dsl_spec(
                add_tasks=True,
                changes=overlay,
                expect_error=expect_error
            )
def test_inputs(self):
    tests = [
        ({'input': ''}, True),
        ({'input': {}}, True),
        ({'input': None}, True),
        ({'input': {'k1': 'v1'}}, False),
        ({'input': {'k1': '<% $.v1 %>'}}, False),
        ({'input': {'k1': '<% 1 + 2 %>'}}, False),
        ({'input': {'k1': '<% * %>'}}, True),
        ({'input': {'k1': '{{ _.v1 }}'}}, False),
        ({'input': {'k1': '{{ 1 + 2 }}'}}, False),
        ({'input': {'k1': '{{ * }}'}}, True)
    ]

    for task_input, expect_error in tests:
        overlay = {'test': {'tasks': {'task1': {'action': 'test.mock'}}}}

        utils.merge_dicts(overlay['test']['tasks']['task1'], task_input)

        self._parse_dsl_spec(
            add_tasks=False,
            changes=overlay,
            expect_error=expect_error
        )
def _fail_workflow(self, final_context, msg):
    if states.is_paused_or_completed(self.wf_ex.state):
        return

    output_on_error = {}

    try:
        output_on_error = data_flow.evaluate_workflow_output(
            self.wf_ex,
            self.wf_spec.get_output_on_error(),
            final_context
        )
    except exc.MistralException as e:
        msg = (
            "Failed to evaluate expression in output-on-error! "
            "(output-on-error: '%s', exception: '%s', cause: '%s')" %
            (self.wf_spec.get_output_on_error(), e, msg)
        )

        LOG.error(msg)

    if not self.set_state(states.ERROR, state_info=msg):
        return

    # When we set an ERROR state we should set the output value safely,
    # without exceptions caused by field size limitations.
    length_output_on_error = len(str(output_on_error).encode("utf-8"))
    total_output_length = utils.get_number_of_chars_from_kilobytes(
        cfg.CONF.engine.execution_field_size_limit_kb
    )

    if length_output_on_error < total_output_length:
        msg = utils.cut_by_char(
            msg,
            total_output_length - length_output_on_error
        )
    else:
        msg = utils.cut_by_kb(
            msg,
            cfg.CONF.engine.execution_field_size_limit_kb
        )

    self.wf_ex.output = utils.merge_dicts({'result': msg}, output_on_error)

    # Publish event.
    self.notify(events.WORKFLOW_FAILED)

    if self.wf_ex.task_execution_id:
        self._send_result_to_parent_workflow()
def test_reverse_workflow_invalid_task(self):
    overlay = {'test': {'type': 'reverse', 'tasks': {}}}
    join = {'join': 'all'}
    on_success = {'on-success': ['email']}

    utils.merge_dicts(overlay['test']['tasks'], {'get': on_success})
    utils.merge_dicts(overlay['test']['tasks'], {'echo': on_success})
    utils.merge_dicts(overlay['test']['tasks'], {'email': join})

    self._parse_dsl_spec(add_tasks=True, changes=overlay, expect_error=True)
def _get_input_dicts(self, with_items_values):
    """Calculate input dictionaries for another portion of actions.

    :return: a list of tuples containing indexes and corresponding
        input dicts.
    """
    result = []

    for i in self._get_next_indexes():
        ctx = {}

        for k, v in with_items_values.items():
            ctx.update({k: v[i]})

        ctx = utils.merge_dicts(ctx, self.ctx)

        result.append((i, self._get_action_input(ctx)))

    return result