Example #1
def publish_variables(task_ex, task_spec):
    if task_ex.state not in [states.SUCCESS, states.ERROR]:
        return

    wf_ex = task_ex.workflow_execution

    expr_ctx = ContextView(task_ex.in_context, wf_ex.context, wf_ex.input)

    if task_ex.name in expr_ctx:
        LOG.warning(
            'Shadowing context variable with task name while '
            'publishing: %s',
            task_ex.name
        )

    publish_spec = task_spec.get_publish(task_ex.state)

    if not publish_spec:
        return

    # Publish branch variables.
    branch_vars = publish_spec.get_branch()

    task_ex.published = expr.evaluate_recursively(branch_vars, expr_ctx)

    # Publish global variables.
    global_vars = publish_spec.get_global()

    utils.merge_dicts(
        task_ex.workflow_execution.context,
        expr.evaluate_recursively(global_vars, expr_ctx)
    )
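
A note on ContextView, which several snippets here use to build the expression context: it behaves like a read-only view over several dicts in which the first dict containing a key wins. The sketch below is a minimal approximation under that assumption, not Mistral's actual implementation (which carries more bookkeeping):

import collections.abc


class ContextView(collections.abc.Mapping):
    """Minimal sketch: read-only lookup across dicts, first match wins."""

    def __init__(self, *dicts):
        self._dicts = [d for d in dicts if d]

    def __getitem__(self, key):
        for d in self._dicts:
            if key in d:
                return d[key]

        raise KeyError(key)

    def __iter__(self):
        seen = set()

        for d in self._dicts:
            for key in d:
                if key not in seen:
                    seen.add(key)
                    yield key

    def __len__(self):
        return sum(1 for _ in self)

Under this model, ContextView(task_ex.in_context, wf_ex.context, wf_ex.input) resolves a variable from the task context first, then the workflow context, then the workflow input.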
Example #2
    def test_keep_result(self):
        tests = [({
            'keep-result': ''
        }, True), ({
            'keep-result': []
        }, True), ({
            'keep-result': 'asd'
        }, True), ({
            'keep-result': None
        }, True), ({
            'keep-result': 12345
        }, True), ({
            'keep-result': True
        }, False), ({
            'keep-result': False
        }, False), ({
            'keep-result': "<% 'a' in $.val %>"
        }, False), ({
            'keep-result': '<% 1 + 2 %>'
        }, False), ({
            'keep-result': '<% * %>'
        }, True)]

        for keep_result, expect_error in tests:
            overlay = {'test': {'tasks': {}}}

            utils.merge_dicts(overlay['test']['tasks'], {'email': keep_result})

            self._parse_dsl_spec(add_tasks=True,
                                 changes=overlay,
                                 expect_error=expect_error)
Example #3
    def _parse_dsl_spec(self,
                        dsl_file=None,
                        add_tasks=False,
                        changes=None,
                        expect_error=False):
        if dsl_file and add_tasks:
            raise Exception('The add_tasks option is not a valid '
                            'combination with the dsl_file option.')

        if dsl_file:
            dsl_yaml = base.get_resource(self._resource_path + '/' + dsl_file)

            if changes:
                dsl_dict = yaml.safe_load(dsl_yaml)
                utils.merge_dicts(dsl_dict, changes)
                dsl_yaml = yaml.safe_dump(dsl_dict, default_flow_style=False)
        else:
            dsl_dict = copy.deepcopy(self._dsl_blank)

            if add_tasks:
                dsl_dict['test']['tasks'] = copy.deepcopy(self._dsl_tasks)

            if changes:
                utils.merge_dicts(dsl_dict, changes)

            dsl_yaml = yaml.safe_dump(dsl_dict, default_flow_style=False)

        if not expect_error:
            return self._spec_parser(dsl_yaml)
        else:
            return self.assertRaises(exc.DSLParsingException,
                                     self._spec_parser, dsl_yaml)
Example #4
    def _parse_dsl_spec(self, dsl_file=None, add_tasks=False,
                        changes=None, expect_error=False):
        if dsl_file and add_tasks:
            raise Exception('The add_tasks option is not a valid '
                            'combination with the dsl_file option.')

        if dsl_file:
            dsl_yaml = base.get_resource(self._resource_path + '/' + dsl_file)

            if changes:
                dsl_dict = yaml.safe_load(dsl_yaml)
                utils.merge_dicts(dsl_dict, changes)
                dsl_yaml = yaml.safe_dump(dsl_dict, default_flow_style=False)
        else:
            dsl_dict = copy.deepcopy(self._dsl_blank)

            if add_tasks:
                dsl_dict['test']['tasks'] = copy.deepcopy(self._dsl_tasks)

            if changes:
                utils.merge_dicts(dsl_dict, changes)

            dsl_yaml = yaml.safe_dump(dsl_dict, default_flow_style=False)

        if not expect_error:
            return self._spec_parser(dsl_yaml)
        else:
            return self.assertRaises(
                exc.DSLParsingException,
                self._spec_parser,
                dsl_yaml
            )
Example #5
    def test_requires(self):
        tests = [
            ({
                'requires': ''
            }, True),
            ({
                'requires': []
            }, True),
            ({
                'requires': ['']
            }, True),
            ({
                'requires': None
            }, True),
            ({
                'requires': 12345
            }, True),
            ({
                'requires': ['echo']
            }, False),
            ({
                'requires': ['echo', 'get']
            }, False),
            ({
                'requires': 'echo'
            }, False),
        ]

        for require, expect_error in tests:
            overlay = {'test': {'tasks': {}}}
            utils.merge_dicts(overlay['test'], {'type': 'reverse'})
            utils.merge_dicts(overlay['test']['tasks'], {'email': require})
            self._parse_dsl_spec(add_tasks=True,
                                 changes=overlay,
                                 expect_error=expect_error)
Example #6
    def test_keep_result(self):
        tests = [
            ({'keep-result': ''}, True),
            ({'keep-result': []}, True),
            ({'keep-result': 'asd'}, True),
            ({'keep-result': None}, True),
            ({'keep-result': 12345}, True),
            ({'keep-result': True}, False),
            ({'keep-result': False}, False),
            ({'keep-result': "<% 'a' in $.val %>"}, False),
            ({'keep-result': '<% 1 + 2 %>'}, False),
            ({'keep-result': '<% * %>'}, True),
            ({'keep-result': "{{ 'a' in _.val }}"}, False),
            ({'keep-result': '{{ 1 + 2 }}'}, False),
            ({'keep-result': '{{ * }}'}, True)
        ]

        for keep_result, expect_error in tests:
            overlay = {'test': {'tasks': {}}}

            utils.merge_dicts(overlay['test']['tasks'], {'email': keep_result})

            self._parse_dsl_spec(
                add_tasks=True,
                changes=overlay,
                expect_error=expect_error
            )
Example #7
    def test_input(self):
        tests = [
            ({'input': ''}, True),
            ({'input': []}, True),
            ({'input': ['']}, True),
            ({'input': None}, True),
            ({'input': ['k1', 'k2']}, False),
            ({'input': ['k1', 12345]}, True),
            ({'input': ['k1', {'k2': 2}]}, False),
            ({'input': [{'k1': 1}, {'k2': 2}]}, False),
            ({'input': [{'k1': None}]}, False),
            ({'input': [{'k1': 1}, {'k1': 1}]}, True),
            ({'input': [{'k1': 1, 'k2': 2}]}, True)
        ]

        actions = {
            'a1': {
                'base': 'foobar'
            }
        }

        for inputs, expect_error in tests:
            overlay = {'actions': copy.deepcopy(actions)}
            utils.merge_dicts(overlay['actions']['a1'], inputs)
            self._parse_dsl_spec(changes=overlay,
                                 expect_error=expect_error)
Example #8
    def test_direct_workflow_invalid_join(self):
        tests = [
            ({'task3': {'join': 2}}, False),
            ({'task3': {'join': 5}}, True),
            ({'task3': {'join': 1}}, False),
            ({'task3': {'join': 'one'}}, False),
            ({'task3': {'join': 'all'}}, False),
            ({'task4': {'join': 'all'}}, True),
            ({'task4': {'join': 1}}, True),
            ({'task4': {'join': 'one'}}, True)
        ]

        for test in tests:
            overlay = {
                'test': {
                    'type': 'direct',
                    'tasks': {
                        'task1': {'on-complete': 'task3'},
                        'task2': {'on-complete': 'task3'}
                    }
                }
            }

            utils.merge_dicts(overlay['test']['tasks'], test[0])

            self._parse_dsl_spec(
                add_tasks=False,
                changes=overlay,
                expect_error=test[1]
            )
Example #9
    def test_inputs(self):
        tests = [({
            'input': ''
        }, True), ({
            'input': {}
        }, True), ({
            'input': None
        }, True), ({
            'input': {
                'k1': 'v1'
            }
        }, False), ({
            'input': {
                'k1': '<% $.v1 %>'
            }
        }, False), ({
            'input': {
                'k1': '<% 1 + 2 %>'
            }
        }, False), ({
            'input': {
                'k1': '<% * %>'
            }
        }, True)]

        for task_input, expect_error in tests:
            overlay = {'test': {'tasks': {'task1': {'action': 'test.mock'}}}}

            utils.merge_dicts(overlay['test']['tasks']['task1'], task_input)

            self._parse_dsl_spec(add_tasks=False,
                                 changes=overlay,
                                 expect_error=expect_error)
Example #10
def evaluate_upstream_context(upstream_task_execs):
    published_vars = {}
    ctx = {}

    for t_ex in upstream_task_execs:
        # TODO(rakhmerov): These two merges look confusing. So it's a
        # temporary solution. There's still the bug
        # https://bugs.launchpad.net/mistral/+bug/1424461 that needs to be
        # fixed using context variable versioning.
        published_vars = utils.merge_dicts(
            published_vars,
            t_ex.published
        )

        utils.merge_dicts(
            ctx,
            evaluate_task_outbound_context(t_ex, include_result=False)
        )

    ctx = utils.merge_dicts(ctx, published_vars)

    # TODO(rakhmerov): IMO, this method shouldn't deal with these task ids or
    # anything else related to task proxies. Need to refactor.
    return utils.merge_dicts(
        ctx,
        _get_task_identifiers_dict(upstream_task_execs)
    )
Example #11
def evaluate_task_outbound_context(task_ex, include_result=True):
    """Evaluates task outbound Data Flow context.

    This method assumes that complete task output (after publisher etc.)
    has already been evaluated.
    :param task_ex: DB task.
    :param include_result: boolean argument, if True - include the
        TaskResultProxy in outbound context under <task_name> key.
    :return: Outbound task Data Flow context.
    """

    in_context = (copy.deepcopy(dict(task_ex.in_context))
                  if task_ex.in_context is not None else {})

    out_ctx = utils.merge_dicts(in_context, task_ex.published)

    # Add task output under key 'taskName'.
    if include_result:
        task_ex_result = TaskResultProxy(task_ex.id)

        out_ctx = utils.merge_dicts(
            out_ctx,
            {task_ex.name: task_ex_result or None}
        )

    return ProxyAwareDict(out_ctx)
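
The TaskResultProxy placed under the task name is a lazy placeholder, not the materialized result. A hypothetical sketch of the idea (the load_result callable is an illustrative assumption; the real proxy resolves the result through Mistral's DB layer by task execution id):

class TaskResultProxy(object):
    """Sketch: defer loading a (possibly large) task result until read."""

    def __init__(self, task_ex_id, load_result):
        self.task_ex_id = task_ex_id
        self._load_result = load_result  # e.g. a DB lookup by execution id

    def get(self):
        # Nothing is fetched when the proxy is created or merged into the
        # context; the result is loaded only on actual access.
        return self._load_result(self.task_ex_id)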
Example #12
    def test_output(self):
        tests = [({
            'output': None
        }, False), ({
            'output': False
        }, False), ({
            'output': 12345
        }, False), ({
            'output': 0.12345
        }, False), ({
            'output': 'foobar'
        }, False), ({
            'output': '<% $.x %>'
        }, False), ({
            'output': '<% * %>'
        }, True), ({
            'output': '{{ _.x }}'
        }, False), ({
            'output': '{{ * }}'
        }, True), ({
            'output': ['v1']
        }, False), ({
            'output': {
                'k1': 'v1'
            }
        }, False)]

        actions = {'a1': {'base': 'foobar'}}

        for outputs, expect_error in tests:
            overlay = {'actions': copy.deepcopy(actions)}
            utils.merge_dicts(overlay['actions']['a1'], outputs)
            self._parse_dsl_spec(changes=overlay, expect_error=expect_error)
Example #13
def validate_input(definition, input, spec=None):
    input_param_names = copy.deepcopy(list((input or {}).keys()))
    missing_param_names = []

    spec_input = (spec.get_input() if spec else
                  utils.get_dict_from_string(definition.input))

    for p_name, p_value in six.iteritems(spec_input):
        if p_value is utils.NotDefined and p_name not in input_param_names:
            missing_param_names.append(p_name)
        if p_name in input_param_names:
            input_param_names.remove(p_name)

    if missing_param_names or input_param_names:
        msg = 'Invalid input [name=%s, class=%s'
        msg_props = [definition.name, spec.__class__.__name__]

        if missing_param_names:
            msg += ', missing=%s'
            msg_props.append(missing_param_names)

        if input_param_names:
            msg += ', unexpected=%s'
            msg_props.append(input_param_names)

        msg += ']'

        raise exc.InputException(
            msg % tuple(msg_props)
        )
    else:
        utils.merge_dicts(input, spec_input, overwrite=False)
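
To make the contract concrete, a hypothetical walk-through (the spec_input value below is assumed): parameters whose spec default is utils.NotDefined are required, unknown names are rejected, and the surviving defaults are merged into the caller's input without overwriting.

# Assumed: spec.get_input() returns {'host': utils.NotDefined, 'port': 80}.

inp = {'host': 'example.org'}
validate_input(definition, inp, spec)
# OK: 'host' is supplied; the 'port' default is merged in, so inp becomes
# {'host': 'example.org', 'port': 80}.

validate_input(definition, {}, spec)
# Raises exc.InputException with missing=['host'].

validate_input(definition, {'host': 'h', 'extra': 1}, spec)
# Raises exc.InputException with unexpected=['extra'].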
Example #14
    def test_base_input(self):
        tests = [({
            'base-input': {}
        }, True), ({
            'base-input': None
        }, True), ({
            'base-input': {
                'k1': 'v1',
                'k2': '<% $.v2 %>'
            }
        }, False), ({
            'base-input': {
                'k1': 'v1',
                'k2': '<% * %>'
            }
        }, True), ({
            'base-input': {
                'k1': 'v1',
                'k2': '{{ _.v2 }}'
            }
        }, False), ({
            'base-input': {
                'k1': 'v1',
                'k2': '{{ * }}'
            }
        }, True)]

        actions = {'a1': {'base': 'foobar'}}

        for base_inputs, expect_error in tests:
            overlay = {'actions': copy.deepcopy(actions)}
            utils.merge_dicts(overlay['actions']['a1'], base_inputs)
            self._parse_dsl_spec(changes=overlay, expect_error=expect_error)
Example #15
def validate_input(definition, input, spec=None):
    input_param_names = list((input or {}).keys())
    missing_param_names = []

    spec_input = (spec.get_input() if spec else
                  utils.get_input_dict_from_input_string(definition.input))

    for p_name, p_value in six.iteritems(spec_input):
        if p_value is utils.NotDefined and p_name not in input_param_names:
            missing_param_names.append(p_name)
        if p_name in input_param_names:
            input_param_names.remove(p_name)

    if missing_param_names or input_param_names:
        msg = 'Invalid input [name=%s, class=%s'
        msg_props = [definition.name, spec.__class__.__name__]

        if missing_param_names:
            msg += ', missing=%s'
            msg_props.append(missing_param_names)

        if input_param_names:
            msg += ', unexpected=%s'
            msg_props.append(input_param_names)

        msg += ']'

        raise exc.InputException(msg % tuple(msg_props))
    else:
        utils.merge_dicts(input, spec_input, overwrite=False)
Example #17
def evaluate_task_outbound_context(task_ex, include_result=True):
    """Evaluates task outbound Data Flow context.

    This method assumes that complete task output (after publisher etc.)
    has already been evaluated.
    :param task_ex: DB task.
    :param include_result: boolean argument, if True - include the
        TaskResultProxy in outbound context under <task_name> key.
    :return: Outbound task Data Flow context.
    """

    if task_ex.state != states.SUCCESS:
        return task_ex.in_context

    in_context = (copy.deepcopy(dict(task_ex.in_context))
                  if task_ex.in_context is not None else {})

    out_ctx = utils.merge_dicts(in_context, task_ex.published)

    # Add task output under key 'taskName'.
    if include_result:
        task_ex_result = TaskResultProxy(task_ex.id)

        out_ctx = utils.merge_dicts(
            out_ctx,
            {task_ex.name: task_ex_result or None}
        )

    return ProxyAwareDict(out_ctx)
Example #18
    def test_output(self):
        tests = [
            ({'output': None}, False),
            ({'output': False}, False),
            ({'output': 12345}, False),
            ({'output': 0.12345}, False),
            ({'output': 'foobar'}, False),
            ({'output': '<% $.x %>'}, False),
            ({'output': '<% * %>'}, True),
            ({'output': '{{ _.x }}'}, False),
            ({'output': '{{ * }}'}, True),
            ({'output': ['v1']}, False),
            ({'output': {'k1': 'v1'}}, False)
        ]

        actions = {
            'a1': {
                'base': 'foobar'
            }
        }

        for outputs, expect_error in tests:
            overlay = {'actions': copy.deepcopy(actions)}

            utils.merge_dicts(overlay['actions']['a1'], outputs)

            self._parse_dsl_spec(changes=overlay, expect_error=expect_error)
Example #19
    def test_direct_transition(self):
        tests = [
            ({'on-success': ['email']}, False),
            ({'on-success': [{'email': '<% 1 %>'}]}, False),
            ({'on-success': [{'email': '<% 1 %>'}, 'echo']}, False),
            ({'on-success': [{'email': '<% $.v1 in $.v2 %>'}]}, False),
            ({'on-success': [{'email': '<% * %>'}]}, True),
            ({'on-success': [{'email': '{{ 1 }}'}]}, False),
            ({'on-success': [{'email': '{{ 1 }}'}, 'echo']}, False),
            ({'on-success': [{'email': '{{ _.v1 in _.v2 }}'}]}, False),
            ({'on-success': [{'email': '{{ * }}'}]}, True),
            ({'on-success': 'email'}, False),
            ({'on-success': None}, True),
            ({'on-success': ['']}, True),
            ({'on-success': []}, True),
            ({'on-success': ['email', 'email']}, True),
            ({'on-success': ['email', 12345]}, True),
            ({'on-error': ['email']}, False),
            ({'on-error': [{'email': '<% 1 %>'}]}, False),
            ({'on-error': [{'email': '<% 1 %>'}, 'echo']}, False),
            ({'on-error': [{'email': '<% $.v1 in $.v2 %>'}]}, False),
            ({'on-error': [{'email': '<% * %>'}]}, True),
            ({'on-error': [{'email': '{{ 1 }}'}]}, False),
            ({'on-error': [{'email': '{{ 1 }}'}, 'echo']}, False),
            ({'on-error': [{'email': '{{ _.v1 in _.v2 }}'}]}, False),
            ({'on-error': [{'email': '{{ * }}'}]}, True),
            ({'on-error': 'email'}, False),
            ({'on-error': None}, True),
            ({'on-error': ['']}, True),
            ({'on-error': []}, True),
            ({'on-error': ['email', 'email']}, True),
            ({'on-error': ['email', 12345]}, True),
            ({'on-complete': ['email']}, False),
            ({'on-complete': [{'email': '<% 1 %>'}]}, False),
            ({'on-complete': [{'email': '<% 1 %>'}, 'echo']}, False),
            ({'on-complete': [{'email': '<% $.v1 in $.v2 %>'}]}, False),
            ({'on-complete': [{'email': '<% * %>'}]}, True),
            ({'on-complete': [{'email': '{{ 1 }}'}]}, False),
            ({'on-complete': [{'email': '{{ 1 }}'}, 'echo']}, False),
            ({'on-complete': [{'email': '{{ _.v1 in _.v2 }}'}]}, False),
            ({'on-complete': [{'email': '{{ * }}'}]}, True),
            ({'on-complete': 'email'}, False),
            ({'on-complete': None}, True),
            ({'on-complete': ['']}, True),
            ({'on-complete': []}, True),
            ({'on-complete': ['email', 'email']}, True),
            ({'on-complete': ['email', 12345]}, True)
        ]

        for transition, expect_error in tests:
            overlay = {'test': {'tasks': {}}}

            utils.merge_dicts(overlay['test']['tasks'], {'get': transition})

            self._parse_dsl_spec(
                add_tasks=True,
                changes=overlay,
                expect_error=expect_error
            )
Example #20
    def test_direct_workflow_invalid_task(self):
        overlay = {'test': {'type': 'direct', 'tasks': {}}}
        requires = {'requires': ['echo', 'get']}

        utils.merge_dicts(overlay['test']['tasks'], {'email': requires})

        self._parse_dsl_spec(add_tasks=True,
                             changes=overlay,
                             expect_error=True)
Example #21
def add_workflow_variables_to_context(wf_ex, wf_spec):
    wf_ex.context = wf_ex.context or {}

    # The context for calculating workflow variables is workflow input
    # and other data already stored in workflow initial context.
    ctx_view = ContextView(wf_ex.context, wf_ex.input)

    wf_vars = expr.evaluate_recursively(wf_spec.get_vars(), ctx_view)

    utils.merge_dicts(wf_ex.context, wf_vars)
Example #22
    def _process_action_and_workflow(self):
        params = {}

        if self._action:
            self._action, params = self._parse_cmd_and_input(self._action)
        elif self._workflow:
            self._workflow, params = self._parse_cmd_and_input(self._workflow)
        else:
            self._action = 'std.noop'

        utils.merge_dicts(self._input, params)
Example #23
    def _process_action_and_workflow(self):
        params = {}

        if self._action:
            self._action, params = self._parse_cmd_and_input(self._action)
        elif self._workflow:
            self._workflow, params = self._parse_cmd_and_input(
                self._workflow)
        else:
            self._action = 'std.noop'

        utils.merge_dicts(self._input, params)
Example #24
    def test_reverse_workflow(self):
        overlay = {'test': {'type': 'reverse', 'tasks': {}}}
        require = {'requires': ['echo', 'get']}

        utils.merge_dicts(overlay['test']['tasks'], {'email': require})

        wfs_spec = self._parse_dsl_spec(add_tasks=True,
                                        changes=overlay,
                                        expect_error=False)

        self.assertEqual(1, len(wfs_spec.get_workflows()))
        self.assertEqual('test', wfs_spec.get_workflows()[0].get_name())
        self.assertEqual('reverse', wfs_spec.get_workflows()[0].get_type())
Example #25
    def _get_task_inbound_context(self, task_spec):
        upstream_task_execs = self._get_upstream_task_executions(task_spec)

        upstream_ctx = data_flow.evaluate_upstream_context(upstream_task_execs)

        ctx = u.merge_dicts(copy.deepcopy(self.wf_ex.context), upstream_ctx)

        if self.wf_ex.context:
            ctx['__env'] = u.merge_dicts(
                copy.deepcopy(upstream_ctx.get('__env', {})),
                copy.deepcopy(self.wf_ex.context.get('__env', {})))

        return ctx
Example #27
    def __init__(self, data):
        super(ActionSpec, self).__init__(data)

        self._name = data['name']
        self._description = data.get('description')
        self._tags = data.get('tags', [])
        self._base = data['base']
        self._base_input = data.get('base-input', {})
        self._input = utils.get_input_dict(data.get('input', []))
        self._output = data.get('output')

        self._base, _input = self._parse_cmd_and_input(self._base)

        utils.merge_dicts(self._base_input, _input)
Example #28
def evaluate_upstream_context(upstream_task_execs):
    published_vars = {}
    ctx = {}

    for t_ex in upstream_task_execs:
        # TODO(rakhmerov): These two merges look confusing. So it's a
        # temporary solution. There's still the bug
        # https://bugs.launchpad.net/mistral/+bug/1424461 that needs to be
        # fixed using context variable versioning.
        published_vars = utils.merge_dicts(published_vars, t_ex.published)

        utils.merge_dicts(ctx, evaluate_task_outbound_context(t_ex))

    return utils.merge_dicts(ctx, published_vars)
Example #29
    def __init__(self, data):
        super(ActionSpec, self).__init__(data)

        self._name = data['name']
        self._description = data.get('description')
        self._tags = data.get('tags', [])
        self._base = data['base']
        self._base_input = data.get('base-input', {})
        self._input = utils.get_dict_from_entries(data.get('input', []))
        self._output = data.get('output')

        self._base, _input = self._parse_cmd_and_input(self._base)

        utils.merge_dicts(self._base_input, _input)
Example #30
def add_workflow_variables_to_context(wf_ex, wf_spec):
    wf_ex.context = wf_ex.context or {}

    # The context for calculating workflow variables is workflow input
    # and other data already stored in workflow initial context.
    ctx_view = ContextView(
        get_workflow_environment_dict(wf_ex),
        wf_ex.context,
        wf_ex.input
    )

    wf_vars = expr.evaluate_recursively(wf_spec.get_vars(), ctx_view)

    utils.merge_dicts(wf_ex.context, wf_vars)
Example #31
    def get_task_inbound_context(self, task_spec):
        # TODO(rakhmerov): This method should also be able to work with task_ex
        # to cover 'split' (aka 'merge') use case.
        upstream_task_execs = self._get_upstream_task_executions(task_spec)

        upstream_ctx = data_flow.evaluate_upstream_context(upstream_task_execs)

        ctx = u.merge_dicts(copy.deepcopy(self.wf_ex.context), upstream_ctx)

        if self.wf_ex.context:
            ctx['__env'] = u.merge_dicts(
                copy.deepcopy(upstream_ctx.get('__env', {})),
                copy.deepcopy(self.wf_ex.context.get('__env', {})))

        return ctx
Example #32
def _schedule_run_action(task_ex, task_spec, action_input, index):
    wf_ex = task_ex.workflow_execution
    wf_spec = spec_parser.get_workflow_spec(wf_ex.spec)

    action_spec_name = task_spec.get_action_name()

    action_def = action_handler.resolve_definition(
        action_spec_name,
        task_ex,
        wf_spec
    )

    action_ex = action_handler.create_action_execution(
        action_def, action_input, task_ex, index
    )

    target = expr.evaluate_recursively(
        task_spec.get_target(),
        utils.merge_dicts(
            copy.deepcopy(action_input),
            copy.copy(task_ex.in_context)
        )
    )

    scheduler.schedule_call(
        None,
        'mistral.engine.action_handler.run_existing_action',
        0,
        action_ex_id=action_ex.id,
        target=target
    )
Example #33
    def _fail_workflow(self, final_context, msg):
        if states.is_paused_or_completed(self.wf_ex.state):
            return

        output_on_error = {}
        try:
            output_on_error = data_flow.evaluate_workflow_output(
                self.wf_ex,
                self.wf_spec.get_output_on_error(),
                final_context
            )
        except exc.MistralException as e:
            msg = (
                "Failed to evaluate expression in output-on-error! "
                "(output-on-error: '%s', exception: '%s' Cause: '%s'"
                % (self.wf_spec.get_output_on_error(), e, msg)
            )
            LOG.error(msg)

        self.set_state(states.ERROR, state_info=msg)

        # When setting an ERROR state, trim the message so that saving the
        # output can't fail due to the field size limitation.
        msg = utils.cut_by_kb(
            msg,
            cfg.CONF.engine.execution_field_size_limit_kb
        )

        self.wf_ex.output = merge_dicts({'result': msg}, output_on_error)

        if self.wf_ex.task_execution_id:
            self._schedule_send_result_to_parent_workflow()
Example #34
    def _fail_workflow(self, final_context, msg):
        if states.is_paused_or_completed(self.wf_ex.state):
            return

        output_on_error = {}

        try:
            output_on_error = data_flow.evaluate_workflow_output(
                self.wf_ex, self.wf_spec.get_output_on_error(), final_context)
        except exc.MistralException as e:
            msg = ("Failed to evaluate expression in output-on-error! "
                   "(output-on-error: '%s', exception: '%s' Cause: '%s'" %
                   (self.wf_spec.get_output_on_error(), e, msg))
            LOG.error(msg)

        self.set_state(states.ERROR, state_info=msg)

        # When setting an ERROR state, trim the message so that saving the
        # output can't fail due to the field size limitation.

        length_output_on_error = len(str(output_on_error).encode("utf-8"))
        total_output_length = utils.get_number_of_chars_from_kilobytes(
            cfg.CONF.engine.execution_field_size_limit_kb)

        if length_output_on_error < total_output_length:
            msg = utils.cut_by_char(
                msg, total_output_length - length_output_on_error)
        else:
            msg = utils.cut_by_kb(
                msg, cfg.CONF.engine.execution_field_size_limit_kb)

        self.wf_ex.output = merge_dicts({'result': msg}, output_on_error)

        if self.wf_ex.task_execution_id:
            self._send_result_to_parent_workflow()
Example #35
def add_workflow_variables_to_context(wf_ex, wf_spec):
    wf_ex.context = wf_ex.context or {}

    return utils.merge_dicts(
        wf_ex.context,
        expr.evaluate_recursively(wf_spec.get_vars(), wf_ex.context)
    )
Example #36
    def _get_task_inbound_context(self, task_spec):
        upstream_task_execs = self._get_upstream_task_executions(task_spec)

        return u.merge_dicts(
            copy.deepcopy(self.wf_ex.context),
            data_flow.evaluate_upstream_context(upstream_task_execs)
        )
Example #37
    def evaluate_workflow_final_context(self):
        ctx = {}

        for t_ex in self._find_end_tasks():
            ctx = utils.merge_dicts(
                ctx,
                data_flow.evaluate_task_outbound_context(t_ex)
            )

        return ctx
Example #38
class ReverseWorkflowTaskSpec(TaskSpec):
    _polymorphic_value = 'reverse'

    _reverse_workflow_schema = {
        "type": "object",
        "properties": {
            "type": {
                "enum": [_polymorphic_value]
            },
            "requires": {
                "oneOf": [types.NONEMPTY_STRING, types.UNIQUE_STRING_LIST]
            }
        }
    }

    _schema = utils.merge_dicts(copy.deepcopy(TaskSpec._schema),
                                _reverse_workflow_schema)

    def __init__(self, data):
        super(ReverseWorkflowTaskSpec, self).__init__(data)

        self._requires = data.get('requires', [])

    def get_requires(self):
        if isinstance(self._requires, six.string_types):
            return [self._requires]

        return self._requires
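
The _schema class attribute above composes JSON-schema fragments with the same merge helper. Since the merge is recursive (see the merge_dicts test near the end of this section), nested keys such as "properties" are combined rather than replaced; a small hypothetical illustration:

base = {'type': 'object', 'properties': {'name': {'type': 'string'}}}
extra = {'properties': {'requires': {'type': 'array'}}}

schema = utils.merge_dicts(copy.deepcopy(base), extra)

# schema['properties'] now contains both 'name' and 'requires'; the
# deepcopy keeps the shared base schema from being mutated in place.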
Example #39
    def _prepare_runtime_context(self, index):
        ctx = super(AdHocAction, self)._prepare_runtime_context(index)

        # Insert special field into runtime context so that we track
        # a relationship between python action and adhoc action.
        return utils.merge_dicts(
            ctx, {'adhoc_action_name': self.adhoc_action_def.name})
Example #40
def _schedule_run_action(task_ex, task_spec, action_input, index, wf_spec):
    action_spec_name = task_spec.get_action_name()

    action_def = action_handler.resolve_definition(
        action_spec_name,
        task_ex,
        wf_spec
    )

    action_ex = action_handler.create_action_execution(
        action_def, action_input, task_ex, index
    )

    target = expr.evaluate_recursively(
        task_spec.get_target(),
        utils.merge_dicts(
            copy.deepcopy(action_input),
            copy.deepcopy(task_ex.in_context)
        )
    )

    scheduler.schedule_call(
        None,
        'mistral.engine.action_handler.run_existing_action',
        0,
        action_ex_id=action_ex.id,
        target=target
    )
Example #41
    def _get_task_inbound_context(self, task_spec):
        upstream_task_execs = self._get_upstream_task_executions(task_spec)

        upstream_ctx = data_flow.evaluate_upstream_context(upstream_task_execs)

        ctx = u.merge_dicts(
            copy.deepcopy(self.wf_ex.context),
            upstream_ctx
        )

        if self.wf_ex.context:
            ctx['__env'] = u.merge_dicts(
                copy.deepcopy(upstream_ctx.get('__env', {})),
                copy.deepcopy(self.wf_ex.context.get('__env', {}))
            )

        return ctx
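
The special-casing of '__env' here makes the workflow-level environment win over whatever the upstream context carried, because the second argument to merge_dicts overwrites on conflicts. A hypothetical illustration:

upstream_env = {'region': 'us-east', 'debug': True}
wf_env = {'region': 'eu-west'}

merged = u.merge_dicts(copy.deepcopy(upstream_env), copy.deepcopy(wf_env))

# merged == {'region': 'eu-west', 'debug': True}: the workflow env's
# 'region' wins while upstream-only keys survive.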
Example #42
    def _update_task_ex_env(task_ex, env):
        if not env:
            return task_ex

        task_ex.in_context['__env'] = u.merge_dicts(
            task_ex.in_context['__env'], env)

        return task_ex
Example #43
def evaluate_upstream_context(upstream_task_execs):
    published_vars = {}
    ctx = {}

    for t_ex in upstream_task_execs:
        # TODO(rakhmerov): These two merges look confusing. So it's a
        # temporary solution. There's still the bug
        # https://bugs.launchpad.net/mistral/+bug/1424461 that needs to be
        # fixed using context variable versioning.
        published_vars = utils.merge_dicts(
            published_vars,
            t_ex.published
        )

        utils.merge_dicts(ctx, evaluate_task_outbound_context(t_ex))

    return utils.merge_dicts(ctx, published_vars)
Example #44
def get_rpc_info_from_oslo(additional_conf=None):
    transport = messaging.TransportURL.parse(CONF, CONF.transport_url)

    rpc_backend = _get_rpc_backend(transport)

    conf_dict = _get_rpc_info(rpc_backend, transport)

    return utils.merge_dicts(conf_dict, _get_extra_info(additional_conf))
Example #45
    def _get_action_input(self, ctx=None):
        ctx = ctx or self.ctx

        input_dict = expr.evaluate_recursively(self.task_spec.get_input(), ctx)

        return utils.merge_dicts(input_dict,
                                 self._get_action_defaults(),
                                 overwrite=False)
Example #46
    def evaluate_workflow_final_context(self):
        ctx = {}

        for t_ex in self._find_end_tasks():
            ctx = utils.merge_dicts(
                ctx, data_flow.evaluate_task_outbound_context(t_ex))

        return ctx
Example #47
    def _get_target(self, input_dict):
        return expr.evaluate_recursively(
            self.task_spec.get_target(),
            utils.merge_dicts(
                copy.deepcopy(input_dict),
                copy.deepcopy(self.ctx)
            )
        )
Example #49
    def test_inputs(self):
        tests = [
            ({'input': ''}, True),
            ({'input': {}}, True),
            ({'input': None}, True),
            ({'input': {'k1': 'v1'}}, False),
            ({'input': {'k1': '<% $.v1 %>'}}, False),
            ({'input': {'k1': '<% 1 + 2 %>'}}, False),
            ({'input': {'k1': '<% * %>'}}, True)
        ]

        for task_input, expect_error in tests:
            overlay = {'test': {'tasks': {'task1': {'action': 'test.mock'}}}}
            utils.merge_dicts(overlay['test']['tasks']['task1'], task_input)
            self._parse_dsl_spec(add_tasks=False,
                                 changes=overlay,
                                 expect_error=expect_error)
Example #51
    def _prepare_runtime_context(self, index):
        ctx = super(AdHocAction, self)._prepare_runtime_context(index)

        # Insert special field into runtime context so that we track
        # a relationship between python action and adhoc action.
        return utils.merge_dicts(
            ctx,
            {'adhoc_action_name': self.adhoc_action_def.name}
        )
Example #52
    def _update_task_ex_env(self, task_ex, env):
        if not env:
            return task_ex

        task_ex.in_context['__env'] = u.merge_dicts(
            task_ex.in_context['__env'],
            env
        )

        return task_ex
Example #53
    def test_requires(self):
        tests = [
            ({'requires': ''}, True),
            ({'requires': []}, True),
            ({'requires': ['']}, True),
            ({'requires': None}, True),
            ({'requires': 12345}, True),
            ({'requires': ['echo']}, False),
            ({'requires': ['echo', 'get']}, False),
            ({'requires': 'echo'}, False),
        ]

        for require, expect_error in tests:
            overlay = {'test': {'tasks': {}}}
            utils.merge_dicts(overlay['test'], {'type': 'reverse'})
            utils.merge_dicts(overlay['test']['tasks'], {'email': require})
            self._parse_dsl_spec(add_tasks=True,
                                 changes=overlay,
                                 expect_error=expect_error)
Example #54
    def test_merge_dicts_overwrite_false(self):
        left = copy.deepcopy(LEFT)
        right = copy.deepcopy(RIGHT)

        expected = {
            'key1': {
                'key11': "val11",
                'key12': "val12",
                'key13': {
                    'key131': 'val131'
                }
            },
            'key2': 'val2',
            'key3': 'val3'
        }

        utils.merge_dicts(left, right, overwrite=False)

        self.assertDictEqual(left, expected)
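
This test pins down the semantics every call site above relies on: the merge is recursive, mutates its first argument in place, returns it (which is why some callers chain the return value and others ignore it), and with overwrite=False preserves existing leaf values. A minimal sketch consistent with that behavior (not necessarily Mistral's exact implementation):

def merge_dicts(left, right, overwrite=True):
    """Recursively merge 'right' into 'left' in place and return 'left'."""
    if left is None:
        return right

    if right is None:
        return left

    for k, v in right.items():
        if k not in left:
            left[k] = v
        else:
            left_v = left[k]

            if isinstance(left_v, dict) and isinstance(v, dict):
                merge_dicts(left_v, v, overwrite=overwrite)
            elif overwrite:
                left[k] = v

    return left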
Example #55
    def test_base_input(self):
        tests = [
            ({'base-input': {}}, True),
            ({'base-input': None}, True),
            ({'base-input': {'k1': 'v1', 'k2': '<% $.v2 %>'}}, False),
            ({'base-input': {'k1': 'v1', 'k2': '<% * %>'}}, True)
        ]

        actions = {
            'a1': {
                'base': 'foobar'
            }
        }

        for base_inputs, expect_error in tests:
            overlay = {'actions': copy.deepcopy(actions)}
            utils.merge_dicts(overlay['actions']['a1'], base_inputs)
            self._parse_dsl_spec(changes=overlay,
                                 expect_error=expect_error)
Example #56
    def _get_action_input(self, ctx=None):
        ctx = ctx or self.ctx

        input_dict = expr.evaluate_recursively(self.task_spec.get_input(), ctx)

        return utils.merge_dicts(
            input_dict,
            self._get_action_defaults(),
            overwrite=False
        )