Example #1
    def test_cache_workflow_spec_no_duplicates(self):
        wfs_text = """
        version: '2.0'

        wf:
          tasks:
            task1:
              action: std.noop
              on-success:
                - task2
                - task3

            task2:
              workflow: sub_wf my_param="val1"

            task3:
              workflow: sub_wf my_param="val2"

        sub_wf:
          input:
            - my_param

          tasks:
            task1:
              action: std.echo output="Param value is <% $.my_param %>"
        """

        wfs = wf_service.create_workflows(wfs_text)

        self.assertEqual(2, len(wfs))

        self.assertEqual(0, spec_parser.get_wf_execution_spec_cache_size())
        self.assertEqual(0, spec_parser.get_wf_definition_spec_cache_size())

        wf_ex = self.engine.start_workflow('wf')

        self.await_workflow_success(wf_ex.id)

        # We expect to have a cache entry for every workflow execution
        # but two of them should refer to the same object.
        self.assertEqual(3, spec_parser.get_wf_execution_spec_cache_size())
        self.assertEqual(2, spec_parser.get_wf_definition_spec_cache_size())

        sub_wf_execs = db_api.get_workflow_executions(name='sub_wf')

        self.assertEqual(2, len(sub_wf_execs))

        spec1 = spec_parser.get_workflow_spec_by_execution_id(
            sub_wf_execs[0].id
        )
        spec2 = spec_parser.get_workflow_spec_by_execution_id(
            sub_wf_execs[1].id
        )

        self.assertIs(spec1, spec2)
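
A note on what this test asserts: three entries in the execution spec cache but only two distinct spec objects. That falls out of a two-level cache design in which specs are interned per (definition id, updated_at) and the per-execution cache only stores references into that pool. The following is a minimal, self-contained sketch of the idea, not Mistral's actual spec_parser; all names are illustrative.

# Hypothetical two-level spec cache: definition-level entries own the
# parsed spec objects, execution-level entries just reference them.
_def_cache = {}   # (definition_id, updated_at) -> spec object
_exec_cache = {}  # execution_id -> shared spec reference

def get_spec_by_definition_id(def_id, updated_at):
    key = (def_id, updated_at)
    if key not in _def_cache:
        _def_cache[key] = {'name': def_id}  # stand-in for a parsed spec
    return _def_cache[key]

def cache_spec_by_execution_id(exec_id, spec):
    _exec_cache[exec_id] = spec

def get_spec_by_execution_id(exec_id):
    return _exec_cache[exec_id]

# Two executions of the same sub-workflow share one spec object:
spec = get_spec_by_definition_id('sub_wf', '2024-01-01')
cache_spec_by_execution_id('exec-1', spec)
cache_spec_by_execution_id('exec-2', spec)
assert get_spec_by_execution_id('exec-1') is get_spec_by_execution_id('exec-2')
assert len(_exec_cache) == 2 and len(_def_cache) == 1
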
Example #3
def force_fail_task(task_ex, msg, task=None):
    """Forces the given task to fail.

    This method implements the 'forced' task fail without giving a chance
    to a workflow controller to handle the error. Its main purpose is to
    reflect errors caused by workflow structure (errors in 'publish', 'on-xxx'
    clauses, etc.) rather than failed actions. If such an error happens
    we should also force the entire workflow to fail. I.e., this kind of
    error must be propagated to a higher level, to the workflow.

    :param task_ex: Task execution.
    :param msg: Error message.
    :param task: Task object. Optional.
    """

    LOG.error(msg)

    if not task:
        wf_spec = spec_parser.get_workflow_spec_by_execution_id(
            task_ex.workflow_execution_id
        )

        task = _build_task_from_execution(wf_spec, task_ex)

    old_task_state = task_ex.state
    task.set_state(states.ERROR, msg)
    task.notify(old_task_state, states.ERROR)

    task.save_finished_time()

    wf_handler.force_fail_workflow(task_ex.workflow_execution, msg)
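
The "forced" failure above is essentially: log, move the task to ERROR while bypassing the workflow controller, then escalate the same error to the workflow. A toy model of that escalation path, using stub classes rather than Mistral's models:

# Toy force-fail escalation: task -> ERROR, then the whole
# workflow -> ERROR, with no controller given a chance to react.
class Workflow:
    def __init__(self):
        self.state, self.state_info = 'RUNNING', None

class Task:
    def __init__(self, wf):
        self.wf = wf
        self.state, self.state_info = 'RUNNING', None

def force_fail_task(task, msg):
    task.state, task.state_info = 'ERROR', msg
    # Structural errors must not be "handled"; they escalate.
    force_fail_workflow(task.wf, msg)

def force_fail_workflow(wf, msg):
    wf.state, wf.state_info = 'ERROR', msg

wf = Workflow()
t = Task(wf)
force_fail_task(t, "bad 'publish' clause")
assert t.state == wf.state == 'ERROR'
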
Example #5
    def schedule(self, input_dict, target, index=0, desc='', safe_rerun=False):
        assert not self.action_ex

        parent_wf_ex = self.task_ex.workflow_execution
        parent_wf_spec = spec_parser.get_workflow_spec_by_execution_id(
            parent_wf_ex.id)

        task_spec = spec_parser.get_task_spec(self.task_ex.spec)

        wf_spec_name = task_spec.get_workflow_name()

        wf_def = engine_utils.resolve_workflow_definition(
            parent_wf_ex.workflow_name, parent_wf_spec.get_name(),
            wf_spec_name)

        wf_spec = spec_parser.get_workflow_spec_by_definition_id(
            wf_def.id, wf_def.updated_at)

        wf_params = {'task_execution_id': self.task_ex.id, 'index': index}

        if 'env' in parent_wf_ex.params:
            wf_params['env'] = parent_wf_ex.params['env']
            wf_params['evaluate_env'] = parent_wf_ex.params.get('evaluate_env')

        for k, v in list(input_dict.items()):
            if k not in wf_spec.get_input():
                wf_params[k] = v
                del input_dict[k]

        wf_handler.start_workflow(wf_def.id, input_dict,
                                  "sub-workflow execution", wf_params)
Example #6
File: base.py  Project: openstack/mistral
def get_controller(wf_ex, wf_spec=None):
    """Gets a workflow controller instance by given workflow execution object.

    :param wf_ex: Workflow execution object.
    :param wf_spec: Workflow specification object. If passed, the method works
        faster.
    :returns: Workflow controller instance.
    """

    if not wf_spec:
        wf_spec = spec_parser.get_workflow_spec_by_execution_id(wf_ex.id)

    wf_type = wf_spec.get_type()

    ctrl_cls = None

    for cls in u.iter_subclasses(WorkflowController):
        if cls.__workflow_type__ == wf_type:
            ctrl_cls = cls
            break

    if not ctrl_cls:
        raise exc.MistralError(
            'Failed to find a workflow controller [type=%s]' % wf_type
        )

    return ctrl_cls(wf_ex, wf_spec)
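
get_controller() resolves the controller class by scanning subclasses of WorkflowController for a matching __workflow_type__ tag. The same registry-by-subclass pattern in self-contained form; iter_subclasses here is a plausible stand-in (recursive __subclasses__()), not necessarily how mistral's u.iter_subclasses is implemented.

# Registry-by-subclass: find the class whose type tag matches,
# then instantiate it.
def iter_subclasses(cls):
    for sub in cls.__subclasses__():
        yield sub
        yield from iter_subclasses(sub)

class Controller:
    __workflow_type__ = None

class DirectController(Controller):
    __workflow_type__ = 'direct'

class ReverseController(Controller):
    __workflow_type__ = 'reverse'

def get_controller(wf_type):
    for cls in iter_subclasses(Controller):
        if cls.__workflow_type__ == wf_type:
            return cls()
    raise RuntimeError('Failed to find a controller [type=%s]' % wf_type)

assert isinstance(get_controller('reverse'), ReverseController)
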
Example #7
def force_fail_task(task_ex, msg, task=None):
    """Forces the given task to fail.

    This method implements the 'forced' task fail without giving a chance
    to a workflow controller to handle the error. Its main purpose is to
    reflect errors caused by workflow structure (errors in 'publish', 'on-xxx'
    clauses, etc.) rather than failed actions. If such an error happens
    we should also force the entire workflow to fail. I.e., this kind of
    error must be propagated to a higher level, to the workflow.

    :param task_ex: Task execution.
    :param msg: Error message.
    :param task: Task object. Optional.
    """

    LOG.error(msg)

    if not task:
        wf_spec = spec_parser.get_workflow_spec_by_execution_id(
            task_ex.workflow_execution_id)

        task = build_task_from_execution(wf_spec, task_ex)

    task.set_state(states.ERROR, msg)

    wf_handler.force_fail_workflow(task_ex.workflow_execution, msg)
Example #8
def _on_action_complete(action_ex):
    """Handles action completion event.

    :param action_ex: Action execution.
    """

    task_ex = action_ex.task_execution

    if not task_ex:
        return

    task_spec = spec_parser.get_task_spec(task_ex.spec)

    wf_ex = task_ex.workflow_execution

    task = _create_task(
        wf_ex, spec_parser.get_workflow_spec_by_execution_id(wf_ex.id),
        task_spec, task_ex.in_context, task_ex)

    try:
        task.on_action_complete(action_ex)
    except exc.MistralException as e:
        wf_ex = task_ex.workflow_execution

        msg = ("Failed to handle action completion [error=%s, wf=%s, task=%s,"
               " action=%s]:\n%s" %
               (e, wf_ex.name, task_ex.name, action_ex.name, tb.format_exc()))

        force_fail_task(task_ex, msg, task=task)

        return

    _check_affected_tasks(task)
Example #9
def _build_action(action_ex):
    if isinstance(action_ex, models.WorkflowExecution):
        return actions.WorkflowAction(wf_name=action_ex.name,
                                      action_ex=action_ex)

    wf_name = None
    wf_spec_name = None

    if action_ex.workflow_name:
        wf_name = action_ex.workflow_name
        wf_spec = spec_parser.get_workflow_spec_by_execution_id(
            action_ex.task_execution.workflow_execution_id)
        wf_spec_name = wf_spec.get_name()

    adhoc_action_name = action_ex.runtime_context.get('adhoc_action_name')

    if adhoc_action_name:
        action_def = actions.resolve_action_definition(adhoc_action_name,
                                                       wf_name, wf_spec_name)

        return actions.AdHocAction(action_def, action_ex=action_ex)

    action_def = actions.resolve_action_definition(action_ex.name, wf_name,
                                                   wf_spec_name)

    return actions.PythonAction(action_def, action_ex=action_ex)
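
_build_action() is a dispatch: the wrapper type is chosen from what the execution object is. Reduced to its skeleton (string results stand in for the action classes):

# Dispatch skeleton mirroring _build_action's three branches.
class WorkflowExecution:
    pass

class ActionExecution:
    def __init__(self, adhoc=False):
        self.runtime_context = {'adhoc_action_name': 'a'} if adhoc else {}

def build_action(ex):
    if isinstance(ex, WorkflowExecution):
        return 'WorkflowAction'
    if ex.runtime_context.get('adhoc_action_name'):
        return 'AdHocAction'
    return 'PythonAction'

assert build_action(WorkflowExecution()) == 'WorkflowAction'
assert build_action(ActionExecution(adhoc=True)) == 'AdHocAction'
assert build_action(ActionExecution()) == 'PythonAction'
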
Example #10
def complete_task(task_ex, state, state_info):
    if not task_ex:
        return

    wf_spec = spec_parser.get_workflow_spec_by_execution_id(
        task_ex.workflow_execution_id
    )

    task = _build_task_from_execution(wf_spec, task_ex)

    try:
        task.complete(state, state_info)
    except exc.MistralException as e:
        wf_ex = task_ex.workflow_execution

        msg = (
            "Failed to complete task [error=%s, wf=%s, task=%s]:\n%s" %
            (e, wf_ex.name, task_ex.name, tb.format_exc())
        )

        force_fail_task(task_ex, msg, task=task)

        return

    _check_affected_tasks(task)
Example #12
def continue_task(task_ex):
    wf_spec = spec_parser.get_workflow_spec_by_execution_id(
        task_ex.workflow_execution_id)

    task = _build_task_from_execution(wf_spec, task_ex)

    try:
        task.set_state(states.RUNNING, None)

        task.run()
    except exc.MistralException as e:
        wf_ex = task_ex.workflow_execution

        msg = ("Failed to run task [error=%s, wf=%s, task=%s]:\n%s" %
               (e, wf_ex.name, task_ex.name, tb.format_exc()))

        LOG.error(msg)

        task.set_state(states.ERROR, msg)

        wf_handler.force_fail_workflow(wf_ex, msg)

        return

    _check_affected_tasks(task)
Example #13
def _refresh_task_state(task_ex_id):
    with db_api.transaction():
        task_ex = db_api.load_task_execution(task_ex_id)

        if not task_ex:
            return

        if (states.is_completed(task_ex.state)
                or task_ex.state == states.RUNNING):
            return

        wf_ex = task_ex.workflow_execution

        if states.is_completed(wf_ex.state):
            return

        wf_spec = spec_parser.get_workflow_spec_by_execution_id(
            task_ex.workflow_execution_id
        )

        wf_ctrl = wf_base.get_controller(wf_ex, wf_spec)

        with db_api.named_lock(task_ex.id):
            # NOTE: we have to use this lock to prevent two (or more) such
            # methods from changing task state and starting its action or
            # workflow. Checking task state outside of this section is a
            # performance optimization because locking is pretty expensive.
            db_api.refresh(task_ex)

            if (states.is_completed(task_ex.state)
                    or task_ex.state == states.RUNNING):
                return

            log_state = wf_ctrl.get_logical_task_state(task_ex)

            state = log_state.state
            state_info = log_state.state_info

            # Update 'triggered_by' because it could have changed.
            task_ex.runtime_context['triggered_by'] = log_state.triggered_by

            if state == states.RUNNING:
                continue_task(task_ex)
            elif state == states.ERROR:
                complete_task(task_ex, state, state_info)
            elif state == states.WAITING:
                LOG.info(
                    "Task execution is still in WAITING state"
                    " [task_ex_id=%s, task_name=%s]",
                    task_ex_id,
                    task_ex.name
                )
            else:
                # Must never get here.
                raise RuntimeError(
                    'Unexpected logical task state [task_ex_id=%s, '
                    'task_name=%s, state=%s]' %
                    (task_ex_id, task_ex.name, state)
                )
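
The NOTE inside the lock section describes classic double-checked locking: test the state cheaply outside the lock, then re-read and re-test after acquiring it, because another engine process may have advanced the task in the meantime. The pattern reduced to plain threading, with a dict standing in for the DB:

import threading

# Double-checked state refresh: cheap check outside the lock,
# authoritative re-check after acquiring it.
_locks = {}                   # task id -> lock ("named lock" stand-in)
_states = {'t1': 'WAITING'}

def refresh(task_id):
    if _states[task_id] in ('RUNNING', 'SUCCESS', 'ERROR'):
        return                # fast path avoids the locking cost
    lock = _locks.setdefault(task_id, threading.Lock())
    with lock:
        # Re-read: the state may have changed while we waited.
        if _states[task_id] in ('RUNNING', 'SUCCESS', 'ERROR'):
            return
        _states[task_id] = 'RUNNING'

refresh('t1')
assert _states['t1'] == 'RUNNING'
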
Example #15
    def __init__(self, wf_ex=None):
        self.wf_ex = wf_ex

        if wf_ex:
            # We're processing a workflow that's already in progress.
            self.wf_spec = spec_parser.get_workflow_spec_by_execution_id(
                wf_ex.id)
        else:
            self.wf_spec = None
Example #17
def restore_command_from_dict(wf_ex, cmd_dict):
    cmd_name = cmd_dict['cmd_name']

    wf_spec = spec_parser.get_workflow_spec_by_execution_id(wf_ex.id)
    task_spec = wf_spec.get_tasks()[cmd_dict['task_name']]
    ctx = cmd_dict['ctx']
    params = {'msg': cmd_dict.get('msg')} if 'msg' in cmd_dict else None
    triggered_by = cmd_dict.get('triggered_by')

    return create_command(cmd_name, wf_ex, wf_spec, task_spec, ctx, params,
                          triggered_by)
Example #18
def _on_action_update(action_ex):
    """Handles action update event.

    :param action_ex: Action execution.
    """

    task_ex = action_ex.task_execution

    if not task_ex:
        return

    task_spec = spec_parser.get_task_spec(task_ex.spec)

    wf_ex = task_ex.workflow_execution

    task = _create_task(
        wf_ex,
        spec_parser.get_workflow_spec_by_execution_id(wf_ex.id),
        task_spec,
        task_ex.in_context,
        task_ex
    )

    try:
        task.on_action_update(action_ex)

        if states.is_paused(action_ex.state):
            wf_handler.pause_workflow(wf_ex)

        if states.is_running(action_ex.state):
            # If any subworkflow of the parent workflow is paused,
            # then keep the parent workflow execution paused.
            for task_ex in wf_ex.task_executions:
                if states.is_paused(task_ex.state):
                    return

            # Otherwise, if no other subworkflow is paused,
            # then resume the parent workflow execution.
            wf_handler.resume_workflow(wf_ex)

    except exc.MistralException as e:
        wf_ex = task_ex.workflow_execution

        msg = ("Failed to handle action update [error=%s, wf=%s, task=%s,"
               " action=%s]:\n%s" %
               (e, wf_ex.name, task_ex.name, action_ex.name, tb.format_exc()))

        LOG.error(msg)

        task.set_state(states.ERROR, msg)

        wf_handler.force_fail_workflow(wf_ex, msg)

        return
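
The update handler keeps the parent execution paused while any of its sub-executions is paused, and resumes it only when none are. That invariant in isolation:

# Resume the parent only when no child remains paused.
def reconcile_parent(parent_state, child_states):
    if any(s == 'PAUSED' for s in child_states):
        return 'PAUSED'
    return 'RUNNING' if parent_state == 'PAUSED' else parent_state

assert reconcile_parent('RUNNING', ['PAUSED', 'RUNNING']) == 'PAUSED'
assert reconcile_parent('PAUSED', ['RUNNING', 'RUNNING']) == 'RUNNING'
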
Example #19
    def schedule(self, input_dict, target, index=0, desc='', safe_rerun=False,
                 timeout=None):
        assert not self.action_ex

        parent_wf_ex = self.task_ex.workflow_execution
        parent_wf_spec = spec_parser.get_workflow_spec_by_execution_id(
            parent_wf_ex.id
        )

        wf_def = engine_utils.resolve_workflow_definition(
            parent_wf_ex.workflow_name,
            parent_wf_spec.get_name(),
            namespace=parent_wf_ex.params['namespace'],
            wf_spec_name=self.wf_name
        )

        wf_spec = spec_parser.get_workflow_spec_by_definition_id(
            wf_def.id,
            wf_def.updated_at
        )

        # If the parent has a root_execution_id, it must be a sub-workflow. So
        # we should propagate that ID down. Otherwise the parent must be the
        # root execution and we should use the parent's ID.
        root_execution_id = parent_wf_ex.root_execution_id or parent_wf_ex.id

        wf_params = {
            'root_execution_id': root_execution_id,
            'task_execution_id': self.task_ex.id,
            'index': index,
            'namespace': parent_wf_ex.params['namespace']
        }

        if 'env' in parent_wf_ex.params:
            wf_params['env'] = parent_wf_ex.params['env']
            wf_params['evaluate_env'] = parent_wf_ex.params.get('evaluate_env')

        if 'notify' in parent_wf_ex.params:
            wf_params['notify'] = parent_wf_ex.params['notify']

        for k, v in list(input_dict.items()):
            if k not in wf_spec.get_input():
                wf_params[k] = v
                del input_dict[k]

        wf_handler.start_workflow(
            wf_def.id,
            wf_def.namespace,
            None,
            input_dict,
            "sub-workflow execution",
            wf_params
        )
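
The root_execution_id propagation is one line, but it is what keeps every nested execution pointing at the top of the execution tree: each level passes along its own root if it has one, otherwise itself. Shown with some depth:

# Every sub-execution inherits its parent's root; the top-level
# execution is its own root.
def root_for_child(parent_id, parent_root_id=None):
    return parent_root_id or parent_id

root = root_for_child('wf-ex-1')               # top level: itself
child = root_for_child('wf-ex-2', root)        # depth 1
grandchild = root_for_child('wf-ex-3', child)  # depth 2
assert root == child == grandchild == 'wf-ex-1'
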
Example #20
def _on_action_update(action_ex):
    """Handles action update event.

    :param action_ex: Action execution.
    """

    task_ex = action_ex.task_execution

    if not task_ex:
        return

    task_spec = spec_parser.get_task_spec(task_ex.spec)

    wf_ex = task_ex.workflow_execution

    task = _create_task(
        wf_ex,
        spec_parser.get_workflow_spec_by_execution_id(wf_ex.id),
        task_spec,
        task_ex.in_context,
        task_ex
    )

    try:
        task.on_action_update(action_ex)

        if states.is_paused(action_ex.state):
            wf_handler.pause_workflow(wf_ex)

        if states.is_running(action_ex.state):
            # If any subworkflow of the parent workflow is paused,
            # then keep the parent workflow execution paused.
            for task_ex in wf_ex.task_executions:
                if states.is_paused(task_ex.state):
                    return

            # Otherwise, if no other subworkflow is paused,
            # then resume the parent workflow execution.
            wf_handler.resume_workflow(wf_ex)

    except exc.MistralException as e:
        wf_ex = task_ex.workflow_execution

        msg = ("Failed to handle action update [error=%s, wf=%s, task=%s,"
               " action=%s]:\n%s" %
               (e, wf_ex.name, task_ex.name, action_ex.name, tb.format_exc()))

        force_fail_task(task_ex, msg, task=task)

        return

    _check_affected_tasks(task)
Example #22
File: base.py  Project: openstack/mistral
    def __init__(self, wf_ex, wf_spec=None):
        """Creates a new workflow controller.

        :param wf_ex: Workflow execution.

        :param wf_spec: Workflow specification.
        """
        self.wf_ex = wf_ex

        if wf_spec is None:
            wf_spec = spec_parser.get_workflow_spec_by_execution_id(wf_ex.id)

        self.wf_spec = wf_spec
Example #23
def _refresh_task_state(task_ex_id):
    with db_api.transaction():
        task_ex = db_api.load_task_execution(task_ex_id)

        if not task_ex:
            return

        wf_ex = task_ex.workflow_execution

        if states.is_completed(wf_ex.state):
            return

        wf_spec = spec_parser.get_workflow_spec_by_execution_id(
            task_ex.workflow_execution_id
        )

        wf_ctrl = wf_base.get_controller(wf_ex, wf_spec)

        log_state = wf_ctrl.get_logical_task_state(
            task_ex
        )

        state = log_state.state
        state_info = log_state.state_info

        # Update 'triggered_by' because it could have changed.
        task_ex.runtime_context['triggered_by'] = log_state.triggered_by

        if state == states.RUNNING:
            continue_task(task_ex)
        elif state == states.ERROR:
            complete_task(task_ex, state, state_info)
        elif state == states.WAITING:
            # Let's assume that a task takes 0.01 sec on average to complete
            # and, based on this assumption, calculate the time of the next
            # check. The estimate is very rough, of course, but the delay
            # will decrease as task preconditions complete, which gives a
            # decent asymptotic approximation.
            # For example, if a 'join' task has 100 inbound incomplete tasks,
            # the next 'refresh_task_state' call will happen in 1 second; for
            # 500 tasks it will be 5 seconds. The larger the workflow is, the
            # more beneficial this mechanism will be.
            delay = int(log_state.cardinality * 0.01)

            _schedule_refresh_task_state(task_ex, max(1, delay))
        else:
            # Must never get here.
            raise RuntimeError(
                'Unexpected logical task state [task_ex_id=%s, task_name=%s, '
                'state=%s]' % (task_ex_id, task_ex.name, state)
            )
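
The WAITING back-off in isolation: the more unsatisfied preconditions a 'join' has, the later the next state refresh, with a floor of one second (a sketch of the arithmetic, matching the code above):

# Delay before the next refresh, assuming ~0.01 s per inbound task.
def next_refresh_delay(cardinality, avg_task_sec=0.01):
    return max(1, int(cardinality * avg_task_sec))

assert next_refresh_delay(10) == 1    # floored at 1 second
assert next_refresh_delay(100) == 1
assert next_refresh_delay(500) == 5
assert next_refresh_delay(2000) == 20
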
Example #24
def _refresh_task_state(task_ex_id):
    with db_api.transaction():
        task_ex = db_api.load_task_execution(task_ex_id)

        if not task_ex:
            return

        wf_ex = task_ex.workflow_execution

        if states.is_completed(wf_ex.state):
            return

        wf_spec = spec_parser.get_workflow_spec_by_execution_id(
            task_ex.workflow_execution_id)

        wf_ctrl = wf_base.get_controller(wf_ex, wf_spec)

        with db_api.named_lock(task_ex.id):
            db_api.refresh(task_ex)

            if (states.is_completed(task_ex.state)
                    or task_ex.state == states.RUNNING):
                return

            log_state = wf_ctrl.get_logical_task_state(task_ex)

            state = log_state.state
            state_info = log_state.state_info

            # Update 'triggered_by' because it could have changed.
            task_ex.runtime_context['triggered_by'] = log_state.triggered_by

            if state == states.RUNNING:
                continue_task(task_ex)
            elif state == states.ERROR:
                complete_task(task_ex, state, state_info)
            elif state == states.WAITING:
                LOG.info(
                    "Task execution is still in WAITING state"
                    " [task_ex_id=%s, task_name=%s]", task_ex_id, task_ex.name)
            else:
                # Must never get here.
                raise RuntimeError(
                    'Unexpected logical task state [task_ex_id=%s, '
                    'task_name=%s, state=%s]' %
                    (task_ex_id, task_ex.name, state))
Example #26
def _check_affected_tasks(task):
    if not task.is_completed():
        return

    task_ex = task.task_ex

    wf_ex = task_ex.workflow_execution

    if states.is_completed(wf_ex.state):
        return

    wf_spec = spec_parser.get_workflow_spec_by_execution_id(
        task_ex.workflow_execution_id
    )

    wf_ctrl = wf_base.get_controller(wf_ex, wf_spec)

    affected_task_execs = wf_ctrl.find_indirectly_affected_task_executions(
        task_ex.name
    )

    def _schedule_if_needed(t_ex_id):
        # NOTE(rakhmerov): we need to minimize the number of delayed calls
        # that refresh state of "join" tasks. We'll check if corresponding
        # calls are already scheduled. Note that we must ignore delayed calls
        # that are currently being processed because of a possible race with
        # the transaction that deletes delayed calls, i.e. the call may still
        # exist in DB (the deleting transaction didn't commit yet) but it has
        # already been processed and the task state hasn't changed.
        cnt = db_api.get_delayed_calls_count(
            key=_get_refresh_state_job_key(t_ex_id),
            processing=False
        )

        if cnt == 0:
            _schedule_refresh_task_state(t_ex_id)

    for t_ex in affected_task_execs:
        post_tx_queue.register_operation(
            _schedule_if_needed,
            args=[t_ex.id],
            in_tx=True
        )
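
_schedule_if_needed() exists to keep refresh jobs for the same 'join' task from piling up: a new job is scheduled only if no unprocessed one with that key exists. A toy scheduler showing the idempotence:

# Idempotent scheduling: at most one pending refresh job per key.
_pending = set()

def schedule_refresh(key):
    if key in _pending:
        return False   # already scheduled; do nothing
    _pending.add(key)
    return True

key = 'refresh_task_state-task42'
assert schedule_refresh(key) is True
assert schedule_refresh(key) is False   # deduplicated
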
Example #28
def _check_affected_tasks(task):
    # TODO(rakhmerov): this method should eventually move into
    # the class Task. The obvious signal is the only argument
    # that it takes.
    if not task.is_completed():
        return

    task_ex = task.task_ex

    wf_ex = task_ex.workflow_execution

    if states.is_completed(wf_ex.state):
        return

    wf_spec = spec_parser.get_workflow_spec_by_execution_id(
        task_ex.workflow_execution_id)

    wf_ctrl = wf_base.get_controller(wf_ex, wf_spec)

    affected_task_execs = wf_ctrl.find_indirectly_affected_task_executions(
        task_ex.name)

    def _schedule_if_needed(t_ex_id):
        # NOTE(rakhmerov): we need to minimize the number of scheduled jobs
        # that refresh state of "join" tasks. We'll check if corresponding
        # jobs are already scheduled. Note that we must ignore scheduled jobs
        # that are currently being processed because of a possible race with
        # the transaction that deletes scheduled jobs, i.e. the job may still
        # exist in DB (the deleting transaction didn't commit yet) but it has
        # already been processed and the task state hasn't changed.
        sched = sched_base.get_system_scheduler()

        jobs_exist = sched.has_scheduled_jobs(
            key=_get_refresh_state_job_key(t_ex_id), processing=False)

        if not jobs_exist:
            _schedule_refresh_task_state(t_ex_id)

    for t_ex in affected_task_execs:
        post_tx_queue.register_operation(_schedule_if_needed,
                                         args=[t_ex.id],
                                         in_tx=True)
Example #29
def _check_affected_tasks(task):
    if not task.is_completed():
        return

    task_ex = task.task_ex

    wf_ex = task_ex.workflow_execution

    if states.is_completed(wf_ex.state):
        return

    wf_spec = spec_parser.get_workflow_spec_by_execution_id(
        task_ex.workflow_execution_id)

    wf_ctrl = wf_base.get_controller(wf_ex, wf_spec)

    affected_task_execs = wf_ctrl.find_indirectly_affected_task_executions(
        task_ex.name)

    for t_ex in affected_task_execs:
        _schedule_refresh_task_state(t_ex)
Example #30
def complete_task(task_ex, state, state_info):
    wf_spec = spec_parser.get_workflow_spec_by_execution_id(
        task_ex.workflow_execution_id
    )

    task = _build_task_from_execution(wf_spec, task_ex)

    try:
        task.complete(state, state_info)
    except exc.MistralException as e:
        wf_ex = task_ex.workflow_execution

        msg = (
            "Failed to complete task [error=%s, wf=%s, task=%s]:\n%s" %
            (e, wf_ex.name, task_ex.name, tb.format_exc())
        )

        force_fail_task(task_ex, msg, task=task)

        return

    _check_affected_tasks(task)
Example #32
def continue_task(task_ex):
    wf_spec = spec_parser.get_workflow_spec_by_execution_id(
        task_ex.workflow_execution_id
    )

    task = _build_task_from_execution(wf_spec, task_ex)

    try:
        task.set_state(states.RUNNING, None)

        task.run()
    except exc.MistralException as e:
        wf_ex = task_ex.workflow_execution

        msg = (
            "Failed to run task [error=%s, wf=%s, task=%s]:\n%s" %
            (e, wf_ex.name, task_ex.name, tb.format_exc())
        )

        force_fail_task(task_ex, msg, task=task)

        return

    _check_affected_tasks(task)
Example #33
def complete_task(task_ex, state, state_info):
    wf_spec = spec_parser.get_workflow_spec_by_execution_id(
        task_ex.workflow_execution_id
    )

    task = _build_task_from_execution(wf_spec, task_ex)

    try:
        task.complete(state, state_info)
    except exc.MistralException as e:
        wf_ex = task_ex.workflow_execution

        msg = (
            "Failed to complete task [error=%s, wf=%s, task=%s]:\n%s" %
            (e, wf_ex.name, task_ex.name, tb.format_exc())
        )

        LOG.error(msg)

        task.set_state(states.ERROR, msg)

        wf_handler.force_fail_workflow(wf_ex, msg)

        return
Example #34
    def test_cache_workflow_spec_by_execution_id(self):
        wf_text = """
        version: '2.0'

        wf:
          tasks:
            task1:
              action: std.echo output="Echo"
        """

        wfs = wf_service.create_workflows(wf_text)

        self.assertEqual(0, spec_parser.get_wf_execution_spec_cache_size())
        self.assertEqual(0, spec_parser.get_wf_definition_spec_cache_size())

        wf_def = wfs[0]

        wf_spec = spec_parser.get_workflow_spec_by_definition_id(
            wf_def.id, wf_def.updated_at)

        self.assertEqual(1, len(wf_spec.get_tasks()))
        self.assertEqual(0, spec_parser.get_wf_execution_spec_cache_size())
        self.assertEqual(1, spec_parser.get_wf_definition_spec_cache_size())

        with db_api.transaction():
            wf_ex = db_api.create_workflow_execution({
                'id': '1-2-3-4',
                'name': 'wf',
                'workflow_id': wf_def.id,
                'spec': wf_spec.to_dict(),
                'state': states.RUNNING
            })

            # Check that we can get a valid spec by execution id.
            wf_spec_by_exec_id = spec_parser.get_workflow_spec_by_execution_id(
                wf_ex.id)

        self.assertEqual(1, len(wf_spec_by_exec_id.get_tasks()))

        # Now update workflow definition and check that cache is updated too.

        wf_text = """
        version: '2.0'

        wf:
          tasks:
            task1:
              action: std.echo output="1"

            task2:
              action: std.echo output="2"
        """

        wfs = wf_service.update_workflows(wf_text)

        self.assertEqual(1, spec_parser.get_wf_definition_spec_cache_size())

        wf_spec = spec_parser.get_workflow_spec_by_definition_id(
            wfs[0].id, wfs[0].updated_at)

        self.assertEqual(2, len(wf_spec.get_tasks()))
        self.assertEqual(2, spec_parser.get_wf_definition_spec_cache_size())
        self.assertEqual(1, spec_parser.get_wf_execution_spec_cache_size())

        # Now finally update execution cache and check that we can
        # get a valid spec by execution id.
        spec_parser.cache_workflow_spec_by_execution_id(wf_ex.id, wf_spec)

        wf_spec_by_exec_id = spec_parser.get_workflow_spec_by_execution_id(
            wf_ex.id)

        self.assertEqual(2, len(wf_spec_by_exec_id.get_tasks()))
Example #36
    def schedule(self, input_dict, target, index=0, desc='', safe_rerun=False,
                 timeout=None):
        assert not self.action_ex

        self.validate_input(input_dict)

        parent_wf_ex = self.task_ex.workflow_execution
        parent_wf_spec = spec_parser.get_workflow_spec_by_execution_id(
            parent_wf_ex.id
        )

        wf_def = engine_utils.resolve_workflow_definition(
            parent_wf_ex.workflow_name,
            parent_wf_spec.get_name(),
            namespace=parent_wf_ex.params['namespace'],
            wf_spec_name=self.wf_name
        )

        wf_spec = spec_parser.get_workflow_spec_by_definition_id(
            wf_def.id,
            wf_def.updated_at
        )

        # If the parent has a root_execution_id, it must be a sub-workflow. So
        # we should propagate that ID down. Otherwise the parent must be the
        # root execution and we should use the parent's ID.
        root_execution_id = parent_wf_ex.root_execution_id or parent_wf_ex.id

        wf_params = {
            'root_execution_id': root_execution_id,
            'task_execution_id': self.task_ex.id,
            'index': index,
            'namespace': parent_wf_ex.params['namespace']
        }

        if 'notify' in parent_wf_ex.params:
            wf_params['notify'] = parent_wf_ex.params['notify']

        for k, v in list(input_dict.items()):
            if k not in wf_spec.get_input():
                wf_params[k] = v
                del input_dict[k]

        if cfg.CONF.engine.start_subworkflows_via_rpc:
            def _start_subworkflow():
                rpc.get_engine_client().start_workflow(
                    wf_def.id,
                    wf_def.namespace,
                    None,
                    input_dict,
                    "sub-workflow execution",
                    async_=True,
                    **wf_params
                )

            post_tx_queue.register_operation(_start_subworkflow)
        else:
            wf_handler.start_workflow(
                wf_def.id,
                wf_def.namespace,
                None,
                input_dict,
                "sub-workflow execution",
                wf_params
            )
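
When start_subworkflows_via_rpc is enabled, the RPC call is pushed through post_tx_queue so it runs only after the surrounding DB transaction completes, presumably to avoid the engine handling the start request before the transaction's rows are visible. The defer-until-commit pattern, minimally (illustrative, not Mistral's post_tx_queue):

# Defer side effects until after "commit": callables registered
# during the transaction run only once it has completed.
_queue = []

def register_operation(func, args=()):
    _queue.append((func, args))

def commit():
    ops, _queue[:] = _queue[:], []  # drain, then run outside the tx
    for func, args in ops:
        func(*args)

started = []
register_operation(started.append, args=('sub-wf',))
assert started == []    # nothing runs inside the transaction
commit()
assert started == ['sub-wf']
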
Example #37
    def test_cache_workflow_spec_by_execution_id(self):
        wf_text = """
        version: '2.0'

        wf:
          tasks:
            task1:
              action: std.echo output="Echo"
        """

        wfs = wf_service.create_workflows(wf_text)

        self.assertEqual(0, spec_parser.get_wf_execution_spec_cache_size())
        self.assertEqual(0, spec_parser.get_wf_definition_spec_cache_size())

        wf_def = wfs[0]

        wf_spec = spec_parser.get_workflow_spec_by_definition_id(
            wf_def.id,
            wf_def.updated_at
        )

        self.assertEqual(1, len(wf_spec.get_tasks()))
        self.assertEqual(0, spec_parser.get_wf_execution_spec_cache_size())
        self.assertEqual(1, spec_parser.get_wf_definition_spec_cache_size())

        wf_ex = db_api.create_workflow_execution({
            'id': '1-2-3-4',
            'name': 'wf',
            'workflow_id': wf_def.id,
            'spec': wf_spec.to_dict(),
            'state': states.RUNNING
        })

        # Check that we can get a valid spec by execution id.

        wf_spec_by_exec_id = spec_parser.get_workflow_spec_by_execution_id(
            wf_ex.id
        )

        self.assertEqual(1, len(wf_spec_by_exec_id.get_tasks()))

        # Now update workflow definition and check that cache is updated too.

        wf_text = """
        version: '2.0'

        wf:
          tasks:
            task1:
              action: std.echo output="1"

            task2:
              action: std.echo output="2"
        """

        wfs = wf_service.update_workflows(wf_text)

        self.assertEqual(1, spec_parser.get_wf_definition_spec_cache_size())

        wf_spec = spec_parser.get_workflow_spec_by_definition_id(
            wfs[0].id,
            wfs[0].updated_at
        )

        self.assertEqual(2, len(wf_spec.get_tasks()))
        self.assertEqual(2, spec_parser.get_wf_definition_spec_cache_size())
        self.assertEqual(1, spec_parser.get_wf_execution_spec_cache_size())

        # Now finally update execution cache and check that we can
        # get a valid spec by execution id.
        spec_parser.cache_workflow_spec_by_execution_id(wf_ex.id, wf_spec)

        wf_spec_by_exec_id = spec_parser.get_workflow_spec_by_execution_id(
            wf_ex.id
        )

        self.assertEqual(2, len(wf_spec_by_exec_id.get_tasks()))