def test_rerun_sub_workflow(self):
        """Rerun a task in the innermost sub-workflow after a failure.

        wf1 -> wf2 -> wf3 form a chain of nested workflow executions.
        After all three fail on the first run, rerunning task3 (the
        leaf task in wf3) must cascade SUCCESS back up to wf1 without
        creating any new executions.
        """
        # NOTE(review): std.noop normally succeeds; the initial ERROR
        # state presumably comes from the test fixture/environment —
        # confirm against the test class setup.
        wf_service.create_workflows("""---
        version: '2.0'
        wf1:
          tasks:
            task1:
              workflow: wf2
        wf2:
          tasks:
            task2:
              workflow: wf3
        wf3:
          tasks:
            task3:
              action: std.noop""")

        # Run workflow and fail task.
        wf1_ex = self.engine.start_workflow('wf1')

        self.await_workflow_error(wf1_ex.id)

        with db_api.transaction():
            wf_exs = db_api.get_workflow_executions()
            task_exs = db_api.get_task_executions()

            # One workflow execution and one task execution per nesting level.
            self.assertEqual(3, len(wf_exs),
                             'The number of workflow executions')
            self.assertEqual(3, len(task_exs),
                             'The number of task executions')

            for wf_ex in wf_exs:
                self.assertEqual(states.ERROR, wf_ex.state,
                                 'The executions must fail the first time')
            for task_ex in task_exs:
                self.assertEqual(states.ERROR, task_ex.state,
                                 'The tasks must fail the first time')

            # Locate the leaf task to rerun.
            wf3_ex = self._assert_single_item(wf_exs, name='wf3')
            task3_ex = self._assert_single_item(wf3_ex.task_executions,
                                                name="task3")

        self.engine.rerun_workflow(task3_ex.id)

        self.await_workflow_success(wf1_ex.id)

        with db_api.transaction():
            wf_exs = db_api.get_workflow_executions()
            task_exs = db_api.get_task_executions()

            # The rerun must reuse the existing executions, not create new ones.
            self.assertEqual(3, len(wf_exs),
                             'The number of workflow executions')
            self.assertEqual(3, len(task_exs),
                             'The number of task executions')

            for wf_ex in wf_exs:
                self.assertEqual(states.SUCCESS, wf_ex.state,
                                 'The executions must success the second time')
            for task_ex in task_exs:
                self.assertEqual(states.SUCCESS, task_ex.state,
                                 'The tasks must success the second time')
    def test_rerun_sub_workflow(self):
        """Rerun a task in the innermost sub-workflow after a failure.

        NOTE(review): this is an identically-named duplicate of an
        earlier test method in this file; in a single class the later
        definition would shadow the earlier one — confirm these came
        from different source files.
        """
        wf_service.create_workflows("""---
        version: '2.0'
        wf1:
          tasks:
            task1:
              workflow: wf2
        wf2:
          tasks:
            task2:
              workflow: wf3
        wf3:
          tasks:
            task3:
              action: std.noop""")

        # Run workflow and fail task.
        wf1_ex = self.engine.start_workflow('wf1')

        self.await_workflow_error(wf1_ex.id)

        with db_api.transaction():
            wf_exs = db_api.get_workflow_executions()
            task_exs = db_api.get_task_executions()

            # One workflow execution and one task execution per nesting level.
            self.assertEqual(3, len(wf_exs),
                             'The number of workflow executions')
            self.assertEqual(3, len(task_exs),
                             'The number of task executions')

            for wf_ex in wf_exs:
                self.assertEqual(states.ERROR, wf_ex.state,
                                 'The executions must fail the first time')
            for task_ex in task_exs:
                self.assertEqual(states.ERROR, task_ex.state,
                                 'The tasks must fail the first time')

            # Locate the leaf task to rerun.
            wf3_ex = self._assert_single_item(wf_exs, name='wf3')
            task3_ex = self._assert_single_item(wf3_ex.task_executions,
                                                name="task3")

        self.engine.rerun_workflow(task3_ex.id)

        self.await_workflow_success(wf1_ex.id)

        with db_api.transaction():
            wf_exs = db_api.get_workflow_executions()
            task_exs = db_api.get_task_executions()

            # The rerun must reuse the existing executions, not create new ones.
            self.assertEqual(3, len(wf_exs),
                             'The number of workflow executions')
            self.assertEqual(3, len(task_exs),
                             'The number of task executions')

            for wf_ex in wf_exs:
                self.assertEqual(states.SUCCESS, wf_ex.state,
                                 'The executions must success the second time')
            for task_ex in task_exs:
                self.assertEqual(states.SUCCESS, task_ex.state,
                                 'The tasks must success the second time')
# --- Example no. 3 ---
    def test_delete_join_completion_check_on_execution_delete(self):
        """Deleting a workflow execution removes pending join-check calls.

        task2 never completes (std.async_noop), so join_task stays in
        WAITING and a delayed '_refresh_task_state' call exists. After
        the execution is deleted, those delayed calls must disappear.
        """
        wf_text = """---
        version: '2.0'

        wf:
          tasks:
            task1:
              action: std.noop
              on-success: join_task

            task2:
              description: Never ends
              action: std.async_noop
              on-success: join_task

            join_task:
              join: all
        """

        wf_service.create_workflows(wf_text)

        wf_ex = self.engine.start_workflow('wf')

        tasks = db_api.get_task_executions(workflow_execution_id=wf_ex.id)

        self.assertGreaterEqual(len(tasks), 2)

        task1 = self._assert_single_item(tasks, name='task1')

        self.await_task_success(task1.id)

        # Once task1 is finished we know that join_task must be created.

        tasks = db_api.get_task_executions(workflow_execution_id=wf_ex.id)

        self._assert_single_item(
            tasks,
            name='join_task',
            state=states.WAITING
        )

        calls = db_api.get_delayed_calls()

        mtd_name = 'mistral.engine.task_handler._refresh_task_state'

        cnt = sum([1 for c in calls if c.target_method_name == mtd_name])

        # There can be 2 calls with different value of 'processing' flag.
        self.assertTrue(cnt == 1 or cnt == 2)

        # Stop the workflow.
        db_api.delete_workflow_execution(wf_ex.id)

        # The delayed join-completion checks must be cleaned up as well.
        self._await(
            lambda:
            len(db_api.get_delayed_calls(target_method_name=mtd_name)) == 0
        )
# --- Example no. 4 ---
    def test_delete_join_completion_check_on_stop(self):
        """Stopping a workflow removes pending join-check delayed calls.

        Same setup as the execution-delete variant, but the workflow is
        stopped (CANCELLED) instead of deleted; the delayed
        '_refresh_task_state' calls must still be cleaned up.
        """
        wf_text = """---
        version: '2.0'

        wf:
          tasks:
            task1:
              action: std.noop
              on-success: join_task

            task2:
              description: Never ends
              action: std.async_noop
              on-success: join_task

            join_task:
              join: all
        """

        wf_service.create_workflows(wf_text)

        wf_ex = self.engine.start_workflow('wf')

        tasks = db_api.get_task_executions(workflow_execution_id=wf_ex.id)

        self.assertGreaterEqual(len(tasks), 2)

        task1 = self._assert_single_item(tasks, name='task1')

        self.await_task_success(task1.id)

        # Once task1 is finished we know that join_task must be created.

        tasks = db_api.get_task_executions(workflow_execution_id=wf_ex.id)

        self._assert_single_item(
            tasks,
            name='join_task',
            state=states.WAITING
        )

        # Stop the workflow.
        self.engine.stop_workflow(wf_ex.id, state=states.CANCELLED)

        mtd_name = 'mistral.engine.task_handler._refresh_task_state'

        # All join-completion checks must eventually be removed.
        self._await(
            lambda:
            len(db_api.get_delayed_calls(target_method_name=mtd_name)) == 0
        )
# --- Example no. 5 ---
    def test_delete_join_completion_check_on_execution_delete(self):
        """Deleting a workflow execution removes pending join-check calls.

        NOTE(review): near-duplicate of an identically-named test above
        (this variant does not pre-count the delayed calls) — likely
        scraped from a different revision of the same file.
        """
        wf_text = """---
        version: '2.0'

        wf:
          tasks:
            task1:
              action: std.noop
              on-success: join_task

            task2:
              description: Never ends
              action: std.async_noop
              on-success: join_task

            join_task:
              join: all
        """

        wf_service.create_workflows(wf_text)

        wf_ex = self.engine.start_workflow('wf')

        tasks = db_api.get_task_executions(workflow_execution_id=wf_ex.id)

        self.assertGreaterEqual(len(tasks), 2)

        task1 = self._assert_single_item(tasks, name='task1')

        self.await_task_success(task1.id)

        # Once task1 is finished we know that join_task must be created.

        tasks = db_api.get_task_executions(workflow_execution_id=wf_ex.id)

        self._assert_single_item(
            tasks,
            name='join_task',
            state=states.WAITING
        )

        # Stop the workflow.
        db_api.delete_workflow_execution(wf_ex.id)

        mtd_name = 'mistral.engine.task_handler._refresh_task_state'

        # The delayed join-completion checks must be cleaned up as well.
        self._await(
            lambda:
            len(db_api.get_delayed_calls(target_method_name=mtd_name)) == 0
        )
# --- Example no. 6 ---
    def test_cascade_delete_deep(self):
        """Cascade delete through a deep chain of nested workflow executions.

        The workflow recurses into itself 'level' times, producing
        level + 1 executions; deleting the root must remove every
        dependent workflow, task and action execution.
        """
        wf_text = """
        version: 2.0

        wf:
          input:
            - level
          tasks:
            initial:
              action: std.noop
              on-success:
                - recurse: <% $.level > 0 %>

            recurse:
              workflow: wf
              input:
                level: <% $.level - 1 %>
        """

        wf_service.create_workflows(wf_text)

        wf_ex = self.engine.start_workflow('wf', wf_input={"level": 7})

        self.await_workflow_success(wf_ex.id)

        # level=7 yields the root plus 7 recursive executions.
        self.assertEqual(8, len(db_api.get_workflow_executions()))

        # Now delete the root workflow execution and make sure that
        # all dependent objects are deleted as well.
        db_api.delete_workflow_execution(wf_ex.id)

        self.assertEqual(0, len(db_api.get_workflow_executions()))
        self.assertEqual(0, len(db_api.get_task_executions()))
        self.assertEqual(0, len(db_api.get_action_executions()))
# --- Example no. 7 ---
def find_task_executions_by_name(wf_ex_id, task_name):
    """Finds task executions by workflow execution id and task name.

    :param wf_ex_id: Workflow execution id.
    :param task_name: Task name.
    :return: Task executions (possibly a cached value). The returned list
        may contain task execution clones not bound to the DB session.
    """
    # Fast path: serve from the per-workflow cache under the lock.
    with _TASK_EX_CACHE_LOCK:
        t_execs = _TASK_EX_CACHE[wf_ex_id].get(task_name)

    if t_execs:
        return t_execs

    t_execs = db_api.get_task_executions(
        workflow_execution_id=wf_ex_id,
        name=task_name,
        sort_keys=[]  # disable sorting
    )

    # Detach results from the DB session so cached values stay usable
    # after the session is gone.
    t_execs = [t_ex.get_clone() for t_ex in t_execs]

    # We can cache only finished tasks because they won't change.
    all_finished = (t_execs and all(
        [states.is_completed(t_ex.state) for t_ex in t_execs]))

    if all_finished:
        with _TASK_EX_CACHE_LOCK:
            _TASK_EX_CACHE[wf_ex_id][task_name] = t_execs

    return t_execs
# --- Example no. 8 ---
def find_task_executions_by_name(wf_ex_id, task_name):
    """Finds task executions by workflow execution id and task name.

    Variant keyed by a flat (wf_ex_id, task_name) tuple cache.
    NOTE(review): unlike the clone-based variant, results are cached
    as-is — presumably still bound to the DB session; confirm.

    :param wf_ex_id: Workflow execution id.
    :param task_name: Task name.
    :return: Task executions (possibly a cached value).
    """
    cache_key = (wf_ex_id, task_name)

    # Fast path: serve from the cache under the lock.
    with _TASK_EXECUTIONS_CACHE_LOCK:
        t_execs = _TASK_EXECUTIONS_CACHE.get(cache_key)

    if t_execs:
        return t_execs

    t_execs = db_api.get_task_executions(
        workflow_execution_id=wf_ex_id,
        name=task_name
    )

    # We can cache only finished tasks because they won't change.
    all_finished = (
        t_execs and
        all([states.is_completed(t_ex.state) for t_ex in t_execs])
    )

    if all_finished:
        with _TASK_EXECUTIONS_CACHE_LOCK:
            _TASK_EXECUTIONS_CACHE[cache_key] = t_execs

    return t_execs
# --- Example no. 9 ---
def find_task_executions_by_name(wf_ex_id, task_name):
    """Finds task executions by workflow execution id and task name.

    Variant using a nested per-workflow cache guarded by _CACHE_LOCK.

    :param wf_ex_id: Workflow execution id.
    :param task_name: Task name.
    :return: Task executions (possibly a cached value).
    """
    # Fast path: serve from the per-workflow cache under the lock.
    with _CACHE_LOCK:
        t_execs = _TASK_EX_CACHE[wf_ex_id].get(task_name)

    if t_execs:
        return t_execs

    t_execs = db_api.get_task_executions(workflow_execution_id=wf_ex_id,
                                         name=task_name)

    # We can cache only finished tasks because they won't change.
    all_finished = (t_execs and all(
        [states.is_completed(t_ex.state) for t_ex in t_execs]))

    if all_finished:
        with _CACHE_LOCK:
            _TASK_EX_CACHE[wf_ex_id][task_name] = t_execs

    return t_execs
# --- Example no. 10 ---
def task_(context, task_name=None):
    """Expression function returning the named (or current) task execution.

    With no name, or a name equal to the current task's, the exact
    current task instance is used; otherwise the task is looked up in
    the DB by name within the current workflow execution.
    """
    # May be absent when the expression is evaluated outside task scope.
    cur_task = context['__task_execution']

    use_current = cur_task and (not task_name or cur_task['name'] == task_name)

    if use_current:
        # Take exactly the current instance — by-name lookup would be
        # ambiguous when several tasks share the name.
        task_ex = db_api.get_task_execution(cur_task['id'])
    else:
        candidates = db_api.get_task_executions(
            workflow_execution_id=context['__execution']['id'],
            name=task_name
        )

        # TODO(rakhmerov): Account for multiple executions (i.e. in case of
        # cycles).
        task_ex = candidates[-1] if candidates else None

    if not task_ex:
        LOG.warning(
            "Task '%s' not found by the task() expression function",
            task_name
        )
        return None

    # We don't use to_dict() db model method because not all fields
    # make sense for user.
    return _convert_to_user_model(task_ex)
# --- Example no. 11 ---
def task_(context, task_name=None):
    """Expression function returning the named (or current) task execution.

    NOTE(review): unlike the sibling variant, this one returns None
    silently when the task is not found (no warning logged).
    """
    # This section may not exist in a context if it's calculated not in
    # task scope.
    cur_task = context['__task_execution']

    # 1. If task_name is empty it's 'task()' use case, we need to get the
    # current task.
    # 2. if task_name is not empty but it's equal to the current task name
    # we need to take exactly the current instance of this task. Otherwise
    # there may be ambiguity if there are many tasks with this name.
    # 3. In other case we just find a task in DB by the given name.
    if cur_task and (not task_name or cur_task['name'] == task_name):
        task_ex = db_api.get_task_execution(cur_task['id'])
    else:
        task_execs = db_api.get_task_executions(
            workflow_execution_id=context['__execution']['id'],
            name=task_name
        )

        # TODO(rakhmerov): Account for multiple executions (i.e. in case of
        # cycles).
        task_ex = task_execs[-1] if len(task_execs) > 0 else None

    if not task_ex:
        return None

    # We don't use to_dict() db model method because not all fields
    # make sense for user.
    return _convert_to_user_model(task_ex)
# --- Example no. 12 ---
    def test_invalid_workflow_input(self):
        """Starting a workflow with invalid input must create no objects."""
        # Check that in case of invalid input workflow objects aren't even
        # created.
        wf_text = """
        version: '2.0'

        wf:
          input:
            - param1
            - param2

          tasks:
            task1:
              action: std.noop
        """

        wf_service.create_workflows(wf_text)

        # 'wrong_param' is not declared and required params are missing.
        self.assertRaises(
            exc.InputException,
            self.engine.start_workflow,
            'wf',
            '',
            {'wrong_param': 'some_value'}
        )

        # No workflow, task or action executions may have been persisted.
        self.assertEqual(0, len(db_api.get_workflow_executions()))
        self.assertEqual(0, len(db_api.get_task_executions()))
        self.assertEqual(0, len(db_api.get_action_executions()))
# --- Example no. 13 ---
def task_(context, task_name):
    """Expression function: return a dict describing the named task execution.

    Older variant: ``task_name`` is required and the result dict is
    built by hand rather than via a shared conversion helper.

    :param context: Expression evaluation context (contains the
        '__task_execution' and '__execution' sections).
    :param task_name: Name of the task to look up.
    :return: Dict of selected task execution fields, or None if not found.
    """
    # Importing data_flow in order to break cycle dependency between modules.
    from mistral.workflow import data_flow

    # This section may not exist in a context if it's calculated not in
    # task scope.
    cur_task = context['__task_execution']

    # Use the exact current task instance when the name matches it.
    if cur_task and cur_task['name'] == task_name:
        task_ex = db_api.get_task_execution(cur_task['id'])
    else:
        task_execs = db_api.get_task_executions(
            workflow_execution_id=context['__execution']['id'], name=task_name)

        # TODO(rakhmerov): Account for multiple executions (i.e. in case of
        # cycles).
        task_ex = task_execs[-1] if len(task_execs) > 0 else None

    if not task_ex:
        return None

    # We don't use to_dict() db model method because not all fields
    # make sense for user.
    return {
        'id': task_ex.id,
        'name': task_ex.name,
        'spec': task_ex.spec,
        'state': task_ex.state,
        'state_info': task_ex.state_info,
        'result': data_flow.get_task_execution_result(task_ex),
        'published': task_ex.published
    }
# --- Example no. 14 ---
    def test_start_task1(self):
        """Start 'my_wb.wf1' up to task1 only and verify its published vars."""
        wf_input = {'param1': 'a', 'param2': 'b'}

        wf_ex = self.engine.start_workflow('my_wb.wf1',
                                           wf_input=wf_input,
                                           task_name='task1')

        # Execution 1.
        self.assertIsNotNone(wf_ex)
        self.assertDictEqual(wf_input, wf_ex.input)
        self.assertDictEqual({
            'task_name': 'task1',
            'namespace': '',
            'env': {}
        }, wf_ex.params)

        # Wait till workflow 'wf1' is completed.
        self.await_workflow_success(wf_ex.id)

        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            task_execs = wf_ex.task_executions

        # Only task1 must have run.
        self.assertEqual(1, len(task_execs))
        self.assertEqual(1, len(db_api.get_task_executions()))

        task_ex = self._assert_single_item(task_execs,
                                           name='task1',
                                           state=states.SUCCESS)

        self.assertDictEqual({'result1': 'a'}, task_ex.published)
# --- Example no. 15 ---
    def test_start_task2(self):
        """Start 'my_wb.wf1' up to task2; both task1 and task2 must run."""
        wf_input = {'param1': 'a', 'param2': 'b'}

        wf_ex = self.engine.start_workflow('my_wb.wf1',
                                           wf_input,
                                           task_name='task2')

        # Execution 1.
        self.assertIsNotNone(wf_ex)
        self.assertDictEqual(wf_input, wf_ex.input)
        self.assertDictEqual({'task_name': 'task2'}, wf_ex.params)

        # Wait till workflow 'wf1' is completed.
        self._await(lambda: self.is_execution_success(wf_ex.id))

        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        # task2 depends on task1, so both executions must exist.
        self.assertEqual(2, len(wf_ex.task_executions))
        self.assertEqual(2, len(db_api.get_task_executions()))

        task1_ex = self._assert_single_item(wf_ex.task_executions,
                                            name='task1',
                                            state=states.SUCCESS)

        self.assertDictEqual({'result1': 'a'}, task1_ex.published)

        task2_ex = self._assert_single_item(wf_ex.task_executions,
                                            name='task2',
                                            state=states.SUCCESS)

        self.assertDictEqual({'result2': 'a & b'}, task2_ex.published)
# --- Example no. 16 ---
def _build_fail_info_message(wf_ctrl, wf_ex):
    """Build a human-readable message describing why a workflow failed.

    :param wf_ctrl: Workflow controller used to check whether a task's
        error is already handled (so it is excluded from the report).
    :param wf_ex: Workflow execution whose failure is being described.
    :return: Multi-line string listing unhandled failed tasks and the
        error output of their action/workflow executions.
    """
    # Try to find where error is exactly.
    failed_tasks = [
        t_ex
        for t_ex in db_api.get_task_executions(workflow_execution_id=wf_ex.id,
                                               state=states.ERROR,
                                               sort_keys=['name'])
        if not wf_ctrl.is_error_handled_for(t_ex)
    ]

    # Collect message parts and join once instead of repeated string
    # concatenation (which is quadratic in the number of parts).
    parts = ['Failure caused by error in tasks: %s\n' %
             ', '.join(t.name for t in failed_tasks)]

    for t in failed_tasks:
        parts.append(
            '\n  %s [task_ex_id=%s] -> %s\n' % (t.name, t.id, t.state_info)
        )

        for i, ex in enumerate(t.action_executions):
            if ex.state == states.ERROR:
                # 'output' may be None; fall back to an empty mapping.
                output = (ex.output or {}).get('result', 'Unknown')
                parts.append('    [action_ex_id=%s, idx=%s]: %s\n' %
                             (ex.id, i, str(output)))

        for i, ex in enumerate(t.workflow_executions):
            if ex.state == states.ERROR:
                output = (ex.output or {}).get('result', 'Unknown')
                parts.append('    [wf_ex_id=%s, idx=%s]: %s\n' %
                             (ex.id, i, str(output)))

    return ''.join(parts)
# --- Example no. 17 ---
    def test_start_task1(self):
        """Start 'my_wb.wf1' up to task1 only (older params format)."""
        wf_input = {'param1': 'a', 'param2': 'b'}

        wf_ex = self.engine.start_workflow(
            'my_wb.wf1',
            wf_input,
            task_name='task1'
        )

        # Execution 1.
        self.assertIsNotNone(wf_ex)
        self.assertDictEqual(wf_input, wf_ex.input)
        self.assertDictEqual({'task_name': 'task1'}, wf_ex.params)

        # Wait till workflow 'wf1' is completed.
        self._await(lambda: self.is_execution_success(wf_ex.id))

        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        # Only task1 must have run.
        self.assertEqual(1, len(wf_ex.task_executions))
        self.assertEqual(1, len(db_api.get_task_executions()))

        task_ex = self._assert_single_item(
            wf_ex.task_executions,
            name='task1',
            state=states.SUCCESS
        )

        self.assertDictEqual({'result1': 'a'}, task_ex.published)
# --- Example no. 18 ---
def check_and_fix_integrity(wf_ex):
    """Detect and recover task executions stuck in RUNNING state.

    Scans RUNNING task executions of the given workflow execution and,
    for any task whose child executions have all been finished for
    longer than the configured delay, re-triggers completion handling
    so the task state is recalculated.

    :param wf_ex: Workflow execution to check.
    """
    check_after_seconds = CONF.engine.execution_integrity_check_delay

    if check_after_seconds < 0:
        # Never check integrity if it's a negative value.
        return

    # To break cyclic dependency.
    from mistral.engine import task_handler

    running_task_execs = db_api.get_task_executions(
        workflow_execution_id=wf_ex.id,
        state=states.RUNNING
    )

    for t_ex in running_task_execs:
        # The idea is that we take the latest known timestamp of the task
        # execution and consider it eligible for checking and fixing only
        # if some minimum period of time elapsed since the last update.
        timestamp = t_ex.updated_at or t_ex.created_at

        delta = timeutils.delta_seconds(timestamp, timeutils.utcnow())

        if delta < check_after_seconds:
            continue

        child_executions = t_ex.executions

        # Nothing to judge by if the task has no children yet.
        if not child_executions:
            continue

        all_finished = all(
            [states.is_completed(c_ex.state) for c_ex in child_executions]
        )

        if all_finished:
            # Find the timestamp of the most recently finished child.
            most_recent_child_timestamp = max(
                [c_ex.updated_at or c_ex.created_at for c_ex in
                 child_executions]
            )
            interval = timeutils.delta_seconds(
                most_recent_child_timestamp,
                timeutils.utcnow()
            )

            if interval > check_after_seconds:
                # We found a task execution in RUNNING state for which all
                # child executions are finished. We need to call
                # "schedule_on_action_complete" on the task handler for any of
                # the child executions so that the task state is calculated and
                # updated properly.
                LOG.warning(
                    "Found a task execution that is likely stuck in RUNNING"
                    " state because all child executions are finished,"
                    " will try to recover [task_execution=%s]", t_ex.id
                )

                task_handler.schedule_on_action_complete(child_executions[-1])
# --- Example no. 19 ---
def _get_task_resources_with_results(wf_ex_id=None):
    """Fetch task executions and wrap them into a Tasks resource.

    :param wf_ex_id: Optional workflow execution id used to restrict
        the query; when falsy, all task executions are fetched.
    :return: Tasks resource containing one task resource (with result)
        per task execution.
    """
    query_filters = {'workflow_execution_id': wf_ex_id} if wf_ex_id else {}

    task_resources = [
        _get_task_resource_with_result(t_ex)
        for t_ex in db_api.get_task_executions(**query_filters)
    ]

    return Tasks(tasks=task_resources)
# --- Example no. 20 ---
def _build_cancel_info_message(wf_ctrl, wf_ex):
    """Build a short message listing the cancelled tasks of an execution.

    :param wf_ctrl: Workflow controller (unused here; kept for interface
        symmetry with _build_fail_info_message).
    :param wf_ex: Workflow execution whose cancelled tasks are listed.
    :return: Single-line string naming the cancelled tasks.
    """
    # Try to find where cancel is exactly. The DB call already returns a
    # list, so the previous identity comprehension was redundant.
    cancelled_tasks = db_api.get_task_executions(
        workflow_execution_id=wf_ex.id,
        state=states.CANCELLED,
        sort_keys=['name']
    )

    return ('Cancelled tasks: %s' %
            ', '.join(t.name for t in cancelled_tasks))
# --- Example no. 21 ---
    def test_cascade_delete(self):
        """Deleting a parent execution cascades to nested sub-workflows.

        wf spawns sub_wf1 and sub_wf2; deleting the root execution must
        remove all workflow, task and action executions.
        """
        wf_text = """
        version: 2.0

        wf:
          tasks:
            task1:
              workflow: sub_wf1

            task2:
              workflow: sub_wf2

        sub_wf1:
          tasks:
            task1:
              action: std.noop

        sub_wf2:
          tasks:
            task1:
              action: std.noop
        """

        wf_service.create_workflows(wf_text)

        wf_ex = self.engine.start_workflow('wf')

        self.await_workflow_success(wf_ex.id)

        # Root + 2 sub-workflows; 2 tasks in wf + 1 in each sub-workflow;
        # one std.noop action execution per sub-workflow.
        self.assertEqual(3, len(db_api.get_workflow_executions()))
        self.assertEqual(4, len(db_api.get_task_executions()))
        self.assertEqual(2, len(db_api.get_action_executions()))

        # Now delete the root workflow execution and make sure that
        # all dependent objects are deleted as well.
        db_api.delete_workflow_execution(wf_ex.id)

        self.assertEqual(0, len(db_api.get_workflow_executions()))
        self.assertEqual(0, len(db_api.get_task_executions()))
        self.assertEqual(0, len(db_api.get_action_executions()))
# --- Example no. 22 ---
    def defer(self):
        """Defers task.

        This method puts task to a waiting state. An existing WAITING
        execution is reused; otherwise one is found or created under a
        named lock keyed by the task's unique key.
        """

        # NOTE(rakhmerov): using named locks may cause problems under load
        # with MySQL that raises a lot of deadlocks in case of high
        # parallelism so it makes sense to do a fast check if the object
        # already exists in DB outside of the lock.
        if not self.task_ex:
            t_execs = db_api.get_task_executions(
                workflow_execution_id=self.wf_ex.id,
                unique_key=self.unique_key,
                state=states.WAITING
            )

            self.task_ex = t_execs[0] if t_execs else None

        # A WAITING execution already exists — nothing to do.
        if self.task_ex:
            return

        with db_api.named_lock(self.unique_key):
            # Re-check under the lock: another process may have created
            # the execution (in any state) since the fast check above.
            if not self.task_ex:
                t_execs = db_api.get_task_executions(
                    workflow_execution_id=self.wf_ex.id,
                    unique_key=self.unique_key
                )

                self.task_ex = t_execs[0] if t_execs else None

            msg = 'Task is waiting.'

            if not self.task_ex:
                self._create_task_execution(
                    state=states.WAITING,
                    state_info=msg
                )
            elif self.task_ex.state != states.WAITING:
                self.set_state(states.WAITING, msg)
# --- Example no. 23 ---
    def defer(self):
        """Defers task.

        This method puts task to a waiting state.
        NOTE(review): byte-identical duplicate of the defer() method
        above — likely scraped twice from the same source file.
        """

        # NOTE(rakhmerov): using named locks may cause problems under load
        # with MySQL that raises a lot of deadlocks in case of high
        # parallelism so it makes sense to do a fast check if the object
        # already exists in DB outside of the lock.
        if not self.task_ex:
            t_execs = db_api.get_task_executions(
                workflow_execution_id=self.wf_ex.id,
                unique_key=self.unique_key,
                state=states.WAITING
            )

            self.task_ex = t_execs[0] if t_execs else None

        # A WAITING execution already exists — nothing to do.
        if self.task_ex:
            return

        with db_api.named_lock(self.unique_key):
            # Re-check under the lock: another process may have created
            # the execution (in any state) since the fast check above.
            if not self.task_ex:
                t_execs = db_api.get_task_executions(
                    workflow_execution_id=self.wf_ex.id,
                    unique_key=self.unique_key
                )

                self.task_ex = t_execs[0] if t_execs else None

            msg = 'Task is waiting.'

            if not self.task_ex:
                self._create_task_execution(
                    state=states.WAITING,
                    state_info=msg
                )
            elif self.task_ex.state != states.WAITING:
                self.set_state(states.WAITING, msg)
# --- Example no. 24 ---
def _get_tasks_from_db(workflow_execution_id=None, recursive=False, state=None,
                       flat=False):
    """Fetch task executions, optionally recursing into nested workflows.

    :param workflow_execution_id: Restrict to this workflow execution
        (and, with ``recursive``, to its nested executions).
    :param recursive: Also collect tasks of nested workflow executions.
    :param state: Filter by task state. Applied in Python (not in the
        DB query) when combined with a recursive per-execution fetch.
    :param flat: Extra filter flag forwarded to ``_should_pass_filter``;
        its exact semantics are defined by that helper.
    :return: List of task executions.
    """
    task_execs = []
    nested_task_exs = []

    kwargs = {}

    if workflow_execution_id:
        kwargs['workflow_execution_id'] = workflow_execution_id

    # We can't add state to query if we want to filter by workflow_execution_id
    # recursively. There might be a workflow_execution in one state with a
    # nested workflow execution that has a task in the desired state until we
    # have an optimization for queering all workflow executions under a given
    # top level workflow execution, this is the way to go.
    if state and not (workflow_execution_id and recursive):
        kwargs['state'] = state

    task_execs.extend(db_api.get_task_executions(**kwargs))

    # If it is not recursive no need to check nested workflows.
    # If there is no workflow execution id, we already have all we need, and
    # doing more queries will just create duplication in the results.
    if recursive and workflow_execution_id:
        for t in task_execs:
            if t.type == utils.WORKFLOW_TASK_TYPE:
                # Get nested workflow execution that matches the task.
                nested_workflow_executions = db_api.get_workflow_executions(
                    task_execution_id=t.id
                )

                # There might be zero nested executions.
                for nested_workflow_execution in nested_workflow_executions:
                    nested_task_exs.extend(
                        _get_tasks_from_db(
                            nested_workflow_execution.id,
                            recursive,
                            state,
                            flat
                        )
                    )

    if state or flat:
        # Filter by state and flat.
        task_execs = [
            t for t in task_execs if _should_pass_filter(t, state, flat)
        ]

    # The nested tasks were already filtered, since this is a recursion.
    task_execs.extend(nested_task_exs)

    return task_execs
# --- Example no. 25 ---
def _get_tasks_from_db(workflow_execution_id=None, recursive=False, state=None,
                       flat=False):
    """Fetch task executions, optionally recursing into nested workflows.

    NOTE(review): byte-identical duplicate of the _get_tasks_from_db
    above — likely scraped twice from the same source file.

    :param workflow_execution_id: Restrict to this workflow execution
        (and, with ``recursive``, to its nested executions).
    :param recursive: Also collect tasks of nested workflow executions.
    :param state: Filter by task state. Applied in Python (not in the
        DB query) when combined with a recursive per-execution fetch.
    :param flat: Extra filter flag forwarded to ``_should_pass_filter``.
    :return: List of task executions.
    """
    task_execs = []
    nested_task_exs = []

    kwargs = {}

    if workflow_execution_id:
        kwargs['workflow_execution_id'] = workflow_execution_id

    # We can't add state to query if we want to filter by workflow_execution_id
    # recursively. There might be a workflow_execution in one state with a
    # nested workflow execution that has a task in the desired state until we
    # have an optimization for queering all workflow executions under a given
    # top level workflow execution, this is the way to go.
    if state and not (workflow_execution_id and recursive):
        kwargs['state'] = state

    task_execs.extend(db_api.get_task_executions(**kwargs))

    # If it is not recursive no need to check nested workflows.
    # If there is no workflow execution id, we already have all we need, and
    # doing more queries will just create duplication in the results.
    if recursive and workflow_execution_id:
        for t in task_execs:
            if t.type == utils.WORKFLOW_TASK_TYPE:
                # Get nested workflow execution that matches the task.
                nested_workflow_executions = db_api.get_workflow_executions(
                    task_execution_id=t.id
                )

                # There might be zero nested executions.
                for nested_workflow_execution in nested_workflow_executions:
                    nested_task_exs.extend(
                        _get_tasks_from_db(
                            nested_workflow_execution.id,
                            recursive,
                            state,
                            flat
                        )
                    )

    if state or flat:
        # Filter by state and flat.
        task_execs = [
            t for t in task_execs if _should_pass_filter(t, state, flat)
        ]

    # The nested tasks were already filtered, since this is a recursion.
    task_execs.extend(nested_task_exs)

    return task_execs
# --- Example no. 26 ---
def _build_cancel_info_message(wf_ctrl, wf_ex):
    """Build a message naming the cancelled tasks of a workflow execution.

    :param wf_ctrl: Workflow controller. Unused here; kept for signature
        parity with _build_fail_info_message.
    :param wf_ex: Workflow execution whose task executions are inspected.
    :return: A string listing the cancelled tasks sorted by name.
    """
    # Try to find where cancel is exactly.
    # NOTE: the original wrapped the query result in a redundant list
    # comprehension; iterating the result directly is equivalent.
    cancelled_tasks = db_api.get_task_executions(
        workflow_execution_id=wf_ex.id,
        state=states.CANCELLED,
        sort_keys=['name']
    )

    return (
        'Cancelled tasks: %s' % ', '.join(t.name for t in cancelled_tasks)
    )
Exemplo n.º 27
0
    def test_start_task2(self):
        """Start 'my_wb.wf1' targeting 'task2' and check both tasks run.

        Verifies that the dependency 'task1' is executed before 'task2'
        and that each task publishes its expected variables.
        """
        wf_input = {
            'param1': 'a',
            'param2': 'b'
        }

        # task_name asks the engine to run only the subgraph required to
        # reach 'task2'.
        wf_ex = self.engine.start_workflow(
            'my_wb.wf1',
            wf_input=wf_input,
            task_name='task2'
        )

        # Execution 1.
        self.assertIsNotNone(wf_ex)
        self.assertDictEqual(wf_input, wf_ex.input)
        # The engine records the targeted task plus default namespace/env
        # in the execution parameters.
        self.assertDictEqual(
            {
                'task_name': 'task2',
                'namespace': '',
                'env': {}
            },
            wf_ex.params
        )

        # Wait till workflow 'wf1' is completed.
        self.await_workflow_success(wf_ex.id)

        # Read the lazy-loaded task_executions relation inside a
        # transaction so the session is still open.
        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            task_execs = wf_ex.task_executions

        self.assertEqual(2, len(task_execs))
        self.assertEqual(2, len(db_api.get_task_executions()))

        task1_ex = self._assert_single_item(
            task_execs,
            name='task1',
            state=states.SUCCESS
        )

        # 'task1' publishes param1 as result1.
        self.assertDictEqual({'result1': 'a'}, task1_ex.published)

        task2_ex = self._assert_single_item(
            task_execs,
            name='task2',
            state=states.SUCCESS
        )

        # 'task2' combines both inputs into result2.
        self.assertDictEqual({'result2': 'a & b'}, task2_ex.published)
Exemplo n.º 28
0
    def test_one_line_requires_syntax(self):
        """Check the one-line 'requires' syntax runs the dependency chain.

        Starting at 'task4' must also execute its requirement 'task3',
        and only those two tasks.
        """
        wf_input = {'param1': 'a', 'param2': 'b'}

        wf_ex = self.engine.start_workflow('my_wb.wf1',
                                           wf_input=wf_input,
                                           task_name='task4')

        self.await_workflow_success(wf_ex.id)

        tasks = db_api.get_task_executions()

        # Only the targeted task and its single requirement should run.
        self.assertEqual(2, len(tasks))

        self._assert_single_item(tasks, name='task4', state=states.SUCCESS)
        self._assert_single_item(tasks, name='task3', state=states.SUCCESS)
Exemplo n.º 29
0
    def test_one_line_requires_syntax(self):
        """Check the one-line 'requires' syntax runs the dependency chain.

        Older-API variant: passes the input positionally and polls
        completion via _await instead of await_workflow_success.
        """
        wf_input = {'param1': 'a', 'param2': 'b'}

        wf_ex = self.engine.start_workflow(
            'my_wb.wf1',
            wf_input,
            task_name='task4'
        )

        # Poll until the execution reaches SUCCESS.
        self._await(lambda: self.is_execution_success(wf_ex.id))

        tasks = db_api.get_task_executions()

        # Only 'task4' and its requirement 'task3' should have run.
        self.assertEqual(2, len(tasks))

        self._assert_single_item(tasks, name='task4', state=states.SUCCESS)
        self._assert_single_item(tasks, name='task3', state=states.SUCCESS)
Exemplo n.º 30
0
def _get_task_resources_with_results(wf_ex_id=None):
    """Fetch task executions and attach their JSON-encoded results.

    :param wf_ex_id: Optional workflow execution id used as a filter;
        when omitted, all task executions are returned.
    :return: Tasks resource containing one Task per task execution.
    """
    filters = {}

    if wf_ex_id:
        filters['workflow_execution_id'] = wf_ex_id

    tasks = []

    # Keep the DB session open while reading results:
    # get_task_execution_result may lazy-load related records, which can
    # fail on detached objects once the session is closed. This matches
    # the transactional variant of this helper used elsewhere.
    with db_api.transaction():
        task_execs = db_api.get_task_executions(**filters)

        for task_ex in task_execs:
            task = Task.from_dict(task_ex.to_dict())

            task.result = json.dumps(
                data_flow.get_task_execution_result(task_ex)
            )

            tasks.append(task)

    return Tasks(tasks=tasks)
Exemplo n.º 31
0
def _get_task_resources_with_results(wf_ex_id=None):
    """Fetch task executions and attach their JSON-encoded results.

    :param wf_ex_id: Optional workflow execution id used as a filter;
        when omitted, all task executions are returned.
    :return: Tasks resource containing one Task per task execution.
    """
    filters = {}

    if wf_ex_id:
        filters['workflow_execution_id'] = wf_ex_id

    def _to_task(t_ex):
        # Convert a DB model into an API resource and attach its result.
        resource = Task.from_dict(t_ex.to_dict())

        resource.result = json.dumps(
            data_flow.get_task_execution_result(t_ex)
        )

        return resource

    # The transaction keeps the session alive for lazy-loaded relations
    # read by get_task_execution_result.
    with db_api.transaction():
        tasks = [
            _to_task(t_ex)
            for t_ex in db_api.get_task_executions(**filters)
        ]

    return Tasks(tasks=tasks)
Exemplo n.º 32
0
    def defer(self):
        """Defers task.

        This method puts task to a waiting state.
        """
        # Look up an existing task execution by name in case it was
        # already created by another code path.
        if not self.task_ex:
            t_execs = db_api.get_task_executions(
                workflow_execution_id=self.wf_ex.id,
                name=self.task_spec.get_name())

            self.task_ex = t_execs[0] if t_execs else None

        # Still nothing: create a fresh task execution.
        if not self.task_ex:
            self._create_task_execution()

        # NOTE(review): _create_task_execution presumably assigns
        # self.task_ex; WAITING is only set if it did — confirm.
        if self.task_ex:
            self.set_state(states.WAITING, 'Task is deferred.')

        self.waiting = True
Exemplo n.º 33
0
    def defer(self):
        """Defers task.

        This method puts task to a waiting state.
        """
        # The named lock guards against concurrent creation of a task
        # execution with the same unique key.
        with db_api.named_lock(self.unique_key):
            if not self.task_ex:
                t_execs = db_api.get_task_executions(
                    workflow_execution_id=self.wf_ex.id,
                    unique_key=self.unique_key)

                self.task_ex = t_execs[0] if t_execs else None

            msg = 'Task is waiting.'

            # Either create the execution directly in WAITING state or move
            # an existing one to WAITING (unless it already is).
            if not self.task_ex:
                self._create_task_execution(state=states.WAITING,
                                            state_info=msg)
            elif self.task_ex.state != states.WAITING:
                self.set_state(states.WAITING, msg)
Exemplo n.º 34
0
def _build_fail_info_message(wf_ctrl, wf_ex):
    """Build a diagnostic message explaining a workflow failure.

    Only ERROR-state tasks whose error is *not* handled by the workflow
    controller (e.g. by an on-error clause) are reported.

    :param wf_ctrl: Workflow controller used to filter out handled errors.
    :param wf_ex: Failed workflow execution.
    :return: Multi-line string listing failed tasks and the ERROR-state
        child action/workflow executions under each of them.
    """
    # Try to find where error is exactly.
    failed_tasks = [
        t_ex for t_ex in db_api.get_task_executions(
            workflow_execution_id=wf_ex.id,
            state=states.ERROR,
            sort_keys=['name']
        ) if not wf_ctrl.is_error_handled_for(t_ex)
    ]

    msg = ('Failure caused by error in tasks: %s\n' %
           ', '.join([t.name for t in failed_tasks]))

    for t in failed_tasks:
        msg += '\n  %s [task_ex_id=%s] -> %s\n' % (t.name, t.id, t.state_info)

        # Report failed child executions of both kinds; the formatting is
        # identical apart from the id label.
        msg += _format_failed_children(t.action_executions, 'action_ex_id')
        msg += _format_failed_children(t.workflow_executions, 'wf_ex_id')

    return msg


def _format_failed_children(executions, id_label):
    """Render ERROR-state child executions as indented message lines.

    :param executions: Child action or workflow executions of a task.
    :param id_label: Label for the execution id in the output
        ('action_ex_id' or 'wf_ex_id').
    :return: Formatted lines (possibly empty string).
    """
    msg = ''

    for i, ex in enumerate(executions):
        if ex.state == states.ERROR:
            output = (ex.output or {}).get('result', 'Unknown')
            msg += (
                '    [%s=%s, idx=%s]: %s\n' % (
                    id_label,
                    ex.id,
                    i,
                    str(output)
                )
            )

    return msg
Exemplo n.º 35
0
    def defer(self):
        """Defers task.

        This method puts task to a waiting state.
        """
        # Serialize on the task's unique key so two engines can't both
        # create an execution for the same task.
        with db_api.named_lock(self.unique_key):
            # Reuse an already-created task execution if one exists.
            if not self.task_ex:
                t_execs = db_api.get_task_executions(
                    workflow_execution_id=self.wf_ex.id,
                    unique_key=self.unique_key
                )

                self.task_ex = t_execs[0] if t_execs else None

            msg = 'Task is waiting.'

            # Create the execution directly in WAITING, or transition an
            # existing one unless it is already WAITING.
            if not self.task_ex:
                self._create_task_execution(
                    state=states.WAITING,
                    state_info=msg
                )
            elif self.task_ex.state != states.WAITING:
                self.set_state(states.WAITING, msg)
Exemplo n.º 36
0
def task_(context, task_name):
    """Expression-language helper returning task data by name.

    Finds the task execution named *task_name* within the workflow
    execution referenced by *context* and converts it to the
    user-facing model; returns None if no such task execution exists.
    """

    # This section may not exist in a context if it's calculated not in
    # task scope.
    # NOTE(review): this assumes the context lookup yields a falsy value
    # (rather than raising) when '__task_execution' is absent — confirm
    # against the expression context implementation.
    cur_task = context['__task_execution']

    # Fast path: the expression is evaluated in the scope of the very task
    # being asked about, so its id is already known.
    if cur_task and cur_task['name'] == task_name:
        task_ex = db_api.get_task_execution(cur_task['id'])
    else:
        task_execs = db_api.get_task_executions(
            workflow_execution_id=context['__execution']['id'],
            name=task_name
        )

        # TODO(rakhmerov): Account for multiple executions (i.e. in case of
        # cycles).
        task_ex = task_execs[-1] if len(task_execs) > 0 else None

    if not task_ex:
        return None

    # We don't use to_dict() db model method because not all fields
    # make sense for user.
    return _convert_to_user_model(task_ex)
Exemplo n.º 37
0
    def _test_subworkflow(self, env):
        """Run 'my_wb.wf2' (which launches subworkflow 'my_wb.wf1') with the
        given environment and verify outputs and executor-call arguments.
        """
        wf2_ex = self.engine.start_workflow('my_wb.wf2', env=env)

        # Execution of 'wf2'.
        self.assertIsNotNone(wf2_ex)
        self.assertDictEqual({}, wf2_ex.input)

        # Wait until the subworkflow execution appears.
        self._await(lambda: len(db_api.get_workflow_executions()) == 2, 0.5, 5)

        wf_execs = db_api.get_workflow_executions()

        self.assertEqual(2, len(wf_execs))

        # Execution of 'wf1'.

        wf2_ex = self._assert_single_item(wf_execs, name='my_wb.wf2')
        wf1_ex = self._assert_single_item(wf_execs, name='my_wb.wf1')

        expected_wf1_input = {
            'param1': 'Bonnie',
            'param2': 'Clyde'
        }

        # The subworkflow must be linked to the parent's task execution.
        self.assertIsNotNone(wf1_ex.task_execution_id)
        self.assertDictEqual(wf1_ex.input, expected_wf1_input)

        # Wait till workflow 'wf1' is completed.
        self.await_workflow_success(wf1_ex.id)

        with db_api.transaction():
            wf1_ex = db_api.get_workflow_execution(wf1_ex.id)

            self.assertDictEqual(
                {'final_result': "'Bonnie & Clyde'"},
                wf1_ex.output
            )

        # Wait till workflow 'wf2' is completed.
        self.await_workflow_success(wf2_ex.id)

        with db_api.transaction():
            wf2_ex = db_api.get_workflow_execution(wf2_ex.id)

            self.assertDictEqual(
                {'slogan': "'Bonnie & Clyde' is a cool movie!\n"},
                wf2_ex.output
            )

        with db_api.transaction():
            # Check if target is resolved.
            wf1_task_execs = db_api.get_task_executions(
                workflow_execution_id=wf1_ex.id
            )

            self._assert_single_item(wf1_task_execs, name='task1')
            self._assert_single_item(wf1_task_execs, name='task2')

            # Every action must have been dispatched to the expected target
            # with the full call metadata.
            for t_ex in wf1_task_execs:
                a_ex = t_ex.action_executions[0]

                callback_url = '/v2/action_executions/%s' % a_ex.id

                r_exe.RemoteExecutor.run_action.assert_any_call(
                    a_ex.id,
                    'mistral.actions.std_actions.EchoAction',
                    {},
                    a_ex.input,
                    False,
                    {
                        'task_execution_id': t_ex.id,
                        'callback_url': callback_url,
                        'workflow_execution_id': wf1_ex.id,
                        'workflow_name': wf1_ex.name,
                        'action_execution_id': a_ex.id,
                    },
                    target=TARGET,
                    timeout=None
                )
Exemplo n.º 38
0
    def test_notify_rerun_nested_workflow(self):
        """Fail a nested workflow, rerun it, and verify the exact order of
        notification events published for both workflow levels.
        """
        wf_def = """
        wf_1:
          tasks:
            wf_1_t1:
              workflow: wf_2
              on-success:
                - wf_1_t2
            wf_1_t2:
              action: std.noop
        version: '2.0'
        wf_2:
          tasks:
            wf_2_t1:
              action: std.noop
              on-success:
                - wf_2_t2
            wf_2_t2:
              action: std.noop
        """

        wf_svc.create_workflows(wf_def)

        # Enable webhook notifications so events are captured.
        notify_options = [{'type': 'webhook'}]
        params = {'notify': notify_options}

        wf_1_ex = self.engine.start_workflow('wf_1', '', **params)

        # The first run is expected to fail (test fixture forces an error).
        self.await_workflow_error(wf_1_ex.id)

        with db_api.transaction():
            wf_exs = db_api.get_workflow_executions()
            self._assert_single_item(wf_exs, name='wf_1', state=states.ERROR)
            self._assert_single_item(wf_exs, name='wf_2', state=states.ERROR)

            task_exs = db_api.get_task_executions()
            self._assert_single_item(task_exs,
                                     name='wf_1_t1',
                                     state=states.ERROR)
            wf_2_t1 = self._assert_single_item(task_exs,
                                               name='wf_2_t1',
                                               state=states.ERROR)

        self.assertEqual(2, len(task_exs))
        self.assertEqual(2, len(wf_exs))

        # Rerun from the failed innermost task.
        self.engine.rerun_workflow(wf_2_t1.id)

        self.await_workflow_success(wf_1_ex.id)

        with db_api.transaction():
            wf_exs = db_api.get_workflow_executions()
            wf_1_ex = self._assert_single_item(wf_exs,
                                               name='wf_1',
                                               state=states.SUCCESS)
            wf_2_ex = self._assert_single_item(wf_exs,
                                               name='wf_2',
                                               state=states.SUCCESS)

            task_wf_1_exs = wf_1_ex.task_executions
            wf_1_t1 = self._assert_single_item(task_wf_1_exs,
                                               name='wf_1_t1',
                                               state=states.SUCCESS)
            wf_1_t2 = self._assert_single_item(task_wf_1_exs,
                                               name='wf_1_t2',
                                               state=states.SUCCESS)
            task_wf_2_exs = wf_2_ex.task_executions
            wf_2_t1 = self._assert_single_item(task_wf_2_exs,
                                               name='wf_2_t1',
                                               state=states.SUCCESS)
            wf_2_t2 = self._assert_single_item(task_wf_2_exs,
                                               name='wf_2_t2',
                                               state=states.SUCCESS)

            self.assertEqual(2, len(task_wf_1_exs))
            self.assertEqual(2, len(task_wf_2_exs))
            self.assertEqual(2, len(wf_exs))

            # The event log must reflect: initial failure bubbling up from
            # the nested task, then the rerun cascade and success bubbling
            # back up to the outer workflow.
            expected_order = [
                (wf_1_ex.id, events.WORKFLOW_LAUNCHED),
                (wf_1_t1.id, events.TASK_LAUNCHED),
                (wf_2_ex.id, events.WORKFLOW_LAUNCHED),
                (wf_2_t1.id, events.TASK_LAUNCHED),
                (wf_2_t1.id, events.TASK_FAILED),
                (wf_2_ex.id, events.WORKFLOW_FAILED),
                (wf_1_t1.id, events.TASK_FAILED),
                (wf_1_ex.id, events.WORKFLOW_FAILED),
                # rerun
                (wf_2_ex.id, events.WORKFLOW_RERUN),
                (wf_1_ex.id, events.WORKFLOW_RERUN),
                (wf_1_t1.id, events.TASK_RERUN),
                (wf_2_t1.id, events.TASK_RERUN),
                (wf_2_t1.id, events.TASK_SUCCEEDED),
                (wf_2_t2.id, events.TASK_LAUNCHED),
                (wf_2_t2.id, events.TASK_SUCCEEDED),
                (wf_2_ex.id, events.WORKFLOW_SUCCEEDED),
                (wf_1_t1.id, events.TASK_SUCCEEDED),
                (wf_1_t2.id, events.TASK_LAUNCHED),
                (wf_1_t2.id, events.TASK_SUCCEEDED),
                (wf_1_ex.id, events.WORKFLOW_SUCCEEDED),
            ]
            self.assertTrue(self.publishers['wbhk'].publish.called)
            self.assertListEqual(expected_order, EVENT_LOGS)
Exemplo n.º 39
0
    def test_subworkflow_success(self):
        """Run 'wb1.wf2' with its subworkflow 'wb1.wf1' and verify outputs
        and that project_id propagates to all task executions.
        """
        wf2_ex = self.engine.start_workflow('wb1.wf2', None)

        project_id = auth_context.ctx().project_id

        # Execution of 'wf2'.
        self.assertEqual(project_id, wf2_ex.project_id)
        self.assertIsNotNone(wf2_ex)
        self.assertDictEqual({}, wf2_ex.input)
        self.assertDictEqual({}, wf2_ex.params)

        # Wait until the subworkflow execution appears.
        self._await(lambda: len(db_api.get_workflow_executions()) == 2, 0.5, 5)

        wf_execs = db_api.get_workflow_executions()

        self.assertEqual(2, len(wf_execs))

        # Execution of 'wf2'.
        wf1_ex = self._assert_single_item(wf_execs, name='wb1.wf1')
        wf2_ex = self._assert_single_item(wf_execs, name='wb1.wf2')

        # The subworkflow inherits project_id and links back to the
        # parent's task execution.
        self.assertEqual(project_id, wf1_ex.project_id)
        self.assertIsNotNone(wf1_ex.task_execution_id)
        self.assertDictContainsSubset(
            {
                'task_name': 'task2',
                'task_execution_id': wf1_ex.task_execution_id
            }, wf1_ex.params)
        self.assertDictEqual({
            'param1': 'Bonnie',
            'param2': 'Clyde'
        }, wf1_ex.input)

        # Wait till workflow 'wf1' is completed.
        self.await_workflow_success(wf1_ex.id)

        wf1_ex = db_api.get_workflow_execution(wf1_ex.id)

        self.assertDictEqual({'final_result': "'Bonnie & Clyde'"},
                             wf1_ex.output)

        # Wait till workflow 'wf2' is completed.
        self.await_workflow_success(wf2_ex.id, timeout=4)

        wf2_ex = db_api.get_workflow_execution(wf2_ex.id)

        self.assertDictEqual({'slogan': "'Bonnie & Clyde' is a cool movie!"},
                             wf2_ex.output)

        # Check project_id in tasks.
        wf1_task_execs = db_api.get_task_executions(
            workflow_execution_id=wf1_ex.id)
        wf2_task_execs = db_api.get_task_executions(
            workflow_execution_id=wf2_ex.id)

        # NOTE(review): the local names look swapped relative to the lists
        # they come from (wf2_task1_ex is taken from wf1's tasks and vice
        # versa); the assertions below only check project_id so behavior is
        # unaffected, but the naming is misleading — consider renaming.
        wf2_task1_ex = self._assert_single_item(wf1_task_execs, name='task1')
        wf1_task1_ex = self._assert_single_item(wf2_task_execs, name='task1')
        wf1_task2_ex = self._assert_single_item(wf1_task_execs, name='task2')

        self.assertEqual(project_id, wf2_task1_ex.project_id)
        self.assertEqual(project_id, wf1_task1_ex.project_id)
        self.assertEqual(project_id, wf1_task2_ex.project_id)
Exemplo n.º 40
0
 def _get_task_executions(self, **kwargs):
     """Fetch this workflow execution's task executions without sorting.

     Extra keyword arguments are forwarded to the DB API as filters.
     """
     # An empty sort_keys list turns off sorting in the DB layer.
     return db_api.get_task_executions(
         sort_keys=[],
         workflow_execution_id=self.wf_ex.id,
         **kwargs
     )
Exemplo n.º 41
0
 def _get_task_executions(self, **kwargs):
     """Return task executions of this workflow execution, unsorted.

     Additional filters may be passed as keyword arguments.
     """
     # Passing sort_keys=[] disables sorting at the DB API level.
     filters = dict(
         kwargs,
         workflow_execution_id=self.wf_ex.id,
         sort_keys=[]
     )

     return db_api.get_task_executions(**filters)
Exemplo n.º 42
0
    def _test_subworkflow(self, env):
        """Run 'my_wb.wf2' with the given env and verify the subworkflow
        'my_wb.wf1' inherits it, plus outputs and executor-call arguments.
        """
        wf2_ex = self.engine.start_workflow('my_wb.wf2', {}, env=env)

        # Execution of 'wf2'.
        self.assertIsNotNone(wf2_ex)
        self.assertDictEqual({}, wf2_ex.input)
        self.assertDictContainsSubset({'env': env}, wf2_ex.params)

        # Wait until the subworkflow execution appears.
        self._await(lambda: len(db_api.get_workflow_executions()) == 2, 0.5, 5)

        wf_execs = db_api.get_workflow_executions()

        self.assertEqual(2, len(wf_execs))

        # Execution of 'wf1'.

        wf2_ex = self._assert_single_item(wf_execs, name='my_wb.wf2')
        wf1_ex = self._assert_single_item(wf_execs, name='my_wb.wf1')

        # The env must be propagated into the subworkflow's params along
        # with the launching task's metadata.
        expected_start_params = {
            'task_name': 'task2',
            'task_execution_id': wf1_ex.task_execution_id,
            'env': env
        }

        expected_wf1_input = {'param1': 'Bonnie', 'param2': 'Clyde'}

        self.assertIsNotNone(wf1_ex.task_execution_id)
        self.assertDictContainsSubset(expected_start_params, wf1_ex.params)
        self.assertDictEqual(wf1_ex.input, expected_wf1_input)

        # Wait till workflow 'wf1' is completed.
        self.await_workflow_success(wf1_ex.id)

        with db_api.transaction():
            wf1_ex = db_api.get_workflow_execution(wf1_ex.id)

            self.assertDictEqual({'final_result': "'Bonnie & Clyde'"},
                                 wf1_ex.output)

        # Wait till workflow 'wf2' is completed.
        self.await_workflow_success(wf2_ex.id)

        with db_api.transaction():
            wf2_ex = db_api.get_workflow_execution(wf2_ex.id)

            self.assertDictEqual(
                {'slogan': "'Bonnie & Clyde' is a cool movie!\n"},
                wf2_ex.output)

        with db_api.transaction():
            # Check if target is resolved.
            wf1_task_execs = db_api.get_task_executions(
                workflow_execution_id=wf1_ex.id)

            self._assert_single_item(wf1_task_execs, name='task1')
            self._assert_single_item(wf1_task_execs, name='task2')

            # Each action must have been dispatched to the expected target.
            for t_ex in wf1_task_execs:
                a_ex = t_ex.action_executions[0]

                rpc.ExecutorClient.run_action.assert_any_call(
                    a_ex.id,
                    'mistral.actions.std_actions.EchoAction', {},
                    a_ex.input,
                    TARGET,
                    safe_rerun=False)
Exemplo n.º 43
0
 def _num_of_tasks():
     """Count the task executions of the enclosing wf_ex (closure)."""
     t_execs = db_api.get_task_executions(workflow_execution_id=wf_ex.id)

     return len(t_execs)
Exemplo n.º 44
0
def find_task_executions_with_state(wf_ex_id, state):
    """Return task executions of the given workflow execution in *state*.

    :param wf_ex_id: Workflow execution id to filter by.
    :param state: Task execution state to filter by.
    :return: List of matching task executions.
    """
    filters = {
        'workflow_execution_id': wf_ex_id,
        'state': state
    }

    return db_api.get_task_executions(**filters)
Exemplo n.º 45
0
def find_task_executions_with_state(wf_ex_id, state):
    """Fetch all task executions in *state* for one workflow execution.

    :param wf_ex_id: Workflow execution id to filter by.
    :param state: Task execution state to filter by.
    :return: List of matching task executions.
    """
    return db_api.get_task_executions(
        workflow_execution_id=wf_ex_id,
        state=state
    )
Exemplo n.º 46
0
    def test_subworkflow_success(self):
        """Run 'wb1.wf2' with subworkflow 'wb1.wf1' and verify outputs and
        project_id propagation (newer-API variant of the same scenario).
        """
        wf2_ex = self.engine.start_workflow('wb1.wf2')

        project_id = auth_context.ctx().project_id

        # Execution of 'wf2'.
        self.assertEqual(project_id, wf2_ex.project_id)
        self.assertIsNotNone(wf2_ex)
        self.assertDictEqual({}, wf2_ex.input)
        self.assertDictEqual({'namespace': '', 'env': {}}, wf2_ex.params)

        # Wait until the subworkflow execution appears.
        self._await(lambda: len(db_api.get_workflow_executions()) == 2, 0.5, 5)

        wf_execs = db_api.get_workflow_executions()

        self.assertEqual(2, len(wf_execs))

        # Execution of 'wf2'.
        wf1_ex = self._assert_single_item(wf_execs, name='wb1.wf1')
        wf2_ex = self._assert_single_item(wf_execs, name='wb1.wf2')

        # The subworkflow inherits project_id and links back to the
        # parent's task execution.
        self.assertEqual(project_id, wf1_ex.project_id)
        self.assertIsNotNone(wf1_ex.task_execution_id)
        self.assertDictContainsSubset(
            {
                'task_name': 'task2',
                'task_execution_id': wf1_ex.task_execution_id
            },
            wf1_ex.params
        )
        self.assertDictEqual(
            {
                'param1': 'Bonnie',
                'param2': 'Clyde'
            },
            wf1_ex.input
        )

        # Wait till workflow 'wf1' is completed.
        self.await_workflow_success(wf1_ex.id)

        # Read lazy-loaded output inside a transaction, assert outside.
        with db_api.transaction():
            wf1_ex = db_api.get_workflow_execution(wf1_ex.id)

            wf1_output = wf1_ex.output

        self.assertDictEqual(
            {'final_result': "'Bonnie & Clyde'"},
            wf1_output
        )

        # Wait till workflow 'wf2' is completed.
        self.await_workflow_success(wf2_ex.id, timeout=4)

        with db_api.transaction():
            wf2_ex = db_api.get_workflow_execution(wf2_ex.id)

            wf2_output = wf2_ex.output

        self.assertDictEqual(
            {'slogan': "'Bonnie & Clyde' is a cool movie!"},
            wf2_output
        )

        # Check project_id in tasks.
        wf1_task_execs = db_api.get_task_executions(
            workflow_execution_id=wf1_ex.id
        )
        wf2_task_execs = db_api.get_task_executions(
            workflow_execution_id=wf2_ex.id
        )

        # NOTE(review): the local names look swapped relative to the lists
        # they come from (wf2_task1_ex is taken from wf1's tasks and vice
        # versa); the assertions only check project_id so behavior is
        # unaffected, but the naming is misleading — consider renaming.
        wf2_task1_ex = self._assert_single_item(wf1_task_execs, name='task1')
        wf1_task1_ex = self._assert_single_item(wf2_task_execs, name='task1')
        wf1_task2_ex = self._assert_single_item(wf1_task_execs, name='task2')

        self.assertEqual(project_id, wf2_task1_ex.project_id)
        self.assertEqual(project_id, wf1_task1_ex.project_id)
        self.assertEqual(project_id, wf1_task2_ex.project_id)
Exemplo n.º 47
0
 def _num_of_tasks():
     """Return how many task executions belong to wf_ex (from closure)."""
     executions = db_api.get_task_executions(
         workflow_execution_id=wf_ex.id
     )

     return len(executions)
Exemplo n.º 48
0
def _check_and_fix_integrity(wf_ex_id):
    """Detect and recover task executions stuck in RUNNING state.

    Scans RUNNING task executions of the given workflow execution; if all
    child executions of such a task are finished and enough time has
    passed, the task completion handling is re-triggered so the task
    state gets recalculated.

    :param wf_ex_id: Id of the workflow execution to check.
    """
    check_after_seconds = CONF.engine.execution_integrity_check_delay

    if check_after_seconds < 0:
        # Never check integrity if it's a negative value.
        return

    # To break cyclic dependency.
    from mistral.engine import task_handler

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex_id)

        # Nothing to fix once the workflow has completed.
        if states.is_completed(wf_ex.state):
            return

        # Re-schedule the next integrity check.
        # NOTE(review): the 120s delay is hardcoded rather than derived
        # from configuration — confirm this is intentional.
        _schedule_check_and_fix_integrity(wf_ex, delay=120)

        running_task_execs = db_api.get_task_executions(
            workflow_execution_id=wf_ex.id,
            state=states.RUNNING,
            limit=CONF.engine.execution_integrity_check_batch_size
        )

        for t_ex in running_task_execs:
            # The idea is that we take the latest known timestamp of the task
            # execution and consider it eligible for checking and fixing only
            # if some minimum period of time elapsed since the last update.
            timestamp = t_ex.updated_at or t_ex.created_at

            delta = timeutils.delta_seconds(timestamp, timeutils.utcnow())

            if delta < check_after_seconds:
                continue

            child_executions = t_ex.executions

            # A RUNNING task with no children yet is not actionable.
            if not child_executions:
                continue

            all_finished = all(
                [states.is_completed(c_ex.state) for c_ex in child_executions]
            )

            if all_finished:
                # Find the timestamp of the most recently finished child.
                most_recent_child_timestamp = max(
                    [c_ex.updated_at or c_ex.created_at for c_ex in
                     child_executions]
                )
                interval = timeutils.delta_seconds(
                    most_recent_child_timestamp,
                    timeutils.utcnow()
                )

                if interval > check_after_seconds:
                    # We found a task execution in RUNNING state for which all
                    # child executions are finished. We need to call
                    # "schedule_on_action_complete" on the task handler for
                    # any of the child executions so that the task state is
                    # calculated and updated properly.
                    LOG.warning(
                        "Found a task execution that is likely stuck in"
                        " RUNNING state because all child executions are"
                        " finished, will try to recover [task_execution=%s]",
                        t_ex.id
                    )

                    task_handler.schedule_on_action_complete(
                        child_executions[-1]
                    )
Exemplo n.º 49
0
    def _test_subworkflow(self, env):
        """Run 'my_wb.wf2' with the given env and verify the subworkflow
        'my_wb.wf1' inherits it (oldest-API variant: no transactions,
        await_execution_success, t_ex.executions).
        """
        wf2_ex = self.engine.start_workflow(
            'my_wb.wf2',
            {},
            env=env
        )

        # Execution of 'wf2'.
        self.assertIsNotNone(wf2_ex)
        self.assertDictEqual({}, wf2_ex.input)
        self.assertDictContainsSubset({'env': env}, wf2_ex.params)

        # Wait until the subworkflow execution appears.
        self._await(lambda: len(db_api.get_workflow_executions()) == 2, 0.5, 5)

        wf_execs = db_api.get_workflow_executions()

        self.assertEqual(2, len(wf_execs))

        # Execution of 'wf1'.

        wf2_ex = self._assert_single_item(wf_execs, name='my_wb.wf2')
        wf1_ex = self._assert_single_item(wf_execs, name='my_wb.wf1')

        # The env must be propagated into the subworkflow's params along
        # with the launching task's metadata.
        expected_start_params = {
            'task_name': 'task2',
            'task_execution_id': wf1_ex.task_execution_id,
            'env': env
        }

        expected_wf1_input = {
            'param1': 'Bonnie',
            'param2': 'Clyde'
        }

        self.assertIsNotNone(wf1_ex.task_execution_id)
        self.assertDictContainsSubset(expected_start_params, wf1_ex.params)
        self.assertDictEqual(wf1_ex.input, expected_wf1_input)

        # Wait till workflow 'wf1' is completed.
        self.await_execution_success(wf1_ex.id)

        wf1_ex = db_api.get_workflow_execution(wf1_ex.id)

        expected_wf1_output = {'final_result': "'Bonnie & Clyde'"}

        self.assertDictEqual(wf1_ex.output, expected_wf1_output)

        # Wait till workflow 'wf2' is completed.
        self.await_execution_success(wf2_ex.id)

        wf2_ex = db_api.get_workflow_execution(wf2_ex.id)

        expected_wf2_output = {'slogan': "'Bonnie & Clyde' is a cool movie!\n"}

        self.assertDictEqual(wf2_ex.output, expected_wf2_output)

        # Check if target is resolved.
        wf1_task_execs = db_api.get_task_executions(
            workflow_execution_id=wf1_ex.id
        )

        self._assert_single_item(wf1_task_execs, name='task1')
        self._assert_single_item(wf1_task_execs, name='task2')

        # Each action must have been dispatched to the expected target.
        for t_ex in wf1_task_execs:
            a_ex = t_ex.executions[0]

            rpc.ExecutorClient.run_action.assert_any_call(
                a_ex.id,
                'mistral.actions.std_actions.EchoAction',
                {},
                a_ex.input,
                TARGET
            )