Example #1
def _schedule_run_workflow(task_ex, task_spec, wf_input, index,
                           parent_wf_spec):
    parent_wf_ex = task_ex.workflow_execution

    wf_spec_name = task_spec.get_workflow_name()

    wf_def = e_utils.resolve_workflow_definition(parent_wf_ex.workflow_name,
                                                 parent_wf_spec.get_name(),
                                                 wf_spec_name)

    wf_spec = spec_parser.get_workflow_spec(wf_def.spec)

    wf_params = {'task_execution_id': task_ex.id, 'with_items_index': index}

    if 'env' in parent_wf_ex.params:
        wf_params['env'] = parent_wf_ex.params['env']

    for k, v in list(wf_input.items()):
        if k not in wf_spec.get_input():
            wf_params[k] = v
            del wf_input[k]

    wf_ex_id, _ = wf_ex_service.create_workflow_execution(
        wf_def.name, wf_input, "sub-workflow execution", wf_params, wf_spec)

    scheduler.schedule_call(None,
                            'mistral.engine.task_handler.resume_workflow',
                            0,
                            wf_ex_id=wf_ex_id,
                            env=None)
Example #2
def _schedule_refresh_task_state(task_ex, delay=0):
    """Schedules task preconditions check.

    This method provides transactional decoupling of task preconditions
    check from events that can potentially satisfy those preconditions.

    It's needed in the non-locking model in order to avoid the 'phantom
    read' phenomenon when reading the state of multiple tasks to see if a
    task that depends on them can start. Just starting a separate
    transaction without using the scheduler is not safe due to the
    concurrency window we would have in this case (the time between
    transactions), whereas the scheduler is a special component designed
    to be resistant to failures.

    :param task_ex: Task execution.
    :param delay: Minimum amount of time before the preconditions check
        should be made.
    """
    key = 'th_c_t_s_a-%s' % task_ex.id

    scheduler.schedule_call(
        None,
        _REFRESH_TASK_STATE_PATH,
        delay,
        key=key,
        task_ex_id=task_ex.id
    )
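
The docstring above captures the pattern every snippet on this page shares: instead of doing follow-up work inside the current transaction, the engine persists a delayed call and lets the scheduler pick it up later in a separate transaction. The sketch below is a minimal, self-contained illustration of that delayed-call mechanism; the in-memory store and helper names are hypothetical and are not Mistral's actual implementation, but the factory/no-factory dispatch mirrors how the examples here invoke scheduler.schedule_call.

import importlib
import time

# Hypothetical in-memory store standing in for Mistral's delayed calls table.
_DELAYED_CALLS = []


def schedule_call(factory_method_path, target_method_name, run_after,
                  **method_args):
    """Persist a call to be run at least `run_after` seconds from now."""
    _DELAYED_CALLS.append({
        'factory_method_path': factory_method_path,
        'target_method_name': target_method_name,
        'execution_time': time.time() + run_after,
        'method_arguments': method_args,
    })


def process_due_calls():
    """One pass of a scheduler loop: pick up due calls and invoke them."""
    now = time.time()

    for call in [c for c in _DELAYED_CALLS if c['execution_time'] <= now]:
        _DELAYED_CALLS.remove(call)

        if call['factory_method_path']:
            # The target is a method on an object built by a factory.
            module_name, factory_name = (
                call['factory_method_path'].rsplit('.', 1))
            factory = getattr(importlib.import_module(module_name),
                              factory_name)
            target = getattr(factory(), call['target_method_name'])
        else:
            # The target is a fully qualified function path.
            module_name, func_name = (
                call['target_method_name'].rsplit('.', 1))
            target = getattr(importlib.import_module(module_name), func_name)

        target(**call['method_arguments'])
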
Example #3
def _schedule_run_workflow(task_ex, task_spec, wf_input, index):
    parent_wf_ex = task_ex.workflow_execution
    parent_wf_spec = spec_parser.get_workflow_spec(parent_wf_ex.spec)

    wf_spec_name = task_spec.get_workflow_name()

    wf_def = e_utils.resolve_workflow_definition(parent_wf_ex.workflow_name,
                                                 parent_wf_spec.get_name(),
                                                 wf_spec_name)

    wf_spec = spec_parser.get_workflow_spec(wf_def.spec)

    wf_params = {'task_execution_id': task_ex.id, 'with_items_index': index}

    if 'env' in parent_wf_ex.params:
        wf_params['env'] = parent_wf_ex.params['env']

    for k, v in list(wf_input.items()):
        if k not in wf_spec.get_input():
            wf_params[k] = v
            del wf_input[k]

    scheduler.schedule_call(None,
                            'mistral.engine.task_handler.run_workflow',
                            0,
                            wf_name=wf_def.name,
                            wf_input=wf_input,
                            wf_params=wf_params)
Example #4
    def test_scheduler_with_factory(self, factory):
        target_method_name = 'run_something'
        factory.return_value = type(
            'something',
            (object,),
            {
                target_method_name:
                    mock.MagicMock(side_effect=self.target_method)
            }
        )

        scheduler.schedule_call(
            TARGET_METHOD_PATH,
            target_method_name,
            DELAY,
            **{'name': 'task', 'id': '123'}
        )

        calls = db_api.get_delayed_calls_to_start(get_time_delay())
        call = self._assert_single_item(
            calls,
            target_method_name=target_method_name
        )
        self.assertIn('name', call['method_arguments'])

        self.queue.get()
        factory().run_something.assert_called_once_with(name='task', id='123')

        calls = db_api.get_delayed_calls_to_start(get_time_delay())
        self.assertEqual(0, len(calls))
Example #5
    def test_scheduler_call_target_method_with_correct_auth(self, method):
        default_context = base.get_context(default=True)
        auth_context.set_ctx(default_context)
        default_project_id = (
            default_context._BaseContext__values['project_id'])
        method_args1 = {'expected_project_id': default_project_id}

        scheduler.schedule_call(None, CHECK_CONTEXT_METHOD_PATH, DELAY,
                                **method_args1)

        second_context = base.get_context(default=False)
        auth_context.set_ctx(second_context)
        second_project_id = (second_context._BaseContext__values['project_id'])
        method_args2 = {'expected_project_id': second_project_id}

        scheduler.schedule_call(None, CHECK_CONTEXT_METHOD_PATH, DELAY,
                                **method_args2)

        eventlet.sleep(WAIT)

        method.assert_any_call(default_project_id, default_project_id)

        method.assert_any_call(second_project_id, second_project_id)

        self.assertNotEqual(default_project_id, second_project_id)
Example #6
    def test_scheduler_call_target_method_with_correct_auth(self, method):
        method.side_effect = self.target_check_context_method

        default_context = base.get_context(default=True)
        auth_context.set_ctx(default_context)
        default_project_id = (
            default_context.project_id
        )

        scheduler.schedule_call(
            None,
            TARGET_METHOD_PATH,
            DELAY,
            **{'expected_project_id': default_project_id}
        )

        second_context = base.get_context(default=False)
        auth_context.set_ctx(second_context)
        second_project_id = (
            second_context.project_id
        )

        scheduler.schedule_call(
            None,
            TARGET_METHOD_PATH,
            DELAY,
            **{'expected_project_id': second_project_id}
        )

        self.assertNotEqual(default_project_id, second_project_id)

        for _ in range(2):
            self.assertTrue(self.queue.get())
Example #7
    def test_scheduler_doesnt_handel_calls_the_failed_on_update(self):
        def stop_thread_groups():
            [tg.stop() for tg in self.tgs]

        self.tgs = [scheduler.setup(), scheduler.setup()]
        self.addCleanup(stop_thread_groups)

        method_args = {'name': 'task', 'id': '321'}

        scheduler.schedule_call(
            None,
            TARGET_METHOD_NAME,
            DELAY,
            **method_args
        )

        time_filter = datetime.datetime.now() + datetime.timedelta(seconds=2)
        calls = db_api.get_delayed_calls_to_start(time_filter)

        eventlet.sleep(WAIT)

        # If the scheduler did handle calls that failed on update,
        # NotFoundException would be raised.
        db_api.get_delayed_call(calls[0].id)

        db_api.delete_delayed_call(calls[0].id)
Example #8
    def test_scheduler_call_target_method_with_correct_auth(self, method):
        method.side_effect = self.target_check_context_method

        default_context = base.get_context(default=True)
        auth_context.set_ctx(default_context)
        default_project_id = (
            default_context.project_id
        )

        scheduler.schedule_call(
            None,
            TARGET_METHOD_PATH,
            DELAY,
            **{'expected_project_id': default_project_id}
        )

        second_context = base.get_context(default=False)
        auth_context.set_ctx(second_context)
        second_project_id = (
            second_context.project_id
        )

        scheduler.schedule_call(
            None,
            TARGET_METHOD_PATH,
            DELAY,
            **{'expected_project_id': second_project_id}
        )

        self.assertNotEqual(default_project_id, second_project_id)

        for _ in range(2):
            self.assertTrue(self.queue.get())
Example #9
def _schedule_run_workflow(task_ex, task_spec, wf_input, index):
    parent_wf_ex = task_ex.workflow_execution
    parent_wf_spec = spec_parser.get_workflow_spec(parent_wf_ex.spec)

    wf_spec_name = task_spec.get_workflow_name()

    wf_def = e_utils.resolve_workflow_definition(
        parent_wf_ex.workflow_name,
        parent_wf_spec.get_name(),
        wf_spec_name
    )

    wf_spec = spec_parser.get_workflow_spec(wf_def.spec)

    wf_params = {
        'task_execution_id': task_ex.id,
        'with_items_index': index
    }

    if 'env' in parent_wf_ex.params:
        wf_params['env'] = parent_wf_ex.params['env']

    for k, v in list(wf_input.items()):
        if k not in wf_spec.get_input():
            wf_params[k] = v
            del wf_input[k]

    scheduler.schedule_call(
        None,
        'mistral.engine.task_handler.run_workflow',
        0,
        wf_name=wf_def.name,
        wf_input=wf_input,
        wf_params=wf_params
    )
Example #10
def _schedule_run_action(task_ex, task_spec, action_input, index):
    wf_ex = task_ex.workflow_execution
    wf_spec = spec_parser.get_workflow_spec(wf_ex.spec)

    action_spec_name = task_spec.get_action_name()

    action_def = action_handler.resolve_definition(
        action_spec_name,
        task_ex,
        wf_spec
    )

    action_ex = action_handler.create_action_execution(
        action_def, action_input, task_ex, index
    )

    target = expr.evaluate_recursively(
        task_spec.get_target(),
        utils.merge_dicts(
            copy.deepcopy(action_input),
            copy.copy(task_ex.in_context)
        )
    )

    scheduler.schedule_call(
        None,
        'mistral.engine.action_handler.run_existing_action',
        0,
        action_ex_id=action_ex.id,
        target=target
    )
Example #11
    def test_scheduler_with_factory(self, factory):
        target_method_name = 'run_something'
        factory.return_value = type(
            'something',
            (object,),
            {
                target_method_name:
                    mock.MagicMock(side_effect=self.target_method)
            }
        )

        scheduler.schedule_call(
            TARGET_METHOD_PATH,
            target_method_name,
            DELAY,
            **{'name': 'task', 'id': '123'}
        )

        calls = db_api.get_delayed_calls_to_start(get_time_delay())
        call = self._assert_single_item(
            calls,
            target_method_name=target_method_name
        )
        self.assertIn('name', call['method_arguments'])

        self.queue.get()
        factory().run_something.assert_called_once_with(name='task', id='123')

        calls = db_api.get_delayed_calls_to_start(get_time_delay())
        self.assertEqual(0, len(calls))
Example #12
    def _schedule_send_result_to_parent_workflow(self):
        scheduler.schedule_call(
            None,
            _SEND_RESULT_TO_PARENT_WORKFLOW_PATH,
            0,
            wf_ex_id=self.wf_ex.id
        )
Example #13
    def test_scheduler_with_custom_batch_size(self):
        self.scheduler.stop()

        number_delayed_calls = 5
        processed_calls_at_time = []
        real_delete_calls_method = scheduler.Scheduler.delete_calls

        @staticmethod
        def delete_calls_counter(delayed_calls):
            real_delete_calls_method(delayed_calls)

            for _ in range(len(delayed_calls)):
                self.queue.put("item")
            processed_calls_at_time.append(len(delayed_calls))

        scheduler.Scheduler.delete_calls = delete_calls_counter

        # Create 5 delayed calls
        for i in range(number_delayed_calls):
            scheduler.schedule_call(
                None,
                TARGET_METHOD_PATH,
                0,
                **{'name': 'task', 'id': i}
            )

        # Start a scheduler that processes 2 calls at a time.
        self.scheduler = scheduler.Scheduler(0, 1, 2)
        self.scheduler.start()

        # Wait until all calls have been processed.
        for _ in range(number_delayed_calls):
            self.queue.get()

        self.assertListEqual([1, 2, 2], sorted(processed_calls_at_time))
Example #14
    def test_scheduler_multi_instance(self, method):
        def stop_thread_groups():
            [tg.stop() for tg in self.tgs]

        self.tgs = [scheduler.setup(), scheduler.setup()]
        self.addCleanup(stop_thread_groups)

        method_args = {'name': 'task', 'id': '321'}

        scheduler.schedule_call(
            None,
            TARGET_METHOD_PATH,
            DELAY,
            **method_args
        )

        time_filter = datetime.datetime.now() + datetime.timedelta(seconds=2)
        calls = db_api.get_delayed_calls_to_start(time_filter)

        self._assert_single_item(calls, target_method_name=TARGET_METHOD_PATH)

        eventlet.sleep(WAIT)

        method.assert_called_once_with(name='task', id='321')

        time_filter = datetime.datetime.now() + datetime.timedelta(seconds=1)
        calls = db_api.get_delayed_calls_to_start(time_filter)

        self.assertEqual(0, len(calls))
Example #15
    def after_task_complete(self, task_ex, task_spec):
        super(WaitAfterPolicy, self).after_task_complete(task_ex, task_spec)

        context_key = 'wait_after_policy'

        runtime_context = _ensure_context_has_key(
            task_ex.runtime_context,
            context_key
        )

        task_ex.runtime_context = runtime_context

        policy_context = runtime_context[context_key]
        if policy_context.get('skip'):
            # Skip, already processed.
            return

        policy_context.update({'skip': True})

        _log_task_delay(task_ex, self.delay)

        state = task_ex.state
        # Set task state to 'RUNNING_DELAYED'.
        task_ex.state = states.RUNNING_DELAYED

        # Schedule to change task state to RUNNING again.
        scheduler.schedule_call(
            _ENGINE_CLIENT_PATH,
            'on_task_state_change',
            self.delay,
            state=state,
            task_ex_id=task_ex.id,
        )
Example #16
def _schedule_run_action(task_ex, task_spec, action_input, index, wf_spec):
    action_spec_name = task_spec.get_action_name()

    action_def = action_handler.resolve_definition(
        action_spec_name,
        task_ex,
        wf_spec
    )

    action_ex = action_handler.create_action_execution(
        action_def, action_input, task_ex, index
    )

    target = expr.evaluate_recursively(
        task_spec.get_target(),
        utils.merge_dicts(
            copy.deepcopy(action_input),
            copy.deepcopy(task_ex.in_context)
        )
    )

    scheduler.schedule_call(
        None,
        'mistral.engine.action_handler.run_existing_action',
        0,
        action_ex_id=action_ex.id,
        target=target
    )
Example #17
    def test_scheduler_with_factory(self, factory):
        target_method = 'run_something'
        method_args = {'name': 'task', 'id': '123'}

        scheduler.schedule_call(
            FACTORY_METHOD_PATH,
            target_method,
            DELAY,
            **method_args
        )

        calls = db_api.get_delayed_calls_to_start(
            datetime.datetime.now() + datetime.timedelta(seconds=2)
        )

        call = self._assert_single_item(
            calls,
            target_method_name=target_method
        )

        self.assertIn('name', call['method_arguments'])

        eventlet.sleep(WAIT)

        factory().run_something.assert_called_once_with(name='task', id='123')

        time_filter = datetime.datetime.now() + datetime.timedelta(seconds=1)
        calls = db_api.get_delayed_calls_to_start(time_filter)

        self.assertEqual(0, len(calls))
Example #18
    def schedule(self, input_dict, target, index=0, desc='', safe_rerun=False):
        assert not self.action_ex

        # Assign the action execution ID here to minimize database calls.
        # Otherwise, the input property of the action execution DB object needs
        # to be updated with the action execution ID after the action execution
        # DB object is created.
        action_ex_id = utils.generate_unicode_uuid()

        self._insert_action_context(action_ex_id, input_dict)

        self._create_action_execution(
            self._prepare_input(input_dict),
            self._prepare_runtime_context(index, safe_rerun),
            desc=desc,
            action_ex_id=action_ex_id
        )

        scheduler.schedule_call(
            None,
            _RUN_EXISTING_ACTION_PATH,
            0,
            action_ex_id=self.action_ex.id,
            target=target
        )
Example #19
    def test_scheduler_multi_instance(self, method):
        scheds = [scheduler.start(), scheduler.start()]

        def stop_schedulers():
            [scheduler.stop_scheduler(s, True) for s in scheds]

        self.addCleanup(stop_schedulers)

        method_args = {'name': 'task', 'id': '321'}

        scheduler.schedule_call(None, TARGET_METHOD_PATH, DELAY, **method_args)

        time_filter = datetime.datetime.now() + datetime.timedelta(seconds=2)
        calls = db_api.get_delayed_calls_to_start(time_filter)

        self._assert_single_item(calls, target_method_name=TARGET_METHOD_PATH)

        eventlet.sleep(WAIT)

        method.assert_called_once_with(name='task', id='321')

        time_filter = datetime.datetime.now() + datetime.timedelta(seconds=1)
        calls = db_api.get_delayed_calls_to_start(time_filter)

        self.assertEqual(0, len(calls))
Example #20
    def test_scheduler_with_factory(self, factory):
        target_method = 'run_something'
        method_args = {'name': 'task', 'id': '123'}

        scheduler.schedule_call(
            FACTORY_METHOD_PATH,
            target_method,
            DELAY,
            **method_args
        )

        calls = db_api.get_delayed_calls_to_start(
            datetime.datetime.now() + datetime.timedelta(seconds=2)
        )

        call = self._assert_single_item(
            calls,
            target_method_name=target_method
        )

        self.assertIn('name', call['method_arguments'])

        eventlet.sleep(WAIT)

        factory().run_something.assert_called_once_with(name='task', id='123')

        time_filter = datetime.datetime.now() + datetime.timedelta(seconds=1)
        calls = db_api.get_delayed_calls_to_start(time_filter)

        self.assertEqual(0, len(calls))
Example #21
    def test_scheduler_delete_calls(self, method):
        def stop_thread_groups():
            [tg.stop() for tg in self.tgs]

        self.tgs = [scheduler.setup(), scheduler.setup()]
        self.addCleanup(stop_thread_groups)

        method_args = {'name': 'task', 'id': '321'}

        scheduler.schedule_call(
            None,
            TARGET_METHOD_NAME,
            DELAY,
            **method_args
        )

        time_filter = datetime.datetime.now() + datetime.timedelta(seconds=2)
        calls = db_api.get_delayed_calls_to_start(time_filter)

        self._assert_single_item(calls, target_method_name=TARGET_METHOD_NAME)

        eventlet.sleep(WAIT)

        self.assertRaises(exc.NotFoundException,
                          db_api.get_delayed_call,
                          calls[0].id
                          )
Example #22
    def before_task_start(self, task_ex, task_spec):
        super(WaitBeforePolicy, self).before_task_start(task_ex, task_spec)

        context_key = "wait_before_policy"

        runtime_context = _ensure_context_has_key(
            task_ex.runtime_context,
            context_key
        )

        task_ex.runtime_context = runtime_context

        policy_context = runtime_context[context_key]

        if policy_context.get("skip"):
            # Unset state 'DELAYED'.
            wf_trace.info(task_ex, "Task '%s' [%s -> %s]" % (task_ex.name, states.DELAYED, states.RUNNING))

            task_ex.state = states.RUNNING

            return

        if task_ex.state != states.IDLE:
            policy_context.update({"skip": True})
            _log_task_delay(task_ex, self.delay)

            task_ex.state = states.DELAYED

            scheduler.schedule_call(
                None,
                _RUN_EXISTING_TASK_PATH,
                self.delay,
                task_ex_id=task_ex.id
            )
Example #23
    def test_scheduler_doesnt_handle_calls_the_failed_on_update(
            self,
            update_delayed_call):
        def update_call_failed(id, values, query_filter):
            self.queue.put("item")
            return None, 0

        update_delayed_call.side_effect = update_call_failed

        scheduler.schedule_call(
            None,
            TARGET_METHOD_PATH,
            DELAY,
            **{'name': 'task', 'id': '321'}
        )

        calls = db_api.get_delayed_calls_to_start(get_time_delay())

        self.queue.get()
        eventlet.sleep(1)

        update_delayed_call.assert_called_with(
            id=calls[0].id,
            values=mock.ANY,
            query_filter=mock.ANY
        )
        # If the scheduler did handle calls that failed on update,
        # DBEntityNotFoundException would be raised.
        db_api.get_delayed_call(calls[0].id)
        db_api.delete_delayed_call(calls[0].id)
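
The comment at the end of this test points at how several scheduler instances can share one delayed-calls table without double execution: an instance first tries to claim a due call by updating its row, and only runs it when the update actually matched something (here update_delayed_call is mocked to report no match, so the call must be left alone). A rough, hypothetical sketch of that optimistic claim, not Mistral's code:

import threading

_claim_lock = threading.Lock()


def try_claim(delayed_calls, call_id, owner):
    """Claim a due call for one scheduler instance.

    Returns True only if this instance actually flipped the entry from
    unclaimed to owned; a second instance racing on the same call sees
    that the update matched nothing and skips it.
    """
    with _claim_lock:  # stands in for the database's row-level atomicity
        call = delayed_calls.get(call_id)

        if call is None or call.get('owner') is not None:
            return False

        call['owner'] = owner

        return True


# Two scheduler instances race for the same call; exactly one wins.
calls = {1: {'target_method_name': 'task', 'owner': None}}

print(try_claim(calls, 1, 'scheduler-a'))  # True  -> executes the call
print(try_claim(calls, 1, 'scheduler-b'))  # False -> leaves it alone
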
Example #24
    def after_task_complete(self, task_ex, task_spec):
        super(WaitAfterPolicy, self).after_task_complete(task_ex, task_spec)

        context_key = 'wait_after_policy'

        runtime_context = _ensure_context_has_key(task_ex.runtime_context,
                                                  context_key)

        task_ex.runtime_context = runtime_context

        policy_context = runtime_context[context_key]
        if policy_context.get('skip'):
            # Skip, already processed.
            return

        policy_context.update({'skip': True})

        _log_task_delay(task_ex, self.delay)

        state = task_ex.state
        # Set task state to 'DELAYED'.
        task_ex.state = states.DELAYED

        # Schedule to change task state to RUNNING again.
        scheduler.schedule_call(
            _ENGINE_CLIENT_PATH,
            'on_task_state_change',
            self.delay,
            state=state,
            task_ex_id=task_ex.id,
        )
Example #25
def schedule_on_action_update(action_ex, delay=0):
    """Schedules task update check.

    This method provides transactional decoupling of action update from
    task update check. It's needed in the non-locking model in order to
    avoid the 'phantom read' phenomenon when reading the state of multiple
    actions to see if a task is updated. Just starting a separate
    transaction without using the scheduler is not safe due to the
    concurrency window we would have in this case (the time between
    transactions), whereas the scheduler is a special component designed
    to be resistant to failures.

    :param action_ex: Action execution.
    :param delay: Minimum amount of time before task update check
        should be made.
    """

    # Optimization to avoid opening a new transaction if it's not needed.
    if not action_ex.task_execution.spec.get('with-items'):
        _on_action_update(action_ex)

        return

    key = 'th_on_a_u-%s' % action_ex.task_execution_id

    scheduler.schedule_call(None,
                            _SCHEDULED_ON_ACTION_UPDATE_PATH,
                            delay,
                            key=key,
                            action_ex_id=action_ex.id,
                            wf_action=isinstance(action_ex,
                                                 models.WorkflowExecution))
Example #26
def _schedule_send_result_to_parent_workflow(wf_ex):
    scheduler.schedule_call(
        None,
        'mistral.engine.workflow_handler.send_result_to_parent_workflow',
        0,
        wf_ex_id=wf_ex.id
    )
Example #27
    def _schedule_send_result_to_parent_workflow(self):
        scheduler.schedule_call(
            None,
            _SEND_RESULT_TO_PARENT_WORKFLOW_PATH,
            0,
            wf_ex_id=self.wf_ex.id
        )
Example #28
    def schedule(self, input_dict, target, index=0, desc=''):
        assert not self.action_ex

        # Assign the action execution ID here to minimize database calls.
        # Otherwise, the input property of the action execution DB object needs
        # to be updated with the action execution ID after the action execution
        # DB object is created.
        action_ex_id = utils.generate_unicode_uuid()

        self._insert_action_context(action_ex_id, input_dict)

        self._create_action_execution(
            self._prepare_input(input_dict),
            self._prepare_runtime_context(index),
            desc=desc,
            action_ex_id=action_ex_id
        )

        scheduler.schedule_call(
            None,
            _RUN_EXISTING_ACTION_PATH,
            0,
            action_ex_id=self.action_ex.id,
            target=target
        )
Example #29
def _schedule_refresh_task_state(task_ex, delay=0):
    """Schedules task preconditions check.

    This method provides transactional decoupling of task preconditions
    check from events that can potentially satisfy those preconditions.

    It's needed in the non-locking model in order to avoid the 'phantom
    read' phenomenon when reading the state of multiple tasks to see if a
    task that depends on them can start. Just starting a separate
    transaction without using the scheduler is not safe due to the
    concurrency window we would have in this case (the time between
    transactions), whereas the scheduler is a special component designed
    to be resistant to failures.

    :param task_ex: Task execution.
    :param delay: Minimum amount of time before the preconditions check
        should be made.
    """
    key = 'th_c_t_s_a-%s' % task_ex.id

    scheduler.schedule_call(
        None,
        _REFRESH_TASK_STATE_PATH,
        delay,
        key=key,
        task_ex_id=task_ex.id
    )
Example #30
def schedule_on_action_complete(action_ex, delay=0):
    """Schedules task completion check.

    This method provides transactional decoupling of action completion from
    task completion check. It's needed in the non-locking model in order
    to avoid the 'phantom read' phenomenon when reading the state of
    multiple actions to see if a task is completed. Just starting a
    separate transaction without using the scheduler is not safe due to
    the concurrency window we would have in this case (the time between
    transactions), whereas the scheduler is a special component designed
    to be resistant to failures.

    :param action_ex: Action execution.
    :param delay: Minimum amount of time before task completion check
        should be made.
    """

    # Optimization to avoid opening a new transaction if it's not needed.
    if not action_ex.task_execution.spec.get('with-items'):
        _on_action_complete(action_ex)

        return

    key = 'th_on_a_c-%s' % action_ex.task_execution_id

    scheduler.schedule_call(
        None,
        _SCHEDULED_ON_ACTION_COMPLETE_PATH,
        delay,
        key=key,
        action_ex_id=action_ex.id,
        wf_action=isinstance(action_ex, models.WorkflowExecution)
    )
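
Both schedule_on_action_update and schedule_on_action_complete only go through the scheduler when the task uses with-items; otherwise the check runs inline in the current transaction, as the optimization comment says. The key derived from the task execution id ('th_on_a_c-%s') ties each delayed check to one task; one plausible reading is that this lets redundant checks for the same task be collapsed, as in the purely illustrative snippet below (names and behavior are an assumption, not documented Mistral semantics):

def drop_duplicate_keys(delayed_calls):
    """Keep the first delayed call per key; unkeyed calls pass through."""
    seen = set()
    kept = []

    for call in delayed_calls:
        key = call.get('key')

        if key is None or key not in seen:
            kept.append(call)

        if key is not None:
            seen.add(key)

    return kept


# Many action completions for one task would otherwise schedule the same
# task completion check over and over.
calls = [
    {'key': 'th_on_a_c-42', 'action_ex_id': 'a1'},
    {'key': 'th_on_a_c-42', 'action_ex_id': 'a2'},
    {'key': 'th_on_a_c-99', 'action_ex_id': 'a3'},
]

print(len(drop_duplicate_keys(calls)))  # 2
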
Example #31
    def test_scheduler_without_factory(self, method):
        method_args = {'name': 'task', 'id': '321'}

        scheduler.schedule_call(
            None,
            FACTORY_METHOD_PATH,
            DELAY,
            **method_args
        )

        time_filter = datetime.datetime.now() + datetime.timedelta(seconds=2)
        calls = db_api.get_delayed_calls_to_start(time_filter)

        call = self._assert_single_item(
            calls,
            target_method_name=FACTORY_METHOD_PATH
        )

        self.assertIn('name', call['method_arguments'])

        eventlet.sleep(WAIT)

        method.assert_called_once_with(name='task', id='321')

        time_filter = datetime.datetime.now() + datetime.timedelta(seconds=1)
        calls = db_api.get_delayed_calls_to_start(time_filter)

        self.assertEqual(0, len(calls))
Example #32
    def test_scheduler_with_custom_batch_size(self):
        self.scheduler.stop()

        number_delayed_calls = 5
        processed_calls_at_time = []
        real_delete_calls_method = scheduler.Scheduler.delete_calls

        @staticmethod
        def delete_calls_counter(delayed_calls):
            real_delete_calls_method(delayed_calls)

            for _ in range(len(delayed_calls)):
                self.queue.put("item")
            processed_calls_at_time.append(len(delayed_calls))

        scheduler.Scheduler.delete_calls = delete_calls_counter

        # Create 5 delayed calls
        for i in range(number_delayed_calls):
            scheduler.schedule_call(None, TARGET_METHOD_PATH, 0, **{
                'name': 'task',
                'id': i
            })

        # Start a scheduler that processes 2 calls at a time.
        self.scheduler = scheduler.Scheduler(0, 1, 2)
        self.scheduler.start()

        # Wait until all calls have been processed.
        for _ in range(number_delayed_calls):
            self.queue.get()

        self.assertEqual([2, 2, 1], processed_calls_at_time)
Example #33
    def before_task_start(self, task_ex, task_spec):
        super(WaitBeforePolicy, self).before_task_start(task_ex, task_spec)

        context_key = 'wait_before_policy'

        runtime_context = _ensure_context_has_key(task_ex.runtime_context,
                                                  context_key)

        task_ex.runtime_context = runtime_context

        policy_context = runtime_context[context_key]

        if policy_context.get('skip'):
            # Unset state 'DELAYED'.
            wf_trace.info(
                task_ex, "Task '%s' [%s -> %s]" %
                (task_ex.name, states.DELAYED, states.RUNNING))

            task_ex.state = states.RUNNING

            return

        if task_ex.state != states.IDLE:
            policy_context.update({'skip': True})
            _log_task_delay(task_ex, self.delay)

            task_ex.state = states.DELAYED

            scheduler.schedule_call(
                None,
                _RUN_EXISTING_TASK_PATH,
                self.delay,
                task_ex_id=task_ex.id,
            )
Example #34
    def test_scheduler_without_factory(self, method):
        method_args = {'name': 'task', 'id': '321'}

        scheduler.schedule_call(
            None,
            FACTORY_METHOD_PATH,
            DELAY,
            **method_args
        )

        time_filter = datetime.datetime.now() + datetime.timedelta(seconds=2)
        calls = db_api.get_delayed_calls_to_start(time_filter)

        call = self._assert_single_item(
            calls,
            target_method_name=FACTORY_METHOD_PATH
        )

        self.assertIn('name', call['method_arguments'])

        eventlet.sleep(WAIT)

        method.assert_called_once_with(name='task', id='321')

        time_filter = datetime.datetime.now() + datetime.timedelta(seconds=1)
        calls = db_api.get_delayed_calls_to_start(time_filter)

        self.assertEqual(0, len(calls))
Example #35
    def test_scheduler_doesnt_handle_calls_the_failed_on_update(
            self, update_delayed_call):
        def update_call_failed(id, values, query_filter):
            self.queue.put("item")
            return None, 0

        update_delayed_call.side_effect = update_call_failed

        scheduler.schedule_call(None, TARGET_METHOD_PATH, DELAY, **{
            'name': 'task',
            'id': '321'
        })

        calls = db_api.get_delayed_calls_to_start(get_time_delay())

        self.queue.get()
        eventlet.sleep(1)

        update_delayed_call.assert_called_with(id=calls[0].id,
                                               values=mock.ANY,
                                               query_filter=mock.ANY)
        # If the scheduler did handle calls that failed on update,
        # DBEntityNotFoundException would be raised.
        db_api.get_delayed_call(calls[0].id)
        db_api.delete_delayed_call(calls[0].id)
Example #36
    def test_scheduler_with_serializer(self, factory):
        target_method_name = 'run_something'
        factory.return_value = type('something', (object, ), {
            target_method_name:
            mock.MagicMock(side_effect=self.target_method)
        })

        task_result = ml_actions.Result('data', 'error')

        method_args = {'name': 'task', 'id': '123', 'result': task_result}

        serializers = {'result': 'mistral.workflow.utils.ResultSerializer'}

        scheduler.schedule_call(TARGET_METHOD_PATH,
                                target_method_name,
                                DELAY,
                                serializers=serializers,
                                **method_args)

        calls = db_api.get_delayed_calls_to_start(get_time_delay())
        call = self._assert_single_item(calls,
                                        target_method_name=target_method_name)
        self.assertIn('name', call['method_arguments'])

        self.queue.get()

        result = factory().run_something.call_args[1].get('result')

        self.assertIsInstance(result, ml_actions.Result)
        self.assertEqual('data', result.data)
        self.assertEqual('error', result.error)

        calls = db_api.get_delayed_calls_to_start(get_time_delay())
        self.assertEqual(0, len(calls))
Example #37
    def after_task_complete(self, task_ex, task_spec):
        """Possible Cases:

        1. state = SUCCESS
           If continue_on is not specified, there is no need to move to
           the next iteration;
           if current:count reaches retry:count, the policy breaks the
           loop (regardless of the continue-on condition);
           otherwise the continue-on condition is checked: if it is True,
           the next iteration is scheduled, otherwise the policy breaks
           the loop.
        2. retry:count = 5, current:count = 2, state = ERROR,
           state = IDLE/DELAYED, current:count = 3
        3. retry:count = 5, current:count = 4, state = ERROR
        Iterations complete therefore state = #{state}, current:count = 4.
        """
        super(RetryPolicy, self).after_task_complete(task_ex, task_spec)

        context_key = "retry_task_policy"

        runtime_context = _ensure_context_has_key(
            task_ex.runtime_context,
            context_key
        )

        continue_on_evaluation = expressions.evaluate(
            self._continue_on_clause,
            data_flow.evaluate_task_outbound_context(task_ex)
        )

        task_ex.runtime_context = runtime_context

        state = task_ex.state

        if not states.is_completed(state):
            return

        policy_context = runtime_context[context_key]

        retry_no = 0

        if "retry_no" in policy_context:
            retry_no = policy_context["retry_no"]
            del policy_context["retry_no"]

        retries_remain = retry_no + 1 < self.count

        stop_continue_flag = (
            task_ex.state == states.SUCCESS and
            not self._continue_on_clause
        )
        stop_continue_flag = stop_continue_flag or (
            self._continue_on_clause and not continue_on_evaluation
        )
        break_triggered = task_ex.state == states.ERROR and self.break_on

        if not retries_remain or break_triggered or stop_continue_flag:
            return

        _log_task_delay(task_ex, self.delay)

        data_flow.invalidate_task_execution_result(task_ex)
        task_ex.state = states.DELAYED

        policy_context["retry_no"] = retry_no + 1
        runtime_context[context_key] = policy_context

        scheduler.schedule_call(
            None,
            _RUN_EXISTING_TASK_PATH,
            self.delay,
            task_ex_id=task_ex.id
        )
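
The retry bookkeeping in the docstring above reduces to a counter stored in the task's runtime context: another iteration is scheduled only while retry_no + 1 < count and neither a break-on nor a continue-on condition stops the loop. A small standalone sketch of that decision with made-up values (it mirrors the flags computed above; it is not an import of the policy itself):

def should_retry(retry_no, count, state, break_on=False,
                 continue_on_clause=None, continue_on_result=True):
    """Illustrative mirror of the retry decision in RetryPolicy above."""
    retries_remain = retry_no + 1 < count

    # Stop when the task succeeded without a continue-on clause, or when
    # a continue-on clause evaluated to False.
    stop_continue = (
        (state == 'SUCCESS' and not continue_on_clause) or
        (bool(continue_on_clause) and not continue_on_result)
    )

    break_triggered = state == 'ERROR' and break_on

    return retries_remain and not break_triggered and not stop_continue


# retry:count = 5, current retry_no = 3, task failed -> one more iteration.
print(should_retry(retry_no=3, count=5, state='ERROR'))  # True

# retry_no = 4 exhausts the budget, since 4 + 1 is not < 5.
print(should_retry(retry_no=4, count=5, state='ERROR'))  # False
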
Example #38
    def after_task_complete(self, task_ex, task_spec):
        """Possible Cases:

        1. state = SUCCESS
           No need to move to next iteration.
        2. retry:count = 5, current:count = 2, state = ERROR,
           state = IDLE/DELAYED, current:count = 3
        3. retry:count = 5, current:count = 4, state = ERROR
        Iterations complete therefore state = #{state}, current:count = 4.
        """
        super(RetryPolicy, self).after_task_complete(task_ex, task_spec)

        context_key = 'retry_task_policy'

        runtime_context = _ensure_context_has_key(
            task_ex.runtime_context,
            context_key
        )

        task_ex.runtime_context = runtime_context

        state = task_ex.state

        if state != states.ERROR:
            return

        wf_trace.info(
            task_ex,
            "Task '%s' [%s -> ERROR]"
            % (task_ex.name, task_ex.state)
        )

        policy_context = runtime_context[context_key]

        retry_no = 0

        if 'retry_no' in policy_context:
            retry_no = policy_context['retry_no']
            del policy_context['retry_no']

        retries_remain = retry_no + 1 < self.count

        if not retries_remain or self.break_on:
            return

        _log_task_delay(task_ex, self.delay)

        task_ex.state = states.DELAYED

        policy_context['retry_no'] = retry_no + 1
        runtime_context[context_key] = policy_context

        scheduler.schedule_call(
            None,
            _RUN_EXISTING_TASK_PATH,
            self.delay,
            task_ex_id=task_ex.id,
        )
Example #39
    def before_task_start(self, task_ex, task_spec):
        super(TimeoutPolicy, self).before_task_start(task_ex, task_spec)

        scheduler.schedule_call(
            None,
            "mistral.engine.policies.fail_task_if_incomplete",
            self.delay,
            task_ex_id=task_ex.id,
            timeout=self.delay,
        )

        wf_trace.info(task_ex, "Timeout check scheduled [task=%s, timeout(s)=%s]." % (task_ex.id, self.delay))
Example #40
    def schedule(self, input_dict, target, index=0, desc=''):
        assert not self.action_ex

        self._create_action_execution(self._prepare_input(input_dict),
                                      self._prepare_runtime_context(index),
                                      desc=desc)

        scheduler.schedule_call(None,
                                _RUN_EXISTING_ACTION_PATH,
                                0,
                                action_ex_id=self.action_ex.id,
                                target=target)
Example #41
    def before_task_start(self, task_ex, task_spec):
        super(TimeoutPolicy, self).before_task_start(task_ex, task_spec)

        scheduler.schedule_call(
            None,
            'mistral.engine.policies.fail_task_if_incomplete',
            self.delay,
            task_ex_id=task_ex.id,
            timeout=self.delay)

        wf_trace.info(
            task_ex, "Timeout check scheduled [task=%s, timeout(s)=%s]." %
            (task_ex.id, self.delay))
Example #42
    def test_scheduler_delete_calls(self, method):
        method.side_effect = self.target_method

        scheduler.schedule_call(None, TARGET_METHOD_PATH, DELAY, **{
            'name': 'task',
            'id': '321'
        })

        calls = db_api.get_delayed_calls_to_start(get_time_delay())
        self._assert_single_item(calls, target_method_name=TARGET_METHOD_PATH)

        self.queue.get()
        self.assertRaises(exc.DBEntityNotFoundError, db_api.get_delayed_call,
                          calls[0].id)
Example #43
    def test_scheduler_with_serializer(self, factory):
        target_method = 'run_something'

        task_result = wf_utils.Result('data', 'error')

        method_args = {
            'name': 'task',
            'id': '123',
            'result': task_result
        }

        serializers = {
            'result': 'mistral.workflow.utils.ResultSerializer'
        }

        delay = 1.5

        scheduler.schedule_call(
            FACTORY_METHOD_NAME,
            target_method,
            delay,
            serializers=serializers,
            **method_args
        )

        calls = db_api.get_delayed_calls_to_start(
            datetime.datetime.now() + datetime.timedelta(seconds=2)
        )

        call = self._assert_single_item(
            calls,
            target_method_name=target_method
        )

        self.assertIn('name', call['method_arguments'])

        eventlet.sleep(delay)

        result = factory().run_something.call_args[1].get('result')

        self.assertIsInstance(result, wf_utils.Result)
        self.assertEqual('data', result.data)
        self.assertEqual('error', result.error)

        calls = db_api.get_delayed_calls_to_start(
            datetime.datetime.now() + datetime.timedelta(seconds=1)
        )

        self.assertEqual(0, len(calls))
Example #44
    def test_scheduler_delete_calls(self, method):
        method_args = {'name': 'task', 'id': '321'}

        scheduler.schedule_call(None, FACTORY_METHOD_PATH, DELAY,
                                **method_args)

        time_filter = datetime.datetime.now() + datetime.timedelta(seconds=2)
        calls = db_api.get_delayed_calls_to_start(time_filter)

        self._assert_single_item(calls, target_method_name=FACTORY_METHOD_PATH)

        eventlet.sleep(WAIT)

        self.assertRaises(exc.DBEntityNotFoundError, db_api.get_delayed_call,
                          calls[0].id)
Example #45
    def test_scheduler_with_serializer(self, factory):
        target_method_name = 'run_something'
        factory.return_value = type(
            'something',
            (object,),
            {
                target_method_name:
                    mock.MagicMock(side_effect=self.target_method)
            }
        )

        task_result = ml_actions.Result('data', 'error')

        method_args = {
            'name': 'task',
            'id': '123',
            'result': task_result
        }

        serializers = {
            'result': 'mistral.workflow.utils.ResultSerializer'
        }

        scheduler.schedule_call(
            TARGET_METHOD_PATH,
            target_method_name,
            DELAY,
            serializers=serializers,
            **method_args
        )

        calls = db_api.get_delayed_calls_to_start(get_time_delay())
        call = self._assert_single_item(
            calls,
            target_method_name=target_method_name
        )
        self.assertIn('name', call['method_arguments'])

        self.queue.get()

        result = factory().run_something.call_args[1].get('result')

        self.assertIsInstance(result, ml_actions.Result)
        self.assertEqual('data', result.data)
        self.assertEqual('error', result.error)

        calls = db_api.get_delayed_calls_to_start(get_time_delay())
        self.assertEqual(0, len(calls))
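
The serializer tests pass a non-primitive argument (an actions Result) along with a mapping from argument name to a serializer class path, and expect a real Result instance back when the delayed call fires. The point is that method arguments must survive a round trip through the database as primitives. A rough, standalone sketch of that round trip with a hypothetical serializer (not mistral.workflow.utils.ResultSerializer itself):

import json


class ResultLike(object):
    """Stand-in for the actions Result object used in the tests."""

    def __init__(self, data=None, error=None):
        self.data = data
        self.error = error


class ResultSerializerSketch(object):
    """Hypothetical serializer: object <-> JSON-friendly primitives."""

    @staticmethod
    def serialize(result):
        return {'data': result.data, 'error': result.error}

    @staticmethod
    def deserialize(primitive):
        return ResultLike(primitive['data'], primitive['error'])


# On schedule: non-primitive kwargs are converted so the delayed call row
# can be stored as plain text.
stored = json.dumps({
    'name': 'task',
    'result': ResultSerializerSketch.serialize(ResultLike('data', 'error')),
})

# On execution: the row is loaded and the original objects are rebuilt
# before the target method is invoked.
loaded = json.loads(stored)
result = ResultSerializerSketch.deserialize(loaded['result'])

assert isinstance(result, ResultLike)
assert result.data == 'data' and result.error == 'error'
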
Example #46
    def test_scheduler_doesnt_handle_calls_the_failed_on_update(self):
        method_args = {'name': 'task', 'id': '321'}

        scheduler.schedule_call(None, FACTORY_METHOD_PATH, DELAY,
                                **method_args)

        time_filter = datetime.datetime.now() + datetime.timedelta(seconds=2)
        calls = db_api.get_delayed_calls_to_start(time_filter)

        eventlet.sleep(WAIT)

        # If the scheduler did handle calls that failed on update,
        # DBEntityNotFoundException would be raised.
        db_api.get_delayed_call(calls[0].id)

        db_api.delete_delayed_call(calls[0].id)
Example #47
    def schedule(self, input_dict, target, index=0, desc=''):
        assert not self.action_ex

        self._create_action_execution(
            self._prepare_input(input_dict),
            self._prepare_runtime_context(index),
            desc=desc
        )

        scheduler.schedule_call(
            None,
            _RUN_EXISTING_ACTION_PATH,
            0,
            action_ex_id=self.action_ex.id,
            target=target
        )
Example #48
    def before_task_start(self, task_ex, task_spec):
        super(TimeoutPolicy, self).before_task_start(task_ex, task_spec)

        # No timeout if delay is 0
        if self.delay == 0:
            return

        scheduler.schedule_call(None,
                                _FAIL_IF_INCOMPLETE_TASK_PATH,
                                self.delay,
                                task_ex_id=task_ex.id,
                                timeout=self.delay)

        wf_trace.info(
            task_ex, "Timeout check scheduled [task=%s, timeout(s)=%s]." %
            (task_ex.id, self.delay))
Example #49
    def test_scheduler_with_serializer(self, factory):
        target_method = 'run_something'

        task_result = wf_utils.Result('data', 'error')

        method_args = {
            'name': 'task',
            'id': '123',
            'result': task_result
        }

        serializers = {
            'result': 'mistral.workflow.utils.ResultSerializer'
        }

        scheduler.schedule_call(
            FACTORY_METHOD_PATH,
            target_method,
            DELAY,
            serializers=serializers,
            **method_args
        )

        calls = db_api.get_delayed_calls_to_start(
            datetime.datetime.now() + datetime.timedelta(seconds=WAIT)
        )

        call = self._assert_single_item(
            calls,
            target_method_name=target_method
        )

        self.assertIn('name', call['method_arguments'])

        eventlet.sleep(WAIT)

        result = factory().run_something.call_args[1].get('result')

        self.assertIsInstance(result, wf_utils.Result)
        self.assertEqual('data', result.data)
        self.assertEqual('error', result.error)

        calls = db_api.get_delayed_calls_to_start(
            datetime.datetime.now() + datetime.timedelta(seconds=1)
        )

        self.assertEqual(0, len(calls))
Example #50
    def after_task_complete(self, task_ex, task_spec):
        super(WaitAfterPolicy, self).after_task_complete(task_ex, task_spec)

        # No need to postpone a task if delay is 0
        if self.delay == 0:
            return

        context_key = 'wait_after_policy'

        runtime_context = _ensure_context_has_key(
            task_ex.runtime_context,
            context_key
        )

        task_ex.runtime_context = runtime_context

        policy_context = runtime_context[context_key]

        if policy_context.get('skip'):
            # Skip, already processed.
            return

        policy_context.update({'skip': True})

        _log_task_delay(task_ex, self.delay)

        end_state = task_ex.state
        end_state_info = task_ex.state_info

        # TODO(rakhmerov): Policies probably need to have tasks.Task
        # interface in order to manage task state safely.
        # Set task state to 'RUNNING_DELAYED'.
        task_ex.state = states.RUNNING_DELAYED
        task_ex.state_info = (
            'Suspended by wait-after policy for %s seconds' % self.delay
        )

        # Schedule to change task state to RUNNING again.
        scheduler.schedule_call(
            None,
            _COMPLETE_TASK_PATH,
            self.delay,
            task_ex_id=task_ex.id,
            state=end_state,
            state_info=end_state_info
        )
Example #51
    def after_task_complete(self, task_ex, task_spec):
        super(WaitAfterPolicy, self).after_task_complete(task_ex, task_spec)

        # No need to postpone a task if delay is 0
        if self.delay == 0:
            return

        context_key = 'wait_after_policy'

        runtime_context = _ensure_context_has_key(
            task_ex.runtime_context,
            context_key
        )

        task_ex.runtime_context = runtime_context

        policy_context = runtime_context[context_key]

        if policy_context.get('skip'):
            # Skip, already processed.
            return

        policy_context.update({'skip': True})

        _log_task_delay(task_ex, self.delay)

        end_state = task_ex.state
        end_state_info = task_ex.state_info

        # TODO(rakhmerov): Policies probably need to have tasks.Task
        # interface in order to manage task state safely.
        # Set task state to 'RUNNING_DELAYED'.
        task_ex.state = states.RUNNING_DELAYED
        task_ex.state_info = (
            'Suspended by wait-after policy for %s seconds' % self.delay
        )

        # Schedule to change task state to RUNNING again.
        scheduler.schedule_call(
            None,
            _COMPLETE_TASK_PATH,
            self.delay,
            task_ex_id=task_ex.id,
            state=end_state,
            state_info=end_state_info
        )
Example #52
def _schedule_noop_action(task_ex, task_spec, wf_spec):
    wf_ex = task_ex.workflow_execution

    action_def = action_handler.resolve_action_definition(
        'std.noop', wf_ex.workflow_name, wf_spec.get_name())

    action_ex = action_handler.create_action_execution(action_def, {}, task_ex)

    target = expr.evaluate_recursively(task_spec.get_target(),
                                       task_ex.in_context)

    scheduler.schedule_call(
        None,
        'mistral.engine.action_handler.run_existing_action',
        0,
        action_ex_id=action_ex.id,
        target=target)
Example #53
def _schedule_check_and_fix_integrity(wf_ex, delay=0):
    """Schedules workflow integrity check.

    :param wf_ex: Workflow execution.
    :param delay: Minimum amount of time before the check should be made.
    """

    if CONF.engine.execution_integrity_check_delay < 0:
        # Never check integrity if it's a negative value.
        return

    key = _get_integrity_check_key(wf_ex)

    scheduler.schedule_call(None,
                            _CHECK_AND_FIX_INTEGRITY_PATH,
                            delay,
                            key=key,
                            wf_ex_id=wf_ex.id)
Example #54
    def test_scheduler_without_factory(self, method):
        method.side_effect = self.target_method

        scheduler.schedule_call(None, TARGET_METHOD_PATH, DELAY, **{
            'name': 'task',
            'id': '321'
        })

        calls = db_api.get_delayed_calls_to_start(get_time_delay())
        call = self._assert_single_item(calls,
                                        target_method_name=TARGET_METHOD_PATH)
        self.assertIn('name', call['method_arguments'])

        self.queue.get()
        method.assert_called_once_with(name='task', id='321')

        calls = db_api.get_delayed_calls_to_start(get_time_delay())
        self.assertEqual(0, len(calls))
Example #55
    def schedule(self, input_dict, target, index=0, desc=''):
        parent_wf_ex = self.task_ex.workflow_execution
        parent_wf_spec = spec_parser.get_workflow_spec(parent_wf_ex.spec)

        task_spec = spec_parser.get_task_spec(self.task_ex.spec)

        wf_spec_name = task_spec.get_workflow_name()

        wf_def = e_utils.resolve_workflow_definition(
            parent_wf_ex.workflow_name,
            parent_wf_spec.get_name(),
            wf_spec_name
        )

        wf_spec = spec_parser.get_workflow_spec(wf_def.spec)

        wf_params = {
            'task_execution_id': self.task_ex.id,
            'index': index
        }

        if 'env' in parent_wf_ex.params:
            wf_params['env'] = parent_wf_ex.params['env']

        for k, v in list(input_dict.items()):
            if k not in wf_spec.get_input():
                wf_params[k] = v
                del input_dict[k]

        wf_ex, _ = wf_ex_service.create_workflow_execution(
            wf_def.name,
            input_dict,
            "sub-workflow execution",
            wf_params,
            wf_spec
        )

        scheduler.schedule_call(
            None,
            _RESUME_WORKFLOW_PATH,
            0,
            wf_ex_id=wf_ex.id,
            env=None
        )
Example #56
def _schedule_run_action(task_ex, task_spec, action_input, index):
    wf_ex = task_ex.workflow_execution
    wf_spec = spec_parser.get_workflow_spec(wf_ex.spec)

    action_spec_name = task_spec.get_action_name()

    # TODO(rakhmerov): Refactor ad-hoc actions and isolate them.
    action_def = e_utils.resolve_action_definition(
        wf_ex.workflow_name,
        wf_spec.get_name(),
        action_spec_name
    )

    if action_def.spec:
        # Ad-hoc action.
        action_spec = spec_parser.get_action_spec(action_def.spec)

        base_name = action_spec.get_base()

        action_def = e_utils.resolve_action_definition(
            task_ex.workflow_name,
            wf_spec.get_name(),
            base_name
        )

    action_ex = _create_action_execution(
        task_ex, action_def, action_input, index
    )

    target = expr.evaluate_recursively(
        task_spec.get_target(),
        utils.merge_dicts(
            copy.deepcopy(action_input),
            copy.copy(task_ex.in_context)
        )
    )

    scheduler.schedule_call(
        None,
        'mistral.engine.task_handler.run_action',
        0,
        action_ex_id=action_ex.id,
        target=target
    )
Example #57
    def before_task_start(self, task_ex, task_spec):
        super(WaitBeforePolicy, self).before_task_start(task_ex, task_spec)

        # No need to wait for a task if delay is 0
        if self.delay == 0:
            return

        context_key = 'wait_before_policy'

        runtime_context = _ensure_context_has_key(
            task_ex.runtime_context,
            context_key
        )

        task_ex.runtime_context = runtime_context

        policy_context = runtime_context[context_key]

        if policy_context.get('skip'):
            # Unset state 'RUNNING_DELAYED'.
            wf_trace.info(
                task_ex,
                "Task '%s' [%s -> %s]"
                % (task_ex.name, states.RUNNING_DELAYED, states.RUNNING)
            )

            task_ex.state = states.RUNNING

            return

        if task_ex.state != states.IDLE:
            policy_context.update({'skip': True})

            _log_task_delay(task_ex, self.delay)

            task_ex.state = states.RUNNING_DELAYED

            scheduler.schedule_call(
                None,
                _CONTINUE_TASK_PATH,
                self.delay,
                task_ex_id=task_ex.id,
            )
Example #58
def _schedule_run_workflow(task_ex, task_spec, wf_input, index,
                           parent_wf_spec):
    parent_wf_ex = task_ex.workflow_execution

    wf_spec_name = task_spec.get_workflow_name()

    wf_def = e_utils.resolve_workflow_definition(
        parent_wf_ex.workflow_name,
        parent_wf_spec.get_name(),
        wf_spec_name
    )

    wf_spec = spec_parser.get_workflow_spec(wf_def.spec)

    wf_params = {
        'task_execution_id': task_ex.id,
        'with_items_index': index
    }

    if 'env' in parent_wf_ex.params:
        wf_params['env'] = parent_wf_ex.params['env']

    for k, v in list(wf_input.items()):
        if k not in wf_spec.get_input():
            wf_params[k] = v
            del wf_input[k]

    wf_ex_id, _ = wf_ex_service.create_workflow_execution(
        wf_def.name,
        wf_input,
        "sub-workflow execution",
        wf_params,
        wf_spec
    )

    scheduler.schedule_call(
        None,
        'mistral.engine.task_handler.resume_workflow',
        0,
        wf_ex_id=wf_ex_id,
        env=None
    )
Example #59
    def test_scheduler_call_target_method_with_correct_auth(self, method):
        default_context = base.get_context(default=True)
        auth_context.set_ctx(default_context)
        default_project_id = (
            default_context._BaseContext__values['project_id']
        )
        method_args1 = {'expected_project_id': default_project_id}

        scheduler.schedule_call(
            None,
            CHECK_CONTEXT_METHOD_PATH,
            DELAY,
            **method_args1
        )

        second_context = base.get_context(default=False)
        auth_context.set_ctx(second_context)
        second_project_id = (
            second_context._BaseContext__values['project_id']
        )
        method_args2 = {'expected_project_id': second_project_id}

        scheduler.schedule_call(
            None,
            CHECK_CONTEXT_METHOD_PATH,
            DELAY,
            **method_args2
        )

        eventlet.sleep(WAIT)

        method.assert_any_call(
            default_project_id,
            default_project_id
        )

        method.assert_any_call(
            second_project_id,
            second_project_id
        )

        self.assertNotEqual(default_project_id, second_project_id)
Example #60
    def test_scheduler_doesnt_handle_calls_the_failed_on_update(self):
        method_args = {'name': 'task', 'id': '321'}

        scheduler.schedule_call(
            None,
            FACTORY_METHOD_PATH,
            DELAY,
            **method_args
        )

        time_filter = datetime.datetime.now() + datetime.timedelta(seconds=2)
        calls = db_api.get_delayed_calls_to_start(time_filter)

        eventlet.sleep(WAIT)

        # If the scheduler did handle calls that failed on update,
        # DBEntityNotFoundException would be raised.
        db_api.get_delayed_call(calls[0].id)

        db_api.delete_delayed_call(calls[0].id)