Example #1
def process_cron_triggers_v2(self, ctx):
    LOG.debug("Processing cron triggers...")

    for trigger in triggers.get_next_cron_triggers():
        LOG.debug("Processing cron trigger: %s", trigger)

        try:
            # Setup admin context before schedule triggers.
            ctx = security.create_context(
                trigger.trust_id,
                trigger.project_id
            )

            auth_ctx.set_ctx(ctx)

            LOG.debug("Cron trigger security context: %s", ctx)

            # Try to advance the cron trigger next_execution_time and
            # remaining_executions if relevant.
            modified = advance_cron_trigger(trigger)

            # If cron trigger was not already modified by another engine.
            if modified:
                LOG.debug(
                    "Starting workflow '%s' by cron trigger '%s'",
                    trigger.workflow.name,
                    trigger.name
                )

                description = {
                    "description": (
                        "Workflow execution created by cron"
                        " trigger '(%s)'." % trigger.id
                    ),
                    "triggered_by": {
                        "type": "cron_trigger",
                        "id": trigger.id,
                        "name": trigger.name,
                    }
                }

                rpc.get_engine_client().start_workflow(
                    trigger.workflow.name,
                    trigger.workflow.namespace,
                    None,
                    trigger.workflow_input,
                    description=json.dumps(description),
                    **trigger.workflow_params
                )
        except Exception:
            # Log and continue to next cron trigger.
            LOG.exception(
                "Failed to process cron trigger %s",
                str(trigger)
            )
        finally:
            auth_ctx.set_ctx(None)
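
All of the snippets on this page funnel into the same engine client calls. As a quick reference, the start_workflow() pattern used above (and in the execution-controller examples further down) looks roughly like the sketch below. The import path and every concrete value are assumptions for illustration and are not taken from the example itself.

    from mistral.rpc import clients as rpc

    engine = rpc.get_engine_client()

    # Positional arguments: workflow name or ID, namespace, explicit
    # execution ID (None lets the engine generate one), workflow input.
    engine.start_workflow(
        'my_workbook.my_workflow',    # hypothetical workflow name
        '',                           # default namespace
        None,                         # no pre-generated execution ID
        {'param1': 'value1'},         # hypothetical workflow input
        description='Run started from a sketch, not from the code above.',
        env={'region': 'dev'}         # extra params are passed through **params
    )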
Example #2
def _start_subworkflow():
    rpc.get_engine_client().start_workflow(
        wf_def.id,
        wf_def.namespace,
        None,
        input_dict,
        "sub-workflow execution",
        async_=True,
        **wf_params
    )
Example #3
    def test_retry_async_action(self):
        retry_wf = """---
          version: '2.0'
          repeated_retry:
            tasks:
              async_http:
                retry:
                  delay: 0
                  count: 100
                action: std.mistral_http url='https://google.com'
            """

        wf_service.create_workflows(retry_wf)
        wf_ex = self.engine.start_workflow('repeated_retry')

        self.await_workflow_running(wf_ex.id)

        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            task_ex = wf_ex.task_executions[0]
            self.await_task_running(task_ex.id)

            first_action_ex = task_ex.executions[0]
            self.await_action_state(first_action_ex.id, states.RUNNING)

        complete_action_params = (
            first_action_ex.id,
            ml_actions.Result(error="mock")
        )
        rpc.get_engine_client().on_action_complete(*complete_action_params)

        for _ in range(2):
            self.assertRaises(
                exc.MistralException,
                rpc.get_engine_client().on_action_complete,
                *complete_action_params
            )

        self.await_task_running(task_ex.id)
        with db_api.transaction():
            task_ex = db_api.get_task_execution(task_ex.id)
            action_exs = task_ex.executions

            self.assertEqual(2, len(action_exs))

            for action_ex in action_exs:
                if action_ex.id == first_action_ex.id:
                    expected_state = states.ERROR
                else:
                    expected_state = states.RUNNING

                self.assertEqual(expected_state, action_ex.state)
Example #4
    def post(self, action_ex):
        """Create new action_execution.

        :param action_ex: Action to execute
        """
        acl.enforce('action_executions:create', context.ctx())

        LOG.debug(
            "Create action_execution [action_execution=%s]",
            action_ex
        )

        name = action_ex.name
        description = action_ex.description or None
        action_input = action_ex.input or {}
        params = action_ex.params or {}

        if not name:
            raise exc.InputException(
                "Please provide at least action name to run action."
            )

        values = rpc.get_engine_client().start_action(
            name,
            action_input,
            description=description,
            **params
        )

        return resources.ActionExecution.from_dict(values)
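
For ad-hoc action runs, the controller above reduces to a single start_action() call. A minimal sketch of that call follows; the import path, the action name and the parameter values are assumptions rather than part of the original handler.

    from mistral.rpc import clients as rpc

    values = rpc.get_engine_client().start_action(
        'std.echo',                       # hypothetical action name
        {'output': 'Hello, Mistral!'},    # action input
        description='Ad-hoc action run (sketch)',
        save_result=True                  # assumed extra option, passed via **params
    )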
Example #5
    def put(self, id, action_ex):
        """Update the specified action_execution.

        :param id: UUID of action execution to update
        :param action_ex: Action execution for update
        """
        acl.enforce('action_executions:update', context.ctx())

        LOG.debug(
            "Update action_execution [id=%s, action_execution=%s]",
            id,
            action_ex
        )

        if action_ex.state not in SUPPORTED_TRANSITION_STATES:
            raise exc.InvalidResultException(
                "Error. Expected one of %s, actual: %s" % (
                    SUPPORTED_TRANSITION_STATES,
                    action_ex.state
                )
            )

        if states.is_completed(action_ex.state):
            output = action_ex.output

            if action_ex.state == states.SUCCESS:
                result = ml_actions.Result(data=output)
            elif action_ex.state == states.ERROR:
                if not output:
                    output = 'Unknown error'
                result = ml_actions.Result(error=output)
            elif action_ex.state == states.CANCELLED:
                result = ml_actions.Result(cancel=True)

            values = rpc.get_engine_client().on_action_complete(id, result)

        if action_ex.state in [states.PAUSED, states.RUNNING]:
            state = action_ex.state
            values = rpc.get_engine_client().on_action_update(id, state)

        return resources.ActionExecution.from_dict(values)
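
The completion branch above builds a mistral_lib Result and hands it to on_action_complete(). Outside the controller the same calls can be issued directly, roughly as in this sketch; the import paths and the hard-coded ID are assumptions for illustration.

    from mistral.rpc import clients as rpc
    from mistral_lib import actions as ml_actions

    client = rpc.get_engine_client()

    # Report success with an output payload (placeholder ID and data).
    client.on_action_complete('ACTION_EX_ID', ml_actions.Result(data={'ok': True}))

    # Alternatively, report an error or a cancellation:
    # client.on_action_complete('ACTION_EX_ID', ml_actions.Result(error='boom'))
    # client.on_action_complete('ACTION_EX_ID', ml_actions.Result(cancel=True))

    # Non-terminal transitions (PAUSED, RUNNING) go through on_action_update():
    # client.on_action_update('ACTION_EX_ID', 'PAUSED')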
Example #6
    def setUp(self):
        super(EngineTestCase, self).setUp()

        # Get transport here to let oslo.messaging setup default config
        # before changing the rpc_backend to the fake driver; otherwise,
        # oslo.messaging will throw exception.
        messaging.get_transport(cfg.CONF)

        # Set the transport to 'fake' for Engine tests.
        cfg.CONF.set_default('rpc_backend', 'fake')

        # Drop all RPC objects (transport, clients).
        rpc_base.cleanup()
        rpc_clients.cleanup()
        exe.cleanup()

        self.threads = []

        # Start remote executor.
        if cfg.CONF.executor.type == 'remote':
            LOG.info("Starting remote executor threads...")

            self.executor_client = rpc_clients.get_executor_client()

            exe_svc = executor_server.get_oslo_service(setup_profiler=False)

            self.executor = exe_svc.executor
            self.threads.append(eventlet.spawn(launch_service, exe_svc))
            self.addCleanup(exe_svc.stop, True)

        # Start engine.
        LOG.info("Starting engine threads...")

        self.engine_client = rpc_clients.get_engine_client()

        eng_svc = engine_server.get_oslo_service(setup_profiler=False)

        self.engine = eng_svc.engine
        self.threads.append(eventlet.spawn(launch_service, eng_svc))
        self.addCleanup(eng_svc.stop, True)

        self.addOnException(self.print_executions)
        self.addCleanup(self.kill_threads)

        # Make sure that both services fully started, otherwise
        # the test may run too early.
        if cfg.CONF.executor.type == 'remote':
            exe_svc.wait_started()

        eng_svc.wait_started()
Example #7
    def __init__(self, conf):
        super(ActionExecutionReporter, self).__init__(conf)
        self._engine_client = rpc.get_engine_client()
        self._running_actions = set()

        self.interval = CONF.action_heartbeat.check_interval
        self.max_missed = CONF.action_heartbeat.max_missed_heartbeats
        self.enabled = self.interval and self.max_missed

        _periodic_task = periodic_task.periodic_task(
            spacing=self.interval,
            run_immediately=True
        )
        self.add_periodic_task(
            _periodic_task(report)
        )
Example #8
    def __init__(self):
        self.engine_client = rpc.get_engine_client()
        self.event_queue = six.moves.queue.Queue()
        self.handler_tg = threadgroup.ThreadGroup()

        self.event_triggers_map = defaultdict(list)
        self.exchange_topic_events_map = defaultdict(set)
        self.exchange_topic_listener_map = {}

        self.lock = threading.Lock()

        LOG.debug('Loading notification definitions.')

        self.notification_converter = NotificationsConverter()

        self._start_handler()
        self._start_listeners()
Example #10
    def post(self, action_ex):
        """Create new action_execution."""
        acl.enforce('action_executions:create', context.ctx())

        LOG.info("Create action_execution [action_execution=%s]", action_ex)

        name = action_ex.name
        description = action_ex.description or None
        action_input = action_ex.input or {}
        params = action_ex.params or {}

        if not name:
            raise exc.InputException(
                "Please provide at least action name to run action.")

        values = rpc.get_engine_client().start_action(name,
                                                      action_input,
                                                      description=description,
                                                      **params)

        return resources.ActionExecution.from_dict(values)
Example #11
    def post(self, wf_ex):
        """Create a new Execution.

        :param wf_ex: Execution object with input content.
        """
        acl.enforce('executions:create', context.ctx())

        LOG.debug("Create execution [execution=%s]", wf_ex)

        exec_dict = wf_ex.to_dict()

        exec_id = exec_dict.get('id')

        if exec_id:
            # If ID is present we need to check if such execution exists.
            # If yes, the method just returns the object. If not, the ID
            # will be used to create a new execution.
            wf_ex = _get_workflow_execution(exec_id, must_exist=False)

            if wf_ex:
                return resources.Execution.from_db_model(wf_ex)

        if not (exec_dict.get('workflow_id')
                or exec_dict.get('workflow_name')):
            raise exc.WorkflowException(
                "Workflow ID or workflow name must be provided. Workflow ID is"
                " recommended.")

        engine = rpc.get_engine_client()

        result = engine.start_workflow(
            exec_dict.get('workflow_id', exec_dict.get('workflow_name')),
            exec_dict.get('workflow_namespace', ''), exec_id,
            exec_dict.get('input'), exec_dict.get('description', ''),
            **exec_dict.get('params') or {})

        return resources.Execution.from_dict(result)
Example #12
    def post(self, wf_ex):
        """Create a new Execution.

        :param wf_ex: Execution object with input content.
        """
        acl.enforce('executions:create', context.ctx())

        LOG.info("Create execution [execution=%s]", wf_ex)

        engine = rpc.get_engine_client()
        exec_dict = wf_ex.to_dict()

        if not (exec_dict.get('workflow_id')
                or exec_dict.get('workflow_name')):
            raise exc.WorkflowException(
                "Workflow ID or workflow name must be provided. Workflow ID is"
                " recommended.")

        result = engine.start_workflow(
            exec_dict.get('workflow_id', exec_dict.get('workflow_name')),
            exec_dict.get('input'), exec_dict.get('description', ''),
            **exec_dict.get('params') or {})

        return resources.Execution.from_dict(result)
Example #13
    def put(self, id, wf_ex):
        """Update the specified workflow execution.

        :param id: UUID of execution to update.
        :param wf_ex: Execution object.
        """
        acl.enforce('executions:update', context.ctx())

        LOG.debug('Update execution [id=%s, execution=%s]', id, wf_ex)

        @rest_utils.rest_retry_on_db_error
        def _compute_delta(wf_ex):
            with db_api.transaction():
                # ensure that workflow execution exists
                db_api.get_workflow_execution(id)

                delta = {}

                if wf_ex.state:
                    delta['state'] = wf_ex.state

                if wf_ex.description:
                    delta['description'] = wf_ex.description

                if wf_ex.params and wf_ex.params.get('env'):
                    delta['env'] = wf_ex.params.get('env')

                # Currently we can change only state, description, or env.
                if len(delta.values()) <= 0:
                    raise exc.InputException(
                        'The property state, description, or env '
                        'is not provided for update.')

                # Description cannot be updated together with state.
                if delta.get('description') and delta.get('state'):
                    raise exc.InputException(
                        'The property description must be updated '
                        'separately from state.')

                # If state change, environment cannot be updated
                # if not RUNNING.
                if (delta.get('env') and delta.get('state')
                        and delta['state'] != states.RUNNING):
                    raise exc.InputException(
                        'The property env can only be updated when workflow '
                        'execution is not running or on resume from pause.')

                if delta.get('description'):
                    wf_ex = db_api.update_workflow_execution(
                        id, {'description': delta['description']})

                if not delta.get('state') and delta.get('env'):
                    wf_ex = db_api.get_workflow_execution(id)
                    wf_ex = wf_service.update_workflow_execution_env(
                        wf_ex, delta.get('env'))

                return delta, wf_ex

        delta, wf_ex = _compute_delta(wf_ex)

        if delta.get('state'):
            if states.is_paused(delta.get('state')):
                wf_ex = rpc.get_engine_client().pause_workflow(id)
            elif delta.get('state') == states.RUNNING:
                wf_ex = rpc.get_engine_client().resume_workflow(
                    id, env=delta.get('env'))
            elif states.is_completed(delta.get('state')):
                msg = wf_ex.state_info if wf_ex.state_info else None
                wf_ex = rpc.get_engine_client().stop_workflow(
                    id, delta.get('state'), msg)
            else:
                # To prevent changing state in other cases throw a message.
                raise exc.InputException(
                    "Cannot change state to %s. Allowed states are: '%s" %
                    (wf_ex.state, ', '.join([
                        states.RUNNING, states.PAUSED, states.SUCCESS,
                        states.ERROR, states.CANCELLED
                    ])))

        return resources.Execution.from_dict(
            wf_ex if isinstance(wf_ex, dict) else wf_ex.to_dict())
Example #14
    def setUp(self):
        super(EngineTestCase, self).setUp()

        # Get transport here to let oslo.messaging setup default config
        # before changing the rpc_backend to the fake driver; otherwise,
        # oslo.messaging will throw exception.
        messaging.get_transport(cfg.CONF)

        # Set the transport to 'fake' for Engine tests.
        cfg.CONF.set_default('transport_url', 'fake:/')

        # Drop all RPC objects (transport, clients).
        rpc_base.cleanup()
        rpc_clients.cleanup()
        exe.cleanup()

        self.threads = []

        # Start remote executor.
        if cfg.CONF.executor.type == 'remote':
            LOG.info("Starting remote executor threads...")

            self.executor_client = rpc_clients.get_executor_client()

            exe_svc = executor_server.get_oslo_service(setup_profiler=False)

            self.executor = exe_svc.executor
            self.threads.append(eventlet.spawn(launch_service, exe_svc))
            self.addCleanup(exe_svc.stop, True)

        # Start remote notifier.
        if cfg.CONF.notifier.type == 'remote':
            LOG.info("Starting remote notifier threads...")

            self.notifier_client = rpc_clients.get_notifier_client()

            notif_svc = notif_server.get_oslo_service(setup_profiler=False)

            self.notifier = notif_svc.notifier
            self.threads.append(eventlet.spawn(launch_service, notif_svc))
            self.addCleanup(notif_svc.stop, True)

        # Start engine.
        LOG.info("Starting engine threads...")

        self.engine_client = rpc_clients.get_engine_client()

        eng_svc = engine_server.get_oslo_service(setup_profiler=False)

        self.engine = eng_svc.engine
        self.threads.append(eventlet.spawn(launch_service, eng_svc))
        self.addCleanup(eng_svc.stop, True)

        self.addOnException(self.print_executions)
        self.addCleanup(self.kill_threads)

        # Make sure that both services fully started, otherwise
        # the test may run too early.
        if cfg.CONF.executor.type == 'remote':
            exe_svc.wait_started()

        if cfg.CONF.notifier.type == 'remote':
            notif_svc.wait_started()

        eng_svc.wait_started()
Example #15
    def post(self, wf_ex):
        """Create a new Execution.

        :param wf_ex: Execution object with input content.
        """
        acl.enforce('executions:create', context.ctx())

        LOG.debug("Create execution [execution=%s]", wf_ex)

        exec_dict = wf_ex.to_dict()

        exec_id = exec_dict.get('id')

        if not exec_id:
            exec_id = uuidutils.generate_uuid()
            LOG.debug("Generated execution id [exec_id=%s]", exec_id)
            exec_dict.update({'id': exec_id})
            wf_ex = None
        else:
            # If ID is present we need to check if such execution exists.
            # If yes, the method just returns the object. If not, the ID
            # will be used to create a new execution.
            wf_ex = _get_workflow_execution(exec_id, must_exist=False)
            if wf_ex:
                return resources.Execution.from_db_model(wf_ex)

        source_execution_id = exec_dict.get('source_execution_id')

        source_exec_dict = None

        if source_execution_id:
            # If source execution is present we will perform a lookup for
            # previous workflow execution model and the information to start
            # a new workflow based on that information.
            source_exec_dict = db_api.get_workflow_execution(
                source_execution_id).to_dict()

            exec_dict['description'] = "{} Based on the execution '{}'".format(
                exec_dict['description'], source_execution_id)
            exec_dict['description'] = exec_dict['description'].strip()

        result_exec_dict = merge_dicts(source_exec_dict, exec_dict)

        if not (result_exec_dict.get('workflow_id')
                or result_exec_dict.get('workflow_name')):
            raise exc.WorkflowException(
                "Workflow ID or workflow name must be provided. Workflow ID is"
                " recommended.")

        engine = rpc.get_engine_client()

        result = engine.start_workflow(
            result_exec_dict.get('workflow_id',
                                 result_exec_dict.get('workflow_name')),
            result_exec_dict.get('workflow_namespace', ''),
            result_exec_dict.get('id'),
            result_exec_dict.get('input'),
            description=result_exec_dict.get('description', ''),
            **result_exec_dict.get('params') or {})

        return resources.Execution.from_dict(result)
Example #16
def _send_result():
    rpc.get_engine_client().on_action_complete(self.wf_ex.id,
                                               result,
                                               wf_action=True)
Example #17
def __init__(self):
    self._engine_client = rpc.get_engine_client()
Example #18
    def put(self, id, wf_ex):
        """Update the specified workflow execution.

        :param id: UUID of execution to update.
        :param wf_ex: Execution object.
        """
        acl.enforce('executions:update', context.ctx())

        LOG.debug('Update execution [id=%s, execution=%s]', id, wf_ex)

        @rest_utils.rest_retry_on_db_error
        def _compute_delta(wf_ex):
            with db_api.transaction():
                # ensure that workflow execution exists
                db_api.get_workflow_execution(
                    id,
                    fields=(db_models.WorkflowExecution.id,)
                )

                delta = {}

                if wf_ex.state:
                    delta['state'] = wf_ex.state

                if wf_ex.description:
                    delta['description'] = wf_ex.description

                if wf_ex.params and wf_ex.params.get('env'):
                    delta['env'] = wf_ex.params.get('env')

                # Currently we can change only state, description, or env.
                if len(delta.values()) <= 0:
                    raise exc.InputException(
                        'The property state, description, or env '
                        'is not provided for update.'
                    )

                # Description cannot be updated together with state.
                if delta.get('description') and delta.get('state'):
                    raise exc.InputException(
                        'The property description must be updated '
                        'separately from state.'
                    )

                # If state change, environment cannot be updated
                # if not RUNNING.
                if (delta.get('env') and
                        delta.get('state') and
                        delta['state'] != states.RUNNING):
                    raise exc.InputException(
                        'The property env can only be updated when workflow '
                        'execution is not running or on resume from pause.'
                    )

                if delta.get('description'):
                    wf_ex = db_api.update_workflow_execution(
                        id,
                        {'description': delta['description']}
                    )

                if not delta.get('state') and delta.get('env'):
                    wf_ex = db_api.get_workflow_execution(id)
                    wf_ex = wf_service.update_workflow_execution_env(
                        wf_ex,
                        delta.get('env')
                    )

                return delta, wf_ex

        delta, wf_ex = _compute_delta(wf_ex)

        if delta.get('state'):
            if states.is_paused(delta.get('state')):
                wf_ex = rpc.get_engine_client().pause_workflow(id)
            elif delta.get('state') == states.RUNNING:
                wf_ex = rpc.get_engine_client().resume_workflow(
                    id,
                    env=delta.get('env')
                )
            elif states.is_completed(delta.get('state')):
                msg = wf_ex.state_info if wf_ex.state_info else None
                wf_ex = rpc.get_engine_client().stop_workflow(
                    id,
                    delta.get('state'),
                    msg
                )
            else:
                # To prevent changing state in other cases throw a message.
                raise exc.InputException(
                    "Cannot change state to %s. Allowed states are: '%s" % (
                        wf_ex.state,
                        ', '.join([
                            states.RUNNING,
                            states.PAUSED,
                            states.SUCCESS,
                            states.ERROR,
                            states.CANCELLED
                        ])
                    )
                )

        return resources.Execution.from_dict(
            wf_ex if isinstance(wf_ex, dict) else wf_ex.to_dict()
        )
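
Stripped of the validation code, the state handling above dispatches to three engine client methods. A condensed sketch, with a placeholder execution ID and an assumed import path:

    from mistral.rpc import clients as rpc
    from mistral.workflow import states

    client = rpc.get_engine_client()

    client.pause_workflow('WF_EX_ID')
    client.resume_workflow('WF_EX_ID', env={'var': 'value'})
    client.stop_workflow('WF_EX_ID', states.ERROR, 'Stopped manually (sketch).')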
Example #19
    def post(self, wf_ex):
        """Create a new Execution.

        :param wf_ex: Execution object with input content.
        """
        acl.enforce('executions:create', context.ctx())

        LOG.debug("Create execution [execution=%s]", wf_ex)

        exec_dict = wf_ex.to_dict()

        exec_id = exec_dict.get('id')

        if not exec_id:
            exec_id = uuidutils.generate_uuid()
            LOG.debug("Generated execution id [exec_id=%s]", exec_id)
            exec_dict.update({'id': exec_id})
            wf_ex = None
        else:
            # If ID is present we need to check if such execution exists.
            # If yes, the method just returns the object. If not, the ID
            # will be used to create a new execution.
            wf_ex = _get_workflow_execution(exec_id, must_exist=False)
            if wf_ex:
                return resources.Execution.from_db_model(wf_ex)

        source_execution_id = exec_dict.get('source_execution_id')

        source_exec_dict = None

        if source_execution_id:
            # If source execution is present we will perform a lookup for
            # previous workflow execution model and the information to start
            # a new workflow based on that information.
            source_exec_dict = db_api.get_workflow_execution(
                source_execution_id).to_dict()

            exec_dict['description'] = "{} Based on the execution '{}'".format(
                exec_dict['description'], source_execution_id)
            exec_dict['description'] = exec_dict['description'].strip()

        result_exec_dict = merge_dicts(source_exec_dict, exec_dict)

        if not (result_exec_dict.get('workflow_id') or
                result_exec_dict.get('workflow_name')):
            raise exc.WorkflowException(
                "Workflow ID or workflow name must be provided. Workflow ID is"
                " recommended."
            )

        engine = rpc.get_engine_client()

        result = engine.start_workflow(
            result_exec_dict.get('workflow_id',
                                 result_exec_dict.get('workflow_name')),
            result_exec_dict.get('workflow_namespace', ''),
            result_exec_dict.get('id'),
            result_exec_dict.get('input'),
            description=result_exec_dict.get('description', ''),
            **result_exec_dict.get('params') or {}
        )

        return resources.Execution.from_dict(result)
Example #20
    def setUp(self):
        super(EngineTestCase, self).setUp()

        # We assume that most tests don't need a remote executor.
        # But this option can be overridden on a test level, if needed,
        # because an executor instance (local or a client to a remote one)
        # is obtained by engine dynamically on every need.
        self.override_config('type', 'local', 'executor')

        # Get transport here to let oslo.messaging setup default config
        # before changing the rpc_backend to the fake driver; otherwise,
        # oslo.messaging will throw exception.
        messaging.get_transport(cfg.CONF)

        # Set the transport to 'fake' for Engine tests.
        cfg.CONF.set_default('transport_url', 'fake:/')

        # Drop all RPC objects (transport, clients).
        rpc_base.cleanup()
        rpc_clients.cleanup()
        exe.cleanup()

        self.threads = []

        # Start remote executor.
        if cfg.CONF.executor.type == 'remote':
            LOG.info("Starting remote executor threads...")

            self.executor_client = rpc_clients.get_executor_client()

            exe_svc = executor_server.get_oslo_service(setup_profiler=False)

            self.executor = exe_svc.executor

            self.threads.append(eventlet.spawn(launch_service, exe_svc))

            self.addCleanup(exe_svc.stop, True)
        else:
            self.executor = exe.get_executor(cfg.CONF.executor.type)

        # Start remote notifier.
        if cfg.CONF.notifier.type == 'remote':
            LOG.info("Starting remote notifier threads...")

            self.notifier_client = rpc_clients.get_notifier_client()

            notif_svc = notif_server.get_oslo_service(setup_profiler=False)

            self.notifier = notif_svc.notifier
            self.threads.append(eventlet.spawn(launch_service, notif_svc))
            self.addCleanup(notif_svc.stop, True)

        # Start engine.
        LOG.info("Starting engine threads...")

        self.engine_client = rpc_clients.get_engine_client()

        eng_svc = engine_server.get_oslo_service(setup_profiler=False)

        self.engine = eng_svc.engine
        self.threads.append(eventlet.spawn(launch_service, eng_svc))
        self.addCleanup(eng_svc.stop, True)

        self.addOnException(self.print_executions)
        self.addCleanup(self.kill_threads)

        # This call ensures that all plugged in action providers are
        # properly initialized.
        action_service.get_system_action_provider()

        # Make sure that both services fully started, otherwise
        # the test may run too early.
        if cfg.CONF.executor.type == 'remote':
            exe_svc.wait_started()

        if cfg.CONF.notifier.type == 'remote':
            notif_svc.wait_started()

        eng_svc.wait_started()
Example #21
def _send_result():
    rpc.get_engine_client().on_action_complete(
        self.wf_ex.id,
        result,
        wf_action=True
    )
Example #22
    def put(self, id, task):
        """Update the specified task execution.

        :param id: Task execution ID.
        :param task: Task execution object.
        """
        acl.enforce('tasks:update', context.ctx())

        LOG.debug("Update task execution [id=%s, task=%s]", id, task)

        @rest_utils.rest_retry_on_db_error
        def _read_task_params(id, task):
            with db_api.transaction():
                task_ex = db_api.get_task_execution(id)
                task_spec = spec_parser.get_task_spec(task_ex.spec)
                task_name = task.name or None
                reset = task.reset
                env = task.env or None

                if task_name and task_name != task_ex.name:
                    raise exc.WorkflowException('Task name does not match.')

                wf_ex = db_api.get_workflow_execution(
                    task_ex.workflow_execution_id
                )

                return env, reset, task_ex, task_spec, wf_ex

        env, reset, task_ex, task_spec, wf_ex = _read_task_params(id, task)

        wf_name = task.workflow_name or None

        if wf_name and wf_name != wf_ex.name:
            raise exc.WorkflowException('Workflow name does not match.')

        if task.state != states.RUNNING:
            raise exc.WorkflowException(
                'Invalid task state. '
                'Only updating task to rerun is supported.'
            )

        if task_ex.state != states.ERROR:
            raise exc.WorkflowException(
                'The current task execution must be in ERROR for rerun.'
                ' Only updating task to rerun is supported.'
            )

        if not task_spec.get_with_items() and not reset:
            raise exc.WorkflowException(
                'Only with-items task has the option to not reset.'
            )

        rpc.get_engine_client().rerun_workflow(
            task_ex.id,
            reset=reset,
            env=env
        )

        @rest_utils.rest_retry_on_db_error
        def _retrieve_task():
            with db_api.transaction():
                task_ex = db_api.get_task_execution(id)

                return _get_task_resource_with_result(task_ex)

        return _retrieve_task()
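
Once the checks pass, the rerun path above boils down to one engine client call. A minimal sketch, using a placeholder task execution ID and an assumed import path:

    from mistral.rpc import clients as rpc

    rpc.get_engine_client().rerun_workflow(
        'TASK_EX_ID',           # ERROR task execution to rerun (placeholder)
        reset=True,             # non with-items tasks must be reset (see check above)
        env={'var': 'value'}    # optional environment override
    )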