Exemplo n.º 1
0
    def _send_result_to_parent_workflow(self):
        """Propagate the outcome of this sub-workflow to its parent.

        Builds a result object matching the execution's terminal state and
        registers its RPC delivery to happen after the main DB transaction
        commits.

        :raises RuntimeError: if the workflow is not in a terminal state.
        """
        wf_ex = self.wf_ex

        if wf_ex.state == states.SUCCESS:
            # The sub-workflow result is already stored in the DB, so it
            # doesn't have to be shipped over RPC.
            result = None
        elif wf_ex.state == states.ERROR:
            err_msg = (
                wf_ex.state_info or
                'Failed subworkflow [execution_id=%s]' % wf_ex.id
            )

            result = ml_actions.Result(error=err_msg)
        elif wf_ex.state == states.CANCELLED:
            err_msg = (
                wf_ex.state_info or
                'Cancelled subworkflow [execution_id=%s]' % wf_ex.id
            )

            result = ml_actions.Result(error=err_msg, cancel=True)
        else:
            raise RuntimeError(
                "Method _send_result_to_parent_workflow() must never be called"
                " if a workflow is not in SUCCESS, ERROR or CANCELLED state."
            )

        def _send_result():
            # Runs on a separate thread once the main DB transaction is done.
            rpc.get_engine_client().on_action_complete(
                self.wf_ex.id,
                result,
                wf_action=True
            )

        post_tx_queue.register_operation(_send_result)
Exemplo n.º 2
0
    def _send_result_to_parent_workflow(self):
        """Send this sub-workflow's result to the parent workflow over RPC.

        The delivery itself is deferred until after the main DB transaction
        commits.

        :raises RuntimeError: if called while the workflow is not in a
            terminal (SUCCESS, ERROR or CANCELLED) state.
        """
        state = self.wf_ex.state

        if state == states.SUCCESS:
            # Result is already persisted; nothing needs to go over RPC.
            result = None
        elif state == states.ERROR:
            result = ml_actions.Result(
                error=(self.wf_ex.state_info or
                       'Failed subworkflow [execution_id=%s]' % self.wf_ex.id)
            )
        elif state == states.CANCELLED:
            result = ml_actions.Result(
                error=(self.wf_ex.state_info or
                       'Cancelled subworkflow [execution_id=%s]' %
                       self.wf_ex.id),
                cancel=True
            )
        else:
            raise RuntimeError(
                "Method _send_result_to_parent_workflow() must never be called"
                " if a workflow is not in SUCCESS, ERROR or CANCELLED state.")

        def _send_result():
            # Executed on a separate thread outside the main DB transaction.
            rpc.get_engine_client().on_action_complete(self.wf_ex.id,
                                                       result,
                                                       wf_action=True)

        post_tx_queue.register_operation(_send_result)
Exemplo n.º 3
0
    def notify(self, old_task_state, new_task_state):
        """Register a post-transaction notification about a task state change.

        :param old_task_state: Previous state of the task execution.
        :param new_task_state: New state of the task execution.
        """
        publishers = self.wf_ex.params.get('notify')

        # NOTE(review): with 'and' here, a truthy non-list value would slip
        # past this guard — presumably 'notify' is always a list or None;
        # confirm against callers before tightening.
        if not publishers and not isinstance(publishers, list):
            return

        notifier = notif.get_notifier(cfg.CONF.notifier.type)
        event = events.identify_task_event(old_task_state, new_task_state)

        def _send_notification():
            # The payload is built lazily, at send time, so it reflects the
            # task execution state after the DB transaction has committed.
            data = {
                "id": self.task_ex.id,
                "name": self.task_ex.name,
                "workflow_name": self.task_ex.workflow_name,
                "workflow_namespace": self.task_ex.workflow_namespace,
                "workflow_id": self.task_ex.workflow_id,
                "state": self.task_ex.state,
                "state_info": self.task_ex.state_info,
                "type": self.task_ex.type,
                "project_id": self.task_ex.project_id
            }

            notifier.notify(
                self.task_ex.id,
                data,
                event,
                self.task_ex.updated_at,
                publishers
            )

        post_tx_queue.register_operation(_send_notification)
Exemplo n.º 4
0
    def register_workflow_completion_check(self):
        """Queue a workflow completion check if this task may finish it."""
        wf_ctrl = wf_base.get_controller(self.wf_ex, self.wf_spec)

        if not wf_ctrl.may_complete_workflow(self.task_ex):
            return

        # The check runs asynchronously in a separate transaction so it
        # doesn't happen inside the current (main) one.
        def _check():
            wf_handler.check_and_complete(self.wf_ex.id)

        post_tx_queue.register_operation(_check, in_tx=True)
Exemplo n.º 5
0
    def register_workflow_completion_check(self):
        """Schedule an async completion check for the enclosing workflow."""
        controller = wf_base.get_controller(self.wf_ex, self.wf_spec)

        def _verify_completion():
            wf_handler.check_and_complete(self.wf_ex.id)

        # Only register the check (which runs in a separate transaction)
        # when this task can actually lead to workflow completion.
        if controller.may_complete_workflow(self.task_ex):
            post_tx_queue.register_operation(_verify_completion, in_tx=True)
Exemplo n.º 6
0
    def schedule(self,
                 input_dict,
                 target,
                 index=0,
                 desc='',
                 safe_rerun=False,
                 timeout=None):
        """Schedule the action to run on an executor.

        Validates the input, instantiates the action within the workflow
        context, creates the action execution DB object and registers the
        actual executor call to run after the main DB transaction commits.

        :param input_dict: Action input.
        :param target: Target (executor) to run the action on.
        :param index: Index of the action execution within the task.
        :param desc: Human-readable description of the action execution.
        :param safe_rerun: Whether the action can safely be re-run.
        :param timeout: Timeout in seconds after which the action aborts.
        :raises exc.InvalidActionException: if the action cannot be
            instantiated with the given input.
        """
        assert not self.action_ex

        self.action_desc.check_parameters(input_dict)

        wf_ex = self.task_ex.workflow_execution if self.task_ex else None

        wf_ctx = data_flow.ContextView(
            self.task_ctx, data_flow.get_workflow_environment_dict(wf_ex),
            wf_ex.context if wf_ex else {})

        try:
            action = self.action_desc.instantiate(input_dict, wf_ctx)
        except Exception as e:
            # Chain the original exception so the root cause of the
            # instantiation failure isn't lost.
            raise exc.InvalidActionException(
                'Failed to instantiate an action'
                ' [action_desc=%s, input_dict=%s]' %
                (self.action_desc, input_dict)) from e

        # Assign the action execution ID here to minimize database calls.
        # Otherwise, the input property of the action execution DB object needs
        # to be updated with the action execution ID after the action execution
        # DB object is created.
        action_ex_id = utils.generate_unicode_uuid()

        self._create_action_execution(input_dict,
                                      self._prepare_runtime_context(
                                          index, safe_rerun),
                                      desc=desc,
                                      action_ex_id=action_ex_id,
                                      is_sync=action.is_sync())

        def _run_action():
            executor = exe.get_executor(cfg.CONF.executor.type)

            return executor.run_action(
                action,
                self.action_ex.id if self.action_ex is not None else None,
                safe_rerun,
                self._prepare_execution_context(),
                target=target,
                timeout=timeout)

        # Register an asynchronous command to run the action
        # on an executor outside of the main DB transaction.
        post_tx_queue.register_operation(_run_action)
Exemplo n.º 7
0
    def _notify(self, from_state, to_state):
        """Notify subscribed publishers about a task state transition.

        The payload is captured at registration time; delivery happens
        after the main DB transaction commits.

        :param from_state: Previous state of the task execution.
        :param to_state: New state of the task execution.
        """
        publishers = self.wf_ex.params.get('notify')

        if not publishers and not isinstance(publishers, list):
            return

        notifier = notif.get_notifier(cfg.CONF.notifier.type)

        event = events.identify_task_event(from_state, to_state)

        # Keep only dict publishers subscribed to this event. An empty or
        # missing 'event_types' list means "subscribe to all events".
        filtered_publishers = [
            p for p in publishers
            if isinstance(p, dict) and
            (not p.get('event_types', []) or event in p.get('event_types', []))
        ]

        if not filtered_publishers:
            return

        t_ex = self.task_ex

        data = {
            "id": t_ex.id,
            "name": t_ex.name,
            "workflow_execution_id": t_ex.workflow_execution_id,
            "workflow_name": t_ex.workflow_name,
            "workflow_namespace": t_ex.workflow_namespace,
            "workflow_id": t_ex.workflow_id,
            "state": t_ex.state,
            "state_info": t_ex.state_info,
            "type": t_ex.type,
            "project_id": t_ex.project_id,
            "created_at": utils.datetime_to_str(t_ex.created_at),
            "updated_at": utils.datetime_to_str(t_ex.updated_at),
            "started_at": utils.datetime_to_str(t_ex.started_at),
            "finished_at": utils.datetime_to_str(t_ex.finished_at)
        }

        def _send_notification():
            notifier.notify(
                self.task_ex.id,
                data,
                event,
                self.task_ex.updated_at,
                filtered_publishers
            )

        post_tx_queue.register_operation(_send_notification)
Exemplo n.º 8
0
    def notify(self, event):
        """Register a post-transaction notification about a workflow event.

        :param event: Identifier of the workflow event being published.
        """
        publishers = self.wf_ex.params.get('notify')

        if not publishers and not isinstance(publishers, list):
            return

        notifier = notif.get_notifier(cfg.CONF.notifier.type)

        # A publisher with an empty or absent 'event_types' list subscribes
        # to every event; non-dict entries are ignored.
        filtered_publishers = []

        for pub in publishers:
            if not isinstance(pub, dict):
                continue

            subscribed = pub.get('event_types', [])

            if not subscribed or event in subscribed:
                filtered_publishers.append(pub)

        if not filtered_publishers:
            return

        def _send_notification():
            # The payload is built lazily so it reflects the workflow
            # execution state after the DB transaction has committed.
            data = {
                "id": self.wf_ex.id,
                "name": self.wf_ex.name,
                "workflow_name": self.wf_ex.workflow_name,
                "workflow_namespace": self.wf_ex.workflow_namespace,
                "workflow_id": self.wf_ex.workflow_id,
                "state": self.wf_ex.state,
                "state_info": self.wf_ex.state_info,
                "project_id": self.wf_ex.project_id,
                "task_execution_id": self.wf_ex.task_execution_id
            }

            notifier.notify(
                self.wf_ex.id,
                data,
                event,
                self.wf_ex.updated_at,
                filtered_publishers
            )

        post_tx_queue.register_operation(_send_notification)
Exemplo n.º 9
0
def _check_affected_tasks(task):
    """Schedule state refreshes for 'join' tasks indirectly affected by *task*.

    Does nothing unless the given task has completed and its workflow is
    still running. The refresh scheduling itself is deferred to run inside
    a post-transaction operation.
    """
    if not task.is_completed():
        return

    task_ex = task.task_ex
    wf_ex = task_ex.workflow_execution

    # Nothing to refresh once the whole workflow has already completed.
    if states.is_completed(wf_ex.state):
        return

    wf_spec = spec_parser.get_workflow_spec_by_execution_id(
        task_ex.workflow_execution_id)

    wf_ctrl = wf_base.get_controller(wf_ex, wf_spec)

    affected = wf_ctrl.find_indirectly_affected_task_executions(task_ex.name)

    def _schedule_if_needed(t_ex_id):
        # NOTE(rakhmerov): we need to minimize the number of delayed calls
        # that refresh state of "join" tasks. We'll check if corresponding
        # calls are already scheduled. Note that we must ignore delayed calls
        # that are currently being processed because of a possible race with
        # the transaction that deletes delayed calls, i.e. the call may still
        # exist in DB (the deleting transaction didn't commit yet) but it has
        # already been processed and the task state hasn't changed.
        cnt = db_api.get_delayed_calls_count(
            key=_get_refresh_state_job_key(t_ex_id),
            processing=False)

        if cnt == 0:
            _schedule_refresh_task_state(t_ex_id)

    for t_ex in affected:
        post_tx_queue.register_operation(
            _schedule_if_needed, args=[t_ex.id], in_tx=True)
Exemplo n.º 10
0
def _check_affected_tasks(task):
    """Queue refresh-state jobs for 'join' tasks that *task* may unblock.

    Bails out early when the task hasn't completed or when its workflow
    execution has already reached a terminal state.
    """
    if not task.is_completed():
        return

    t_ex = task.task_ex
    workflow_ex = t_ex.workflow_execution

    if states.is_completed(workflow_ex.state):
        return

    spec = spec_parser.get_workflow_spec_by_execution_id(
        t_ex.workflow_execution_id
    )

    controller = wf_base.get_controller(workflow_ex, spec)

    indirectly_affected = controller.find_indirectly_affected_task_executions(
        t_ex.name
    )

    def _schedule_if_needed(t_ex_id):
        # NOTE(rakhmerov): minimize the number of delayed calls that refresh
        # the state of "join" tasks by skipping task executions that already
        # have such a call scheduled. Delayed calls currently being processed
        # must be ignored because of a possible race with the transaction
        # that deletes delayed calls: the call may still exist in the DB
        # (its deleting transaction hasn't committed yet) even though it has
        # already been processed and the task state hasn't changed.
        pending = db_api.get_delayed_calls_count(
            key=_get_refresh_state_job_key(t_ex_id),
            processing=False
        )

        if not pending:
            _schedule_refresh_task_state(t_ex_id)

    for affected_ex in indirectly_affected:
        post_tx_queue.register_operation(
            _schedule_if_needed,
            args=[affected_ex.id],
            in_tx=True
        )
Exemplo n.º 11
0
    def schedule(self,
                 input_dict,
                 target,
                 index=0,
                 desc='',
                 safe_rerun=False,
                 timeout=None):
        """Schedule the action to run on an executor.

        Creates the action execution DB object and registers the executor
        call to run after the main DB transaction commits.

        :param input_dict: Action input.
        :param target: Target (executor) to run the action on.
        :param index: Index of the action execution within the task.
        :param desc: Human-readable description of the action execution.
        :param safe_rerun: Whether the action can safely be re-run.
        :param timeout: Timeout in seconds after which the action aborts.
        """
        assert not self.action_ex

        self.validate_input(input_dict)

        # Assign the action execution ID here to minimize database calls.
        # Otherwise the 'input' property of the action execution DB object
        # would have to be updated with its own ID right after creation.
        action_ex_id = utils.generate_unicode_uuid()

        self._create_action_execution(
            self._prepare_input(input_dict),
            self._prepare_runtime_context(index, safe_rerun),
            self.is_sync(input_dict),
            desc=desc,
            action_ex_id=action_ex_id
        )

        exec_ctx = self._prepare_execution_context()

        def _run_action():
            # Runs outside of the main DB transaction.
            executor = exe.get_executor(cfg.CONF.executor.type)

            executor.run_action(
                self.action_ex.id,
                self.action_def.action_class,
                self.action_def.attributes or {},
                self.action_ex.input,
                self.action_ex.runtime_context.get('safe_rerun', False),
                exec_ctx,
                target=target,
                timeout=timeout
            )

        post_tx_queue.register_operation(_run_action)
Exemplo n.º 12
0
def _check_affected_tasks(task):
    """Refresh the state of 'join' tasks indirectly affected by *task*.

    TODO(rakhmerov): this method should eventually move into the class
    Task. The obvious signal is the only argument that it takes.
    """
    if not task.is_completed():
        return

    task_ex = task.task_ex
    wf_ex = task_ex.workflow_execution

    # A completed workflow has no joins left to refresh.
    if states.is_completed(wf_ex.state):
        return

    wf_spec = spec_parser.get_workflow_spec_by_execution_id(
        task_ex.workflow_execution_id
    )

    wf_ctrl = wf_base.get_controller(wf_ex, wf_spec)

    affected = wf_ctrl.find_indirectly_affected_task_executions(task_ex.name)

    def _schedule_if_needed(t_ex_id):
        # NOTE(rakhmerov): minimize the number of scheduled jobs that refresh
        # the state of "join" tasks by skipping task executions that already
        # have such a job scheduled. Jobs currently being processed must be
        # ignored because of a possible race with the transaction that
        # deletes scheduled jobs: the job may still exist in the DB (its
        # deleting transaction hasn't committed yet) even though it has
        # already been processed and the task state hasn't changed.
        sched = sched_base.get_system_scheduler()

        if not sched.has_scheduled_jobs(
                key=_get_refresh_state_job_key(t_ex_id), processing=False):
            _schedule_refresh_task_state(t_ex_id)

    for t_ex in affected:
        post_tx_queue.register_operation(
            _schedule_if_needed,
            args=[t_ex.id],
            in_tx=True
        )
Exemplo n.º 13
0
    def schedule(self, input_dict, target, index=0, desc='', safe_rerun=False,
                 timeout=None):
        """Schedule the action for execution on an executor.

        :param input_dict: Action input.
        :param target: Target (executor) to run the action on.
        :param index: Index of the action execution within the task.
        :param desc: Human-readable description of the action execution.
        :param safe_rerun: Whether the action can safely be re-run.
        :param timeout: Timeout in seconds after which the action aborts.
        """
        assert not self.action_ex

        self.validate_input(input_dict)

        # The action execution ID is generated up front to avoid an extra
        # DB update of the execution's 'input' property after the record
        # has been created.
        action_ex_id = utils.generate_unicode_uuid()

        prepared_input = self._prepare_input(input_dict)
        runtime_ctx = self._prepare_runtime_context(index, safe_rerun)

        self._create_action_execution(
            prepared_input,
            runtime_ctx,
            self.is_sync(input_dict),
            desc=desc,
            action_ex_id=action_ex_id
        )

        execution_context = self._prepare_execution_context()

        # Send the action to an executor outside of the main DB transaction.
        def _run_action():
            executor = exe.get_executor(cfg.CONF.executor.type)

            executor.run_action(
                self.action_ex.id,
                self.action_def.action_class,
                self.action_def.attributes or {},
                self.action_ex.input,
                self.action_ex.runtime_context.get('safe_rerun', False),
                execution_context,
                target=target,
                timeout=timeout
            )

        post_tx_queue.register_operation(_run_action)
Exemplo n.º 14
0
    def schedule(self,
                 input_dict,
                 target,
                 index=0,
                 desc='',
                 safe_rerun=False,
                 timeout=None):
        """Schedule a sub-workflow execution for this task.

        Resolves the sub-workflow definition relative to the parent
        workflow, splits the given input into declared workflow input and
        extra workflow parameters, and starts the sub-workflow either via
        an asynchronous RPC call registered for after the DB transaction
        commits, or directly in-process, depending on configuration.

        :param input_dict: Input for the sub-workflow. Mutated in place:
            keys not declared as workflow input are moved into the
            workflow parameters.
        :param target: Unused in this implementation; kept for signature
            compatibility with action scheduling.
        :param index: Execution index of the sub-workflow within the task.
        :param desc: Unused; the description is hardcoded below.
        :param safe_rerun: Unused in this implementation.
        :param timeout: Unused in this implementation.
        """
        assert not self.action_ex

        self.validate_input(input_dict)

        parent_wf_ex = self.task_ex.workflow_execution
        parent_wf_spec = spec_parser.get_workflow_spec_by_execution_id(
            parent_wf_ex.id)

        # Resolve which workflow definition self.wf_name refers to in the
        # context of the parent workflow and its namespace.
        wf_def = engine_utils.resolve_workflow_definition(
            parent_wf_ex.workflow_name,
            parent_wf_spec.get_name(),
            namespace=parent_wf_ex.params['namespace'],
            wf_spec_name=self.wf_name)

        wf_spec = spec_parser.get_workflow_spec_by_definition_id(
            wf_def.id, wf_def.updated_at)

        # If the parent has a root_execution_id, it must be a sub-workflow. So
        # we should propagate that ID down. Otherwise the parent must be the
        # root execution and we should use the parents ID.
        root_execution_id = parent_wf_ex.root_execution_id or parent_wf_ex.id

        wf_params = {
            'root_execution_id': root_execution_id,
            'task_execution_id': self.task_ex.id,
            'index': index,
            'namespace': parent_wf_ex.params['namespace']
        }

        # Propagate notification settings so sub-workflow events reach the
        # same publishers as the parent's.
        if 'notify' in parent_wf_ex.params:
            wf_params['notify'] = parent_wf_ex.params['notify']

        # Anything not declared as workflow input becomes a workflow
        # parameter (note: input_dict is mutated here).
        for k, v in list(input_dict.items()):
            if k not in wf_spec.get_input():
                wf_params[k] = v
                del input_dict[k]

        if cfg.CONF.engine.start_subworkflows_via_rpc:

            def _start_subworkflow():
                rpc.get_engine_client().start_workflow(
                    wf_def.id,
                    wf_def.namespace,
                    None,
                    input_dict,
                    "sub-workflow execution",
                    async_=True,
                    **wf_params)

            # Start the sub-workflow via RPC outside of the main DB
            # transaction.
            post_tx_queue.register_operation(_start_subworkflow)
        else:
            wf_handler.start_workflow(wf_def.id, wf_def.namespace, None,
                                      input_dict, "sub-workflow execution",
                                      wf_params)
Exemplo n.º 15
0
    def schedule(self, input_dict, target, index=0, desc='', safe_rerun=False,
                 timeout=None):
        """Start a sub-workflow execution for this task.

        Keys of *input_dict* that aren't declared as workflow input are
        moved into the workflow parameters (the dict is mutated in place).
        The sub-workflow is launched via asynchronous RPC after the current
        DB transaction commits, or synchronously in-process, depending on
        the engine configuration.

        :param input_dict: Input for the sub-workflow (mutated in place).
        :param target: Unused here; kept for signature compatibility.
        :param index: Execution index of the sub-workflow within the task.
        :param desc: Unused; the description is hardcoded below.
        :param safe_rerun: Unused here.
        :param timeout: Unused here.
        """
        assert not self.action_ex

        self.validate_input(input_dict)

        parent_ex = self.task_ex.workflow_execution
        parent_spec = spec_parser.get_workflow_spec_by_execution_id(
            parent_ex.id
        )
        namespace = parent_ex.params['namespace']

        wf_def = engine_utils.resolve_workflow_definition(
            parent_ex.workflow_name,
            parent_spec.get_name(),
            namespace=namespace,
            wf_spec_name=self.wf_name
        )

        wf_spec = spec_parser.get_workflow_spec_by_definition_id(
            wf_def.id,
            wf_def.updated_at
        )

        # A parent carrying a root_execution_id is itself a sub-workflow, so
        # that ID is propagated down; otherwise the parent is the root
        # execution and its own ID is used.
        wf_params = {
            'root_execution_id': parent_ex.root_execution_id or parent_ex.id,
            'task_execution_id': self.task_ex.id,
            'index': index,
            'namespace': namespace
        }

        if 'notify' in parent_ex.params:
            wf_params['notify'] = parent_ex.params['notify']

        # Move everything that isn't declared workflow input into params.
        for key in list(input_dict):
            if key not in wf_spec.get_input():
                wf_params[key] = input_dict.pop(key)

        if cfg.CONF.engine.start_subworkflows_via_rpc:
            def _start_subworkflow():
                rpc.get_engine_client().start_workflow(
                    wf_def.id,
                    wf_def.namespace,
                    None,
                    input_dict,
                    "sub-workflow execution",
                    async_=True,
                    **wf_params
                )

            post_tx_queue.register_operation(_start_subworkflow)
        else:
            wf_handler.start_workflow(
                wf_def.id,
                wf_def.namespace,
                None,
                input_dict,
                "sub-workflow execution",
                wf_params
            )