示例#1
0
    def test_workflow_spec_cache_update_via_workbook_service(self):
        """Spec caches must reflect workflow updates made via a workbook.

        Creates a workbook with a single-task workflow, warms the
        definition spec cache, then updates the workbook and verifies that
        fetching the spec again returns the new definition and adds a new
        cache entry (keyed by definition id + updated_at).
        """
        wb_text = """
        version: '2.0'

        name: wb

        workflows:
          wf:
            tasks:
              task1:
                action: std.echo output="Echo"
        """

        wb_service.create_workbook_v2(wb_text)

        # Creating a workbook alone must not populate either spec cache.
        self.assertEqual(0, spec_parser.get_wf_execution_spec_cache_size())
        self.assertEqual(0, spec_parser.get_wf_definition_spec_cache_size())

        wf = db_api.get_workflow_definition('wb.wf')

        wf_spec = spec_parser.get_workflow_spec_by_definition_id(
            wf.id, wf.updated_at)

        # Fetching by definition id warms only the definition spec cache.
        self.assertEqual(1, len(wf_spec.get_tasks()))
        self.assertEqual(0, spec_parser.get_wf_execution_spec_cache_size())
        self.assertEqual(1, spec_parser.get_wf_definition_spec_cache_size())

        # Now update workflow definition and check that cache is updated too.

        wb_text = """
        version: '2.0'

        name: wb

        workflows:
          wf:
            tasks:
              task1:
                action: std.echo output="1"

              task2:
                action: std.echo output="2"
        """

        wb_service.update_workbook_v2(wb_text)

        # The update itself doesn't evict the previously cached entry.
        self.assertEqual(0, spec_parser.get_wf_execution_spec_cache_size())
        self.assertEqual(1, spec_parser.get_wf_definition_spec_cache_size())

        wf = db_api.get_workflow_definition(wf.id)

        wf_spec = spec_parser.get_workflow_spec_by_definition_id(
            wf.id, wf.updated_at)

        # The new 'updated_at' keys a fresh cache entry with both tasks.
        self.assertEqual(2, len(wf_spec.get_tasks()))
        self.assertEqual(0, spec_parser.get_wf_execution_spec_cache_size())
        self.assertEqual(2, spec_parser.get_wf_definition_spec_cache_size())
示例#2
0
def pause_workflow(wf_ex, msg=None):
    """Transition the given workflow execution into the PAUSED state.

    :param wf_ex: Workflow execution to pause.
    :param msg: Optional state info message.
    """
    wf_def = db_api.get_workflow_definition(wf_ex.workflow_id)

    workflows.Workflow(wf_def, wf_ex=wf_ex).set_state(states.PAUSED, msg)
示例#3
0
    def get(self, identifier):
        """Fetch the workflow with the given identifier and return it."""
        LOG.info("Fetch workflow [identifier=%s]" % identifier)

        return Workflow.from_dict(
            db_api.get_workflow_definition(identifier).to_dict()
        )
示例#4
0
    def post(self, member_info):
        """Share this resource with a new member (status 'pending')."""
        LOG.info(
            "Share resource to a member. [resource_id=%s, "
            "resource_type=%s, member_info=%s].",
            self.resource_id,
            self.type,
            member_info
        )

        if not member_info.member_id:
            raise exc.WorkflowException("Member id must be provided.")

        wf_db = db_api.get_workflow_definition(self.resource_id)

        # Sharing only makes sense for private resources.
        if wf_db.scope != 'private':
            raise exc.WorkflowException(
                "Only private resource could be shared.")

        member_values = {
            'resource_id': self.resource_id,
            'resource_type': self.type,
            'member_id': member_info.member_id,
            'status': 'pending'
        }

        db_member = db_api.create_resource_member(member_values)

        return Member.from_dict(db_member.to_dict())
示例#5
0
def get_workflow_spec_by_id(wf_def_id):
    """Load a workflow definition by id and return its parsed spec.

    :param wf_def_id: Workflow definition id; falsy values yield None.
    """
    if wf_def_id:
        wf_def = db_api.get_workflow_definition(wf_def_id)

        return get_workflow_spec(wf_def.spec)

    return None
示例#6
0
    def post(self, member_info):
        """Share this resource with a new member after an ACL check."""
        acl.enforce('members:create', context.ctx())

        LOG.info(
            "Share resource to a member. [resource_id=%s, "
            "resource_type=%s, member_info=%s].",
            self.resource_id,
            self.type,
            member_info
        )

        if not member_info.member_id:
            raise exc.WorkflowException("Member id must be provided.")

        wf_db = db_api.get_workflow_definition(self.resource_id)

        # Only private resources can be shared.
        if wf_db.scope != 'private':
            raise exc.WorkflowException(
                "Only private resource could be shared.")

        db_member = db_api.create_resource_member({
            'resource_id': self.resource_id,
            'resource_type': self.type,
            'member_id': member_info.member_id,
            'status': 'pending'
        })

        return resources.Member.from_dict(db_member.to_dict())
示例#7
0
文件: member.py 项目: nmaludy/mistral
    def post(self, member_info):
        """Share this resource with a new member inside one transaction."""
        acl.enforce('members:create', context.ctx())

        LOG.info(
            "Share resource to a member. [resource_id=%s, "
            "resource_type=%s, member_info=%s].", self.resource_id, self.type,
            member_info)

        if not member_info.member_id:
            raise exc.WorkflowException("Member id must be provided.")

        with db_api.transaction():
            wf_def = db_api.get_workflow_definition(self.resource_id)

            # Sharing is allowed only for private resources.
            if wf_def.scope != 'private':
                raise exc.WorkflowException(
                    "Only private resource could be shared.")

            db_member = db_api.create_resource_member({
                'resource_id': self.resource_id,
                'resource_type': self.type,
                'member_id': member_info.member_id,
                'status': 'pending'
            })

        return resources.Member.from_db_model(db_member)
示例#8
0
def _on_task_complete(task_ex_id):
    """Process completion of a task execution within a DB transaction.

    Loads the task execution and its workflow, delegates completion
    handling to the workflow object and, if the workflow isn't completed
    yet, schedules another completion check. A Mistral error during
    handling fails the whole workflow.

    :param task_ex_id: Id of the completed task execution.
    """
    # Note: This method can only be called via scheduler.
    with db_api.transaction():
        task_ex = db_api.get_task_execution(task_ex_id)

        wf_ex = task_ex.workflow_execution

        wf = workflows.Workflow(db_api.get_workflow_definition(
            wf_ex.workflow_id),
                                wf_ex=wf_ex)

        try:
            wf.on_task_complete(task_ex)
        except exc.MistralException as e:
            # Completion handling failed: fail the workflow and stop here.
            msg = ("Failed to handle task completion [wf_ex=%s, task_ex=%s]:"
                   " %s\n%s" % (wf_ex, task_ex, e, tb.format_exc()))

            LOG.error(msg)

            fail_workflow(wf.wf_ex, msg)

            return

        if not states.is_completed(wf_ex.state):
            # TODO(rakhmerov): Moving forward we can implement some more fancy
            # algorithm for increasing delay for rescheduling so that we don't
            # put too serious load onto scheduler.
            delay = 1
            schedule_on_task_complete(task_ex, delay)
示例#9
0
    def get(self, name):
        """Look up and return the workflow with the given name."""
        LOG.info("Fetch workflow [name=%s]" % name)

        return Workflow.from_dict(
            db_api.get_workflow_definition(name).to_dict()
        )
示例#10
0
def get_workflow_spec_by_id(wf_def_id):
    """Return the parsed workflow spec for the given definition id.

    :param wf_def_id: Workflow definition id; falsy values yield None.
    """
    spec = None

    if wf_def_id:
        wf_def = db_api.get_workflow_definition(wf_def_id)
        spec = get_workflow_spec(wf_def.spec)

    return spec
示例#11
0
    def get(self, identifier):
        """Return the workflow identified by name or UUID."""
        LOG.info("Fetch workflow [identifier=%s]" % identifier)

        wf_def = db_api.get_workflow_definition(identifier)
        wf_dict = wf_def.to_dict()

        return Workflow.from_dict(wf_dict)
示例#12
0
    def start_workflow(self, wf_name, wf_input, description='', **params):
        """Start a new execution of the named workflow.

        :param wf_name: Name of the workflow definition to run.
        :param wf_input: Input dict for the workflow.
        :param description: Optional execution description.
        :param params: Additional workflow parameters (canonized below).
        :return: A clone of the created workflow execution.
        """
        wf_exec_id = None

        try:
            params = self._canonize_workflow_params(params)

            with db_api.transaction():
                wf_def = db_api.get_workflow_definition(wf_name)
                wf_spec = spec_parser.get_workflow_spec(wf_def.spec)

                eng_utils.validate_input(wf_def, wf_input, wf_spec)

                wf_ex = self._create_workflow_execution(
                    wf_def, wf_spec, wf_input, description, params)
                wf_exec_id = wf_ex.id

                wf_trace.info(wf_ex, "Starting workflow: '%s'" % wf_name)

                wf_ctrl = wf_base.WorkflowController.get_controller(
                    wf_ex, wf_spec)

                self._dispatch_workflow_commands(wf_ex,
                                                 wf_ctrl.continue_workflow())

                return wf_ex.get_clone()
        except Exception as e:
            LOG.error("Failed to start workflow '%s' id=%s: %s\n%s", wf_name,
                      wf_exec_id, e, traceback.format_exc())
            self._fail_workflow(wf_exec_id, e)

            # Bare 'raise' preserves the original traceback; 'raise e' would
            # reset it on Python 2 and re-raise with a re-bound frame on 3.
            raise
示例#13
0
def start_workflow(wf_identifier, wf_input, desc, params):
    """Start a workflow execution and schedule a completion check."""
    wf_def = db_api.get_workflow_definition(wf_identifier)
    wf = workflows.Workflow(wf_def)

    wf.start(wf_input, desc=desc, params=params)

    wf_ex = wf.wf_ex

    _schedule_check_and_complete(wf_ex)

    return wf_ex
示例#14
0
def rerun_workflow(wf_ex, task_ex, reset=True, env=None):
    """Rerun a workflow from the given task unless the workflow is paused."""
    if wf_ex.state == states.PAUSED:
        return wf_ex.get_clone()

    wf_def = db_api.get_workflow_definition(wf_ex.workflow_id)
    wf = workflows.Workflow(wf_def, wf_ex=wf_ex)

    wf.rerun(task_ex, reset=reset, env=env)
示例#15
0
    def get(self, identifier):
        """Return the named workflow after an ACL check."""
        acl.enforce('workflows:get', context.ctx())
        LOG.info("Fetch workflow [identifier=%s]" % identifier)

        return Workflow.from_dict(
            db_api.get_workflow_definition(identifier).to_dict()
        )
示例#16
0
    def get(self, identifier):
        """Fetch a workflow by name or UUID (requires 'workflows:get')."""
        acl.enforce('workflows:get', context.ctx())
        LOG.info("Fetch workflow [identifier=%s]" % identifier)

        wf_def = db_api.get_workflow_definition(identifier)

        return Workflow.from_dict(wf_def.to_dict())
示例#17
0
def resume_workflow(wf_ex, env=None):
    """Resume a paused or idle workflow execution.

    Returns a clone of the execution unchanged when it is not in a
    resumable state.
    """
    if not states.is_paused_or_idle(wf_ex.state):
        return wf_ex.get_clone()

    wf_def = db_api.get_workflow_definition(wf_ex.workflow_id)

    workflows.Workflow(wf_def, wf_ex=wf_ex).resume(env=env)
示例#18
0
def start_workflow(wf_identifier, wf_input, desc, params):
    """Create and start a workflow execution for the given definition."""
    wf_def = db_api.get_workflow_definition(wf_identifier)

    wf = workflows.Workflow(wf_def)
    wf.start(wf_input, desc=desc, params=params)

    return wf.wf_ex
示例#19
0
def create_cron_trigger(name, workflow_name, workflow_input,
                        workflow_params=None, pattern=None, first_time=None,
                        count=None, start_time=None, workflow_id=None):
    """Create a cron trigger for a workflow.

    :param name: Trigger name.
    :param workflow_name: Name of the workflow to run.
    :param workflow_input: Input dict for triggered executions.
    :param workflow_params: Optional workflow parameters.
    :param pattern: Optional cron pattern for recurring execution.
    :param first_time: Optional first execution time, a datetime or a
        '%Y-%m-%d %H:%M' string (interpreted as local time).
    :param count: Optional number of remaining executions.
    :param start_time: Base time used to compute the next execution time;
        defaults to now.
    :param workflow_id: Optional workflow definition id; takes precedence
        over workflow_name when given.
    :return: Created cron trigger DB object.
    :raises InvalidModelException: if first_time can't be parsed.
    """
    if not start_time:
        start_time = datetime.datetime.now()

    if isinstance(first_time, six.string_types):
        try:
            first_time = datetime.datetime.strptime(
                first_time,
                '%Y-%m-%d %H:%M'
            )
        except ValueError as e:
            # str(e) works on both Python 2 and 3; BaseException.message
            # was removed in Python 3.
            raise exc.InvalidModelException(str(e))

    validate_cron_trigger_input(pattern, first_time, count)

    first_utc_time = first_time

    if first_time:
        # Convert the user-supplied local time to UTC.
        first_second = time.mktime(first_time.timetuple())
        first_utc_time = datetime.datetime.utcfromtimestamp(first_second)
        next_time = first_utc_time

        # A one-off trigger with no recurrence runs exactly once.
        if not (pattern or count):
            count = 1
    else:
        next_time = get_next_execution_time(pattern, start_time)

    with db_api.transaction():
        wf_def = db_api.get_workflow_definition(
            workflow_id if workflow_id else workflow_name
        )

        # Fail early if the input doesn't match the workflow spec.
        eng_utils.validate_input(
            wf_def,
            workflow_input or {},
            parser.get_workflow_spec(wf_def.spec)
        )

        values = {
            'name': name,
            'pattern': pattern,
            'first_execution_time': first_utc_time,
            'next_execution_time': next_time,
            'remaining_executions': count,
            'workflow_name': wf_def.name,
            'workflow_id': wf_def.id,
            'workflow_input': workflow_input or {},
            'workflow_params': workflow_params or {},
            'scope': 'private'
        }

        security.add_trust_id(values)

        trig = db_api.create_cron_trigger(values)

    return trig
示例#20
0
def resume_workflow(wf_ex, env=None):
    """Move a paused or idle workflow execution back into operation."""
    if states.is_paused_or_idle(wf_ex.state):
        wf = workflows.Workflow(
            db_api.get_workflow_definition(wf_ex.workflow_id),
            wf_ex=wf_ex
        )

        wf.resume(env=env)

        return None

    # Nothing to resume: hand back a clone of the current execution.
    return wf_ex.get_clone()
示例#21
0
def start_workflow(wf_identifier, wf_input, desc, params):
    """Kick off a new execution of the identified workflow."""
    wf = workflows.Workflow(db_api.get_workflow_definition(wf_identifier))

    wf.start(wf_input, desc=desc, params=params)

    # Periodically verify whether the workflow can be completed.
    _schedule_check_and_complete(wf.wf_ex)

    return wf.wf_ex
示例#22
0
def rerun_workflow(wf_ex, task_ex, reset=True, env=None):
    """Rerun a workflow from the given task; no-op clone when paused."""
    if wf_ex.state == states.PAUSED:
        return wf_ex.get_clone()

    wf_def = db_api.get_workflow_definition(wf_ex.workflow_id)

    workflows.Workflow(wf_def, wf_ex=wf_ex).rerun(
        task_ex, reset=reset, env=env)
示例#23
0
def stop_workflow(wf_ex, state, msg=None):
    """Force a workflow execution into the given terminal-ish state.

    Errors are deliberately not handled here: failing a workflow is itself
    implemented as stopping it with the ERROR state, so the usual failure
    path can't be used and exceptions must propagate.
    """
    wf_def = db_api.get_workflow_definition(wf_ex.workflow_id)

    workflows.Workflow(wf_def, wf_ex=wf_ex).stop(state, msg)
示例#24
0
    def delete(self, name):
        """Delete the named workflow unless it is a system workflow."""
        LOG.info("Delete workflow [name=%s]" % name)

        with db_api.transaction():
            wf_db = db_api.get_workflow_definition(name)

            # System workflows must never be removed by users.
            if wf_db.is_system:
                raise exc.DataAccessException(
                    "Attempt to delete a system workflow: %s" % name
                )

            db_api.delete_workflow_definition(name)
示例#25
0
def rerun_workflow(wf_ex, task_ex, reset=True, env=None):
    """Rerun a workflow from a task and reschedule completion checks."""
    if wf_ex.state == states.PAUSED:
        return wf_ex.get_clone()

    wf_def = db_api.get_workflow_definition(wf_ex.workflow_id)
    wf = workflows.Workflow(wf_def, wf_ex=wf_ex)

    wf.rerun(task_ex, reset=reset, env=env)

    _schedule_check_and_complete(wf_ex)

    # If this execution was launched by a task, re-check its parent too.
    if wf_ex.task_execution_id:
        _schedule_check_and_complete(wf_ex.task_execution.workflow_execution)
示例#26
0
def start_workflow(wf_identifier, wf_namespace, wf_input, desc, params):
    """Start a workflow execution in the given namespace."""
    wf_def = db_api.get_workflow_definition(wf_identifier, wf_namespace)

    # Default the execution namespace to the definition's namespace.
    params.setdefault('namespace', wf_def.namespace)

    wf = workflows.Workflow()
    wf.start(wf_def=wf_def, input_dict=wf_input, desc=desc, params=params)

    _schedule_check_and_complete(wf.wf_ex)

    return wf.wf_ex
示例#27
0
def create_workflow_execution(wf_identifier, wf_input, description, params):
    """Validate input and persist a new workflow execution.

    :return: Id of the created workflow execution.
    """
    params = canonize_workflow_params(params)

    definition = db_api.get_workflow_definition(wf_identifier)
    spec = spec_parser.get_workflow_spec(definition.spec)

    eng_utils.validate_input(definition, wf_input, spec)

    execution = _create_workflow_execution(
        definition, spec, wf_input, description, params
    )

    wf_trace.info(execution, "Starting workflow: '%s'" % wf_identifier)

    return execution.id
示例#28
0
def run_existing_task(task_ex_id):
    """Run an already-created task execution (used mostly by scheduler)."""
    task_ex = db_api.get_task_execution(task_ex_id)
    wf_def = db_api.get_workflow_definition(task_ex.workflow_name)

    # Explicitly move the task into the RUNNING state before dispatch.
    task_ex.state = states.RUNNING

    _run_existing_task(
        task_ex,
        spec_parser.get_task_spec(task_ex.spec),
        spec_parser.get_workflow_spec(wf_def.spec)
    )
示例#29
0
def run_existing_task(task_ex_id):
    """Run an existing task execution; needed mostly by the scheduler."""
    task_ex = db_api.get_task_execution(task_ex_id)

    # Parse both the task spec and the owning workflow spec.
    task_spec = spec_parser.get_task_spec(task_ex.spec)
    wf_def = db_api.get_workflow_definition(task_ex.workflow_name)
    wf_spec = spec_parser.get_workflow_spec(wf_def.spec)

    # Move the task to RUNNING explicitly before running it.
    task_ex.state = states.RUNNING

    _run_existing_task(task_ex, task_spec, wf_spec)
    def test_workflow_spec_cache_update_via_workbook_service(self):
        """The workflow spec cache must track updates made via a workbook.

        Creates a workbook with a one-task workflow, warms the spec cache,
        updates the workbook to a two-task workflow and verifies the cached
        spec reflects the change while the cache size stays at one entry.
        """
        wb_text = """
        version: '2.0'

        name: wb

        workflows:
          wf:
            tasks:
              task1:
                action: std.echo output="Echo"
        """

        wb_service.create_workbook_v2(wb_text)

        # Creating the workbook alone must not populate the cache.
        self.assertEqual(0, spec_parser.get_workflow_spec_cache_size())

        wf = db_api.get_workflow_definition('wb.wf')

        wf_spec = spec_parser.get_workflow_spec_by_id(wf.id)

        # First fetch warms the cache with one entry.
        self.assertEqual(1, len(wf_spec.get_tasks()))
        self.assertEqual(1, spec_parser.get_workflow_spec_cache_size())

        # Now update workflow definition and check that cache is updated too.

        wb_text = """
        version: '2.0'

        name: wb

        workflows:
          wf:
            tasks:
              task1:
                action: std.echo output="1"

              task2:
                action: std.echo output="2"
        """

        wb_service.update_workbook_v2(wb_text)

        # The cache keeps a single entry for this workflow id.
        self.assertEqual(1, spec_parser.get_workflow_spec_cache_size())

        wf_spec = spec_parser.get_workflow_spec_by_id(wf.id)

        # The cached entry now reflects the updated two-task workflow.
        self.assertEqual(2, len(wf_spec.get_tasks()))
        self.assertEqual(1, spec_parser.get_workflow_spec_cache_size())
示例#31
0
def rerun_workflow(wf_ex, task_ex, reset=True, env=None):
    """Rerun a workflow from a task and re-check completion afterwards."""
    if wf_ex.state == states.PAUSED:
        return wf_ex.get_clone()

    wf = workflows.Workflow(db_api.get_workflow_definition(wf_ex.workflow_id),
                            wf_ex=wf_ex)

    wf.rerun(task_ex, reset=reset, env=env)

    _schedule_check_and_complete(wf_ex)

    # When launched by a parent task, the parent workflow needs
    # a completion check as well.
    if wf_ex.task_execution_id:
        parent_wf_ex = wf_ex.task_execution.workflow_execution
        _schedule_check_and_complete(parent_wf_ex)
示例#32
0
def on_task_complete(task_ex):
    """Let the owning workflow react to completion of one of its tasks.

    Any Mistral error raised during handling fails the whole workflow.
    """
    wf_ex = task_ex.workflow_execution

    wf_def = db_api.get_workflow_definition(wf_ex.workflow_id)
    wf = workflows.Workflow(wf_def, wf_ex=wf_ex)

    try:
        wf.on_task_complete(task_ex)
    except exc.MistralException as e:
        msg = (
            "Failed to handle task completion [wf_ex=%s, task_ex=%s]: %s\n%s" %
            (wf_ex, task_ex, e, tb.format_exc()))

        LOG.error(msg)

        fail_workflow(wf.wf_ex, msg)
示例#33
0
    def get(self, identifier, namespace=''):
        """Return the named workflow.

        :param identifier: Name or UUID of the workflow to retrieve.
        :param namespace: Optional. Namespace of the workflow to retrieve.
        """
        acl.enforce('workflows:get', context.ctx())

        LOG.info("Fetch workflow [identifier=%s]", identifier)

        wf_def = db_api.get_workflow_definition(identifier,
                                                namespace=namespace)

        return resources.Workflow.from_db_model(wf_def)
示例#34
0
        def _create_resource_member():
            """Create a pending member record for this private resource."""
            with db_api.transaction():
                wf_def = db_api.get_workflow_definition(self.resource_id)

                # Only private resources can be shared.
                if wf_def.scope != 'private':
                    raise exc.WorkflowException(
                        "Only private resource could be shared.")

                return db_api.create_resource_member({
                    'resource_id': self.resource_id,
                    'resource_type': self.type,
                    'member_id': member_info.member_id,
                    'status': 'pending'
                })
示例#35
0
def run_existing_task(task_ex_id, reset=True):
    """This function runs existing task execution.

    It is needed mostly by scheduler.

    :param task_ex_id: Task execution id.
    :param reset: Reset action executions for the task.
    :return: The task execution (unchanged if it failed and no reset
        was requested for a non-with-items task).
    :raises EngineException: when rerunning a task that already succeeded.
    """
    task_ex = db_api.get_task_execution(task_ex_id)
    task_spec = spec_parser.get_task_spec(task_ex.spec)
    wf_def = db_api.get_workflow_definition(task_ex.workflow_name)
    wf_spec = spec_parser.get_workflow_spec(wf_def.spec)

    # Throw exception if the existing task already succeeded.
    if task_ex.state == states.SUCCESS:
        raise exc.EngineException(
            'Rerunning existing task that already succeeded is not supported.'
        )

    # Exit if the existing task failed and reset is not instructed.
    # For a with-items task without reset, re-running the existing
    # task will re-run the failed and unstarted items.
    if (task_ex.state == states.ERROR and not reset and
            not task_spec.get_with_items()):
        return task_ex

    # Reset nested executions only if task is not already RUNNING.
    if task_ex.state != states.RUNNING:
        # Reset state of processed task and related action executions.
        if reset:
            action_exs = task_ex.executions
        else:
            # Without a full reset, only previously accepted errored
            # action executions are marked for re-processing.
            action_exs = db_api.get_action_executions(
                task_execution_id=task_ex.id,
                state=states.ERROR,
                accepted=True
            )

        for action_ex in action_exs:
            action_ex.accepted = False

    # Explicitly change task state to RUNNING.
    set_task_state(task_ex, states.RUNNING, None, processed=False)

    _run_existing_task(task_ex, task_spec, wf_spec)

    return task_ex
示例#36
0
        def _create_resource_member():
            """Insert a new resource member row with 'pending' status."""
            with db_api.transaction():
                wf_db = db_api.get_workflow_definition(self.resource_id)

                # Sharing is restricted to private resources.
                if wf_db.scope != 'private':
                    raise exc.WorkflowException(
                        "Only private resource could be shared."
                    )

                member_values = {
                    'resource_id': self.resource_id,
                    'resource_type': self.type,
                    'member_id': member_info.member_id,
                    'status': 'pending'
                }

                return db_api.create_resource_member(member_values)
示例#37
0
def run_existing_task(task_ex_id, reset=True):
    """This function runs existing task execution.

    It is needed mostly by scheduler.

    :param task_ex_id: Task execution id.
    :param reset: Reset action executions for the task.
    :return: The task execution (unchanged if it failed and no reset
        was requested for a non-with-items task).
    :raises EngineException: when rerunning a task that already succeeded.
    """
    task_ex = db_api.get_task_execution(task_ex_id)
    task_spec = spec_parser.get_task_spec(task_ex.spec)
    wf_def = db_api.get_workflow_definition(task_ex.workflow_name)
    wf_spec = spec_parser.get_workflow_spec(wf_def.spec)

    # Throw exception if the existing task already succeeded.
    if task_ex.state == states.SUCCESS:
        raise exc.EngineException(
            'Rerunning existing task that already succeeded is not supported.')

    # Exit if the existing task failed and reset is not instructed.
    # For a with-items task without reset, re-running the existing
    # task will re-run the failed and unstarted items.
    if (task_ex.state == states.ERROR and not reset
            and not task_spec.get_with_items()):
        return task_ex

    # Reset nested executions only if task is not already RUNNING.
    if task_ex.state != states.RUNNING:
        # Reset state of processed task and related action executions.
        if reset:
            action_exs = task_ex.executions
        else:
            # Without a full reset, only accepted errored action
            # executions are marked for re-processing.
            action_exs = db_api.get_action_executions(
                task_execution_id=task_ex.id,
                state=states.ERROR,
                accepted=True)

        for action_ex in action_exs:
            action_ex.accepted = False

    # Explicitly change task state to RUNNING.
    set_task_state(task_ex, states.RUNNING, None, processed=False)

    _run_existing_task(task_ex, task_spec, wf_spec)

    return task_ex
示例#38
0
    def set_state(self, state, state_info=None, recursive=False):
        """Set the state of this workflow execution.

        :param state: New workflow state.
        :param state_info: Optional state info (e.g. an error message).
        :param recursive: If True, also propagate the state up to the
            parent workflow execution when this one was launched by a task.
        :raises WorkflowException: if the transition from the current
            state is not valid.
        """
        assert self.wf_ex

        cur_state = self.wf_ex.state

        if states.is_valid_transition(cur_state, state):
            self.wf_ex.state = state
            self.wf_ex.state_info = state_info

            wf_trace.info(
                self.wf_ex, "Workflow '%s' [%s -> %s, msg=%s]" %
                (self.wf_ex.workflow_name, cur_state, state, state_info))
        else:
            msg = ("Can't change workflow execution state from %s to %s. "
                   "[workflow=%s, execution_id=%s]" %
                   (cur_state, state, self.wf_ex.name, self.wf_ex.id))

            raise exc.WorkflowException(msg)

        # Workflow result should be accepted by parent workflows (if any)
        # only if it completed successfully or failed.
        self.wf_ex.accepted = states.is_completed(state)

        if states.is_completed(state):
            # No need to keep task executions of this workflow in the
            # lookup cache anymore.
            lookup_utils.invalidate_cached_task_executions(self.wf_ex.id)

        if recursive and self.wf_ex.task_execution_id:
            parent_task_ex = db_api.get_task_execution(
                self.wf_ex.task_execution_id)

            parent_wf = Workflow(
                db_api.get_workflow_definition(parent_task_ex.workflow_id),
                parent_task_ex.workflow_execution)

            # Lock the parent before mutating its state recursively.
            parent_wf.lock()

            parent_wf.set_state(state, recursive=recursive)

            # TODO(rakhmerov): It'd be better to use instance of Task here.
            parent_task_ex.state = state
            parent_task_ex.state_info = None
            parent_task_ex.processed = False
示例#39
0
def on_task_complete(task_ex):
    """Handle completion of a task within its workflow execution.

    A Mistral error raised while handling completion fails the workflow.
    """
    wf_ex = task_ex.workflow_execution
    wf_def = db_api.get_workflow_definition(wf_ex.workflow_id)

    wf = workflows.Workflow(wf_def, wf_ex=wf_ex)

    try:
        wf.on_task_complete(task_ex)
    except exc.MistralException as e:
        error_msg = (
            "Failed to handle task completion [wf_ex=%s, task_ex=%s]: %s\n%s"
            % (wf_ex, task_ex, e, tb.format_exc())
        )

        LOG.error(error_msg)

        fail_workflow(wf.wf_ex, error_msg)
示例#40
0
    def set_state(self, state, state_info=None, recursive=False):
        """Set the state of this workflow execution.

        :param state: New workflow state.
        :param state_info: Optional state info (e.g. an error message).
        :param recursive: If True, also propagate the state up to the
            parent workflow execution when this one was launched by a task.
        :raises WorkflowException: if the transition from the current
            state is not valid.
        """
        assert self.wf_ex

        cur_state = self.wf_ex.state

        if states.is_valid_transition(cur_state, state):
            self.wf_ex.state = state
            self.wf_ex.state_info = state_info

            wf_trace.info(
                self.wf_ex,
                "Execution of workflow '%s' [%s -> %s]"
                % (self.wf_ex.workflow_name, cur_state, state)
            )
        else:
            msg = ("Can't change workflow execution state from %s to %s. "
                   "[workflow=%s, execution_id=%s]" %
                   (cur_state, state, self.wf_ex.name, self.wf_ex.id))

            raise exc.WorkflowException(msg)

        # Workflow result should be accepted by parent workflows (if any)
        # only if it completed successfully or failed.
        self.wf_ex.accepted = states.is_completed(state)

        if recursive and self.wf_ex.task_execution_id:
            parent_task_ex = db_api.get_task_execution(
                self.wf_ex.task_execution_id
            )

            parent_wf = Workflow(
                db_api.get_workflow_definition(parent_task_ex.workflow_id),
                parent_task_ex.workflow_execution
            )

            # Lock the parent before mutating its state recursively.
            parent_wf.lock()

            parent_wf.set_state(state, recursive=recursive)

            # TODO(rakhmerov): It'd be better to use instance of Task here.
            parent_task_ex.state = state
            parent_task_ex.state_info = None
            parent_task_ex.processed = False
示例#41
0
def create_workflow_execution(wf_identifier, wf_input, description, params):
    """Create and record a new workflow execution.

    :return: Id of the new execution.
    """
    canonized_params = canonize_workflow_params(params)

    wf_def = db_api.get_workflow_definition(wf_identifier)
    wf_spec = spec_parser.get_workflow_spec(wf_def.spec)

    eng_utils.validate_input(wf_def, wf_input, wf_spec)

    wf_ex = _create_workflow_execution(
        wf_def, wf_spec, wf_input, description, canonized_params)

    wf_trace.info(wf_ex, "Starting workflow: '%s'" % wf_identifier)

    return wf_ex.id
示例#42
0
def stop_workflow(wf_ex, state, msg=None):
    """Force a workflow execution into the given state.

    Errors intentionally propagate: failing a workflow is itself
    implemented as stopping it with the ERROR state, so the normal
    failure path can't be used here. Cancellation is cascaded to all
    incomplete subworkflows.
    """
    wf_def = db_api.get_workflow_definition(wf_ex.workflow_id)

    workflows.Workflow(wf_def, wf_ex=wf_ex).stop(state, msg)

    if state != states.CANCELLED:
        return

    # Cancel subworkflows spawned by this workflow's tasks.
    for task_ex in wf_ex.task_executions:
        sub_wf_exs = db_api.get_workflow_executions(
            task_execution_id=task_ex.id)

        for sub_wf_ex in sub_wf_exs:
            if not states.is_completed(sub_wf_ex.state):
                stop_workflow(sub_wf_ex, state, msg=msg)
示例#43
0
def create_cron_trigger(name, workflow_name, workflow_input,
                        workflow_params=None, pattern=None, first_time=None,
                        count=None, start_time=None):
    """Create a cron trigger for the named workflow.

    NOTE(review): this variant uses `unicode` and `e.message`, which only
    exist on Python 2 — confirm the target interpreter before reusing.

    :param name: Trigger name.
    :param workflow_name: Workflow to run.
    :param workflow_input: Input dict for triggered executions.
    :param workflow_params: Optional workflow params.
    :param pattern: Optional cron pattern for recurring execution.
    :param first_time: Optional first run time (datetime or a
        '%Y-%m-%d %H:%M' string).
    :param count: Optional number of remaining executions.
    :param start_time: Base time for computing the next run; defaults
        to now.
    :return: Created cron trigger DB object.
    """
    if not start_time:
        start_time = datetime.datetime.now()

    if type(first_time) in [str, unicode]:
        try:
            first_time = datetime.datetime.strptime(first_time,
                                                    '%Y-%m-%d %H:%M')
        except ValueError as e:
            raise exc.InvalidModelException(e.message)

    validate_cron_trigger_input(pattern, first_time, count)

    if first_time:
        next_time = first_time
        # NOTE(review): a newer variant of this function uses
        # `not (pattern or count)` here — verify the intended semantics,
        # since this condition overwrites a caller-supplied count whenever
        # pattern is not also set.
        if not (pattern and count):
            count = 1
    else:
        next_time = get_next_execution_time(pattern, start_time)

    with db_api_v2.transaction():
        wf = db_api_v2.get_workflow_definition(workflow_name)

        values = {
            'name': name,
            'pattern': pattern,
            'next_execution_time': next_time,
            'remaining_executions': count,
            'workflow_name': workflow_name,
            'workflow_id': wf.id,
            'workflow_input': workflow_input or {},
            'workflow_params': workflow_params or {},
            'scope': 'private'
        }

        security.add_trust_id(values)

        trig = db_api_v2.create_cron_trigger(values)

    return trig
示例#44
0
def start_workflow(wf_identifier, wf_namespace, wf_ex_id, wf_input, desc,
                   params):
    """Start a workflow execution with a preallocated execution id."""
    wf_def = db_api.get_workflow_definition(wf_identifier, wf_namespace)

    # Default the execution namespace to the definition's namespace.
    params.setdefault('namespace', wf_def.namespace)

    wf = workflows.Workflow()

    wf.start(
        wf_def=wf_def,
        wf_ex_id=wf_ex_id,
        input_dict=wf_input,
        desc=desc,
        params=params
    )

    _schedule_check_and_fix_integrity(wf.wf_ex, delay=10)

    return wf.wf_ex
示例#45
0
    def start_workflow(self, wf_name, wf_input, description='', **params):
        """Start a new execution of the named workflow.

        :param wf_name: Name of the workflow definition to run.
        :param wf_input: Input dict for the workflow.
        :param description: Optional execution description.
        :param params: Additional workflow parameters (canonized below).
        :return: A clone of the created workflow execution.
        """
        wf_exec_id = None

        try:
            params = self._canonize_workflow_params(params)

            with db_api.transaction():
                wf_def = db_api.get_workflow_definition(wf_name)
                wf_spec = spec_parser.get_workflow_spec(wf_def.spec)

                eng_utils.validate_input(wf_def, wf_input, wf_spec)

                wf_ex = self._create_workflow_execution(
                    wf_def,
                    wf_spec,
                    wf_input,
                    description,
                    params
                )
                wf_exec_id = wf_ex.id

                wf_trace.info(wf_ex, "Starting workflow: '%s'" % wf_name)

                wf_ctrl = wf_base.WorkflowController.get_controller(
                    wf_ex,
                    wf_spec
                )

                self._dispatch_workflow_commands(
                    wf_ex,
                    wf_ctrl.continue_workflow()
                )

                return wf_ex.get_clone()
        except Exception as e:
            LOG.error(
                "Failed to start workflow '%s' id=%s: %s\n%s",
                wf_name, wf_exec_id, e, traceback.format_exc()
            )
            self._fail_workflow(wf_exec_id, e)

            # Bare 'raise' preserves the original traceback; 'raise e' would
            # reset it on Python 2 and re-raise with a re-bound frame on 3.
            raise
示例#46
0
def create_delay_tolerant_workload(name, workflow_name, workflow_input,
                                   workflow_params=None, deadline=None,
                                   job_duration=None, workflow_id=None):
    """Create and persist a delay tolerant workload.

    :param name: Workload name.
    :param workflow_name: Name of the workflow to run.
    :param workflow_input: Input dict for the workflow (may be None).
    :param workflow_params: Extra workflow parameters (may be None).
    :param deadline: Date/time string by which the workload must run;
        must parse to a moment at least one minute in the future.
    :param job_duration: Expected duration of the job.
    :param workflow_id: Optional workflow definition id; takes
        precedence over 'workflow_name' for the DB lookup.
    :return: The created delay tolerant workload DB object.
    :raises InvalidModelException: If 'deadline' is missing, unparsable
        or less than one minute in the future, or if the workflow input
        fails validation.
    """
    try:
        deadline = date_parser.parse(deadline)
    except (ValueError, TypeError) as e:
        # TypeError covers deadline=None (dateutil raises it for
        # non-string input). NOTE: exceptions have no '.message'
        # attribute on Python 3, so use str(e) instead.
        raise exc.InvalidModelException(str(e))

    if deadline < datetime.datetime.now() + datetime.timedelta(seconds=60):
        raise exc.InvalidModelException(
            'deadline must be at least 1 minute in the future.'
        )

    with db_api.transaction():
        wf_def = db_api.get_workflow_definition(
            workflow_id if workflow_id else workflow_name
        )

        eng_utils.validate_input(
            wf_def,
            workflow_input or {},
            parser.get_workflow_spec(wf_def.spec)
        )

        values = {
            'name': name,
            'deadline': deadline,
            'job_duration': job_duration,
            'workflow_name': wf_def.name,
            'workflow_id': wf_def.id,
            'workflow_input': workflow_input or {},
            'workflow_params': workflow_params or {},
            'scope': 'private',
            'executed': False
        }

        security.add_trust_id(values)

        dtw = db_api.create_delay_tolerant_workload(values)

    return dtw
示例#47
0
def _check_and_complete(wf_ex_id):
    """Check whether a workflow execution can be completed and act on it.

    Loads the execution, asks the Workflow object to complete it if
    possible, fails the workflow on a Mistral error, and otherwise
    re-schedules itself with a delay proportional to the number of
    still-incomplete tasks.

    :param wf_ex_id: Workflow execution id.
    """
    # Note: This method can only be called via scheduler.
    with db_api.transaction():
        wf_ex = db_api.load_workflow_execution(wf_ex_id)

        # The execution may have been deleted or may have completed
        # since this check was scheduled; nothing to do then.
        if not wf_ex or states.is_completed(wf_ex.state):
            return

        wf = workflows.Workflow(
            db_api.get_workflow_definition(wf_ex.workflow_id),
            wf_ex=wf_ex
        )

        try:
            incomplete_tasks_count = wf.check_and_complete()
        except exc.MistralException as e:
            msg = (
                "Failed to check and complete [wf_ex=%s]:"
                " %s\n%s" % (wf_ex, e, tb.format_exc())
            )

            LOG.error(msg)

            # An error while completing is treated as a workflow failure.
            force_fail_workflow(wf.wf_ex, msg)

            return

        if not states.is_completed(wf_ex.state):
            # Let's assume that a task takes 0.01 sec in average to complete
            # and based on this assumption calculate a time of the next check.
            # The estimation is very rough but this delay will be decreasing
            # as tasks will be completing which will give a decent
            # approximation.
            # For example, if a workflow has 100 incomplete tasks then the
            # next check call will happen in 1 second. For 500 tasks it will
            # be 5 seconds. The larger the workflow is, the more beneficial
            # this mechanism will be.
            delay = int(incomplete_tasks_count * 0.01)

            _schedule_check_and_complete(wf_ex, delay)
def _check_and_complete(wf_ex_id):
    """Re-evaluate a workflow execution and complete it when possible.

    :param wf_ex_id: Workflow execution id.
    """
    # Note: This method can only be called via scheduler.
    with db_api.transaction():
        wf_ex = db_api.load_workflow_execution(wf_ex_id)

        # Execution gone or already finished - nothing left to check.
        if not wf_ex or states.is_completed(wf_ex.state):
            return

        wf_def = db_api.get_workflow_definition(wf_ex.workflow_id)
        wf = workflows.Workflow(wf_def, wf_ex=wf_ex)

        try:
            remaining_count = wf.check_and_complete()
        except exc.MistralException as e:
            msg = (
                "Failed to check and complete [wf_ex=%s]:"
                " %s\n%s" % (wf_ex, e, tb.format_exc())
            )

            LOG.error(msg)

            # A completion-check error is fatal for the workflow.
            force_fail_workflow(wf.wf_ex, msg)

            return

        if states.is_completed(wf_ex.state):
            return

        # Heuristic: assume a task needs roughly 0.01 sec to complete
        # and schedule the next check proportionally. The estimate is
        # rough, but the delay shrinks as tasks finish, so it converges
        # towards a sensible polling rate even for large workflows.
        _schedule_check_and_complete(wf_ex, int(remaining_count * 0.01))
示例#49
0
def stop_workflow(wf_ex, state, msg=None):
    """Stop a workflow execution, cascading cancellation to subworkflows.

    :param wf_ex: Workflow execution to stop.
    :param state: Target state (e.g. ERROR or CANCELLED).
    :param msg: Optional human-readable reason.
    """
    wf_def = db_api.get_workflow_definition(wf_ex.workflow_id)
    wf = workflows.Workflow(wf_def, wf_ex=wf_ex)

    # In this case we should not try to handle possible errors. Instead,
    # we need to let them pop up since the typical way of failing objects
    # doesn't work here. Failing a workflow is the same as stopping it
    # with ERROR state.
    wf.stop(state, msg)

    if state != states.CANCELLED:
        return

    # Propagate the cancellation down to all incomplete subworkflow
    # executions spawned by this workflow's tasks.
    for task_ex in wf_ex.task_executions:
        sub_executions = db_api.get_workflow_executions(
            task_execution_id=task_ex.id
        )

        for sub_wf_ex in sub_executions:
            if not states.is_completed(sub_wf_ex.state):
                stop_workflow(sub_wf_ex, state, msg=msg)
示例#50
0
def get_workflow_spec_by_definition_id(wf_def_id, wf_def_updated_at):
    """Return the (cached) workflow spec for a definition id.

    The 'updated_at' argument participates only in the cache key: while
    the definition row in the DB is unchanged, callers keep getting the
    cached specification. Once the definition changes, callers supply a
    new 'updated_at' value, this function runs again and the spec for
    that key combination is refreshed. Stale entries are eventually
    evicted by the LRU policy when the cache fills up.

    :param wf_def_id: Workflow definition id.
    :param wf_def_updated_at: Definition's 'updated_at' timestamp; used
        solely as part of the cache key, never read in the body.
    :return: Workflow specification, or None when no id is given.
    """
    if not wf_def_id:
        return None

    return get_workflow_spec(db_api.get_workflow_definition(wf_def_id).spec)
示例#51
0
def create_cron_trigger(name, workflow_name, workflow_input,
                        workflow_params=None, pattern=None, first_time=None,
                        count=None, start_time=None, workflow_id=None):
    """Create a cron trigger bound to a workflow definition.

    :param name: Trigger name.
    :param workflow_name: Name of the workflow to trigger.
    :param workflow_input: Input dict passed to triggered executions.
    :param workflow_params: Extra workflow parameters.
    :param pattern: Cron pattern describing the recurrence.
    :param first_time: First execution time; either a datetime or a
        string in '%Y-%m-%d %H:%M' format.
    :param count: Remaining number of executions.
    :param start_time: Base time for computing the next execution time;
        defaults to current UTC time.
    :param workflow_id: Optional definition id; preferred over
        'workflow_name' for the DB lookup when given.
    :return: The created cron trigger DB object.
    :raises InvalidModelException: If 'first_time' cannot be parsed or
        trigger/workflow input validation fails.
    """
    if not start_time:
        start_time = datetime.datetime.utcnow()

    # Accept 'first_time' as a string and normalize it to a datetime.
    if isinstance(first_time, six.string_types):
        try:
            first_time = datetime.datetime.strptime(
                first_time, '%Y-%m-%d %H:%M')
        except ValueError as e:
            raise exc.InvalidModelException(str(e))

    validate_cron_trigger_input(pattern, first_time, count)

    if first_time:
        next_time = first_time

        # No pattern and no explicit count makes this a one-shot trigger.
        if not pattern and not count:
            count = 1
    else:
        next_time = get_next_execution_time(pattern, start_time)

    with db_api.transaction():
        wf_def = db_api.get_workflow_definition(
            workflow_id if workflow_id else workflow_name)

        wf_spec = parser.get_workflow_spec_by_definition_id(
            wf_def.id, wf_def.updated_at)

        # TODO(rakhmerov): Use Workflow object here instead of utils.
        eng_utils.validate_input(
            wf_spec.get_input(),
            workflow_input,
            wf_spec.get_name(),
            wf_spec.__class__.__name__)

        trigger_values = {
            'name': name,
            'pattern': pattern,
            'first_execution_time': first_time,
            'next_execution_time': next_time,
            'remaining_executions': count,
            'workflow_name': wf_def.name,
            'workflow_id': wf_def.id,
            'workflow_input': workflow_input or {},
            'workflow_params': workflow_params or {},
            'scope': 'private'
        }

        security.add_trust_id(trigger_values)

        try:
            trigger = db_api.create_cron_trigger(trigger_values)
        except Exception:
            # Delete trust before raising exception.
            security.delete_trust(trigger_values.get('trust_id'))
            raise

    return trigger
示例#52
0
def create_cron_trigger(name,
                        workflow_name,
                        workflow_input,
                        workflow_params=None,
                        pattern=None,
                        first_time=None,
                        count=None,
                        start_time=None,
                        workflow_id=None):
    """Create a cron trigger for the given workflow.

    :param name: Trigger name.
    :param workflow_name: Name of the workflow to trigger.
    :param workflow_input: Input dict passed to triggered executions.
    :param workflow_params: Extra workflow parameters.
    :param pattern: Cron pattern describing the recurrence.
    :param first_time: First execution time; a datetime or a string in
        '%Y-%m-%d %H:%M' format.
    :param count: Remaining number of executions.
    :param start_time: Base time for computing the next execution time;
        defaults to current UTC time.
    :param workflow_id: Optional definition id; used instead of
        'workflow_name' for the DB lookup when given.
    :return: The created cron trigger DB object.
    :raises InvalidModelException: If 'first_time' cannot be parsed or
        trigger/workflow input validation fails.
    """
    if not start_time:
        start_time = datetime.datetime.utcnow()

    # A string 'first_time' is parsed into a datetime up front.
    if isinstance(first_time, six.string_types):
        try:
            first_time = datetime.datetime.strptime(
                first_time,
                '%Y-%m-%d %H:%M'
            )
        except ValueError as e:
            raise exc.InvalidModelException(str(e))

    validate_cron_trigger_input(pattern, first_time, count)

    if first_time:
        next_exec_time = first_time

        # Without a pattern or an explicit count the trigger fires once.
        if not pattern and not count:
            count = 1
    else:
        next_exec_time = get_next_execution_time(pattern, start_time)

    with db_api.transaction():
        wf_def = db_api.get_workflow_definition(
            workflow_id if workflow_id else workflow_name
        )

        wf_spec = parser.get_workflow_spec_by_definition_id(
            wf_def.id,
            wf_def.updated_at
        )

        # TODO(rakhmerov): Use Workflow object here instead of utils.
        eng_utils.validate_input(
            wf_spec.get_input(),
            workflow_input,
            wf_spec.get_name(),
            wf_spec.__class__.__name__
        )

        values = {
            'name': name,
            'pattern': pattern,
            'first_execution_time': first_time,
            'next_execution_time': next_exec_time,
            'remaining_executions': count,
            'workflow_name': wf_def.name,
            'workflow_id': wf_def.id,
            'workflow_input': workflow_input or {},
            'workflow_params': workflow_params or {},
            'scope': 'private'
        }

        security.add_trust_id(values)

        try:
            trigger = db_api.create_cron_trigger(values)
        except Exception:
            # Delete trust before raising exception.
            security.delete_trust(values.get('trust_id'))
            raise

    return trigger