Exemplo n.º 1
0
def clear_all():
    """Destroys every stored logbook (cascading to flow/task details).

    Returns the number of logbooks that were destroyed.
    """
    with threading_utils.MultiLock(READ_SAVE_ORDER):
        count = 0
        # NOTE: iterate a snapshot of the keys since logbook_destroy()
        # mutates LOG_BOOKS while we loop. list(d) replaces the py2-only
        # d.iterkeys() and behaves the same on python 2 and 3.
        for lb_id in list(LOG_BOOKS):
            logbook_destroy(lb_id)
            count += 1
        return count
Exemplo n.º 2
0
    def add_dependency(self, provider_uuid, consumer_uuid):
        """Manually adds a dependency between a provider and a consumer."""

        def validate():
            # Mutation is only allowed while the flow is in a mutable state.
            if self.state not in self.MUTABLE_STATES:
                raise exc.InvalidStateException("Flow is currently in a"
                                                " non-mutable state %s"
                                                % (self.state))
            # Both endpoints must resolve to nodes already in the graph.
            src = self._find_uuid(provider_uuid)
            if not src or not self._graph.has_node(src):
                raise exc.InvalidStateException("Can not add a dependency "
                                                "from unknown uuid %s" %
                                                (provider_uuid))
            dst = self._find_uuid(consumer_uuid)
            if not dst or not self._graph.has_node(dst):
                raise exc.InvalidStateException("Can not add a dependency "
                                                "to unknown uuid %s"
                                                % (consumer_uuid))
            # A node depending on itself would form a self-loop.
            if src is dst:
                raise exc.InvalidStateException("Can not add a dependency "
                                                "to loop via uuid %s"
                                                % (consumer_uuid))
            return (src, dst)

        # Cheap pre-flight validation before paying for lock acquisition.
        validate()

        # All locks must be acquired so that modifications can not be made
        # while running, cancelling or performing a simultaneous mutation.
        with threading_utils.MultiLock(self._core_locks):
            # Re-validate now that we hold the locks; state may have changed.
            (src, dst) = validate()
            self._graph.add_edge(src, dst, reason='manual')
            LOG.debug("Connecting %s as a manual provider for %s",
                      src, dst)
Exemplo n.º 3
0
    def rollback(self, context, cause):
        """Rolls back all tasks that are *not* still pending or cancelled."""

        def ensure_revertable():
            # Rollback is only valid from a revertable state.
            if self.state not in self.REVERTABLE_STATES:
                raise exc.InvalidStateException("Flow is currently unable "
                                                "to be rolled back in "
                                                "state %s" % (self.state))

        # Fail fast before acquiring any locks.
        ensure_revertable()

        # All locks must be acquired so that modifications can not be made
        # while another entity is running, rolling-back, cancelling or
        # performing a mutation operation.
        with threading_utils.MultiLock(self._core_locks):
            ensure_revertable()
            accum = misc.RollbackAccumulator()
            # Only runners that actually ran have anything to undo.
            for runner in self._graph.nodes_iter():
                if runner.has_ran():
                    accum.add(misc.Rollback(context, runner,
                                            self, self.task_notifier))
            try:
                self._change_state(context, states.REVERTING)
                accum.rollback(cause)
            finally:
                # The flow ends in FAILURE even if the rollback itself blew up.
                self._change_state(context, states.FAILURE)
Exemplo n.º 4
0
    def cancel(self):
        """Attempts to cancel all runners; returns how many were cancelled."""

        def ensure_cancellable():
            if self.state not in self.CANCELLABLE_STATES:
                raise exc.InvalidStateException("Can not attempt cancellation"
                                                " when in state %s" %
                                                self.state)

        # Fail fast before touching the cancel locks.
        ensure_cancellable()
        total_cancelled = 0
        graph_was_empty = False

        # We don't lock the other locks so that the flow can be cancelled while
        # running. Further state management logic is then used while running
        # to verify that the flow should still be running when it has been
        # cancelled.
        with threading_utils.MultiLock(self._cancel_locks):
            ensure_cancellable()
            if len(self._graph) == 0:
                graph_was_empty = True
            else:
                for runner in self._graph.nodes_iter():
                    try:
                        if runner.cancel(blocking=False):
                            total_cancelled += 1
                    except exc.InvalidStateException:
                        # Runner is in a state that can't be cancelled;
                        # best-effort, keep going.
                        pass
            # An empty flow trivially counts as fully cancelled.
            if total_cancelled or graph_was_empty:
                self._change_state(None, states.CANCELLED)

        return total_cancelled
Exemplo n.º 5
0
 def reset(self):
     """Resets the flow back to a pristine (never-ran) state."""
     # All locks are used so that resets can not happen while running or
     # cancelling or modifying.
     with threading_utils.MultiLock(self._core_locks):
         super(Flow, self).reset()
         # Drop any attached resumer and forget previously gathered results.
         self.resumer = None
         self.results = {}
Exemplo n.º 6
0
def logbook_destroy(lb_id):
    """Destroys the logbook with the given id (cascading the delete).

    Mirrors the cascading delete that the sql layer performs, removing the
    logbook's flow details and their task details from 'permanent' storage.

    Raises exc.NotFound if no logbook exists with the given id.
    """
    with threading_utils.MultiLock(READ_SAVE_ORDER):
        # Keep the try body minimal: only the pop() below is the legitimate
        # "logbook missing" signal. The original wrapped the whole cascade,
        # which would misreport any stray KeyError raised while iterating
        # the logbook's contents as a missing logbook.
        try:
            lb = LOG_BOOKS.pop(lb_id)
        except KeyError:
            raise exc.NotFound("No logbook found with id: %s" % lb_id)
        # Do the same cascading delete that the sql layer does.
        for fd in lb:
            FLOW_DETAILS.pop(fd.uuid, None)
            for td in fd:
                TASK_DETAILS.pop(td.uuid, None)
Exemplo n.º 7
0
    def add_many(self, tasks):
        """Adds a list of tasks to the flow."""

        def ensure_mutable():
            if self.state not in self.MUTABLE_STATES:
                raise exc.InvalidStateException("Flow is currently in a"
                                                " non-mutable state %s"
                                                % (self.state))

        # Ensure that we do a quick check to see if we can even perform this
        # addition before we go about actually acquiring the lock.
        ensure_mutable()

        # All locks must be acquired so that modifications can not be made
        # while running, cancelling or performing a simultaneous mutation.
        with threading_utils.MultiLock(self._core_locks):
            ensure_mutable()
            # Add each task in order, collecting the uuids add() hands back.
            return [self.add(task) for task in tasks]
Exemplo n.º 8
0
def flowdetails_save(fd):
    """Saves (merges) the given flow details into 'permanent' storage.

    The stored flow details (looked up by uuid) absorb the given meta/state
    and each contained task detail is either deep-copied in (when new) or
    merged with its stored counterpart.

    Returns the backing (stored) flow details object.
    Raises exc.NotFound if the flow details were never saved before.
    """
    with threading_utils.MultiLock(READ_SAVE_ORDER):
        # Keep the try body minimal: only this lookup signals "not saved
        # before". The original wrapped the whole merge, which would
        # misreport any stray KeyError from the loop below as NotFound.
        try:
            e_fd = FLOW_DETAILS[fd.uuid]
        except KeyError:
            raise exc.NotFound("No flow details found with id: %s" % fd.uuid)
        if e_fd.meta != fd.meta:
            e_fd.meta = fd.meta
        if e_fd.state != fd.state:
            e_fd.state = fd.state
        for td in fd:
            if td not in e_fd:
                # Brand new task details; deep copy so later caller-side
                # mutations do not leak into 'permanent' storage.
                td = copy.deepcopy(td)
                TASK_DETAILS[td.uuid] = td
                e_fd.add(td)
            else:
                # Previously added but not saved into the taskdetails
                # 'permanent' storage.
                if td.uuid not in TASK_DETAILS:
                    TASK_DETAILS[td.uuid] = copy.deepcopy(td)
                taskdetails_save(td)
        return e_fd
Exemplo n.º 9
0
    def add(self, task, timeout=None, infer=True):
        """Adds a task to the given flow using the given timeout which will be
        used as the timeout to wait for dependencies (if any) to be
        fulfilled; returns the uuid of the runner wrapping the task.
        """
        def ensure_mutable():
            if self.state not in self.MUTABLE_STATES:
                raise exc.InvalidStateException("Flow is currently in a"
                                                " non-mutable %s state" %
                                                (self.state))

        # Quick pre-flight check so we do not bother acquiring the lock
        # when the flow can not be mutated anyway.
        ensure_mutable()

        # All locks must be acquired so that modifications can not be made
        # while running, cancelling or performing a simultaneous mutation.
        with threading_utils.MultiLock(self._core_locks):
            ensure_mutable()
            runner = ThreadRunner(task, self, timeout)
            self._graph.add_node(runner, infer=infer)
            return runner.uuid
Exemplo n.º 10
0
def logbook_save(lb):
    """Saves (creates or merges) the given logbook into 'permanent' storage.

    Returns the backing (stored) logbook object.
    """
    # Acquire all the locks that will be needed to perform this operation with
    # out being affected by other threads doing it at the same time.
    with threading_utils.MultiLock(READ_SAVE_ORDER):
        # Get a existing logbook model (or create it if it isn't there).
        try:
            backing_lb = LOG_BOOKS[lb.uuid]
            if backing_lb.meta != lb.meta:
                backing_lb.meta = lb.meta
            # Add anything on to the existing loaded logbook that isn't already
            # in the existing logbook.
            for fd in lb:
                if fd not in backing_lb:
                    # New flow details; deep copy so later caller-side
                    # mutations do not alter the stored copy.
                    FLOW_DETAILS[fd.uuid] = copy.deepcopy(fd)
                    backing_lb.add(flowdetails_save(fd))
                else:
                    # Previously added but not saved into the flowdetails
                    # 'permanent' storage.
                    if fd.uuid not in FLOW_DETAILS:
                        FLOW_DETAILS[fd.uuid] = copy.deepcopy(fd)
                    flowdetails_save(fd)
            # TODO(harlowja): figure out a better way to set this property
            # without actually letting others set it external.
            backing_lb._updated_at = timeutils.utcnow()
        except KeyError:
            # Never saved before: deep copy the whole logbook and record
            # every contained flow/task detail as saved.
            backing_lb = copy.deepcopy(lb)
            # TODO(harlowja): figure out a better way to set this property
            # without actually letting others set it external.
            backing_lb._created_at = timeutils.utcnow()
            # Record all the pieces as being saved.
            LOG_BOOKS[lb.uuid] = backing_lb
            for fd in backing_lb:
                FLOW_DETAILS[fd.uuid] = fd
                for td in fd:
                    TASK_DETAILS[td.uuid] = td
        return backing_lb
Exemplo n.º 11
0
def taskdetails_save(td):
    """Merges the given task details into their stored 'permanent' copy.

    Returns the merge result from _taskdetails_merge.
    Raises exc.NotFound when the task details were never saved before.
    """
    with threading_utils.MultiLock(READ_SAVE_ORDER):
        try:
            return _taskdetails_merge(TASK_DETAILS[td.uuid], td)
        except KeyError:
            raise exc.NotFound("No task details found with id: %s" % td.uuid)
Exemplo n.º 12
0
    def run(self, context, *args, **kwargs):
        """Executes the given flow using the given context and args/kwargs."""

        def abort_if(current_state, ok_states):
            # State-transition guard: returning False aborts a pending
            # transition when the flow was cancelled or drifted out of the
            # expected states while we were waiting.
            if current_state in (states.CANCELLED,):
                return False
            if current_state not in ok_states:
                return False
            return True

        def check():
            # Only flows in a runnable state may be ran.
            if self.state not in self.RUNNABLE_STATES:
                raise exc.InvalidStateException("Flow is currently unable "
                                                "to be ran in state %s"
                                                % (self.state))

        def connect_and_verify():
            """Do basic sanity tests on the graph structure."""
            if len(self._graph) == 0:
                return
            self._connect()
            degrees = [g[1] for g in self._graph.in_degree_iter()]
            zero_degrees = [d for d in degrees if d == 0]
            if not zero_degrees:
                # If every task depends on something else to produce its input
                # then we will be in a deadlock situation.
                raise exc.InvalidStateException("No task has an in-degree"
                                                " of zero")
            self_loops = self._graph.nodes_with_selfloops()
            if self_loops:
                # A task that has a dependency on itself will never be able
                # to run.
                raise exc.InvalidStateException("%s tasks have been detected"
                                                " with dependencies on"
                                                " themselves" %
                                                len(self_loops))
            simple_cycles = len(cycles.recursive_simple_cycles(self._graph))
            if simple_cycles:
                # A task loop will never be able to run, unless it somehow
                # breaks that loop.
                raise exc.InvalidStateException("%s tasks have been detected"
                                                " with dependency loops" %
                                                simple_cycles)

        def run_it(result_cb, args, kwargs):
            # Transition to RUNNING (unless aborted by the guard), reset all
            # runners and submit them; await_termination blocks until every
            # runner has finished one way or another.
            check_runnable = functools.partial(abort_if,
                                               ok_states=self.RUNNABLE_STATES)
            if self._change_state(context, states.RUNNING,
                                  check_func=check_runnable):
                self.results = {}
                if len(self._graph) == 0:
                    return
                for r in self._graph.nodes_iter():
                    r.reset()
                    r._result_cb = result_cb
                executor = threading_utils.ThreadGroupExecutor()
                for r in self._graph.nodes_iter():
                    executor.submit(r, *args, **kwargs)
                executor.await_termination()

        def trigger_rollback(failures):
            # Roll the flow back, then re-raise whatever caused the
            # failure(s); a single failure is re-raised with its original
            # traceback (NOTE: python-2-only three-expression raise below).
            if not failures:
                return
            causes = []
            for r in failures:
                causes.append(misc.FlowFailure(r, self))
            try:
                self.rollback(context, causes)
            except exc.InvalidStateException:
                # Rollback was not possible from the current state; still
                # re-raise the original failure(s) below.
                pass
            finally:
                if len(failures) > 1:
                    exc_infos = [f.exc_info for f in failures]
                    raise exc.LinkedException.link(exc_infos)
                else:
                    f = failures[0]
                    raise f.exc_info[0], f.exc_info[1], f.exc_info[2]

        def handle_results():
            # Isolate each runner state into groups so that we can easily tell
            # which ones failed, cancelled, completed...
            groups = collections.defaultdict(list)
            for r in self._graph.nodes_iter():
                groups[r.state].append(r)
            # Record results for everything that ran and did not fail.
            for r in self._graph.nodes_iter():
                if r not in groups.get(states.FAILURE, []) and r.has_ran():
                    self.results[r.uuid] = r.result
            if groups[states.FAILURE]:
                self._change_state(context, states.FAILURE)
                trigger_rollback(groups[states.FAILURE])
            elif (groups[states.CANCELLED] or groups[states.PENDING]
                  or groups[states.TIMED_OUT] or groups[states.STARTED]):
                # Some runners never completed: the flow is only partially
                # done and may be resumed later.
                self._change_state(context, states.INCOMPLETE)
            else:
                check_ran = functools.partial(abort_if,
                                              ok_states=[states.RUNNING])
                self._change_state(context, states.SUCCESS,
                                   check_func=check_ran)

        def get_resumer_cb():
            # When a resumer is attached, build a callback letting each runner
            # fetch a previously recorded result instead of re-running.
            if not self.resumer:
                return None
            (ran, _others) = self.resumer(self, self._graph.nodes_iter())

            def fetch_results(runner):
                for (r, metadata) in ran:
                    if r is runner:
                        return (True, metadata.get('result'))
                return (False, None)

            result_cb = fetch_results
            return result_cb

        args = [context] + list(args)
        check()

        # Only acquire the run lock (but use further state checking) and the
        # mutation lock to stop simultaneous running and simultaneous mutating
        # which are not allowed on a running flow. Allow simultaneous cancel
        # by performing repeated state checking while running.
        with threading_utils.MultiLock(self._run_locks):
            check()
            connect_and_verify()
            try:
                run_it(get_resumer_cb(), args, kwargs)
            finally:
                handle_results()