Example #1
    def __init__(self, listener, ctxt, message, unique_id, msg_id, reply_q,
                 obsolete_reply_queues):
        """
        message is an instance of
        simpleservice.rpc.driver.message.RabbitMessage;
        an AMQPIncomingMessage for the dispatcher.
        """
        self.ctxt = ctxt
        self.message = message
        self.listener = listener
        self.unique_id = unique_id
        self.msg_id = msg_id
        self.reply_q = reply_q
        self._obsolete_reply_queues = obsolete_reply_queues
        self.stopwatch = timeutils.StopWatch()
        self.stopwatch.start()
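Example #1 starts a StopWatch in the constructor so the message handler can later check how long the incoming message has been held. A minimal standalone sketch of that pattern, assuming only oslo_utils is available (the IncomingMessage class and its age() method are illustrative, not part of the snippet above):

from oslo_utils import timeutils


class IncomingMessage(object):
    """Illustrative holder that records its own age with a StopWatch."""

    def __init__(self, payload):
        self.payload = payload
        # Started once at construction time, never stopped: elapsed()
        # then reports how long the message has been held so far.
        self.stopwatch = timeutils.StopWatch()
        self.stopwatch.start()

    def age(self):
        return self.stopwatch.elapsed()


msg = IncomingMessage({'method': 'ping'})
print('message held for %.3f seconds' % msg.age())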
Example #2
    def wait(self, timeout=None):
        """Waits until the latch is released.

        :param timeout: wait until the timeout expires
        :type timeout: number
        :returns: true if the latch has been released before the
                  timeout expires, otherwise false
        :rtype: boolean
        """
        watch = timeutils.StopWatch(duration=timeout)
        watch.start()
        with self._cond:
            while self._count > 0:
                if watch.expired():
                    return False
                else:
                    self._cond.wait(watch.leftover(return_none=True))
            return True
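Example #2 drives a condition-variable wait off a StopWatch created with a duration: expired() detects that the overall timeout has passed, while leftover(return_none=True) bounds each individual wait. A minimal standalone sketch of the same deadline pattern, assuming a simple polling loop in place of the latch's condition variable (wait_until, predicate and interval are invented names):

import time

from oslo_utils import timeutils


def wait_until(predicate, timeout=None, interval=0.1):
    # A StopWatch built with a duration answers both "has the timeout
    # expired?" and "how much time is left?".
    watch = timeutils.StopWatch(duration=timeout)
    watch.start()
    while not predicate():
        if watch.expired():
            return False
        # leftover(return_none=True) is None when no duration was given,
        # which here just means "sleep the full polling interval".
        left = watch.leftover(return_none=True)
        time.sleep(interval if left is None else min(interval, left))
    return True


print(wait_until(lambda: True))                 # True immediately
print(wait_until(lambda: False, timeout=0.3))   # False after ~0.3 seconds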
Example #3
    def _run_loop(self,
                  idle_for_func,
                  initial_delay=None,
                  stop_on_exception=True):
        kind = self._KIND
        func_name = reflection.get_callable_name(self.f)
        func = self.f if stop_on_exception else _safe_wrapper(
            self.f, kind, func_name)
        if initial_delay:
            greenthread.sleep(initial_delay)
        try:
            watch = timeutils.StopWatch()
            while self._running:
                watch.restart()
                result = func(*self.args, **self.kw)
                watch.stop()
                if not self._running:
                    break
                idle = idle_for_func(result, watch.elapsed())
                LOG.trace('%(kind)s %(func_name)r sleeping '
                          'for %(idle).02f seconds',
                          {'func_name': func_name,
                           'idle': idle,
                           'kind': kind})
                greenthread.sleep(idle)
        except LoopingCallDone as e:
            self.done.send(e.retvalue)
        except Exception:
            exc_info = sys.exc_info()
            try:
                LOG.error('%(kind)s %(func_name)r failed',
                          {'kind': kind, 'func_name': func_name},
                          exc_info=exc_info)
                self.done.send_exception(*exc_info)
            finally:
                del exc_info
            return
        else:
            self.done.send(True)
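Example #3 re-times every iteration of the looping call with restart()/stop()/elapsed() so the subsequent sleep can account for how long the callback itself ran. A minimal standalone sketch of the same timing idea, assuming plain time.sleep in place of greenthread.sleep and a fixed interval (run_fixed_interval and its parameters are invented for illustration):

import time

from oslo_utils import timeutils


def run_fixed_interval(func, interval, iterations):
    # One StopWatch is reused: restart() before the call, stop() after,
    # and elapsed() tells us how much of the interval is already used up.
    watch = timeutils.StopWatch()
    for _ in range(iterations):
        watch.restart()
        func()
        watch.stop()
        idle = max(0.0, interval - watch.elapsed())
        time.sleep(idle)


run_fixed_interval(lambda: time.sleep(0.05), interval=0.2, iterations=3)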
Example #4
    def build(self, statistics, timeout=None, gather_statistics=True):
        """Builds a state-machine (that is used during running)."""
        if gather_statistics:
            watches = {}
            state_statistics = {}
            statistics['seconds_per_state'] = state_statistics
            for timed_state in TIMED_STATES:
                state_statistics[timed_state.lower()] = 0.0
                watches[timed_state] = timeutils.StopWatch()
            statistics['discarded_failures'] = 0
            statistics['awaiting'] = 0
            statistics['completed'] = 0
            statistics['incomplete'] = 0

        memory = MachineMemory()
        if timeout is None:
            timeout = WAITING_TIMEOUT

        # Cache some local functions/methods...
        do_complete = self._completer.complete
        do_complete_failure = self._completer.complete_failure
        get_atom_intention = self._storage.get_atom_intention

        def do_schedule(next_nodes):
            with self._storage.lock.write_lock():
                return self._scheduler.schedule(
                    sorted(next_nodes,
                           key=lambda node: getattr(node, 'priority', 0),
                           reverse=True))

        def iter_next_atoms(atom=None, apply_deciders=True):
            # Yields and filters and tweaks the next atoms to run...
            maybe_atoms_it = self._selector.iter_next_atoms(atom=atom)
            for atom, late_decider in maybe_atoms_it:
                if apply_deciders:
                    proceed = late_decider.check_and_affect(self._runtime)
                    if proceed:
                        yield atom
                else:
                    yield atom

        def resume(old_state, new_state, event):
            # This reaction function just updates the state machine's memory
            # to include any nodes that need to be executed (from a previous
            # attempt, which may be empty if it never ran before) and any
            # nodes that are now ready to be run.
            with self._storage.lock.write_lock():
                memory.next_up.update(
                    iter_utils.unique_seen(
                        (self._completer.resume(), iter_next_atoms())))
            return SCHEDULE

        def game_over(old_state, new_state, event):
            # This reaction function is mainly an intermediary delegation
            # function that analyzes the current memory and transitions to
            # the appropriate handler that will deal with the memory values;
            # it is *always* called before the final state is entered.
            if memory.failures:
                return FAILED
            with self._storage.lock.read_lock():
                leftover_atoms = iter_utils.count(
                    # Avoid activating the deciders, since at this point
                    # the engine is finishing and there will be no further
                    # work done anyway...
                    iter_next_atoms(apply_deciders=False))
            if leftover_atoms:
                # Ok we didn't finish (either reverting or executing...) so
                # that means we must have been stopped at some point...
                LOG.trace(
                    "Suspension determined to have been reacted to"
                    " since (at least) %s atoms have been left in an"
                    " unfinished state", leftover_atoms)
                return SUSPENDED
            elif self._runtime.is_success():
                return SUCCESS
            else:
                return REVERTED

        def schedule(old_state, new_state, event):
            # This reaction function starts to schedule the memory's next
            # nodes (iff the engine is still runnable, which it may not be
            # if the user of this engine has requested the engine/storage
            # that holds this information to stop or suspend); handles failures
            # that occur during this process safely...
            with self._storage.lock.write_lock():
                current_flow_state = self._storage.get_flow_state()
                if current_flow_state == st.RUNNING and memory.next_up:
                    not_done, failures = do_schedule(memory.next_up)
                    if not_done:
                        memory.not_done.update(not_done)
                    if failures:
                        memory.failures.extend(failures)
                    memory.next_up.intersection_update(not_done)
                elif current_flow_state == st.SUSPENDING and memory.not_done:
                    # Try to force anything not cancelled to now be cancelled
                    # so that the executor that gets it does not continue to
                    # try to work on it (if the future execution is still in
                    # its backlog, if it's already being executed, this will
                    # do nothing).
                    memory.cancel_futures()
            return WAIT

        def complete_an_atom(fut):
            # This completes a single atom saving its result in
            # storage and preparing whatever predecessors or successors will
            # now be ready to execute (or revert or retry...); it also
            # handles failures that occur during this process safely...
            atom = fut.atom
            try:
                outcome, result = fut.result()
                do_complete(atom, outcome, result)
                if isinstance(result, failure.Failure):
                    retain = do_complete_failure(atom, outcome, result)
                    if retain:
                        memory.failures.append(result)
                    else:
                        # NOTE(harlowja): avoid making any intention request
                        # to storage unless we are sure we are in DEBUG
                        # enabled logging (otherwise we will call this all
                        # the time even when DEBUG is not enabled, which
                        # would suck...)
                        if LOG.isEnabledFor(logging.DEBUG):
                            intention = get_atom_intention(atom.name)
                            LOG.debug(
                                "Discarding failure '%s' (in response"
                                " to outcome '%s') under completion"
                                " units request during completion of"
                                " atom '%s' (intention is to %s)", result,
                                outcome, atom, intention)
                        if gather_statistics:
                            statistics['discarded_failures'] += 1
                if gather_statistics:
                    statistics['completed'] += 1
            except futurist.CancelledError:
                # Well it got cancelled, skip doing anything
                # and move on; at a further time it will be resumed
                # and something should be done with it to get it
                # going again.
                return WAS_CANCELLED
            except Exception:
                memory.failures.append(failure.Failure())
                LOG.exception("Engine '%s' atom post-completion"
                              " failed", atom)
                return FAILED_COMPLETING
            else:
                return SUCCESSFULLY_COMPLETED

        def wait(old_state, new_state, event):
            # This reaction function waits (up to the given timeout) for any
            # scheduled futures to finish before moving on to analysis.
            if memory.not_done:
                done, not_done = self._waiter(memory.not_done, timeout=timeout)
                memory.done.update(done)
                memory.not_done = not_done
            return ANALYZE

        def analyze(old_state, new_state, event):
            # This reaction function is responsible for analyzing all nodes
            # that have finished executing/reverting and figuring
            # out what nodes are now ready to be run (and then triggering those
            # nodes to be scheduled in the future); handles failures that
            # occur during this process safely...
            next_up = set()
            with self._storage.lock.write_lock():
                while memory.done:
                    fut = memory.done.pop()
                    # Force it to be completed so that we can ensure it has
                    # been completed and saved before we iterate over any
                    # successors or predecessors...
                    completion_status = complete_an_atom(fut)
                    if (not memory.failures
                            and completion_status != WAS_CANCELLED):
                        atom = fut.atom
                        try:
                            more_work = set(iter_next_atoms(atom=atom))
                        except Exception:
                            memory.failures.append(failure.Failure())
                            LOG.exception(
                                "Engine '%s' atom post-completion"
                                " next atom searching failed", atom)
                        else:
                            next_up.update(more_work)
            current_flow_state = self._storage.get_flow_state()
            if (current_flow_state == st.RUNNING and next_up
                    and not memory.failures):
                memory.next_up.update(next_up)
                return SCHEDULE
            elif memory.not_done:
                if current_flow_state == st.SUSPENDING:
                    memory.cancel_futures()
                return WAIT
            else:
                return FINISH

        def on_exit(old_state, event):
            LOG.trace("Exiting old state '%s' in response to event '%s'",
                      old_state, event)
            if gather_statistics:
                if old_state in watches:
                    w = watches[old_state]
                    w.stop()
                    state_statistics[old_state.lower()] += w.elapsed()
                if old_state in (st.SCHEDULING, st.WAITING):
                    statistics['incomplete'] = len(memory.not_done)
                if old_state in (st.ANALYZING, st.SCHEDULING):
                    statistics['awaiting'] = len(memory.next_up)

        def on_enter(new_state, event):
            LOG.trace("Entering new state '%s' in response to event '%s'",
                      new_state, event)
            if gather_statistics and new_state in watches:
                watches[new_state].restart()

        state_kwargs = {
            'on_exit': on_exit,
            'on_enter': on_enter,
        }
        m = machines.FiniteMachine()
        m.add_state(GAME_OVER, **state_kwargs)
        m.add_state(UNDEFINED, **state_kwargs)
        m.add_state(st.ANALYZING, **state_kwargs)
        m.add_state(st.RESUMING, **state_kwargs)
        m.add_state(st.REVERTED, terminal=True, **state_kwargs)
        m.add_state(st.SCHEDULING, **state_kwargs)
        m.add_state(st.SUCCESS, terminal=True, **state_kwargs)
        m.add_state(st.SUSPENDED, terminal=True, **state_kwargs)
        m.add_state(st.WAITING, **state_kwargs)
        m.add_state(st.FAILURE, terminal=True, **state_kwargs)
        m.default_start_state = UNDEFINED

        m.add_transition(GAME_OVER, st.REVERTED, REVERTED)
        m.add_transition(GAME_OVER, st.SUCCESS, SUCCESS)
        m.add_transition(GAME_OVER, st.SUSPENDED, SUSPENDED)
        m.add_transition(GAME_OVER, st.FAILURE, FAILED)
        m.add_transition(UNDEFINED, st.RESUMING, START)
        m.add_transition(st.ANALYZING, GAME_OVER, FINISH)
        m.add_transition(st.ANALYZING, st.SCHEDULING, SCHEDULE)
        m.add_transition(st.ANALYZING, st.WAITING, WAIT)
        m.add_transition(st.RESUMING, st.SCHEDULING, SCHEDULE)
        m.add_transition(st.SCHEDULING, st.WAITING, WAIT)
        m.add_transition(st.WAITING, st.ANALYZING, ANALYZE)

        m.add_reaction(GAME_OVER, FINISH, game_over)
        m.add_reaction(st.ANALYZING, ANALYZE, analyze)
        m.add_reaction(st.RESUMING, START, resume)
        m.add_reaction(st.SCHEDULING, SCHEDULE, schedule)
        m.add_reaction(st.WAITING, WAIT, wait)

        m.freeze()
        return (m, memory)
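Example #4 keeps one StopWatch per timed machine state, restarting it when the state is entered and folding elapsed() into a per-state statistics dict when the state is exited. A minimal standalone sketch of that bookkeeping, assuming invented state names and a simulated sequence of transitions:

from oslo_utils import timeutils

TIMED = ('SCHEDULING', 'WAITING', 'ANALYZING')  # illustrative state names
watches = {state: timeutils.StopWatch() for state in TIMED}
seconds_per_state = {state.lower(): 0.0 for state in TIMED}


def on_enter(state):
    if state in watches:
        watches[state].restart()


def on_exit(state):
    if state in watches:
        w = watches[state]
        w.stop()
        seconds_per_state[state.lower()] += w.elapsed()


# Simulate a few enter/exit cycles; real code would hook these into the
# state machine's on_enter/on_exit callbacks as the example above does.
for state in ('SCHEDULING', 'WAITING', 'ANALYZING', 'SCHEDULING'):
    on_enter(state)
    on_exit(state)
print(seconds_per_state)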
Example #5
    def run_iter(self, timeout=None):
        """Runs the engine using iteration (or die trying).

        :param timeout: timeout to wait for any atoms to complete (this timeout
            will be used during the waiting period that occurs after the
            waiting state is yielded when unfinished atoms are being waited
            on).

        Instead of running to completion in a blocking manner, this will
        return a generator which will yield back the various states that the
        engine is going through (and can be used to run multiple engines at
        once using a generator per engine). The iterator returned also
        responds to the ``send()`` method from :pep:`0342` and will attempt to
        suspend itself if a truthy value is sent in (the suspend may be
        delayed until all active atoms have finished).

        NOTE(harlowja): using the ``run_iter`` method will **not** retain the
        engine lock while executing so the user should ensure that there is
        only one entity using a returned engine iterator (one per engine) at a
        given time.
        """
        self.compile()
        self.prepare()
        self.validate()
        # Keep track of the last X state changes, which if a failure happens
        # are quite useful to log (and the performance of tracking this
        # should be negligible).
        last_transitions = collections.deque(
            maxlen=max(1, self.MAX_MACHINE_STATES_RETAINED))
        with _start_stop(self._task_executor, self._retry_executor):
            self._change_state(states.RUNNING)
            if self._gather_statistics:
                self._statistics.clear()
                w = timeutils.StopWatch()
                w.start()
            else:
                w = None
            try:
                closed = False
                machine, memory = self._runtime.builder.build(
                    self._statistics,
                    timeout=timeout,
                    gather_statistics=self._gather_statistics)
                r = runners.FiniteRunner(machine)
                for transition in r.run_iter(builder.START):
                    last_transitions.append(transition)
                    _prior_state, new_state = transition
                    # NOTE(harlowja): skip over meta-states
                    if new_state in builder.META_STATES:
                        continue
                    if new_state == states.FAILURE:
                        failure.Failure.reraise_if_any(memory.failures)
                    if closed:
                        continue
                    try:
                        try_suspend = yield new_state
                    except GeneratorExit:
                        # The generator was closed, attempt to suspend and
                        # continue looping until we have cleanly closed up
                        # shop...
                        closed = True
                        self.suspend()
                    except Exception:
                        # Capture the failure, and ensure that the
                        # machine will notice that something externally
                        # has sent an exception in and that it should
                        # finish up and reraise.
                        memory.failures.append(failure.Failure())
                        closed = True
                    else:
                        if try_suspend:
                            self.suspend()
            except Exception:
                with excutils.save_and_reraise_exception():
                    if not hasattr(last_transitions, 'maxlen'):
                        maxlen = int(
                            re.match(r"^.*?maxlen=([0-9]+)\)$",
                                     str(last_transitions)).group(1))
                    else:
                        maxlen = last_transitions.maxlen
                    LOG.error(
                        "Engine execution has failed, something"
                        " bad must have happened (last"
                        " %s machine transitions were %s)", maxlen,
                        list(last_transitions))
                    self._change_state(states.FAILURE)
            else:
                if last_transitions:
                    _prior_state, new_state = last_transitions[-1]
                    if new_state not in self.IGNORABLE_STATES:
                        self._change_state(new_state)
                        if new_state not in self.NO_RERAISING_STATES:
                            e_failures = self.storage.get_execute_failures()
                            r_failures = self.storage.get_revert_failures()
                            er_failures = itertools.chain(
                                six.itervalues(e_failures),
                                six.itervalues(r_failures))
                            failure.Failure.reraise_if_any(er_failures)
            finally:
                if w is not None:
                    w.stop()
                    self._statistics['active_for'] = w.elapsed()
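Example #5 wraps the whole engine run in a StopWatch so the total active time can be stored under statistics['active_for'] in the finally block. A minimal sketch of that wall-clock accounting around an arbitrary callable (timed_run and the statistics dict are placeholders, not the engine API):

from oslo_utils import timeutils


def timed_run(func, statistics):
    # Start before the work, stop in finally, and record elapsed() even
    # if the callable raises, mirroring the 'active_for' bookkeeping.
    w = timeutils.StopWatch()
    w.start()
    try:
        return func()
    finally:
        w.stop()
        statistics['active_for'] = w.elapsed()


stats = {}
timed_run(lambda: sum(range(1000000)), stats)
print('active for %.4f seconds' % stats['active_for'])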
Example #6
    def __init__(self, duration=None):
        self._watch = timeutils.StopWatch(duration=duration)
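Example #6 only stores a StopWatch built with an optional duration, presumably so the owning object can later ask whether its deadline has passed. A minimal sketch of such a wrapper, assuming invented Deadline/expired/remaining names; only the StopWatch calls themselves come from oslo_utils:

from oslo_utils import timeutils


class Deadline(object):
    """Hypothetical wrapper around a duration-bounded StopWatch."""

    def __init__(self, duration=None):
        self._watch = timeutils.StopWatch(duration=duration)
        self._watch.start()

    def expired(self):
        # Stays False when no duration was given.
        return self._watch.expired()

    def remaining(self):
        # None when no duration was given, otherwise seconds left.
        return self._watch.leftover(return_none=True)


d = Deadline(duration=5.0)
print(d.expired(), d.remaining())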