def _create_fsm(start_state, add_start=True, add_states=None):
    """Create a finite machine, optionally seeding a start state and extras.

    :param start_state: state used as the default start (only registered
        when ``add_start`` is true).
    :param add_start: whether to add ``start_state`` and make it the default.
    :param add_states: optional iterable of additional states to register
        (states already present are left untouched).
    :returns: the newly built ``FiniteMachine``.
    """
    machine = machines.FiniteMachine()
    if add_start:
        machine.add_state(start_state)
        machine.default_start_state = start_state
    # Register any extra states, skipping ones the machine already knows.
    for state in (add_states or ()):
        if state not in machine:
            machine.add_state(state)
    return machine
def __init__(self):
    """Initialize the automaton: states, transition probabilities and FSM.

    Builds the (row-stochastic) transition matrix, its per-row cumulative
    distribution (``cum_SUM``, usable for sampling the next state) and the
    backing ``automaton`` finite state machine (``asf``).
    """
    # States (by Luca).
    states = [
        'Welcome state',
        'Pure comment',
        'Hybrid comment',
        'Lulls comment'
    ]
    self.states = states
    # Mapping: state name -> index.
    self.states2idx = {k: i for i, k in enumerate(states)}
    # Mapping: index -> state name (inverse of states2idx).
    self.id2st = {i: k for k, i in self.states2idx.items()}
    self.n_states = len(states)

    # Transition weights defined by Luca :P
    self.transitions = np.zeros((self.n_states, self.n_states))
    self.transitions[0, 1] = 1
    self.transitions[1, 2] = 1 / 4
    self.transitions[2, 1] = 1
    self.transitions[1, 1] = 1 / 2
    self.transitions[1, 3] = 1 / 4
    self.transitions[3, 1] = 1

    # PROBABILISTIC PART: normalize each *row* into a distribution.
    # keepdims=True is required so the (n, 1) row-sum column broadcasts
    # across each row; without it the (n,) sum vector broadcasts along
    # the wrong (column) axis.  The old code only produced correct
    # results because every row here already sums to 1.
    distributions = self.transitions / np.sum(self.transitions, axis=1,
                                              keepdims=True)
    self.cum_SUM = np.cumsum(distributions, axis=1)

    # Defining ASF STATES
    self.asf = machines.FiniteMachine()
    for i in range(self.n_states):
        self.asf.add_state(self.id2st[i])

    # Defining ASF TRANSITIONS: one event per positive-probability edge,
    # named "<rounded probability>_<target state>".
    for i in range(self.n_states):
        for j, pr in enumerate(self.transitions[i]):
            if pr > 0:
                self.asf.add_transition(
                    self.id2st[i], self.id2st[j],
                    str(round(pr, 3)) + "_" + self.id2st[j])

    # INITIAL STATE
    self.asf.default_start_state = self.id2st[0]
    self.asf.initialize()
def make_machine(start_state, transitions, event_name_cb):
    """Assemble a finite machine from ``(source, target)`` transition pairs.

    :param start_state: default start state of the machine.
    :param transitions: iterable of ``(source, target)`` state pairs.
    :param event_name_cb: callable mapping ``(source, target)`` to the
        event name used for that transition.
    :returns: the populated ``FiniteMachine``.
    """
    machine = machines.FiniteMachine()
    machine.add_state(start_state)
    machine.default_start_state = start_state
    for src, dst in transitions:
        # Lazily register any state we have not seen yet.
        for state in (src, dst):
            if state not in machine:
                machine.add_state(state)
        machine.add_transition(src, dst, event_name_cb(src, dst))
    return machine
def make_machine(start_state, transitions):
    """Assemble a finite machine from ``(source, target)`` transition pairs.

    :param start_state: default start state of the machine.
    :param transitions: iterable of ``(source, target)`` state pairs.
    :returns: the populated ``FiniteMachine``.
    """
    machine = machines.FiniteMachine()
    machine.add_state(start_state)
    # BUGFIX: assign the default start state from the *parameter* before
    # the loop below.  The original loop variable shadowed ``start_state``
    # and the assignment came after the loop, so the default start state
    # silently became the last transition's source state instead of the
    # caller-supplied one (the sibling make_machine sets it up front).
    machine.default_start_state = start_state
    for (src_state, end_state) in transitions:
        if src_state not in machine:
            machine.add_state(src_state)
        if end_state not in machine:
            machine.add_state(end_state)
        # Make a fake event (not used anyway)...
        event = "on_%s" % (end_state)
        machine.add_transition(src_state, end_state, event.lower())
    return machine
def _create_fsm(start_state, add_start=True,
                hierarchical=False, add_states=None):
    """Create a (possibly hierarchical) finite machine.

    :param start_state: state used as the default start (only registered
        when ``add_start`` is true).
    :param add_start: whether to add ``start_state`` and make it default.
    :param hierarchical: build a ``HierarchicalFiniteMachine`` instead of
        a plain ``FiniteMachine``.
    :param add_states: optional iterable of additional states to register.
    :returns: the newly built machine instance.
    """
    machine_cls = (machines.HierarchicalFiniteMachine if hierarchical
                   else machines.FiniteMachine)
    machine = machine_cls()
    if add_start:
        machine.add_state(start_state)
        machine.default_start_state = start_state
    # Register any extra states, skipping ones the machine already knows.
    for state in (add_states or ()):
        if state not in machine:
            machine.add_state(state)
    return machine
def build_a_machine(freeze=True):
    """Builds a state machine that requests are allowed to go through."""
    machine = machines.FiniteMachine()
    for state in (WAITING, PENDING, RUNNING):
        machine.add_state(state)
    for state in (SUCCESS, FAILURE):
        machine.add_state(state, terminal=True)
    machine.default_start_state = WAITING
    # Allowed (source, destination) pairs; each transition's event is
    # derived from its destination state.
    allowed = [
        # Executor has begun publishing the request to a selected worker
        # but has not yet received confirmation from that worker that
        # anything happened.
        (WAITING, PENDING),
        # Request expired (no worker was able to process it).
        (WAITING, FAILURE),
        # A worker started executing the request.
        (PENDING, RUNNING),
        # The worker went away.
        (RUNNING, WAITING),
        # The worker failed to construct/process the request (it never
        # reached RUNNING in time, or failed before RUNNING started);
        # also used by the executor when publishing itself failed.
        (PENDING, FAILURE),
        # Execution failed due to some type of remote failure.
        (RUNNING, FAILURE),
        # Execution succeeded & has completed.
        (RUNNING, SUCCESS),
    ]
    for src, dst in allowed:
        machine.add_transition(src, dst, make_an_event(dst))
    # No further changes allowed once frozen.
    if freeze:
        machine.freeze()
    return machine
from automaton import machines

# references:
# https://pypi.org/project/automaton/
# https://docs.openstack.org/automaton/latest/user/examples.html

states = ['q0', 'q1', 'q2', 'q3', 'q4', 'q5', 'q6', 'q7', 'q8', 'q9',
          'q10', 'q11', 'q12', 'q13', 'q14', 'q15', 'q16', 'q17', 'q18',
          'q19', 'q20', 'q21', 'q22', 'q23']
# q6 final state
# NOTE(review): q6 is described as final, but add_state() below is never
# called with terminal=True -- confirm whether the machine should actually
# mark it terminal.

q = machines.FiniteMachine()
# Idiomatic iteration over the states instead of the original manual
# while/index-counter loop.
for state in states:
    q.add_state(state)

q.add_transition(states[0], states[1], 'f')
q.add_transition(states[0], states[2], 'r')
q.add_transition(states[0], states[3], 'e')
q.add_transition(states[0], states[4], 'p')
q.add_transition(states[0], states[5], 'i')
q.add_transition(states[1], states[7], 'o')
q.add_transition(states[2], states[12], 'a')
q.add_transition(states[3], states[14], 'l')
q.add_transition(states[4], states[9], 'o')
q.add_transition(states[4], states[15], 'r')
q.add_transition(states[5], states[19], 'm')
q.add_transition(states[5], states[17], 'n')
q.add_transition(states[5], states[6], 'f')
q.add_transition(states[7], states[6], 'r')
def __init__(self):
    # Create an empty finite state machine; its states/transitions are
    # populated by define_state() (defined elsewhere in this class).
    self.fsm = machines.FiniteMachine()
    self.define_state()
def build(self, timeout=None):
    """Builds a state-machine (that is used during running).

    Returns a ``(machine, memory)`` tuple: ``machine`` is a frozen
    ``FiniteMachine`` wired with the reaction functions defined below,
    and ``memory`` is the shared ``MachineMemory`` those reactions read
    and mutate while the engine runs.

    :param timeout: how long the waiter blocks for futures to complete;
        defaults to ``WAITING_TIMEOUT`` when not given.
    """
    memory = MachineMemory()
    if timeout is None:
        timeout = WAITING_TIMEOUT
    # Cache some local functions/methods...
    do_schedule = self._scheduler.schedule
    do_complete = self._completer.complete

    def is_runnable():
        # Checks if the storage says the flow is still runnable...
        return self._storage.get_flow_state() == st.RUNNING

    def iter_next_atoms(atom=None, apply_deciders=True):
        # Yields and filters and tweaks the next atoms to run...
        maybe_atoms_it = self._analyzer.iter_next_atoms(atom=atom)
        for atom, late_decider in maybe_atoms_it:
            if apply_deciders:
                proceed = late_decider.check_and_affect(self._runtime)
                if proceed:
                    yield atom
            else:
                yield atom

    def resume(old_state, new_state, event):
        # This reaction function just updates the state machines memory
        # to include any nodes that need to be executed (from a previous
        # attempt, which may be empty if never ran before) and any nodes
        # that are now ready to be ran.
        memory.next_up.update(
            iter_utils.unique_seen(self._completer.resume(),
                                   iter_next_atoms()))
        return SCHEDULE

    def game_over(old_state, new_state, event):
        # This reaction function is mainly a intermediary delegation
        # function that analyzes the current memory and transitions to
        # the appropriate handler that will deal with the memory values,
        # it is *always* called before the final state is entered.
        if memory.failures:
            return FAILED
        leftover_atoms = iter_utils.count(
            # Avoid activating the deciders, since at this point
            # the engine is finishing and there will be no more further
            # work done anyway...
            iter_next_atoms(apply_deciders=False))
        if leftover_atoms:
            # Ok we didn't finish (either reverting or executing...) so
            # that means we must of been stopped at some point...
            LOG.blather("Suspension determined to have been reacted to"
                        " since (at least) %s atoms have been left in an"
                        " unfinished state", leftover_atoms)
            return SUSPENDED
        elif self._analyzer.is_success():
            return SUCCESS
        else:
            return REVERTED

    def schedule(old_state, new_state, event):
        # This reaction function starts to schedule the memory's next
        # nodes (iff the engine is still runnable, which it may not be
        # if the user of this engine has requested the engine/storage
        # that holds this information to stop or suspend); handles failures
        # that occur during this process safely...
        if is_runnable() and memory.next_up:
            not_done, failures = do_schedule(memory.next_up)
            if not_done:
                memory.not_done.update(not_done)
            if failures:
                memory.failures.extend(failures)
            # Only keep the nodes that were actually handed off and are
            # still outstanding.
            memory.next_up.intersection_update(not_done)
        return WAIT

    def wait(old_state, new_state, event):
        # TODO(harlowja): maybe we should start doing 'yield from' this
        # call sometime in the future, or equivalent that will work in
        # py2 and py3.
        if memory.not_done:
            done, not_done = self._waiter(memory.not_done, timeout=timeout)
            memory.done.update(done)
            memory.not_done = not_done
        return ANALYZE

    def analyze(old_state, new_state, event):
        # This reaction function is responsible for analyzing all nodes
        # that have finished executing and completing them and figuring
        # out what nodes are now ready to be ran (and then triggering those
        # nodes to be scheduled in the future); handles failures that
        # occur during this process safely...
        next_up = set()
        while memory.done:
            fut = memory.done.pop()
            atom = fut.atom
            try:
                outcome, result = fut.result()
                retain = do_complete(atom, outcome, result)
                if isinstance(result, failure.Failure):
                    if retain:
                        memory.failures.append(result)
                    else:
                        # NOTE(harlowja): avoid making any
                        # intention request to storage unless we are
                        # sure we are in DEBUG enabled logging (otherwise
                        # we will call this all the time even when DEBUG
                        # is not enabled, which would suck...)
                        if LOG.isEnabledFor(logging.DEBUG):
                            intention = self._storage.get_atom_intention(
                                atom.name)
                            LOG.debug("Discarding failure '%s' (in"
                                      " response to outcome '%s') under"
                                      " completion units request during"
                                      " completion of atom '%s' (intention"
                                      " is to %s)", result, outcome,
                                      atom, intention)
            except Exception:
                memory.failures.append(failure.Failure())
            else:
                try:
                    more_work = set(iter_next_atoms(atom=atom))
                except Exception:
                    memory.failures.append(failure.Failure())
                else:
                    next_up.update(more_work)
        if is_runnable() and next_up and not memory.failures:
            memory.next_up.update(next_up)
            return SCHEDULE
        elif memory.not_done:
            return WAIT
        else:
            return FINISH

    def on_exit(old_state, event):
        LOG.debug("Exiting old state '%s' in response to event '%s'",
                  old_state, event)

    def on_enter(new_state, event):
        LOG.debug("Entering new state '%s' in response to event '%s'",
                  new_state, event)

    # NOTE(harlowja): when ran in blather mode it is quite useful
    # to track the various state transitions as they happen...
    watchers = {}
    if LOG.isEnabledFor(logging.BLATHER):
        watchers['on_exit'] = on_exit
        watchers['on_enter'] = on_enter

    # Wire up the machine: states first, then the legal transitions and
    # the reaction callbacks that decide each next event.
    m = machines.FiniteMachine()
    m.add_state(GAME_OVER, **watchers)
    m.add_state(UNDEFINED, **watchers)
    m.add_state(st.ANALYZING, **watchers)
    m.add_state(st.RESUMING, **watchers)
    m.add_state(st.REVERTED, terminal=True, **watchers)
    m.add_state(st.SCHEDULING, **watchers)
    m.add_state(st.SUCCESS, terminal=True, **watchers)
    m.add_state(st.SUSPENDED, terminal=True, **watchers)
    m.add_state(st.WAITING, **watchers)
    m.add_state(st.FAILURE, terminal=True, **watchers)
    m.default_start_state = UNDEFINED
    m.add_transition(GAME_OVER, st.REVERTED, REVERTED)
    m.add_transition(GAME_OVER, st.SUCCESS, SUCCESS)
    m.add_transition(GAME_OVER, st.SUSPENDED, SUSPENDED)
    m.add_transition(GAME_OVER, st.FAILURE, FAILED)
    m.add_transition(UNDEFINED, st.RESUMING, START)
    m.add_transition(st.ANALYZING, GAME_OVER, FINISH)
    m.add_transition(st.ANALYZING, st.SCHEDULING, SCHEDULE)
    m.add_transition(st.ANALYZING, st.WAITING, WAIT)
    m.add_transition(st.RESUMING, st.SCHEDULING, SCHEDULE)
    m.add_transition(st.SCHEDULING, st.WAITING, WAIT)
    m.add_transition(st.WAITING, st.ANALYZING, ANALYZE)
    m.add_reaction(GAME_OVER, FINISH, game_over)
    m.add_reaction(st.ANALYZING, ANALYZE, analyze)
    m.add_reaction(st.RESUMING, START, resume)
    m.add_reaction(st.SCHEDULING, SCHEDULE, schedule)
    m.add_reaction(st.WAITING, WAIT, wait)
    m.freeze()
    return (m, memory)
def build(self, statistics, timeout=None, gather_statistics=True):
    """Builds a state-machine (that is used during running).

    :param statistics: dictionary that (when ``gather_statistics`` is
        true) is populated with per-state timings and completion counters
        as the machine runs.
    :param timeout: how long the waiter blocks for futures to complete;
        defaults to ``WAITING_TIMEOUT`` when not given.
    :param gather_statistics: whether to track timings/counters at all.
    :returns: a ``(machine, memory)`` tuple where ``machine`` is a frozen
        ``FiniteMachine`` and ``memory`` is the shared ``MachineMemory``
        the reaction functions read and mutate.
    """
    if gather_statistics:
        # FIX: the original assigned ``watches = {}`` twice in a row;
        # the redundant reassignment has been dropped.
        watches = {}
        state_statistics = {}
        statistics['seconds_per_state'] = state_statistics
        for timed_state in TIMED_STATES:
            state_statistics[timed_state.lower()] = 0.0
            watches[timed_state] = timeutils.StopWatch()
        statistics['discarded_failures'] = 0
        statistics['awaiting'] = 0
        statistics['completed'] = 0
        statistics['incomplete'] = 0
    memory = MachineMemory()
    if timeout is None:
        timeout = WAITING_TIMEOUT
    # Cache some local functions/methods...
    do_complete = self._completer.complete
    do_complete_failure = self._completer.complete_failure
    get_atom_intention = self._storage.get_atom_intention

    def do_schedule(next_nodes):
        # Schedule higher-priority nodes first.
        return self._scheduler.schedule(
            sorted(next_nodes,
                   key=lambda node: getattr(node, 'priority', 0),
                   reverse=True))

    def iter_next_atoms(atom=None, apply_deciders=True):
        # Yields and filters and tweaks the next atoms to run...
        maybe_atoms_it = self._analyzer.iter_next_atoms(atom=atom)
        for atom, late_decider in maybe_atoms_it:
            if apply_deciders:
                proceed = late_decider.check_and_affect(self._runtime)
                if proceed:
                    yield atom
            else:
                yield atom

    def resume(old_state, new_state, event):
        # This reaction function just updates the state machines memory
        # to include any nodes that need to be executed (from a previous
        # attempt, which may be empty if never ran before) and any nodes
        # that are now ready to be ran.
        memory.next_up.update(
            iter_utils.unique_seen(
                (self._completer.resume(), iter_next_atoms())))
        return SCHEDULE

    def game_over(old_state, new_state, event):
        # This reaction function is mainly a intermediary delegation
        # function that analyzes the current memory and transitions to
        # the appropriate handler that will deal with the memory values,
        # it is *always* called before the final state is entered.
        if memory.failures:
            return FAILED
        leftover_atoms = iter_utils.count(
            # Avoid activating the deciders, since at this point
            # the engine is finishing and there will be no more further
            # work done anyway...
            iter_next_atoms(apply_deciders=False))
        if leftover_atoms:
            # Ok we didn't finish (either reverting or executing...) so
            # that means we must of been stopped at some point...
            LOG.trace("Suspension determined to have been reacted to"
                      " since (at least) %s atoms have been left in an"
                      " unfinished state", leftover_atoms)
            return SUSPENDED
        elif self._analyzer.is_success():
            return SUCCESS
        else:
            return REVERTED

    def schedule(old_state, new_state, event):
        # This reaction function starts to schedule the memory's next
        # nodes (iff the engine is still runnable, which it may not be
        # if the user of this engine has requested the engine/storage
        # that holds this information to stop or suspend); handles failures
        # that occur during this process safely...
        current_flow_state = self._storage.get_flow_state()
        if current_flow_state == st.RUNNING and memory.next_up:
            not_done, failures = do_schedule(memory.next_up)
            if not_done:
                memory.not_done.update(not_done)
            if failures:
                memory.failures.extend(failures)
            memory.next_up.intersection_update(not_done)
        elif current_flow_state == st.SUSPENDING and memory.not_done:
            # Try to force anything not cancelled to now be cancelled
            # so that the executor that gets it does not continue to
            # try to work on it (if the future execution is still in
            # its backlog, if it's already being executed, this will
            # do nothing).
            memory.cancel_futures()
        return WAIT

    def complete_an_atom(fut):
        # This completes a single atom saving its result in
        # storage and preparing whatever predecessors or successors will
        # now be ready to execute (or revert or retry...); it also
        # handles failures that occur during this process safely...
        atom = fut.atom
        try:
            outcome, result = fut.result()
            do_complete(atom, outcome, result)
            if isinstance(result, failure.Failure):
                retain = do_complete_failure(atom, outcome, result)
                if retain:
                    memory.failures.append(result)
                else:
                    # NOTE(harlowja): avoid making any intention request
                    # to storage unless we are sure we are in DEBUG
                    # enabled logging (otherwise we will call this all
                    # the time even when DEBUG is not enabled, which
                    # would suck...)
                    if LOG.isEnabledFor(logging.DEBUG):
                        intention = get_atom_intention(atom.name)
                        LOG.debug("Discarding failure '%s' (in response"
                                  " to outcome '%s') under completion"
                                  " units request during completion of"
                                  " atom '%s' (intention is to %s)",
                                  result, outcome, atom, intention)
                    if gather_statistics:
                        statistics['discarded_failures'] += 1
            if gather_statistics:
                statistics['completed'] += 1
        except futures.CancelledError:
            # Well it got cancelled, skip doing anything
            # and move on; at a further time it will be resumed
            # and something should be done with it to get it
            # going again.
            return WAS_CANCELLED
        except Exception:
            memory.failures.append(failure.Failure())
            LOG.exception("Engine '%s' atom post-completion"
                          " failed", atom)
            return FAILED_COMPLETING
        else:
            return SUCCESSFULLY_COMPLETED

    def wait(old_state, new_state, event):
        # TODO(harlowja): maybe we should start doing 'yield from' this
        # call sometime in the future, or equivalent that will work in
        # py2 and py3.
        if memory.not_done:
            done, not_done = self._waiter(memory.not_done, timeout=timeout)
            memory.done.update(done)
            memory.not_done = not_done
        return ANALYZE

    def analyze(old_state, new_state, event):
        # This reaction function is responsible for analyzing all nodes
        # that have finished executing/reverting and figuring
        # out what nodes are now ready to be ran (and then triggering those
        # nodes to be scheduled in the future); handles failures that
        # occur during this process safely...
        next_up = set()
        while memory.done:
            fut = memory.done.pop()
            # Force it to be completed so that we can ensure that
            # before we iterate over any successors or predecessors
            # that we know it has been completed and saved and so on...
            completion_status = complete_an_atom(fut)
            if (not memory.failures
                    and completion_status != WAS_CANCELLED):
                atom = fut.atom
                try:
                    more_work = set(iter_next_atoms(atom=atom))
                except Exception:
                    memory.failures.append(failure.Failure())
                    LOG.exception("Engine '%s' atom post-completion"
                                  " next atom searching failed", atom)
                else:
                    next_up.update(more_work)
        current_flow_state = self._storage.get_flow_state()
        if (current_flow_state == st.RUNNING
                and next_up and not memory.failures):
            memory.next_up.update(next_up)
            return SCHEDULE
        elif memory.not_done:
            if current_flow_state == st.SUSPENDING:
                memory.cancel_futures()
            return WAIT
        else:
            return FINISH

    def on_exit(old_state, event):
        LOG.trace("Exiting old state '%s' in response to event '%s'",
                  old_state, event)
        if gather_statistics:
            if old_state in watches:
                w = watches[old_state]
                w.stop()
                state_statistics[old_state.lower()] += w.elapsed()
            if old_state in (st.SCHEDULING, st.WAITING):
                statistics['incomplete'] = len(memory.not_done)
            if old_state in (st.ANALYZING, st.SCHEDULING):
                statistics['awaiting'] = len(memory.next_up)

    def on_enter(new_state, event):
        LOG.trace("Entering new state '%s' in response to event '%s'",
                  new_state, event)
        if gather_statistics and new_state in watches:
            watches[new_state].restart()

    state_kwargs = {
        'on_exit': on_exit,
        'on_enter': on_enter,
    }
    # Wire up the machine: states first, then the legal transitions and
    # the reaction callbacks that decide each next event.
    m = machines.FiniteMachine()
    m.add_state(GAME_OVER, **state_kwargs)
    m.add_state(UNDEFINED, **state_kwargs)
    m.add_state(st.ANALYZING, **state_kwargs)
    m.add_state(st.RESUMING, **state_kwargs)
    m.add_state(st.REVERTED, terminal=True, **state_kwargs)
    m.add_state(st.SCHEDULING, **state_kwargs)
    m.add_state(st.SUCCESS, terminal=True, **state_kwargs)
    m.add_state(st.SUSPENDED, terminal=True, **state_kwargs)
    m.add_state(st.WAITING, **state_kwargs)
    m.add_state(st.FAILURE, terminal=True, **state_kwargs)
    m.default_start_state = UNDEFINED
    m.add_transition(GAME_OVER, st.REVERTED, REVERTED)
    m.add_transition(GAME_OVER, st.SUCCESS, SUCCESS)
    m.add_transition(GAME_OVER, st.SUSPENDED, SUSPENDED)
    m.add_transition(GAME_OVER, st.FAILURE, FAILED)
    m.add_transition(UNDEFINED, st.RESUMING, START)
    m.add_transition(st.ANALYZING, GAME_OVER, FINISH)
    m.add_transition(st.ANALYZING, st.SCHEDULING, SCHEDULE)
    m.add_transition(st.ANALYZING, st.WAITING, WAIT)
    m.add_transition(st.RESUMING, st.SCHEDULING, SCHEDULE)
    m.add_transition(st.SCHEDULING, st.WAITING, WAIT)
    m.add_transition(st.WAITING, st.ANALYZING, ANALYZE)
    m.add_reaction(GAME_OVER, FINISH, game_over)
    m.add_reaction(st.ANALYZING, ANALYZE, analyze)
    m.add_reaction(st.RESUMING, START, resume)
    m.add_reaction(st.SCHEDULING, SCHEDULE, schedule)
    m.add_reaction(st.WAITING, WAIT, wait)
    m.freeze()
    return (m, memory)