def _ensure_task(self, task_name, task_version, result_mapping):
    """Ensure a taskdetail record exists for the named task.

    Creates a new (PENDING) task detail when one is not already known,
    records the provided result mapping for the task, and hands back the
    uuid of the task detail that corresponds to the given task name.

    Raises ``exceptions.Duplicate`` when an atom detail with the same
    name already exists but is not a task detail.
    """
    if not task_name:
        raise ValueError("Task name must be non-empty")
    with self._lock.write_lock():
        # Membership check is safe here since the write lock serializes
        # all mutations of the name -> uuid mapping.
        if task_name in self._atom_name_to_uuid:
            task_id = self._atom_name_to_uuid[task_name]
            existing = self._flowdetail.find(task_id)
            if not isinstance(existing, logbook.TaskDetail):
                raise exceptions.Duplicate(
                    "Atom detail %s already exists in flow detail %s."
                    % (task_name, self._flowdetail.name))
        else:
            task_id = uuidutils.generate_uuid()
            self._create_atom_detail(logbook.TaskDetail, task_name,
                                     task_id, task_version)
        self._set_result_mapping(task_name, result_mapping)
    return task_id
def _ensure_retry(self, retry_name, retry_version, result_mapping):
    """Ensure a retrydetail record exists for the named retry.

    Creates a new (PENDING) retry detail when one is not already known
    (its results/failure history start out empty), records the provided
    result mapping for the retry, and hands back the uuid of the retry
    detail that corresponds to the given retry name.

    Raises ``exceptions.Duplicate`` when an atom detail with the same
    name already exists but is not a retry detail.
    """
    if not retry_name:
        raise ValueError("Retry name must be non-empty")
    with self._lock.write_lock():
        # Membership check is safe here since the write lock serializes
        # all mutations of the name -> uuid mapping.
        if retry_name in self._atom_name_to_uuid:
            retry_id = self._atom_name_to_uuid[retry_name]
            existing = self._flowdetail.find(retry_id)
            if not isinstance(existing, logbook.RetryDetail):
                raise exceptions.Duplicate(
                    "Atom detail %s already exists in flow detail %s."
                    % (retry_name, self._flowdetail.name))
        else:
            retry_id = uuidutils.generate_uuid()
            self._create_atom_detail(logbook.RetryDetail, retry_name,
                                     retry_id, retry_version)
        self._set_result_mapping(retry_name, result_mapping)
    return retry_id
def add_state(self, state, terminal=False, on_enter=None, on_exit=None):
    """Register a new state with this state machine.

    When given, the on_enter/on_exit callbacks must each accept two
    positional arguments: the state being entered (for on_enter) or the
    state being exited (for on_exit), and the event whose processing
    triggered the transition.
    """
    if self.frozen:
        raise FrozenMachine()
    if state in self._states:
        raise excp.Duplicate("State '%s' already defined" % state)
    # Validate callbacks up-front so a half-registered state is never left
    # behind.
    if on_enter is not None and not six.callable(on_enter):
        raise ValueError("On enter callback must be callable")
    if on_exit is not None and not six.callable(on_exit):
        raise ValueError("On exit callback must be callable")
    self._states[state] = dict(terminal=bool(terminal),
                               reactions={},
                               on_enter=on_enter,
                               on_exit=on_exit)
    self._transitions[state] = OrderedDict()
def add_reaction(self, state, event, reaction, *args, **kwargs):
    """Register a reaction triggered by the given event in the given state.

    Depending on how the machine is run, a reaction callback fires after an
    event has been processed (and a transition has occurred) so the machine
    can respond to its newly reached stable state. The callback receives
    three positional arguments -- the previous stable state, the new stable
    state, and the event that caused the transition -- plus any extra
    ``*args``/``**kwargs`` supplied here. It is expected to return a new
    event for the machine to react to; processing of such events typically
    repeats until a terminal state is reached.
    """
    if self.frozen:
        raise FrozenMachine()
    if state not in self._states:
        raise excp.NotFound("Can not add a reaction to event '%s' for an"
                            " undefined state '%s'" % (event, state))
    if not six.callable(reaction):
        raise ValueError("Reaction callback must be callable")
    reactions = self._states[state]['reactions']
    if event in reactions:
        raise excp.Duplicate("State '%s' reaction to event '%s'"
                             " already defined" % (state, event))
    reactions[event] = (reaction, args, kwargs)
def add_reaction(self, state, event, reaction, *args, **kwargs):
    """Register a reaction triggered by the given event in the given state.

    :param state: the last stable state expressed
    :type state: string
    :param event: event that caused the transition
    :param args: non-keyworded arguments
    :type args: list
    :param kwargs: key-value pair arguments
    :type kwargs: dictionary

    Depending on how the machine is run, a reaction callback fires after
    an event has been processed (and a transition has occurred) so the
    machine can respond to its newly reached stable state. The callback
    is expected to return a new event for the machine to react to; such
    events are typically processed repeatedly until a terminal state is
    reached.
    """
    if state not in self._states:
        raise excp.NotFound("Can not add a reaction to event '%s' for an"
                            " undefined state '%s'" % (event, state))
    if not six.callable(reaction):
        raise ValueError("Reaction callback must be callable")
    reactions = self._states[state]['reactions']
    if event in reactions:
        raise excp.Duplicate("State '%s' reaction to event '%s'"
                             " already defined" % (state, event))
    reactions[event] = (reaction, args, kwargs)
def add_state(self, state, terminal=False, on_enter=None, on_exit=None):
    """Adds a given state to the state machine.

    :param state: the state to add
    :type state: string
    :param terminal: whether the state is a terminal state
    :param on_enter: callback, if provided will be expected to take two
                     positional parameters, these being the state being
                     entered and the second parameter is the event that
                     is being processed that caused the state transition
    :param on_exit: callback, if provided will be expected to take two
                    positional parameters, these being the state being
                    exited and the second parameter is the event that is
                    being processed that caused the state transition

    :raises excp.Duplicate: if the state is already defined
    :raises ValueError: if a provided callback is not callable
    """
    # NOTE(review): unlike other add_state variants, there is no frozen
    # machine guard here -- confirm whether this is intentional.
    if state in self._states:
        raise excp.Duplicate("State '%s' already defined" % state)
    if on_enter is not None:
        if not six.callable(on_enter):
            raise ValueError("On enter callback must be callable")
    if on_exit is not None:
        if not six.callable(on_exit):
            raise ValueError("On exit callback must be callable")
    self._states[state] = {
        'terminal': bool(terminal),
        'reactions': {},
        'on_enter': on_enter,
        'on_exit': on_exit,
    }
    self._transitions[state] = OrderedDict()
def ensure_atoms(self, atoms):
    """Ensure there is an atomdetail for **each** of the given atoms.

    Returns list of atomdetail uuids for each atom processed (in the same
    order as the given atoms).

    :raises TypeError: when an atom does not match any known matcher
    :raises ValueError: when an atom has an empty name
    :raises exceptions.Duplicate: when an atom detail with the same name
                                  but a different detail type already
                                  exists in the flow detail
    """
    atom_ids = []
    missing_ads = []
    # Phase one: resolve atoms that already have details, remembering the
    # (index, atom, detail class) of those that still need creation.
    for i, atom in enumerate(atoms):
        match = misc.match_type(atom, self._ensure_matchers)
        if not match:
            raise TypeError("Unknown atom '%s' (%s) requested to ensure"
                            % (atom, type(atom)))
        atom_detail_cls, kind = match
        atom_name = atom.name
        if not atom_name:
            raise ValueError("%s name must be non-empty" % (kind))
        try:
            atom_id = self._atom_name_to_uuid[atom_name]
        except KeyError:
            missing_ads.append((i, atom, atom_detail_cls))
            # This will be later replaced with the uuid that is created...
            atom_ids.append(None)
        else:
            ad = self._flowdetail.find(atom_id)
            if not isinstance(ad, atom_detail_cls):
                raise exceptions.Duplicate(
                    "Atom detail '%s' already exists in flow"
                    " detail '%s'" % (atom_name, self._flowdetail.name))
            else:
                atom_ids.append(ad.uuid)
                self._set_result_mapping(atom_name, atom.save_as)
    # Phase two: create details for the missing atoms, persist them via a
    # clone of the flow detail, then back-fill the placeholder uuids.
    if missing_ads:
        needs_to_be_created_ads = []
        for (i, atom, atom_detail_cls) in missing_ads:
            ad = self._create_atom_detail(
                atom.name, atom_detail_cls,
                atom_version=misc.get_version_string(atom))
            needs_to_be_created_ads.append((i, atom, ad))
        # Add the atom detail(s) to a clone, which upon success will be
        # updated into the contained flow detail; if it does not get saved
        # then no update will happen.
        source, clone = self._fetch_flowdetail(clone=True)
        for (_i, _atom, ad) in needs_to_be_created_ads:
            clone.add(ad)
        self._with_connection(self._save_flow_detail, source, clone)
        # Insert the needed data, and get outta here...
        for (i, atom, ad) in needs_to_be_created_ads:
            atom_name = atom.name
            atom_ids[i] = ad.uuid
            self._atom_name_to_uuid[atom_name] = ad.uuid
            self._set_result_mapping(atom_name, atom.save_as)
            # Newly created atoms start with an empty failure history;
            # NOTE(review): pre-existing atoms are not seeded here --
            # presumably they already have an entry; confirm.
            self._failures.setdefault(atom_name, {})
    return atom_ids
def _post_compile(self, graph, node):
    """Runs after the root has been compiled without errors.

    Rejects graphs containing duplicate atom names or no nodes at all,
    then resets the compilation history.
    """
    duplicates = misc.get_duplicate_keys(graph.nodes_iter(),
                                         key=lambda n: n.name)
    if duplicates:
        raise exc.Duplicate(
            "Atoms with duplicate names found: %s" % (sorted(duplicates)))
    if not graph.number_of_nodes():
        raise exc.Empty("Root container '%s' (%s) is empty"
                        % (self._root, type(self._root)))
    self._history.clear()
def save(self, session=None):
    """Save this object by adding and flushing it on a session.

    :param session: database session to use; when falsy, a new session
                    is obtained from ``sql_session.get_session()``
    :raises exception.Duplicate: when the flush violates a uniqueness
                                 constraint (the backend reports
                                 "... is not unique")
    :raises IntegrityError: any other integrity failure is re-raised
                            untouched
    """
    if not session:
        session = sql_session.get_session()
    session.add(self)
    try:
        session.flush()
    # Fixed: original used Python 2-only `except IntegrityError, e:`
    # syntax, which is a SyntaxError on Python 3.
    except IntegrityError as e:
        if str(e).endswith('is not unique'):
            # Translate uniqueness violations into the application-level
            # duplicate error so callers need not know about sqlalchemy.
            raise exception.Duplicate(str(e))
        raise
def _check_compilation(compilation):
    """Validate a finished compilation (no duplicate atom names allowed)."""
    seen = set()
    dups = set()
    for node, node_attrs in compilation.execution_graph.nodes_iter(data=True):
        # Only atom nodes participate in the name-uniqueness check.
        if node_attrs['kind'] not in compiler.ATOMS:
            continue
        atom_name = node.name
        if atom_name in seen:
            dups.add(atom_name)
        else:
            seen.add(atom_name)
    if dups:
        raise exc.Duplicate("Atoms with duplicate names found: %s"
                            % (sorted(dups)))
    return compilation
def _post_compile(self, graph, node):
    """Runs after the root has been compiled without errors.

    Rejects graphs whose atom nodes contain duplicate names or that
    contain no atom nodes at all, then resets the compilation history.
    """
    # Collect the atom-kind nodes once; they feed both checks below.
    atom_nodes = [n for n, n_attrs in graph.nodes_iter(data=True)
                  if n_attrs['kind'] in ATOMS]
    duplicates = misc.get_duplicate_keys(atom_nodes, key=lambda n: n.name)
    if duplicates:
        raise exc.Duplicate("Atoms with duplicate names found: %s"
                            % (sorted(duplicates)))
    if not atom_nodes:
        raise exc.Empty("Root container '%s' (%s) is empty"
                        % (self._root, type(self._root)))
    self._history.clear()
def _exc_wrapper(self):
    """Exception context-manager which wraps kazoo exceptions.

    This is used to capture and wrap any kazoo specific exceptions and
    then group them into corresponding taskflow exceptions (not doing
    that would expose the underlying kazoo exception model).

    NOTE(review): this is a generator (it yields) -- presumably it is
    decorated with ``contextlib.contextmanager`` at the definition site;
    confirm.

    :raises exc.StorageFailure: on timeouts, expired sessions, or any
                                other kazoo/zookeeper internal error
    :raises exc.NotFound: when the backend reports a missing node
    :raises exc.Duplicate: when the backend reports an existing node
    """
    # Clause ordering matters: the specific kazoo errors must be matched
    # before the broad KazooException/ZookeeperError catch-all below.
    try:
        yield
    # The handler's timeout exception type is only known at runtime.
    except self._client.handler.timeout_exception as e:
        raise exc.StorageFailure("Storage backend timeout", e)
    except k_exc.SessionExpiredError as e:
        raise exc.StorageFailure("Storage backend session has expired", e)
    except k_exc.NoNodeError as e:
        raise exc.NotFound("Storage backend node not found: %s" % e)
    except k_exc.NodeExistsError as e:
        raise exc.Duplicate("Storage backend duplicate node: %s" % e)
    # Anything else kazoo/zookeeper-specific becomes a generic storage
    # failure so callers never see the kazoo exception model.
    except (k_exc.KazooException, k_exc.ZookeeperError) as e:
        raise exc.StorageFailure("Storage backend internal error", e)
def _post_flatten(self, graph, node):
    """Runs after the root has been flattened without errors.

    Rejects graphs with duplicate atom names or no nodes, clears the
    flattening history, and (when BLATHER logging is on) dumps the
    resulting graph and hierarchy.
    """
    duplicates = misc.get_duplicate_keys(graph.nodes_iter(),
                                         key=lambda n: n.name)
    if duplicates:
        raise exc.Duplicate("Atoms with duplicate names found: %s"
                            % (sorted(duplicates)))
    if not graph.number_of_nodes():
        raise exc.Empty("Root container '%s' (%s) is empty"
                        % (self._root, type(self._root)))
    self._history.clear()
    # NOTE(harlowja): formatting the graph (especially the cycle
    # detection involved) can be expensive, so skip it entirely unless
    # BLATHER logging is actually enabled.
    if LOG.isEnabledFor(logging.BLATHER):
        LOG.blather("Translated '%s'", self._root)
        LOG.blather("Graph:")
        for graph_line in graph.pformat().splitlines():
            # Indent the dump so it is slightly offset from the heading.
            LOG.blather(" %s", graph_line)
        LOG.blather("Hierarchy:")
        for tree_line in node.pformat().splitlines():
            # Indent the dump so it is slightly offset from the heading.
            LOG.blather(" %s", tree_line)