def _set_triggers(
    self,
    name: str,
    suicide: bool,
    trigs: List[str],
    expr: str,
    orig_expr: str
) -> None:
    """Record parsed triggers.

    Args:
        name: task name
        suicide: whether this is a suicide trigger or not
        trigs: parsed trigger info
        expr: the associated graph expression
        orig_expr: the original associated graph expression

    """
    # Check suicide triggers
    with contextlib.suppress(KeyError):
        osuicide = self.triggers[name][expr][1]
        # This trigger already exists, so we must have both
        # "expr => member" and "expr => !member" in the graph,
        # or simply a duplicate trigger not recognized earlier
        # because of parameter offsets.
        if not expr:
            pass
        elif suicide is not osuicide:
            oexp = re.sub(r'(&|\|)', r' \1 ', orig_expr)
            oexp = re.sub(r':succeeded', '', oexp)
            raise GraphParseError(
                f"{oexp} can't trigger both {name} and !{name}")

    # Record triggers
    self.triggers.setdefault(name, {})
    self.triggers[name][expr] = (trigs, suicide)
    self.original.setdefault(name, {})
    self.original[name][expr] = orig_expr

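# Standalone sketch (not part of the module): a simplified illustration of how
# recording the suicide flag per (task, expression) detects "expr => foo" vs
# "expr => !foo" conflicts, as _set_triggers above does. The dict and the
# "record" helper below are illustrative names, not part of the parser.
triggers: dict = {}

def record(name, expr, trigs, suicide):
    existing = triggers.get(name, {}).get(expr)
    if existing is not None and expr and existing[1] is not suicide:
        raise ValueError(f"{expr} can't trigger both {name} and !{name}")
    triggers.setdefault(name, {})[expr] = (trigs, suicide)

record("bar", "foo:succeeded", ["foo:succeeded"], False)
# record("bar", "foo:succeeded", ["foo:succeeded"], True)  # would raise
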
def parse(self, node):
    """Parse graph node, and cache the result.

    Args:
        node (str): node to parse

    Return:
        tuple: (name, offset_is_from_icp, offset_is_irregular, offset,
            output)

    Raise:
        GraphParseError: on illegal syntax.

    """
    if node not in self._nodes:
        match = self.REC_NODE.match(node)
        if not match:
            raise GraphParseError('Illegal graph node: %s' % node)
        name, offset_is_from_icp, offset, output = match.groups()
        if offset_is_from_icp and not offset:
            offset = self._get_offset()
        offset_is_irregular = False
        if offset:
            if self.REC_IRREGULAR_OFFSET.search(offset):
                offset_is_irregular = True
            else:
                offset = self._get_offset(offset)
        self._nodes[node] = (
            name, offset_is_from_icp, offset_is_irregular, offset, output)
    return self._nodes[node]

def parse(self, node):
    """Parse graph node, and cache the result.

    Args:
        node (str): node to parse

    Return:
        tuple: (name, offset, output, offset_is_from_icp,
            offset_is_irregular, offset_is_absolute)

        NOTE that offsets from ICP like foo[^] and foo[^+P1] are not
        considered absolute like foo[2] etc.

    Raise:
        GraphParseError: on illegal syntax.

    """
    if node not in self._nodes:
        match = self.REC_NODE.match(node)
        if not match:
            raise GraphParseError('Illegal graph node: %s' % node)
        name, icp_mark, offset, output = match.groups()
        offset_is_from_icp = (icp_mark == '^')  # convert to boolean
        if offset_is_from_icp and not offset:
            offset = self._get_offset()
        offset_is_irregular = False
        offset_is_absolute = False
        if offset:
            if is_offset_absolute(offset):
                offset_is_absolute = True
            if self.REC_IRREGULAR_OFFSET.search(offset):
                offset_is_irregular = True
            else:
                offset = self._get_offset(offset)
        self._nodes[node] = (
            name, offset, output,
            offset_is_from_icp, offset_is_irregular, offset_is_absolute)
    return self._nodes[node]

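# Standalone sketch (not part of the module): a simplified version of the
# node-parsing-with-cache pattern used by parse() above. The regex here is
# illustrative and much looser than the real REC_NODE pattern; the helper
# and cache names are hypothetical.
import re
from typing import Dict, Tuple

_SKETCH_REC_NODE = re.compile(
    r'^([\w\-]+)(?:\[([^\]]*)\])?(?::([\w\-]+))?$')
_sketch_cache: Dict[str, Tuple[str, str, str]] = {}

def sketch_parse(node: str) -> Tuple[str, str, str]:
    """Return (name, offset, output), caching repeated lookups."""
    if node not in _sketch_cache:
        match = _SKETCH_REC_NODE.match(node)
        if not match:
            raise ValueError(f'Illegal graph node: {node}')
        name, offset, output = match.groups()
        _sketch_cache[node] = (name, offset or '', output or '')
    return _sketch_cache[node]

# e.g. sketch_parse('foo[-P1D]:succeeded') -> ('foo', '-P1D', 'succeeded')
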
def _report_invalid_lines(cls, lines):
    """Raise GraphParseError in a consistent format when there are
    lines with bad syntax.

    The list of bad lines is inserted into the error message to show
    exactly what lines have problems. The correct syntax of graph lines
    is displayed to direct people on the correct path.

    Keyword Arguments:
        lines -- a list of bad graph lines to be reported

    Raises:
        GraphParseError -- always. This is the sole purpose of this
            method

    """
    raise GraphParseError(
        "bad graph node format:\n"
        " " + "\n ".join(lines) + "\n"
        "Correct format is:\n"
        " @ACTION or "
        " NAME(<PARAMS>)([CYCLE-POINT-OFFSET])(:TRIGGER-TYPE)\n"
        " {NAME(<PARAMS>) can also be: "
        "<PARAMS>NAME or NAME<PARAMS>NAME_CONTINUED}\n"
        " or\n"
        " NAME(<REMOTE-SUITE-TRIGGER>)(:TRIGGER-TYPE)")

def _proc_dep_pair(self, left, right):
    """Process a single dependency pair 'left => right'.

    'left' can be a logical expression of qualified node names.
    'right' can be one or more node names joined by AND.
    A node is an xtrigger, or a task or a family name.
    A qualified name is NAME([CYCLE-POINT-OFFSET])(:TRIGGER-TYPE).
    Trigger qualifiers, but not cycle offsets, are ignored on the right
    to allow chaining.
    """
    # Raise error for right-hand-side OR operators.
    if right and self.__class__.OP_OR in right:
        raise GraphParseError("illegal OR on RHS: %s" % right)

    # Remove qualifiers from right-side nodes.
    if right:
        for qual in self.__class__.REC_TRIG_QUAL.findall(right):
            right = right.replace(qual, '')

    # Raise error if suicide triggers on the left of the trigger.
    if left and self.__class__.SUICIDE_MARK in left:
        raise GraphParseError(
            "suicide markers must be"
            " on the right of a trigger: %s" % left)

    # Cycle point offsets are not allowed on the right side (yet).
    if right and '[' in right:
        raise GraphParseError(
            "illegal cycle point offset on the right: %s => %s" % (
                left, right))

    # Check that parentheses match.
    if left and left.count("(") != left.count(")"):
        raise GraphParseError(
            "parenthesis mismatch in: \"" + left + "\"")

    # Split right side on AND.
    rights = right.split(self.__class__.OP_AND)
    if '' in rights or right and not all(rights):
        raise GraphParseError(
            "null task name in graph: %s => %s" % (left, right))

    if not left or (self.__class__.OP_OR in left or '(' in left):
        # Treat conditional or bracketed expressions as a single entity.
        lefts = [left]
    else:
        # Split non-conditional left-side expressions on AND.
        lefts = left.split(self.__class__.OP_AND)
    if '' in lefts or left and not all(lefts):
        raise GraphParseError(
            "null task name in graph: %s => %s" % (left, right))

    for left in lefts:
        # Extract information about all nodes on the left.
        if left:
            info = self.__class__.REC_NODES.findall(left)
            expr = left
        else:
            # There is no left-hand-side task.
            info = []
            expr = ''

        # Make success triggers explicit.
        n_info = []
        for name, offset, trig in info:
            if not trig and not name.startswith('@'):
                # (Avoiding @trigger nodes.)
                trig = self.__class__.TRIG_SUCCEED
                if offset:
                    this = r'\b%s\b%s(?!:)' % (
                        re.escape(name), re.escape(offset))
                else:
                    this = r'\b%s\b(?![\[:])' % re.escape(name)
                that = name + offset + trig
                expr = re.sub(this, that, expr)
            n_info.append((name, offset, trig))
        info = n_info

        # Determine semantics of all family triggers present.
        family_trig_map = {}
        for name, offset, trig in info:
            if name.startswith('@'):
                # (Avoiding @trigger nodes.)
                continue
            if name in self.family_map:
                if trig.endswith(self.__class__.FAM_TRIG_EXT_ANY):
                    ttype = trig[:-self.__class__.LEN_FAM_TRIG_EXT_ANY]
                    ext = self.__class__.FAM_TRIG_EXT_ANY
                elif trig.endswith(self.__class__.FAM_TRIG_EXT_ALL):
                    ttype = trig[:-self.__class__.LEN_FAM_TRIG_EXT_ALL]
                    ext = self.__class__.FAM_TRIG_EXT_ALL
                else:
                    # Unqualified (FAM => foo) or bad (FAM:bad => foo).
                    raise GraphParseError(
                        "bad family trigger in %s" % expr)
                family_trig_map[(name, trig)] = (ttype, ext)
            else:
                if (trig.endswith(self.__class__.FAM_TRIG_EXT_ANY) or
                        trig.endswith(self.__class__.FAM_TRIG_EXT_ALL)):
                    raise GraphParseError(
                        "family trigger on non-family namespace %s" % expr)

        self._families_all_to_all(expr, rights, info, family_trig_map)

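# Standalone sketch (not part of the module): how an implicit success trigger
# is made explicit in a left-hand expression, using the same kind of
# word-boundary substitution as above. The expression and node list below are
# illustrative values only.
import re

expr = "foo & bar[-P1D]"
for name, offset in [("foo", ""), ("bar", "[-P1D]")]:
    if offset:
        this = r'\b%s\b%s(?!:)' % (re.escape(name), re.escape(offset))
    else:
        this = r'\b%s\b(?![\[:])' % re.escape(name)
    expr = re.sub(this, name + offset + ":succeed", expr)

print(expr)  # foo:succeed & bar[-P1D]:succeed
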
def parse_graph(self, graph_string):
    """Parse the graph string for a single graph section.

    (Assumes any general line-continuation markers have been processed).
       1. Strip comments, whitespace, and blank lines.
          (all whitespace is removed up front so we don't have to
          consider it in regexes and strip it from matched elements)
       2. Join incomplete lines starting or ending with '=>'.
       3. Replicate and expand any parameterized lines.
       4. Split and process by pairs "left-expression => right-node":
          i. Replace families with members (any or all semantics).
          ii. Record parsed dependency information for each right-side
              node.
    """
    # Strip comments, whitespace, and blank lines.
    non_blank_lines = []
    bad_lines = []
    for line in graph_string.split('\n'):
        modified_line = self.__class__.REC_COMMENT.sub('', line)

        # Ignore empty lines
        if not modified_line or modified_line.isspace():
            continue

        # Catch simple bad lines that would be accepted once
        # spaces are removed, e.g. 'foo bar => baz'
        if self.REC_GRAPH_BAD_SPACES_LINE.search(modified_line):
            bad_lines.append(line)
            continue

        # Apparently this is the fastest way to strip all whitespace!:
        modified_line = "".join(modified_line.split())
        non_blank_lines.append(modified_line)

    # Check if there were problem lines and abort
    if bad_lines:
        self._report_invalid_lines(bad_lines)

    # Join incomplete lines (beginning or ending with an arrow).
    full_lines = []
    part_lines = []
    for i, _ in enumerate(non_blank_lines):
        this_line = non_blank_lines[i]
        if i == 0:
            # First line can't start with an arrow.
            if this_line.startswith(ARROW):
                raise GraphParseError("leading arrow: %s" % this_line)
        try:
            next_line = non_blank_lines[i + 1]
        except IndexError:
            next_line = ''
            if this_line.endswith(ARROW):
                # Last line can't end with an arrow.
                raise GraphParseError("trailing arrow: %s" % this_line)
        part_lines.append(this_line)
        if (this_line.endswith(ARROW) or next_line.startswith(ARROW)):
            continue
        full_line = ''.join(part_lines)

        # Record inter-suite dependence and remove the marker notation.
        # ("foo<SUITE::TASK:fail> => bar" becomes "foo => bar").
        repl = Replacement('\\1')
        full_line = self.__class__.REC_SUITE_STATE.sub(repl, full_line)
        for item in repl.match_groups:
            l_task, r_all, r_suite, r_task, r_status = item
            if r_status:
                r_status = r_status[1:]
            else:
                r_status = self.__class__.TRIG_SUCCEED[1:]
            self.suite_state_polling_tasks[l_task] = (
                r_suite, r_task, r_status, r_all)
        full_lines.append(full_line)
        part_lines = []

    # Check for double-char conditional operators (a common mistake),
    # and bad node syntax (order of qualifiers).
    bad_lines = []
    for line in full_lines:
        if self.__class__.OP_AND_ERR in line:
            raise GraphParseError(
                "the graph AND operator is '%s': %s" % (
                    self.__class__.OP_AND, line))
        if self.__class__.OP_OR_ERR in line:
            raise GraphParseError(
                "the graph OR operator is '%s': %s" % (
                    self.__class__.OP_OR, line))
        # Check node syntax. First drop all non-node characters.
        node_str = line
        for s in ['=>', '|', '&', '(', ')', '!']:
            node_str = node_str.replace(s, ' ')
        # Drop all valid @triggers, longest first to avoid sub-strings.
        nodes = self.__class__.REC_ACTION.findall(node_str)
        nodes.sort(key=len, reverse=True)
        for node in nodes:
            node_str = node_str.replace(node, '')
        # Then drop all valid nodes, longest first to avoid sub-strings.
        bad_lines = [
            node_str for node in node_str.split()
            if self.__class__.REC_NODE_FULL.sub('', node, 1)
        ]
    if bad_lines:
        self._report_invalid_lines(bad_lines)

    # Expand parameterized lines (or detect undefined parameters).
    line_set = set()
    graph_expander = GraphExpander(self.parameters)
    for line in full_lines:
        if not self.__class__.REC_PARAMS.search(line):
            line_set.add(line)
            continue
        for l in graph_expander.expand(line):
            line_set.add(l)

    # Process chains of dependencies as pairs: left => right.
    # Parameterization can duplicate some dependencies, so use a set.
    pairs = set()
    for line in line_set:
        # "foo => bar => baz" becomes [foo, bar, baz]
        chain = line.split(ARROW)
        # Auto-trigger lone nodes and initial nodes in a chain.
        for name, offset, _ in self.__class__.REC_NODES.findall(chain[0]):
            if not offset and not name.startswith('@'):
                pairs.add((None, name))
        for i in range(0, len(chain) - 1):
            pairs.add((chain[i], chain[i + 1]))

    for pair in pairs:
        self._proc_dep_pair(pair[0], pair[1])

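# Standalone sketch (not part of the module): how a chain of dependencies is
# reduced to (left, right) pairs, with a (None, node) pair used to
# auto-trigger the initial node. The line and names below are illustrative.
ARROW = '=>'

line = "foo=>bar=>baz"           # whitespace already stripped upstream
chain = line.split(ARROW)        # ['foo', 'bar', 'baz']

pairs = set()
pairs.add((None, chain[0]))      # auto-trigger the first node
for i in range(len(chain) - 1):
    pairs.add((chain[i], chain[i + 1]))

print(pairs)
# {(None, 'foo'), ('foo', 'bar'), ('bar', 'baz')}  (set order may vary)
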
def _compute_triggers(
    self,
    orig_expr: str,
    rights: List[str],
    expr: str,
    info: List[Tuple[str, str, str]]
) -> None:
    """Store trigger info from "expr => right".

    Args:
        orig_expr: the original associated graph expression
        rights: list of right-side nodes including qualifiers like :fail?
        expr: the associated graph expression
        info: [(name, offset, trigger-name)] for each name in expr.

    """
    trigs = []
    for name, offset, trigger in info:
        # Replace finish triggers (must be done after member substn).
        if name.startswith(self.__class__.XTRIG):
            trigs += [name]
        elif trigger == TASK_OUTPUT_FINISHED:
            this = f"{name}{offset}:{trigger}"
            that = "(%s%s:%s%s%s%s:%s)" % (
                name, offset, TASK_OUTPUT_SUCCEEDED,
                self.__class__.OP_OR,
                name, offset, TASK_OUTPUT_FAILED)
            expr = expr.replace(this, that)
            trigs += [
                "%s%s:%s" % (name, offset, TASK_OUTPUT_SUCCEEDED),
                "%s%s:%s" % (name, offset, TASK_OUTPUT_FAILED)
            ]
        else:
            trigs += [f"{name}{offset}:{trigger}"]

    for right in rights:
        m = self.__class__.REC_RHS_NODE.match(right)
        # This will match, bad nodes are detected earlier (type ignore):
        suicide_char, name, output, opt_char = m.groups()  # type: ignore
        suicide = (suicide_char == self.__class__.SUICIDE)
        optional = (opt_char == self.__class__.OPTIONAL)
        if output:
            output = output.strip(self.__class__.QUALIFIER)

        if name in self.family_map:
            fam = True
            mems = self.family_map[name]
            if not output:
                # (Plain family name on RHS).
                # Make implicit success explicit.
                output = self.__class__.QUAL_FAM_SUCCEED_ALL
            elif output.startswith("finish"):
                if optional:
                    raise GraphParseError(
                        f"Family pseudo-output {name}:{output} can't be"
                        " optional")
                # But implicit optional for the real succeed/fail outputs.
                optional = True
            try:
                outputs = self.__class__.fam_to_mem_output_map[output]
            except KeyError:
                # Illegal family trigger on RHS of a pair.
                raise GraphParseError(
                    f"Illegal family trigger: {name}:{output}")
        else:
            fam = False
            if not output:
                # Make implicit success explicit.
                output = TASK_OUTPUT_SUCCEEDED
            else:
                # Convert to standard output names if necessary.
                output = TaskTrigger.standardise_name(output)
            mems = [name]
            outputs = [output]

        for mem in mems:
            self._set_triggers(mem, suicide, trigs, expr, orig_expr)
            for output in outputs:
                self._set_output_opt(mem, output, optional, suicide, fam)

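# Standalone sketch (not part of the module): how a ":finished" trigger in an
# expression can be rewritten as "(succeeded | failed)", mirroring the
# replacement done in _compute_triggers above. All values are illustrative.
name, offset = "foo", ""
expr = "foo:finished & bar:succeeded"

this = f"{name}{offset}:finished"
that = f"({name}{offset}:succeeded|{name}{offset}:failed)"
expr = expr.replace(this, that)

print(expr)  # (foo:succeeded|foo:failed) & bar:succeeded
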
def _set_output_opt(
    self,
    name: str,
    output: str,
    optional: bool,
    suicide: bool,
    fam_member: bool = False
) -> None:
    """Set or check consistency of optional/required output.

    Args:
        name: task name
        output: task output name
        optional: is the output optional?
        suicide: is this from a suicide trigger?
        fam_member: is this from an expanded family trigger?

    """
    if cylc.flow.flags.cylc7_back_compat:
        # Set all outputs optional (set :succeed required elsewhere).
        self.task_output_opt[(name, output)] = (True, True, True)
        return

    # Do not infer output optionality from suicide triggers:
    if suicide:
        return

    if output == TASK_OUTPUT_FINISHED:
        # Interpret :finish pseudo-output
        if optional:
            raise GraphParseError(
                f"Pseudo-output {name}:{output} can't be optional")
        # But implicit optional for the real succeed/fail outputs.
        optional = True
        for outp in [TASK_OUTPUT_SUCCEEDED, TASK_OUTPUT_FAILED]:
            self._set_output_opt(name, outp, optional, suicide, fam_member)
        return

    try:
        prev_optional, prev_default, prev_fixed = (
            self.task_output_opt[(name, output)])
    except KeyError:
        # Not already set; set it. Fix it if not fam_member.
        self.task_output_opt[(name, output)] = (
            optional, optional, not fam_member)
    else:
        # Already set; check consistency with previous.
        if prev_fixed:
            # optionality fixed already
            if fam_member:
                pass
            else:
                if optional != prev_optional:
                    raise GraphParseError(
                        f"Output {name}:{output} can't be both required"
                        " and optional")
        else:
            # optionality not fixed yet (only family default)
            if fam_member:
                # family defaults must be consistent
                if optional != prev_default:
                    raise GraphParseError(
                        f"Output {name}:{output} can't default to both"
                        " optional and required (via family trigger"
                        " defaults)")
            else:
                # fix the optionality now
                self.task_output_opt[(name, output)] = (
                    optional, prev_default, True)

    # Check opposite output where appropriate.
    for opposites in [
        (TASK_OUTPUT_SUCCEEDED, TASK_OUTPUT_FAILED),
        (TASK_OUTPUT_SUBMITTED, TASK_OUTPUT_SUBMIT_FAILED)
    ]:
        if output not in opposites:
            continue
        succeed, fail = opposites
        opposite = fail if output == succeed else succeed
        try:
            opp_optional, opp_default, opp_fixed = (
                self.task_output_opt[(name, opposite)])
        except KeyError:
            # opposite not set, no need to check
            continue
        else:
            # opposite already set; check consistency
            optional, default, oset = (
                self.task_output_opt[(name, output)])
            msg = (
                f"Opposite outputs {name}:{output} and {name}:"
                f"{opposite} must both be optional if both are used")
            if fam_member or not opp_fixed:
                if not optional or not opp_default:
                    raise GraphParseError(
                        msg + " (via family trigger defaults)")
                elif not optional or not opp_optional:
                    raise GraphParseError(msg + " (via family trigger)")
            elif not optional or not opp_optional:
                raise GraphParseError(msg)

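# Standalone sketch (not part of the module): the basic consistency rule
# enforced above, using a plain dict keyed by (task, output) holding the
# tuple (optional, default_optional, fixed). The helper name is illustrative.
task_output_opt = {}

def set_opt(name, output, optional, fam_member=False):
    key = (name, output)
    if key not in task_output_opt:
        task_output_opt[key] = (optional, optional, not fam_member)
        return
    prev_optional, prev_default, prev_fixed = task_output_opt[key]
    if prev_fixed and not fam_member and optional != prev_optional:
        raise ValueError(
            f"Output {name}:{output} can't be both required and optional")

set_opt("foo", "succeeded", optional=False)   # "foo => ..."   (required)
# set_opt("foo", "succeeded", optional=True)  # "foo? => ..."  would raise
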
def _proc_dep_pair(self, pair: Tuple[Optional[str], str]) -> None:
    """Process a single dependency pair 'left => right'.

    'left' can be a logical expression of qualified node names.
    'left' can be None, when triggering a left-side or lone node.
    'left' can be "", if null task name in graph error (a => => b).
    'right' can be one or more node names joined by AND.
    'right' can't be None or "".
    A node is an xtrigger, or a task or a family name.
    A qualified name is NAME([CYCLE-POINT-OFFSET])(:QUALIFIER).
    Trigger qualifiers, but not cycle offsets, are ignored on the right
    to allow chaining.
    """
    left, right = pair
    # Raise error for right-hand-side OR operators.
    if self.__class__.OP_OR in right:
        raise GraphParseError(f"Illegal OR on right side: {right}")

    # Raise error if suicide triggers on the left of the trigger.
    if left and self.__class__.SUICIDE in left:
        raise GraphParseError(
            "Suicide markers must be"
            f" on the right of a trigger: {left}")

    # Ignore cycle point offsets on the right side.
    # (Note we can't ban this; all nodes get processed as left and right.)
    if '[' in right:
        return

    # Check that parentheses match.
    if left and left.count("(") != left.count(")"):
        raise GraphParseError(
            "Mismatched parentheses in: \"" + left + "\"")

    # Split right side on AND.
    rights = right.split(self.__class__.OP_AND)
    if '' in rights or right and not all(rights):
        raise GraphParseError(
            f"Null task name in graph: {left} => {right}")

    if not left or (self.__class__.OP_OR in left or '(' in left):
        # Treat conditional or bracketed expressions as a single entity.
        # Can get [None] or [""] here
        lefts: List[Optional[str]] = [left]
    else:
        # Split non-conditional left-side expressions on AND.
        # Can get [""] here too
        # TODO figure out how to handle this with mypy:
        # assign List[str] to List[Optional[str]]
        lefts = left.split(self.__class__.OP_AND)  # type: ignore
    if '' in lefts or left and not all(lefts):
        raise GraphParseError(
            f"Null task name in graph: {left} => {right}")

    for left in lefts:
        # Extract information about all nodes on the left.
        if left:
            info = self.__class__.REC_NODES.findall(left)
            expr = left
        else:
            # There is no left-hand-side task.
            info = []
            expr = ''

        n_info: List[Tuple[str, str, str, bool]] = []
        for name, offset, trig, opt_char in info:
            opt = opt_char == self.__class__.OPTIONAL
            if name.startswith(self.__class__.XTRIG):
                n_info.append((name, offset, trig, opt))
                continue
            if trig:
                # Replace with standard trigger name if necessary
                trig = trig.strip(self.__class__.QUALIFIER)
                n_trig = TaskTrigger.standardise_name(trig)
                if n_trig != trig:
                    if offset:
                        this = r'\b%s\b%s:%s(?!:)' % (
                            re.escape(name),
                            re.escape(offset),
                            re.escape(trig))
                    else:
                        this = r'\b%s:%s\b(?![\[:])' % (
                            re.escape(name), re.escape(trig))
                    that = f"{name}{offset}:{n_trig}"
                    expr = re.sub(this, that, expr)
            else:
                # Make success triggers explicit.
                n_trig = TASK_OUTPUT_SUCCEEDED
                if offset:
                    this = r'\b%s\b%s(?!:)' % (
                        re.escape(name), re.escape(offset))
                else:
                    this = r'\b%s\b(?![\[:])' % re.escape(name)
                that = f"{name}{offset}:{n_trig}"
                expr = re.sub(this, that, expr)
            n_info.append((name, offset, n_trig, opt))
        info = n_info

        # Determine semantics of all family triggers present.
        family_trig_map = {}
        for name, _, trig, _ in info:
            if name.startswith(self.__class__.XTRIG):
                # Avoid @xtrigger nodes.
                continue
            if name in self.family_map:
                # Family; deal with members.
                try:
                    family_trig_map[(name, trig)] = (
                        self.__class__.fam_to_mem_trigger_map[trig])
                except KeyError:
                    # "FAM:bad => foo" in LHS (includes "FAM => bar" too).
                    raise GraphParseError(
                        f"Illegal family trigger in {expr}")
            else:
                # Not a family.
                if trig in self.__class__.fam_to_mem_trigger_map:
                    raise GraphParseError(
                        f"family trigger on non-family namespace {expr}")

        # remove '?' from expr (not needed in logical trigger evaluation)
        expr = re.sub(self.__class__._RE_OPT, '', expr)

        self._families_all_to_all(expr, rights, info, family_trig_map)

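# Standalone sketch (not part of the module): the general idea behind a
# family-to-member trigger map like fam_to_mem_trigger_map above. The map
# contents below are hypothetical, purely to illustrate "all members" vs
# "any member" semantics; the real mapping is defined elsewhere in the class.
fam_to_mem_trigger_map = {
    "succeed-all": ("succeeded", "all"),
    "succeed-any": ("succeeded", "any"),
    "fail-all": ("failed", "all"),
    "fail-any": ("failed", "any"),
}

trig = "succeed-any"
member_output, semantics = fam_to_mem_trigger_map[trig]
print(member_output, semantics)  # succeeded any
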
def parse_graph(self, graph_string: str) -> None:
    """Parse the graph string for a single graph section.

    (Assumes any general line-continuation markers have been processed).
       1. Strip comments, whitespace, and blank lines.
          (all whitespace is removed up front so we don't have to
          consider it in regexes and strip it from matched elements)
       2. Join incomplete lines starting or ending with '=>'.
       3. Replicate and expand any parameterized lines.
       4. Split and process by pairs "left-expression => right-node":
          i. Replace families with members (any or all semantics).
          ii. Record parsed dependency information for each right-side
              node.
    """
    # Strip comments, whitespace, and blank lines.
    non_blank_lines = []
    bad_lines = []
    for line in graph_string.split('\n'):
        modified_line = self.__class__.REC_COMMENT.sub('', line)

        # Ignore empty lines
        if not modified_line or modified_line.isspace():
            continue

        # Catch simple bad lines that would be accepted once
        # spaces are removed, e.g. 'foo bar => baz'
        if self.REC_GRAPH_BAD_SPACES_LINE.search(modified_line):
            bad_lines.append(line)
            continue

        # Apparently this is the fastest way to strip all whitespace!:
        modified_line = "".join(modified_line.split())
        non_blank_lines.append(modified_line)

    # Check if there were problem lines and abort
    if bad_lines:
        self._report_invalid_lines(bad_lines)

    # Join incomplete lines (beginning or ending with an arrow).
    full_lines = []
    part_lines = []
    for i, _ in enumerate(non_blank_lines):
        this_line = non_blank_lines[i]
        for seq in self.CONTINUATION_STRS:
            if i == 0 and this_line.startswith(seq):
                # First line can't start with an arrow.
                raise GraphParseError(f"Leading {seq}: {this_line}")
        try:
            next_line = non_blank_lines[i + 1]
        except IndexError:
            next_line = ''
            for seq in self.CONTINUATION_STRS:
                if this_line.endswith(seq):
                    # Last line can't end with an arrow, & or |.
                    raise GraphParseError(
                        f"Dangling {seq}:"
                        f"{this_line}")
        part_lines.append(this_line)

        # Check that a continuation sequence doesn't end this line and
        # begin the next:
        if (this_line.endswith(self.CONTINUATION_STRS) and
                next_line.startswith(self.CONTINUATION_STRS)):
            raise GraphParseError(
                'Consecutive lines end and start with continuation '
                'characters:\n'
                f'{this_line}\n'
                f'{next_line}')

        # Check that line ends with a valid continuation sequence:
        if (any(
                this_line.endswith(seq) or next_line.startswith(seq)
                for seq in self.CONTINUATION_STRS)
                and not (any(
                    this_line.endswith(seq) or next_line.startswith(seq)
                    for seq in self.BAD_STRS))):
            continue

        full_line = ''.join(part_lines)

        # Record inter-workflow dependence and remove the marker notation.
        # ("foo<WORKFLOW::TASK:fail> => bar" becomes "foo => bar").
        repl = Replacement('\\1')
        full_line = self.__class__.REC_WORKFLOW_STATE.sub(repl, full_line)
        for item in repl.match_groups:
            l_task, r_all, r_workflow, r_task, r_status = item
            if r_status:
                r_status = r_status.strip(self.__class__.QUALIFIER)
                r_status = TaskTrigger.standardise_name(r_status)
            else:
                r_status = TASK_OUTPUT_SUCCEEDED
            self.workflow_state_polling_tasks[l_task] = (
                r_workflow, r_task, r_status, r_all)
        full_lines.append(full_line)
        part_lines = []

    # Check for double-char conditional operators (a common mistake),
    # and bad node syntax (order of qualifiers).
    bad_lines = []
    for line in full_lines:
        if self.__class__.OP_AND_ERR in line:
            raise GraphParseError(
                "The graph AND operator is "
                f"'{self.__class__.OP_AND}': {line}")
        if self.__class__.OP_OR_ERR in line:
            raise GraphParseError(
                "The graph OR operator is "
                f"'{self.__class__.OP_OR}': {line}")
        # Check node syntax.
        # First drop all non-node characters.
        node_str = line
        for spec in [
            self.__class__.ARROW,
            self.__class__.OP_OR,
            self.__class__.OP_AND,
            self.__class__.SUICIDE,
            '(',
            ')',
        ]:
            node_str = node_str.replace(spec, ' ')

        # Drop all valid @xtriggers, longest first to avoid sub-strings.
        nodes = self.__class__.REC_XTRIG.findall(node_str)
        nodes.sort(key=len, reverse=True)
        for node in nodes:
            node_str = node_str.replace(node, '')

        # Then drop all valid nodes, longest first to avoid sub-strings.
        bad_lines = [
            node_str for node in node_str.split()
            if self.__class__.REC_NODE_FULL.sub('', node, 1)
        ]
    if bad_lines:
        self._report_invalid_lines(bad_lines)

    # Expand parameterized lines (or detect undefined parameters).
    line_set = set()
    graph_expander = GraphExpander(self.parameters)
    for line in full_lines:
        if not self.__class__.REC_PARAMS.search(line):
            line_set.add(line)
            continue
        for line_ in graph_expander.expand(line):
            line_set.add(line_)

    # Process chains of dependencies as pairs: left => right.
    # Parameterization can duplicate some dependencies, so use a set.
    pairs: Set[Tuple[Optional[str], str]] = set()
    for line in line_set:
        chain = []
        # "foo => bar => baz" becomes [foo, bar, baz]
        # "foo => bar_-32768 => baz" becomes [foo]
        # "foo_-32768 => bar" becomes []
        for node in line.split(self.__class__.ARROW):
            # This can happen, e.g. "foo => => bar" produces
            # "foo, '', bar", so we keep the empty node so that it
            # raises an error later
            if node == '':
                chain.append(node)
                continue
            node = self.REC_NODE_OUT_OF_RANGE.sub('', node)
            if node == '':
                # For "foo => bar<err> => baz", stop at "bar<err>"
                break
            else:
                chain.append(node)

        if not chain:
            continue

        for item in self.__class__.REC_NODES.findall(chain[0]):
            # Auto-trigger lone nodes and initial nodes in a chain.
            if not item[0].startswith(self.__class__.XTRIG):
                pairs.add((None, ''.join(item)))

        for i in range(0, len(chain) - 1):
            pairs.add((chain[i], chain[i + 1]))

    for pair in pairs:
        self._proc_dep_pair(pair)

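# Usage sketch: how the parser above might be driven end to end. This assumes
# the class is cylc's GraphParser (import path cylc.flow.graph_parser is an
# assumption) and that it can be built with no arguments, with family_map and
# parameters defaulting to None; check the class __init__ for the exact
# signature before relying on this.
from cylc.flow.graph_parser import GraphParser  # assumed import path

gp = GraphParser()
gp.parse_graph("foo => bar => baz")
# 'triggers' maps each right-side task name to its recorded trigger
# expressions (see _set_triggers above).
print(gp.triggers)
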