def p_sequence_match_empty(p):
    """sequence_match : """
    p[0] = SequenceMatch()
    try:
        p[0].source_span = SourceSpan.right_after(p[-1].source_span)
    except AttributeError:
        # The symbol before this empty production carries no source span: anchor the match at the start of the source.
        p[0].source_span = SourceSpan.at_beginning()
def merge_error_tokens(tokens_stream):
    """
    By itself, lex() cannot tell exactly when an error ends, so it just emits 1-character error tokens until the
    next valid token is encountered. This generator glues consecutive error tokens together so that the resulting
    error tokens span the correct width.
    """
    last_err_token = None
    for token in tokens_stream:
        if token.type == 'error':
            if last_err_token is not None:
                if last_err_token.lexpos + len(last_err_token.text) == token.lexpos:
                    # This error character directly follows the previous error run: extend it.
                    last_err_token.text += token.text
                    last_err_token.source_span = SourceSpan.from_to(last_err_token.source_span, token.source_span)
                    continue
                # Non-adjacent error: flush the previous run before starting a new one.
                yield last_err_token
            last_err_token = token
        else:
            if last_err_token is not None:
                yield last_err_token
                last_err_token = None
            yield token
    if last_err_token is not None:
        yield last_err_token
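# Illustrative sketch only (not part of the parser): a simplified, self-contained variant of the
# coalescing idea above, working on plain (lexpos, text) pairs instead of the project's PLY tokens
# and SourceSpan objects, so the merging behaviour can be checked in isolation.
def _merge_adjacent_errors(errors):
    """Coalesce (lexpos, text) error fragments whose positions are contiguous."""
    merged = []
    for lexpos, text in errors:
        if merged and merged[-1][0] + len(merged[-1][1]) == lexpos:
            prev_pos, prev_text = merged[-1]
            merged[-1] = (prev_pos, prev_text + text)  # adjacent: extend the previous error run
        else:
            merged.append((lexpos, text))              # gap or first fragment: start a new run
    return merged

# Example: _merge_adjacent_errors([(4, "@"), (5, "#"), (6, "!"), (10, "?")])
# -> [(4, "@#!"), (10, "?")]   (the three stray characters at positions 4-6 collapse into one run)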
def parse_rules(rules_text, start_rule=None):
    if start_rule is None:
        parser = parser_template
    else:
        parser = yacc.yacc(start=start_rule, debug=False, write_tables=False, errorlog=NullLogger())
    try:
        return parser.parse(rules_text, RulesLexerForYACC())
    except EOFRuleParseException:
        raise RuleSyntaxError(source_span=SourceSpan.at_end_of_source(rules_text)) from None
def semantic_check(self, scope):
    try:
        self._semantic_check_before_children(scope)
        for child in self.iter_ast_children():
            child.semantic_check(scope)
        self._semantic_check_after_children(scope)
    except RuleCheckError as e:
        # Fill in whatever context the error is missing from the nearest enclosing node, then re-raise.
        if e.scope is None:
            e.scope = scope
        if e.source_span is None:
            e.source_span = SourceSpan.copy(self.source_span)
        raise e
def _get_block_level_adjusted_error_span(self):
    # QTextBlock.length() counts the trailing block separator, hence the -2 to land on the last visible character.
    block_start = self.currentBlock().position()
    block_end = self.currentBlock().position() + self.currentBlock().length() - 2
    if block_start > block_end:
        return None
    error_span = self._get_document_level_adjusted_error_span()
    if error_span is None:
        return None
    start = clamp(error_span.start_pos, block_start, block_end + 1) - block_start
    end = clamp(error_span.end_pos, block_start - 1, block_end) - block_start
    if start > end:
        return None
    return SourceSpan.from_start_end(start, end, self.currentBlock().text())
def _get_document_level_adjusted_error_span(self):
    if self._error_span is None:
        return None
    source = self.document().toPlainText() + "\n"
    start = clamp(self._error_span.start_pos, 0, len(source) - 1)
    end = clamp(self._error_span.end_pos, start, len(source) - 1)
    # An error that occurs between two tokens will be extended backwards until it touches the last character in
    # the first token; this is so that we can be sure it shows up in the highlighter.
    while start > 0 and str.isspace(source[start]):
        start -= 1
    while end > start and source[end] == "\n":
        end -= 1
    return SourceSpan.from_start_end(start, end, source)
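# Illustrative sketch only (hypothetical helper, not used by the highlighter): the whitespace
# back-off above, shown on a plain string without the Qt document machinery, so the adjustment
# is easy to verify in isolation.
def _pull_back_over_whitespace(source, start, end):
    while start > 0 and source[start].isspace():
        start -= 1
    while end > start and source[end] == "\n":
        end -= 1
    return start, end

# Example: an error reported on the newline after "foo" is pulled back onto the final "o".
# _pull_back_over_whitespace("foo\nbar", 3, 3) -> (2, 2)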
def _set_source_span(node, from_item, to_item=None):
    if to_item is None:
        node.source_span = SourceSpan.copy(from_item.source_span)
    else:
        node.source_span = SourceSpan.from_to(from_item.source_span, to_item.source_span)
def __init__(self, source_span=None, **error_parameters):
    super().__init__(**error_parameters)
    self.source_span = SourceSpan.copy(source_span)