def merge_error_tokens(tokens_stream):
    """Coalesce runs of adjacent error tokens into single wide error tokens.

    lex() cannot tell where an error ends, so it emits 1-character 'error'
    tokens until it resynchronizes on a valid token.  This generator glues
    together error tokens that are positionally adjacent (the previous
    token's text ends exactly where the next one starts), widening both the
    text and the source span, so downstream consumers see one error token
    spanning the whole bad region.
    """
    pending = None  # error token currently being widened, not yet emitted
    for tok in tokens_stream:
        if tok.type != 'error':
            # Valid token: flush any accumulated error first, then pass through.
            if pending is not None:
                yield pending
                pending = None
            yield tok
            continue
        if pending is None:
            pending = tok
        elif pending.lexpos + len(pending.text) == tok.lexpos:
            # Directly adjacent: widen the pending error token in place.
            pending.text += tok.text
            pending.source_span = SourceSpan.from_to(pending.source_span, tok.source_span)
        else:
            # Error tokens with a gap between them stay separate.
            yield pending
            pending = tok
    # Stream ended while an error was still accumulating — emit it.
    if pending is not None:
        yield pending
def _set_source_span(node, from_item, to_item=None):
    """Set ``node.source_span`` from one or two span-carrying items.

    With only *from_item*, the node receives a copy of that item's span.
    With *to_item* as well, the node's span stretches from the start of
    *from_item*'s span to the end of *to_item*'s span.
    """
    if to_item is not None:
        node.source_span = SourceSpan.from_to(from_item.source_span, to_item.source_span)
    else:
        node.source_span = SourceSpan.copy(from_item.source_span)