def test_binary_expression_honors_precedence(op1: SyntaxKind, op2: SyntaxKind):
    """Parsing ``a op1 b op2 c`` groups operands according to operator precedence."""
    prec1 = syntax_facts.get_binary_operator_precedence(op1)
    prec2 = syntax_facts.get_binary_operator_precedence(op2)
    text1 = syntax_facts.text_for(op1)
    text2 = syntax_facts.text_for(op2)
    root = SyntaxTree.parse(f"a {text1} b {text2} c").root

    with AssertingEnumerator(root) as walker:
        if prec1 >= prec2:
            # Left-associative / higher-left precedence: ((a op1 b) op2 c)
            walker.assert_node(SyntaxKind.BINARY_EXPRESSION)
            walker.assert_node(SyntaxKind.BINARY_EXPRESSION)
            walker.assert_node(SyntaxKind.NAME_EXPRESSION)
            walker.assert_token(SyntaxKind.IDENTIFIER_TOKEN, "a")
            walker.assert_token(op1, text1)
            walker.assert_node(SyntaxKind.NAME_EXPRESSION)
            walker.assert_token(SyntaxKind.IDENTIFIER_TOKEN, "b")
            walker.assert_token(op2, text2)
            walker.assert_node(SyntaxKind.NAME_EXPRESSION)
            walker.assert_token(SyntaxKind.IDENTIFIER_TOKEN, "c")
        else:
            # Higher-right precedence: (a op1 (b op2 c))
            walker.assert_node(SyntaxKind.BINARY_EXPRESSION)
            walker.assert_node(SyntaxKind.NAME_EXPRESSION)
            walker.assert_token(SyntaxKind.IDENTIFIER_TOKEN, "a")
            walker.assert_token(op1, text1)
            walker.assert_node(SyntaxKind.BINARY_EXPRESSION)
            walker.assert_node(SyntaxKind.NAME_EXPRESSION)
            walker.assert_token(SyntaxKind.IDENTIFIER_TOKEN, "b")
            walker.assert_token(op2, text2)
            walker.assert_node(SyntaxKind.NAME_EXPRESSION)
            walker.assert_token(SyntaxKind.IDENTIFIER_TOKEN, "c")
def test_unary_expression_honors_precedences(unary_kind: SyntaxKind, binary_kind: SyntaxKind):
    """Parsing ``unary a binary b`` binds the unary operator per its precedence."""
    unary_prec = syntax_facts.get_unary_operator_precedence(unary_kind)
    binary_prec = syntax_facts.get_binary_operator_precedence(binary_kind)
    unary_text = syntax_facts.text_for(unary_kind)
    binary_text = syntax_facts.text_for(binary_kind)
    root = SyntaxTree.parse(f"{unary_text} a {binary_text} b").root

    with AssertingEnumerator(root) as walker:
        if unary_prec >= binary_prec:
            # Unary binds tighter: ((unary a) binary b)
            walker.assert_node(SyntaxKind.BINARY_EXPRESSION)
            walker.assert_node(SyntaxKind.UNARY_EXPRESSION)
            walker.assert_token(unary_kind, unary_text)
            walker.assert_node(SyntaxKind.NAME_EXPRESSION)
            walker.assert_token(SyntaxKind.IDENTIFIER_TOKEN, "a")
            walker.assert_token(binary_kind, binary_text)
            walker.assert_node(SyntaxKind.NAME_EXPRESSION)
            walker.assert_token(SyntaxKind.IDENTIFIER_TOKEN, "b")
        else:
            # Binary binds tighter: (unary (a binary b))
            walker.assert_node(SyntaxKind.UNARY_EXPRESSION)
            walker.assert_token(unary_kind, unary_text)
            walker.assert_node(SyntaxKind.BINARY_EXPRESSION)
            walker.assert_node(SyntaxKind.NAME_EXPRESSION)
            walker.assert_token(SyntaxKind.IDENTIFIER_TOKEN, "a")
            walker.assert_token(binary_kind, binary_text)
            walker.assert_node(SyntaxKind.NAME_EXPRESSION)
            walker.assert_token(SyntaxKind.IDENTIFIER_TOKEN, "b")
def lex(self):
    """Scan one token starting at the current position and return it.

    Multi-character reads (numbers, whitespace, identifiers/keywords) are
    delegated to helpers; operators are resolved via dispatch tables, with
    two-character operators tried before single characters so that e.g.
    ``!=`` wins over ``!``. Anything unrecognized is reported as a bad
    character and consumed as a BAD_TOKEN.
    """
    self._start = self._position
    self._kind = SyntaxKind.BAD_TOKEN
    self._value = None

    # Two-character operators must be matched before their one-character prefixes.
    pair_tokens = {
        "&&": SyntaxKind.AMPERSAND_AMPERSAND_TOKEN,
        "||": SyntaxKind.PIPE_PIPE_TOKEN,
        "==": SyntaxKind.EQUALS_EQUALS_TOKEN,
        "!=": SyntaxKind.BANG_EQUALS_TOKEN,
    }
    single_tokens = {
        "\0": SyntaxKind.END_OF_FILE_TOKEN,
        "+": SyntaxKind.PLUS_TOKEN,
        "-": SyntaxKind.MINUS_TOKEN,
        "/": SyntaxKind.SLASH_TOKEN,
        "*": SyntaxKind.STAR_TOKEN,
        "(": SyntaxKind.OPEN_PARENTHESIS_TOKEN,
        ")": SyntaxKind.CLOSE_PARENTHESIS_TOKEN,
        "!": SyntaxKind.BANG_TOKEN,
        "=": SyntaxKind.EQUALS_TOKEN,
    }

    ch = self._current()
    pair = ch + self._lookahead()
    if ch.isdigit():
        self._read_number()
    elif ch.isspace():
        self._read_whitespace()
    elif ch.isalpha():
        self._read_identifier_or_keyword()
    elif pair in pair_tokens:
        self._kind = pair_tokens[pair]
        self._position += 2
    elif ch in single_tokens:
        self._kind = single_tokens[ch]
        self._position += 1
    else:
        self.diagnostics.report_bad_character(
            TextSpan(self._position, 1), ch
        )
        self._position += 1

    # Fixed-text kinds get their canonical text; everything else takes
    # the raw slice that was consumed.
    text = syntax_facts.text_for(self._kind)
    if text is None:
        text = self._text[self._start:self._position]
    return SyntaxToken(self._kind, self._start, text, self._value)
def test_get_text_round_trips(kind: SyntaxKind):
    """Lexing a kind's fixed text must yield exactly one token of that kind."""
    fixed_text = syntax_facts.text_for(kind)
    if fixed_text is None:
        # Dynamic kinds (identifiers, numbers) have no canonical text to round-trip.
        return
    lexed = list(SyntaxTree.parse_tokens(fixed_text))
    assert len(lexed) == 1
    only_token = lexed[0]
    assert only_token.kind() == kind
    assert only_token.text == fixed_text
def get_tokens() -> Iterable[tuple[SyntaxKind, str]]:
    """Return (kind, text) pairs covering the lexer's token inventory.

    Fixed tokens are every SyntaxKind for which syntax_facts defines a
    canonical text; dynamic tokens (identifiers and numbers), which have
    no single canonical text, are supplied explicitly.
    """
    # Comprehension replaces the filter(lambda)/map(lambda) chain — same
    # lazy pairs, idiomatic form. The walrus avoids calling text_for twice.
    fixed_tokens = (
        (kind, text)
        for kind in SyntaxKind
        if (text := syntax_facts.text_for(kind)) is not None
    )
    dynamic_tokens = (
        (SyntaxKind.IDENTIFIER_TOKEN, "a"),
        (SyntaxKind.IDENTIFIER_TOKEN, "abc"),
        (SyntaxKind.NUMBER_TOKEN, "1"),
        (SyntaxKind.NUMBER_TOKEN, "123"),
    )
    return itertools.chain(fixed_tokens, dynamic_tokens)