Example #1
 def lexes_block_strings():
     assert lex_one('""""""') == Token(TokenKind.BLOCK_STRING, 0, 6, 1, 1,
                                       None, "")
     assert lex_one('"""simple"""') == Token(TokenKind.BLOCK_STRING, 0, 12,
                                             1, 1, None, "simple")
     assert lex_one('""" white space """') == Token(TokenKind.BLOCK_STRING,
                                                    0, 19, 1, 1, None,
                                                    " white space ")
     assert lex_one('"""contains " quote"""') == Token(
         TokenKind.BLOCK_STRING, 0, 22, 1, 1, None, 'contains " quote')
     assert lex_one('"""contains \\""" triple-quote"""') == Token(
         TokenKind.BLOCK_STRING, 0, 32, 1, 1, None,
         'contains """ triple-quote')
     assert lex_one('"""multi\nline"""') == Token(TokenKind.BLOCK_STRING, 0,
                                                  16, 1, 1, None,
                                                  "multi\nline")
     assert lex_one('"""multi\rline\r\nnormalized"""') == Token(
         TokenKind.BLOCK_STRING, 0, 28, 1, 1, None,
         "multi\nline\nnormalized")
     assert lex_one('"""unescaped \\n\\r\\b\\t\\f\\u1234"""') == Token(
         TokenKind.BLOCK_STRING,
         0,
         32,
         1,
         1,
         None,
         "unescaped \\n\\r\\b\\t\\f\\u1234",
     )
     assert lex_one('"""slashes \\\\ \\/"""') == Token(
         TokenKind.BLOCK_STRING, 0, 19, 1, 1, None, "slashes \\\\ \\/")
     assert lex_one('"""\n\n        spans\n          multiple\n'
                    '            lines\n\n        """') == Token(
                        TokenKind.BLOCK_STRING, 0, 68, 1, 1, None,
                        "spans\n  multiple\n    lines")
Example #2
 def skips_whitespace_and_comments():
     token = lex_one('\n\n    foo\n\n\n')
     assert token == Token(TokenKind.NAME, 6, 9, 3, 5, None, 'foo')
     token = lex_one('\n    #comment\n    foo#comment\n')
     assert token == Token(TokenKind.NAME, 18, 21, 3, 5, None, 'foo')
     token = lex_one(',,,foo,,,')
     assert token == Token(TokenKind.NAME, 3, 6, 1, 4, None, 'foo')
Example #3
 def skips_whitespace_and_comments():
     token = lex_one("\n\n    foo\n\n\n")
     assert token == Token(TokenKind.NAME, 6, 9, 3, 5, None, "foo")
     token = lex_one("\n    #comment\n    foo#comment\n")
     assert token == Token(TokenKind.NAME, 18, 21, 3, 5, None, "foo")
     token = lex_one(",,,foo,,,")
     assert token == Token(TokenKind.NAME, 3, 6, 1, 4, None, "foo")
Example #4
def describe_location_class():
    token1 = Token(TokenKind.NAME, 1, 2, 1, 2)
    token2 = Token(TokenKind.NAME, 2, 3, 1, 3)
    source = Source("source")

    def initializes():
        loc = Location(token1, token2, source)
        assert loc.start == token1.start
        assert loc.end == token2.end
        assert loc.start_token is token1
        assert loc.end_token is token2
        assert loc.source is source

    def can_stringify_with_start_and_end():
        loc = Location(token1, token2, source)
        assert str(loc) == "1:3"

    def has_representation_with_start_and_end():
        loc = Location(token1, token2, source)
        assert repr(loc) == "<Location 1:3>"
        assert inspect(loc) == repr(loc)

    def can_check_equality():
        loc1 = Location(token1, token2, source)
        loc2 = Location(token1, token2, source)
        assert loc2 == loc1
        loc3 = Location(token1, token1, source)
        assert loc3 != loc1
        loc4 = Location(token2, token2, source)
        assert loc4 != loc1
        assert loc4 != loc3

    def can_check_equality_with_tuple_or_list():
        loc = Location(token1, token2, source)
        assert loc == (1, 3)
        assert loc == [1, 3]
        assert not loc != (1, 3)
        assert not loc != [1, 3]
        assert loc != (1, 2)
        assert loc != [2, 3]

    def does_not_equal_incompatible_object():
        loc = Location(token1, token2, source)
        assert not loc == (1, 2, 3)
        assert loc != (1, 2, 3)
        assert not loc == {1: 2}
        assert loc != {1: 2}

    def can_hash():
        loc1 = Location(token1, token2, source)
        loc2 = Location(token1, token2, source)
        assert loc2 == loc1
        assert hash(loc2) == hash(loc1)
        loc3 = Location(token1, token1, source)
        assert loc3 != loc1
        assert hash(loc3) != hash(loc1)
        loc4 = Location(token2, token2, source)
        assert loc4 != loc1
        assert hash(loc4) != hash(loc1)
        assert hash(loc4) != hash(loc3)
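Outside of tests, `Location` objects are normally produced by the parser rather than built by hand. A short illustration, assuming the standard `parse` entry point and relying on the tuple equality shown above:

from graphql.language import parse

doc = parse("{ field }")
loc = doc.definitions[0].loc  # spans the whole operation definition
assert loc == (0, 9)  # start and end character offsets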
Example #5
 def can_hash():
     token1 = Token(TokenKind.NAME, 1, 2, 1, 2, value="hash")
     token2 = Token(TokenKind.NAME, 1, 2, 1, 2, value="hash")
     assert token2 == token1
     assert hash(token2) == hash(token1)
     token3 = Token(TokenKind.NAME, 1, 2, 1, 2, value="bash")
     assert token3 != token1
     assert hash(token3) != hash(token1)
Example #6
 def can_check_equality():
     token1 = Token(TokenKind.NAME, 1, 2, 1, 2, value="test")
     token2 = Token(TokenKind.NAME, 1, 2, 1, 2, value="test")
     assert token2 == token1
     assert not token2 != token1
     token3 = Token(TokenKind.NAME, 1, 2, 1, 2, value="text")
     assert token3 != token1
     token4 = Token(TokenKind.NAME, 1, 4, 1, 2, value="test")
     assert token4 != token1
     token5 = Token(TokenKind.NAME, 1, 2, 1, 4, value="test")
     assert token5 != token1
Example #7
def describe_location_class():
    token1 = Token(TokenKind.NAME, 1, 2, 1, 2)
    token2 = Token(TokenKind.NAME, 2, 3, 1, 3)
    source = Source("source")

    def initializes():
        loc = Location(1, 2, token1, token2, source)
        assert loc.start == 1
        assert loc.end == 2
        assert loc.start_token is token1
        assert loc.end_token is token2
        assert loc.source is source

    def can_stringify_with_start_and_end():
        loc = Location(1, 2, token1, token2, source)
        assert str(loc) == "1:2"

    def has_representation_with_start_and_end():
        loc = Location(1, 2, token1, token2, source)
        assert repr(loc) == "<Location 1:2>"
        assert inspect(loc) == repr(loc)

    def can_check_equality():
        loc1 = Location(1, 2, token1, token2, source)
        loc2 = Location(1, 2, token1, token2, source)
        assert loc2 == loc1
        loc3 = Location(3, 2, token1, token2, source)
        assert loc3 != loc1
        loc4 = Location(1, 4, token1, token2, source)
        assert loc4 != loc1

    def can_check_equality_with_tuple_or_list():
        loc = Location(1, 2, token1, token2, source)
        assert loc == (1, 2)
        assert loc == [1, 2]
        assert not loc != (1, 2)
        assert not loc != [1, 2]
        assert loc != (3, 2)
        assert loc != [1, 4]

    def can_hash():
        loc1 = Location(1, 2, token1, token2, source)
        loc2 = Location(1, 2, token1, token2, source)
        assert loc2 == loc1
        assert hash(loc2) == hash(loc1)
        loc3 = Location(1, 3, token1, token2, source)
        assert loc3 != loc1
        assert hash(loc3) != hash(loc1)
Example #8
    def advance_line_after_lexing_multiline_block_string():
        assert (lex_second('''"""

        spans
          multiple
            lines

        \n """ second_token''') == Token(TokenKind.NAME, 71, 83, 8, 6, None,
                                         "second_token"))
Example #9
 def tracks_line_breaks():
     assert lex_one("foo") == Token(TokenKind.NAME, 0, 3, 1, 1, "foo")
     assert lex_one("\nfoo") == Token(TokenKind.NAME, 1, 4, 2, 1, "foo")
     assert lex_one("\rfoo") == Token(TokenKind.NAME, 1, 4, 2, 1, "foo")
     assert lex_one("\r\nfoo") == Token(TokenKind.NAME, 2, 5, 2, 1, "foo")
     assert lex_one("\n\rfoo") == Token(TokenKind.NAME, 2, 5, 3, 1, "foo")
     assert lex_one("\r\r\n\nfoo") == Token(TokenKind.NAME, 4, 7, 4, 1, "foo")
     assert lex_one("\n\n\r\rfoo") == Token(TokenKind.NAME, 4, 7, 5, 1, "foo")
Example #10
 def lexes_comments():
     assert lex_one("# Comment").prev == Token(
         TokenKind.COMMENT, 0, 9, 1, 1, " Comment"
     )
     assert lex_one("# Comment\nAnother line").prev == Token(
         TokenKind.COMMENT, 0, 9, 1, 1, " Comment"
     )
     assert lex_one("# Comment\r\nAnother line").prev == Token(
         TokenKind.COMMENT, 0, 9, 1, 1, " Comment"
     )
     assert lex_one("# Comment \U0001f600").prev == Token(
         TokenKind.COMMENT, 0, 11, 1, 1, " Comment \U0001f600"
     )
     assert lex_one("# Comment \uD83D\uDE00").prev == Token(
         TokenKind.COMMENT, 0, 12, 1, 1, " Comment \uD83D\uDE00"
     )
     assert_syntax_error(
         "# Invalid surrogate \uDEAD", "Invalid character: U+DEAD.", (1, 21)
     )
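The `assert_syntax_error` helper used above is not shown in these excerpts. A plausible sketch, assuming pytest's `raises`, the `GraphQLSyntaxError` exception, and the `lex_one` helper sketched earlier (the real fixture may format the message differently):

from pytest import raises
from graphql.error import GraphQLSyntaxError

def assert_syntax_error(text, message, location):
    # Lexing is expected to fail; check the reported message and position.
    with raises(GraphQLSyntaxError) as exc_info:
        lex_one(text)
    error = exc_info.value
    assert error.message == f"Syntax Error: {message}"
    assert error.locations == [location]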
Example #11
 def lex_reports_useful_information_for_dashes_in_names():
     q = "a-b"
     lexer = Lexer(Source(q))
     first_token = lexer.advance()
     assert first_token == Token(TokenKind.NAME, 0, 1, 1, 1, None, "a")
     with raises(GraphQLSyntaxError) as exc_info:
         lexer.advance()
     error = exc_info.value
     assert error.message == (
         "Syntax Error: Invalid number, expected digit but got: 'b'.")
     assert error.locations == [(1, 3)]
Example #12
 def initializes():
     prev = Token(TokenKind.EQUALS, 10, 11, 1, 2)
     token = Token(
         kind=TokenKind.NAME,
         start=11,
         end=12,
         line=1,
         column=3,
         prev=prev,
         value="n",
     )
     assert prev.kind == TokenKind.EQUALS
     assert prev.start == 10
     assert prev.end == 11
     assert prev.line == 1
     assert prev.column == 2
     assert token.kind == TokenKind.NAME
     assert token.start == 11
     assert token.end == 12
     assert token.line == 1
     assert token.column == 3
     assert token.prev is prev
     assert token.value == "n"
Example #13
 def initializes():
     token = Token(
         kind=TokenKind.NAME,
         start=11,
         end=12,
         line=1,
         column=3,
         value="n",
     )
     assert token.kind == TokenKind.NAME
     assert token.start == 11
     assert token.end == 12
     assert token.line == 1
     assert token.column == 3
     assert token.prev is None
     assert token.value == "n"
Example #14
 def lexes_strings():
     assert lex_one('""') == Token(TokenKind.STRING, 0, 2, 1, 1, None, "")
     assert lex_one('"simple"') == Token(TokenKind.STRING, 0, 8, 1, 1, None,
                                         "simple")
     assert lex_one('" white space "') == Token(TokenKind.STRING, 0, 15, 1,
                                                1, None, " white space ")
     assert lex_one('"quote \\""') == Token(TokenKind.STRING, 0, 10, 1, 1,
                                            None, 'quote "')
     assert lex_one('"escaped \\n\\r\\b\\t\\f"') == Token(
         TokenKind.STRING, 0, 20, 1, 1, None, "escaped \n\r\b\t\f")
     assert lex_one('"slashes \\\\ \\/"') == Token(TokenKind.STRING, 0, 15,
                                                   1, 1, None,
                                                   "slashes \\ /")
     assert lex_one('"unicode \\u1234\\u5678\\u90AB\\uCDEF"') == Token(
         TokenKind.STRING, 0, 34, 1, 1, None,
         "unicode \u1234\u5678\u90AB\uCDEF")
Example #15
 def skips_whitespace_and_comments():
     token = lex_one("\n\n    foo\n\n\n")
     assert token == Token(TokenKind.NAME, 6, 9, 3, 5, "foo")
     token = lex_one("\r\n\r\n  foo\r\n\r\n")
     assert token == Token(TokenKind.NAME, 6, 9, 3, 3, "foo")
     token = lex_one("\r\r  foo\r\r")
     assert token == Token(TokenKind.NAME, 4, 7, 3, 3, "foo")
     token = lex_one("\t\tfoo\t\t")
     assert token == Token(TokenKind.NAME, 2, 5, 1, 3, "foo")
     token = lex_one("\n    #comment\n    foo#comment\n")
     assert token == Token(TokenKind.NAME, 18, 21, 3, 5, "foo")
     token = lex_one(",,,foo,,,")
     assert token == Token(TokenKind.NAME, 3, 6, 1, 4, "foo")
Example #16
 def can_convert_to_dict_with_locations():
     token = Token(
         kind=TokenKind.NAME,
         start=1,
         end=3,
         line=1,
         column=1,
         value="foo",
     )
     loc = Location(token, token, Source("foo"))
     node = SampleTestNode(alpha=1, beta=2, loc=loc)
     res = node.to_dict(locations=True)
     assert res == {
         "kind": "sample_test",
         "alpha": 1,
         "beta": 2,
         "loc": {
             "start": 1,
             "end": 3
         },
     }
     assert list(res) == ["kind", "alpha", "beta", "loc"]
     assert list(res["loc"]) == ["start", "end"]
Example #17
 def lexes_punctuation():
     assert lex_one("!") == Token(TokenKind.BANG, 0, 1, 1, 1, None, None)
     assert lex_one("$") == Token(TokenKind.DOLLAR, 0, 1, 1, 1, None, None)
     assert lex_one("(") == Token(TokenKind.PAREN_L, 0, 1, 1, 1, None, None)
     assert lex_one(")") == Token(TokenKind.PAREN_R, 0, 1, 1, 1, None, None)
     assert lex_one("...") == Token(TokenKind.SPREAD, 0, 3, 1, 1, None,
                                    None)
     assert lex_one(":") == Token(TokenKind.COLON, 0, 1, 1, 1, None, None)
     assert lex_one("=") == Token(TokenKind.EQUALS, 0, 1, 1, 1, None, None)
     assert lex_one("@") == Token(TokenKind.AT, 0, 1, 1, 1, None, None)
     assert lex_one("[") == Token(TokenKind.BRACKET_L, 0, 1, 1, 1, None,
                                  None)
     assert lex_one("]") == Token(TokenKind.BRACKET_R, 0, 1, 1, 1, None,
                                  None)
     assert lex_one("{") == Token(TokenKind.BRACE_L, 0, 1, 1, 1, None, None)
     assert lex_one("}") == Token(TokenKind.BRACE_R, 0, 1, 1, 1, None, None)
     assert lex_one("|") == Token(TokenKind.PIPE, 0, 1, 1, 1, None, None)
Example #18
 def can_compare_with_string():
     token = Token(TokenKind.NAME, 1, 2, 1, 2, value="test")
     assert token == "Name 'test'"
     assert token != "Name 'foo'"
Example #19
 def does_not_equal_incompatible_object():
     token = Token(TokenKind.NAME, 1, 2, 1, 2, value="test")
     assert token != {"Name": "test"}
Example #20
 def has_representation_with_line_and_column():
     token = Token(TokenKind.NAME, 1, 2, 1, 2, value="test")
     assert repr(token) == "<Token Name 'test' 1:2>"
     assert inspect(token) == repr(token)
Example #21
 def lexes_punctuation():
     assert lex_one('!') == Token(TokenKind.BANG, 0, 1, 1, 1, None, None)
     assert lex_one('$') == Token(TokenKind.DOLLAR, 0, 1, 1, 1, None, None)
     assert lex_one('(') == Token(TokenKind.PAREN_L, 0, 1, 1, 1, None, None)
     assert lex_one(')') == Token(TokenKind.PAREN_R, 0, 1, 1, 1, None, None)
     assert lex_one('...') == Token(TokenKind.SPREAD, 0, 3, 1, 1, None,
                                    None)
     assert lex_one(':') == Token(TokenKind.COLON, 0, 1, 1, 1, None, None)
     assert lex_one('=') == Token(TokenKind.EQUALS, 0, 1, 1, 1, None, None)
     assert lex_one('@') == Token(TokenKind.AT, 0, 1, 1, 1, None, None)
     assert lex_one('[') == Token(TokenKind.BRACKET_L, 0, 1, 1, 1, None,
                                  None)
     assert lex_one(']') == Token(TokenKind.BRACKET_R, 0, 1, 1, 1, None,
                                  None)
     assert lex_one('{') == Token(TokenKind.BRACE_L, 0, 1, 1, 1, None, None)
     assert lex_one('}') == Token(TokenKind.BRACE_R, 0, 1, 1, 1, None, None)
     assert lex_one('|') == Token(TokenKind.PIPE, 0, 1, 1, 1, None, None)
Example #22
 def records_line_and_column():
     token = lex_one('\n \r\n \r  foo\n')
     assert token == Token(TokenKind.NAME, 8, 11, 4, 3, None, 'foo')
Example #23
 def can_stringify():
     token = Token(TokenKind.NAME, 1, 2, 1, 2, value="test")
     assert str(token) == "Name 'test'"
     assert token.desc == str(token)
Example #24
 def lexes_empty_string():
     token = lex_one('""')
     assert token == Token(TokenKind.STRING, 0, 2, 1, 1, None, "")
     assert token.value == ""
Example #25
 def records_line_and_column():
     token = lex_one("\n \r\n \r  foo\n")
     assert token == Token(TokenKind.NAME, 8, 11, 4, 3, None, "foo")
Example #26
 def can_copy():
     token1 = Token(TokenKind.NAME, 1, 2, 1, 2, value="copy")
     token2 = copy(token1)
     assert token2 == token1
     assert token2 is not token1
Example #27
 def accepts_bom_header():
     token = lex_one("\uFEFF foo")
     assert token == Token(TokenKind.NAME, 2, 5, 1, 3, None, "foo")
Example #28
 def lexes_numbers():
     assert lex_one('0') == Token(TokenKind.INT, 0, 1, 1, 1, None, '0')
     assert lex_one('1') == Token(TokenKind.INT, 0, 1, 1, 1, None, '1')
     assert lex_one('4') == Token(TokenKind.INT, 0, 1, 1, 1, None, '4')
     assert lex_one('9') == Token(TokenKind.INT, 0, 1, 1, 1, None, '9')
     assert lex_one('42') == Token(TokenKind.INT, 0, 2, 1, 1, None, '42')
     assert lex_one('4.123') == Token(TokenKind.FLOAT, 0, 5, 1, 1, None,
                                      '4.123')
     assert lex_one('-4') == Token(TokenKind.INT, 0, 2, 1, 1, None, '-4')
     assert lex_one('-42') == Token(TokenKind.INT, 0, 3, 1, 1, None, '-42')
     assert lex_one('-4.123') == Token(TokenKind.FLOAT, 0, 6, 1, 1, None,
                                       '-4.123')
     assert lex_one('0.123') == Token(TokenKind.FLOAT, 0, 5, 1, 1, None,
                                      '0.123')
     assert lex_one('123e4') == Token(TokenKind.FLOAT, 0, 5, 1, 1, None,
                                      '123e4')
     assert lex_one('123E4') == Token(TokenKind.FLOAT, 0, 5, 1, 1, None,
                                      '123E4')
     assert lex_one('123e-4') == Token(TokenKind.FLOAT, 0, 6, 1, 1, None,
                                       '123e-4')
     assert lex_one('123e+4') == Token(TokenKind.FLOAT, 0, 6, 1, 1, None,
                                       '123e+4')
     assert lex_one('-1.123e4') == Token(TokenKind.FLOAT, 0, 8, 1, 1, None,
                                         '-1.123e4')
     assert lex_one('-1.123E4') == Token(TokenKind.FLOAT, 0, 8, 1, 1, None,
                                         '-1.123E4')
     assert lex_one('-1.123e-4') == Token(TokenKind.FLOAT, 0, 9, 1, 1, None,
                                          '-1.123e-4')
     assert lex_one('-1.123e+4') == Token(TokenKind.FLOAT, 0, 9, 1, 1, None,
                                          '-1.123e+4')
     assert lex_one('-1.123e4567') == Token(TokenKind.FLOAT, 0, 11, 1, 1,
                                            None, '-1.123e4567')
Example #29
 def lexes_numbers():
     assert lex_one("0") == Token(TokenKind.INT, 0, 1, 1, 1, None, "0")
     assert lex_one("1") == Token(TokenKind.INT, 0, 1, 1, 1, None, "1")
     assert lex_one("4") == Token(TokenKind.INT, 0, 1, 1, 1, None, "4")
     assert lex_one("9") == Token(TokenKind.INT, 0, 1, 1, 1, None, "9")
     assert lex_one("42") == Token(TokenKind.INT, 0, 2, 1, 1, None, "42")
     assert lex_one("4.123") == Token(TokenKind.FLOAT, 0, 5, 1, 1, None,
                                      "4.123")
     assert lex_one("-4") == Token(TokenKind.INT, 0, 2, 1, 1, None, "-4")
     assert lex_one("-42") == Token(TokenKind.INT, 0, 3, 1, 1, None, "-42")
     assert lex_one("-4.123") == Token(TokenKind.FLOAT, 0, 6, 1, 1, None,
                                       "-4.123")
     assert lex_one("0.123") == Token(TokenKind.FLOAT, 0, 5, 1, 1, None,
                                      "0.123")
     assert lex_one("123e4") == Token(TokenKind.FLOAT, 0, 5, 1, 1, None,
                                      "123e4")
     assert lex_one("123E4") == Token(TokenKind.FLOAT, 0, 5, 1, 1, None,
                                      "123E4")
     assert lex_one("123e-4") == Token(TokenKind.FLOAT, 0, 6, 1, 1, None,
                                       "123e-4")
     assert lex_one("123e+4") == Token(TokenKind.FLOAT, 0, 6, 1, 1, None,
                                       "123e+4")
     assert lex_one("-1.123e4") == Token(TokenKind.FLOAT, 0, 8, 1, 1, None,
                                         "-1.123e4")
     assert lex_one("-1.123E4") == Token(TokenKind.FLOAT, 0, 8, 1, 1, None,
                                         "-1.123E4")
     assert lex_one("-1.123e-4") == Token(TokenKind.FLOAT, 0, 9, 1, 1, None,
                                          "-1.123e-4")
     assert lex_one("-1.123e+4") == Token(TokenKind.FLOAT, 0, 9, 1, 1, None,
                                          "-1.123e+4")
     assert lex_one("-1.123e4567") == Token(TokenKind.FLOAT, 0, 11, 1, 1,
                                            None, "-1.123e4567")
Example #30
 def ignores_bom_header():
     token = lex_one("\uFEFF foo")
     assert token == Token(TokenKind.NAME, 2, 5, 1, 3, "foo")