Example no. 1
def test_skips_whitespace():
    assert lex_one("""

    foo


""") == Token(TokenKind.NAME, 6, 9, 'foo')

    assert lex_one("""
    #comment
    foo#comment
""") == Token(TokenKind.NAME, 18, 21, 'foo')

    assert lex_one(""",,,foo,,,""") == Token(TokenKind.NAME, 3, 6, 'foo')
Example no. 2
def test_lex_reports_useful_information_for_dashes_in_names():
    q = u'a-b'
    lexer = Lexer(Source(q))
    first_token = lexer.next_token()
    assert first_token == Token(TokenKind.NAME, 0, 1, 'a')
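    # After the NAME 'a', the '-' begins a number token, so 'b' is rejected as a non-digit.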
    with raises(LanguageError) as excinfo:
        lexer.next_token()

    assert u'Syntax Error GraphQL (1:3) Invalid number, expected digit but got: "b".' in excinfo.value.message
Example no. 3
def test_lexes_strings():
    assert lex_one(u'"simple"') == Token(TokenKind.STRING, 0, 8, 'simple')
    assert lex_one(u'" white space "') == Token(TokenKind.STRING, 0, 15, ' white space ')
    assert lex_one(u'"quote \\""') == Token(TokenKind.STRING, 0, 10, 'quote "')
    assert lex_one(u'"escaped \\n\\r\\b\\t\\f"') == Token(TokenKind.STRING, 0, 20, 'escaped \n\r\b\t\f')
    assert lex_one(u'"slashes \\\\ \\/"') == Token(TokenKind.STRING, 0, 15, 'slashes \\ /')
    assert lex_one(u'"unicode \\u1234\\u5678\\u90AB\\uCDEF"') == Token(TokenKind.STRING, 0, 34,
                                                                       u'unicode \u1234\u5678\u90AB\uCDEF')
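
Note that the start/end offsets index the raw source, so each escape sequence counts as two characters while the decoded value is shorter. A hypothetical check, reusing the lex_one sketch above and assuming the token exposes start, end, and value attributes:

token = lex_one(u'"quote \\""')
assert token.end - token.start == 10  # ten raw source characters, escapes included
assert token.value == 'quote "'       # seven characters after unescaping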
Example no. 4
def test_lexes_punctuation():
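    # Punctuator tokens carry no value, so Token is built from kind, start, and end only.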
    assert lex_one('!') == Token(TokenKind.BANG, 0, 1)
    assert lex_one('$') == Token(TokenKind.DOLLAR, 0, 1)
    assert lex_one('(') == Token(TokenKind.PAREN_L, 0, 1)
    assert lex_one(')') == Token(TokenKind.PAREN_R, 0, 1)
    assert lex_one('...') == Token(TokenKind.SPREAD, 0, 3)
    assert lex_one(':') == Token(TokenKind.COLON, 0, 1)
    assert lex_one('=') == Token(TokenKind.EQUALS, 0, 1)
    assert lex_one('@') == Token(TokenKind.AT, 0, 1)
    assert lex_one('[') == Token(TokenKind.BRACKET_L, 0, 1)
    assert lex_one(']') == Token(TokenKind.BRACKET_R, 0, 1)
    assert lex_one('{') == Token(TokenKind.BRACE_L, 0, 1)
    assert lex_one('|') == Token(TokenKind.PIPE, 0, 1)
    assert lex_one('}') == Token(TokenKind.BRACE_R, 0, 1)
Example no. 5
def test_lexes_numbers():
    assert lex_one('4') == Token(TokenKind.INT, 0, 1, '4')
    assert lex_one('4.123') == Token(TokenKind.FLOAT, 0, 5, '4.123')
    assert lex_one('-4') == Token(TokenKind.INT, 0, 2, '-4')
    assert lex_one('9') == Token(TokenKind.INT, 0, 1, '9')
    assert lex_one('0') == Token(TokenKind.INT, 0, 1, '0')
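    # Leading zeros are not consumed: lexing '00' stops after the first zero.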
    assert lex_one('00') == Token(TokenKind.INT, 0, 1, '0')
    assert lex_one('-4.123') == Token(TokenKind.FLOAT, 0, 6, '-4.123')
    assert lex_one('0.123') == Token(TokenKind.FLOAT, 0, 5, '0.123')
    assert lex_one('-1.123e4') == Token(TokenKind.FLOAT, 0, 8, '-1.123e4')
    assert lex_one('-1.123e-4') == Token(TokenKind.FLOAT, 0, 9, '-1.123e-4')
    assert lex_one('-1.123e4567') == Token(TokenKind.FLOAT, 0, 11,
                                           '-1.123e4567')
Example no. 6
def test_accepts_bom_header():
    assert lex_one(u'\uFEFF foo') == Token(TokenKind.NAME, 2, 5, u'foo')
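    # The byte order mark and the following space are skipped, so 'foo' starts at offset 2.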