Example 1
def token_grouping(self, tokenize):
    for css_source, expected_tokens in [
        ('', []),
        (r'Lorem\26 "i\psum"4px', [
            ('IDENT', 'Lorem&'), ('STRING', 'ipsum'), ('DIMENSION', 4)]),

        ('not([[lorem]]{ipsum (42)})', [
            ('FUNCTION', 'not', [
                ('[', [
                    ('[', [
                        ('IDENT', 'lorem'),
                    ]),
                ]),
                ('{', [
                    ('IDENT', 'ipsum'),
                    ('S', ' '),
                    ('(', [
                        ('INTEGER', 42),
                    ])
                ])
            ])]),

        # Close everything at EOF, no error
        ('a[b{"d', [
            ('IDENT', 'a'),
            ('[', [
                ('IDENT', 'b'),
                ('{', [
                    ('STRING', 'd'),
                ]),
            ]),
        ]),

        # Any remaining ), ] or } token is a nesting error
        ('a[b{d]e}', [
            ('IDENT', 'a'),
            ('[', [
                ('IDENT', 'b'),
                ('{', [
                    ('IDENT', 'd'),
                    (']', ']'),  # The error is visible here
                    ('IDENT', 'e'),
                ]),
            ]),
        ]),
        # Reference: the same input with brackets correctly nested, no error
        ('a[b{d}e]', [
            ('IDENT', 'a'),
            ('[', [
                ('IDENT', 'b'),
                ('{', [
                    ('IDENT', 'd'),
                ]),
                ('IDENT', 'e'),
            ]),
        ]),
    ]:
        tokens = regroup(tokenize(css_source, ignore_comments=False))
        result = list(jsonify(tokens))
        self.ae(result, expected_tokens)
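The expected_tokens above are nested (type, value) tuples produced by a jsonify helper that these snippets don't show. A minimal sketch of such a helper, assuming tinycss-style token objects with type, value, is_container, function_name and content attributes:

def jsonify(tokens):
    """Flatten token objects into JSON-like tuples, recursing into
    containers so that grouping (and any nesting error) stays visible."""
    for token in tokens:
        if token.type == 'FUNCTION':
            # Function tokens carry a name plus their grouped arguments.
            yield (token.type, token.function_name,
                   list(jsonify(token.content)))
        elif token.is_container:
            # '(', '[' and '{' containers wrap their grouped content.
            yield token.type, list(jsonify(token.content))
        else:
            yield token.type, token.value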
Example 2
def token_api(self, tokenize):
    for css_source in [
            '(8, foo, [z])', '[8, foo, (z)]', '{8, foo, [z]}', 'func(8, foo, [z])'
    ]:
        tokens = list(regroup(tokenize(css_source)))
        self.ae(len(tokens), 1)
        self.ae(len(tokens[0].content), 7)
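token_api depends on regroup wrapping everything between an opening bracket (or a function's opening parenthesis) and its matching closer into one container token whose content attribute holds the grouped tokens, so each of the four sources regroups to a single container with seven content tokens. A simplified, bracket-only sketch of that grouping over (type, value) tuples (the real regroup yields token objects; this is illustrative only):

BRACKETS = {'(': ')', '[': ']', '{': '}'}

def regroup_sketch(flat_tokens):
    """Nest (type, value) tuples into (type, children) groups.
    Stray closers stay in place as visible nesting errors, and EOF
    closes any open group without error, as the tests above expect."""
    iterator = iter(flat_tokens)

    def parse(until):
        result = []
        for kind, value in iterator:
            if kind == until:        # matching closer ends this group
                return result
            if kind in BRACKETS:     # opener: recurse for the content
                result.append((kind, parse(BRACKETS[kind])))
            else:
                result.append((kind, value))
        return result                # EOF: close everything, no error

    return parse(None)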
Example 3
def test_comments(tokenize, ignore_comments, expected_tokens):
    if tokenize is None:  # pragma: no cover
        pytest.skip('Speedups not available')
    css_source = '/* lorem */ ipsum[dolor/* sit */]/* amet'
    tokens = regroup(tokenize(css_source, ignore_comments))
    result = list(jsonify(tokens))
    assert result == expected_tokens
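test_comments receives tokenize, ignore_comments and expected_tokens through pytest parametrization. A plausible wiring, assuming the tinycss layout in which cython_tokenize_flat is None when the compiled speedups are missing (which is what the skip guard checks for):

import pytest
from tinycss.tokenizer import python_tokenize_flat, cython_tokenize_flat

@pytest.mark.parametrize('tokenize',
                         [python_tokenize_flat, cython_tokenize_flat])
@pytest.mark.parametrize(('ignore_comments', 'expected_tokens'), [
    (False, [('COMMENT', '/* lorem */'), ('S', ' '), ('IDENT', 'ipsum'),
             ('[', [('IDENT', 'dolor'), ('COMMENT', '/* sit */')]),
             ('BAD_COMMENT', '/* amet')]),
    (True, [('S', ' '), ('IDENT', 'ipsum'),
            ('[', [('IDENT', 'dolor')])]),
])
def test_comments(tokenize, ignore_comments, expected_tokens):
    ...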
Example 4
def test_token_api(tokenize, css_source):
    if tokenize is None:  # pragma: no cover
        pytest.skip('Speedups not available')
    tokens = list(regroup(tokenize(css_source)))
    assert len(tokens) == 1
    token = tokens[0]
    expected_len = 7  # 2 spaces, 2 commas, 3 others.
    assert len(token.content) == expected_len
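The trailing comment explains the count: for '(8, foo, [z])' the single container's content is INTEGER, DELIM (comma), S, IDENT, DELIM, S, plus the nested '[' container, i.e. seven tokens. A quick check with the pure-Python tokenizer (token type names assume tinycss):

from tinycss.tokenizer import python_tokenize_flat, regroup

tokens = list(regroup(python_tokenize_flat('(8, foo, [z])')))
assert len(tokens) == 1  # one top-level '(' container
print([token.type for token in tokens[0].content])
# expected: ['INTEGER', 'DELIM', 'S', 'IDENT', 'DELIM', 'S', '[']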
Example 5
def comments(self, tokenize):
    for ignore_comments, expected_tokens in [
        (False, [
            ('COMMENT', '/* lorem */'),
            ('S', ' '),
            ('IDENT', 'ipsum'),
            ('[', [
                ('IDENT', 'dolor'),
                ('COMMENT', '/* sit */'),
            ]),
            ('BAD_COMMENT', '/* amet')
        ]),
        (True, [
            ('S', ' '),
            ('IDENT', 'ipsum'),
            ('[', [
                ('IDENT', 'dolor'),
            ]),
        ]),
    ]:
        css_source = '/* lorem */ ipsum[dolor/* sit */]/* amet'
        tokens = regroup(tokenize(css_source, ignore_comments))
        result = list(jsonify(tokens))
        self.ae(result, expected_tokens)
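The unterminated '/* amet' at the end of the source becomes a BAD_COMMENT token when comments are kept, and is dropped along with the regular comments when ignore_comments is true. A sketch of inspecting just the comment tokens on the flat stream (tinycss names assumed):

from tinycss.tokenizer import python_tokenize_flat

css = '/* lorem */ ipsum[dolor/* sit */]/* amet'
flat = python_tokenize_flat(css, ignore_comments=False)
print([token.type for token in flat if 'COMMENT' in token.type])
# expected: ['COMMENT', 'COMMENT', 'BAD_COMMENT']
assert not [token for token in python_tokenize_flat(css, ignore_comments=True)
            if 'COMMENT' in token.type]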
Example 6
def test_token_grouping(tokenize, css_source, expected_tokens):
    if tokenize is None:  # pragma: no cover
        pytest.skip('Speedups not available')
    tokens = regroup(tokenize(css_source, ignore_comments=False))
    result = list(jsonify(tokens))
    assert result == expected_tokens