Code example #1
File: tokenizing.py  Project: AtulKumar2/calibre
def token_grouping(self, tokenize):
    for css_source, expected_tokens in [
        ('', []),
        (r'Lorem\26 "i\psum"4px', [
            ('IDENT', 'Lorem&'), ('STRING', 'ipsum'), ('DIMENSION', 4)]),

        ('not([[lorem]]{ipsum (42)})', [
            ('FUNCTION', 'not', [
                ('[', [
                    ('[', [
                        ('IDENT', 'lorem'),
                    ]),
                ]),
                ('{', [
                    ('IDENT', 'ipsum'),
                    ('S', ' '),
                    ('(', [
                        ('INTEGER', 42),
                    ])
                ])
            ])]),

        # Close everything at EOF, no error
        ('a[b{"d', [
            ('IDENT', 'a'),
            ('[', [
                ('IDENT', 'b'),
                ('{', [
                    ('STRING', 'd'),
                ]),
            ]),
        ]),

        # Any remaining ), ] or } token is a nesting error
        ('a[b{d]e}', [
            ('IDENT', 'a'),
            ('[', [
                ('IDENT', 'b'),
                ('{', [
                    ('IDENT', 'd'),
                    (']', ']'),  # The error is visible here
                    ('IDENT', 'e'),
                ]),
            ]),
        ]),
        # ref:
        ('a[b{d}e]', [
            ('IDENT', 'a'),
            ('[', [
                ('IDENT', 'b'),
                ('{', [
                    ('IDENT', 'd'),
                ]),
                ('IDENT', 'e'),
            ]),
        ]),
    ]:
        tokens = regroup(tokenize(css_source, ignore_comments=False))
        result = list(jsonify(tokens))
        self.ae(result, expected_tokens)
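
Both helpers used above come from the tinycss test utilities: regroup() nests the flat token stream into container tokens, and jsonify() turns tokens into plain tuples that are easy to compare. A minimal sketch of what jsonify() does, inferred from the expected structures in this test (the attribute names .type, .value, .content, .function_name and .is_container are assumed to match tinycss's token classes), might look like this:

def jsonify(tokens):
    # Recursively convert tokens to comparable tuples.
    for token in tokens:
        if token.type == 'FUNCTION':
            # e.g. ('FUNCTION', 'not', [...nested tokens...])
            yield token.type, token.function_name, list(jsonify(token.content))
        elif token.is_container:
            # '(' / '[' / '{' tokens: e.g. ('[', [...nested tokens...])
            yield token.type, list(jsonify(token.content))
        else:
            # Plain tokens: e.g. ('IDENT', 'lorem'), ('S', ' ')
            yield token.type, token.value
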
Code example #2
File: tokenizing.py  Project: AtulKumar2/calibre
def token_api(self, tokenize):
    for css_source in [
            '(8, foo, [z])', '[8, foo, (z)]', '{8, foo, [z]}', 'func(8, foo, [z])'
    ]:
        tokens = list(regroup(tokenize(css_source)))
        self.ae(len(tokens), 1)
        self.ae(len(tokens[0].content), 7)
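
Outside the test harness, the same grouping API can be exercised directly. The sketch below assumes the standalone tinycss package, where the flat tokenizer and regroup are importable as tinycss.tokenizer.tokenize_flat and tinycss.tokenizer.regroup (calibre's bundled copy may expose them under a different path):

from tinycss.tokenizer import regroup, tokenize_flat

# The whole source collapses into a single '(' container token.
tokens = list(regroup(tokenize_flat('(8, foo, [z])')))
assert len(tokens) == 1
# .content holds the 7 inner tokens (2 spaces, 2 commas and 3 other tokens).
print([token.type for token in tokens[0].content])
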
Code example #3
File: test_tokenizer.py  Project: aESeguridad/GERE
def test_comments(tokenize, ignore_comments, expected_tokens):
    if tokenize is None:  # pragma: no cover
        pytest.skip('Speedups not available')
    css_source = '/* lorem */ ipsum[dolor/* sit */]/* amet'
    tokens = regroup(tokenize(css_source, ignore_comments))
    result = list(jsonify(tokens))
    assert result == expected_tokens
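
This pytest-style variant receives tokenize, ignore_comments and expected_tokens as test parameters. How they are supplied is not shown in the excerpt; a hypothetical parametrization for the last two (illustrative only, reusing the expected token lists from code example #5 below; the tokenize argument would be provided separately, for example by a parametrization over the available tokenizer implementations) could look like this:

import pytest

# Hypothetical parameter values, copied from the unittest-style variant below.
@pytest.mark.parametrize(('ignore_comments', 'expected_tokens'), [
    (False, [
        ('COMMENT', '/* lorem */'),
        ('S', ' '),
        ('IDENT', 'ipsum'),
        ('[', [('IDENT', 'dolor'), ('COMMENT', '/* sit */')]),
        ('BAD_COMMENT', '/* amet'),
    ]),
    (True, [
        ('S', ' '),
        ('IDENT', 'ipsum'),
        ('[', [('IDENT', 'dolor')]),
    ]),
])
def test_comments(tokenize, ignore_comments, expected_tokens):
    ...
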
Code example #4
File: test_tokenizer.py  Project: aESeguridad/GERE
def test_token_api(tokenize, css_source):
    if tokenize is None:  # pragma: no cover
        pytest.skip('Speedups not available')
    tokens = list(regroup(tokenize(css_source)))
    assert len(tokens) == 1
    token = tokens[0]
    expected_len = 7  # 2 spaces, 2 commas, 3 others.
    assert len(token.content) == expected_len
Code example #5
File: tokenizing.py  Project: AtulKumar2/calibre
def comments(self, tokenize):
    for ignore_comments, expected_tokens in [
        (False, [
            ('COMMENT', '/* lorem */'),
            ('S', ' '),
            ('IDENT', 'ipsum'),
            ('[', [
                ('IDENT', 'dolor'),
                ('COMMENT', '/* sit */'),
            ]),
            ('BAD_COMMENT', '/* amet')
        ]),
        (True, [
            ('S', ' '),
            ('IDENT', 'ipsum'),
            ('[', [
                ('IDENT', 'dolor'),
            ]),
        ]),
    ]:
        css_source = '/* lorem */ ipsum[dolor/* sit */]/* amet'
        tokens = regroup(tokenize(css_source, ignore_comments))
        result = list(jsonify(tokens))
        self.ae(result, expected_tokens)
Code example #6
File: test_tokenizer.py  Project: aESeguridad/GERE
def test_token_grouping(tokenize, css_source, expected_tokens):
    if tokenize is None:  # pragma: no cover
        pytest.skip('Speedups not available')
    tokens = regroup(tokenize(css_source, ignore_comments=False))
    result = list(jsonify(tokens))
    assert result == expected_tokens