Code example #1
File: plexer_tests.py Project: shawnpresser/plexer
    def test_fail_parsing_c_block_comment(self):
        # An unterminated /* block comment should raise a LexError that
        # points at the offending row and column.
        try:
            lines = tokenize_lines(
                '#include <stdio.h>\n#int i = 42; /* test \n', lexer='c')
        except LexError as e:
            assert e.row == 2
            assert e.col == 14
        else:
            raise AssertionError('expected LexError for unterminated comment')
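
If the surrounding suite runs under pytest, the same check can be written more compactly with pytest.raises, which fails the test automatically when no exception is raised. A minimal sketch, assuming pytest is available and that tokenize_lines and LexError are imported from the plexer project as in the examples on this page:

    import pytest

    def test_fail_parsing_c_block_comment(self):
        with pytest.raises(LexError) as excinfo:
            tokenize_lines(
                '#include <stdio.h>\n#int i = 42; /* test \n', lexer='c')
        # excinfo.value is the LexError instance that was raised.
        assert excinfo.value.row == 2
        assert excinfo.value.col == 14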
Code example #2
File: plexer_tests.py Project: shawnpresser/plexer
    def test_parse_includes(self):
        lines = tokenize_lines(
            '#include <stdio.h>\n#include "myfile.h"\n',
            lexer='c')
        assert len(lines) == 2
        assert lines[0][0]['value'] == '#include'

        # #include <name.h>
        include_global = [
            TYPE.IDENTIFIER, TYPE.WHITESPACE, TYPE.SPECIAL,
            TYPE.IDENTIFIER, TYPE.SPECIAL, TYPE.IDENTIFIER, TYPE.SPECIAL]
        include_global_names = [
            'identifier', 'whitespace', 'special',
            'identifier', 'special', 'identifier', 'special']

        # #include "name.h"
        include_local = [
            TYPE.IDENTIFIER, TYPE.WHITESPACE, TYPE.STRING]
        include_local_names = [
            'identifier', 'whitespace', 'string']

        verify_token_types = [
            include_global,  # #include <stdio.h>
            include_local,   # #include "myfile.h"
        ]

        verify_token_type_names = [
            include_global_names,  # #include <stdio.h>
            include_local_names,   # #include "myfile.h"
        ]

        for i, line in enumerate(lines):
            for j, token in enumerate(line):
                assert token['type'] == verify_token_types[i][j]
                assert token['name'] == verify_token_type_names[i][j]
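
As the assertions suggest, each line returned by tokenize_lines is a list of token dicts carrying at least 'type', 'name', and 'value' keys. A small inspection sketch, with the output shape inferred from the test above rather than from plexer's documentation:

    lines = tokenize_lines('#include <stdio.h>\n', lexer='c')
    for token in lines[0]:
        # e.g. identifier '#include', whitespace ' ', special '<', ...
        print(token['name'], repr(token['value']))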
Code example #3
def print_includes(s):
    # Print every #include line found in the C source string s.
    lines = tokenize_lines(s, "c")
    for line in lines:
        if line[0]["value"] == "#include":
            # Reassemble the line's text from its token values.
            p = ""
            for token in line:
                p = p + token["value"]
            print(p)
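
A quick usage sketch, reusing the source string that example #2 shows tokenizing cleanly:

    source = '#include <stdio.h>\n#include "myfile.h"\n'
    print_includes(source)
    # Expected output:
    #   #include <stdio.h>
    #   #include "myfile.h"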