Code example #1
def parseIdentifier(node: Cursor, regex: Optional[str] = None) -> str:
    # TODO: Check this
    # Unwrap C-style cast expressions (the operand is taken as the second child)
    while node.kind == CursorKind.CSTYLE_CAST_EXPR:
        node = list(node.get_children())[1]

    # Step through implicit wrapper nodes
    if node.kind == CursorKind.UNEXPOSED_EXPR:
        node = descend(node)

    # Step past an address-of or dereference unary operator
    if node.kind == CursorKind.UNARY_OPERATOR and next(
            node.get_tokens()).spelling in ('&', '*'):
        node = descend(node)

    # Check for null
    if node.kind == CursorKind.GNU_NULL_EXPR:
        return ''

    # Validate the node
    if node.kind != CursorKind.DECL_REF_EXPR:
        raise ParseError('expected identifier')

    value = node.spelling

    # Apply conditional regex constraint
    if regex and not re.match(regex, value):
        raise ParseError(f'expected identifier matching {regex}')

    return value
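
The descend() helper used above (and again in code example #6) is project-specific and not shown on this page. A minimal sketch of what it presumably does, assuming it simply steps into a node's single child:

def descend(node: Cursor) -> Cursor:
    # Assumed behaviour: unwrap a node by returning its single child expression.
    children = list(node.get_children())
    if len(children) != 1:
        raise ParseError('expected exactly one child node')
    return children[0]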
Code example #2
def get_file_comment(cursor: Cursor, child: Optional[Cursor]) -> str:
    """
    Attempts to get the comment at the top of the file.

    Args:
        cursor (:class:`cindex.Cursor`): The root cursor of the file.
        child (:class:`cindex.Cursor`): The first child node in the file.
            This can be None.

    Returns:
        str: The file level comment.
    """
    try:
        token = next(cursor.get_tokens())
    except StopIteration:
        # Only happens with a completely empty file
        return ""

    if token.kind == cindex.TokenKind.COMMENT:
        if child is not None:
            child_comment = child.raw_comment
        else:
            child_comment = ""

        # If the first comment is not the documentation comment for the first
        # child, assume it is the file-level comment.
        if child_comment != token.spelling:
            return parse_comment(token)

    return ""
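
A hedged usage sketch for the function above (parse_comment is a helper elsewhere in the same project and is not shown here; sample.c is a placeholder path):

from clang import cindex

index = cindex.Index.create()
tu = index.parse('sample.c')
first_child = next(tu.cursor.get_children(), None)   # None for an empty file
print(get_file_comment(tu.cursor, first_child))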
Code example #3
File: __init__.py Project: ousttrue/pycpptool
    def traverse(c: cindex.Cursor, indent='') -> None:
        # Skip cursors that come from included files
        if c.location.file.name != str(path):
            return
        # Avoid printing the same cursor twice
        if c.hash in used:
            return
        used.add(c.hash)

        ref = ''
        if c.referenced and c.referenced.hash != c.hash:
            ref = f' => {c.referenced.hash:#010x}'

        canonical = ''
        if c.canonical and c.canonical.hash != c.hash:
            canonical = f' => {c.canonical.hash:#010x} (forward decl)'

        value = f'{c.hash:#010x}:{indent} {c.kind}: {c.spelling}{ref}{canonical}'
        print(value)

        if c.kind == cindex.CursorKind.UNEXPOSED_DECL:
            tokens = [t for t in c.get_tokens()]
            if tokens and tokens[0].spelling == 'extern':
                # extern "C" block
                for child in c.get_children():
                    traverse(child)
                return

        for child in c.get_children():
            traverse(child, indent + '  ')
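
The traverse function above is a nested helper: path and used come from its enclosing scope, which is not shown on this page. A minimal hedged sketch of what the surrounding driver presumably looks like (the function name show_ast and the header path are placeholders):

import pathlib
from clang import cindex

def show_ast(path: pathlib.Path) -> None:   # hypothetical enclosing function
    used = set()
    tu = cindex.Index.create().parse(str(path))

    def traverse(c: cindex.Cursor, indent='') -> None:
        ...  # body as shown above

    for child in tu.cursor.get_children():
        if child.location.file is None:
            continue  # builtins without a file location would break the filter above
        traverse(child)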
Code example #4
File: parse.py Project: jjkester/checkmerge
def customize_literals(cursor: clang.Cursor, kwargs: NodeData) -> NodeData:
    """
    Sets the label to the token value of the literal. Without this customization a literal would not have a label at
    all.
    """
    try:
        kwargs['label'] = ''.join(map(lambda x: x.spelling, cursor.get_tokens()))
    except AttributeError:
        raise parse.ParseError("Unexpected error: literal does not have any tokens.")
    return kwargs
Code example #5
File: ast-dump-test.py Project: Ryp/Reaper
def dump(cursor: Cursor):
    # Print every token, inserting blank lines so that the output keeps the
    # original line numbering of the source file.
    line_count = 1
    for t in cursor.get_tokens():
        current_line = t.location.line
        if current_line > line_count:
            for _ in range(current_line - line_count):
                print('')
            line_count = current_line
        print(t.spelling, end=' ')
    print('')
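
Since get_tokens() on the translation-unit cursor yields every token in the file, dump() can reproduce a whole source file with its original line breaks. A minimal hedged usage sketch (sample.c and the compiler flag are placeholders):

from clang.cindex import Index

tu = Index.create().parse('sample.c', args=['-std=c11'])
dump(tu.cursor)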
Code example #6
def parseIntLit(node: Cursor, signed: bool = True) -> int:
    # Assume the literal is not negated
    negation = False

    # Check if the node is wrapped in a valid unary negation
    if signed and node.kind == CursorKind.UNARY_OPERATOR and next(
            node.get_tokens()).spelling == '-':
        negation = True
        node = descend(node)

    # Validate the node
    if node.kind != CursorKind.INTEGER_LITERAL:
        if not signed:
            raise ParseError('expected unsigned integer literal')
        else:
            raise ParseError('expected integer literal')

    # Extract the value
    value = int(next(node.get_tokens()).spelling)

    return -value if negation else value
Code example #7
File: cindex_node.py Project: ousttrue/pycpptool
 def __init__(self, path: pathlib.Path, c: cindex.Cursor) -> None:
     super().__init__(path, c)
     typedef_type = get_typedef_type(c)
     if typedef_type:
         self.typedef_type = cdeclare.parse_declare(typedef_type.spelling)
     else:
         tokens = [t.spelling for t in c.get_tokens()]
         # print(tokens)
         if len(tokens) == 3:
             self.typedef_type = cdeclare.parse_declare(tokens[1])
             # raise Exception()
         else:
             self.typedef_type = None
Code example #8
File: c_header.py Project: lefta/ehlit-prototype
def parse_ENUM_DECL(cursor: Cursor) -> ast.Node:
    if not cursor.is_definition():
        return ast.EhEnum(0, ast.Identifier(0, cursor.spelling), None)
    fields: List[ast.Identifier] = []
    expect: bool = False
    for t in cursor.get_tokens():
        if t.spelling == '{' or t.spelling == ',':
            expect = True
        elif t.spelling == '}':
            break
        elif expect:
            fields.append(ast.Identifier(0, t.spelling))
            expect = False
    return ast.EhEnum(0, ast.Identifier(0, cursor.spelling), fields)
Code example #9
File: c_header.py Project: lefta/ehlit-prototype
def parse_MACRO_DEFINITION(cursor: Cursor) -> Optional[ast.Node]:
    tokens: List[Token] = list(cursor.get_tokens())
    if tokens[0].spelling in builtin_defines:
        return None

    sym: ast.Identifier = ast.Identifier(0, tokens[0].spelling)

    # Simple define
    if len(tokens) == 1:
        return CDefine(sym)
    # Function macro
    if tokens[1].spelling == '(':
        i = 2
        arg_cnt = 0
        while i < len(tokens):
            if tokens[i].kind != TokenKind.IDENTIFIER or i + 1 >= len(tokens):
                break
            arg_cnt += 1
            if tokens[i + 1].spelling == ')':
                if i + 2 >= len(tokens) and ',' not in [
                        t.spelling for t in tokens
                ]:
                    break
                return CMacroFunction(sym, arg_cnt)
            elif tokens[i + 1].spelling != ',':
                break
            i += 2
    # Constant macro
    next_relevant_token = 2 if tokens[1].spelling == '(' else 1
    if tokens[next_relevant_token].kind == TokenKind.LITERAL:
        return ast.VariableDeclaration(_macro_var_type(tokens), sym)
    # Alias macro
    alias: Optional[ast.Symbol] = _macro_alias_value(tokens)
    if alias is not None:
        return ast.Alias(alias, ast.Identifier(0, tokens[0].spelling))
    return None
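
libclang only exposes MACRO_DEFINITION cursors when the translation unit is parsed with a detailed preprocessing record, so the project presumably sets that option. A hedged sketch of how the function above might be driven (builtin_defines, CDefine, CMacroFunction and the _macro_* helpers are project-specific and not shown here; sample.h is a placeholder):

from clang.cindex import CursorKind, Index, TranslationUnit

tu = Index.create().parse(
    'sample.h', options=TranslationUnit.PARSE_DETAILED_PROCESSING_RECORD)
for cur in tu.cursor.get_children():
    if cur.kind == CursorKind.MACRO_DEFINITION:
        node = parse_MACRO_DEFINITION(cur)   # may be None for builtin defines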
Code example #10
 def are_asts_equal(self, a: Cursor, b: Cursor) -> bool:
     # Compare token spellings pairwise as a proxy for AST equality.
     for a, b in itertools.zip_longest(a.get_tokens(), b.get_tokens()):
         if a is None or b is None or a.spelling != b.spelling:
             return False
     return True