Code Example #1
File: context.py  Project: nyulacska/langkit
    def __init__(self, lib_name, astnode_names, astnode_kinds, prefix):
        """
        :param str lib_name: Lower-case name for the generated library.

        :param list[str] astnode_names: List of camel-with-mixed-case names for
            all node types.

        :param dict[int, str] astnode_kinds: Mapping of kinds ('Enum_Rep) to
            camel-with-mixed-case node names.

        :param str prefix: Prefix to use for command names.
        """
        self.lib_name = lib_name
        self.astnode_names = [Name(name) for name in astnode_names]
        self.astnode_kinds = {
            kind: Name(name)
            for kind, name in astnode_kinds.items()
        }
        self.prefix = prefix

        self.node_record = ('{}__implementation__root_node_record'.format(
            self.lib_name))
        """
        Name of the record type used to represent node data.
        """

        self.entity_struct_names = self._entity_struct_names()

        self.reparse_debug_info()
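The constructor derives GDB-visible type names from the library name via double-underscore mangling. As a minimal stand-alone sketch of what node_record evaluates to (the library name "libfoolang" is hypothetical; real values are supplied by langkit's code generator):

# Hypothetical library name, for illustration only.
lib_name = 'libfoolang'

node_record = '{}__implementation__root_node_record'.format(lib_name)
print(node_record)  # -> libfoolang__implementation__root_node_record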
Code Example #2
        def construct_operand(op):
            from langkit.expressions import Cast, New
            expr = construct(op)

            check_source_language(
                expr.type == LogicVarType
                or expr.type.matches(T.root_node)
                or expr.type.matches(T.root_node.env_el()),
                "Operands to a logic bind operator should be either "
                "a logic variable or an ASTNode, got {}".format(expr.type)
            )

            if expr.type.matches(T.root_node.env_el()):
                if expr.type is not T.root_node.env_el():
                    expr = Cast.Expr(expr, T.root_node.env_el())
            elif expr.type.matches(T.root_node):
                # Cast the ast node type if necessary
                if expr.type is not T.root_node:
                    expr = Cast.Expr(expr, T.root_node)

                # If the expression is a root node, implicitly construct an
                # env_element from it.
                expr = New.StructExpr(T.root_node.env_el(), {
                    Name('El'): expr,
                    Name('MD'): LiteralExpr('<>', None),
                    Name('Parents_Bindings'): LiteralExpr('null', None)
                })

            return expr
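The branches above implement a "normalize to entity" pattern: a logic variable passes through unchanged, while a bare AST node is upcast to the root node type and then wrapped into an entity struct. A stand-alone sketch of the same idea, with all types invented for illustration (this is not langkit's API):

class LogicVar:
    pass

class Node:
    pass

class Entity:
    # Toy stand-in for the env_element/entity wrapper built above.
    def __init__(self, node):
        self.node = node

def construct_operand(value):
    # Logic variables and entities are accepted as-is; bare nodes are
    # wrapped; anything else is rejected, mirroring the check above.
    if isinstance(value, (LogicVar, Entity)):
        return value
    elif isinstance(value, Node):
        return Entity(value)
    raise TypeError(
        'Operands to a logic bind operator should be either '
        'a logic variable or an ASTNode, got {}'.format(type(value).__name__)
    )

assert isinstance(construct_operand(Node()), Entity)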
Code Example #3
    def token_base_name(self, token):
        """
        Helper function to get the name of a token.

        :param TokenAction|Enum|Name|str token: Input token. It can be either a
            TokenAction subclass (i.e. a Lexer subclass attribute), an enum
            value from "self.tokens", the token Name or a string (case
            insensitive token name).
        :rtype: Name
        """
        if isinstance(token, TokenAction):
            return token.name
        elif isinstance(token, Name):
            assert token in self.tokens_set
            return token
        else:
            assert isinstance(token, str), (
                "Bad type for {}, supposed to be str|{}".format(
                    token, self.tokens.__name__
                )
            )
            name = Name.from_lower(token.lower())
            if name in self.tokens_set:
                return name
            elif token in self.literals_map:
                return self.literals_map[token].name
            else:
                check_source_language(
                    False,
                    "{} token literal is not part of the valid tokens for "
                    "this grammar".format(token))
Code Example #4
File: lexer.py  Project: AdaCore/langkit
    def token_base_name(self, token):
        """
        Helper function to get the name of a token.

        :param TokenAction|Enum|Name|str token: Input token. It can be either a
            TokenAction subclass (i.e. a Lexer subclass attribute), an enum
            value from "self.tokens_class", the token Name or a string (case
            insensitive token name).
        :rtype: Name
        """
        if isinstance(token, TokenAction):
            return token.name
        elif isinstance(token, Name):
            assert token in self.tokens_set
            return token
        else:
            assert isinstance(token, str), (
                "Bad type for {}, supposed to be str|{}".format(
                    token, self.tokens_class.__name__
                )
            )
            name = Name.from_lower(token.lower())
            if name in self.tokens_set:
                return name
            elif token in self.literals_map:
                return self.literals_map[token].name
            else:
                raise Exception(
                    "{} token literal is not part of the valid tokens for "
                    "this grammar".format(token)
                )
Code Example #5
    def __getattr__(self, attr: str) -> TokenAction:
        """
        Shortcut to get a TokenAction stored in self.tokens.
        """
        name = Name.from_camel(attr)
        try:
            return self.tokens.name_to_token[name]
        except KeyError:
            raise AttributeError(f"No such token: {attr}")
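This makes tokens reachable as plain attributes rather than through a dict lookup. A stand-alone sketch of the same pattern, with invented names and lower-case string keys instead of langkit's Name objects:

class TokenHolder:
    def __init__(self, name_to_token):
        # Real instance attribute, so __getattr__ is never invoked for it.
        self.name_to_token = name_to_token

    def __getattr__(self, attr):
        # Only called for attributes not found the normal way.
        try:
            return self.name_to_token[attr.lower()]
        except KeyError:
            raise AttributeError(f"No such token: {attr}")

holder = TokenHolder({'identifier': '<Identifier token>'})
print(holder.Identifier)  # -> <Identifier token>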
Code Example #6
File: utils.py  Project: pmderodat/langkit
def adaify_name(context: Context, name: str) -> str:
    """
    Turn a symbol name like a__b into an Ada-like name such as A.B.
    Also strip the $LIB_NAME.Analysis prefix, if present.
    """
    pfx = context.analysis_prefix
    if name.startswith(pfx):
        name = name[len(pfx):]
    chunks = name.split('__')
    return '.'.join(Name.from_lower(c).camel_with_underscores for c in chunks)
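For illustration, a self-contained approximation of the same conversion, assuming Name.from_lower(...).camel_with_underscores capitalizes each underscore-separated word (the prefix value is hypothetical):

def adaify_name_sketch(name, analysis_prefix='libfoolang__analysis__'):
    # Strip the analysis prefix, then turn each '__'-separated chunk
    # into an Ada-style Camel_With_Underscores identifier.
    if name.startswith(analysis_prefix):
        name = name[len(analysis_prefix):]
    return '.'.join(
        '_'.join(word.capitalize() for word in chunk.split('_'))
        for chunk in name.split('__')
    )

print(adaify_name_sketch('a__b'))                            # -> A.B
print(adaify_name_sketch('libfoolang__analysis__foo_node'))  # -> Foo_Node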
Code Example #7
File: context.py  Project: shintakezou/langkit
    def _astnode_struct_names(self):
        """
        Turn the set of ASTNode subclass names into a mapping from ASTNode
        record names, as GDB will see them, to user-friendly ASTNode names.
        """
        return {
            '{}__implementation__bare_{}_type'.format(
                self.lib_name, name.lower()
            ): Name.from_camel_with_underscores(name)
            for name in self.astnode_names
        }
Code Example #8
    def _entity_struct_names(self):
        """
        Turn the set of AST node names into a set of encoded type names for
        the corresponding entity records.
        """
        return {
            '{}__analysis__implementation__entity_{}'.format(
                self.lib_name,
                Name.from_camel_with_underscores(name).lower
            ) for name in self.astnode_names
        } | {'{}__analysis__implementation__ast_envs__entity'
             .format(self.lib_name)}
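A stand-alone sketch of the resulting set, using a hypothetical library and node names, with str.lower standing in for langkit's Name conversion:

lib_name = 'libfoolang'
astnode_names = ['Foo_Node', 'Expr']

entity_struct_names = {
    '{}__analysis__implementation__entity_{}'.format(lib_name, name.lower())
    for name in astnode_names
} | {'{}__analysis__implementation__ast_envs__entity'.format(lib_name)}

print(sorted(entity_struct_names))
# ['libfoolang__analysis__implementation__ast_envs__entity',
#  'libfoolang__analysis__implementation__entity_expr',
#  'libfoolang__analysis__implementation__entity_foo_node']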
Code Example #9
File: lexer.py  Project: AdaCore/langkit
    def __new__(mcs, name, bases, dct):
        assert len(bases) == 1, (
            "Multiple inheritance for LexerToken subclasses is not supported"
        )

        fields = []
        for fld_name, fld_value in dct.items():
            if isinstance(fld_value, TokenAction):
                fld_value.name = Name.from_camel(fld_name)
                fields.append(fld_value)

        dct['fields'] = getattr(bases[0], 'fields', []) + fields
        return type.__new__(mcs, name, bases, dct)
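The metaclass collects every TokenAction class attribute into a 'fields' list, prepending whatever the single base class already collected. A stand-alone sketch of that collection pattern with an invented marker type:

class Marker:
    # Toy stand-in for TokenAction.
    pass

class CollectFields(type):
    def __new__(mcs, name, bases, dct):
        assert len(bases) <= 1, 'multiple inheritance is not supported'
        fields = [v for v in dct.values() if isinstance(v, Marker)]
        inherited = getattr(bases[0], 'fields', []) if bases else []
        dct['fields'] = inherited + fields
        return type.__new__(mcs, name, bases, dct)

class Base(metaclass=CollectFields):
    a = Marker()

class Child(Base):
    b = Marker()

print(len(Base.fields), len(Child.fields))  # -> 1 2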
Code Example #10
    def add_tokens(self, cls):
        for fld_name, fld_value in cls.__dict__.items():
            if isinstance(fld_value, TokenAction):
                dest_list = self.tokens
            elif isinstance(fld_value, TokenFamily):
                dest_list = self.token_families
            else:
                continue

            # Several items here are shared: for example, the
            # LexerToken.LexingFailure instance can be used in two different
            # lexers, so we can't assume its name is always None. Just accept
            # it when it already has the expected name.
            name = Name.from_camel(fld_name)
            assert fld_value.name in (None, name)
            fld_value.name = name
            dest_list.append(fld_value)
Code Example #11
    def check_token_families(self, context):
        """
        Pass that checks that either there are no defined token families, or
        that they form a partition of existing tokens.
        """
        def format_token_list(tokens):
            return ', '.join(sorted(
                t.dsl_name if isinstance(t, TokenAction) else str(t)
                for t in tokens
            ))

        # Sort token families by name so that legality checks and code
        # generation are deterministic.
        self.tokens.token_families.sort(key=lambda tf: tf.name)

        all_tokens = set(self.tokens)
        seen_tokens = set()

        for family in self.tokens.token_families:
            with family.diagnostic_context:
                not_tokens = family.tokens - all_tokens
                check_source_language(
                    not not_tokens,
                    'Invalid tokens: {}'.format(format_token_list(not_tokens))
                )

                already_seen_tokens = seen_tokens & family.tokens
                check_source_language(
                    not already_seen_tokens,
                    'Tokens must belong to one family exclusively: {}'
                    .format(format_token_list(already_seen_tokens))
                )

                seen_tokens.update(family.tokens)

        # Create a token family to host all tokens that are not associated with
        # a specific token family.
        default_family = TokenFamily(*list(all_tokens - seen_tokens))
        default_family.name = Name('Default_Family')
        self.tokens.token_families.append(default_family)

        # Make it easy to get the family a token belongs to
        for tf in self.tokens.token_families:
            for t in tf.tokens:
                self.tokens.token_to_family[t] = tf
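In set terms, the pass verifies that the declared families are pairwise disjoint subsets of the token set, then adds a default family for the leftovers so the families form a partition. A stand-alone sketch with hypothetical token and family data:

all_tokens = {'id', 'number', 'plus', 'comment'}
families = {'Alphanumeric': {'id', 'number'}, 'Trivia': {'comment'}}

seen = set()
for fam_name, fam_tokens in sorted(families.items()):
    # Families may only contain known tokens, each in one family at most.
    assert fam_tokens <= all_tokens, 'Invalid tokens: {}'.format(
        sorted(fam_tokens - all_tokens))
    assert not (seen & fam_tokens), \
        'Tokens must belong to one family exclusively'
    seen |= fam_tokens

# Whatever is left goes to a default family, completing the partition.
default_family = sorted(all_tokens - seen)
print(default_family)  # -> ['plus']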
Code Example #12
    def add_tokens(self, klass):
        for fld_name, fld_value in klass.__dict__.items():
            if isinstance(fld_value, TokenAction):
                fld_value.name = Name.from_camel(fld_name)
                self.fields.append(fld_value)