Code example #1
File: structs.py (project: AdaCore/langkit)
    def _check_match_coverage(self, input_type):
        """
        Given some input type for this match expression, make sure the set of
        matchers covers all cases. check_source_language will raise an error if
        it's not the case. Also emit warnings for unreachable matchers.

        :param ASTNode input_type: Type parameter.
        :rtype: None
        """

        type_set = TypeSet()

        for i, (t, _, _) in enumerate(self.matchers, 1):
            t_name = 'default one' if t is None else t.name().camel
            check_source_language(not type_set.include(t or input_type),
                                  'The #{} matcher ({}) is unreachable'
                                  ' as all previous matchers cover all the'
                                  ' nodes it can match'.format(i, t_name),
                                  Severity.warning)

        mm = sorted(type_set.unmatched_types(input_type),
                    key=lambda cls: cls.hierarchical_name())

        check_source_language(
            not mm,
            'The following AST nodes have no handler: {} (all {} subclasses'
            ' require one)'.format(
                ', '.join(t.name().camel for t in mm),
                input_type.name().camel
            )
        )
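
All of these examples revolve around check_source_language, langkit's helper for reporting diagnostics about the user's DSL code. Its definition is not part of these excerpts; the following is only a minimal sketch of the interface that the call sites here appear to assume (the Severity members and the do_raise flag are inferred from usage, not confirmed against the actual langkit.diagnostics module):

from enum import Enum


class Severity(Enum):
    warning = 1
    error = 2


class DiagnosticError(Exception):
    pass


def check_source_language(predicate, message, severity=Severity.error,
                          do_raise=True):
    # Sketch only: report `message` when `predicate` is falsy; errors abort
    # processing (when do_raise is true), warnings are merely reported.
    if not predicate:
        print('{}: {}'.format(severity.name, message))
        if severity == Severity.error and do_raise:
            raise DiagnosticError(message)

Calls such as check_source_language(False, message) in the later examples use a constant false predicate to report a diagnostic unconditionally.
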
Code example #2
File: boolean.py (project: AdaCore/langkit)
    def construct(self):
        """
        Construct a resolved expression for this.

        :rtype: IfExpr
        """
        def construct_op(op):
            return construct(op, lambda t: t in (BoolType, EquationType),
                             "Operands of binary logic operator must be of "
                             "boolean or equation type, got {expr_type}")

        lhs, rhs = map(construct_op, [self.lhs, self.rhs])

        check_source_language(
            lhs.type is rhs.type, "Left and right operands to binary logic "
            "operator should have the same type"
        )

        if lhs.type is BoolType:
            # Boolean case
            if self.kind == self.AND:
                then = rhs
                else_then = LiteralExpr('False', BoolType)
            else:
                then = LiteralExpr('True', BoolType)
                else_then = rhs
            return If.Expr(lhs, then, else_then, BoolType)
        else:
            # Equation case
            return BuiltinCallExpr(
                names.Name("Logic") + names.Name.from_lower(self.kind),
                EquationType, [lhs, rhs],
                '{}_Pred'.format(self.kind.capitalize())
            )
Code example #3
File: structs.py (project: AdaCore/langkit)
    def do_prepare(self):
        self.matchers = []

        for i, match_fn in enumerate(self.matchers_functions):
            argspec = inspect.getargspec(match_fn)
            check_source_language(
                len(argspec.args) == 1 and
                not argspec.varargs and
                not argspec.keywords and
                (not argspec.defaults or len(argspec.defaults) < 2),
                'Invalid matcher lambda'
            )

            if argspec.defaults:
                match_type = resolve_type(argspec.defaults[0])
                check_source_language(
                    issubclass(match_type, ASTNode) and
                    match_type != ASTNode,
                    'Invalid matching type: {}'.format(
                        match_type.name().camel
                    )
                )
            else:
                match_type = None

            match_var = AbstractVariable(
                names.Name('Match_{}'.format(i)),
                type=match_type,
                create_local=True
            )
            self.matchers.append((match_type, match_var, match_fn(match_var)))
Code example #4
File: envs.py (project: AdaCore/langkit)
    def prepare(self):
        """
        Method called by CompileCtx.compute_properties. Used to check that
        properties generated by the env spec are conforming.

        :rtype: bool
        """
        for key_prop, val_prop, _, _, _ in self.envs_expressions:
            with key_prop.diagnostic_context():
                check_source_language(
                    key_prop.type.matches(Symbol) or
                    key_prop.type.matches(Symbol.array_type()),
                    'The key expression in environment specification must be'
                    ' either a symbol or an array of symbol: got {}'
                    ' instead'.format(
                        key_prop.type.name().camel
                    )
                )

                check_source_language(
                    val_prop.type.matches(T.root_node)
                    or (val_prop.type.is_collection
                        and val_prop.type.element_type().matches(T.root_node)),
                    'The val expression in environment specification must be'
                    ' either a node or an array of nodes: got {}'
                    ' instead'.format(
                        val_prop.type.name().camel
                    )
                )
Code example #5
File: collections.py (project: AdaCore/langkit)
    def construct(self):
        """
        Construct a resolved expression for this map operation.

        :rtype: MapExpr
        """
        (collection_expr,
         expr,
         element_var,
         index_var,
         iter_scope) = self.construct_common()

        check_source_language(
            not self.concat or expr.type.is_collection(),
            'Cannot mapcat with expressions returning {} values (collections'
            ' expected instead)'.format(expr.type.name())
        )

        with iter_scope.use():
            filter_expr = (construct(self.filter_expr, BoolType)
                           if self.filter_expr else None)

            take_while_expr = (construct(self.take_while_expr, BoolType)
                               if self.take_while_expr else None)

        return Map.Expr(element_var, index_var, collection_expr, expr,
                        iter_scope, filter_expr, self.concat,
                        take_while_expr)
Code example #6
File: boolean.py (project: AdaCore/langkit)
    def construct(self):
        """
        Construct a resolved expression for this.

        :rtype: EqExpr
        """
        def construct_logic_eq(lhs, rhs):
            if lhs.type == LogicVarType:
                check_source_language(
                    rhs.type == LogicVarType or rhs.type.matches(ASTNode),
                    "Operands to a logic equality operator should be either "
                    "a logic variable or an ASTNode, got {}".format(rhs.type)
                )

                # Cast the ast node type if necessary
                if (rhs.type.matches(ASTNode) and rhs.type != T.root_node):
                    rhs = Cast.Expr(rhs, T.root_node)

                return BuiltinCallExpr("Equals", EquationType, [lhs, rhs],
                                       "Equals_Pred")
            else:
                return None

        from langkit.expressions.structs import Cast
        lhs = construct(self.lhs)
        rhs = construct(self.rhs)

        # We don't care about associativity in logic eq, so lhs and rhs
        # might be passed in reverse order.
        logic = construct_logic_eq(lhs, rhs) or construct_logic_eq(rhs, lhs)
        if logic:
            return logic

        # Don't use CompiledType.matches since in the generated code, we need
        # both operands to be *exactly* the same types, so handle specifically
        # each case.
        if issubclass(lhs.type, ASTNode):
            # Handle checks between two subclasses without explicit casts. In
            # order to help users to detect dubious checks, forbid operands
            # that can never be equal because they have no subclass in common.
            if issubclass(lhs.type, rhs.type):
                lhs = Cast.Expr(lhs, assert_type(rhs.type, ASTNode))
            elif issubclass(rhs.type, lhs.type):
                rhs = Cast.Expr(rhs, assert_type(lhs.type, ASTNode))
            else:
                check_source_language(
                    False, '{} and {} values are never equal'.format(
                        lhs.type.name().camel, rhs.type.name().camel
                    )
                )
        else:
            check_source_language(
                lhs.type == rhs.type,
                'Incompatible types for equality: {} and {}'.format(
                    lhs.type.name().camel, rhs.type.name().camel
                )
            )

        return self.make_expr(lhs, rhs)
Code example #7
File: parsers.py (project: AdaCore/langkit)
    def add_rules(self, **kwargs):
        """
        Add rules to the grammar. The keyword arguments provide the name for
        each rule.

        :param dict[str, Parser] kwargs: The rules to add to the grammar.
        """
        import ast
        loc = extract_library_location()

        class GetTheCall(ast.NodeVisitor):
            """
            Helper visitor that will get the corresponding add_rules call in the
            source, so that we're then able to extract the precise line where
            each rule is added.
            """
            def __init__(self):
                self.the_call = None

            def visit_Call(self, call):
                if (
                    isinstance(call.func, ast.Attribute)
                    and call.func.attr == 'add_rules'
                    # Traceback locations are very imprecise, and python's ast
                    # doesn't have an end location for nodes, so we'll keep
                    # taking add_rules calls, and the last one is necessarily
                    # the right one.
                    and call.lineno <= loc.line
                ):
                    self.the_call = call

        caller_location = extract_library_location()
        the_call = GetTheCall()
        with open(caller_location.file) as f:
            file_ast = ast.parse(f.read(), f.name)
            the_call.visit(file_ast)

        # Use the keyword arguments to find the precise line where each rule
        # was declared.
        keywords = {kw.arg: kw.value for kw in the_call.the_call.keywords}

        for name, rule in kwargs.items():
            rule.set_name(names.Name.from_lower(name))
            rule.set_grammar(self)
            rule.set_location(Location(loc.file, keywords[name].lineno, ""))
            rule.is_root = True

            with Context("In definition of rule '{}'".format(name), loc):
                check_source_language(
                    name not in self.rules,
                    "Rule '{}' is already present in the grammar".format(name)
                )

            self.rules[name] = rule
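
The GetTheCall visitor above uses nothing beyond Python's standard ast module. Below is a small self-contained sketch of the same idea: find the last add_rules call at or before a given line, then read the line number of each keyword argument. The Grammar and Row names in the sample source are placeholders, not langkit API.

import ast

sample = '''
g = Grammar()
g.add_rules(stmt=Row("a"))
g.add_rules(expr=Row("b"),
            atom=Row("c"))
'''


class LastAddRulesCall(ast.NodeVisitor):
    # Remember the last add_rules call located at or before `line`.
    def __init__(self, line):
        self.line = line
        self.the_call = None

    def visit_Call(self, call):
        if (isinstance(call.func, ast.Attribute)
                and call.func.attr == 'add_rules'
                and call.lineno <= self.line):
            self.the_call = call
        self.generic_visit(call)


finder = LastAddRulesCall(line=4)
finder.visit(ast.parse(sample))
print({kw.arg: kw.value.lineno for kw in finder.the_call.keywords})
# -> {'expr': 4, 'atom': 5}
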
Code example #8
File: structs.py (project: AdaCore/langkit)
    def do_prepare(self):
        check_source_language(issubclass(self.struct_type, Struct), (
            "Invalid type, expected struct type, got {}".format(
                self.struct_type.name().camel
            )
        ))

        check_source_language(not issubclass(self.struct_type, ASTNode), (
            "Invalid type, expected struct type, got {} which is an "
            "ASTNode".format(
                self.struct_type.name().camel
            )
        ))
Code example #9
File: logic.py (project: AdaCore/langkit)
    def construct(self):
        exprs = [construct(e) for e in self.exprs]

        prop_types = [self.pred_property.struct] + [
            a.type for a in self.pred_property.explicit_arguments
        ]

        # Separate logic variable expressions from extra argument expressions
        logic_var_exprs, closure_exprs = funcy.split_by(
            lambda e: e.type == LogicVarType, exprs
        )

        check_source_language(
            len(logic_var_exprs) > 0, "Predicate instantiation should have at "
            "least one logic variable expression"
        )

        check_source_language(
            all(e.type != LogicVarType for e in closure_exprs), "Logic "
            "variable expressions should be grouped at the beginning, and "
            "should not appear after non logic variable expressions"
        )

        for i, (expr, arg_type) in enumerate(zip(exprs, prop_types)):
            if expr.type == LogicVarType:
                check_source_language(
                    arg_type.matches(T.root_node), "Argument #{} of predicate "
                    "is a logic variable, the corresponding property formal "
                    "has type {}, but should be a descendent of {}".format(
                        i, arg_type.name().camel, T.root_node.name().camel
                    )
                )
            else:
                check_source_language(
                    expr.type.matches(arg_type), "Argument #{} of predicate "
                    "has type {}, should be {}".format(
                        i, expr.type.name().camel, arg_type.name().camel
                    )
                )

        pred_id = self.pred_property.do_generate_logic_predicate(*[
            e.type for e in closure_exprs
        ])

        closure_exprs.append(construct(Env))

        logic_var_exprs.append(
            BasicExpr("{}_Predicate_Caller'({})".format(
                pred_id, ", ".join(
                    ["{}" for _ in range(len(closure_exprs) - 1)]
                    + ["Env => {}"]
                )
            ), type=None, sub_exprs=closure_exprs)
        )

        return BuiltinCallExpr(
            "{}_Pred.Create".format(pred_id), EquationType, logic_var_exprs,
            result_var_name="Pred"
        )
Code example #10
File: boolean.py (project: AdaCore/langkit)
    def construct(self):
        """
        Construct a resolved expression for this.

        :rtype: IfExpr
        """
        then = construct(self.then)
        else_then = construct(self.else_then)
        check_source_language(
            then.type.matches(else_then.type),
            "Mismatching types in If expression: {} and {}".format(
                then.type.name().camel, else_then.type.name().camel
            )
        )
        rtype = then.type.unify(else_then.type)
        return If.Expr(construct(self.cond, BoolType), then, else_then, rtype)
Code example #11
File: boolean.py (project: AdaCore/langkit)
        def construct_logic_eq(lhs, rhs):
            if lhs.type == LogicVarType:
                check_source_language(
                    rhs.type == LogicVarType or rhs.type.matches(ASTNode),
                    "Operands to a logic equality operator should be either "
                    "a logic variable or an ASTNode, got {}".format(rhs.type)
                )

                # Cast the ast node type if necessary
                if (rhs.type.matches(ASTNode) and rhs.type != T.root_node):
                    rhs = Cast.Expr(rhs, T.root_node)

                return BuiltinCallExpr("Equals", EquationType, [lhs, rhs],
                                       "Equals_Pred")
            else:
                return None
Code example #12
File: boolean.py (project: AdaCore/langkit)
    def construct(self):
        # Add var_expr to the scope for this Then expression
        PropertyDef.get_scope().add(self.var_expr.local_var)

        # Accept as a prefix:
        # * any pointer, since it can be checked against "null";
        # * any Struct, since its "Is_Null" field can be checked.
        expr = construct(self.expr,
                         lambda cls: cls.is_ptr or issubclass(cls, Struct))
        self.var_expr.set_type(expr.type)

        then_expr = construct(self.then_expr)

        # Assign a default value to the fallback expression. For the moment,
        # only booleans and structs are handled.
        if self.default_val is None:
            if then_expr.type.matches(BoolType):
                default_expr = construct(False)
            elif issubclass(then_expr.type, Struct):
                default_expr = construct(No(
                    # Because we're doing issubclass instead of isinstance,
                    # PyCharm does not understand that then_expr.type is a
                    # Struct, so the following is necessary to avoid warnings.
                    assert_type(then_expr.type, Struct)
                ))
            elif then_expr.type.matches(LexicalEnvType):
                default_expr = construct(EmptyEnv)
            else:
                # The following is not actually used but PyCharm's typer
                # requires it.
                default_expr = None

                check_source_language(
                    False,
                    "Then expression should have a default value provided, "
                    "in cases where the provided function's return type is "
                    "not Bool, here {}".format(then_expr.type.name().camel)
                )
        else:
            default_expr = construct(self.default_val, then_expr.type)

        return Then.Expr(expr, construct(self.var_expr), then_expr,
                         default_expr)
Code example #13
File: structs.py (project: AdaCore/langkit)
    def construct(self):
        """
        Construct a resolved expression that is the result of testing the kind
        of a node.

        :rtype: IsAExpr
        """
        expr = construct(self.expr)
        astnodes = [resolve_type(a) for a in self.astnodes]
        for a in astnodes:
            check_source_language(
                issubclass(a, ASTNode),
                "Expected ASTNode subclass, got {}".format(a)
            )
            check_source_language(a.matches(expr.type), (
                'When testing the dynamic subtype of an AST node, the type to'
                ' check must be a subclass of the value static type.'
            ))
        return IsA.Expr(expr, astnodes)
Code example #14
File: parsers.py (project: AdaCore/langkit)
    def get_rule(self, rule_name):
        """
        Helper to return the rule corresponding to rule_name. The benefit of
        using this helper is that it will raise a helpful error diagnostic.

        :param str rule_name: The rule to get.
        """
        if rule_name not in self.rules:
            close_matches = difflib.get_close_matches(
                rule_name, self.rules.keys()
            )
            check_source_language(
                False, "Wrong rule name: '{}'. {}".format(
                    rule_name,
                    "Did you mean '{}'?".format(close_matches[0])
                    if close_matches else ""
                )
            )
        return self.rules[rule_name]
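
The suggestion in get_rule comes from difflib.get_close_matches in the standard library. A quick self-contained illustration of the kind of hint it produces (the rule names are made up):

import difflib

rules = ['compilation_unit', 'subprogram_body', 'expression']
close_matches = difflib.get_close_matches('expresion', rules)
print("Did you mean '{}'?".format(close_matches[0]) if close_matches else '')
# -> Did you mean 'expression'?
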
Code example #15
File: collections.py (project: AdaCore/langkit)
    def construct(self):
        """
        Construct a resolved expression for this quantifier expression.

        :rtype: QuantifierExpr
        """
        (collection_expr,
         expr,
         element_var,
         index_var,
         iter_scope) = self.construct_common()

        check_source_language(
            expr.type.matches(BoolType),
            "Wrong type for expression in quantifier: expected bool, "
            "got {}".format(expr.type.name().camel)
        )

        return Quantifier.Expr(self.kind, collection_expr, expr,
                               element_var, index_var, iter_scope)
Code example #16
File: structs.py (project: AdaCore/langkit)
    def construct(self):
        """
        Construct a resolved expression that is the result of casting an AST
        node.

        :rtype: CastExpr
        """
        expr = construct(
            self.expr,
            lambda t: self.astnode.matches(t) or t.matches(self.astnode),
            'Cannot cast {{expr_type}} to {}: only (up/down)casting is '
            'allowed'.format(
                self.astnode.name().camel
            )
        )
        check_source_language(
            expr.type != self.astnode,
            'Casting to the same type',
            severity=Severity.warning
        )
        return Cast.Expr(expr, self.astnode, do_raise=self.do_raise)
Code example #17
    def __init__(self,
                 add_env=False,
                 add_to_env=None,
                 ref_envs=None,
                 initial_env=None,
                 env_hook_arg=None,
                 call_parents=True):
        """

        :param bool add_env: Whether to add a new scoped lexical environment.
            The new environment will be linked to the corresponding AST node
            and will have the AST node's lexical environment as a parent.

        :param add_to_env: Either an AddToEnv named tuple, or a list of them.
            Used to add elements to the lexical environment. See add_to_env's
            doc for more details.
        :type add_to_env: AddToEnv|[AddToEnv]

        :param AbstractExpression ref_envs: if an AbstractExpression returning
            a list of environments is supplied, the topmost environment in the
            environment resolution will be altered to include the list of
            environments as referenced environments. TODO: Not yet implemented!

        :param AbstractExpression initial_env: If supplied, this env will be
            used as the lexical environment to execute the rest of the actions.
            For example, if you pass an initial_env, and add_env, then an env
            will be added to the env passed as initial_env, and the node
            concerned by this env specification will have initial_env as a
            parent indirectly.

        :param AbstractExpression env_hook_arg: Does nothing if left to None.
            If supplied, it must be an abstract expression that resolves to a
            node. This expression will be evaluated and passed to the
            environment hook.
        """

        self.ast_node = None
        """
        ASTNode subclass associated to this environment specification.
        Initialized when creating ASTNode subclasses.
        :type: langkit.compiled_types.ASTNode
        """

        self._add_env = add_env
        ":type: bool"

        # The following attributes (unresolved_*) contain abstract expressions
        # used to describe various environment behaviors. They all have
        # corresponding attributes that embed them as properties: see below.

        self._unresolved_initial_env = initial_env
        ":type: AbstractExpression"

        self._unresolved_envs_expressions = []
        ":type: list[AddToEnv]"

        self.envs_expressions = []
        ":type: list[AddToEnv]"

        if add_to_env:
            check_source_language(
                isinstance(add_to_env, AddToEnv)
                or isinstance(add_to_env, list),
                "Wrong parameter for add_to_env: Expected AddToEnv named-tuple"
                " or list of AddToEnv")

            self._unresolved_envs_expressions = ([add_to_env] if isinstance(
                add_to_env, AddToEnv) else add_to_env)

        self._unresolved_ref_envs = ref_envs
        ":type: AbstractExpression"

        self._unresolved_env_hook_arg = env_hook_arg
        ":type: AbstractExpression"

        # These are the property attributes

        self.initial_env = None
        ":type: PropertyDef"

        self.ref_envs = None
        ":type: PropertyDef"

        self.env_hook_arg = None
        ":type: PropertyDef"

        self.has_post_actions = False

        self.call_parents = call_parents
        "Whether to call parents env specs or not"
Code example #18
    def run(self, argv=None):
        parsed_args = self.args_parser.parse_args(argv)

        from langkit import diagnostics
        diagnostics.EMIT_PARSABLE_ERRORS = parsed_args.parsable_errors

        if parsed_args.profile:
            import cProfile
            import pstats

            pr = cProfile.Profile()
            pr.enable()

        # If asked to, setup the exception hook as a last-chance handler to
        # invoke a debugger in case of uncaught exception.
        if parsed_args.debug:
            # Try to use IPython's debugger if it is available, otherwise
            # fallback to PDB.
            try:
                # noinspection PyPackageRequirements
                from IPython.core import ultratb
            except ImportError:
                ultratb = None  # To keep PyCharm happy...

                def excepthook(type, value, tb):
                    import traceback
                    traceback.print_exception(type, value, tb)
                    pdb.post_mortem(tb)

                sys.excepthook = excepthook
            else:
                sys.excepthook = ultratb.FormattedTB(mode='Verbose',
                                                     color_scheme='Linux',
                                                     call_pdb=1)
            del ultratb

        self.dirs.set_build_dir(parsed_args.build_dir)
        install_dir = getattr(parsed_args, 'install-dir', None)
        if install_dir:
            self.dirs.set_install_dir(install_dir)

        # Compute code coverage in the code generator if asked to
        if parsed_args.func == self.do_generate and parsed_args.coverage:
            try:
                cov = Coverage(self.dirs)
            except Exception as exc:
                import traceback
                print >> sys.stderr, 'Coverage not available:'
                traceback.print_exc(exc)
                sys.exit(1)

            cov.start()
        else:
            cov = None

        # noinspection PyBroadException
        try:
            parsed_args.func(parsed_args)
        except DiagnosticError:
            if parsed_args.debug:
                raise
            print >> sys.stderr, col('Errors, exiting', Colors.FAIL)
            sys.exit(1)
        except Exception, e:
            if parsed_args.debug:
                raise
            import traceback
            ex_type, ex, tb = sys.exc_info()
            if e.args[0] == 'invalid syntax':
                loc = Location(e.filename, e.lineno, "")
            else:
                loc = extract_library_location(traceback.extract_tb(tb))
            with Context("", loc, "recovery"):
                check_source_language(False, str(e), do_raise=False)
            if parsed_args.verbosity.debug:
                traceback.print_exc()

            print >> sys.stderr, col('Internal error! Exiting', Colors.FAIL)
            sys.exit(1)
Code example #19
 def check_type_compatibility(is_valid):
     check_source_language(
         is_valid, 'Incompatible types for equality: {} and {}'.format(
             lhs.type.dsl_name, rhs.type.dsl_name))
Code example #20
    def construct(self):
        """
        Constructs a resolved expression that is the result of:

        - Resolving the receiver;
        - Getting its corresponding field.

        :rtype: FieldAccessExpr
        """

        is_deref = False
        receiver_expr = construct(self.receiver)

        if issubclass(receiver_expr.type, AnalysisUnitType):
            return construct_analysis_unit_property(receiver_expr, self.field,
                                                    self.arguments)

        check_source_language(
            issubclass(receiver_expr.type, Struct),
            '{} values have no field (accessed field was {})'.format(
                receiver_expr.type.name().camel,
                self.field
            )
        )

        to_get = receiver_expr.type.get_abstract_fields_dict().get(self.field,
                                                                   None)
        ":type: AbstractNodeField"

        # If still not found, maybe the receiver is an env el, in which case we
        # want to do implicit dereference.
        if not to_get and receiver_expr.type.is_env_element_type:
            to_get = receiver_expr.type.el_type.get_abstract_fields_dict().get(
                self.field, None
            )
            is_deref = bool(to_get)

        # If still not found, we have a problem
        check_source_language(
            to_get is not None, "Type {} has no '{}' field or property".format(
                receiver_expr.type.__name__, self.field
            )
        )

        check_source_language(
            not to_get.is_internal,
            '{} is for internal use only'.format(to_get.qualname)
        )

        # If the field is a property that takes an implicit env argument, make
        # sure we have one to provide.
        check_source_language(
            not to_get.is_property or
            not to_get.has_implicit_env or
            Env.has_ambient_env,
            'This property has no implicit environment parameter whereas {}'
            ' expects one: please use the eval_in_env construct to bind an'
            ' environment first.'.format(to_get.qualname)
        )

        # Check that this property actually accepts these arguments and that
        # they are correctly typed.
        check_source_language(
            len(self.arguments) == len(to_get.explicit_arguments),
            'Invalid number of arguments in the call to {}:'
            ' {} expected but got {}'.format(
                to_get.qualname,
                len(to_get.explicit_arguments),
                len(self.arguments),
            )
        )

        arg_exprs = [
            construct(
                actual, formal.type,
                custom_msg='Invalid {} actual (#{}) for {}:'.format(
                    formal.name, i, to_get.qualname,
                ) + ' expected {expected} but got {expr_type}'
            ) for i, (actual, formal) in enumerate(
                zip(self.arguments, to_get.explicit_arguments), 1
            )
        ]

        ret = FieldAccess.Expr(receiver_expr, to_get, arg_exprs, is_deref)
        return ret
Code example #21
    def do_prepare(self):
        self.dest_type = resolve_type(self.dest_type)

        check_source_language(
            self.dest_type.is_ast_node or self.dest_type.is_entity_type,
            "One can only cast to an ASTNode subtype or to an entity")
Code example #22
    def construct(self):
        """
        :rtype: StructExpr
        """
        if self.struct_type.is_ast_node:
            check_source_language(
                not self.struct_type.is_list_type,
                'List node synthetization is not supported for now')
            check_source_language(
                PropertyDef.get().memoized,
                'Node synthetization can only happen inside a memoized'
                ' property')

        # Make sure the provided set of fields matches the one the struct
        # needs.
        def error_if_not_empty(name_set, message):
            check_source_language(
                not name_set,
                ('{}: {}'.format(message, ', '.join(name
                                                    for name in name_set))))

        # Create a dict of field names to fields in the struct type

        def is_required(f):
            if isinstance(f, BuiltinField):
                # BuiltinFields are actually stored fields only for structure
                # types (not for nodes).
                return self.struct_type.is_struct_type

            elif isinstance(f, Field):
                return not f.null

            else:
                return isinstance(f, UserField)

        required_fields = {
            f._name.lower: f
            for f in self.struct_type.get_abstract_node_data()
            if is_required(f)
        }

        error_if_not_empty(
            set(required_fields) - set(self.field_values.keys()),
            'Values are missing for {} fields'.format(
                self.struct_type.dsl_name))
        error_if_not_empty(
            set(self.field_values.keys()) - set(required_fields),
            'Extraneous fields for {}'.format(self.struct_type.dsl_name))

        # At this stage, we know that the user has only provided fields that
        # are valid for the struct type.
        provided_fields = {
            required_fields[name].name: construct(
                value, required_fields[name].type,
                'Wrong type for field {}: expected {{expected}}, '
                'got {{expr_type}}'.format(name))
            for name, value in self.field_values.items()
        }

        expr_cls = (New.NodeExpr
                    if self.struct_type.is_ast_node else New.StructExpr)
        return expr_cls(self.struct_type, provided_fields, abstract_expr=self)
Code example #23
File: dsl.py (project: GlancingMind/langkit)
    def process_subclass(mcs, name, bases, dct, is_root):
        from langkit.envs import EnvSpec

        location = extract_library_location()
        base = bases[0]
        is_list_type = issubclass(base, _ASTNodeList)
        is_root_list_type = base is _ASTNodeList

        node_ctx = Context('in {}'.format(name), location)

        with node_ctx:
            check_source_language(
                len(bases) == 1, 'ASTNode subclasses must have exactly one'
                ' base class')
            if mcs.root_type is not None:
                check_source_language(
                    base is not ASTNode,
                    'Only one class can derive from ASTNode (previous was:'
                    ' {})'.format(mcs.root_type.__name__))

            env_spec = dct.pop('env_spec', None)
            check_source_language(
                env_spec is None or isinstance(env_spec, EnvSpec),
                'Invalid environment specification: {}'.format(env_spec))

            annotations = dct.pop('annotations', None)

        # If this is a list type, determine the corresponding element type
        if is_root_list_type:
            element_type = dct.pop('_element_type')
        elif is_list_type:
            element_type = base._element_type
        else:
            element_type = None

        # Determine if this is a token node
        with node_ctx:
            is_token_node = dct.pop('token_node', None)
            check_source_language(
                is_token_node is None or isinstance(is_token_node, bool),
                'The "token_node" field, when present, must contain a boolean')

            # If "token_node" allocation is left to None, inherit it (default
            # is False).
            if is_token_node is None:
                is_token_node = bool(base._is_token_node)

            # Otherwise, make sure that all derivations of a token node are
            # token nodes themselves.
            elif not is_token_node:
                check_source_language(
                    is_token_node == base._is_token_node,
                    '"token_node" annotation inconsistent with inherited AST'
                    ' node')

        fields = ASTNode.collect_fields(name, location, dct, AbstractNodeData)

        # AST list types are not allowed to have syntax fields
        if is_list_type:
            syntax_fields = [f_n for f_n, f_v in fields if not f_v.is_property]
            with node_ctx:
                check_source_language(
                    not syntax_fields,
                    'ASTNode list types are not allowed to have fields'
                    ' (here: {})'.format(', '.join(sorted(syntax_fields))))

        DSLType._import_base_type_info(name, location, dct)
        dct['_fields'] = fields
        dct['_base'] = base
        dct['_env_spec'] = env_spec
        dct['_is_token_node'] = is_token_node

        # Make sure subclasses don't inherit the "list_type" cache from their
        # base classes.
        dct['_list_type'] = None
        dct['_element_type'] = element_type
        dct['_annotations'] = annotations
Code example #24
File: lkt_lowering.py (project: QuentinOchem/langkit)
def create_lexer(ctx, lkt_units):
    """
    Create and populate a lexer from a Lktlang unit.

    :param list[liblktlang.AnalysisUnit] lkt_units: Non-empty list of analysis
        units in which to look for the lexer.
    :rtype: langkit.lexer.Lexer
    """
    import liblktlang

    # Look for the LexerDecl node in top-level lists
    full_lexer = find_toplevel_decl(ctx, lkt_units, liblktlang.LexerDecl,
                                    'lexer')
    with ctx.lkt_context(full_lexer):
        lexer_annot = parse_annotations(ctx, lexer_annotations, full_lexer)

    patterns = {}
    """
    Mapping from pattern names to the corresponding regular expression.

    :type: dict[names.Name, str]
    """

    token_family_sets = {}
    """
    Mapping from token family names to the corresponding sets of tokens that
    belong to this family.

    :type: dict[names.Name, set[TokenAction]]
    """

    token_families = {}
    """
    Mapping from token family names to the corresponding token families.  We
    build this late, once we know all tokens and all families.

    :type: dict[names.Name, TokenFamily]
    """

    tokens = {}
    """
    Mapping from token names to the corresponding tokens.

    :type: dict[names.Name, Token]
    """

    rules = []
    pre_rules = []
    """
    Lists of regular and pre lexing rules for this lexer.

    :type: list[(langkit.lexer.Matcher, langkit.lexer.Action)]
    """

    newline_after = []
    """
    List of tokens after which we must introduce a newline during unparsing.

    :type: list[Token]
    """
    def ignore_constructor(start_ignore_layout, end_ignore_layout):
        """
        Adapter to build an Ignore instance with the same API as WithText
        constructors.
        """
        del start_ignore_layout, end_ignore_layout
        return Ignore()

    def process_family(f):
        """
        Process a LexerFamilyDecl node. Register the token family and process
        the rules it contains.

        :type f: liblktlang.LexerFamilyDecl
        """
        with ctx.lkt_context(f):
            # Create the token family, if needed
            name = names.Name.from_lower(text_as_str(f.f_syn_name))
            token_set = token_family_sets.setdefault(name, set())

            for r in f.f_rules:
                check_source_language(
                    isinstance(r.f_decl, liblktlang.GrammarRuleDecl),
                    'Only lexer rules allowed in family blocks')
                process_token_rule(r, token_set)

    def process_token_rule(r, token_set=None):
        """
        Process the full declaration of a GrammarRuleDecl node: create the
        token it declares and lower the optional associated lexing rule.

        :param liblktlang.FullDecl r: Full declaration for the GrammarRuleDecl
            to process.
        :param None|set[TokenAction] token_set: If this declaration appears in
            the context of a token family, this adds the new token to this set.
            Must be left to None otherwise.
        """
        with ctx.lkt_context(r):
            rule_annot = parse_annotations(ctx, token_annotations, r)

            # Gather token action info from the annotations. If absent,
            # fallback to WithText.
            token_cons = None
            start_ignore_layout = False
            end_ignore_layout = False
            if 'ignore' in rule_annot:
                token_cons = ignore_constructor
            for name in ('text', 'trivia', 'symbol'):
                try:
                    start_ignore_layout, end_ignore_layout = rule_annot[name]
                except KeyError:
                    continue

                check_source_language(token_cons is None,
                                      'At most one token action allowed')
                token_cons = token_cls_map[name]
            is_pre = rule_annot.get('pre_rule', False)
            if token_cons is None:
                token_cons = WithText

            # Create the token and register it where needed: the global token
            # mapping, its token family (if any) and the "newline_after" group
            # if the corresponding annotation is present.
            token_lower_name = text_as_str(r.f_decl.f_syn_name)
            token_name = names.Name.from_lower(token_lower_name)

            check_source_language(
                token_lower_name not in ('termination', 'lexing_failure'),
                '{} is a reserved token name'.format(token_lower_name))
            check_source_language(token_name not in tokens,
                                  'Duplicate token name')

            token = token_cons(start_ignore_layout, end_ignore_layout)
            tokens[token_name] = token
            if token_set is not None:
                token_set.add(token)
            if 'newline_after' in rule_annot:
                newline_after.append(token)

            # Lower the lexing rule, if present
            matcher_expr = r.f_decl.f_expr
            if matcher_expr is not None:
                rule = (lower_matcher(matcher_expr), token)
                if is_pre:
                    pre_rules.append(rule)
                else:
                    rules.append(rule)

    def process_pattern(full_decl):
        """
        Process a pattern declaration.

        :param liblktlang.FullDecl full_decl: Full declaration for the ValDecl
            to process.
        """
        parse_annotations(ctx, [], full_decl)
        decl = full_decl.f_decl
        lower_name = text_as_str(decl.f_syn_name)
        name = names.Name.from_lower(lower_name)

        with ctx.lkt_context(decl):
            check_source_language(name not in patterns,
                                  'Duplicate pattern name')
            check_source_language(
                decl.f_decl_type is None,
                'Patterns must have automatic types in'
                ' lexers')
            check_source_language(
                isinstance(decl.f_val, liblktlang.StringLit)
                and decl.f_val.p_is_regexp_literal,
                'Pattern string literal expected')
            # TODO: use StringLit.p_denoted_value when properly implemented
            patterns[name] = pattern_as_str(decl.f_val)

    def lower_matcher(expr):
        """
        Lower a token matcher to our internals.

        :type expr: liblktlang.GrammarExpr
        :rtype: langkit.lexer.Matcher
        """
        with ctx.lkt_context(expr):
            if isinstance(expr, liblktlang.TokenLit):
                return Literal(json.loads(text_as_str(expr)))
            elif isinstance(expr, liblktlang.TokenNoCaseLit):
                return NoCaseLit(json.loads(text_as_str(expr)))
            elif isinstance(expr, liblktlang.TokenPatternLit):
                return Pattern(pattern_as_str(expr))
            else:
                check_source_language(False, 'Invalid lexing expression')

    def lower_token_ref(ref):
        """
        Return the Token that `ref` refers to.

        :type ref: liblktlang.RefId
        :rtype: Token
        """
        with ctx.lkt_context(ref):
            token_name = names.Name.from_lower(text_as_str(ref))
            check_source_language(token_name in tokens,
                                  'Unknown token: {}'.format(token_name.lower))
            return tokens[token_name]

    def lower_family_ref(ref):
        """
        Return the TokenFamily that `ref` refers to.

        :type ref: liblktlang.RefId
        :rtype: TokenFamily
        """
        with ctx.lkt_context(ref):
            name_lower = text_as_str(ref)
            name = names.Name.from_lower(name_lower)
            check_source_language(
                name in token_families,
                'Unknown token family: {}'.format(name_lower))
            return token_families[name]

    def lower_case_alt(alt):
        """
        Lower the alternative of a case lexing rule.

        :type alt: liblktlang.BaseLexerCaseRuleAlt
        :rtype: Alt
        """
        prev_token_cond = None
        if isinstance(alt, liblktlang.LexerCaseRuleCondAlt):
            prev_token_cond = [
                lower_token_ref(ref) for ref in alt.f_cond_exprs
            ]
        return Alt(prev_token_cond=prev_token_cond,
                   send=lower_token_ref(alt.f_send.f_sent),
                   match_size=int(alt.f_send.f_match_size.text))

    # Go through all rules to register tokens, their token families and lexing
    # rules.
    for full_decl in full_lexer.f_decl.f_rules:
        with ctx.lkt_context(full_decl):
            if isinstance(full_decl, liblktlang.LexerFamilyDecl):
                # This is a family block: go through all declarations inside it
                process_family(full_decl)

            elif isinstance(full_decl, liblktlang.FullDecl):
                # There can be various types of declarations in lexers...
                decl = full_decl.f_decl

                if isinstance(decl, liblktlang.GrammarRuleDecl):
                    # Here, we have a token declaration, potentially associated
                    # with a lexing rule.
                    process_token_rule(full_decl)

                elif isinstance(decl, liblktlang.ValDecl):
                    # This is the declaration of a pattern
                    process_pattern(full_decl)

                else:
                    check_source_language(False,
                                          'Unexpected declaration in lexer')

            elif isinstance(full_decl, liblktlang.LexerCaseRule):
                syn_alts = list(full_decl.f_alts)

                # This is a rule for conditional lexing: lower its matcher and
                # its alternative rules.
                matcher = lower_matcher(full_decl.f_expr)
                check_source_language(
                    len(syn_alts) == 2 and isinstance(
                        syn_alts[0], liblktlang.LexerCaseRuleCondAlt)
                    and isinstance(syn_alts[1],
                                   liblktlang.LexerCaseRuleDefaultAlt),
                    'Invalid case rule topology')
                rules.append(
                    Case(matcher, lower_case_alt(syn_alts[0]),
                         lower_case_alt(syn_alts[1])))

            else:
                # The grammar should make the following dead code
                assert False, 'Invalid lexer rule: {}'.format(full_decl)

    # Create the LexerToken subclass to define all tokens and token families
    items = {}
    for name, token in tokens.items():
        items[name.camel] = token
    for name, token_set in token_family_sets.items():
        tf = TokenFamily(*list(token_set))
        token_families[name] = tf
        items[name.camel] = tf
    token_class = type('Token', (LexerToken, ), items)

    # Create the Lexer instance and register all patterns and lexing rules
    result = Lexer(token_class, 'track_indent' in lexer_annot, pre_rules)
    for name, regexp in patterns.items():
        result.add_patterns((name.lower, regexp))
    result.add_rules(*rules)

    # Register spacing/newline rules
    for tf1, tf2 in lexer_annot.get('spacing', []):
        result.add_spacing((lower_family_ref(tf1), lower_family_ref(tf2)))
    result.add_newline_after(*newline_after)

    return result
Code example #25
File: manage.py (project: samy-mahmoudi/libadalang)
    def do_perf_test(self, args):
        """
        Run the performance regression testsuite.
        """
        from time import time

        self.set_context(args)

        def file_lines(filename):
            with open(filename) as f:
                return len(list(f))

        check_source_language(
            not os.path.isabs(args.build_dir),
            "--build-dir should be a relative path for perf testsuite"
        )

        work_dir = os.path.abspath(args.work_dir)
        variant_name = args.build_dir
        report_file = os.path.join(work_dir,
                                   'report-{}.txt'.format(variant_name))
        args.build_dir = os.path.join(work_dir, args.build_dir)

        if not args.no_recompile:
            # The perf testsuite only needs the "parse" main program
            args.disable_mains = self.main_programs - {'parse'}

            # Build libadalang in production mode inside of the perf testsuite
            # directory.
            self.dirs.set_build_dir(args.build_dir)
            args.build_mode = 'prod'
            self._mkdir(args.build_dir)
            self.do_make(args)

        # Checkout the code bases that we will use for the perf testsuite
        source_dir = os.path.join(work_dir, "source")
        try:
            os.mkdir(source_dir)
        except OSError:
            pass
        os.chdir(source_dir)
        if not os.path.exists('gnat'):
            subprocess.check_call([
                'svn', 'co',
                'svn+ssh://svn.us.adacore.com/Dev/trunk/gnat',
                '-r', '314163',
                '--ignore-externals'
            ])
        if not os.path.exists('gps'):
            subprocess.check_call(['git', 'clone',
                                   'ssh://review.eu.adacore.com:29418/gps'])
        os.chdir('gps')
        subprocess.check_call(['git', 'checkout',
                               '00b73897a867514732d48ae1429faf97fb07ad7c'])
        os.chdir('..')

        # Make a list of every ada file

        # Exclude some files that are contained here but that we do not parse
        # correctly.
        excluded_patterns = ['@', 'a-numeri', 'rad-project']
        ada_files = filter(
            lambda f: all(map(lambda p: p not in f, excluded_patterns)),
            self._find_ada_sources(source_dir)
        )
        file_list_name = 'ada_file_list'
        with open(file_list_name, 'w') as file_list:
            for f in ada_files:
                file_list.write(f + '\n')

        # Get a count of the total number of ada source lines
        lines_count = sum(map(file_lines, ada_files))

        with open(report_file, 'w') as f:
            def write_report(text, color=None):
                if color:
                    printcol(text, color)
                else:
                    print(text)
                print(text, file=f)

            write_report('=================================', Colors.HEADER)
            write_report('= Performance testsuite results =', Colors.HEADER)
            write_report('=================================', Colors.HEADER)
            write_report('')
            write_report('Name: {}'.format(variant_name))
            write_report('Scenario: {}'.format(args.scenario))
            write_report('')
            elapsed_list = []
            parse_args = ['{}/bin/parse'.format(args.build_dir), '-s', '-F',
                          file_list_name]
            if args.scenario == self.PERF_PARSE_AND_TRAVERSE:
                parse_args.append('-C')
            if args.with_trivia:
                parse_args.append('-P')
            for _ in range(args.nb_runs):
                # Execute parse on the file list and get the elapsed time
                t = time()
                subprocess.check_call(parse_args)
                elapsed = time() - t
                elapsed_list.append(elapsed)

                # Print a very basic report
                write_report(
                    'Parsed {0} lines of Ada code in {1:.2f} seconds'.format(
                        lines_count, elapsed
                    )
                )

            write_report('')
            write_report('= Performance summary =', Colors.OKGREEN)
            write_report(
                'Mean time to parse {0} lines of code:'
                ' {1:.2f} seconds'.format(
                    lines_count, sum(elapsed_list) / float(len(elapsed_list))
                )
            )
Code example #26
    def from_parser(node, parser):
        """
        Given a parser that creates a specific type of parse node, return the
        corresponding unparser. Emit a user diagnostic if this transformation
        cannot be made.

        :param ASTNodeType node: Parse node that `parser` emits.
        :param Parser parser: Parser for which we want to create an unparser.
        :rtype: NodeUnparser
        """
        assert not node.abstract and not node.synthetic, (
            'Invalid unparser request for {}'.format(node.dsl_name)
        )
        parser = unwrap(parser)

        with parser.diagnostic_context:
            if node.is_token_node:
                return NodeUnparser._from_token_node_parser(node, parser)

            if isinstance(parser, _Transform):
                return NodeUnparser._from_transform_parser(node, parser)

            if isinstance(parser, List):
                check_source_language(
                    isinstance(parser.parser, (Defer, List, Null, Or,
                                               _Transform)),
                    'Unparser generation requires list parsers to directly'
                    ' build nodes for each list item'
                )
                return ListNodeUnparser(
                    node,
                    TokenUnparser.from_parser(parser.sep)
                )

            if isinstance(parser, Opt):
                if parser._booleanize:
                    # This is a special parser: when the subparser succeeds,
                    # the "present" alternative is created to hold its result,
                    # otherwise the "absent" alternative is created (and no
                    # tokens are consumed).
                    #
                    # So in both cases, we create an unparser, but we emit
                    # tokens only for the "present" alternative.
                    result = RegularNodeUnparser(node)
                    if node is parser._booleanize.alt_present.type:
                        NodeUnparser._emit_to_token_sequence(parser.parser,
                                                             result.pre_tokens)
                    return result

                else:
                    return NodeUnparser.from_parser(node, parser.parser)

            if isinstance(parser, Null):
                return NullNodeUnparser(node)

            check_source_language(
                False,
                'Unsupported parser for unparsers generation: {}'.format(
                    parser
                )
            )
Code example #27
File: regexp.py (project: yakobowski/langkit)
    def _parse_sequence(cls, stream):
        """
        Parse a sequence of regexps. Stop at the first unmatched parenthesis or
        at the first top-level pipe character.

        :param file stream: Input regexp stream.
        :rtype: RegexpCollection.Parser
        """
        subparsers = []
        while True:
            if stream.eof or stream.next_is('|', ')'):
                break

            elif stream.next_is('('):
                # Nested group: recursively parse alternatives
                stream.read()
                subparsers.append(cls._parse_or(stream))
                check_source_language(stream.next_is(')'),
                                      'unbalanced parenthesis')
                stream.read()

            elif stream.next_is('['):
                # Parse a range of characters
                subparsers.append(cls._parse_range(stream))

            elif stream.next_is('{'):
                # Parse a reference to a named pattern
                stream.read()
                name = ''
                while not stream.eof and not stream.next_is('}'):
                    name += stream.read()
                check_source_language(stream.next_is('}'),
                                      'unbalanced bracket')
                stream.read()
                check_source_language(rule_name_re.match(name),
                                      'invalid rule name: {}'.format(name))
                subparsers.append(cls.Defer(name))

            elif stream.next_is('*', '+', '?'):
                # Repeat the previous sequence item
                check_source_language(subparsers, 'nothing to repeat')
                check_source_language(
                    not isinstance(subparsers[-1], cls.Repeat),
                    'multiple repeat')
                wrapper = {
                    '*': lambda p: cls.Repeat(p),
                    '+': lambda p: cls.Sequence([p, cls.Repeat(p)]),
                    '?': lambda p: cls.Opt(p)
                }[stream.read()]
                subparsers[-1] = wrapper(subparsers[-1])

            elif stream.next_is('.'):
                # Generally, "." designates any character *except* newlines. Do
                # the same here.
                stream.read()
                subparsers.append(cls.Range(CharSet('\n').negation))

            elif stream.next_is('^', '$'):
                check_source_language(
                    False, 'matching beginning or ending is unsupported')

            elif stream.next_is('\\'):
                # Parse an escape sequence. It can be a Unicode character, a
                # Unicode property or a simple escape sequence.
                stream.read()

                # \p and \P refer to character sets from Unicode general
                # categories.
                if stream.next_is('p', 'P'):
                    action = stream.read()

                    # Read the category name, which must appear between curly
                    # brackets.
                    category = ''
                    check_source_language(
                        stream.next_is('{'),
                        'incomplete Unicode category matcher')
                    stream.read()
                    while not stream.eof and not stream.next_is('}'):
                        category += stream.read()
                    check_source_language(
                        stream.next_is('}'),
                        'incomplete Unicode category matcher')
                    stream.read()

                    try:
                        char_set = CharSet.for_category(category)
                    except KeyError:
                        check_source_language(
                            False,
                            'invalid Unicode category: {}'.format(category))
                    if action == 'P':
                        char_set = char_set.negation
                    subparsers.append(cls.Range(char_set))

                else:
                    stream.go_back()
                    subparsers.append(
                        cls.Range(CharSet.from_int(cls._read_escape(stream))))

            else:
                subparsers.append(cls.Range(CharSet(stream.read())))

        return cls.Sequence(subparsers)
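
The quantifier branch above wraps the previously parsed item according to the repetition character it reads. Below is a minimal standalone sketch of that dispatch; the Repeat, Sequence and Opt classes are simplified stand-ins for illustration, not langkit's actual RegexpCollection parsers.

from dataclasses import dataclass
from typing import List, Union


@dataclass
class Char:
    value: str


@dataclass
class Repeat:
    parser: 'Regexp'


@dataclass
class Sequence:
    parsers: List['Regexp']


@dataclass
class Opt:
    parser: 'Regexp'


Regexp = Union[Char, Repeat, Sequence, Opt]


def apply_quantifier(last: Regexp, quantifier: str) -> Regexp:
    # Wrap "last" according to the quantifier character, mirroring the
    # dictionary dispatch in the snippet above.
    wrapper = {
        '*': lambda p: Repeat(p),
        '+': lambda p: Sequence([p, Repeat(p)]),
        '?': lambda p: Opt(p),
    }[quantifier]
    return wrapper(last)


print(apply_quantifier(Char('a'), '+'))
# Sequence(parsers=[Char(value='a'), Repeat(parser=Char(value='a'))])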
コード例 #28
0
 def check(self):
     # Check is not normally called on this, so if it is called it means
     # that a CallEnvHook instance has found its way into a regular action
     # list.
     check_source_language(
         False,
         "set_initial_env must be first in the action list")
コード例 #29
0
 def check(self):
     # Check is not normally called on this, so if it is called it means
     # that a SetInitialEnv instance has found its way into a regular action
     # list.
     check_source_language(
         False,
         "set_initial_env can only be preceded by call_env_hook")
コード例 #30
0
    def construct(self):
        """
        Construct a resolved expression for this.

        :rtype: ResolvedExpression
        """
        # Add the variables created for this expression to the current scope
        scope = PropertyDef.get_scope()
        for _, var, _ in self.matchers:
            scope.add(var.local_var)

        matched_expr = construct(self.matched_expr)
        check_source_language(issubclass(matched_expr.type, ASTNode)
                              or matched_expr.type.is_env_element_type,
                              'Match expressions can only work on AST nodes '
                              'or env elements')

        # Create a local variable so that in the generated code, we don't have
        # to re-compute the prefix for each type check.
        matched_abstract_var = AbstractVariable(
            names.Name('Match_Prefix'),
            type=matched_expr.type,
            create_local=True
        )
        PropertyDef.get_scope().add(matched_abstract_var.local_var)
        matched_var = construct(matched_abstract_var)

        constructed_matchers = []

        # Check (i.e. raise an error if not true) that the set of matchers is
        # valid:

        # * all matchers must target allowed types, i.e. input type subclasses;
        for typ, var, expr in self.matchers:
            if typ is not None:
                check_source_language(
                    typ.matches(matched_expr.type),
                    'Cannot match {} (input type is {})'.format(
                        typ.name().camel,
                        matched_expr.type.name().camel
                    )
                )
            else:
                # The default matcher (if any) matches the most general type,
                # which is the input type.
                var.set_type(matched_expr.type)
            constructed_matchers.append((construct(var), construct(expr)))

        # * all possible input types must have at least one matcher. Also warn
        #   if some matchers are unreachable.
        self._check_match_coverage(matched_expr.type)

        # Compute the return type as the unification of all branches
        _, expr = constructed_matchers[-1]
        rtype = expr.type
        for _, expr in constructed_matchers:
            check_source_language(
                expr.type.matches(rtype), "Wrong type for match result"
                " expression: got {} but expected {} or sub/supertype".format(
                    expr.type.name().camel, rtype.name().camel
                )
            )
            rtype = expr.type.unify(rtype)

        # This is the expression execution will reach if we have a bug in our
        # code (i.e. if matchers did not cover all cases).
        result = UnreachableExpr(rtype)

        # Wrap this "failing" expression with all the cases to match in the
        # appropriate order, so that in the end the first matchers are tested
        # first.
        for match_var, expr in reversed(constructed_matchers):
            casted = Cast.Expr(matched_var,
                               match_var.type,
                               result_var=match_var)
            guard = Not.make_expr(
                Eq.make_expr(
                    casted, LiteralExpr(casted.type.nullexpr(), casted.type)
                )
            )
            if expr.type != rtype:
                # We already checked that type matches, so only way this is
                # true is if expr.type is an ASTNode type derived from
                # rtype. In that case, we need an explicit upcast.
                expr = Cast.Expr(expr, rtype)

            result = If.Expr(guard, expr, result, rtype)

        return Let.Expr(
            [matched_var],
            [matched_expr],
            BindingScope(result,
                         [construct(var) for _, var, _ in self.matchers])
        )
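
The final loop above folds the matchers, in reverse order, around an unreachable fallback so that the first matcher ends up being tested first. Here is a tiny self-contained sketch of that fold using plain Python callables instead of resolved expressions (all names are illustrative):

def build_dispatch(matchers, fallback):
    # matchers: list of (guard, outcome) pairs, tested in order.
    # fallback: value returned when no guard accepts the input.
    result = lambda value: fallback
    for guard, outcome in reversed(matchers):
        # Wrap the current chain in a new conditional; the outer lambda
        # captures guard/outcome/next explicitly to avoid late binding.
        result = (lambda g, o, nxt:
                  lambda value: o if g(value) else nxt(value))(
            guard, outcome, result)
    return result


dispatch = build_dispatch(
    [(lambda v: isinstance(v, int), 'int'),
     (lambda v: isinstance(v, str), 'str')],
    'unmatched')
print(dispatch(1), dispatch('x'), dispatch([]))  # int str unmatched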
コード例 #31
0
    def construct_common(self):
        """
        Construct and return the expressions commonly needed by collection
        expression subclasses.

        :rtype: CollectionExpression.ConstructCommonResult
        """
        current_scope = PropertyDef.get_scope()

        # First, build the collection expression. From the result, we can
        # deduce the type of the element variable.
        collection_expr = construct(self.collection)
        with_entities = collection_expr.type.is_entity_type
        if with_entities:
            saved_entity_coll_expr, collection_expr, entity_info = (
                collection_expr.destructure_entity())
            collection_expr = SequenceExpr(saved_entity_coll_expr,
                                           collection_expr)

        check_source_language(
            collection_expr.type.is_collection,
            'Cannot iterate on {}, which is not a collection'.format(
                collection_expr.type.dsl_name))

        elt_type = collection_expr.type.element_type
        if with_entities:
            elt_type = elt_type.entity
        self.element_var.set_type(elt_type)

        # List of "element" iteration variables
        elt_vars = [construct(self.element_var)]

        # List of initializing expressions for them
        elt_var_inits = []

        if with_entities:
            entity_var = elt_vars[-1]
            node_var = AbstractVariable(names.Name('Bare') +
                                        self.element_var._name,
                                        type=elt_type.element_type)
            elt_var_inits.append(
                make_as_entity(construct(node_var), entity_info=entity_info))
            elt_vars.append(construct(node_var))

        # If we are iterating over an AST list, then we get root grammar typed
        # values. We need to convert them to the more specific type to make
        # the rest of the expression machinery work.
        if collection_expr.type.is_list_type:
            typed_elt_var = elt_vars[-1]
            untyped_elt_var = AbstractVariable(
                names.Name('Untyped') + self.element_var._name,
                type=get_context().root_grammar_class)
            # Initialize the former last variable with a cast from the new last
            # variable and push the new last variable.
            elt_var_inits.append(
                UncheckedCastExpr(construct(untyped_elt_var),
                                  typed_elt_var.type))
            elt_vars.append(construct(untyped_elt_var))

        # Only then we can build the inner expression
        with current_scope.new_child() as inner_scope:
            inner_expr = construct(self.expr)

        if with_entities:
            entity_var.abstract_var.create_local_variable(inner_scope)
        if collection_expr.type.is_list_type:
            typed_elt_var.abstract_var.create_local_variable(inner_scope)

        if self.index_var:
            self.index_var.add_to_scope(inner_scope)

        elt_var_inits.append(None)

        return self.ConstructCommonResult(
            collection_expr, funcy.lzip(elt_vars, elt_var_inits),
            construct(self.index_var) if self.index_var else None, inner_expr,
            inner_scope)
コード例 #32
0
ファイル: structs.py プロジェクト: AdaCore/langkit
 def do_prepare(self):
     self.astnode = resolve_type(self.astnode)
     check_source_language(self.astnode.matches(ASTNode), (
         "One can only cast to an ASTNode subtype"
     ))
コード例 #33
0
ファイル: lkt_lowering.py プロジェクト: QuentinOchem/langkit
    def process_token_rule(r, token_set=None):
        """
        Process the full declaration of a GrammarRuleDecl node: create the
        token it declares and lower the optional associated lexing rule.

        :param liblktlang.FullDecl r: Full declaration for the GrammarRuleDecl
            to process.
        :param None|set[TokenAction] token_set: If this declaration appears in
            the context of a token family, this adds the new token to this set.
            Must be left to None otherwise.
        """
        with ctx.lkt_context(r):
            rule_annot = parse_annotations(ctx, token_annotations, r)

            # Gather token action info from the annotations. If absent,
            # fallback to WithText.
            token_cons = None
            start_ignore_layout = False
            end_ignore_layout = False
            if 'ignore' in rule_annot:
                token_cons = ignore_constructor
            for name in ('text', 'trivia', 'symbol'):
                try:
                    start_ignore_layout, end_ignore_layout = rule_annot[name]
                except KeyError:
                    continue

                check_source_language(token_cons is None,
                                      'At most one token action allowed')
                token_cons = token_cls_map[name]
            is_pre = rule_annot.get('pre_rule', False)
            if token_cons is None:
                token_cons = WithText

            # Create the token and register it where needed: the global token
            # mapping, its token family (if any) and the "newline_after" group
            # if the corresponding annotation is present.
            token_lower_name = text_as_str(r.f_decl.f_syn_name)
            token_name = names.Name.from_lower(token_lower_name)

            check_source_language(
                token_lower_name not in ('termination', 'lexing_failure'),
                '{} is a reserved token name'.format(token_lower_name))
            check_source_language(token_name not in tokens,
                                  'Duplicate token name')

            token = token_cons(start_ignore_layout, end_ignore_layout)
            tokens[token_name] = token
            if token_set is not None:
                token_set.add(token)
            if 'newline_after' in rule_annot:
                newline_after.append(token)

            # Lower the lexing rule, if present
            matcher_expr = r.f_decl.f_expr
            if matcher_expr is not None:
                rule = (lower_matcher(matcher_expr), token)
                if is_pre:
                    pre_rules.append(rule)
                else:
                    rules.append(rule)
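
The annotation handling above allows at most one of several mutually exclusive annotations to pick the token constructor, with WithText as the fallback. A small standalone sketch of that selection (the constructor values are placeholder strings, not langkit's token classes):

def pick_token_constructor(annotations, constructors, default):
    # Pick the constructor named by an annotation, rejecting conflicting
    # annotations and falling back to the default when none is given.
    chosen = None
    for name, constructor in constructors.items():
        if name not in annotations:
            continue
        if chosen is not None:
            raise ValueError('At most one token action allowed')
        chosen = constructor
    return chosen if chosen is not None else default


constructors = {'text': 'WithText', 'trivia': 'WithTrivia', 'symbol': 'WithSymbol'}
print(pick_token_constructor({'trivia': ()}, constructors, 'WithText'))
# WithTrivia
print(pick_token_constructor({}, constructors, 'WithText'))
# WithText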
コード例 #34
0
ファイル: c_api.py プロジェクト: shintakezou/langkit
 def lib_name(self, lib_name):
     check_source_language(self.LIB_NAME_RE.match(lib_name),
                           'Invalid library name: {}'.format(lib_name))
     self._lib_name = lib_name
コード例 #35
0
ファイル: lkt_lowering.py プロジェクト: QuentinOchem/langkit
    def lower(rule):
        """
        Helper to lower one parser.

        :param liblktlang.GrammarExpr rule: Grammar rule to lower.
        :rtype: Parser
        """
        # For convenience, accept null input rules, as we generally want to
        # forward them as-is to the lower level parsing machinery.
        if rule is None:
            return None

        loc = ctx.lkt_loc(rule)
        with ctx.lkt_context(rule):
            if isinstance(rule, liblktlang.ParseNodeExpr):
                node = resolve_node_ref(rule.f_node_name)

                # Lower the subparsers
                subparsers = [
                    lower(subparser) for subparser in rule.f_sub_exprs
                ]

                # Qualifier nodes are a special case: we produce one subclass
                # or the other depending on whether the subparsers accept the
                # input.
                if node._type.is_bool_node:
                    return Opt(*subparsers, location=loc).as_bool(node)

                # Likewise for enum nodes
                elif node._type.base and node._type.base.is_enum_node:
                    return _Transform(_Row(*subparsers, location=loc),
                                      node.type_ref,
                                      location=loc)

                # For other nodes, always create the node when the subparsers
                # accept the input.
                else:
                    return _Transform(parser=_Row(*subparsers),
                                      typ=node,
                                      location=loc)

            elif isinstance(rule, liblktlang.GrammarToken):
                token_name = rule.f_token_name.text
                try:
                    val = tokens[token_name]
                except KeyError:
                    check_source_language(
                        False, 'Unknown token: {}'.format(token_name))

                match_text = ''
                if rule.f_expr:
                    # The grammar is supposed to maintain this invariant
                    assert isinstance(rule.f_expr, liblktlang.TokenLit)
                    match_text = denoted_string_literal(rule.f_expr)

                return _Token(val=val, match_text=match_text, location=loc)

            elif isinstance(rule, liblktlang.TokenLit):
                return _Token(denoted_string_literal(rule), location=loc)

            elif isinstance(rule, liblktlang.GrammarList):
                return List(lower(rule.f_expr),
                            empty_valid=rule.f_kind.text == '*',
                            list_cls=resolve_node_ref(rule.f_list_type),
                            sep=lower(rule.f_sep),
                            location=loc)

            elif isinstance(
                    rule,
                (liblktlang.GrammarImplicitPick, liblktlang.GrammarPick)):
                return Pick(*[lower(subparser) for subparser in rule.f_exprs],
                            location=loc)

            elif isinstance(rule, liblktlang.GrammarRuleRef):
                return getattr(grammar, rule.f_node_name.text)

            elif isinstance(rule, liblktlang.GrammarOrExpr):
                return Or(
                    *[lower(subparser) for subparser in rule.f_sub_exprs],
                    location=loc)

            elif isinstance(rule, liblktlang.GrammarOpt):
                return Opt(lower(rule.f_expr), location=loc)

            elif isinstance(rule, liblktlang.GrammarOptGroup):
                return Opt(*[lower(subparser) for subparser in rule.f_expr],
                           location=loc)

            elif isinstance(rule, liblktlang.GrammarExprList):
                return Pick(*[lower(subparser) for subparser in rule],
                            location=loc)

            elif isinstance(rule, liblktlang.GrammarDiscard):
                return Discard(lower(rule.f_expr), location=loc)

            elif isinstance(rule, liblktlang.GrammarNull):
                return Null(resolve_node_ref(rule.f_name), location=loc)

            elif isinstance(rule, liblktlang.GrammarSkip):
                return Skip(resolve_node_ref(rule.f_name), location=loc)

            elif isinstance(rule, liblktlang.GrammarDontSkip):
                return DontSkip(lower(rule.f_expr),
                                lower(rule.f_dont_skip),
                                location=loc)

            elif isinstance(rule, liblktlang.GrammarPredicate):
                check_source_language(
                    isinstance(rule.f_prop_ref, liblktlang.DotExpr),
                    'Invalid property reference')
                node = resolve_node_ref(rule.f_prop_ref.f_prefix)
                prop_name = rule.f_prop_ref.f_suffix.text
                try:
                    prop = getattr(node, prop_name)
                except AttributeError:
                    check_source_language(
                        False, '{} has no {} property'.format(
                            node._name.camel_with_underscores, prop_name))
                return Predicate(lower(rule.f_expr), prop, location=loc)

            else:
                raise NotImplementedError('unhandled parser: {}'.format(rule))
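
The lower helper above is one recursive dispatch over liblktlang node classes: it forwards None as-is and raises NotImplementedError for unhandled cases. The sketch below shows the same shape on a toy expression tree (the Lit and Add classes are made up for this demo):

from dataclasses import dataclass
from typing import List, Optional, Union


@dataclass
class Lit:
    value: int


@dataclass
class Add:
    operands: List['Expr']


Expr = Union[Lit, Add]


def lower(node: Optional[Expr]) -> int:
    # Accept null input for convenience, like the grammar lowering above.
    if node is None:
        return 0
    elif isinstance(node, Lit):
        return node.value
    elif isinstance(node, Add):
        return sum(lower(op) for op in node.operands)
    else:
        raise NotImplementedError('unhandled node: {}'.format(node))


print(lower(Add([Lit(1), Add([Lit(2), Lit(3)])])))  # 6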
コード例 #36
0
 def error_if_not_empty(name_set, message):
     check_source_language(
         not name_set,
         ('{}: {}'.format(message, ', '.join(name
                                             for name in name_set))))
コード例 #37
0
ファイル: collections.py プロジェクト: pmderodat/langkit
    def construct_common(self) -> CollectionExpression.ConstructCommonResult:
        """
        Construct and return the expressions commonly needed by collection
        expression subclasses.
        """
        assert self.element_var is not None

        current_scope = PropertyDef.get_scope()

        # Because of the discrepancy between the storage type in list nodes
        # (always root nodes) and the element type that user code deals with
        # (non-root list elements and/or entities), we may need to introduce
        # variables and initializing expressions. This is what the code below
        # does.

        # First, build the collection expression. From the result, we can
        # deduce the type of the user element variable.
        collection_expr = construct(self.collection)

        # If the collection is actually an entity, unwrap the bare list node
        # and save the entity info for later.
        with_entities = collection_expr.type.is_entity_type
        if with_entities:
            saved_entity_coll_expr, collection_expr, entity_info = (
                collection_expr.destructure_entity()
            )
            collection_expr = SequenceExpr(saved_entity_coll_expr,
                                           collection_expr)

        check_source_language(
            collection_expr.type.is_collection,
            'Cannot iterate on {}, which is not a collection'.format(
                collection_expr.type.dsl_name
            )
        )

        # Now that potential entity types are unwrapped, we can look for its
        # element type.
        elt_type = collection_expr.type.element_type
        if with_entities:
            elt_type = elt_type.entity
        self.element_var.set_type(elt_type)
        user_element_var = construct(self.element_var)

        # List of element variables, and the associated initialization
        # expressions (when applicable).
        #
        # Start with the only element variable that exists at this point: the
        # one that the user code for each iteration uses directly. When
        # relevant, each step in the code below creates a new variable N and
        # initializes variable N-1 from it.
        element_vars: List[InitializedVar] = [InitializedVar(user_element_var)]

        # Node lists contain bare nodes: if the user code deals with entities,
        # create a variable to hold a bare node and initialize the user
        # variable using it.
        if with_entities:
            entity_var = element_vars[-1]
            node_var = AbstractVariable(
                names.Name('Bare') + self.element_var._name,
                type=elt_type.element_type
            )
            entity_var.init_expr = make_as_entity(
                construct(node_var), entity_info=entity_info
            )
            element_vars.append(InitializedVar(construct(node_var)))

        # Node lists contain root nodes: if the user code deals with non-root
        # nodes, create a variable to hold the root bare node and initialize
        # the non-root node using it.
        if (
            collection_expr.type.is_list_type
            and not collection_expr.type.is_root_node
        ):
            typed_elt_var = element_vars[-1]
            untyped_elt_var = AbstractVariable(
                names.Name('Untyped') + self.element_var._name,
                type=get_context().root_grammar_class
            )
            typed_elt_var.init_expr = UncheckedCastExpr(
                construct(untyped_elt_var), typed_elt_var.var.type
            )
            element_vars.append(InitializedVar(construct(untyped_elt_var)))

        # Keep track of the ultimate "codegen" element variable. Unlike all
        # other iteration variables, it is the only one that will be defined by
        # the "for" loop in Ada (the other ones must be declared as regular
        # local variables).
        codegen_element_var = element_vars[-1].var

        # Create a scope to contain the code that runs during an iteration and
        # lower the iteration expression.
        with current_scope.new_child() as inner_scope:
            inner_expr = construct(self.expr)

        # Build the list of all iteration variables
        iter_vars = list(element_vars)
        index_var = None
        if self.index_var:
            index_var = construct(self.index_var)
            iter_vars.append(InitializedVar(index_var))

        # Create local variables for all iteration variables that need it
        for v in iter_vars:
            if v.var != codegen_element_var:
                v.var.abstract_var.create_local_variable(inner_scope)

        return self.ConstructCommonResult(
            collection_expr,
            codegen_element_var,
            user_element_var,
            index_var,
            iter_vars,
            inner_expr,
            inner_scope,
        )
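
The chain of InitializedVar records above can be hard to follow: each step appends a lower-level variable and records how the previous one is initialized from it, and only the last variable is left for the generated for loop to define. A minimal sketch of that chaining, with string placeholders instead of resolved expressions (the Ada-looking initializers are purely illustrative):

from dataclasses import dataclass
from typing import Optional


@dataclass
class InitializedVar:
    var: str
    init_expr: Optional[str] = None


element_vars = [InitializedVar('User_Element')]

# The user variable is an entity: initialize it from a bare node.
element_vars[-1].init_expr = 'As_Entity (Bare_Element)'
element_vars.append(InitializedVar('Bare_Element'))

# The bare node is not the root type: initialize it from an untyped (root)
# node with an unchecked cast.
element_vars[-1].init_expr = 'Unchecked_Cast (Untyped_Element)'
element_vars.append(InitializedVar('Untyped_Element'))

# The last variable is the one the generated "for" loop defines itself.
codegen_element_var = element_vars[-1].var
for v in element_vars:
    print(v.var, '<-', v.init_expr)
print('loop-defined variable:', codegen_element_var)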
コード例 #38
0
ファイル: collections.py プロジェクト: pmderodat/langkit
 def check_array(typ: CompiledType) -> None:
     check_source_language(
         typ.is_array_type,
         "Expected array type, got {}".format(typ.dsl_name)
     )
コード例 #39
0
ファイル: compile_context.py プロジェクト: AdaCore/langkit
    def compute_types(self):
        """
        Compute various pieces of information related to compiled types that
        need to be available for code generation.
        """

        # Get the list of ASTNode types from the Struct metaclass
        from langkit.compiled_types import (
            EnvElement, LexicalEnvType, StructMetaclass
        )

        self.astnode_types = list(StructMetaclass.astnode_types)

        # Here we're skipping Struct because it's not a real type in
        # generated code. We're also putting env_metadata and EnvElement in
        # the beginning and in the right dependency order (the metadata
        # type before the env element type).
        # TODO: Using a dependency order topological sort wouldn't hurt at
        # some point.
        self.struct_types = [
            t for t in StructMetaclass.struct_types
            if t not in [EnvElement, StructMetaclass.env_metadata]
        ]
        self.struct_types.insert(0, EnvElement)

        if StructMetaclass.env_metadata:
            self.struct_types = (
                [StructMetaclass.env_metadata] + self.struct_types
            )

        self.root_grammar_class = StructMetaclass.root_grammar_class
        self.env_metadata = StructMetaclass.env_metadata
        self.env_element = EnvElement

        # The Group lexical environment operation takes an array of lexical
        # envs, so we always need to generate the corresponding array type.
        self.array_types.add(LexicalEnvType.array_type())

        # Likewise for the EnvElement array type: LexicalEnv.get returns it.
        # No need to bind anything if the language specification did not
        # specify any EnvElement, though.
        if self.env_element:
            self.array_types.add(EnvElement.array_type())

        # Sort them in dependency order as required but also then in
        # alphabetical order so that generated declarations are kept in a
        # relatively stable order. This is really useful for debugging
        # purposes.
        keys = {
            cls: cls.hierarchical_name()
            for cls in self.astnode_types
        }
        self.astnode_types.sort(key=lambda cls: keys[cls])

        # Check that the environment hook is bound if the language spec uses
        # it.
        if self.env_hook_subprogram is None:
            for t in self.astnode_types:
                with t.diagnostic_context():
                    check_source_language(
                        t.env_spec is None or not t.env_spec.env_hook_enabled,
                        'Cannot invoke the environment hook if'
                        ' CompileContext.bind_env_hook has not been called'
                    )
コード例 #40
0
ファイル: dsl.py プロジェクト: geoffreycopin/langkit
    def process_subclass(mcs, name, bases, dct, is_root):
        from langkit.envs import EnvSpec

        location = extract_library_location()
        base = bases[0]
        is_list_type = issubclass(base, _ASTNodeList)
        is_root_list_type = base is _ASTNodeList

        node_ctx = Context('in {}'.format(name), location)

        with node_ctx:
            check_source_language(
                len(bases) == 1, 'ASTNode subclasses must have exactly one'
                ' base class')
            if mcs.root_type is not None:
                check_source_language(
                    base is not ASTNode,
                    'Only one class can derive from ASTNode (previous was:'
                    ' {})'.format(mcs.root_type.__name__))

            env_spec = dct.pop('env_spec', None)
            check_source_language(
                env_spec is None or isinstance(env_spec, EnvSpec),
                'Invalid environment specification: {}'.format(env_spec))

            annotations = dct.pop('annotations', None)

        # If this is a list type, determine the corresponding element type
        if is_list_type:
            element_type = (dct.pop('_element_type')
                            if is_root_list_type else base._element_type)
            allowed_field_types = PropertyDef
        else:
            element_type = None
            allowed_field_types = AbstractNodeData

        # Determine if this is a token node
        with node_ctx:
            is_token_node = dct.pop('token_node', None)
            check_source_language(
                is_token_node is None or isinstance(is_token_node, bool),
                'The "token_node" field, when present, must contain a boolean')

            # If "token_node" allocation is left to None, inherit it (default
            # is False).
            if is_token_node is None:
                is_token_node = bool(base._is_token_node)

            if is_token_node:
                allowed_field_types = (_UserField, PropertyDef)
            else:
                # Make sure that all derivations of a token node are token
                # nodes themselves.
                check_source_language(
                    not base._is_token_node,
                    '"token_node" annotation inconsistent with inherited AST'
                    ' node')

        # Handle enum nodes
        with node_ctx:
            # Forbid inheriting from an enum node
            check_source_language(
                not base._is_enum_node,
                'Inheriting from an enum node is forbidden.')

            # Determine if this is an enum node
            is_enum_node = dct.pop('enum_node', False)
            check_source_language(
                isinstance(is_enum_node, bool),
                'The "enum_node" field, when present, must contain a boolean')

            if is_enum_node:
                qualifier = dct.pop('qualifier', False)
                if qualifier:
                    alternatives = ['present', 'absent']
                else:
                    alternatives = dct.pop('alternatives', None)
                    check_source_language(alternatives is not None,
                                          'Missing "alternatives" field')
                    check_source_language(
                        isinstance(alternatives, list)
                        and all(isinstance(alt, str) for alt in alternatives),
                        'The "alternatives" field must contain a list of '
                        'strings')

                alts = [
                    _EnumNodeAlternative(names.Name.from_lower(alt))
                    for alt in alternatives
                ]

                allowed_field_types = (_UserField, PropertyDef)

        fields = ASTNode.collect_fields(name, location, dct,
                                        allowed_field_types)

        DSLType._import_base_type_info(name, location, dct)

        if is_enum_node:
            mcs.import_enum_node_attributes(dct, qualifier, alts, fields)

        dct['_fields'] = fields
        dct['_base'] = base
        dct['_env_spec'] = env_spec
        dct['_is_token_node'] = is_token_node
        dct['_is_enum_node'] = is_enum_node

        # Make sure subclasses don't inherit the "list_type" cache from their
        # base classes.
        dct['_list_type'] = None
        dct['_element_type'] = element_type
        dct['_annotations'] = annotations

        cls = type.__new__(mcs, name, bases, dct)

        mcs.astnode_types.append(cls)

        # Create the corresponding ASTNodeType subclass
        if cls._base is _ASTNodeList:
            # Only root list types are supposed to directly subclass
            # _ASTNodeList.
            element_type = cls._element_type._resolve()
            assert element_type
            astnode_type = element_type.list
        else:
            astnode_type = ASTNodeType(
                cls._name,
                cls._location,
                cls._doc,
                base=None if is_root else cls._base._resolve(),
                fields=cls._fields,
                env_spec=cls._env_spec,
                annotations=cls._annotations,

                # Only enum nodes are abstract at this point
                is_abstract=cls._is_enum_node,
                is_enum_node=cls._is_enum_node,
                is_bool_node=cls._is_enum_node and cls._qualifier,
                is_token_node=cls._is_token_node)

        astnode_type.dsl_decl = cls
        cls._type = astnode_type

        if is_enum_node:
            mcs.create_enum_node_alternatives(cls, astnode_type)

        return cls
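
For enum nodes, the code above either synthesizes the implicit present/absent alternatives of a qualifier or validates the user-provided list of alternative names. A small standalone sketch of that branch (plain dict and asserts, names illustrative):

def compute_alternatives(dct):
    # Qualifier enum nodes get the two implicit alternatives; other enum
    # nodes must provide an explicit list of strings.
    qualifier = dct.pop('qualifier', False)
    if qualifier:
        return ['present', 'absent']
    alternatives = dct.pop('alternatives', None)
    assert alternatives is not None, 'Missing "alternatives" field'
    assert isinstance(alternatives, list) and all(
        isinstance(alt, str) for alt in alternatives
    ), 'The "alternatives" field must contain a list of strings'
    return alternatives


print(compute_alternatives({'qualifier': True}))
# ['present', 'absent']
print(compute_alternatives({'alternatives': ['plus', 'minus']}))
# ['plus', 'minus']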
コード例 #41
0
ファイル: structs.py プロジェクト: AdaCore/langkit
    def construct(self):
        """
        Constructs a resolved expression that is the result of:

        - Resolving the receiver;
        - Getting its corresponding field.

        :rtype: FieldAccessExpr
        """

        receiver_expr = construct(self.receiver)
        check_source_language(
            issubclass(receiver_expr.type, Struct),
            '{} values have no field (accessed field was {})'.format(
                receiver_expr.type.name().camel,
                self.field
            )
        )

        to_get = assert_type(
            receiver_expr.type, Struct
        ).get_abstract_fields_dict().get(self.field, None)
        ":type: AbstractNodeField"

        # If still not found, there's a problem
        check_source_language(
            to_get is not None, "Type {} has no '{}' field or property".format(
                receiver_expr.type.__name__, self.field
            )
        )

        check_source_language(
            not to_get.is_internal,
            '{} is for internal use only'.format(to_get.qualname)
        )

        # Check that this property actually accepts these arguments and that
        # they are correctly typed.
        check_source_language(
            len(self.arguments) == len(to_get.explicit_arguments),
            'Invalid number of arguments in the call to {}:'
            ' {} expected but got {}'.format(
                to_get.qualname,
                len(to_get.explicit_arguments),
                len(self.arguments),
            )
        )

        arg_exprs = [
            construct(
                actual, formal.type,
                custom_msg='Invalid {} actual (#{}) for {}:'.format(
                    formal.name, i, to_get.qualname,
                ) + ' expected {expected} but got {expr_type}'
            ) for i, (actual, formal) in enumerate(
                zip(self.arguments, to_get.explicit_arguments), 1
            )
        ]

        ret = FieldAccess.Expr(receiver_expr, to_get, arg_exprs)
        return ret
コード例 #42
0
ファイル: compile_context.py プロジェクト: AdaCore/langkit
    def _compile(self):
        """
        Compile the language specification: perform legality checks and type
        inference.
        """
        # Compile the first time, do nothing next times
        if self.compiled:
            return
        self.compiled = True

        assert self.grammar, "Set grammar before compiling"

        if not self.grammar.rules.get(self.main_rule_name, None):
            close_matches = difflib.get_close_matches(
                self.main_rule_name, self.grammar.rules.keys()
            )

            with self.grammar.context():
                check_source_language(
                    False,
                    'Invalid rule name specified for main rule: "{}". '
                    '{}'.format(
                        self.main_rule_name,
                        'Did you mean "{}"?'.format(close_matches[0])
                        if close_matches else ""
                    )
                )

        unreferenced_rules = self.grammar.get_unreferenced_rules()

        check_source_language(
            not unreferenced_rules, "The following parsing rules are not "
            "used: {}".format(", ".join(sorted(unreferenced_rules))),
            severity=Severity.warning
        )

        # Compute type information, so that it is available for further
        # compilation stages.
        self.compute_types()
        errors_checkpoint()

        if self.verbosity.info:
            printcol("Compiling the grammar...", Colors.OKBLUE)

        with names.camel_with_underscores:
            # Compute the type of fields for types used in the grammar
            for r_name, r in self.grammar.rules.items():
                r.compute_fields_types()

            # Compute properties information, so that it is available for
            # further compilation stages.
            self.compute_properties()
            errors_checkpoint()

            for r_name, r in self.grammar.rules.items():
                r.compile()
                self.rules_to_fn_names[r_name] = r

        unresolved_types = set([t for t in self.astnode_types
                                if not t.is_type_resolved])
        check_source_language(
            not unresolved_types,
            "The following ASTNode subclasses are not type resolved. They are"
            " not used by the grammar, and their types not annotated:"
            " {}".format(", ".join(t.name().camel for t in unresolved_types))
        )

        astnodes_files = {
            path.abspath(inspect.getsourcefile(n)) for n in self.astnode_types
        }

        if self.annotate_fields_types:
            # Only import lib2to3 if the user needs it
            import lib2to3.main

            lib2to3.main.main(
                "langkit",
                ["-f", "annotate_fields_types",
                 "--no-diff", "-w"] + list(astnodes_files)
            )

        for i, astnode in enumerate(
            (astnode
             for astnode in self.astnode_types
             if not astnode.abstract),
            # Compute kind constants for all ASTNode concrete subclasses.
            # Start with 2: the constant 0 is reserved as an
            # error/uninitialized code and the constant 1 is reserved for all
            # ASTList nodes.
            start=2
        ):
            self.node_kind_constants[astnode] = i

        # Now that all Struct subclasses referenced by the grammar have been
        # typed, iterate over all declared subclasses to register the ones that
        # are unreachable from the grammar.  TODO: this kludge will eventually
        # disappear as part of OC22-016.
        for t in self.struct_types + self.astnode_types:
            t.add_to_context()

        errors_checkpoint()
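
The "did you mean" suggestion above relies on difflib.get_close_matches from the standard library. A self-contained sketch with made-up rule names:

import difflib

rules = ['compilation_unit', 'expression', 'statement']
main_rule_name = 'expresion'

# get_close_matches returns candidates sorted by similarity, best first.
close_matches = difflib.get_close_matches(main_rule_name, rules)
message = 'Invalid rule name specified for main rule: "{}". {}'.format(
    main_rule_name,
    'Did you mean "{}"?'.format(close_matches[0]) if close_matches else ''
)
print(message)
# Invalid rule name specified for main rule: "expresion". Did you mean
# "expression"?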
コード例 #43
0
    def _emit_to_field_unparser(parser, field_unparser, pre_tokens,
                                post_tokens):
        """
        Considering ``field_unparser`` as a field unparser we are in the
        process of elaborating, and ``pre_tokens`` and ``post_tokens`` as the
        token sequences that surround this field, extract information from the
        given ``parser`` to complete them.

        If ``parser`` is anything other than a Null parser, set
        ``field_unparser.always_absent`` to False.

        Emit a user diagnostic if ``parser`` is too complex for this analysis.

        :param Parser parser: Parser to analyze.
        :param FieldUnparser field_unparser: Field unparser to complete.
        :param TokenSequenceUnparser pre_tokens: Token sequence to contain the
            list of tokens that appear before the field, whether or not the
            field is present. Tokens are inserted at the end of this sequence.
        :param TokenSequenceUnparser post_tokens: Token sequence to contain
            the list of tokens that appear after the field, whether or not the
            field is present. Tokens are inserted at the beginning of this
            sequence.
        """
        parser = unwrap(parser)

        # As all fields are nodes, previous validation passes made sure that
        # `parser` yields a parse node (potentially a null one).

        if isinstance(parser, (Defer, List, Null, _Transform)):
            # Field parsing goes directly to node creation, so there is no
            # pre/post sequences of tokens.
            field_unparser.always_absent = (field_unparser.always_absent
                                            and isinstance(parser, Null))

        elif isinstance(parser, Opt):
            if not parser._booleanize:
                # Because we are in an Opt parser, we now know that this field
                # is optional, so it can be absent.
                field_unparser.always_absent = False
                field_unparser.empty_list_is_absent = parser.type.is_list_type

                # Starting from here, tokens to be unparsed in
                # ``parser.parser`` must be unparsed iff the field is present,
                # so respectively prepend and append token sequences in the
                # recursion to the field unparser itself.
                pre_tokens = TokenSequenceUnparser()
                post_tokens = TokenSequenceUnparser()
                NodeUnparser._emit_to_field_unparser(parser.parser,
                                                     field_unparser,
                                                     pre_tokens, post_tokens)
                field_unparser.pre_tokens = (pre_tokens +
                                             field_unparser.pre_tokens)
                field_unparser.post_tokens = (field_unparser.post_tokens +
                                              post_tokens)

        elif isinstance(parser, Or):
            # Just check that all subparsers create nodes, and thus that there
            # is nothing specific to do here: the unparser will just recurse on
            # this field.
            field_unparser.always_absent = False
            for subparser in parser.parsers:
                # Named parsing rules always create nodes, so we don't need to
                # check Defer parsers. Skip parsers also create nodes, but most
                # importantly they trigger a parsing error, so unparsers can
                # ignore them.
                if not isinstance(subparser, (Defer, Skip)):
                    NodeUnparser.from_parser(subparser.type, subparser)

        elif isinstance(parser, _Extract):
            field_unparser.always_absent = False
            pre_toks, node_parser, post_toks = NodeUnparser._split_extract(
                parser)

            # Pre and post-tokens from this _Extract parser appear whether or
            # not the parsed field is present, so they go in ``pre_tokens`` and
            # ``post_tokens``, not in the field unparser itself.
            pre_tokens.tokens = pre_tokens.tokens + pre_toks.tokens
            post_tokens.tokens = post_toks.tokens + post_tokens.tokens
            NodeUnparser._emit_to_field_unparser(node_parser, field_unparser,
                                                 pre_tokens, post_tokens)

        else:
            check_source_language(
                False, 'Unsupported parser for node field: {}'.format(parser))
コード例 #44
0
 def check_never_equal(can_be_equal):
     check_source_language(
         can_be_equal, '{} and {} values are never equal'.format(
             lhs.type.dsl_name, rhs.type.dsl_name))
コード例 #45
0
ファイル: structs.py プロジェクト: AdaCore/langkit
 def error_if_not_empty(name_set, message):
     check_source_language(not name_set, ('{}: {}'.format(
         message, ', '.join(name for name in name_set)
     )))
コード例 #46
0
    def run(self, argv=None):
        parsed_args = self.args_parser.parse_args(argv)

        for trace in parsed_args.trace:
            print("Trace {} is activated".format(trace))
            Log.enable(trace)

        Diagnostics.set_style(parsed_args.diagnostic_style)

        if parsed_args.profile:
            import cProfile
            import pstats

            pr = cProfile.Profile()
            pr.enable()

        # Set the verbosity
        self.verbosity = parsed_args.verbosity

        self.no_ada_api = parsed_args.no_ada_api

        # If asked to, setup the exception hook as a last-chance handler to
        # invoke a debugger in case of uncaught exception.
        if parsed_args.debug:
            # Try to use IPython's debugger if it is available, otherwise
            # fallback to PDB.
            try:
                # noinspection PyPackageRequirements
                from IPython.core import ultratb
            except ImportError:
                ultratb = None  # To keep PyCharm happy...

                def excepthook(type, value, tb):
                    traceback.print_exception(type, value, tb)
                    pdb.post_mortem(tb)

                sys.excepthook = excepthook
            else:
                sys.excepthook = ultratb.FormattedTB(mode='Verbose',
                                                     color_scheme='Linux',
                                                     call_pdb=1)
            del ultratb

        self.dirs.set_build_dir(parsed_args.build_dir)
        install_dir = getattr(parsed_args, 'install-dir', None)
        if install_dir:
            self.dirs.set_install_dir(install_dir)

        if getattr(parsed_args, 'list_warnings', False):
            WarningSet.print_list()
            return

        # noinspection PyBroadException
        try:
            parsed_args.func(parsed_args)

        except DiagnosticError:
            if parsed_args.debug:
                raise
            if parsed_args.verbosity.debug or parsed_args.full_error_traces:
                traceback.print_exc()
            print(col('Errors, exiting', Colors.FAIL), file=sys.stderr)
            sys.exit(1)

        except Exception as e:
            if parsed_args.debug:
                raise
            ex_type, ex, tb = sys.exc_info()

            # If we have a syntax error, we know for sure the last stack frame
            # points to the code that must be fixed. Otherwise, point to the
            # top-most stack frame that does not belong to Langkit.
            if e.args and e.args[0] == 'invalid syntax':
                loc = Location(e.filename, e.lineno)
            else:
                loc = extract_library_location(traceback.extract_tb(tb))
            with Context("", loc, "recovery"):
                check_source_language(False, str(e), do_raise=False)

            # Keep Langkit bug "pretty" for users: display the Python stack
            # trace only when requested.
            if parsed_args.verbosity.debug or parsed_args.full_error_traces:
                traceback.print_exc()

            print(col('Internal error! Exiting', Colors.FAIL), file=sys.stderr)
            sys.exit(1)

        finally:
            if parsed_args.profile:
                pr.disable()
                ps = pstats.Stats(pr)
                ps.dump_stats('langkit.prof')
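
The optional profiling support above follows the usual cProfile/pstats pattern: enable a profiler around the work and dump the collected stats on the way out. A runnable sketch of that pattern (the profiled function and output file name are placeholders):

import cProfile
import pstats


def work():
    return sum(i * i for i in range(100000))


pr = cProfile.Profile()
pr.enable()
try:
    work()
finally:
    pr.disable()
    # Write the stats to disk; load them later with pstats.Stats('langkit.prof').
    ps = pstats.Stats(pr)
    ps.dump_stats('langkit.prof')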
コード例 #47
0
    def _resolve_property(name: str, prop_ref: _Any,
                          arity: int) -> Optional[PropertyDef]:
        """
        Resolve the ``prop_ref`` property reference (if any, built in the DSL)
        to the referenced property. If it is present, check its signature.

        :param name: Name of the property in the DSL construct. Used to format
            the error message.
        :param prop_ref: Property reference to resolve.
        :param arity: Expected number of entity arguments for this property
            ("Self" included).
        """
        from langkit.expressions import FieldAccess

        # First, resolve the property

        prop: PropertyDef

        if prop_ref is None:
            return None

        elif isinstance(prop_ref, FieldAccess):
            node_data = prop_ref.resolve_field()
            if isinstance(node_data, PropertyDef):
                prop = node_data
            else:
                error(f"{name} must be a property")

        elif isinstance(prop_ref, T.Defer):
            prop = prop_ref.get()

        elif isinstance(prop_ref, PropertyDef):
            prop = prop_ref

        else:
            error(
                f"{name} must be either a FieldAccess resolving to a property,"
                " or a direct reference to a property")

        # Second, check its signature

        prop = prop.root_property
        assert prop.struct
        check_source_language(
            prop.struct.matches(T.root_node),
            f"{name} must belong to a subtype of {T.root_node.dsl_name}",
        )

        # Check that it takes the expected number of arguments. "Self" counts
        # as an implicit argument, so we expect at least ``arity - 1`` natural
        # arguments.
        n_args = arity - 1
        entity_args = prop.natural_arguments[:n_args]
        extra_args = prop.natural_arguments[n_args:]
        check_source_language(
            len(entity_args) == n_args
            and all(arg.type.is_entity_type for arg in entity_args),
            f"{name} property must accept {n_args} entity arguments (only"
            f" {len(entity_args)} found)",
        )

        # The other arguments must be optional
        check_source_language(
            all(arg.default_value is not None for arg in extra_args),
            f"extra arguments for {name} must be optional",
        )

        # Check the property return type
        check_source_language(
            prop.type.matches(T.root_node.entity),
            f"{name} must return a subtype of {T.entity.dsl_name}",
        )

        # Check that all dynamic variables for this property are bound in the
        # current expression context.
        DynamicVariable.check_call_bindings(prop,
                                            f"In call to {{prop}} as {name}")

        # Third, generate a functor for this property, so that equations can
        # refer to it.
        from langkit.compile_context import get_context
        get_context().do_generate_logic_functors(prop, arity)

        return prop
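
The arity check above splits the property's natural arguments into the leading entity arguments and the trailing extras, which must all carry defaults. A standalone sketch of that check with a simplified Argument stand-in:

from dataclasses import dataclass
from typing import List, Optional


@dataclass
class Argument:
    name: str
    is_entity: bool
    default_value: Optional[str] = None


def check_signature(natural_arguments: List[Argument], arity: int) -> None:
    # "Self" is implicit, so a property of the given arity must expose
    # arity - 1 leading entity arguments; anything after that needs a default.
    n_args = arity - 1
    entity_args = natural_arguments[:n_args]
    extra_args = natural_arguments[n_args:]
    assert len(entity_args) == n_args and all(
        a.is_entity for a in entity_args
    ), 'expected {} entity arguments'.format(n_args)
    assert all(
        a.default_value is not None for a in extra_args
    ), 'extra arguments must be optional'


check_signature(
    [Argument('other', True), Argument('depth', False, default_value='1')],
    arity=2,
)
print('signature OK')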
コード例 #48
0
 def check(token):
     check_source_language(
         token not in (self.tokens.Termination,
                       self.tokens.LexingFailure),
         '{} is reserved for automatic actions only'.format(
             token.dsl_name))
コード例 #49
0
    def construct(self):
        check_multiple([
            (self.pred_property.type.matches(T.Bool),
             'Predicate property must return a boolean, got {}'.format(
                 self.pred_property.type.dsl_name)),
            (self.pred_property.struct.matches(T.root_node),
             'Predicate property must belong to a subtype of {}'.format(
                 T.root_node.dsl_name)),
        ])

        # Separate logic variable expressions from extra argument expressions
        exprs = [construct(e) for e in self.exprs]
        logic_var_exprs, closure_exprs = funcy.lsplit_by(
            lambda e: e.type == T.LogicVar, exprs)
        check_source_language(
            len(logic_var_exprs) > 0, "Predicate instantiation should have at "
            "least one logic variable expression")
        check_source_language(
            all(e.type != T.LogicVar for e in closure_exprs),
            'Logic variable expressions should be grouped at the beginning,'
            ' and should not appear after non logic variable expressions')

        # Make sure this predicate will work on clean logic variables
        logic_var_exprs = [ResetLogicVar(expr) for expr in logic_var_exprs]

        # Compute the list of arguments to pass to the property (Self
        # included).
        args = (
            [Argument(names.Name('Self'), self.pred_property.struct.entity)] +
            self.pred_property.natural_arguments)

        # Then check that 1) all extra passed actuals match what the property
        # arguments expect and that 2) arguments left without an actual have a
        # default value.
        default_passed_args = 0
        for i, (expr, arg) in enumerate(zip_longest(exprs, args)):

            if expr is None:
                check_source_language(
                    arg.default_value is not None,
                    'Missing an actual for argument #{} ({})'.format(
                        i, arg.name.lower))
                default_passed_args += 1
                continue

            check_source_language(
                arg is not None,
                'Too many actuals: at most {} expected, got {}'.format(
                    len(args), len(exprs)))

            if expr.type == T.LogicVar:
                check_source_language(
                    arg.type.matches(T.root_node.entity),
                    "Argument #{} of predicate "
                    "is a logic variable, the corresponding property formal "
                    "has type {}, but should be a descendent of {}".format(
                        i, arg.type.dsl_name, T.root_node.entity.dsl_name))
            else:
                check_source_language(
                    expr.type.matches(arg.type), "Argument #{} of predicate "
                    "has type {}, should be {}".format(i, expr.type.dsl_name,
                                                       arg.type.dsl_name))

        DynamicVariable.check_call_bindings(self.pred_property,
                                            'In predicate property {prop}')

        # Append dynamic variables to embed their values in the closure
        closure_exprs.extend(
            construct(dynvar) for dynvar in self.pred_property.dynamic_vars)

        pred_id = self.pred_property.do_generate_logic_predicate(
            tuple(e.type for e in closure_exprs), default_passed_args)

        args = " ({})".format(', '.join(
            ["{}"
             for _ in range(len(closure_exprs))])) if closure_exprs else ""
        predicate_expr = untyped_literal_expr(
            f"Create_{pred_id}_Predicate{args}", operands=closure_exprs)

        return Predicate.Expr(self.pred_property,
                              pred_id,
                              logic_var_exprs,
                              predicate_expr,
                              abstract_expr=self)
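
The zip_longest loop above pairs actuals with formals: running out of formals means too many actuals, while a missing actual is allowed only when the corresponding formal has a default. A self-contained sketch of that matching (Argument is a simplified stand-in):

from dataclasses import dataclass
from itertools import zip_longest
from typing import Optional


@dataclass
class Argument:
    name: str
    default_value: Optional[str] = None


def match_actuals(actuals, formals):
    # Return how many formals fell back to their default value.
    default_passed_args = 0
    for i, (actual, formal) in enumerate(zip_longest(actuals, formals)):
        if formal is None:
            raise ValueError('Too many actuals: at most {} expected, got {}'
                             .format(len(formals), len(actuals)))
        if actual is None:
            if formal.default_value is None:
                raise ValueError('Missing an actual for argument #{} ({})'
                                 .format(i, formal.name))
            default_passed_args += 1
    return default_passed_args


formals = [Argument('self'), Argument('node'), Argument('depth', default_value='0')]
print(match_actuals(['x', 'y'], formals))  # 1: "depth" uses its default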
コード例 #50
0
 def check_array(typ):
     check_source_language(
         typ.is_array_type,
         "Expected array type, got {}".format(typ.dsl_name))
コード例 #51
0
ファイル: structs.py プロジェクト: AdaCore/langkit
    def construct(self):
        """
        Construct a resolved expression for this.

        :rtype: ResolvedExpression
        """
        # Add the variables created for this expression to the current scope
        scope = PropertyDef.get_scope()
        for _, v, _ in self.matchers:
            scope.add(v.local_var)

        matched_expr = construct(self.matched_expr)
        check_source_language(issubclass(matched_expr.type, ASTNode),
                              'Match expressions can only work on AST nodes')

        # Yes, the assertion below is what we just checked above, but unlike
        # check_source_language, assert_type provides type information to
        # PyCharm's static analyzer.
        matched_type = assert_type(matched_expr.type, ASTNode)

        constructed_matchers = []

        # Check (i.e. raise an error if not true) that the set of matchers is
        # valid:

        # * all matchers must target allowed types, i.e. input type subclasses;
        for t, v, e in self.matchers:
            if t is not None:
                check_source_language(
                    t.matches(matched_expr.type),
                    'Cannot match {} (input type is {})'.format(
                        t.name().camel,
                        matched_expr.type.name().camel
                    )
                )
            else:
                # The default matcher (if any) matches the most general type,
                # which is the input type.
                v.set_type(matched_expr.type)
            constructed_matchers.append((construct(v), construct(e)))

        # * all possible input types must have at least one matcher. Also warn
        #   if some matchers are unreachable.
        self._check_match_coverage(matched_type)

        # Compute the return type as the unification of all branches
        _, expr = constructed_matchers[-1]
        rtype = expr.type
        for _, expr in constructed_matchers:
            check_source_language(
                expr.type.matches(rtype), "Wrong type for match expression : "
                "{}, expected {} or sub/supertype".format(
                    expr.type.name().camel, rtype.name().camel
                )
            )
            rtype = expr.type.unify(rtype)

        # This is the expression execution will reach if we have a bug in our
        # code (i.e. if matchers did not cover all cases).
        result = UnreachableExpr(rtype)

        # Wrap this "failing" expression with all the cases to match in the
        # appropriate order, so that in the end the first matchers are tested
        # first.
        for match_var, expr in reversed(constructed_matchers):
            casted = Cast.Expr(matched_expr,
                               match_var.type,
                               result_var=match_var)
            guard = Not.make_expr(
                Eq.make_expr(casted, LiteralExpr('null', casted.type))
            )
            if expr.type != rtype:
                # We already checked that type matches, so only way this is
                # true is if expr.type is an ASTNode type derived from
                # rtype. In that case, we need an explicit upcast.
                expr = Cast.Expr(expr, rtype)

            result = If.Expr(guard, expr, result, rtype)

        return result
コード例 #52
0
from __future__ import absolute_import, division, print_function

from langkit import compiled_types, expressions
from langkit.diagnostics import check_source_language, Severity
from langkit.utils import dispatch_on_type

try:
    from docutils.core import publish_parts
except ImportError:  # no-code-coverage
    check_source_language(
        False,
        "Missing docutils to properly render sphinx doc. Install the "
        "docutils package",
        severity=Severity.warning
    )

    # Provide a stub implementation for publish_parts
    def publish_parts(x, *args, **kwargs):
        return {'html_body': x}


def trim_docstring_lines(docstring):
    """
    Return a trimmed version of the docstring, removing leading whitespace
    from each line according to the indentation offset of the first line.

    :type docstring: str
    """

    # Remove leading newline if needed
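
The example does not show the rest of the function body. For illustration only, here is a minimal sketch of the usual dedent logic (hypothetical, in the spirit of PEP 257 docstring processing, not the project's actual implementation):

def trim_docstring_lines_sketch(docstring):
    """
    Hypothetical stand-in: strip from every line the indentation measured on
    the first non-blank line.
    """
    lines = docstring.expandtabs().splitlines()

    # Remove leading newline if needed
    if lines and not lines[0].strip():
        lines = lines[1:]
    if not lines:
        return ''

    # Offset taken from the first remaining line, then stripped everywhere
    offset = len(lines[0]) - len(lines[0].lstrip())
    return '\n'.join(line[offset:] for line in lines)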
Code example #53
File: envs.py Project: AdaCore/langkit
    def __init__(self,
                 add_env=False,
                 add_to_env=None,
                 ref_envs=None,
                 initial_env=None,
                 env_hook_arg=None):
        """

        :param bool add_env: Whether to add a new scoped lexical environment.
            The new environment will be linked to the corresponding AST node
            and will have the AST node's lexical environment as a parent.

        :param add_to_env: Either an AddToEnv named tuple, or a list of them.
            Used to add elements to the lexical environment. See add_to_env's
            doc for more details.
        :type add_to_env: AddToEnv|[AddToEnv]

        :param AbstractExpression ref_envs: If an AbstractExpression returning
            a list of environments is supplied, the topmost environment in the
            environment resolution will be altered to include the list of
            environments as referenced environments. TODO: Not yet implemented!

        :param AbstractExpression initial_env: If supplied, this env will be
            used as the lexical environment in which to execute the rest of
            the actions. For example, if you pass both initial_env and
            add_env, the new env will be added to the env passed as
            initial_env, and the node concerned by this env specification will
            indirectly have initial_env as a parent.

        :param AbstractExpression env_hook_arg: Does nothing if left as None.
            If supplied, it must be an abstract expression that resolves to a
            node. This expression will be evaluated and passed to the
            environment hook.
        """

        self.ast_node = None
        """
        ASTNode subclass associated to this environment specification.
        Initialized when creating ASTNode subclasses.
        :type: langkit.compiled_types.ASTNode
        """

        self._add_env = add_env
        ":type: bool"

        # The following attributes (unresolved_*) contain abstract expressions
        # used to describe various environment behaviors. They all have
        # corresponding attributes that embed them as properties: see below.

        self._unresolved_initial_env = initial_env
        ":type: AbstractExpression"

        self._unresolved_envs_expressions = []
        ":type: list[AddToEnv]"

        self.envs_expressions = []
        ":type: list[AddToEnv]"

        if add_to_env:
            check_source_language(
                isinstance(add_to_env, AddToEnv)
                or isinstance(add_to_env, list),
                "Wrong parameter for add_to_env: Expected AddToEnv named-tuple"
                " or list of AddToEnv"
            )

            self._unresolved_envs_expressions = (
                [add_to_env] if isinstance(add_to_env, AddToEnv)
                else add_to_env
            )

        self._unresolved_ref_envs = ref_envs
        ":type: AbstractExpression"

        self._unresolved_env_hook_arg = env_hook_arg
        ":type: AbstractExpression"

        # These are the property attributes

        self.initial_env = None
        ":type: PropertyDef"

        self.ref_envs = None
        ":type: PropertyDef"

        self.env_hook_arg = None
        ":type: PropertyDef"

        self.has_post_actions = False
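
The add_to_env handling in this constructor follows a common normalize-to-list pattern: accept either a single AddToEnv named tuple or a list of them, and always store a list. The self-contained sketch below models just that pattern with a stub named tuple (AddToEnvStub and normalize_add_to_env are illustrative names, not part of langkit):

from collections import namedtuple

# Illustrative stand-in for the real AddToEnv named tuple
AddToEnvStub = namedtuple('AddToEnvStub', 'key val')


def normalize_add_to_env(add_to_env):
    """Return add_to_env as a list, accepting a single mapping or a list."""
    if not add_to_env:
        return []
    assert isinstance(add_to_env, (AddToEnvStub, list)), (
        'Expected AddToEnv named-tuple or list of AddToEnv'
    )
    return [add_to_env] if isinstance(add_to_env, AddToEnvStub) else add_to_env


mapping = AddToEnvStub('name', 'node')
assert normalize_add_to_env(None) == []
assert normalize_add_to_env(mapping) == [mapping]
assert normalize_add_to_env([mapping, mapping]) == [mapping, mapping]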