Example #1
File: parsers.py Project: AdaCore/langkit
    def add_rules(self, **kwargs):
        """
        Add rules to the grammar. The keyword arguments provide the names of
        the rules.

        :param dict[str, Parser] kwargs: The rules to add to the grammar.
        """
        import ast
        loc = extract_library_location()

        class GetTheCall(ast.NodeVisitor):
            """
            Helper visitor that gets the corresponding add_rules call in the
            source, so that we are then able to extract the precise line where
            each rule is added.
            """
            def __init__(self):
                self.the_call = None

            def visit_Call(self, call):
                if (
                    isinstance(call.func, ast.Attribute)
                    and call.func.attr == 'add_rules'
                    # Traceback locations are very imprecise, and Python's ast
                    # doesn't have an end location for nodes, so we keep
                    # taking add_rules calls: the last one is necessarily the
                    # right one.
                    and call.lineno <= loc.line
                ):
                    self.the_call = call

        the_call = GetTheCall()
        with open(loc.file) as f:
            file_ast = ast.parse(f.read(), f.name)
            the_call.visit(file_ast)

        # Use the keyword arguments to find the precise line where each rule
        # was declared.
        keywords = {kw.arg: kw.value for kw in the_call.the_call.keywords}

        for name, rule in kwargs.items():
            rule.set_name(names.Name.from_lower(name))
            rule.set_grammar(self)
            rule.set_location(Location(loc.file, keywords[name].lineno, ""))
            rule.is_root = True

            with Context("In definition of rule '{}'".format(name), loc):
                check_source_language(
                    name not in self.rules,
                    "Rule '{}' is already present in the grammar".format(name)
                )

            self.rules[name] = rule
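
For illustration, the AST-walking trick above can be reduced to a self-contained sketch. The source string and rule names below are invented; only the visitor logic mirrors the snippet:

import ast

SOURCE = '''
g.add_rules(
    number=number_rule,
    expr=expr_rule,
)
'''

class FindAddRules(ast.NodeVisitor):
    # Record the last add_rules call at or before a given line.
    def __init__(self, max_line):
        self.max_line = max_line
        self.the_call = None

    def visit_Call(self, call):
        if (isinstance(call.func, ast.Attribute)
                and call.func.attr == 'add_rules'
                and call.lineno <= self.max_line):
            self.the_call = call
        # Keep traversing so nested calls are visited too
        self.generic_visit(call)

visitor = FindAddRules(max_line=10)
visitor.visit(ast.parse(SOURCE))
# Map each keyword argument to the line where its value appears
print({kw.arg: kw.value.lineno for kw in visitor.the_call.keywords})
# -> {'number': 3, 'expr': 4}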
Example #2
    def add_patterns(self, *patterns):
        r"""
        Add the list of named patterns to the lexer's internal patterns. A
        named pattern is a pattern that you can refer to through the {}
        notation in another pattern, or directly via the lexer instance::

            l.add_patterns(
                ('digit', r"[0-9]"),
                ('integer', r"({digit}(_?{digit})*)"),
            )

            l.add_rules(
                (l.patterns.integer, WithText(TokenKind.Number)),
                (Pattern("{integer}(\.{integer})?"),
                 WithText(TokenKind.Number))
            )

        Please note that the order of addition matters if you want to refer to
        patterns in other patterns.

        :param list[(str, str)] patterns: The list of patterns to add.
        """
        loc = extract_library_location()
        for k, v in patterns:
            assert isinstance(k, str)
            assert isinstance(v, str)
            self.patterns.append((k, v, loc))
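
The {name} substitution described in the docstring can be sketched with plain str.format; this is a toy model of the assumed expansion, not langkit's actual code:

patterns = {}

def add_pattern(name, regex):
    # Expand references to previously added patterns, which is why the
    # order of addition matters.
    patterns[name] = regex.format(**patterns)

add_pattern('digit', r"[0-9]")
add_pattern('integer', r"({digit}(_?{digit})*)")
print(patterns['integer'])  # -> ([0-9](_?[0-9])*)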
Example #3
 def __init__(self,
              matcher: Matcher,
              action: Action,
              location: Optional[Location] = None):
     self.matcher = matcher
     self.action = action
     self.location = location or extract_library_location()
Example #4
        def __init__(self, match_length, *alts):
            super().__init__()
            self.location = extract_library_location()
            self.match_length = match_length

            for alt in alts:
                check_source_language(
                    isinstance(alt, Alt),
                    'Invalid alternative to Case matcher: {}'.format(alt)
                )
                check_source_language(
                    alt.match_size <= match_length,
                    'Match size for this Case alternative ({}) cannot be'
                    ' longer than the Case matcher ({} chars)'.format(
                        alt.match_size, match_length
                    )
                )

            check_source_language(
                alts[-1].prev_token_cond is None,
                "The last alternative to a case matcher "
                "must have no prev token condition"
            )

            self.alts = alts[:-1]
            self.default_alt = alts[-1]
Example #5
    def __init__(self, *tokens):
        """
        :type tokens: list[TokenAction]
        """
        self.location = extract_library_location()
        self.tokens = set(tokens)

        self.name = None
        """
Example #6
File: dsl.py Project: briot/langkit
    def process_subclass(mcs, name, bases, dct, is_root):
        from langkit.envs import EnvSpec

        location = extract_library_location()
        base = bases[0]
        is_list_type = issubclass(base, _ASTNodeList)
        is_root_list_type = base is _ASTNodeList

        node_ctx = Context('in {}'.format(name), location)

        with node_ctx:
            check_source_language(
                len(bases) == 1, 'ASTNode subclasses must have exactly one'
                ' base class')
            if mcs.root_type is not None:
                check_source_language(
                    base is not ASTNode,
                    'Only one class can derive from ASTNode (previous was:'
                    ' {})'.format(mcs.root_type.__name__))

            env_spec = dct.pop('env_spec', None)
            check_source_language(
                env_spec is None or isinstance(env_spec, EnvSpec),
                'Invalid environment specification: {}'.format(env_spec))

            annotations = dct.pop('annotations', None)

        # If this is a list type, determine the corresponding element type
        if is_root_list_type:
            element_type = dct.pop('_element_type')
        elif is_list_type:
            element_type = base._element_type
        else:
            element_type = None

        fields = ASTNode.collect_fields(name, location, dct, AbstractNodeData)

        # AST list types are not allowed to have syntax fields
        if is_list_type:
            syntax_fields = [f_n for f_n, f_v in fields if not f_v.is_property]
            with node_ctx:
                check_source_language(
                    not syntax_fields,
                    'ASTNode list types are not allowed to have fields'
                    ' (here: {})'.format(', '.join(sorted(syntax_fields))))

        DSLType._import_base_type_info(name, location, dct)
        dct['_fields'] = fields
        dct['_base'] = base
        dct['_env_spec'] = env_spec

        # Make sure subclasses don't inherit the "list_type" cache from their
        # base classes.
        dct['_list_type'] = None
        dct['_element_type'] = element_type
        dct['_annotations'] = annotations
Example #7
    def __new__(mcs, name, bases, dct):
        # Don't do anything special for Enum itself
        if not mcs.base_enum_type:
            result = type.__new__(mcs, name, bases, dct)
            mcs.base_enum_type = result
            return result

        location = extract_library_location()
        with Context('in {}'.format(name), location):
            check_source_language(
                bases == (Enum, ),
                'Enumeration types must derive from and only from Enum'
            )

            # Get the list of values, initializing their name
            values = []
            for key, value in dct.items():
                # Ignore __special__ fields
                if key.startswith('__') and key.endswith('__'):
                    continue

                check_source_language(
                    isinstance(value, EnumValue),
                    'Enum subclass can only contain EnumValue instances'
                    ' (here, {} is {})'.format(key, value)
                )
                check_source_language(
                    value._type is None,
                    'EnumValue instances cannot be used in multiple Enum'
                    ' subclasses (here: {})'.format(key)
                )
                value._name = names.Name.from_lower(key)
                values.append(value)
            values.sort(key=lambda v: v._id)
            dct['_values'] = values

        DSLType._import_base_type_info(name, location, dct)

        # Create the subclass and associate values to it
        cls = type.__new__(mcs, name, bases, dct)
        for value in cls._values:
            value._type = cls

        # Now create the CompiledType instance, register it where needed
        enum_type = EnumType(cls._name, cls._location, cls._doc,
                             [v._name for v in cls._values])
        enum_type.dsl_decl = cls
        cls._type = enum_type

        # Associate the enumeration values in the DSL/Langkit internals
        for dsl_val, internal_val in zip(cls._values, enum_type.values):
            dsl_val._type = cls
            dsl_val._value = internal_val
            internal_val.dsl_decl = dsl_val

        return cls
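
For context, a hypothetical DSL declaration that this metaclass would process (the class and value names are invented, and the bare EnumValue() constructor is an assumption):

class Color(Enum):
    red = EnumValue()
    green = EnumValue()
    blue = EnumValue()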
Example #8
    def process_subclass(mcs, name, bases, dct):
        location = extract_library_location()

        with Context('in {}'.format(name), location):
            check_source_language(
                bases == (Struct, ),
                'Struct subclasses must derive from Struct only',
            )

        fields = Struct.collect_fields(name, location, dct, _UserField)
        DSLType._import_base_type_info(name, location, dct)
        dct['_fields'] = fields
Example #9
def _check_decorator_use(decorator, expected_cls, cls):
    """
    Helper for class decorators below. Raise a diagnostic error if `cls`,
    which is the input parameter of `decorator`, is not a subclass of
    `expected_cls`.
    """
    location = extract_library_location()
    with Context(location):
        check_source_language(
            issubtype(cls, expected_cls),
            'The {} decorator must be called on a {} subclass'
            ' (here, got: {})'.format(decorator.__name__,
                                      expected_cls.__name__, cls))
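
A sketch of how a class decorator might lean on this helper; the `abstract` decorator and `_abstract` attribute here are illustrative assumptions, not necessarily langkit's exact code:

def abstract(cls):
    # Reject e.g. @abstract applied to a class that is not an ASTNode
    # subclass, with a source-located diagnostic.
    _check_decorator_use(abstract, ASTNode, cls)
    cls._abstract = True
    return cls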
Example #10
    def process_subclass(mcs, name, bases, dct):
        location = extract_library_location()

        with diagnostic_context(location):
            check_source_language(
                bases == (Struct, ),
                'Struct subclasses must derive from Struct only',
            )

        fields = Struct.collect_fields(
            name, location, dct, _UserField, only_null_fields=False
        )
        DSLType._import_base_type_info(name, location, dct)
        dct['_fields'] = fields
Example #11
    def add_rules(self, **kwargs):
        """
        Add rules to the grammar. The keyword arguments provide the names of
        the rules.

        :param dict[str, Parser] kwargs: The rules to add to the grammar.
        """
        import ast
        loc = extract_library_location()

        class GetTheCall(ast.NodeVisitor):
            """
            Helper visitor that gets the corresponding add_rules call in the
            source, so that we are then able to extract the precise line where
            each rule is added.
            """
            def __init__(self):
                self.the_call = None

            def visit_Call(self, call):
                if (isinstance(call.func, ast.Attribute)
                        and call.func.attr == 'add_rules'
                        # Traceback locations are very imprecise, and Python's
                        # ast doesn't have an end location for nodes, so we
                        # keep taking add_rules calls: the last one is
                        # necessarily the right one.
                        and call.lineno <= loc.line):
                    self.the_call = call

        the_call = GetTheCall()
        with open(loc.file) as f:
            file_ast = ast.parse(f.read(), f.name)
            the_call.visit(file_ast)

        # Use the keyword arguments to find the precise line where each rule
        # was declared.
        keywords = {kw.arg: kw.value for kw in the_call.the_call.keywords}

        for name, rule in kwargs.items():
            rule.set_name(names.Name.from_lower(name))
            rule.set_grammar(self)
            rule.set_location(Location(loc.file, keywords[name].lineno, ""))
            rule.is_root = True

            with Context("In definition of rule '{}'".format(name), loc):
                check_source_language(
                    name not in self.rules,
                    "Rule '{}' is already present in the grammar".format(name))

            self.rules[name] = rule
Example #12
File: dsl.py Project: eliericha/langkit
    def __new__(mcs, name, bases, dct):
        # Don't do anything for EnumNode itself: it's just a placeholder
        if bases == (BaseStruct, ):
            return type.__new__(mcs, name, bases, dct)

        location = extract_library_location()
        with Context('in {}'.format(name), location):

            qualifier = dct.pop('qualifier', False)
            if qualifier:
                alternatives = ['present', 'absent']
            else:
                alternatives = dct.pop('alternatives', None)
                check_source_language(alternatives is not None,
                                      'Missing "alternatives" field')
                check_source_language(
                    isinstance(alternatives, list)
                    and all(isinstance(alt, str) for alt in alternatives),
                    'The "alternatives" field must contain a list of strings')

        alts = [
            EnumNode.Alternative(names.Name.from_lower(alt))
            for alt in alternatives
        ]
        fields = EnumNode.collect_fields(name, location, dct,
                                         (_UserField, PropertyDef))

        DSLType._import_base_type_info(name, location, dct)
        dct['_fields'] = fields
        dct['_alternatives'] = alts
        dct['_qualifier'] = qualifier
        if qualifier:
            dct['_alt_present'], dct['_alt_absent'] = alts

        # Make Alternative instances available as EnumNode class attributes for
        # a convenient way to create parsers for them.
        for alt in alts:
            attr_name = (names.Name('alt') + alt.name).lower
            dct[attr_name] = alt

        cls = type.__new__(mcs, name, bases, dct)

        mcs.enum_types.append(cls)
        for alt in alts:
            alt._enum_node_cls = cls

        return cls
Example #13
File: envs.py Project: briot/langkit
    def __init__(self, *actions):
        """
        :param list[EnvAction] actions: A list of environment actions to
            execute.
        """
        self.location = extract_library_location()

        self.ast_node = None
        """
        ASTNodeType subclass associated to this environment specification.
        Initialized when creating ASTNodeType subclasses.
        :type: langkit.compiled_types.ASTNodeType
        """

        actions = list(actions)

        self.env_hook = None
        if actions and isinstance(actions[0], CallEnvHook):
            self.env_hook = actions.pop(0)
            ":type: CallEnvHook"

        self.initial_env = None
        if actions and isinstance(actions[0], SetInitialEnv):
            self.initial_env = actions.pop(0)
            ":type: SetInitialEnv"

        pre, post = split_by(
            lambda a: not isinstance(a, HandleChildren), actions
        )

        # Get rid of the HandleChildren delimiter action
        post = post and post[1:]

        self.pre_actions = pre
        self.post_actions = post
        self.actions = self.pre_actions + self.post_actions

        # These are the property attributes

        self.initial_env_prop = None
        ":type: PropertyDef"

        self.env_hook_arg = None
        ":type: PropertyDef"

        self.adds_env = any(isinstance(a, AddEnv) for a in self.pre_actions)
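
split_by is not shown in these snippets; a minimal sketch of its assumed behavior (split a list into the longest prefix satisfying a predicate, plus the rest) would be:

def split_by(predicate, seq):
    # Return (prefix, rest): prefix is the longest leading run of items
    # for which predicate holds, rest is everything from the first
    # failing item onwards.
    for i, item in enumerate(seq):
        if not predicate(item):
            return seq[:i], seq[i:]
    return seq, []

pre, post = split_by(lambda a: a != 'delim', ['a', 'b', 'delim', 'c'])
print(pre, post)  # -> ['a', 'b'] ['delim', 'c']

Under that reading, `post` starts with the HandleChildren delimiter itself, which is why the code above drops its first element.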
Example #14
File: envs.py Project: nyulacska/langkit
    def __init__(self, *actions):
        """
        :param list[EnvAction] actions: A list of environment actions to
            execute.
        """
        self.location = extract_library_location()

        self.ast_node = None
        """
        ASTNodeType subclass associated to this environment specification.
        Initialized when creating ASTNodeType subclasses.
        :type: langkit.compiled_types.ASTNodeType
        """

        actions = list(actions)

        # If present, allow Do actions to come before SetInitialEnv
        self.pre_initial_env_actions = []
        if any(isinstance(a, SetInitialEnv) for a in actions):
            while actions and isinstance(actions[0], Do):
                self.pre_initial_env_actions.append(actions.pop(0))

        # After that, allow one call to SetInitialEnv
        self.initial_env = None
        if actions and isinstance(actions[0], SetInitialEnv):
            self.initial_env = actions.pop(0)

        pre, post = split_by(
            lambda a: not isinstance(a, HandleChildren), actions
        )

        # Get rid of the HandleChildren delimiter action
        post = post and post[1:]

        self.pre_actions = pre
        self.post_actions = post
        self.actions = self.pre_actions + self.post_actions

        # These are the property attributes

        self.initial_env_prop = None
        ":type: PropertyDef"

        self.adds_env = any(isinstance(a, AddEnv) for a in self.pre_actions)
Example #15
File: dsl.py Project: briot/langkit
    def __new__(mcs, name, bases, dct):
        DSLType._import_base_type_info(name, extract_library_location(), dct)

        cls = type.__new__(mcs, name, bases, dct)

        # If this is an EnumType subclass, register it and create the
        # corresponding CompiledType subclass.
        if mcs._base_cls:
            mcs.enum_types.add(cls)
            cls._type = _EnumType(
                names.Name.from_camel(name),
                cls._location,
                cls._doc,
                cls.alternatives,
                cls.suffix,
            )

        else:
            mcs._base_cls = cls

        return cls
Example #16
File: envs.py Project: setton/langkit
    def __init__(self, *actions: EnvAction) -> None:
        """
        :param actions: A list of environment actions to execute.
        """
        self.location = extract_library_location()

        self.ast_node: Optional[ASTNodeType] = None
        """
        ASTNodeType subclass associated to this environment specification.
        Initialized when creating ASTNodeType subclasses.
        """

        self.initial_env: Optional[SetInitialEnv] = None
        """
        The SetInitialEnv action associated to this EnvSpec, if any.
        Initialized during the parsing of actions.
        """

        # Analyze the given list of actions
        self._parse_actions(list(actions))

        self.adds_env = any(isinstance(a, AddEnv) for a in self.pre_actions)
        """
Example #17
    def __init__(self, *actions):
        """
        :param list[EnvAction] actions: A list of environment actions to
            execute.
        """
        self.location = extract_library_location()

        self.ast_node = None
        """
        ASTNodeType subclass associated to this environment specification.
        Initialized when creating ASTNodeType subclasses.
        :type: langkit.compiled_types.ASTNodeType
        """

        # Analyze the given list of actions
        self._parse_actions(actions)

        # Property that returns the initial environment that environment
        # actions will use.
        self.initial_env_prop = None
        ":type: PropertyDef"

        self.adds_env = any(isinstance(a, AddEnv) for a in self.pre_actions)
        """
Example #18
def Pick(*parsers):
    """
    Utility around Row and Extract that automatically scans a Row, removes
    tokens and ignored sub-parses, and extracts the only significant
    sub-result.

    If there are several significant sub-results, an error is raised.
    """
    location = extract_library_location()
    parsers = [resolve(p) for p in parsers if p]
    pick_parser_idx = -1
    for i, p in enumerate(parsers):
        if p.discard():
            continue
        with Context("", location):
            check_source_language(
                pick_parser_idx == -1,
                "Pick parser can have only one sub-parser that is not a token",
                Severity.non_blocking_error)
        pick_parser_idx = i

    if pick_parser_idx == -1:
        return Row(*parsers)
    else:
        return Row(*parsers)[pick_parser_idx]
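
A hypothetical use in a grammar (the `expr` parser and string-as-token notation are assumptions): extracting the expression between parentheses while the token sub-parsers are discarded:

parenthesized = Pick('(', expr, ')')
# Assumed to behave like Row('(', expr, ')')[1]: 'expr' is the only
# sub-parser whose result is not discarded, so Pick extracts it.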
Example #19
 def __init__(self, matcher, action, location=None):
     self.matcher = matcher
     self.action = action
     self.location = location or extract_library_location()
Example #20
 def __init__(self, main_rule_name):
     self.rules = {}
     self.main_rule_name = main_rule_name
     self.location = extract_library_location()
Example #21
 def __init__(self, location=None):
     self.location = location or extract_library_location()
Example #22
    def run(self, argv=None):
        parsed_args = self.args_parser.parse_args(argv)

        from langkit import diagnostics
        diagnostics.EMIT_PARSABLE_ERRORS = parsed_args.parsable_errors

        if parsed_args.profile:
            import cProfile
            import pstats

            pr = cProfile.Profile()
            pr.enable()

        # If asked to, setup the exception hook as a last-chance handler to
        # invoke a debugger in case of uncaught exception.
        if parsed_args.debug:
            # Try to use IPython's debugger if it is available, otherwise
            # fallback to PDB.
            try:
                # noinspection PyPackageRequirements
                from IPython.core import ultratb
            except ImportError:
                ultratb = None  # To keep PyCharm happy...

                def excepthook(type, value, tb):
                    import traceback
                    traceback.print_exception(type, value, tb)
                    pdb.post_mortem(tb)

                sys.excepthook = excepthook
            else:
                sys.excepthook = ultratb.FormattedTB(mode='Verbose',
                                                     color_scheme='Linux',
                                                     call_pdb=1)
            del ultratb

        self.dirs.set_build_dir(parsed_args.build_dir)
        install_dir = getattr(parsed_args, 'install-dir', None)
        if install_dir:
            self.dirs.set_install_dir(install_dir)

        # Compute code coverage in the code generator if asked to
        if parsed_args.func == self.do_generate and parsed_args.coverage:
            try:
                cov = Coverage(self.dirs)
            except Exception as exc:
                import traceback
                print('Coverage not available:', file=sys.stderr)
                traceback.print_exc()
                sys.exit(1)

            cov.start()
        else:
            cov = None

        # noinspection PyBroadException
        try:
            parsed_args.func(parsed_args)
        except DiagnosticError:
            if parsed_args.debug:
                raise
            print(col('Errors, exiting', Colors.FAIL), file=sys.stderr)
            sys.exit(1)
        except Exception as e:
            if parsed_args.debug:
                raise
            import traceback
            ex_type, ex, tb = sys.exc_info()
            if e.args and e.args[0] == 'invalid syntax':
                loc = Location(e.filename, e.lineno, "")
            else:
                loc = extract_library_location(traceback.extract_tb(tb))
            with Context("", loc, "recovery"):
                check_source_language(False, str(e), do_raise=False)
            if parsed_args.verbosity.debug:
                traceback.print_exc()

            print(col('Internal error! Exiting', Colors.FAIL), file=sys.stderr)
            sys.exit(1)
Example #23
    def run(self, argv=None):
        parsed_args = self.args_parser.parse_args(argv)

        for trace in parsed_args.trace:
            print("Trace {} is activated".format(trace))
            Log.enable(trace)

        Diagnostics.set_style(parsed_args.diagnostic_style)

        if parsed_args.profile:
            import cProfile
            import pstats

            pr = cProfile.Profile()
            pr.enable()

        # Set the verbosity
        self.verbosity = parsed_args.verbosity

        self.no_ada_api = parsed_args.no_ada_api

        # If asked to, setup the exception hook as a last-chance handler to
        # invoke a debugger in case of uncaught exception.
        if parsed_args.debug:
            # Try to use IPython's debugger if it is available, otherwise
            # fallback to PDB.
            try:
                # noinspection PyPackageRequirements
                from IPython.core import ultratb
            except ImportError:
                ultratb = None  # To keep PyCharm happy...

                def excepthook(type, value, tb):
                    traceback.print_exception(type, value, tb)
                    pdb.post_mortem(tb)

                sys.excepthook = excepthook
            else:
                sys.excepthook = ultratb.FormattedTB(mode='Verbose',
                                                     color_scheme='Linux',
                                                     call_pdb=1)
            del ultratb

        self.dirs.set_build_dir(parsed_args.build_dir)
        install_dir = getattr(parsed_args, 'install-dir', None)
        if install_dir:
            self.dirs.set_install_dir(install_dir)

        if getattr(parsed_args, 'list_warnings', False):
            WarningSet.print_list()
            return

        # noinspection PyBroadException
        try:
            parsed_args.func(parsed_args)

        except DiagnosticError:
            if parsed_args.debug:
                raise
            if parsed_args.verbosity.debug or parsed_args.full_error_traces:
                traceback.print_exc()
            print(col('Errors, exiting', Colors.FAIL), file=sys.stderr)
            sys.exit(1)

        except Exception as e:
            if parsed_args.debug:
                raise
            ex_type, ex, tb = sys.exc_info()

            # If we have a syntax error, we know for sure the last stack frame
            # points to the code that must be fixed. Otherwise, point to the
            # top-most stack frame that does not belong to Langkit.
            if e.args and e.args[0] == 'invalid syntax':
                loc = Location(e.filename, e.lineno)
            else:
                loc = extract_library_location(traceback.extract_tb(tb))
            with Context("", loc, "recovery"):
                check_source_language(False, str(e), do_raise=False)

            # Keep Langkit bug "pretty" for users: display the Python stack
            # trace only when requested.
            if parsed_args.verbosity.debug or parsed_args.full_error_traces:
                traceback.print_exc()

            print(col('Internal error! Exiting', Colors.FAIL), file=sys.stderr)
            sys.exit(1)

        finally:
            if parsed_args.profile:
                pr.disable()
                ps = pstats.Stats(pr)
                ps.dump_stats('langkit.prof')
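
The last-chance debugger hook that these run methods install can be reduced to a standalone sketch, directly following the fallback branch above:

import pdb
import sys
import traceback

def excepthook(typ, value, tb):
    # Print the traceback, then drop into a post-mortem pdb session
    traceback.print_exception(typ, value, tb)
    pdb.post_mortem(tb)

sys.excepthook = excepthook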
Example #24
File: dsl.py Project: GlancingMind/langkit
    def process_subclass(mcs, name, bases, dct, is_root):
        from langkit.envs import EnvSpec

        location = extract_library_location()
        base = bases[0]
        is_list_type = issubclass(base, _ASTNodeList)
        is_root_list_type = base is _ASTNodeList

        node_ctx = Context('in {}'.format(name), location)

        with node_ctx:
            check_source_language(
                len(bases) == 1, 'ASTNode subclasses must have exactly one'
                ' base class')
            if mcs.root_type is not None:
                check_source_language(
                    base is not ASTNode,
                    'Only one class can derive from ASTNode (previous was:'
                    ' {})'.format(mcs.root_type.__name__))

            env_spec = dct.pop('env_spec', None)
            check_source_language(
                env_spec is None or isinstance(env_spec, EnvSpec),
                'Invalid environment specification: {}'.format(env_spec))

            annotations = dct.pop('annotations', None)

        # If this is a list type, determine the corresponding element type
        if is_root_list_type:
            element_type = dct.pop('_element_type')
        elif is_list_type:
            element_type = base._element_type
        else:
            element_type = None

        # Determine if this is a token node
        with node_ctx:
            is_token_node = dct.pop('token_node', None)
            check_source_language(
                is_token_node is None or isinstance(is_token_node, bool),
                'The "token_node" field, when present, must contain a boolean')

            # If "token_node" allocation is left to None, inherit it (default
            # is False).
            if is_token_node is None:
                is_token_node = bool(base._is_token_node)

            # Otherwise, make sure that all derivations of a token node are
            # token nodes themselves.
            elif not is_token_node:
                check_source_language(
                    is_token_node == base._is_token_node,
                    '"token_node" annotation inconsistent with inherited AST'
                    ' node')

        fields = ASTNode.collect_fields(name, location, dct, AbstractNodeData)

        # AST list types are not allowed to have syntax fields
        if is_list_type:
            syntax_fields = [f_n for f_n, f_v in fields if not f_v.is_property]
            with node_ctx:
                check_source_language(
                    not syntax_fields,
                    'ASTNode list types are not allowed to have fields'
                    ' (here: {})'.format(', '.join(sorted(syntax_fields))))

        DSLType._import_base_type_info(name, location, dct)
        dct['_fields'] = fields
        dct['_base'] = base
        dct['_env_spec'] = env_spec
        dct['_is_token_node'] = is_token_node

        # Make sure subclasses don't inherit the "list_type" cache from their
        # base classes.
        dct['_list_type'] = None
        dct['_element_type'] = element_type
        dct['_annotations'] = annotations
Example #25
File: envs.py Project: setton/langkit
 def __init__(self) -> None:
     self.location = extract_library_location()
Example #26
File: libmanage.py Project: Roldak/langkit
    def run_no_exit(self, argv: Opt[List[str]] = None) -> int:
        parsed_args, unknown_args = self.args_parser.parse_known_args(argv)

        for trace in parsed_args.trace:
            print("Trace {} is activated".format(trace))
            Log.enable(trace)

        Diagnostics.set_style(parsed_args.diagnostic_style)

        if parsed_args.profile:
            import cProfile
            import pstats

            pr = cProfile.Profile()
            pr.enable()

        # Set the verbosity
        self.verbosity = parsed_args.verbosity

        self.enable_build_warnings = getattr(parsed_args,
                                             "enable_build_warnings", False)

        # If there is no build_mode (i.e. we're not running a command that
        # requires it), we still need one to call gnatpp, so set it to a dummy
        # build mode.
        self.build_mode = getattr(parsed_args, "build_mode",
                                  self.BUILD_MODES[0])

        self.no_ada_api = parsed_args.no_ada_api

        # If asked to, setup the exception hook as a last-chance handler to
        # invoke a debugger in case of uncaught exception.
        if parsed_args.debug:
            # Try to use IPython's debugger if it is available, otherwise
            # fallback to PDB.
            try:
                # noinspection PyPackageRequirements
                from IPython.core import ultratb
            except ImportError:

                def excepthook(typ: Type[BaseException], value: BaseException,
                               tb: TracebackType) -> Any:
                    traceback.print_exception(typ, value, tb)
                    pdb.post_mortem(tb)

                sys.excepthook = excepthook
            else:
                sys.excepthook = ultratb.FormattedTB(mode='Verbose',
                                                     color_scheme='Linux',
                                                     call_pdb=1)

        self.dirs.set_build_dir(parsed_args.build_dir)
        install_dir = getattr(parsed_args, 'install-dir', None)
        if install_dir:
            self.dirs.set_install_dir(install_dir)

        if getattr(parsed_args, 'list_warnings', False):
            WarningSet.print_list()
            return 0

        # noinspection PyBroadException
        try:
            parsed_args.func(parsed_args, unknown_args)
            return 0

        except DiagnosticError:
            if parsed_args.debug:
                raise
            if parsed_args.verbosity.debug or parsed_args.full_error_traces:
                traceback.print_exc()
            print(col('Errors, exiting', Colors.FAIL))
            return 1

        except Exception as e:
            if parsed_args.debug:
                raise
            ex_type, ex, tb = sys.exc_info()

            # If we have a syntax error, we know for sure the last stack frame
            # points to the code that must be fixed. Otherwise, point to the
            # top-most stack frame that does not belong to Langkit.
            if e.args and e.args[0] == 'invalid syntax':
                assert isinstance(e, SyntaxError)
                loc = Location(cast(str, e.filename), cast(int, e.lineno))
            else:
                loc = cast(Location,
                           extract_library_location(traceback.extract_tb(tb)))
            with diagnostic_context(loc):
                check_source_language(False, str(e), do_raise=False)

            # Keep Langkit bug "pretty" for users: display the Python stack
            # trace only when requested.
            if parsed_args.verbosity.debug or parsed_args.full_error_traces:
                traceback.print_exc()

            print(col('Internal error! Exiting', Colors.FAIL))
            return 1

        finally:
            if parsed_args.profile:
                pr.disable()
                ps = pstats.Stats(pr)
                ps.dump_stats('langkit.prof')
Example #27
    def __init__(self, *tokens: TokenAction):
        self.location = extract_library_location()
        self.tokens = set(tokens)

        self.name: Optional[Name] = None
        """
Example #28
 def __init__(self, location: Optional[Location] = None):
     self.location = location or extract_library_location()
Example #29
    def process_subclass(mcs, name, bases, dct, is_root):
        from langkit.envs import EnvSpec

        location = extract_library_location()
        base = bases[0]
        is_list_type = issubclass(base, _ASTNodeList)
        is_root_list_type = base is _ASTNodeList

        node_ctx = Context('in {}'.format(name), location)

        with node_ctx:
            check_source_language(
                len(bases) == 1, 'ASTNode subclasses must have exactly one'
                ' base class')
            if mcs.root_type is not None:
                check_source_language(
                    base is not ASTNode,
                    'Only one class can derive from ASTNode (previous was:'
                    ' {})'.format(mcs.root_type.__name__))

            env_spec = dct.pop('env_spec', None)
            check_source_language(
                env_spec is None or isinstance(env_spec, EnvSpec),
                'Invalid environment specification: {}'.format(env_spec))

            annotations = dct.pop('annotations', None)

        # If this is a list type, determine the corresponding element type
        if is_list_type:
            element_type = (dct.pop('_element_type')
                            if is_root_list_type else base._element_type)
            allowed_field_types = PropertyDef
        else:
            element_type = None
            allowed_field_types = AbstractNodeData

        # Determine if this is a token node
        with node_ctx:
            is_token_node = dct.pop('token_node', None)
            check_source_language(
                is_token_node is None or isinstance(is_token_node, bool),
                'The "token_node" field, when present, must contain a boolean')

            # If "token_node" allocation is left to None, inherit it (default
            # is False).
            if is_token_node is None:
                is_token_node = bool(base._is_token_node)

            if is_token_node:
                allowed_field_types = (_UserField, PropertyDef)
            else:
                # Make sure that all derivations of a token node are token
                # nodes themselves.
                check_source_language(
                    not base._is_token_node,
                    '"token_node" annotation inconsistent with inherited AST'
                    ' node')

        # Handle enum nodes
        with node_ctx:
            # Forbid inheriting from an enum node
            check_source_language(
                not base._is_enum_node,
                'Inheriting from an enum node is forbidden.')

            # Determine if this is an enum node
            is_enum_node = dct.pop('enum_node', False)
            check_source_language(
                isinstance(is_enum_node, bool),
                'The "enum_node" field, when present, must contain a boolean')

            if is_enum_node:
                qualifier = dct.pop('qualifier', False)
                if qualifier:
                    alternatives = ['present', 'absent']
                else:
                    alternatives = dct.pop('alternatives', None)
                    check_source_language(alternatives is not None,
                                          'Missing "alternatives" field')
                    check_source_language(
                        isinstance(alternatives, list)
                        and all(isinstance(alt, str) for alt in alternatives),
                        'The "alternatives" field must contain a list of '
                        'strings')

                alts = [
                    _EnumNodeAlternative(names.Name.from_lower(alt))
                    for alt in alternatives
                ]

                allowed_field_types = (_UserField, PropertyDef)

        fields = ASTNode.collect_fields(name, location, dct,
                                        allowed_field_types)

        DSLType._import_base_type_info(name, location, dct)

        if is_enum_node:
            mcs.import_enum_node_attributes(dct, qualifier, alts, fields)

        dct['_fields'] = fields
        dct['_base'] = base
        dct['_env_spec'] = env_spec
        dct['_is_token_node'] = is_token_node
        dct['_is_enum_node'] = is_enum_node

        # Make sure subclasses don't inherit the "list_type" cache from their
        # base classes.
        dct['_list_type'] = None
        dct['_element_type'] = element_type
        dct['_annotations'] = annotations

        cls = type.__new__(mcs, name, bases, dct)

        mcs.astnode_types.append(cls)

        # Create the corresponding ASTNodeType subclass
        if cls._base is _ASTNodeList:
            # Only root list types are supposed to directly subclass
            # _ASTNodeList.
            element_type = cls._element_type._resolve()
            assert element_type
            astnode_type = element_type.list
        else:
            astnode_type = ASTNodeType(
                cls._name,
                cls._location,
                cls._doc,
                base=None if is_root else cls._base._resolve(),
                fields=cls._fields,
                env_spec=cls._env_spec,
                annotations=cls._annotations,

                # Only enum nodes are abstract at this point
                is_abstract=cls._is_enum_node,
                is_enum_node=cls._is_enum_node,
                is_bool_node=cls._is_enum_node and cls._qualifier,
                is_token_node=cls._is_token_node)

        astnode_type.dsl_decl = cls
        cls._type = astnode_type

        if is_enum_node:
            mcs.create_enum_node_alternatives(cls, astnode_type)

        return cls
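
For context, hypothetical DSL declarations that would exercise the enum-node branch above (the class and alternative names are invented):

class Visibility(ASTNode):
    enum_node = True
    alternatives = ['public', 'private']

class HasAbstract(ASTNode):
    enum_node = True
    qualifier = True  # implies the 'present'/'absent' alternatives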
Example #30
File: parsers.py Project: AdaCore/langkit
 def __init__(self, main_rule_name):
     self.rules = {}
     self.main_rule_name = main_rule_name
     self.location = extract_library_location()