def error_context(self):
    """
    Return an error context manager with parameters set for the grammar
    definition.
    """
    return Context("In definition of grammar rule {}".format(self.name),
                   self.location)
def diagnostic_context(self):
    """
    Diagnostic context for env specs.
    """
    ctx_message = 'in env spec'
    return Context(ctx_message, self.location)
def __new__(mcs, name, bases, dct):
    # Don't do anything special for Enum itself
    if not mcs.base_enum_type:
        result = type.__new__(mcs, name, bases, dct)
        mcs.base_enum_type = result
        return result

    location = extract_library_location()
    with Context('in {}'.format(name), location):
        check_source_language(
            bases == (Enum, ),
            'Enumeration types must derive from and only from Enum'
        )

        # Get the list of values, initializing their name
        values = []
        for key, value in dct.items():
            # Ignore __special__ fields
            if key.startswith('__') and key.endswith('__'):
                continue

            check_source_language(
                isinstance(value, EnumValue),
                'Enum subclass can only contain EnumValue instances'
                ' (here, {} is {})'.format(key, value)
            )
            check_source_language(
                value._type is None,
                'EnumValue instances cannot be used in multiple Enum'
                ' subclasses (here: {})'.format(key)
            )
            value._name = names.Name.from_lower(key)
            values.append(value)

        values.sort(key=lambda v: v._id)
        dct['_values'] = values

    DSLType._import_base_type_info(name, location, dct)

    # Create the subclass and associate values to it
    cls = type.__new__(mcs, name, bases, dct)
    for value in cls._values:
        value._type = cls

    # Now create the CompiledType instance and register it where needed
    enum_type = EnumType(cls._name, cls._location, cls._doc,
                         [v._name for v in cls._values])
    enum_type.dsl_decl = cls
    cls._type = enum_type

    # Associate the enumeration values in the DSL/Langkit internals
    for dsl_val, internal_val in zip(cls._values, enum_type.values):
        dsl_val._type = cls
        dsl_val._value = internal_val
        internal_val.dsl_decl = dsl_val

    return cls
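# A minimal usage sketch for the metaclass above, assuming the `Enum` and
# `EnumValue` DSL classes it governs are importable from `langkit.dsl`;
# the `Color` type and its values are hypothetical. Merely defining the
# subclass triggers the metaclass: it checks the base classes, collects
# the EnumValue instances, and builds the corresponding EnumType.

from langkit.dsl import Enum, EnumValue


class Color(Enum):
    red = EnumValue()
    green = EnumValue()
    blue = EnumValue()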
def process_subclass(mcs, name, bases, dct, is_root):
    from langkit.envs import EnvSpec

    location = extract_library_location()
    base = bases[0]
    is_list_type = issubclass(base, _ASTNodeList)
    is_root_list_type = base is _ASTNodeList

    node_ctx = Context('in {}'.format(name), location)

    with node_ctx:
        check_source_language(
            len(bases) == 1,
            'ASTNode subclasses must have exactly one base class')
        if mcs.root_type is not None:
            check_source_language(
                base is not ASTNode,
                'Only one class can derive from ASTNode (previous was:'
                ' {})'.format(mcs.root_type.__name__))

        env_spec = dct.pop('env_spec', None)
        check_source_language(
            env_spec is None or isinstance(env_spec, EnvSpec),
            'Invalid environment specification: {}'.format(env_spec))

        annotations = dct.pop('annotations', None)

    # If this is a list type, determine the corresponding element type
    if is_root_list_type:
        element_type = dct.pop('_element_type')
    elif is_list_type:
        element_type = base._element_type
    else:
        element_type = None

    fields = ASTNode.collect_fields(name, location, dct, AbstractNodeData)

    # AST list types are not allowed to have syntax fields
    if is_list_type:
        syntax_fields = [f_n for f_n, f_v in fields
                         if not f_v.is_property]
        with node_ctx:
            check_source_language(
                not syntax_fields,
                'ASTNode list types are not allowed to have fields'
                ' (here: {})'.format(', '.join(sorted(syntax_fields))))

    DSLType._import_base_type_info(name, location, dct)
    dct['_fields'] = fields
    dct['_base'] = base
    dct['_env_spec'] = env_spec

    # Make sure subclasses don't inherit the "list_type" cache from their
    # base classes.
    dct['_list_type'] = None

    dct['_element_type'] = element_type
    dct['_annotations'] = annotations
def collect_fields(
        cls,
        owning_type: str,
        location: Location,
        dct: Dict[str, Any],
        field_cls: Union[Type[AbstractNodeData],
                         Tuple[Type[AbstractNodeData], ...]],
        only_null_fields: bool) -> List[Tuple[str, AbstractNodeData]]:
    """
    Metaclass helper. Excluding __special__ entries, make sure all
    entries in `dct` are instances of `field_cls` and return them as an
    annotated list: (name, field).

    This ensures that all fields are associated with a legal name, and
    records this name in the field instances.

    :param owning_type: Name of the type that will own the fields. Used
        for diagnostic message formatting purposes.
    :param location: Location for the declaration of the owning type.
    :param dct: Input class dictionary.
    :param field_cls: AbstractNodeData subclass, or tuple of subclasses.
    :param only_null_fields: Whether syntax fields, if accepted, must be
        null.
    """
    result = []
    for f_n, f_v in dct.items():
        # Ignore __special__ fields
        if f_n.startswith('__') and f_n.endswith('__'):
            continue

        with Context(location):
            expected_types = (field_cls
                              if isinstance(field_cls, tuple)
                              else (field_cls, ))
            check_source_language(
                isinstance(f_v, field_cls),
                'Field {f_name} is a {f_type}, but only instances of'
                ' {exp_type} subclasses are allowed in {metatype}'
                ' subclasses'.format(
                    f_name=f_n,
                    f_type=type(f_v),
                    exp_type='/'.join(t.__name__ for t in expected_types),
                    metatype=cls.__name__,
                ))
            check_source_language(
                not f_n.startswith('_'),
                'Underscore-prefixed field names are not allowed')
            check_source_language(
                f_n.lower() == f_n,
                'Field names must be lower-case')
            if only_null_fields and isinstance(f_v, _Field):
                check_source_language(f_v.null,
                                      'Only null fields allowed here')

        result.append((f_n, f_v))

    # Sort fields by creation time order so that users get fields in the
    # same order as they were declared in the DSL.
    result.sort(key=lambda kv: kv[1]._serial)
    return result
def process_subclass(mcs, name, bases, dct):
    location = extract_library_location()
    with Context('in {}'.format(name), location):
        check_source_language(
            bases == (Struct, ),
            'Struct subclasses must derive from Struct only',
        )

    fields = Struct.collect_fields(name, location, dct, _UserField)
    DSLType._import_base_type_info(name, location, dct)
    dct['_fields'] = fields
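# A minimal sketch of what the Struct metaclass hook above accepts,
# assuming `Struct`, `UserField` and the `T` type repository from
# `langkit.dsl`; the `Entry` struct and its field types are
# hypothetical. Only user fields may appear in the class body: anything
# else is rejected by collect_fields.

from langkit.dsl import Struct, T, UserField


class Entry(Struct):
    key = UserField(type=T.Symbol)
    value = UserField(type=T.Int)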
def collect_fields(cls, owning_type, location, dct, field_cls):
    """
    Metaclass helper. Excluding __special__ entries, make sure all
    entries in `dct` are instances of `field_cls` and return them as an
    annotated list: (name, field).

    This ensures that all fields are associated with a legal name, and
    records this name in the field instances.

    :param str owning_type: Name of the type that will own the fields.
        Used for diagnostic message formatting purposes.
    :param langkit.diagnostics.Location location: Location for the
        declaration of the owning type.
    :param dict[str, T] dct: Input class dictionary.
    :param AbstractNodeData|tuple(AbstractNodeData) field_cls:
        AbstractNodeData subclass, or tuple of subclasses.
    :rtype: list[(str, AbstractNodeData)]
    """
    result = []
    for f_n, f_v in dct.items():
        # Ignore __special__ fields
        if f_n.startswith('__') and f_n.endswith('__'):
            continue

        with Context('in {}.{}'.format(owning_type, f_n), location):
            expected_types = (field_cls
                              if isinstance(field_cls, tuple)
                              else (field_cls, ))
            check_source_language(
                isinstance(f_v, field_cls),
                'Field {f_name} is a {f_type}, but only instances of'
                ' {exp_type} subclasses are allowed in {metatype}'
                ' subclasses'.format(
                    f_name=f_n,
                    f_type=type(f_v),
                    exp_type='/'.join(t.__name__ for t in expected_types),
                    metatype=cls.__name__,
                )
            )
            check_source_language(
                not f_n.startswith('_'),
                'Underscore-prefixed field names are not allowed'
            )
            check_source_language(
                f_n.lower() == f_n,
                'Field names must be lower-case'
            )

        result.append((f_n, f_v))

    # Sort fields by creation time order so that users get fields in the
    # same order as they were declared in the DSL.
    result.sort(key=lambda kv: kv[1]._serial)
    return result
def _check_decorator_use(decorator, expected_cls, cls):
    """
    Helper for class decorators below. Raise a diagnostic error if
    `cls`, which is the input parameter of `decorator`, is not a
    subclass of `expected_cls`.
    """
    location = extract_library_location()
    with Context(location):
        check_source_language(
            issubtype(cls, expected_cls),
            'The {} decorator must be called on a {} subclass'
            ' (here, got: {})'.format(decorator.__name__,
                                      expected_cls.__name__,
                                      cls))
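# A sketch of how a class decorator would use the helper above. The
# `abstract` decorator below is illustrative (langkit defines a similar
# one) and `ASTNode` is the DSL root node class: applying the decorator
# to anything that is not an ASTNode subclass raises a diagnostic error
# located at the caller's source position.

def abstract(cls):
    """
    Decorator to tag an ASTNode subclass as abstract.
    """
    _check_decorator_use(abstract, ASTNode, cls)
    cls._is_abstract = True
    return cls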
def add_rules(self, **kwargs):
    """
    Add rules to the grammar. Each keyword argument provides the name
    for the rule it defines.

    :param dict[str, Parser] kwargs: The rules to add to the grammar.
    """
    import ast
    loc = extract_library_location()

    class GetTheCall(ast.NodeVisitor):
        """
        Helper visitor that looks for the corresponding add_rules call
        in the source, so that we can extract the precise line where
        each rule is added.
        """

        def __init__(self):
            self.the_call = None

        def visit_Call(self, call):
            if (isinstance(call.func, ast.Attribute)
                    and call.func.attr == 'add_rules'
                    # Traceback locations are very imprecise, and
                    # Python's ast doesn't have an end location for
                    # nodes, so we keep taking add_rules calls: the
                    # last one is necessarily the good one.
                    and call.lineno <= loc.line):
                self.the_call = call

    the_call = GetTheCall()
    with open(loc.file) as f:
        file_ast = ast.parse(f.read(), f.name)
        the_call.visit(file_ast)

    # Use the keyword arguments to find the precise line where each
    # rule was declared.
    keywords = {kw.arg: kw.value for kw in the_call.the_call.keywords}

    for name, rule in kwargs.items():
        rule.set_name(names.Name.from_lower(name))
        rule.set_grammar(self)
        rule.set_location(Location(loc.file, keywords[name].lineno, ""))
        rule.is_root = True

        with Context("In definition of rule '{}'".format(name), loc):
            check_source_language(
                name not in self.rules,
                "Rule '{}' is already present in the grammar".format(name))

        self.rules[name] = rule
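# A hedged usage sketch for add_rules: rule names come from the keyword
# argument names, and the AST pass above recovers each rule's line
# number from this very call site. The rule bodies below are
# placeholders; attribute access on the grammar (g.item) yields a lazy
# reference, so rules may refer to each other in any order.

from langkit.parsers import Grammar, List, Or

g = Grammar('main_rule')
g.add_rules(
    main_rule=List(g.item),
    item=Or(g.item_a, g.item_b),  # item_a/item_b: hypothetical rules
)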
def __new__(mcs, name, bases, dct):
    # Don't do anything for EnumNode itself: it's just a placeholder
    if bases == (BaseStruct, ):
        return type.__new__(mcs, name, bases, dct)

    location = extract_library_location()
    with Context('in {}'.format(name), location):

        qualifier = dct.pop('qualifier', False)
        if qualifier:
            alternatives = ['present', 'absent']
        else:
            alternatives = dct.pop('alternatives', None)
            check_source_language(alternatives is not None,
                                  'Missing "alternatives" field')
            check_source_language(
                isinstance(alternatives, list)
                and all(isinstance(alt, str) for alt in alternatives),
                'The "alternatives" field must contain a list of strings'
            )

        alts = [EnumNode.Alternative(names.Name.from_lower(alt))
                for alt in alternatives]
        fields = EnumNode.collect_fields(name, location, dct,
                                         (_UserField, PropertyDef))

    DSLType._import_base_type_info(name, location, dct)

    dct['_fields'] = fields
    dct['_alternatives'] = alts
    dct['_qualifier'] = qualifier
    if qualifier:
        dct['_alt_present'], dct['_alt_absent'] = alts

    # Make Alternative instances available as EnumNode class attributes
    # for a convenient way to create parsers for them.
    for alt in alts:
        attr_name = (names.Name('alt') + alt.name).lower
        dct[attr_name] = alt

    cls = type.__new__(mcs, name, bases, dct)
    mcs.enum_types.append(cls)

    for alt in alts:
        alt._enum_node_cls = cls

    return cls
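# Usage sketches for the EnumNode metaclass above, assuming `EnumNode`
# is importable from `langkit.dsl`; both node types are hypothetical.
# The metaclass turns each alternative into an `alt_*` class attribute,
# which grammars then use to instantiate that alternative (e.g.
# Visibility.alt_public(...)).

from langkit.dsl import EnumNode


class Visibility(EnumNode):
    alternatives = ['public', 'private']


class Overriding(EnumNode):
    # With qualifier=True, the alternatives are implicitly
    # ['present', 'absent'].
    qualifier = True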
def Pick(*parsers):
    """
    Utility around Row and Extract that will automatically scan a Row,
    remove tokens and ignored sub-parsers, and extract the only
    significant sub-result. If there are several significant
    sub-results, an error is raised.
    """
    location = extract_library_location()
    parsers = [resolve(p) for p in parsers if p]
    pick_parser_idx = -1
    for i, p in enumerate(parsers):
        if p.discard():
            continue
        with Context("", location):
            check_source_language(
                pick_parser_idx == -1,
                "Pick parser can have only one sub-parser that is not"
                " a token",
                Severity.non_blocking_error)
        pick_parser_idx = i

    if pick_parser_idx == -1:
        return Row(*parsers)
    else:
        return Row(*parsers)[pick_parser_idx]
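# A hedged illustration of what Pick does; the `expr` rule reference is
# hypothetical, so this is shown as a commented sketch rather than
# runnable code:
#
#     parenthesized = Pick('(', expr, ')')
#
# Both '(' and ')' parse tokens, which discard() filters out, so the
# resulting parser yields only the result of `expr`. With no
# significant sub-parser at all, Pick returns the whole Row; with two
# or more, it reports a (non-blocking) error.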
def diagnostic_context(self):
    return Context(
        'In definition of token family {}'.format(self.dsl_name),
        self.location)
def run(self, argv=None):
    parsed_args = self.args_parser.parse_args(argv)

    for trace in parsed_args.trace:
        print("Trace {} is activated".format(trace))
        Log.enable(trace)

    Diagnostics.set_style(parsed_args.diagnostic_style)

    if parsed_args.profile:
        import cProfile
        import pstats

        pr = cProfile.Profile()
        pr.enable()

    # Set the verbosity
    self.verbosity = parsed_args.verbosity

    self.no_ada_api = parsed_args.no_ada_api

    # If asked to, set up the exception hook as a last-chance handler
    # to invoke a debugger in case of uncaught exception.
    if parsed_args.debug:
        # Try to use IPython's debugger if it is available, otherwise
        # fall back to PDB.
        try:
            # noinspection PyPackageRequirements
            from IPython.core import ultratb
        except ImportError:
            ultratb = None  # To keep PyCharm happy...

            def excepthook(type, value, tb):
                traceback.print_exception(type, value, tb)
                pdb.post_mortem(tb)
            sys.excepthook = excepthook
        else:
            sys.excepthook = ultratb.FormattedTB(
                mode='Verbose', color_scheme='Linux', call_pdb=1)
        del ultratb

    self.dirs.set_build_dir(parsed_args.build_dir)
    install_dir = getattr(parsed_args, 'install-dir', None)
    if install_dir:
        self.dirs.set_install_dir(install_dir)

    if getattr(parsed_args, 'list_warnings', False):
        WarningSet.print_list()
        return

    # noinspection PyBroadException
    try:
        parsed_args.func(parsed_args)

    except DiagnosticError:
        if parsed_args.debug:
            raise
        if parsed_args.verbosity.debug or parsed_args.full_error_traces:
            traceback.print_exc()
        print(col('Errors, exiting', Colors.FAIL), file=sys.stderr)
        sys.exit(1)

    except Exception as e:
        if parsed_args.debug:
            raise
        ex_type, ex, tb = sys.exc_info()

        # If we have a syntax error, we know for sure the last stack
        # frame points to the code that must be fixed. Otherwise, point
        # to the top-most stack frame that does not belong to Langkit.
        if e.args and e.args[0] == 'invalid syntax':
            loc = Location(e.filename, e.lineno)
        else:
            loc = extract_library_location(traceback.extract_tb(tb))
        with Context("", loc, "recovery"):
            check_source_language(False, str(e), do_raise=False)

        # Keep Langkit bugs "pretty" for users: display the Python
        # stack trace only when requested.
        if parsed_args.verbosity.debug or parsed_args.full_error_traces:
            traceback.print_exc()

        print(col('Internal error! Exiting', Colors.FAIL),
              file=sys.stderr)
        sys.exit(1)

    finally:
        if parsed_args.profile:
            pr.disable()
            ps = pstats.Stats(pr)
            ps.dump_stats('langkit.prof')
def run(self, argv=None):
    parsed_args = self.args_parser.parse_args(argv)

    from langkit import diagnostics
    diagnostics.EMIT_PARSABLE_ERRORS = parsed_args.parsable_errors

    if parsed_args.profile:
        import cProfile
        import pstats

        pr = cProfile.Profile()
        pr.enable()

    # If asked to, set up the exception hook as a last-chance handler
    # to invoke a debugger in case of uncaught exception.
    if parsed_args.debug:
        # Try to use IPython's debugger if it is available, otherwise
        # fall back to PDB.
        try:
            # noinspection PyPackageRequirements
            from IPython.core import ultratb
        except ImportError:
            ultratb = None  # To keep PyCharm happy...

            def excepthook(type, value, tb):
                import traceback
                traceback.print_exception(type, value, tb)
                pdb.post_mortem(tb)
            sys.excepthook = excepthook
        else:
            sys.excepthook = ultratb.FormattedTB(
                mode='Verbose', color_scheme='Linux', call_pdb=1)
        del ultratb

    self.dirs.set_build_dir(parsed_args.build_dir)
    install_dir = getattr(parsed_args, 'install-dir', None)
    if install_dir:
        self.dirs.set_install_dir(install_dir)

    # Compute code coverage in the code generator if asked to
    if parsed_args.func == self.do_generate and parsed_args.coverage:
        try:
            cov = Coverage(self.dirs)
        except Exception:
            import traceback
            print('Coverage not available:', file=sys.stderr)
            traceback.print_exc()
            sys.exit(1)

        cov.start()
    else:
        cov = None

    # noinspection PyBroadException
    try:
        parsed_args.func(parsed_args)

    except DiagnosticError:
        if parsed_args.debug:
            raise
        print(col('Errors, exiting', Colors.FAIL), file=sys.stderr)
        sys.exit(1)

    except Exception as e:
        if parsed_args.debug:
            raise
        import traceback
        ex_type, ex, tb = sys.exc_info()

        if e.args and e.args[0] == 'invalid syntax':
            loc = Location(e.filename, e.lineno, "")
        else:
            loc = extract_library_location(traceback.extract_tb(tb))
        with Context("", loc, "recovery"):
            check_source_language(False, str(e), do_raise=False)

        if parsed_args.verbosity.debug:
            traceback.print_exc()

        print(col('Internal error! Exiting', Colors.FAIL),
              file=sys.stderr)
        sys.exit(1)
def diagnostic_context(self) -> Context:
    """
    Diagnostic context for env specs.
    """
    assert self.location is not None
    return Context(self.location)
def diagnostic_context(self) -> Context:
    assert self.location is not None
    return Context(self.location)
def process_subclass(mcs, name, bases, dct, is_root):
    from langkit.envs import EnvSpec

    location = extract_library_location()
    base = bases[0]
    is_list_type = issubclass(base, _ASTNodeList)
    is_root_list_type = base is _ASTNodeList

    node_ctx = Context('in {}'.format(name), location)

    with node_ctx:
        check_source_language(
            len(bases) == 1,
            'ASTNode subclasses must have exactly one base class')
        if mcs.root_type is not None:
            check_source_language(
                base is not ASTNode,
                'Only one class can derive from ASTNode (previous was:'
                ' {})'.format(mcs.root_type.__name__))

        env_spec = dct.pop('env_spec', None)
        check_source_language(
            env_spec is None or isinstance(env_spec, EnvSpec),
            'Invalid environment specification: {}'.format(env_spec))

        annotations = dct.pop('annotations', None)

    # If this is a list type, determine the corresponding element type
    if is_list_type:
        element_type = (dct.pop('_element_type')
                        if is_root_list_type
                        else base._element_type)
        allowed_field_types = PropertyDef
    else:
        element_type = None
        allowed_field_types = AbstractNodeData

    # Determine if this is a token node
    with node_ctx:
        is_token_node = dct.pop('token_node', None)
        check_source_language(
            is_token_node is None or isinstance(is_token_node, bool),
            'The "token_node" field, when present, must contain a'
            ' boolean')

        # If the "token_node" annotation is left to None, inherit it
        # (default is False).
        if is_token_node is None:
            is_token_node = bool(base._is_token_node)

        if is_token_node:
            allowed_field_types = (_UserField, PropertyDef)
        else:
            # Make sure that all derivations of a token node are token
            # nodes themselves.
            check_source_language(
                not base._is_token_node,
                '"token_node" annotation inconsistent with inherited'
                ' AST node')

    # Handle enum nodes
    with node_ctx:
        # Forbid inheriting from an enum node
        check_source_language(
            not base._is_enum_node,
            'Inheriting from an enum node is forbidden.')

        # Determine if this is an enum node
        is_enum_node = dct.pop('enum_node', False)
        check_source_language(
            isinstance(is_enum_node, bool),
            'The "enum_node" field, when present, must contain a'
            ' boolean')

        if is_enum_node:
            qualifier = dct.pop('qualifier', False)
            if qualifier:
                alternatives = ['present', 'absent']
            else:
                alternatives = dct.pop('alternatives', None)
                check_source_language(alternatives is not None,
                                      'Missing "alternatives" field')
                check_source_language(
                    isinstance(alternatives, list)
                    and all(isinstance(alt, str)
                            for alt in alternatives),
                    'The "alternatives" field must contain a list of'
                    ' strings')

            alts = [_EnumNodeAlternative(names.Name.from_lower(alt))
                    for alt in alternatives]
            allowed_field_types = (_UserField, PropertyDef)

    fields = ASTNode.collect_fields(name, location, dct,
                                    allowed_field_types)

    DSLType._import_base_type_info(name, location, dct)

    if is_enum_node:
        mcs.import_enum_node_attributes(dct, qualifier, alts, fields)

    dct['_fields'] = fields
    dct['_base'] = base
    dct['_env_spec'] = env_spec
    dct['_is_token_node'] = is_token_node
    dct['_is_enum_node'] = is_enum_node

    # Make sure subclasses don't inherit the "list_type" cache from
    # their base classes.
    dct['_list_type'] = None

    dct['_element_type'] = element_type
    dct['_annotations'] = annotations

    cls = type.__new__(mcs, name, bases, dct)
    mcs.astnode_types.append(cls)

    # Create the corresponding ASTNodeType subclass
    if cls._base is _ASTNodeList:
        # Only root list types are supposed to directly subclass
        # _ASTNodeList.
        element_type = cls._element_type._resolve()
        assert element_type
        astnode_type = element_type.list
    else:
        astnode_type = ASTNodeType(
            cls._name, cls._location, cls._doc,
            base=None if is_root else cls._base._resolve(),
            fields=cls._fields,
            env_spec=cls._env_spec,
            annotations=cls._annotations,

            # Only enum nodes are abstract at this point
            is_abstract=cls._is_enum_node,

            is_enum_node=cls._is_enum_node,
            is_bool_node=cls._is_enum_node and cls._qualifier,
            is_token_node=cls._is_token_node)

    astnode_type.dsl_decl = cls
    cls._type = astnode_type

    if is_enum_node:
        mcs.create_enum_node_alternatives(cls, astnode_type)

    return cls
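# A hedged sketch of the class-level annotations this hook consumes,
# assuming the usual `langkit.dsl` entry points; the node names are
# hypothetical. `token_node = True` restricts fields to user fields and
# properties, while `enum_node = True` routes the class through the
# enum-node code path above.

from langkit.dsl import ASTNode


class Identifier(ASTNode):
    token_node = True


class DeclVisibility(ASTNode):
    enum_node = True
    alternatives = ['public', 'private']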
def _diagnostic_context(cls):
    ctx_message = 'in {}'.format(cls._name.camel)
    return Context(ctx_message, cls._location)
def run_no_exit(self, argv: Opt[List[str]] = None) -> int:
    parsed_args, unknown_args = self.args_parser.parse_known_args(argv)

    for trace in parsed_args.trace:
        print("Trace {} is activated".format(trace))
        Log.enable(trace)

    Diagnostics.set_style(parsed_args.diagnostic_style)

    if parsed_args.profile:
        import cProfile
        import pstats

        pr = cProfile.Profile()
        pr.enable()

    # Set the verbosity
    self.verbosity = parsed_args.verbosity

    self.enable_build_warnings = getattr(
        parsed_args, "enable_build_warnings", False
    )

    # If there is no build_mode (i.e. we're not running a command that
    # requires it), we still need one to call gnatpp, so set it to a
    # dummy build mode.
    self.build_mode = getattr(
        parsed_args, "build_mode", self.BUILD_MODES[0]
    )

    self.no_ada_api = parsed_args.no_ada_api

    # If asked to, set up the exception hook as a last-chance handler
    # to invoke a debugger in case of uncaught exception.
    if parsed_args.debug:
        # Try to use IPython's debugger if it is available, otherwise
        # fall back to PDB.
        try:
            # noinspection PyPackageRequirements
            from IPython.core import ultratb
        except ImportError:

            def excepthook(typ: Type[BaseException],
                           value: BaseException,
                           tb: TracebackType) -> Any:
                traceback.print_exception(typ, value, tb)
                pdb.post_mortem(tb)

            sys.excepthook = excepthook
        else:
            sys.excepthook = ultratb.FormattedTB(
                mode='Verbose', color_scheme='Linux', call_pdb=1
            )

    self.dirs.set_build_dir(parsed_args.build_dir)
    install_dir = getattr(parsed_args, 'install-dir', None)
    if install_dir:
        self.dirs.set_install_dir(install_dir)

    if getattr(parsed_args, 'list_warnings', False):
        WarningSet.print_list()
        return 0

    # noinspection PyBroadException
    try:
        parsed_args.func(parsed_args, unknown_args)
        return 0

    except DiagnosticError:
        if parsed_args.debug:
            raise
        if parsed_args.verbosity.debug or parsed_args.full_error_traces:
            traceback.print_exc()
        print(col('Errors, exiting', Colors.FAIL), file=sys.stderr)
        return 1

    except Exception as e:
        if parsed_args.debug:
            raise
        ex_type, ex, tb = sys.exc_info()

        # If we have a syntax error, we know for sure the last stack
        # frame points to the code that must be fixed. Otherwise, point
        # to the top-most stack frame that does not belong to Langkit.
        if e.args and e.args[0] == 'invalid syntax':
            assert isinstance(e, SyntaxError)
            loc = Location(cast(str, e.filename), cast(int, e.lineno))
        else:
            loc = cast(Location,
                       extract_library_location(traceback.extract_tb(tb)))
        with Context(loc):
            check_source_language(False, str(e), do_raise=False)

        # Keep Langkit bugs "pretty" for users: display the Python
        # stack trace only when requested.
        if parsed_args.verbosity.debug or parsed_args.full_error_traces:
            traceback.print_exc()

        print(col('Internal error! Exiting', Colors.FAIL),
              file=sys.stderr)
        return 1

    finally:
        if parsed_args.profile:
            pr.disable()
            ps = pstats.Stats(pr)
            ps.dump_stats('langkit.prof')
def diagnostic_context(self):
    return Context('in access to .{}'.format(self.field),
                   self.location,
                   'abstract_expr')
def diagnostic_context(self):
    return Context(self.location)
def process_subclass(mcs, name, bases, dct, is_root):
    from langkit.envs import EnvSpec

    location = extract_library_location()
    base = bases[0]
    is_list_type = issubclass(base, _ASTNodeList)
    is_root_list_type = base is _ASTNodeList

    node_ctx = Context('in {}'.format(name), location)

    with node_ctx:
        check_source_language(
            len(bases) == 1,
            'ASTNode subclasses must have exactly one base class')
        if mcs.root_type is not None:
            check_source_language(
                base is not ASTNode,
                'Only one class can derive from ASTNode (previous was:'
                ' {})'.format(mcs.root_type.__name__))

        env_spec = dct.pop('env_spec', None)
        check_source_language(
            env_spec is None or isinstance(env_spec, EnvSpec),
            'Invalid environment specification: {}'.format(env_spec))

        annotations = dct.pop('annotations', None)

    # If this is a list type, determine the corresponding element type
    if is_root_list_type:
        element_type = dct.pop('_element_type')
    elif is_list_type:
        element_type = base._element_type
    else:
        element_type = None

    # Determine if this is a token node
    with node_ctx:
        is_token_node = dct.pop('token_node', None)
        check_source_language(
            is_token_node is None or isinstance(is_token_node, bool),
            'The "token_node" field, when present, must contain a'
            ' boolean')

        # If the "token_node" annotation is left to None, inherit it
        # (default is False).
        if is_token_node is None:
            is_token_node = bool(base._is_token_node)

        # Otherwise, make sure that all derivations of a token node are
        # token nodes themselves.
        elif not is_token_node:
            check_source_language(
                is_token_node == base._is_token_node,
                '"token_node" annotation inconsistent with inherited'
                ' AST node')

    fields = ASTNode.collect_fields(name, location, dct, AbstractNodeData)

    # AST list types are not allowed to have syntax fields
    if is_list_type:
        syntax_fields = [f_n for f_n, f_v in fields
                         if not f_v.is_property]
        with node_ctx:
            check_source_language(
                not syntax_fields,
                'ASTNode list types are not allowed to have fields'
                ' (here: {})'.format(', '.join(sorted(syntax_fields))))

    DSLType._import_base_type_info(name, location, dct)
    dct['_fields'] = fields
    dct['_base'] = base
    dct['_env_spec'] = env_spec
    dct['_is_token_node'] = is_token_node

    # Make sure subclasses don't inherit the "list_type" cache from
    # their base classes.
    dct['_list_type'] = None

    dct['_element_type'] = element_type
    dct['_annotations'] = annotations
def _diagnostic_context(cls):
    return Context(cls._location)
def context(self):
    return Context("In definition of grammar", self.location)
def diagnostic_context(self):
    """
    Diagnostic context for env specs.
    """
    return Context(self.location)