Example #1
def print_errors(recovered=False):
    c = context_cache[1] if recovered else context_stack
    ids = set()
    msgs = []

    # We'll iterate once over the diagnostic contexts in order to:
    # 1. Remove those with null locations.
    # 2. Only keep one per registered id.
    for ctx_msg, ctx_loc, id in reversed(c):
        if ctx_loc and (not id or id not in ids):
            msgs.append((ctx_msg, ctx_loc))
            ids.add(id)

    # Then we'll print the context we've kept
    last_file_info = ''
    for ctx_msg, ctx_loc in reversed(msgs):
        # Show the file information only once when several consecutive entries
        # come from the same file.
        file_info = 'File "{}", '.format(col(ctx_loc.file, Colors.CYAN))
        if last_file_info == file_info:
            file_info = '  '
        else:
            last_file_info = file_info

        print ('{file_info}line {line}, {msg}'.format(
            file_info=file_info,
            line=col(ctx_loc.line, Colors.CYAN),
            msg=ctx_msg
        ))
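The filtering pass above is easier to follow on concrete data. Here is a minimal, self-contained sketch of the same logic, using made-up (message, location, id) tuples instead of the library's real diagnostic contexts:

contexts = [
    ("in rule A", "foo.py:1", None),
    ("in rule B", None, "b"),
    ("in rule B (retry)", "foo.py:3", "b"),
]

ids, msgs = set(), []
for ctx_msg, ctx_loc, ctx_id in reversed(contexts):
    # Drop entries with no location; keep only the first (most recent) entry
    # seen for each non-null id.
    if ctx_loc and (not ctx_id or ctx_id not in ids):
        msgs.append((ctx_msg, ctx_loc))
        ids.add(ctx_id)

# Oldest-first, deduplicated contexts, ready for printing.
print(list(reversed(msgs)))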
Example #2
def print_context(context):
    """
    Print the current error context.

    Note that this makes sense only when `DiagnosticStyle.default` is enabled.
    """
    assert Diagnostics.style == DiagnosticStyle.default
    assert isinstance(context, list)

    # Then we'll print the context we've kept
    last_file_info = ''
    for ctx_msg, ctx_loc in reversed(context):
        # Show the file information only once when several consecutive entries
        # come from the same file.
        file_info = 'File "{}", '.format(col(get_filename(ctx_loc.file),
                                             Colors.CYAN))
        if last_file_info == file_info:
            file_info = '  '
        else:
            last_file_info = file_info

        print ('{file_info}line {line}, {msg}'.format(
            file_info=file_info,
            line=col(ctx_loc.line, Colors.CYAN),
            msg=ctx_msg
        ))
Example #3
def prop_repr(prop):
    """
    Return a colored repr for a property name.

    :type prop: langkit.gdb.debug_info.Property
    :rtype: str
    """
    prefix = '[dispatcher]'
    if prop.name.startswith(prefix):
        name = prop.name[len(prefix):]
        label = 'dispatch property for {}'.format(name)
    else:
        label = prop.name
    return col(col(label, Colors.RED), Colors.BOLD)
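For illustration, here is the same prefix-stripping logic on a plain string, without the GDB `Property` wrapper or the colouring helpers (the property name below is made up):

prefix = '[dispatcher]'
name = '[dispatcher]node_p_resolve'  # hypothetical property name
if name.startswith(prefix):
    label = 'dispatch property for {}'.format(name[len(prefix):])
else:
    label = name
print(label)  # -> dispatch property for node_p_resolve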
Example #4
def format_severity(severity):
    """
    :param Severity severity:
    """
    msg = ('Error' if severity == Severity.non_blocking_error else
           severity.name.capitalize())
    return col(msg, Colors.BOLD + SEVERITY_COLORS[severity])
Example #5
def format_severity(severity):
    """
    :param Severity severity:
    """
    msg = ('Error'
           if severity == Severity.non_blocking_error else
           severity.name.capitalize())
    return col(msg, Colors.BOLD + SEVERITY_COLORS[severity])
Example #6
def name_repr(expr):
    """
    Return a colored repr for a binding name.

    :type expr: langkit.gdb.state.Binding
    :rtype: str
    """
    return col(expr.dsl_name, Colors.GREEN)
Example #7
def expr_repr(expr):
    """
    Return a colored repr for an expression.

    :type expr: langkit.gdb.state.ExpressionEvaluation
    :rtype: str
    """
    return col(expr.expr_repr, Colors.CYAN)
Example #8
def append_line(line_nb: Union[int, str], line: str) -> None:
    """
    Append a line to the source listing, given a line number and a line.
    """
    ret.append(
        col(prefix_fmt.format(line_nb, line), Colors.BLUE + Colors.BOLD))
    ret.append(line)
    ret.append("\n")
Example #9
    def run(self):
        parsed_args = self.args_parser.parse_args()
        self.dirs.set_build_dir(parsed_args.build_dir)
        install_dir = getattr(parsed_args, 'install-dir', None)
        if install_dir:
            self.dirs.set_install_dir(install_dir)

        # Compute code coverage in the code generator if asked to
        if parsed_args.func == self.do_generate and parsed_args.coverage:
            try:
                import coverage
                del coverage
            except Exception as exc:
                import traceback
                print('Coverage not available:', file=sys.stderr)
                traceback.print_exc()
                sys.exit(1)

            cov = Coverage(self.dirs)
            cov.start()
        else:
            cov = None

        self.context = self.create_context(parsed_args)

        # Set the extensions dir on the compile context
        self.context.extensions_dir = self.dirs.lang_source_dir("extensions")

        try:
            parsed_args.func(parsed_args)
        except AssertionError as e:
            # If in verbose mode, show the full exception traceback + message.
            # If in normal mode, just show the message and exit.
            if parsed_args.verbose:
                raise
            else:
                print col("ERROR :", Colors.FAIL), e.message
                exit(1)

        if cov is not None:
            cov.stop()
            cov.generate_report()
Example #10
def source_listing(highlight_sloc: Location, lines_after: int = 0) -> str:
    """
    Create a source listing for an error message, centered around a specific
    sloc, that will be highlighted/careted, as in the following example::

        65 | fun test(): Int = b_inst.fun_call
           |                   ^^^^^^^^^^^^^^^

    :param highlight_sloc: The source location to center the listing on and to
        highlight.
    :param lines_after: The number of lines to print after the given sloc.
    """

    # Make sure we have at least one line, since locations can refer to "line
    # 1" even for empty source files.
    source_buffer = splitted_text(highlight_sloc.lkt_unit) or ['']

    ret = []

    line_nb = highlight_sloc.line - 1
    start_offset = highlight_sloc.column - 1
    end_offset = highlight_sloc.end_column - 1

    # Compute the width of the column needed to print line numbers
    line_nb_width = len(str(highlight_sloc.line + lines_after))

    # Precompute the format string for the listing left column
    prefix_fmt = "{{: >{}}} | ".format(line_nb_width)

    def append_line(line_nb: Union[int, str], line: str) -> None:
        """
        Append a line to the source listing, given a line number and a line.
        """
        ret.append(
            col(prefix_fmt.format(line_nb, line), Colors.BLUE + Colors.BOLD))
        ret.append(line)
        ret.append("\n")

    # Append the line containing the sloc
    append_line(line_nb, source_buffer[line_nb])

    # Append the line caretting the sloc in the line above
    caret_line = "".join("^" if start_offset <= i < end_offset else " "
                         for i in range(len(source_buffer[line_nb])))
    append_line("", col(caret_line, Colors.RED + Colors.BOLD))

    # Append following lines up to ``lines_after`` lines
    for line_nb, line in enumerate(
            source_buffer[line_nb + 1:min(line_nb + lines_after +
                                          1, len(source_buffer))],
            line_nb + 1):
        append_line(line_nb, line)

    return "".join(ret)
Example #11
    def gen_code_or_fncall(self, pos_name="pos"):
        """
        Return generated code for this parser into the global context.

        `pos_name` is the name of a variable that contains the position of the
        next token in the lexer.

        Either the "parsing code" is returned, either it is emitted in a
        dedicated function and a call to it is returned instead.  This method
        relies on the subclasses-defined `generated_code` for "parsing code"
        generation.

        :param str|names.Name pos_name: The name of the position variable.
        :rtype: ParserCodeContext
        """

        if self.name and get_context().verbosity.debug:
            print "Compiling rule: {0}".format(
                col(self.gen_fn_name, Colors.HEADER)
            )

        # Users must be able to run parsers that implement a named rule, so
        # generate dedicated functions for them.
        if self.is_root:

            # The call to compile will add the declaration and the definition
            # (body) of the function to the compile context.
            self.compile()

            # Generate a call to the previously compiled function, and return
            # the context corresponding to this call.
            pos, res = gen_names("fncall_pos", "fncall_res")
            fncall_block = render(
                'parsers/fn_call_ada',
                _self=self, pos_name=pos_name,
                pos=pos, res=res
            )

            return ParserCodeContext(
                pos_var_name=pos,
                res_var_name=res,
                code=fncall_block,
                var_defs=[
                    (pos, Token),
                    (res, self.get_type())
                ]
            )

        else:
            return self.generate_code(pos_name)
Example #12
def print_context(recovered=False):
    """
    Print the current error context.
    """

    # Then we'll print the context we've kept
    last_file_info = ''
    for ctx_msg, ctx_loc in reversed(get_structured_context(recovered)):
        if EMIT_PARSABLE_ERRORS:
            print "{}:{}: {}".format(ctx_loc.file, ctx_loc.line, ctx_msg)
        else:
            # Show the file information only once when several consecutive
            # entries come from the same file.
            file_info = 'File "{}", '.format(col(ctx_loc.file, Colors.CYAN))
            if last_file_info == file_info:
                file_info = '  '
            else:
                last_file_info = file_info

            print('{file_info}line {line}, {msg}'.format(
                file_info=file_info,
                line=col(ctx_loc.line, Colors.CYAN),
                msg=ctx_msg
            ))
Example #13
def print_error(message: str,
                location: Union[Location, L.LKNode, None],
                severity: Severity = Severity.error) -> None:
    """
    Prints an error.
    """
    if severity == Severity.warning:
        name = 'warning'
        color = Colors.YELLOW
    else:
        name = 'error'
        color = Colors.RED
    error_marker = col("{}: ".format(name), color + Colors.BOLD)

    if location is None:
        print(error_marker + message)
        return

    # If "location" is a node from liblktlang, turn it into a Location
    # instance. Note that this is possible only if liblktlang is available, so
    # we know that we'll have a Location instance afterwards.
    if liblktlang_available and isinstance(location, L.LKNode):
        location = Location.from_lkt_node(location)
    assert isinstance(location, Location)

    # Print the basic error (with colors if in tty)
    print(
        "{}: {}{}".format(
            col(location.gnu_style_repr(), Colors.BOLD),
            error_marker,
            style_diagnostic_message(message),
        )
    )

    # Print the source listing
    if location.lkt_unit is not None:
        print(source_listing(location))
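For a rough sense of the output format, here is a self-contained sketch of the final print above, with plain strings and raw ANSI escapes standing in for Location.gnu_style_repr(), col() and style_diagnostic_message() (all of these stand-ins are assumptions, not the real helpers):

BOLD, RED, RESET = "\x1b[1m", "\x1b[31m", "\x1b[0m"

location_repr = "foo.lkt:12:5"                  # hypothetical location
error_marker = RED + BOLD + "error: " + RESET   # as built above for errors
message = "unexpected token"

print("{}: {}{}".format(BOLD + location_repr + RESET, error_marker, message))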
Example #14
    def gen_code_or_fncall(self, pos_name="pos"):
        """
        Return generated code for this parser into the global context.

        `pos_name` is the name of a variable that contains the position of the
        next token in the lexer.

        Either the "parsing code" is returned, either it is emitted in a
        dedicated function and a call to it is returned instead.  This method
        relies on the subclasses-defined `generated_code` for "parsing code"
        generation.

        :param str|names.Name pos_name: The name of the position variable.
        :rtype: ParserCodeContext
        """

        if self.name and get_context().verbosity.debug:
            print "Compiling rule: {0}".format(
                col(self.gen_fn_name, Colors.HEADER))

        # Users must be able to run parsers that implement a named rule, so
        # generate dedicated functions for them.
        if self.is_root:

            # The call to compile will add the declaration and the definition
            # (body) of the function to the compile context.
            self.compile()

            # Generate a call to the previously compiled function, and return
            # the context corresponding to this call.
            pos, res = gen_names("fncall_pos", "fncall_res")
            fncall_block = render('parsers/fn_call_ada',
                                  parser=self,
                                  pos_name=pos_name,
                                  pos=pos,
                                  res=res)

            return ParserCodeContext(pos_var_name=pos,
                                     res_var_name=res,
                                     code=fncall_block,
                                     var_defs=[(pos, Token),
                                               (res, self.get_type())])

        else:
            return self.generate_code(pos_name)
Example #15
def style_diagnostic_message(string: str) -> str:
    """
    Given a diagnostic message containing possible variable references
    surrounded by backticks, style those references.
    """
    return re.sub("`.*?`", lambda m: col(m.group(), Colors.BOLD), string)
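A rough, self-contained illustration of what this regexp does, using raw ANSI escape codes as stand-ins for the library's col()/Colors helpers (those stand-ins are assumptions, not the real API):

import re

BOLD, RESET = "\x1b[1m", "\x1b[0m"  # stand-ins for col(..., Colors.BOLD)

def style(message: str) -> str:
    # Bold every backtick-delimited span, non-greedily.
    return re.sub("`.*?`", lambda m: BOLD + m.group() + RESET, message)

print(style("cannot resolve reference to `foo` in `Bar.baz`"))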
Example #16
def main(dirs, pattern, j, chunk_size, automated, no_resolution, project,
         extra_args):

    if not automated:
        print("Loading old results ..")
        prev_results = load_or_create("results_file", lambda: None)
        if no_resolution:
            results = prev_results
            embed()
            return

    results = Results()
    files = []
    for dir in dirs:
        dir_files = sorted(glob('{}/*.ad?'.format(dir)))
        if pattern:
            dir_files = [f for f in dir_files if re.findall(pattern, f)]
        dir_files = chunks(chunk_size, map(os.path.basename, dir_files))
        files += [(dir, fs) for fs in dir_files]

    project = os.path.abspath(project)

    raw_results = pmap(
        lambda dir_and_files: FileResult.nameres_files(
            dir_and_files[0], dir_and_files[1],
            project=project, extra_args=extra_args
        ),
        files,
        nb_threads=j
    )

    total_nb_files = sum(len(fs[1]) for fs in files)

    bar = ProgressBar(max_value=total_nb_files)
    for subresults in raw_results:
        results.add(subresults)
        bar.update(len(results))

    if automated:
        print("ACATS Passing:")
        for success in results.successes:
            print(success.file_name)
        return

    ####################################
    # Start of interactive mode report #
    ####################################

    print("Report for {}:".format(time.strftime("%Y-%m-%d %Hh%M")))

    successes = len(results.successes)
    failures = len(results.failures)
    nb_files = successes + failures

    print("Number of successful tests: {}".format(col(successes,
                                                      Colors.GREEN)))
    print("Number of failures: {}".format(col(failures, Colors.RED)))
    print("Number of crashes: {}".format(col(len(results.crashes),
                                             Colors.RED)))

    print("Percentage of files passing: {:.2f}%".format(
        float(successes) / float(nb_files) * 100))

    ind_successes = len(results.individual_successes)
    ind_failures = len(results.individual_failures)
    total_xrefs = ind_successes + ind_failures

    print("Number of individual successes: {}".format(
        col(ind_successes, Colors.GREEN)))
    print("Number of individual failures: {}".format(
        col(ind_failures, Colors.RED)))

    print("Percentage of successes: {:.2f}%".format(
        float(ind_successes) / float(total_xrefs) * 100))

    print("Crashes: ")
    for crash in results.crashes:
        print("    {}".format(crash.file_name))

    def print_exceptions():
        print("Exceptions:")
        for msg, files in results.exceptions_to_files:
            print("{}: {}".format(len(files), msg))

    previously_passing = set()
    previously_failing = set()
    if prev_results is not None:
        print(col("Newly passing tests:", Colors.GREEN))
        previously_passing = set(r.file_name for r in prev_results.successes)
        now_passing = set(r.file_name for r in results.successes)
        for f in (now_passing - previously_passing):
            print("    {}".format(f))

        print(col("Newly failing tests:", Colors.RED))
        previously_failing = set(r.file_name for r in prev_results.failures)
        now_failing = set(r.file_name for r in results.failures)
        for f in (now_failing - previously_failing):
            print("   {}".format(f))

    embed()

    if results.save:
        dump_to(results, "results_file")
Example #17
    def run(self, argv=None):
        parsed_args = self.args_parser.parse_args(argv)

        for trace in parsed_args.trace:
            print("Trace {} is activated".format(trace))
            Log.enable(trace)

        Diagnostics.set_style(parsed_args.diagnostic_style)

        if parsed_args.profile:
            import cProfile
            import pstats

            pr = cProfile.Profile()
            pr.enable()

        # Set the verbosity
        self.verbosity = parsed_args.verbosity

        self.no_ada_api = parsed_args.no_ada_api

        # If asked to, setup the exception hook as a last-chance handler to
        # invoke a debugger in case of uncaught exception.
        if parsed_args.debug:
            # Try to use IPython's debugger if it is available, otherwise fall
            # back to PDB.
            try:
                # noinspection PyPackageRequirements
                from IPython.core import ultratb
            except ImportError:
                ultratb = None  # To keep PyCharm happy...

                def excepthook(type, value, tb):
                    traceback.print_exception(type, value, tb)
                    pdb.post_mortem(tb)

                sys.excepthook = excepthook
            else:
                sys.excepthook = ultratb.FormattedTB(mode='Verbose',
                                                     color_scheme='Linux',
                                                     call_pdb=1)
            del ultratb

        self.dirs.set_build_dir(parsed_args.build_dir)
        install_dir = getattr(parsed_args, 'install-dir', None)
        if install_dir:
            self.dirs.set_install_dir(install_dir)

        if getattr(parsed_args, 'list_warnings', False):
            WarningSet.print_list()
            return

        # noinspection PyBroadException
        try:
            parsed_args.func(parsed_args)

        except DiagnosticError:
            if parsed_args.debug:
                raise
            if parsed_args.verbosity.debug or parsed_args.full_error_traces:
                traceback.print_exc()
            print(col('Errors, exiting', Colors.FAIL), file=sys.stderr)
            sys.exit(1)

        except Exception as e:
            if parsed_args.debug:
                raise
            ex_type, ex, tb = sys.exc_info()

            # If we have a syntax error, we know for sure the last stack frame
            # points to the code that must be fixed. Otherwise, point to the
            # top-most stack frame that does not belong to Langkit.
            if e.args and e.args[0] == 'invalid syntax':
                loc = Location(e.filename, e.lineno)
            else:
                loc = extract_library_location(traceback.extract_tb(tb))
            with Context("", loc, "recovery"):
                check_source_language(False, str(e), do_raise=False)

            # Keep Langkit bug "pretty" for users: display the Python stack
            # trace only when requested.
            if parsed_args.verbosity.debug or parsed_args.full_error_traces:
                traceback.print_exc()

            print(col('Internal error! Exiting', Colors.FAIL), file=sys.stderr)
            sys.exit(1)

        finally:
            if parsed_args.profile:
                pr.disable()
                ps = pstats.Stats(pr)
                ps.dump_stats('langkit.prof')
Example #18
    def set_types(cls, types):
        """
        Associate `types` (a list of CompiledType) with the fields in `cls`.
        It is valid to perform this association multiple times as long as
        types are consistent.

        :type types: list[CompiledType]
        """
        fields = cls.get_parse_fields(include_inherited=False)

        assert len(fields) == len(types), (
            "{} has {} fields ({} types given). You probably have"
            " inconsistent grammar rules and type declarations".format(
                cls, len(fields), len(types)))

        def is_subtype(base_type, subtype):
            return issubclass(subtype, base_type)

        def are_subtypes(fields, new_types):
            return all(
                is_subtype(f.type, n) for f, n in zip(fields, new_types))

        # TODO: instead of expecting types to be *exactly* the same, perform
        # type unification (take the nearest common ancestor for all field
        # types).
        assert (not cls.is_type_resolved or are_subtypes(fields, types)), (
            "Already associated types for some fields are not consistent with"
            " current ones:\n- {}\n- {}".format([f.type for f in fields],
                                                types))

        # Only assign types if cls was not yet typed. In the case where it
        # was already typed, we checked above that the new types were
        # consistent with the already present ones.
        if not cls.is_type_resolved:
            cls.is_type_resolved = True

            for field_type, field in zip(types, fields):

                # At this stage, if the field has a type, it means that the
                # user assigned it one originally. In this case we will use the
                # inferred type for checking only (raising an assertion if it
                # does not correspond).
                if field.type:
                    f = inspect.getfile(cls)
                    l = inspect.getsourcelines(cls)[1]
                    assert field.type == field_type, (
                        col(
                            "Inferred type for field does not correspond to "
                            "type provided by the user.\n", Colors.FAIL) +
                        col("class {astnode_name}, file {file} line {line}\n",
                            Colors.WARNING) + "Field {field_name}, "
                        "Provided type : {ptype}, Inferred type: {itype}"
                    ).format(
                        astnode_name=cls.name(),
                        file=f,
                        line=l,
                        ptype=field.type.name().camel,
                        itype=field_type.name().camel,
                        field_name=field._name.camel)
                else:
                    field.type = field_type
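The consistency check in set_types only requires each newly inferred type to be a subtype of the type already declared for the corresponding field. Here is a minimal sketch of that check, with plain Python classes standing in for CompiledType (an assumption made purely for illustration):

class Base:
    pass

class Derived(Base):
    pass

def are_subtypes(declared, inferred):
    # Every inferred type must be a subclass of the declared one.
    return all(issubclass(n, b) for b, n in zip(declared, inferred))

print(are_subtypes([Base, Base], [Derived, Base]))  # True
print(are_subtypes([Derived], [Base]))              # False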
Example #19
def name_repr(expr: Binding) -> str:
    """
    Return a colored repr for a binding name.
    """
    return col(expr.dsl_name, Colors.GREEN)
Example #20
def expr_repr(expr: ExpressionEvaluation) -> str:
    """
    Return a colored repr for an expression.
    """
    return col(expr.expr_repr, Colors.CYAN)
Example #21
    def run_no_exit(self, argv: Opt[List[str]] = None) -> int:
        parsed_args, unknown_args = self.args_parser.parse_known_args(argv)

        for trace in parsed_args.trace:
            print("Trace {} is activated".format(trace))
            Log.enable(trace)

        Diagnostics.set_style(parsed_args.diagnostic_style)

        if parsed_args.profile:
            import cProfile
            import pstats

            pr = cProfile.Profile()
            pr.enable()

        # Set the verbosity
        self.verbosity = parsed_args.verbosity

        self.enable_build_warnings = getattr(parsed_args,
                                             "enable_build_warnings", False)

        # If there is no build_mode (i.e. we're not running a command that
        # requires it), we still need one to call gnatpp, so set it to a dummy
        # build mode.
        self.build_mode = getattr(parsed_args, "build_mode",
                                  self.BUILD_MODES[0])

        self.no_ada_api = parsed_args.no_ada_api

        # If asked to, setup the exception hook as a last-chance handler to
        # invoke a debugger in case of uncaught exception.
        if parsed_args.debug:
            # Try to use IPython's debugger if it is available, otherwise fall
            # back to PDB.
            try:
                # noinspection PyPackageRequirements
                from IPython.core import ultratb
            except ImportError:

                def excepthook(typ: Type[BaseException], value: BaseException,
                               tb: TracebackType) -> Any:
                    traceback.print_exception(typ, value, tb)
                    pdb.post_mortem(tb)

                sys.excepthook = excepthook
            else:
                sys.excepthook = ultratb.FormattedTB(mode='Verbose',
                                                     color_scheme='Linux',
                                                     call_pdb=1)

        self.dirs.set_build_dir(parsed_args.build_dir)
        install_dir = getattr(parsed_args, 'install-dir', None)
        if install_dir:
            self.dirs.set_install_dir(install_dir)

        if getattr(parsed_args, 'list_warnings', False):
            WarningSet.print_list()
            return 0

        # noinspection PyBroadException
        try:
            parsed_args.func(parsed_args, unknown_args)
            return 0

        except DiagnosticError:
            if parsed_args.debug:
                raise
            if parsed_args.verbosity.debug or parsed_args.full_error_traces:
                traceback.print_exc()
            print(col('Errors, exiting', Colors.FAIL))
            return 1

        except Exception as e:
            if parsed_args.debug:
                raise
            ex_type, ex, tb = sys.exc_info()

            # If we have a syntax error, we know for sure the last stack frame
            # points to the code that must be fixed. Otherwise, point to the
            # top-most stack frame that does not belong to Langkit.
            if e.args and e.args[0] == 'invalid syntax':
                assert isinstance(e, SyntaxError)
                loc = Location(cast(str, e.filename), cast(int, e.lineno))
            else:
                loc = cast(Location,
                           extract_library_location(traceback.extract_tb(tb)))
            with diagnostic_context(loc):
                check_source_language(False, str(e), do_raise=False)

            # Keep Langkit bug "pretty" for users: display the Python stack
            # trace only when requested.
            if parsed_args.verbosity.debug or parsed_args.full_error_traces:
                traceback.print_exc()

            print(col('Internal error! Exiting', Colors.FAIL))
            return 1

        finally:
            if parsed_args.profile:
                pr.disable()
                ps = pstats.Stats(pr)
                ps.dump_stats('langkit.prof')
Example #22
    def run(self, argv=None):
        parsed_args = self.args_parser.parse_args(argv)

        from langkit import diagnostics
        diagnostics.EMIT_PARSABLE_ERRORS = parsed_args.parsable_errors

        if parsed_args.profile:
            import cProfile
            import pstats

            pr = cProfile.Profile()
            pr.enable()

        # If asked to, setup the exception hook as a last-chance handler to
        # invoke a debugger in case of uncaught exception.
        if parsed_args.debug:
            # Try to use IPython's debugger if it is available, otherwise fall
            # back to PDB.
            try:
                # noinspection PyPackageRequirements
                from IPython.core import ultratb
            except ImportError:
                ultratb = None  # To keep PyCharm happy...

                def excepthook(type, value, tb):
                    import traceback
                    traceback.print_exception(type, value, tb)
                    pdb.post_mortem(tb)

                sys.excepthook = excepthook
            else:
                sys.excepthook = ultratb.FormattedTB(mode='Verbose',
                                                     color_scheme='Linux',
                                                     call_pdb=1)
            del ultratb

        self.dirs.set_build_dir(parsed_args.build_dir)
        install_dir = getattr(parsed_args, 'install-dir', None)
        if install_dir:
            self.dirs.set_install_dir(install_dir)

        # Compute code coverage in the code generator if asked to
        if parsed_args.func == self.do_generate and parsed_args.coverage:
            try:
                cov = Coverage(self.dirs)
            except Exception as exc:
                import traceback
                print('Coverage not available:', file=sys.stderr)
                traceback.print_exc()
                sys.exit(1)

            cov.start()
        else:
            cov = None

        # noinspection PyBroadException
        try:
            parsed_args.func(parsed_args)
        except DiagnosticError:
            if parsed_args.debug:
                raise
            print(col('Errors, exiting', Colors.FAIL), file=sys.stderr)
            sys.exit(1)
        except Exception as e:
            if parsed_args.debug:
                raise
            import traceback
            ex_type, ex, tb = sys.exc_info()
            if e.args and e.args[0] == 'invalid syntax':
                loc = Location(e.filename, e.lineno, "")
            else:
                loc = extract_library_location(traceback.extract_tb(tb))
            with Context("", loc, "recovery"):
                check_source_language(False, str(e), do_raise=False)
            if parsed_args.verbosity.debug:
                traceback.print_exc()

            print(col('Internal error! Exiting', Colors.FAIL),
                  file=sys.stderr)
            sys.exit(1)
Example #23
    def run(self, argv=None):
        parsed_args = self.args_parser.parse_args(argv)

        # If asked to, setup the exception hook as a last-chance handler to
        # invoke a debugger in case of uncaught exception.
        if parsed_args.debug:
            # Try to use IPython's debugger if it is available, otherwise fall
            # back to PDB.
            try:
                # noinspection PyPackageRequirements
                from IPython.core import ultratb
            except ImportError:
                ultratb = None  # To keep PyCharm happy...

                def excepthook(type, value, tb):
                    import traceback
                    traceback.print_exception(type, value, tb)
                    pdb.post_mortem(tb)
                sys.excepthook = excepthook
            else:
                sys.excepthook = ultratb.FormattedTB(
                    mode='Verbose', color_scheme='Linux', call_pdb=1
                )
            del ultratb

        self.dirs.set_build_dir(parsed_args.build_dir)
        install_dir = getattr(parsed_args, 'install-dir', None)
        if install_dir:
            self.dirs.set_install_dir(install_dir)

        # Compute code coverage in the code generator if asked to
        if parsed_args.func == self.do_generate and parsed_args.coverage:
            try:
                import coverage
                del coverage
            except Exception as exc:
                import traceback
                print('Coverage not available:', file=sys.stderr)
                traceback.print_exc()
                sys.exit(1)

            cov = Coverage(self.dirs)
            cov.start()
        else:
            cov = None

        # noinspection PyBroadException
        try:
            parsed_args.func(parsed_args)
        except DiagnosticError:
            if parsed_args.debug:
                raise
            print(col('Errors, exiting', Colors.FAIL), file=sys.stderr)
            sys.exit(1)
        except Exception:
            if parsed_args.debug:
                raise
            import traceback
            traceback.print_exc()
            print_errors(recovered=True)
            print(col('Internal error! Exiting', Colors.FAIL),
                  file=sys.stderr)
            sys.exit(1)

        if cov is not None:
            cov.stop()
            cov.generate_report()