Example #1
0
def main(args):
    """
    List basic information about the analyzers supported by CodeChecker.
    """
    logger.setup_logger(args.verbose if 'verbose' in args else None)

    context = analyzer_context.get_context()
    working, errored = \
        analyzer_types.check_supported_analyzers(
            analyzer_types.supported_analyzers,
            context)

    if args.dump_config:
        binary = context.analyzer_binaries.get(args.dump_config)

        if args.dump_config == 'clang-tidy':
            subprocess.call([binary, '-dump-config', '-checks=*'])
        elif args.dump_config == 'clangsa':
            # TODO: Not supported by ClangSA yet!
            LOG.warning("'--dump-config clangsa' is not supported yet.")

        return

    if args.output_format not in ['csv', 'json']:
        if 'details' not in args:
            header = ['Name']
        else:
            header = ['Name', 'Path', 'Version']
    else:
        if 'details' not in args:
            header = ['name']
        else:
            header = ['name', 'path', 'version_string']

    rows = []
    for analyzer in working:
        if 'details' not in args:
            rows.append([analyzer])
        else:
            binary = context.analyzer_binaries.get(analyzer)
            try:
                version = subprocess.check_output([binary, '--version'])
            except (subprocess.CalledProcessError, OSError):
                version = 'ERROR'

            rows.append([analyzer, binary, version])

    if 'all' in args:
        for analyzer, err_reason in errored:
            if 'details' not in args:
                rows.append([analyzer])
            else:
                rows.append([
                    analyzer,
                    context.analyzer_binaries.get(analyzer), err_reason
                ])

    if len(rows) > 0:
        print(output_formatters.twodim_to_str(args.output_format, header,
                                              rows))
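Nearly every command in these examples probes optional arguments with the "'key' in args" idiom instead of getattr() with a default. A minimal standalone sketch of that pattern (the attribute names and values here are illustrative, not taken from CodeChecker):

import argparse

# argparse.Namespace supports membership tests, so an optional attribute can
# be probed without risking an AttributeError when the flag was not given.
args = argparse.Namespace(output_format='table', verbose='debug')

log_level = args.verbose if 'verbose' in args else None   # 'debug'
show_details = 'details' in args                           # False: flag not set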
Example #2
0
def main(args):
    """
    Generates a build log by running the original build command.
    No analysis is done.
    """
    logger.setup_logger(args.verbose if 'verbose' in args else None)

    args.logfile = os.path.realpath(args.logfile)

    # The log file may not be created, or may stay empty, for example when
    # the build command is an empty string or there is no compiler command to
    # log. For this reason we create the log file up front and write an empty
    # JSON array into it, so it is always a valid JSON file.
    with open(args.logfile, 'w', encoding="utf-8", errors="ignore") as logfile:
        logfile.write("[\n]")

    context = analyzer_context.get_context()
    verbose = args.verbose if 'verbose' in args else None

    build_manager.perform_build_command(args.logfile,
                                        args.command,
                                        context,
                                        'keep_link' in args,
                                        silent='quiet' in args,
                                        verbose=verbose)
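The "[\n]" written above is simply an empty JSON array. A small standalone check (editor's illustration, not CodeChecker code) that such a pre-created log file is a valid, empty compilation database:

import json
import os
import tempfile

# Pre-filling the build log with an empty JSON array means any later
# json.load() on it succeeds even if no compiler command was captured.
logfile = os.path.join(tempfile.mkdtemp(), 'compile_commands.json')
with open(logfile, 'w', encoding="utf-8", errors="ignore") as f:
    f.write("[\n]")

with open(logfile, encoding="utf-8") as f:
    assert json.load(f) == []   # parses as an empty list of compile commands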
Example #3
0
def main(args):
    """
    Get and print the version information from the version config
    file and Thrift API definition.
    """
    logger.setup_logger(args.verbose if 'verbose' in args else None)

    output_format = args.output_format

    has_analyzer_version = False
    try:
        from codechecker_analyzer.cmd import analyzer_version
        has_analyzer_version = True

        # Print analyzer version information.
        print("CodeChecker analyzer version:")
        analyzer_version.print_version(output_format)
    except Exception:
        pass

    try:
        from codechecker_web.cmd import web_version

        if has_analyzer_version:
            print()  # Print a new line to separate version information.

        # Print web server version information.
        print("CodeChecker web version:")
        web_version.print_version(output_format)
    except Exception:
        pass
Example #4
0
def main(args):
    """
    Get and print the version information from the version config
    file and Thrift API definition.
    """
    logger.setup_logger(args.verbose if 'verbose' in args else None)

    output_format = args.output_format

    has_analyzer_version = False
    try:
        from codechecker_analyzer.cmd import analyzer_version
        has_analyzer_version = True

        # Print analyzer version information.
        print("CodeChecker analyzer version:")
        analyzer_version.print_version(output_format)
    except Exception:
        pass

    try:
        from codechecker_web.cmd import web_version

        if has_analyzer_version:
            print()  # Print a new line to separate version information.

        # Print web server version information.
        print("CodeChecker web version:")
        web_version.print_version(output_format)
    except Exception:
        pass
Example #5
0
def main(args):
    """
    Get and print the version information from the version config
    file and Thrift API definition.
    """
    logger.setup_logger(args.verbose if 'verbose' in args else None)

    print_version(args.output_format)
Example #6
0
def main(args):
    """
    Get and print the version information from the version config
    file and Thrift API definition.
    """
    logger.setup_logger(args.verbose if 'verbose' in args else None)

    print_version(args.output_format)
Example #7
0
def main(args):
    """
    Get and print the version information from the version config
    file and Thrift API definition.
    """
    # If the given output format is not 'table', redirect logger's output to
    # stderr.
    stream = None
    if 'output_format' in args and args.output_format != 'table':
        stream = 'stderr'

    logger.setup_logger(args.verbose if 'verbose' in args else None, stream)

    print_version(args.output_format)
Example #8
0
def main(args):
    """
    Generates a build log by running the original build command.
    No analysis is done.
    """
    logger.setup_logger(args.verbose if 'verbose' in args else None)

    args.logfile = os.path.realpath(args.logfile)
    if os.path.exists(args.logfile):
        os.remove(args.logfile)

    context = analyzer_context.get_context()
    build_manager.perform_build_command(args.logfile,
                                        args.command,
                                        context,
                                        'keep_link' in args,
                                        silent='quiet' in args)
Example #9
0
def main(args):
    """
    Get and print the version information from the version config
    file and Thrift API definition.
    """
    logger.setup_logger(args.verbose if 'verbose' in args else None)

    output_format = args.output_format

    # Get analyzer version information if the module is available.
    analyzer_version = None
    try:
        from codechecker_analyzer.cmd.analyzer_version import Version
        analyzer_version = Version()
    except Exception:
        pass

    # Get web version information if the module is available.
    web_version = None
    try:
        from codechecker_web.cmd.web_version import Version
        web_version = Version()
    except Exception:
        pass

    # Print the version information.
    if output_format == "json":
        print(
            json.dumps({
                "analyzer":
                analyzer_version.to_dict() if analyzer_version else None,
                "web":
                web_version.to_dict() if web_version else None
            }))
    else:
        if analyzer_version:
            analyzer_version.print(output_format)
            print()

        if web_version:
            web_version.print(output_format)
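In the JSON branch above, a component that failed to import is serialized as null, so consumers can tell "not installed" apart from a present-but-empty version. A minimal sketch with made-up values (the dict stands in for a real to_dict() result):

import json

analyzer_version = None                 # import of the analyzer part failed
web_version = {"version": "x.y.z"}      # illustrative to_dict() output

print(json.dumps({
    "analyzer": analyzer_version,       # rendered as null
    "web": web_version
}))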
Example #10
0
def main(args):
    """
    List the checkers available in the specified (or all supported) analyzers
    along with their description or enabled status in various formats.
    """
    # If the given output format is not 'table', redirect logger's output to
    # stderr.
    logger.setup_logger(args.verbose if 'verbose' in args else None,
                        None if args.output_format == 'table' else 'stderr')

    cl = analyzer_context.get_context().checker_labels

    if 'profile' in args and not args.profile:
        __print_profiles(args, cl)
        return

    if 'severity' in args and not args.severity:
        __print_severities(args, cl)
        return

    if 'guideline' in args and not args.guideline:
        __print_guidelines(args, cl)
        return

    if 'label' in args and not args.label:
        __print_labels(args, cl)
        return

    if 'label' in args and ':' not in args.label:
        __print_label_values(args, cl)
        return

    if 'checker_config' in args:
        __print_checker_config(args)
        return

    __print_checkers(args, cl)
Example #11
0
def main(args):
    """
    List the checkers available in the specified (or all supported) analyzers
    along with their description or enabled status in various formats.
    """

    # If the given output format is not 'table', redirect logger's output to
    # stderr.
    logger.setup_logger(args.verbose if 'verbose' in args else None,
                        None if args.output_format == 'table' else 'stderr')

    context = analyzer_context.get_context()
    working_analyzers, errored = analyzer_types.check_supported_analyzers(
        args.analyzers, context)

    analyzer_environment = env.extend(context.path_env_extra,
                                      context.ld_lib_path_extra)

    analyzer_config_map = analyzer_types.build_config_handlers(
        args, context, working_analyzers)

    def uglify(text):
        """
        The csv and json output formats use this non-human-readable header
        string: no CamelCase and no spaces.
        """
        return text.lower().replace(' ', '_')

    def match_guideline(checker_name, selected_guidelines):
        """
        Return True if checker_name produces reports related to any of the
        selected guideline rules.
        checker_name -- A full checker name.
        selected_guidelines -- A list of guideline names or guideline rule IDs.
        """
        guideline = context.guideline_map.get(checker_name, {})
        guideline_set = set(guideline)
        for value in guideline.values():
            guideline_set |= set(value)

        return any(g in guideline_set for g in selected_guidelines)

    def format_guideline(guideline):
        """
        Convert guideline rules to human-readable format.
        guideline -- Dictionary in the following format:
                     {"guideline_1": ["rule_1", "rule_2"]}
        """
        return ' '.join('Related {} rules: {}'.format(g, ', '.join(r))
                        for g, r in guideline.items())

    # List available checker profiles.
    if 'profile' in args and args.profile == 'list':
        if 'details' in args:
            header = ['Profile name', 'Description']
            rows = context.available_profiles.items()
        else:
            header = ['Profile name']
            rows = [(key, "") for key in context.available_profiles.keys()]

        if args.output_format in ['csv', 'json']:
            header = list(map(uglify, header))

        print(output_formatters.twodim_to_str(args.output_format, header,
                                              rows))
        return

    # List checker config options.
    if 'checker_config' in args:
        if 'details' in args:
            header = ['Option', 'Description']
        else:
            header = ['Option']

        if args.output_format in ['csv', 'json']:
            header = list(map(uglify, header))

        rows = []
        for analyzer in working_analyzers:
            config_handler = analyzer_config_map.get(analyzer)
            analyzer_class = analyzer_types.supported_analyzers[analyzer]

            configs = analyzer_class.get_checker_config(
                config_handler, analyzer_environment)
            rows.extend(
                (':'.join((analyzer, c[0])),
                 c[1]) if 'details' in args else (':'.join((analyzer, c[0])), )
                for c in configs)

        print(output_formatters.twodim_to_str(args.output_format, header,
                                              rows))
        return

    if args.guideline is not None and len(args.guideline) == 0:
        result = defaultdict(set)

        for _, guidelines in context.guideline_map.items():
            for guideline, rules in guidelines.items():
                result[guideline] |= set(rules)

        header = ['Guideline', 'Rules']
        if args.output_format in ['csv', 'json']:
            header = list(map(uglify, header))

        if args.output_format == 'json':
            rows = [(g, sorted(list(r))) for g, r in result.items()]
        else:
            rows = [(g, ', '.join(sorted(r))) for g, r in result.items()]

        if args.output_format == 'rows':
            for row in rows:
                print('Guideline: {}'.format(row[0]))
                print('Rules: {}'.format(row[1]))
        else:
            print(
                output_formatters.twodim_to_str(args.output_format, header,
                                                rows))
        return

    # List available checkers.
    if 'details' in args:
        header = [
            'Enabled', 'Name', 'Analyzer', 'Severity', 'Guideline',
            'Description'
        ]
    else:
        header = ['Name']

    if args.output_format in ['csv', 'json']:
        header = list(map(uglify, header))

    rows = []
    for analyzer in working_analyzers:
        config_handler = analyzer_config_map.get(analyzer)
        analyzer_class = analyzer_types.supported_analyzers[analyzer]

        checkers = analyzer_class.get_analyzer_checkers(
            config_handler, analyzer_environment)
        default_checker_cfg = context.checker_config.get(analyzer +
                                                         '_checkers')

        profile_checkers = None
        if 'profile' in args:
            if args.profile not in context.available_profiles:
                LOG.error("Checker profile '%s' does not exist!", args.profile)
                LOG.error("To list available profiles, use '--profile list'.")
                sys.exit(1)

            profile_checkers = [(args.profile, True)]

        config_handler.initialize_checkers(context.available_profiles,
                                           context.package_root, checkers,
                                           default_checker_cfg,
                                           profile_checkers)

        for checker_name, value in config_handler.checks().items():
            state, description = value

            if state != CheckerState.enabled and 'profile' in args:
                continue

            if state == CheckerState.enabled and 'only_disabled' in args:
                continue
            elif state != CheckerState.enabled and 'only_enabled' in args:
                continue

            if args.output_format == 'json':
                state = state == CheckerState.enabled
            else:
                state = '+' if state == CheckerState.enabled else '-'

            if args.guideline is not None:
                if not match_guideline(checker_name, args.guideline):
                    continue

            if 'details' in args:
                severity = context.severity_map.get(checker_name)
                guideline = context.guideline_map.get(checker_name, {})
                if args.output_format != 'json':
                    guideline = format_guideline(guideline)
                rows.append([
                    state, checker_name, analyzer, severity, guideline,
                    description
                ])
            else:
                rows.append([checker_name])

    if 'show_warnings' in args:
        severity = context.severity_map.get('clang-diagnostic-')
        for warning in get_warnings(analyzer_environment):
            warning = 'clang-diagnostic-' + warning[2:]

            if 'guideline' in args:
                if not match_guideline(warning, args.guideline):
                    continue

            guideline = context.guideline_map.get(warning, {})
            if args.output_format != 'json':
                guideline = format_guideline(guideline)

            if 'details' in args:
                rows.append(['', warning, '-', severity, guideline, '-'])
            else:
                rows.append([warning])

    if rows:
        print(output_formatters.twodim_to_str(args.output_format, header,
                                              rows))

    for analyzer_binary, reason in errored:
        LOG.error(
            "Failed to get checkers for '%s'!"
            "The error reason was: '%s'", analyzer_binary, reason)
        LOG.error("Please check your installation and the "
                  "'config/package_layout.json' file!")
Example #12
0
def main(args):
    """
    Execute a wrapper over log-analyze-parse, aka 'check'.
    """

    logger.setup_logger(args.verbose if 'verbose' in args else None)

    if 'ctu_ast_mode' in args and 'ctu_phases' not in args:
        LOG.error("Analyzer option 'ctu-ast-mode' requires CTU mode enabled")
        sys.exit(1)

    def __update_if_key_exists(source, target, key):
        """Append the source Namespace's element with 'key' to target with
        the same key, but only if it exists."""
        if key in source:
            setattr(target, key, getattr(source, key))

    # If no output directory is given then the checker results go to a
    # temporary directory. This way the subsequent "quick" checks don't pollute
    # the result list of a previous check. If the detection status function is
    # intended to be used (i.e. by keeping the .plist files) then the output
    # directory has to be provided explicitly.
    if 'output_dir' in args:
        output_dir = args.output_dir
    else:
        output_dir = tempfile.mkdtemp()

    output_dir = os.path.abspath(output_dir)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    logfile = None
    # Initialized here so the return at the end cannot raise
    # UnboundLocalError when the analysis step is skipped by an earlier error.
    analysis_exit_status = None
    try:
        # --- Step 1.: Perform logging if build command was specified.
        if 'command' in args:
            logfile = tempfile.NamedTemporaryFile().name

            # Translate the argument list between check and log.
            log_args = argparse.Namespace(command=args.command,
                                          logfile=logfile)
            __update_if_key_exists(args, log_args, 'quiet')
            __update_if_key_exists(args, log_args, 'verbose')

            import codechecker_analyzer.cmd.log as log_module
            LOG.debug("Calling LOG with args:")
            LOG.debug(log_args)

            # If not given explicitly, the debug log file of ld_logger is
            # placed in the report directory, if any. Otherwise parallel
            # "CodeChecker check" commands would overwrite each other's log
            # files under /tmp, the default location for "CodeChecker check".
            if 'CC_LOGGER_DEBUG_FILE' not in os.environ:
                os.environ['CC_LOGGER_DEBUG_FILE'] = \
                    os.path.join(output_dir, 'codechecker.logger.debug')

            log_module.main(log_args)
        elif 'logfile' in args:
            logfile = args.logfile

        # --- Step 2.: Perform the analysis.
        analyze_args = argparse.Namespace(
            logfile=logfile,
            output_path=output_dir,
            output_format='plist',
            jobs=args.jobs,
            keep_gcc_include_fixed=args.keep_gcc_include_fixed,
            keep_gcc_intrin=args.keep_gcc_intrin)
        # Some arguments don't have default values.
        # We can't set these keys to None because it would result in an error
        # after the call.
        args_to_update = [
            'quiet',
            'skipfile',
            'files',
            'analyzers',
            'add_compiler_defaults',
            'clangsa_args_cfg_file',
            'tidy_args_cfg_file',
            'tidy_config',
            'analyzer_config',
            'checker_config',
            'capture_analysis_output',
            'config_file',
            'ctu_phases',
            'ctu_reanalyze_on_failure',
            'stats_output',
            'stats_dir',
            'stats_enabled',
            'stats_relevance_threshold',
            'stats_min_sample_count',
            'enable_all',
            'ordered_checkers',  # --enable and --disable.
            'timeout',
            'compile_uniqueing',
            'report_hash',
            'enable_z3',
            'enable_z3_refutation'
        ]
        for key in args_to_update:
            __update_if_key_exists(args, analyze_args, key)
        if 'clean' in args:
            setattr(analyze_args, 'clean', True)
        __update_if_key_exists(args, analyze_args, 'verbose')

        import codechecker_analyzer.cmd.analyze as analyze_module
        LOG.debug("Calling ANALYZE with args:")
        LOG.debug(analyze_args)

        analysis_exit_status = analyze_module.main(analyze_args)

        # --- Step 3.: Print to stdout.
        parse_args = argparse.Namespace(input=[output_dir],
                                        input_format='plist')
        __update_if_key_exists(args, parse_args, 'print_steps')
        __update_if_key_exists(args, parse_args, 'review_status')
        __update_if_key_exists(args, parse_args, 'verbose')
        __update_if_key_exists(args, parse_args, 'skipfile')
        __update_if_key_exists(args, parse_args, 'suppress')

        import codechecker_analyzer.cmd.parse as parse_module
        LOG.debug("Calling PARSE with args:")
        LOG.debug(parse_args)

        parse_module.main(parse_args)
    except ImportError:
        LOG.error("Check failed: couldn't import a library.")
    except Exception:
        LOG.exception("Running check failed.")
        import traceback
        traceback.print_exc()
    finally:
        if 'output_dir' not in args:
            shutil.rmtree(output_dir)
        if 'command' in args:
            os.remove(logfile)

    LOG.debug("Check finished.")

    return analysis_exit_status
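The __update_if_key_exists() helper above translates one argparse Namespace into another: an attribute is copied only when the user actually supplied it, so the sub-commands never see keys set to an explicit None. A self-contained sketch of the idiom (argument names are illustrative):

import argparse

def update_if_key_exists(source, target, key):
    # Copy the attribute only when it is present on the source Namespace.
    if key in source:
        setattr(target, key, getattr(source, key))

check_args = argparse.Namespace(jobs=4, skipfile='skip.list')
analyze_args = argparse.Namespace(output_format='plist')

for key in ('skipfile', 'quiet'):       # 'quiet' was never given
    update_if_key_exists(check_args, analyze_args, key)

print(analyze_args)   # Namespace(output_format='plist', skipfile='skip.list')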
Example #13
0
def main(args):
    """
    Store the defect results in the specified input list as bug reports in the
    database.
    """
    logger.setup_logger(args.verbose if 'verbose' in args else None)

    try:
        cmd_config.check_config_file(args)
    except FileNotFoundError as fnerr:
        LOG.error(fnerr)
        sys.exit(1)

    if not host_check.check_zlib():
        raise Exception("zlib is not available on the system!")

    # To ensure the help message prints the default folder properly,
    # the 'default' for 'args.input' is a string, not a list.
    # But the loop below needs a list to work on.
    if isinstance(args.input, str):
        args.input = [args.input]

    if 'name' not in args:
        LOG.debug("Generating name for analysis...")
        generated = __get_run_name(args.input)
        if generated:
            setattr(args, 'name', generated)
        else:
            LOG.error("No suitable name was found in the inputs for the "
                      "analysis run. Please specify one by passing argument "
                      "--name run_name in the invocation.")
            sys.exit(2)  # argparse returns error code 2 for bad invocations.

    LOG.info("Storing analysis results for run '" + args.name + "'")

    if 'force' in args:
        LOG.info("argument --force was specified: the run with name '" +
                 args.name + "' will be deleted.")

    # Setup connection to the remote server.
    client = libclient.setup_client(args.product_url)

    _, zip_file = tempfile.mkstemp('.zip')
    LOG.debug("Will write mass store ZIP to '%s'...", zip_file)

    try:
        LOG.debug("Assembling zip file.")
        try:
            assemble_zip(args.input, zip_file, client)
        except Exception as ex:
            print(ex)
            import traceback
            traceback.print_stack()

        zip_size = os.stat(zip_file).st_size
        LOG.debug("Assembling zip done, size is %s",
                  sizeof_fmt(zip_size))

        if zip_size > MAX_UPLOAD_SIZE:
            LOG.error("The result list to upload is too big (max: %s).",
                      sizeof_fmt(MAX_UPLOAD_SIZE))
            sys.exit(1)

        with open(zip_file, 'rb') as zf:
            b64zip = base64.b64encode(zf.read()).decode("utf-8")

        context = webserver_context.get_context()

        trim_path_prefixes = args.trim_path_prefix if \
            'trim_path_prefix' in args else None

        description = args.description if 'description' in args else None

        LOG.info("Storing results to the server...")
        client.massStoreRun(args.name,
                            args.tag if 'tag' in args else None,
                            str(context.version),
                            b64zip,
                            'force' in args,
                            trim_path_prefixes,
                            description)

        # Storing analysis statistics if the server allows them.
        if client.allowsStoringAnalysisStatistics():
            storing_analysis_statistics(client, args.input, args.name)

        LOG.info("Storage finished successfully.")
    except RequestFailed as reqfail:
        if reqfail.errorCode == ErrorCode.SOURCE_FILE:
            header = ['File', 'Line', 'Checker name']
            table = twodim_to_str('table',
                                  header,
                                  [c.split('|') for c in reqfail.extraInfo])
            LOG.warning("Setting the review statuses for some reports failed "
                        "because of non valid source code comments: "
                        "%s\n %s", reqfail.message, table)
        sys.exit(1)
    except Exception as ex:
        import traceback
        traceback.print_stack()
        LOG.info("Storage failed: %s", str(ex))
        sys.exit(1)
    finally:
        os.remove(zip_file)
Example #14
0
def main(args):
    """
    Perform analysis on the given logfiles and store the results in a machine-
    readable format.
    """
    logger.setup_logger(args.verbose if 'verbose' in args else None)

    if len(args.logfile) != 1:
        LOG.warning("Only one log file can be processed right now!")
        sys.exit(1)

    args.output_path = os.path.abspath(args.output_path)
    if os.path.exists(args.output_path) and \
            not os.path.isdir(args.output_path):
        LOG.error("The given output path is not a directory: " +
                  args.output_path)
        sys.exit(1)

    if 'enable_all' in args:
        LOG.info("'--enable-all' was supplied for this analysis.")

    # We clear the output directory in the following cases.
    ctu_dir = os.path.join(args.output_path, 'ctu-dir')
    if 'ctu_phases' in args and args.ctu_phases[0] and \
            os.path.isdir(ctu_dir):
        # Clear the CTU-dir if the user turned on the collection phase.
        LOG.debug("Previous CTU contents have been deleted.")
        shutil.rmtree(ctu_dir)

    if 'clean' in args and os.path.isdir(args.output_path):
        LOG.info(
            "Previous analysis results in '%s' have been removed, "
            "overwriting with current result", args.output_path)
        shutil.rmtree(args.output_path)

    if not os.path.exists(args.output_path):
        os.makedirs(args.output_path)

    LOG.debug("args: " + str(args))
    LOG.debug("Output will be stored to: '" + args.output_path + "'")

    # Process the skip list if present.
    skip_handler = __get_skip_handler(args)

    # Parse the JSON CCDBs and retrieve the compile commands.
    actions = []
    for log_file in args.logfile:
        if not os.path.exists(log_file):
            LOG.error("The specified logfile '%s' does not exist!", log_file)
            continue

        actions += log_parser.parse_log(
            load_json_or_empty(log_file), skip_handler,
            os.path.join(args.output_path, 'compiler_info.json'))
    if not actions:
        LOG.info("None of the specified build log files contained "
                 "valid compilation commands. No analysis needed...")
        sys.exit(1)

    context = analyzer_context.get_context()
    metadata = {
        'action_num': len(actions),
        'command': sys.argv,
        'versions': {
            'codechecker': "{0} ({1})".format(context.package_git_tag,
                                              context.package_git_hash)
        },
        'working_directory': os.getcwd(),
        'output_path': args.output_path,
        'result_source_files': {}
    }

    if 'name' in args:
        metadata['name'] = args.name

    # Update metadata dictionary with old values.
    metadata_file = os.path.join(args.output_path, 'metadata.json')
    if os.path.exists(metadata_file):
        metadata_prev = load_json_or_empty(metadata_file)
        metadata['result_source_files'] = \
            metadata_prev['result_source_files']

    analyzer.perform_analysis(args, skip_handler, context, actions, metadata)

    __update_skip_file(args)

    LOG.debug("Analysis metadata write to '%s'", metadata_file)
    with open(metadata_file, 'w') as metafile:
        json.dump(metadata, metafile)

    # WARN: the store command will search for this file!
    compile_cmd_json = os.path.join(args.output_path, 'compile_cmd.json')
    try:
        source = os.path.abspath(args.logfile[0])
        target = os.path.abspath(compile_cmd_json)

        if source != target:
            shutil.copyfile(source, target)
    except shutil.Error:
        LOG.debug("Compilation database JSON file is the same.")
    except Exception:
        LOG.debug("Copying compilation database JSON file failed.")

    try:
        from codechecker_analyzer import analyzer_statistics
        analyzer_statistics.collect(metadata, "analyze")
    except Exception:
        pass
Example #15
0
def main(args):
    """
    Execute a wrapper over log-analyze-parse, aka 'check'.
    """

    logger.setup_logger(args.verbose if 'verbose' in args else None)

    def __update_if_key_exists(source, target, key):
        """Append the source Namespace's element with 'key' to target with
        the same key, but only if it exists."""
        if key in source:
            setattr(target, key, getattr(source, key))

    # If no output directory is given then the checker results go to a
    # temporary directory. This way the subsequent "quick" checks don't pollute
    # the result list of a previous check. If the detection status function is
    # intended to be used (i.e. by keeping the .plist files) then the output
    # directory has to be provided explicitly.
    is_temp_output = False
    if 'output_dir' in args:
        output_dir = args.output_dir
    else:
        output_dir = tempfile.mkdtemp()
        is_temp_output = True

    output_dir = os.path.abspath(output_dir)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    logfile = None
    is_command = False
    try:
        # --- Step 1.: Perform logging if build command was specified.
        if 'command' in args:
            logfile = tempfile.NamedTemporaryFile().name
            is_command = True

            # Translate the argument list between check and log.
            log_args = argparse.Namespace(
                command=args.command,
                logfile=logfile
            )
            __update_if_key_exists(args, log_args, 'quiet')
            __update_if_key_exists(args, log_args, 'verbose')

            import codechecker_analyzer.cmd.log as log_module
            LOG.debug("Calling LOG with args:")
            LOG.debug(log_args)

            log_module.main(log_args)
        elif 'logfile' in args:
            logfile = args.logfile

        # --- Step 2.: Perform the analysis.
        if not os.path.exists(logfile):
            raise OSError("The specified logfile '" + logfile + "' does not "
                          "exist.")

        analyze_args = argparse.Namespace(
            logfile=[logfile],
            output_path=output_dir,
            output_format='plist',
            jobs=args.jobs
        )
        # Some arguments don't have default values.
        # We can't set these keys to None because it would result in an error
        # after the call.
        args_to_update = ['quiet',
                          'skipfile',
                          'analyzers',
                          'add_compiler_defaults',
                          'clangsa_args_cfg_file',
                          'tidy_args_cfg_file',
                          'tidy_config',
                          'capture_analysis_output',
                          'ctu_phases',
                          'stats_output',
                          'stats_dir',
                          'stats_enabled',
                          'stats_relevance_threshold',
                          'stats_min_sample_count',
                          'enable_all',
                          'ordered_checkers',  # --enable and --disable.
                          'timeout',
                          'compile_uniqueing',
                          'report_hash'
                          ]
        for key in args_to_update:
            __update_if_key_exists(args, analyze_args, key)
        if 'force' in args or 'clean' in args:
            setattr(analyze_args, 'clean', True)
        __update_if_key_exists(args, analyze_args, 'verbose')

        import codechecker_analyzer.cmd.analyze as analyze_module
        LOG.debug("Calling ANALYZE with args:")
        LOG.debug(analyze_args)

        analyze_module.main(analyze_args)

        # --- Step 3.: Print to stdout.
        parse_args = argparse.Namespace(
            input=[output_dir],
            input_format='plist'
        )
        __update_if_key_exists(args, parse_args, 'print_steps')
        __update_if_key_exists(args, parse_args, 'verbose')
        __update_if_key_exists(args, parse_args, 'skipfile')

        import codechecker_analyzer.cmd.parse as parse_module
        LOG.debug("Calling PARSE with args:")
        LOG.debug(parse_args)

        parse_module.main(parse_args)
    except ImportError:
        LOG.error("Check failed: couldn't import a library.")
    except Exception:
        LOG.exception("Running check failed.")
        import traceback
        traceback.print_exc()
    finally:
        if is_temp_output:
            shutil.rmtree(output_dir)
        if is_command:
            os.remove(logfile)

    LOG.debug("Check finished.")
Example #16
0
def main(args):
    """
    Perform analysis on the given logfiles and store the results in a machine-
    readable format.
    """
    logger.setup_logger(args.verbose if 'verbose' in args else None)

    check_config_file(args)

    if not os.path.exists(args.logfile):
        LOG.error("The specified logfile '%s' does not exist!", args.logfile)
        sys.exit(1)

    args.output_path = os.path.abspath(args.output_path)
    if os.path.exists(args.output_path) and \
            not os.path.isdir(args.output_path):
        LOG.error("The given output path is not a directory: " +
                  args.output_path)
        sys.exit(1)

    if 'enable_all' in args:
        LOG.info("'--enable-all' was supplied for this analysis.")

    # We clear the output directory in the following cases.
    ctu_dir = os.path.join(args.output_path, 'ctu-dir')
    if 'ctu_phases' in args and args.ctu_phases[0] and \
            os.path.isdir(ctu_dir):
        # Clear the CTU-dir if the user turned on the collection phase.
        LOG.debug("Previous CTU contents have been deleted.")
        shutil.rmtree(ctu_dir)

    if 'clean' in args and os.path.isdir(args.output_path):
        LOG.info(
            "Previous analysis results in '%s' have been removed, "
            "overwriting with current result", args.output_path)
        shutil.rmtree(args.output_path)

    if not os.path.exists(args.output_path):
        os.makedirs(args.output_path)

    LOG.debug("args: " + str(args))
    LOG.debug("Output will be stored to: '" + args.output_path + "'")

    config_option_re = re.compile(r'^({}):.+=.+$'.format('|'.join(
        analyzer_types.supported_analyzers)))

    # Check the format of analyzer options.
    if 'analyzer_config' in args:
        for config in args.analyzer_config:
            if not re.match(config_option_re, config):
                LOG.error("Analyzer option in wrong format: %s", config)
                sys.exit(1)

    # Check the format of checker options.
    if 'checker_config' in args:
        for config in args.checker_config:
            if not re.match(config_option_re, config):
                LOG.error("Checker option in wrong format: %s", config)
                sys.exit(1)

    compile_commands = load_json_or_empty(args.logfile, default={})

    # Process the skip list if present.
    skip_handler = __get_skip_handler(args, compile_commands)

    # Enable alpha uniqueing by default if ctu analysis is used.
    if 'none' in args.compile_uniqueing and 'ctu_phases' in args:
        args.compile_uniqueing = "alpha"

    compiler_info_file = None
    if 'compiler_info_file' in args:
        LOG.debug("Compiler info is read from: %s", args.compiler_info_file)
        if not os.path.exists(args.compiler_info_file):
            LOG.error("Compiler info file %s does not exist",
                      args.compiler_info_file)
            sys.exit(1)
        compiler_info_file = args.compiler_info_file

    ctu_or_stats_enabled = False
    # Skip list is applied only in pre-analysis
    # if --ctu-collect was called explicitly.
    pre_analysis_skip_handler = None
    if 'ctu_phases' in args:
        ctu_collect = args.ctu_phases[0]
        ctu_analyze = args.ctu_phases[1]
        if ctu_collect and not ctu_analyze:
            pre_analysis_skip_handler = skip_handler

        if ctu_collect or ctu_analyze:
            ctu_or_stats_enabled = True

    # Skip list is applied only in pre-analysis
    # if --stats-collect was called explicitly.
    if 'stats_output' in args and args.stats_output:
        pre_analysis_skip_handler = skip_handler
        ctu_or_stats_enabled = True

    if 'stats_enabled' in args and args.stats_enabled:
        ctu_or_stats_enabled = True

    context = analyzer_context.get_context()
    analyzer_env = env.extend(context.path_env_extra,
                              context.ld_lib_path_extra)

    # Total number of compilation commands in the parsed log file,
    # as recorded by the build logger.
    all_cmp_cmd_count = len(compile_commands)

    actions, skipped_cmp_cmd_count = log_parser.parse_unique_log(
        compile_commands, args.output_path, args.compile_uniqueing,
        compiler_info_file, args.keep_gcc_include_fixed, args.keep_gcc_intrin,
        skip_handler, pre_analysis_skip_handler, ctu_or_stats_enabled,
        analyzer_env)

    if not actions:
        LOG.info("No analysis is required.\nThere were no compilation "
                 "commands in the provided compilation database or "
                 "all of them were skipped.")
        sys.exit(0)

    uniqued_compilation_db_file = os.path.join(args.output_path,
                                               "unique_compile_commands.json")
    with open(uniqued_compilation_db_file,
              'w',
              encoding="utf-8",
              errors="ignore") as f:
        json.dump(actions, f, cls=log_parser.CompileCommandEncoder)

    metadata = {
        'version': 2,
        'tools': [{
            'name': 'codechecker',
            'action_num': len(actions),
            'command': sys.argv,
            'version': "{0} ({1})".format(context.package_git_tag,
                                          context.package_git_hash),
            'working_directory': os.getcwd(),
            'output_path': args.output_path,
            'result_source_files': {},
            'analyzers': {}
        }]
    }
    metadata_tool = metadata['tools'][0]

    if 'name' in args:
        metadata_tool['run_name'] = args.name

    # Update metadata dictionary with old values.
    metadata_file = os.path.join(args.output_path, 'metadata.json')
    metadata_prev = None
    if os.path.exists(metadata_file):
        metadata_prev = load_json_or_empty(metadata_file)
        metadata_tool['result_source_files'] = \
            __get_result_source_files(metadata_prev)

    CompileCmdParseCount = \
        collections.namedtuple('CompileCmdParseCount',
                               'total, analyze, skipped, removed_by_uniqueing')
    cmp_cmd_to_be_uniqued = all_cmp_cmd_count - skipped_cmp_cmd_count

    # Number of compile commands removed during uniqueing.
    removed_during_uniqueing = cmp_cmd_to_be_uniqued - len(actions)

    all_to_be_analyzed = cmp_cmd_to_be_uniqued - removed_during_uniqueing

    compile_cmd_count = CompileCmdParseCount(
        total=all_cmp_cmd_count,
        analyze=all_to_be_analyzed,
        skipped=skipped_cmp_cmd_count,
        removed_by_uniqueing=removed_during_uniqueing)

    LOG.debug_analyzer(
        "Total number of compile commands without "
        "skipping or uniqueing: %d", compile_cmd_count.total)
    LOG.debug_analyzer("Compile commands removed by uniqueing: %d",
                       compile_cmd_count.removed_by_uniqueing)
    LOG.debug_analyzer("Compile commands skipped during log processing: %d",
                       compile_cmd_count.skipped)
    LOG.debug_analyzer("Compile commands forwarded for analysis: %d",
                       compile_cmd_count.analyze)

    analyzer.perform_analysis(args, skip_handler, context, actions,
                              metadata_tool, compile_cmd_count)

    __update_skip_file(args)
    __cleanup_metadata(metadata_prev, metadata)

    LOG.debug("Analysis metadata write to '%s'", metadata_file)
    with open(metadata_file, 'w', encoding="utf-8",
              errors="ignore") as metafile:
        json.dump(metadata, metafile)

    # WARN: the store command will search for this file!
    compile_cmd_json = os.path.join(args.output_path, 'compile_cmd.json')
    try:
        source = os.path.abspath(args.logfile)
        target = os.path.abspath(compile_cmd_json)

        if source != target:
            shutil.copyfile(source, target)
    except shutil.Error:
        LOG.debug("Compilation database JSON file is the same.")
    except Exception:
        LOG.debug("Copying compilation database JSON file failed.")

    try:
        # pylint: disable=no-name-in-module
        from codechecker_analyzer import analyzer_statistics
        analyzer_statistics.collect(metadata, "analyze")
    except Exception:
        pass
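The compile-command bookkeeping above follows simple arithmetic: commands skipped during log parsing are subtracted first, then whatever uniqueing removed, and the remainder is analyzed. A sketch with invented counts, reusing the same namedtuple fields:

import collections

CompileCmdParseCount = collections.namedtuple(
    'CompileCmdParseCount', 'total, analyze, skipped, removed_by_uniqueing')

all_cmp_cmd_count = 120        # commands found in the compilation database
skipped_cmp_cmd_count = 20     # dropped by the skip list during parsing
actions = 90                   # unique actions left after uniqueing

cmp_cmd_to_be_uniqued = all_cmp_cmd_count - skipped_cmp_cmd_count   # 100
removed_during_uniqueing = cmp_cmd_to_be_uniqued - actions          # 10

counts = CompileCmdParseCount(
    total=all_cmp_cmd_count,
    analyze=cmp_cmd_to_be_uniqued - removed_during_uniqueing,       # 90
    skipped=skipped_cmp_cmd_count,
    removed_by_uniqueing=removed_during_uniqueing)
print(counts)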
Example #17
0
def main(args):
    """
    List the checkers available in the specified (or all supported) analyzers
    along with their description or enabled status in various formats.
    """

    # If the given output format is not 'table', redirect logger's output to
    # stderr.
    logger.setup_logger(args.verbose if 'verbose' in args else None,
                        None if args.output_format == 'table' else 'stderr')

    context = analyzer_context.get_context()
    working_analyzers, errored = analyzer_types.check_supported_analyzers(
        args.analyzers, context)

    analyzer_environment = env.extend(context.path_env_extra,
                                      context.ld_lib_path_extra)

    analyzer_config_map = analyzer_types.build_config_handlers(
        args, context, working_analyzers)

    def uglify(text):
        """
        The csv and json output formats use this non-human-readable header
        string: no CamelCase and no spaces.
        """
        return text.lower().replace(' ', '_')

    # List available checker profiles.
    if 'profile' in args and args.profile == 'list':
        if 'details' in args:
            header = ['Profile name', 'Description']
            rows = context.available_profiles.items()
        else:
            header = ['Profile name']
            rows = [(key, "") for key in context.available_profiles.keys()]

        if args.output_format in ['csv', 'json']:
            header = list(map(uglify, header))

        print(output_formatters.twodim_to_str(args.output_format, header,
                                              rows))
        return

    # List checker config options.
    if 'checker_config' in args:
        if 'details' in args:
            header = ['Option', 'Description']
        else:
            header = ['Option']

        if args.output_format in ['csv', 'json']:
            header = list(map(uglify, header))

        rows = []
        for analyzer in working_analyzers:
            config_handler = analyzer_config_map.get(analyzer)
            analyzer_class = analyzer_types.supported_analyzers[analyzer]

            configs = analyzer_class.get_checker_config(
                config_handler, analyzer_environment)
            rows.extend(
                (':'.join((analyzer, c[0])),
                 c[1]) if 'details' in args else (':'.join((analyzer, c[0])), )
                for c in configs)

        print(output_formatters.twodim_to_str(args.output_format, header,
                                              rows))
        return

    # List available checkers.
    if 'details' in args:
        header = ['Enabled', 'Name', 'Analyzer', 'Severity', 'Description']
    else:
        header = ['Name']

    if args.output_format in ['csv', 'json']:
        header = list(map(uglify, header))

    rows = []
    for analyzer in working_analyzers:
        config_handler = analyzer_config_map.get(analyzer)
        analyzer_class = analyzer_types.supported_analyzers[analyzer]

        checkers = analyzer_class.get_analyzer_checkers(
            config_handler, analyzer_environment)
        default_checker_cfg = context.checker_config.get(analyzer +
                                                         '_checkers')

        profile_checkers = None
        if 'profile' in args:
            if args.profile not in context.available_profiles:
                LOG.error("Checker profile '%s' does not exist!", args.profile)
                LOG.error("To list available profiles, use '--profile list'.")
                sys.exit(1)

            profile_checkers = [(args.profile, True)]

        config_handler.initialize_checkers(context.available_profiles,
                                           context.package_root, checkers,
                                           default_checker_cfg,
                                           profile_checkers)

        for checker_name, value in config_handler.checks().items():
            state, description = value

            if state != CheckerState.enabled and 'profile' in args:
                continue

            if state == CheckerState.enabled and 'only_disabled' in args:
                continue
            elif state != CheckerState.enabled and 'only_enabled' in args:
                continue

            if args.output_format == 'json':
                state = state == CheckerState.enabled
            else:
                state = '+' if state == CheckerState.enabled else '-'

            if 'details' in args:
                severity = context.severity_map.get(checker_name)
                rows.append(
                    [state, checker_name, analyzer, severity, description])
            else:
                rows.append([checker_name])

        if 'show_warnings' in args:
            severity = context.severity_map.get('clang-diagnostic-')
            for warning in get_warnings(analyzer_environment):
                if 'details' in args:
                    rows.append(['', warning, '-', severity, '-'])
                else:
                    rows.append([warning])

    if rows:
        print(output_formatters.twodim_to_str(args.output_format, header,
                                              rows))

    for analyzer_binary, reason in errored:
        LOG.error(
            "Failed to get checkers for '%s'!"
            "The error reason was: '%s'", analyzer_binary, reason)
        LOG.error("Please check your installation and the "
                  "'config/package_layout.json' file!")
Example #18
0
def init_logger(level, logger_name='system'):
    logger.setup_logger(level)
    global LOG
    LOG = logger.get_logger(logger_name)
Example #19
0
def main(args):
    """
    Execute a wrapper over log-analyze-parse, aka 'check'.
    """

    logger.setup_logger(args.verbose if 'verbose' in args else None)

    def __update_if_key_exists(source, target, key):
        """Append the source Namespace's element with 'key' to target with
        the same key, but only if it exists."""
        if key in source:
            setattr(target, key, getattr(source, key))

    if 'force' in args:
        LOG.warning('"--force" option has been deprecated and it will be '
                    'removed in the future version of CodeChecker. Use the '
                    '"--clean" option to delete analysis reports stored in '
                    'the output directory.')

    # If no output directory is given then the checker results go to a
    # temporary directory. This way the subsequent "quick" checks don't pollute
    # the result list of a previous check. If the detection status function is
    # intended to be used (i.e. by keeping the .plist files) then the output
    # directory has to be provided explicitly.
    is_temp_output = False
    if 'output_dir' in args:
        output_dir = args.output_dir
    else:
        output_dir = tempfile.mkdtemp()
        is_temp_output = True

    output_dir = os.path.abspath(output_dir)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    logfile = None
    is_command = False
    try:
        # --- Step 1.: Perform logging if build command was specified.
        if 'command' in args:
            logfile = tempfile.NamedTemporaryFile().name
            is_command = True

            # Translate the argument list between check and log.
            log_args = argparse.Namespace(command=args.command,
                                          logfile=logfile)
            __update_if_key_exists(args, log_args, 'quiet')
            __update_if_key_exists(args, log_args, 'verbose')

            import codechecker_analyzer.cmd.log as log_module
            LOG.debug("Calling LOG with args:")
            LOG.debug(log_args)

            log_module.main(log_args)
        elif 'logfile' in args:
            logfile = args.logfile

        # --- Step 2.: Perform the analysis.
        if not os.path.exists(logfile):
            raise OSError("The specified logfile '" + logfile + "' does not "
                          "exist.")

        analyze_args = argparse.Namespace(logfile=[logfile],
                                          output_path=output_dir,
                                          output_format='plist',
                                          jobs=args.jobs)
        # Some arguments don't have default values.
        # We can't set these keys to None because it would result in an error
        # after the call.
        args_to_update = [
            'quiet',
            'skipfile',
            'analyzers',
            'add_compiler_defaults',
            'clangsa_args_cfg_file',
            'tidy_args_cfg_file',
            'tidy_config',
            'capture_analysis_output',
            'ctu_phases',
            'stats_output',
            'stats_dir',
            'stats_enabled',
            'stats_relevance_threshold',
            'stats_min_sample_count',
            'enable_all',
            'ordered_checkers',  # --enable and --disable.
            'timeout',
            'compile_uniqueing',
            'report_hash',
            'enable_z3',
            'enable_z3_refutation'
        ]
        for key in args_to_update:
            __update_if_key_exists(args, analyze_args, key)
        if 'clean' in args:
            setattr(analyze_args, 'clean', True)
        __update_if_key_exists(args, analyze_args, 'verbose')

        import codechecker_analyzer.cmd.analyze as analyze_module
        LOG.debug("Calling ANALYZE with args:")
        LOG.debug(analyze_args)

        analyze_module.main(analyze_args)

        # --- Step 3.: Print to stdout.
        parse_args = argparse.Namespace(input=[output_dir],
                                        input_format='plist')
        __update_if_key_exists(args, parse_args, 'print_steps')
        __update_if_key_exists(args, parse_args, 'verbose')
        __update_if_key_exists(args, parse_args, 'skipfile')

        import codechecker_analyzer.cmd.parse as parse_module
        LOG.debug("Calling PARSE with args:")
        LOG.debug(parse_args)

        parse_module.main(parse_args)
    except ImportError:
        LOG.error("Check failed: couldn't import a library.")
    except Exception:
        LOG.exception("Running check failed.")
        import traceback
        traceback.print_exc()
    finally:
        if is_temp_output:
            shutil.rmtree(output_dir)
        if is_command:
            os.remove(logfile)

    LOG.debug("Check finished.")
Example #20
0
def main(args):
    """
    Entry point for parsing analysis results and printing them to stdout
    in a human-readable format.
    """

    logger.setup_logger(args.verbose if 'verbose' in args else None)

    context = analyzer_context.get_context()

    # To ensure the help message prints the default folder properly,
    # the 'default' for 'args.input' is a string, not a list.
    # But the loop below needs a list to work on.
    if isinstance(args.input, str):
        args.input = [args.input]

    original_cwd = os.getcwd()

    suppr_handler = None
    if 'suppress' in args:
        __make_handler = False
        if not os.path.isfile(args.suppress):
            if 'create_suppress' in args:
                with open(args.suppress, 'w') as _:
                    # Just create the file.
                    __make_handler = True
                    LOG.info("Will write source-code suppressions to "
                             "suppress file.")
            else:
                LOG.warning("Suppress file '%s' given, but it does not exist"
                            " -- will not suppress anything.", args.suppress)
        else:
            __make_handler = True

        if __make_handler:
            suppr_handler = suppress_handler.\
                GenericSuppressHandler(args.suppress,
                                       'create_suppress' in args)
    elif 'create_suppress' in args:
        LOG.error("Can't use '--export-source-suppress' unless '--suppress "
                  "SUPPRESS_FILE' is also given.")
        sys.exit(2)

    processed_path_hashes = set()

    skip_handler = None
    if 'skipfile' in args:
        with open(args.skipfile, 'r') as skip_file:
            skip_handler = SkipListHandler(skip_file.read())

    trim_path_prefixes = args.trim_path_prefix if \
        'trim_path_prefix' in args else None

    def trim_path_prefixes_handler(source_file):
        """
        Callback that wraps util.trim_path_prefixes to avoid a module
        dependency on plist_to_html.
        """
        return util.trim_path_prefixes(source_file, trim_path_prefixes)

    html_builder = None

    def skip_html_report_data_handler(report_hash, source_file, report_line,
                                      checker_name, diag, files):
        """
        Report handler that skips bugs suppressed by source code comments.
        """
        report = Report(None, diag['path'], files)
        path_hash = get_report_path_hash(report, files)
        if path_hash in processed_path_hashes:
            LOG.debug("Skip report because it is a deduplication of an "
                      "already processed report!")
            LOG.debug("Path hash: %s", path_hash)
            LOG.debug(diag)
            return True

        skip = plist_parser.skip_report(report_hash,
                                        source_file,
                                        report_line,
                                        checker_name,
                                        suppr_handler)
        if skip_handler:
            skip |= skip_handler.should_skip(source_file)

        if not skip:
            processed_path_hashes.add(path_hash)

        return skip

    for input_path in args.input:

        input_path = os.path.abspath(input_path)
        os.chdir(original_cwd)
        LOG.debug("Parsing input argument: '%s'", input_path)

        export = args.export if 'export' in args else None
        if export is not None and export == 'html':
            output_path = os.path.abspath(args.output_path)

            if not html_builder:
                html_builder = \
                    PlistToHtml.HtmlBuilder(context.path_plist_to_html_dist,
                                            context.severity_map)

            LOG.info("Generating html output files:")
            PlistToHtml.parse(input_path,
                              output_path,
                              context.path_plist_to_html_dist,
                              skip_html_report_data_handler,
                              html_builder,
                              trim_path_prefixes_handler)
            continue

        files = []
        metadata_dict = {}
        if os.path.isfile(input_path):
            files.append(input_path)

        elif os.path.isdir(input_path):
            metadata_file = os.path.join(input_path, "metadata.json")
            if os.path.exists(metadata_file):
                metadata_dict = util.load_json_or_empty(metadata_file)
                LOG.debug(metadata_dict)

                if 'working_directory' in metadata_dict:
                    working_dir = metadata_dict['working_directory']
                    try:
                        os.chdir(working_dir)
                    except OSError as oerr:
                        LOG.debug(oerr)
                        LOG.error("Working directory %s is missing.\n"
                                  "Can not parse reports safely.", working_dir)
                        sys.exit(1)

            _, _, file_names = next(os.walk(input_path), ([], [], []))
            files = [os.path.join(input_path, file_name) for file_name
                     in file_names]

        file_change = set()
        file_report_map = defaultdict(list)

        rh = plist_parser.PlistToPlaintextFormatter(suppr_handler,
                                                    skip_handler,
                                                    context.severity_map,
                                                    processed_path_hashes,
                                                    trim_path_prefixes)
        rh.print_steps = 'print_steps' in args

        for file_path in files:
            f_change = parse(file_path, metadata_dict, rh, file_report_map)
            file_change = file_change.union(f_change)

        report_stats = rh.write(file_report_map)
        severity_stats = report_stats.get('severity')
        file_stats = report_stats.get('files')
        reports_stats = report_stats.get('reports')

        print("\n----==== Summary ====----")
        if file_stats:
            vals = [[os.path.basename(k), v] for k, v in
                    dict(file_stats).items()]
            keys = ['Filename', 'Report count']
            table = twodim_to_str('table', keys, vals, 1, True)
            print(table)

        if severity_stats:
            vals = [[k, v] for k, v in dict(severity_stats).items()]
            keys = ['Severity', 'Report count']
            table = twodim_to_str('table', keys, vals, 1, True)
            print(table)

        report_count = reports_stats.get("report_count", 0)
        print("----=================----")
        print("Total number of reports: {}".format(report_count))
        print("----=================----")

        if file_change:
            changed_files = '\n'.join([' - ' + f for f in file_change])
            LOG.warning("The following source file contents changed since the "
                        "latest analysis:\n%s\nMultiple reports were not "
                        "shown and skipped from the statistics. Please "
                        "analyze your project again to update the "
                        "reports!", changed_files)

    os.chdir(original_cwd)

    # Create index.html and statistics.html for the generated html files.
    if html_builder:
        html_builder.create_index_html(args.output_path)
        html_builder.create_statistics_html(args.output_path)

        print('\nTo view statistics in a browser run:\n> firefox {0}'.format(
            os.path.join(args.output_path, 'statistics.html')))

        print('\nTo view the results in a browser run:\n> firefox {0}'.format(
            os.path.join(args.output_path, 'index.html')))
Example #21
def main(args):
    """
    List basic information about the analyzers supported by CodeChecker.
    """
    logger.setup_logger(args.verbose if 'verbose' in args else None)

    context = analyzer_context.get_context()
    working, errored = \
        analyzer_types.check_supported_analyzers(
            analyzer_types.supported_analyzers,
            context)

    if args.dump_config:
        binary = context.analyzer_binaries.get(args.dump_config)

        if args.dump_config == 'clang-tidy':
            subprocess.call([binary, '-dump-config', '-checks=*'])
        elif args.dump_config == 'clangsa':
            # TODO: Not supported by ClangSA yet!
            LOG.warning("'--dump-config clangsa' is not supported yet.")

        return

    if args.output_format not in ['csv', 'json']:
        if 'details' not in args:
            header = ['Name']
        else:
            header = ['Name', 'Path', 'Version']
    else:
        if 'details' not in args:
            header = ['name']
        else:
            header = ['name', 'path', 'version_string']

    rows = []
    for analyzer in working:
        if 'details' not in args:
            rows.append([analyzer])
        else:
            binary = context.analyzer_binaries.get(analyzer)
            try:
                version = subprocess.check_output([binary,
                                                   '--version'])
            except (subprocess.CalledProcessError, OSError):
                version = 'ERROR'

            rows.append([analyzer,
                         binary,
                         version])

    if 'all' in args:
        for analyzer, err_reason in errored:
            if 'details' not in args:
                rows.append([analyzer])
            else:
                rows.append([analyzer,
                             context.analyzer_binaries.get(analyzer),
                             err_reason])

    if len(rows) > 0:
        print(output_formatters.twodim_to_str(args.output_format,
                                              header, rows))
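
The listing relies on output_formatters.twodim_to_str to render the header and rows in the requested format. A rough stand-in for the 'csv' and 'json' branches, using only the standard library (the real formatter also handles aligned 'table' output), might look like this:

import csv
import io
import json

def twodim_to_str_sketch(output_format, header, rows):
    """Tiny stand-in: render header + rows as CSV or JSON."""
    if output_format == 'json':
        return json.dumps([dict(zip(header, row)) for row in rows], indent=2)
    if output_format == 'csv':
        out = io.StringIO()
        writer = csv.writer(out)
        writer.writerow(header)
        writer.writerows(rows)
        return out.getvalue()
    raise ValueError("unsupported format: " + output_format)

print(twodim_to_str_sketch('csv', ['name'], [['clangsa'], ['clang-tidy']]))
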
Example #22
def main(args):
    """
    Perform analysis on the given logfiles and store the results in a machine-
    readable format.
    """
    logger.setup_logger(args.verbose if 'verbose' in args else None)

    if len(args.logfile) != 1:
        LOG.warning("Only one log file can be processed right now!")
        sys.exit(1)

    args.output_path = os.path.abspath(args.output_path)
    if os.path.exists(args.output_path) and \
            not os.path.isdir(args.output_path):
        LOG.error("The given output path is not a directory: " +
                  args.output_path)
        sys.exit(1)

    if 'enable_all' in args:
        LOG.info("'--enable-all' was supplied for this analysis.")

    # We clear the output directory in the following cases.
    ctu_dir = os.path.join(args.output_path, 'ctu-dir')
    if 'ctu_phases' in args and args.ctu_phases[0] and \
            os.path.isdir(ctu_dir):
        # Clear the CTU-dir if the user turned on the collection phase.
        LOG.debug("Previous CTU contents have been deleted.")
        shutil.rmtree(ctu_dir)

    if 'clean' in args and os.path.isdir(args.output_path):
        LOG.info("Previous analysis results in '%s' have been removed, "
                 "overwriting with current result", args.output_path)
        shutil.rmtree(args.output_path)

    if not os.path.exists(args.output_path):
        os.makedirs(args.output_path)

    LOG.debug("args: " + str(args))
    LOG.debug("Output will be stored to: '" + args.output_path + "'")

    # Process the skip list if present.
    skip_handler = __get_skip_handler(args)

    # Enable alpha uniqueing by default if ctu analysis is used.
    if 'none' in args.compile_uniqueing and 'ctu_phases' in args:
        args.compile_uniqueing = "alpha"

    compiler_info_file = None
    if 'compiler_info_file' in args:
        LOG.debug("Compiler info is read from: %s", args.compiler_info_file)
        if not os.path.exists(args.compiler_info_file):
            LOG.error("Compiler info file %s does not exist",
                      args.compiler_info_file)
            sys.exit(1)
        compiler_info_file = args.compiler_info_file

    report_dir = args.output_path

    # Skip list is applied only in pre-analysis
    # if --ctu-collect or --stats-collect was called explicitly.
    pre_analysis_skip_handler = None
    if 'ctu_phases' in args:
        ctu_collect = args.ctu_phases[0]
        ctu_analyze = args.ctu_phases[1]
        if ((ctu_collect and not ctu_analyze)
                or ("stats_output" in args and args.stats_output)):
            pre_analysis_skip_handler = skip_handler

    # Parse the JSON CCDBs and retrieve the compile commands.
    actions = []
    for log_file in args.logfile:
        if not os.path.exists(log_file):
            LOG.error("The specified logfile '%s' does not exist!",
                      log_file)
            continue

        actions += log_parser.parse_unique_log(
            load_json_or_empty(log_file),
            report_dir,
            args.compile_uniqueing,
            compiler_info_file,
            args.keep_gcc_include_fixed,
            skip_handler,
            pre_analysis_skip_handler)

    if not actions:
        LOG.info("No analysis is required.\nThere were no compilation "
                 "commands in the provided compilation database or "
                 "all of them were skipped.")
        sys.exit(0)

    uniqued_compilation_db_file = os.path.join(
        args.output_path, "unique_compile_commands.json")
    with open(uniqued_compilation_db_file, 'w') as f:
        json.dump(actions, f,
                  cls=log_parser.CompileCommandEncoder)

    context = analyzer_context.get_context()
    metadata = {'action_num': len(actions),
                'command': sys.argv,
                'versions': {
                    'codechecker': "{0} ({1})".format(
                        context.package_git_tag,
                        context.package_git_hash)},
                'working_directory': os.getcwd(),
                'output_path': args.output_path,
                'result_source_files': {}}

    if 'name' in args:
        metadata['name'] = args.name

    # Update metadata dictionary with old values.
    metadata_file = os.path.join(args.output_path, 'metadata.json')
    if os.path.exists(metadata_file):
        metadata_prev = load_json_or_empty(metadata_file)
        metadata['result_source_files'] = \
            metadata_prev['result_source_files']

    analyzer.perform_analysis(args, skip_handler, context, actions, metadata)

    __update_skip_file(args)

    LOG.debug("Analysis metadata written to '%s'", metadata_file)
    with open(metadata_file, 'w') as metafile:
        json.dump(metadata, metafile)

    # WARN: the store command will search for this file!
    compile_cmd_json = os.path.join(args.output_path, 'compile_cmd.json')
    try:
        source = os.path.abspath(args.logfile[0])
        target = os.path.abspath(compile_cmd_json)

        if source != target:
            shutil.copyfile(source, target)
    except shutil.Error:
        LOG.debug("Compilation database JSON file is the same.")
    except Exception:
        LOG.debug("Copying compilation database JSON file failed.")

    try:
        from codechecker_analyzer import analyzer_statistics
        analyzer_statistics.collect(metadata, "analyze")
    except Exception:
        pass
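
The analyze command consumes JSON compilation databases (the compile_commands.json format, with 'directory', 'command' and 'file' keys per action). A minimal example of creating and loading such a database with the standard library only:

import json

ccdb_entry = {
    "directory": "/home/user/myproject",
    "command": "gcc -c main.c -o main.o",
    "file": "main.c"
}

with open("compile_commands.json", "w", encoding="utf-8") as ccdb:
    json.dump([ccdb_entry], ccdb, indent=2)

with open("compile_commands.json", encoding="utf-8") as ccdb:
    actions = json.load(ccdb)
print(len(actions), "compile action(s) loaded")
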
Example #23
def main(args):
    """
    List the checkers available in the specified (or all supported) analyzers
    along with their description or enabled status in various formats.
    """

    logger.setup_logger(args.verbose if 'verbose' in args else None)

    # If nothing is set, list checkers for all supported analyzers.
    analyzers = args.analyzers \
        if 'analyzers' in args \
        else analyzer_types.supported_analyzers

    context = analyzer_context.get_context()
    working, errored = analyzer_types.check_supported_analyzers(analyzers,
                                                                context)

    analyzer_environment = get_check_env(context.path_env_extra,
                                         context.ld_lib_path_extra)

    analyzer_config_map = analyzer_types.build_config_handlers(args,
                                                               context,
                                                               working)
    # List available checker profiles.
    if 'profile' in args and args.profile == 'list':
        if 'details' not in args:
            if args.output_format not in ['csv', 'json']:
                header = ['Profile name']
            else:
                header = ['profile_name']
        else:
            if args.output_format not in ['csv', 'json']:
                header = ['Profile name', 'Description']
            else:
                header = ['profile_name', 'description']

        rows = []
        for (profile, description) in context.available_profiles.items():
            if 'details' not in args:
                rows.append([profile])
            else:
                rows.append([profile, description])

        print(output_formatters.twodim_to_str(args.output_format,
                                              header, rows))
        return

    # Use different, human-friendly headers depending on the output format.
    if 'details' not in args:
        if args.output_format not in ['csv', 'json']:
            header = ['Name']
        else:
            header = ['name']
    else:
        if args.output_format not in ['csv', 'json']:
            header = ['', 'Name', 'Analyzer', 'Severity', 'Description']
        else:
            header = ['enabled', 'name', 'analyzer', 'severity', 'description']

    rows = []
    for analyzer in working:
        config_handler = analyzer_config_map.get(analyzer)
        analyzer_class = \
            analyzer_types.supported_analyzers[analyzer]

        checkers = analyzer_class.get_analyzer_checkers(config_handler,
                                                        analyzer_environment)
        default_checker_cfg = context.checker_config.get(
            analyzer + '_checkers')

        profile_checkers = None
        if 'profile' in args:
            if args.profile not in context.available_profiles:
                LOG.error("Checker profile '%s' does not exist!",
                          args.profile)
                LOG.error("To list available profiles, use '--profile list'.")
                sys.exit(1)

            profile_checkers = [(args.profile, True)]

        config_handler.initialize_checkers(context.available_profiles,
                                           context.package_root,
                                           checkers,
                                           default_checker_cfg,
                                           profile_checkers)

        for checker_name, value in config_handler.checks().items():
            enabled, description = value

            if not enabled and 'profile' in args:
                continue

            if enabled and 'only_disabled' in args:
                continue
            elif not enabled and 'only_enabled' in args:
                continue

            if args.output_format != 'json':
                enabled = '+' if enabled else '-'

            if 'details' not in args:
                rows.append([checker_name])
            else:
                severity = context.severity_map.get(checker_name)
                rows.append([enabled, checker_name, analyzer,
                             severity, description])

    if len(rows) > 0:
        print(output_formatters.twodim_to_str(args.output_format,
                                              header, rows))

    for analyzer_binary, reason in errored:
        LOG.error("Failed to get checkers for '%s'! "
                  "The error reason was: '%s'", analyzer_binary, reason)
        LOG.error("Please check your installation and the "
                  "'config/package_layout.json' file!")
Example #24
def main(args):
    """
    List basic information about the analyzers supported by CodeChecker.
    """
    # If the given output format is not 'table', redirect the logger's
    # output to stderr.
    stream = None
    if 'output_format' in args and args.output_format != 'table':
        stream = 'stderr'

    logger.setup_logger(args.verbose if 'verbose' in args else None, stream)

    context = analyzer_context.get_context()
    working_analyzers, errored = \
        analyzer_types.check_supported_analyzers(
            analyzer_types.supported_analyzers,
            context)

    if args.dump_config:
        binary = context.analyzer_binaries.get(args.dump_config)

        if args.dump_config == 'clang-tidy':
            subprocess.call([binary, '-dump-config', '-checks=*'],
                            encoding="utf-8", errors="ignore")
        elif args.dump_config == 'clangsa':
            ret = subprocess.call([binary,
                                   '-cc1',
                                   '-analyzer-checker-option-help',
                                   '-analyzer-checker-option-help-alpha'],
                                  stderr=subprocess.PIPE,
                                  encoding="utf-8",
                                  errors="ignore")

            if ret:
                # This flag is supported from Clang 9.
                LOG.warning("'--dump-config clangsa' is not supported yet. "
                            "Please make sure that you are using Clang 9 or "
                            "newer.")

        return

    analyzer_environment = env.extend(context.path_env_extra,
                                      context.ld_lib_path_extra)
    analyzer_config_map = analyzer_types.build_config_handlers(
        args, context, working_analyzers)

    def uglify(text):
        """
        CSV and JSON output use machine-friendly header strings:
        no CamelCase and no spaces.
        """
        return text.lower().replace(' ', '_')

    if 'analyzer_config' in args:
        if 'details' in args:
            header = ['Option', 'Description']
        else:
            header = ['Option']

        if args.output_format in ['csv', 'json']:
            header = list(map(uglify, header))

        analyzer = args.analyzer_config
        config_handler = analyzer_config_map.get(analyzer)
        analyzer_class = analyzer_types.supported_analyzers[analyzer]

        configs = analyzer_class.get_analyzer_config(config_handler,
                                                     analyzer_environment)
        rows = [(':'.join((analyzer, c[0])), c[1]) if 'details' in args
                else (':'.join((analyzer, c[0])),) for c in configs]

        print(output_formatters.twodim_to_str(args.output_format,
                                              header, rows))

        return

    if 'details' in args:
        header = ['Name', 'Path', 'Version']
    else:
        header = ['Name']

    if args.output_format in ['csv', 'json']:
        header = list(map(uglify, header))

    rows = []
    for analyzer in working_analyzers:
        if 'details' not in args:
            rows.append([analyzer])
        else:
            binary = context.analyzer_binaries.get(analyzer)
            try:
                version = subprocess.check_output(
                    [binary, '--version'], encoding="utf-8", errors="ignore")
            except (subprocess.CalledProcessError, OSError):
                version = 'ERROR'

            rows.append([analyzer,
                         binary,
                         version])

    if 'all' in args:
        for analyzer, err_reason in errored:
            if 'details' not in args:
                rows.append([analyzer])
            else:
                rows.append([analyzer,
                             context.analyzer_binaries.get(analyzer),
                             err_reason])

    if rows:
        print(output_formatters.twodim_to_str(args.output_format,
                                              header, rows))
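
The uglify helper above turns the human-readable headers into the machine-friendly keys used for CSV and JSON output. For example:

def uglify(text):
    return text.lower().replace(' ', '_')

assert list(map(uglify, ['Name', 'Path', 'Version'])) == \
    ['name', 'path', 'version']
assert uglify('Profile name') == 'profile_name'
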
Example #25
def init_logger(level, stream=None, logger_name='system'):
    logger.setup_logger(level, stream)
    global LOG
    LOG = logger.get_logger(logger_name)
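
init_logger configures logging once and then publishes a module-global LOG object. A minimal stand-in using only the standard logging module (the real codechecker_common.logger adds its own formats and verbosity handling):

import logging

LOG = None

def init_logger(level, logger_name='system'):
    # Configure the root handler once and expose a module-global logger.
    logging.basicConfig(level=level)
    global LOG
    LOG = logging.getLogger(logger_name)

init_logger(logging.INFO)
LOG.info("logger ready")
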
Example #26
File: cli.py  Project: uhziel/codechecker
def main():
    """
    CodeChecker main command line.
    """
    os.environ['CC_LIB_DIR'] = os.path.dirname(
        os.path.dirname(os.path.realpath(__file__)))

    data_files_dir_path = get_data_files_dir_path()
    os.environ['CC_DATA_FILES_DIR'] = data_files_dir_path

    # Load the available CodeChecker subcommands.
    # This list is generated dynamically by scripts/build_package.py, and is
    # always meant to be available alongside the CodeChecker.py.
    commands_cfg = os.path.join(data_files_dir_path, "config", "commands.json")

    with open(commands_cfg, encoding="utf-8", errors="ignore") as cfg_file:
        subcommands = json.load(cfg_file)

    def signal_handler(signum, frame):
        """
        Without this handler the PostgreSQL server does not terminate
        on a signal.
        """
        sys.exit(128 + signum)

    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)

    try:
        parser = argparse.ArgumentParser(
            prog="CodeChecker",
            formatter_class=argparse.RawDescriptionHelpFormatter,
            description="""Run the CodeChecker source code analyzer framework.
Please specify a subcommand to access individual features.""",
            epilog="""Example scenario: Analyzing, and storing results
------------------------------------------------
Start the server where the results will be stored and can be viewed
after the analysis is done:
    CodeChecker server

Analyze a project with default settings:
    CodeChecker check -b "cd ~/myproject && make" -o "~/results"

Store the analyzer results to the server:
    CodeChecker store "~/results" -n myproject

The results can be viewed:
 * In a web browser: http://localhost:8001
 * In the command line:
    CodeChecker cmd results myproject

Example scenario: Analyzing, and printing results to Terminal (no storage)
--------------------------------------------------------------------------
In this case, no database is used, and the results are printed on the standard
output.

    CodeChecker check -b "cd ~/myproject && make\" """)

        subparsers = parser.add_subparsers(help='commands')

        if subcommands:
            # Try to check if the user has already given us a subcommand to
            # execute. If so, don't load every available part of CodeChecker,
            # to ensure a more optimised run.
            if len(sys.argv) > 1:
                first_command = sys.argv[1]
                if first_command in subcommands:

                    # Consider only the given command as an available one.
                    subcommands = {first_command: subcommands[first_command]}

            lib_dir_path = os.environ.get('CC_LIB_DIR')
            for subcommand in subcommands:
                try:
                    add_subcommand(subparsers, subcommand,
                                   subcommands[subcommand], lib_dir_path)
                except (IOError, ImportError):
                    print("Couldn't import module for subcommand '" +
                          subcommand + "'... ignoring.")
                    import traceback
                    traceback.print_exc(file=sys.stdout)

        args = parser.parse_args()

        # Call the handler function to process configuration files. If any
        # configuration options are available in one of the given files, then
        # extend the system argument list with these options and try to parse
        # the argument list again to validate it.
        if 'func_process_config_file' in args:
            # Import logger module here after 'CC_DATA_FILES_DIR' environment
            # variable is set, so 'setup_logger' will be able to initialize
            # the logger properly.
            from codechecker_common import logger
            logger.setup_logger(args.verbose if 'verbose' in args else None,
                                'stderr')

            called_sub_command = None
            if len(sys.argv) > 1:
                called_sub_command = sys.argv[1]

            cfg_args = args.func_process_config_file(args, called_sub_command)
            if cfg_args:
                # Expand environment variables in the arguments.
                cfg_args = [os.path.expandvars(cfg) for cfg in cfg_args]

                # Replace --config option with the options inside the config
                # file.
                cfg_idx = sys.argv.index("--config")
                sys.argv = sys.argv[:cfg_idx] + cfg_args + \
                    sys.argv[cfg_idx + 2:]

                args = parser.parse_args()

        if 'func' in args:
            sys.exit(args.func(args))
        else:
            # Print the help message of the current command if no subcommand
            # is given.
            sys.argv.append("--help")
            args = parser.parse_args()
            args.func(args)

    except KeyboardInterrupt as kb_err:
        print(str(kb_err))
        print("Interrupted by user...")
        sys.exit(1)

    # Handle all exceptions, but print the stacktrace. It is needed for
    # atexit: atexit does not work correctly when an unhandled exception
    # occurs, so in that case the servers would be left running when the
    # script exited.
    except Exception:
        import traceback
        traceback.print_exc(file=sys.stdout)
        sys.exit(1)
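
The CLI entry point above registers every subcommand as an argparse subparser and installs signal handlers that exit with the conventional 128 + signum status. The skeleton of that wiring, reduced to the standard library:

import argparse
import signal
import sys

def signal_handler(signum, frame):
    # Conventional shell exit status for "terminated by signal".
    sys.exit(128 + signum)

signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)

parser = argparse.ArgumentParser(prog="codechecker-sketch")
subparsers = parser.add_subparsers(help='commands')

version_cmd = subparsers.add_parser('version', help='print version')
version_cmd.set_defaults(func=lambda args: print("0.0.0"))

args = parser.parse_args(['version'])
if 'func' in args:          # argparse.Namespace supports the 'in' operator.
    args.func(args)
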
Example #27
def main(args):
    """
    Entry point for parsing some analysis results and printing them to
    stdout in a human-readable format.
    """

    logger.setup_logger(args.verbose if 'verbose' in args else None)

    context = analyzer_context.get_context()

    # To ensure the help message prints the default folder properly,
    # the 'default' for 'args.input' is a string, not a list.
    # But we need lists for the foreach here to work.
    if isinstance(args.input, str):
        args.input = [args.input]

    original_cwd = os.getcwd()

    suppr_handler = None
    if 'suppress' in args:
        __make_handler = False
        if not os.path.isfile(args.suppress):
            if 'create_suppress' in args:
                with open(args.suppress, 'w') as _:
                    # Just create the file.
                    __make_handler = True
                    LOG.info("Will write source-code suppressions to "
                             "suppress file: %s", args.suppress)
            else:
                LOG.warning("Suppress file '%s' given, but it does not exist"
                            " -- will not suppress anything.", args.suppress)
        else:
            __make_handler = True

        if __make_handler:
            suppr_handler = suppress_handler.\
                GenericSuppressHandler(args.suppress,
                                       'create_suppress' in args)
    elif 'create_suppress' in args:
        LOG.error("Can't use '--export-source-suppress' unless '--suppress "
                  "SUPPRESS_FILE' is also given.")
        sys.exit(2)

    processed_path_hashes = set()

    skip_handler = None
    if 'skipfile' in args:
        with open(args.skipfile, 'r') as skip_file:
            skip_handler = SkipListHandler(skip_file.read())

    trim_path_prefixes = args.trim_path_prefix if \
        'trim_path_prefix' in args else None

    def trim_path_prefixes_handler(source_file):
        """
        Callback to util.trim_path_prefixes to avoid a module dependency
        on plist_to_html.
        """
        return util.trim_path_prefixes(source_file, trim_path_prefixes)

    html_builder = None

    def skip_html_report_data_handler(report_hash, source_file, report_line,
                                      checker_name, diag, files):
        """
        Report handler that skips bugs suppressed by source code comments.
        """
        report = Report(None, diag['path'], files)
        path_hash = get_report_path_hash(report, files)
        if path_hash in processed_path_hashes:
            LOG.debug("Skip report because it is a deduplication of an "
                      "already processed report!")
            LOG.debug("Path hash: %s", path_hash)
            LOG.debug(diag)
            return True

        skip = skip_report(report_hash,
                           source_file,
                           report_line,
                           checker_name,
                           suppr_handler)
        if skip_handler:
            skip |= skip_handler.should_skip(source_file)

        if not skip:
            processed_path_hashes.add(path_hash)

        return skip

    file_change = set()
    severity_stats = defaultdict(int)
    file_stats = defaultdict(int)
    report_count = 0

    for input_path in args.input:

        input_path = os.path.abspath(input_path)
        os.chdir(original_cwd)
        LOG.debug("Parsing input argument: '%s'", input_path)

        export = args.export if 'export' in args else None
        if export is not None and export == 'html':
            output_path = os.path.abspath(args.output_path)

            if not html_builder:
                html_builder = \
                    PlistToHtml.HtmlBuilder(context.path_plist_to_html_dist,
                                            context.severity_map)

            LOG.info("Generating html output files:")
            PlistToHtml.parse(input_path,
                              output_path,
                              context.path_plist_to_html_dist,
                              skip_html_report_data_handler,
                              html_builder,
                              trim_path_prefixes_handler)
            continue

        files = []
        metadata_dict = {}
        if os.path.isfile(input_path):
            files.append(input_path)

        elif os.path.isdir(input_path):
            metadata_file = os.path.join(input_path, "metadata.json")
            if os.path.exists(metadata_file):
                metadata_dict = util.load_json_or_empty(metadata_file)
                LOG.debug(metadata_dict)

                if 'working_directory' in metadata_dict:
                    working_dir = metadata_dict['working_directory']
                    try:
                        os.chdir(working_dir)
                    except OSError as oerr:
                        LOG.debug(oerr)
                        LOG.error("Working directory %s is missing.\n"
                                  "Can not parse reports safely.", working_dir)
                        sys.exit(1)

            _, _, file_names = next(os.walk(input_path), ([], [], []))
            files = [os.path.join(input_path, file_name) for file_name
                     in file_names]

        file_report_map = defaultdict(list)

        rh = PlistToPlaintextFormatter(suppr_handler,
                                       skip_handler,
                                       context.severity_map,
                                       processed_path_hashes,
                                       trim_path_prefixes)
        rh.print_steps = 'print_steps' in args

        for file_path in files:
            f_change = parse(file_path, metadata_dict, rh, file_report_map)
            file_change = file_change.union(f_change)

        report_stats = rh.write(file_report_map)
        sev_stats = report_stats.get('severity')
        for severity in sev_stats:
            severity_stats[severity] += sev_stats[severity]

        f_stats = report_stats.get('files')
        for file_path in f_stats:
            file_stats[file_path] += f_stats[file_path]

        rep_stats = report_stats.get('reports')
        report_count += rep_stats.get("report_count", 0)

    print("\n----==== Summary ====----")
    if file_stats:
        vals = [[os.path.basename(k), v] for k, v in
                dict(file_stats).items()]
        keys = ['Filename', 'Report count']
        table = twodim_to_str('table', keys, vals, 1, True)
        print(table)

    if severity_stats:
        vals = [[k, v] for k, v in dict(severity_stats).items()]
        keys = ['Severity', 'Report count']
        table = twodim_to_str('table', keys, vals, 1, True)
        print(table)

    print("----=================----")
    print("Total number of reports: {}".format(report_count))
    print("----=================----")

    if file_change:
        changed_files = '\n'.join([' - ' + f for f in file_change])
        LOG.warning("The following source file contents changed since the "
                    "latest analysis:\n%s\nMultiple reports were not "
                    "shown and skipped from the statistics. Please "
                    "analyze your project again to update the "
                    "reports!", changed_files)

    os.chdir(original_cwd)

    # Create index.html and statistics.html for the generated html files.
    if html_builder:
        html_builder.create_index_html(args.output_path)
        html_builder.create_statistics_html(args.output_path)

        print('\nTo view statistics in a browser run:\n> firefox {0}'.format(
            os.path.join(args.output_path, 'statistics.html')))

        print('\nTo view the results in a browser run:\n> firefox {0}'.format(
            os.path.join(args.output_path, 'index.html')))
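
Per-input statistics returned by each write() call are merged into overall counters with defaultdict(int), so missing keys start at zero. The pattern in isolation, with sample data:

from collections import defaultdict

severity_stats = defaultdict(int)

# Two per-input severity dictionaries, e.g. from two report directories.
for per_input in [{'HIGH': 2, 'LOW': 1}, {'HIGH': 1}]:
    for severity, count in per_input.items():
        severity_stats[severity] += count

print(dict(severity_stats))   # {'HIGH': 3, 'LOW': 1}
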
Example #28
def main(args):
    """
    Entry point for parsing some analysis results and printing them to
    stdout in a human-readable format.
    """

    logger.setup_logger(args.verbose if 'verbose' in args else None)

    try:
        cmd_config.check_config_file(args)
    except FileNotFoundError as fnerr:
        LOG.error(fnerr)
        sys.exit(1)

    export = args.export if 'export' in args else None
    if export == 'html' and 'output_path' not in args:
        LOG.error("Argument --export not allowed without argument --output "
                  "when exporting to HTML.")
        sys.exit(1)

    if export == 'gerrit' and not gerrit.mandatory_env_var_is_set():
        sys.exit(1)

    context = analyzer_context.get_context()

    # To ensure the help message prints the default folder properly,
    # the 'default' for 'args.input' is a string, not a list.
    # But we need lists for the foreach here to work.
    if isinstance(args.input, str):
        args.input = [args.input]

    original_cwd = os.getcwd()

    src_comment_status_filter = args.review_status

    suppr_handler = None
    if 'suppress' in args:
        __make_handler = False
        if not os.path.isfile(args.suppress):
            if 'create_suppress' in args:
                with open(args.suppress,
                          'w',
                          encoding='utf-8',
                          errors='ignore') as _:
                    # Just create the file.
                    __make_handler = True
                    LOG.info(
                        "Will write source-code suppressions to "
                        "suppress file: %s", args.suppress)
            else:
                LOG.warning(
                    "Suppress file '%s' given, but it does not exist"
                    " -- will not suppress anything.", args.suppress)
        else:
            __make_handler = True

        if __make_handler:
            suppr_handler = suppress_handler.\
                GenericSuppressHandler(args.suppress,
                                       'create_suppress' in args,
                                       src_comment_status_filter)
    elif 'create_suppress' in args:
        LOG.error("Can't use '--export-source-suppress' unless '--suppress "
                  "SUPPRESS_FILE' is also given.")
        sys.exit(2)

    processed_path_hashes = set()

    skip_handler = None
    if 'skipfile' in args:
        with open(args.skipfile, 'r', encoding='utf-8',
                  errors='ignore') as skip_file:
            skip_handler = SkipListHandler(skip_file.read())

    trim_path_prefixes = args.trim_path_prefix if \
        'trim_path_prefix' in args else None

    if export:
        if export not in EXPORT_TYPES:
            LOG.error(f"Unknown export format: {export}")
            return

        # The HTML part will be handled separately below.
        if export != 'html':
            try:
                res = parse_convert_reports(args.input, export,
                                            context.severity_map,
                                            trim_path_prefixes)
                if 'output_path' in args:
                    output_path = os.path.abspath(args.output_path)

                    if not os.path.exists(output_path):
                        os.mkdir(output_path)

                    reports_json = os.path.join(output_path, 'reports.json')
                    with open(reports_json,
                              mode='w',
                              encoding='utf-8',
                              errors="ignore") as output_f:
                        output_f.write(json.dumps(res))

                return print(json.dumps(res))
            except Exception as ex:
                LOG.error(ex)
                sys.exit(1)

    def trim_path_prefixes_handler(source_file):
        """
        Callback to util.trim_path_prefixes to avoid a module dependency
        on plist_to_html.
        """
        return util.trim_path_prefixes(source_file, trim_path_prefixes)

    html_builder = None

    def skip_html_report_data_handler(report_hash, source_file, report_line,
                                      checker_name, diag, files):
        """
        Report handler that skips bugs suppressed by source code comments.
        Returns a tuple: the first element decides whether the report should
        be skipped, the second is a list of source code comments related to
        the report.
        """
        files_dict = {k: v for k, v in enumerate(files)}
        report = Report({'check_name': checker_name},
                        diag['path'],
                        files_dict,
                        metadata=None)
        path_hash = get_report_path_hash(report)
        if path_hash in processed_path_hashes:
            LOG.debug("Skip report because it is a deduplication of an "
                      "already processed report!")
            LOG.debug("Path hash: %s", path_hash)
            LOG.debug(diag)
            return True, []

        skip, source_code_comments = skip_report(report_hash, source_file,
                                                 report_line, checker_name,
                                                 suppr_handler,
                                                 src_comment_status_filter)

        if skip_handler:
            skip |= skip_handler.should_skip(source_file)

        if not skip:
            processed_path_hashes.add(path_hash)

        return skip, source_code_comments

    file_change = set()
    severity_stats = defaultdict(int)
    file_stats = defaultdict(int)
    report_count = 0

    for input_path in args.input:
        input_path = os.path.abspath(input_path)
        os.chdir(original_cwd)
        LOG.debug("Parsing input argument: '%s'", input_path)

        if export == 'html':
            output_path = os.path.abspath(args.output_path)

            if not html_builder:
                html_builder = \
                    PlistToHtml.HtmlBuilder(context.path_plist_to_html_dist,
                                            context.severity_map)

            LOG.info("Generating html output files:")
            PlistToHtml.parse(input_path, output_path,
                              context.path_plist_to_html_dist,
                              skip_html_report_data_handler, html_builder,
                              trim_path_prefixes_handler)
            continue

        files = []
        metadata_dict = {}
        if os.path.isfile(input_path):
            files.append(input_path)

        elif os.path.isdir(input_path):
            metadata_file = os.path.join(input_path, "metadata.json")
            if os.path.exists(metadata_file):
                metadata_dict = util.load_json_or_empty(metadata_file)
                LOG.debug(metadata_dict)

                if 'working_directory' in metadata_dict:
                    working_dir = metadata_dict['working_directory']
                    try:
                        os.chdir(working_dir)
                    except OSError as oerr:
                        LOG.debug(oerr)
                        LOG.error(
                            "Working directory %s is missing.\n"
                            "Can not parse reports safely.", working_dir)
                        sys.exit(1)

            _, _, file_names = next(os.walk(input_path), ([], [], []))
            files = [
                os.path.join(input_path, file_name) for file_name in file_names
            ]

        file_report_map = defaultdict(list)

        plist_pltf = PlistToPlaintextFormatter(suppr_handler, skip_handler,
                                               context.severity_map,
                                               processed_path_hashes,
                                               trim_path_prefixes,
                                               src_comment_status_filter)
        plist_pltf.print_steps = 'print_steps' in args

        for file_path in files:
            f_change = parse_with_plt_formatter(file_path, metadata_dict,
                                                plist_pltf, file_report_map)
            file_change = file_change.union(f_change)

        report_stats = plist_pltf.write(file_report_map)
        sev_stats = report_stats.get('severity')
        for severity in sev_stats:
            severity_stats[severity] += sev_stats[severity]

        f_stats = report_stats.get('files')
        for file_path in f_stats:
            file_stats[file_path] += f_stats[file_path]

        rep_stats = report_stats.get('reports')
        report_count += rep_stats.get("report_count", 0)

    # Create index.html and statistics.html for the generated html files.
    if html_builder:
        html_builder.create_index_html(args.output_path)
        html_builder.create_statistics_html(args.output_path)

        print('\nTo view statistics in a browser run:\n> firefox {0}'.format(
            os.path.join(args.output_path, 'statistics.html')))

        print('\nTo view the results in a browser run:\n> firefox {0}'.format(
            os.path.join(args.output_path, 'index.html')))
    else:
        print("\n----==== Summary ====----")
        if file_stats:
            vals = [[os.path.basename(k), v]
                    for k, v in dict(file_stats).items()]
            vals.sort(key=itemgetter(0))
            keys = ['Filename', 'Report count']
            table = twodim.to_str('table', keys, vals, 1, True)
            print(table)

        if severity_stats:
            vals = [[k, v] for k, v in dict(severity_stats).items()]
            vals.sort(key=itemgetter(0))
            keys = ['Severity', 'Report count']
            table = twodim.to_str('table', keys, vals, 1, True)
            print(table)

        print("----=================----")
        print("Total number of reports: {}".format(report_count))
        print("----=================----")

    if file_change:
        changed_files = '\n'.join([' - ' + f for f in file_change])
        LOG.warning(
            "The following source file contents changed since the "
            "latest analysis:\n%s\nMultiple reports were not "
            "shown and skipped from the statistics. Please "
            "analyze your project again to update the "
            "reports!", changed_files)

    os.chdir(original_cwd)

    if report_count != 0:
        sys.exit(2)
Example #29
def init_logger(level, logger_name='system'):
    logger.setup_logger(level)
    global LOG
    LOG = logger.get_logger(logger_name)
Example #30
def main(args):
    """
    List basic information about the analyzers supported by CodeChecker.
    """
    # If the given output format is not 'table', redirect the logger's
    # output to stderr.
    stream = None
    if 'output_format' in args and args.output_format != 'table':
        stream = 'stderr'

    logger.setup_logger(args.verbose if 'verbose' in args else None, stream)

    context = analyzer_context.get_context()
    working, errored = \
        analyzer_types.check_supported_analyzers(
            analyzer_types.supported_analyzers,
            context)

    if args.dump_config:
        binary = context.analyzer_binaries.get(args.dump_config)

        if args.dump_config == 'clang-tidy':
            subprocess.call([binary, '-dump-config', '-checks=*'])
        elif args.dump_config == 'clangsa':
            ret = subprocess.call([binary, '-cc1',
                                   '-analyzer-checker-option-help',
                                   '-analyzer-checker-option-help-alpha'],
                                  stderr=subprocess.PIPE)

            if ret:
                # This flag is supported from Clang 9.
                LOG.warning("'--dump-config clangsa' is not supported yet. "
                            "Please make sure that you are using Clang 9 or "
                            "newer.")

        return

    if args.output_format not in ['csv', 'json']:
        if 'details' not in args:
            header = ['Name']
        else:
            header = ['Name', 'Path', 'Version']
    else:
        if 'details' not in args:
            header = ['name']
        else:
            header = ['name', 'path', 'version_string']

    rows = []
    for analyzer in working:
        if 'details' not in args:
            rows.append([analyzer])
        else:
            binary = context.analyzer_binaries.get(analyzer)
            try:
                version = subprocess.check_output([binary,
                                                   '--version'])
            except (subprocess.CalledProcessError, OSError):
                version = 'ERROR'

            rows.append([analyzer,
                         binary,
                         version])

    if 'all' in args:
        for analyzer, err_reason in errored:
            if 'details' not in args:
                rows.append([analyzer])
            else:
                rows.append([analyzer,
                             context.analyzer_binaries.get(analyzer),
                             err_reason])

    if rows:
        print(output_formatters.twodim_to_str(args.output_format,
                                              header, rows))
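
The clangsa branch above decides whether '--dump-config clangsa' can work by running the binary and checking the return code of subprocess.call. The probing pattern in general form (the 'clang' binary name below is just a placeholder):

import subprocess

def command_succeeds(cmd):
    """Return True if the command runs and exits with status 0."""
    try:
        return subprocess.call(cmd,
                               stdout=subprocess.DEVNULL,
                               stderr=subprocess.DEVNULL) == 0
    except OSError:
        # Binary not found or not executable.
        return False

print(command_succeeds(['clang', '--version']))
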
Example #31
def main(args):
    """
    Store the defect results in the specified input list as bug reports in the
    database.
    """
    logger.setup_logger(args.verbose if 'verbose' in args else None)

    if not host_check.check_zlib():
        raise Exception("zlib is not available on the system!")

    # To ensure the help message prints the default folder properly,
    # the 'default' for 'args.input' is a string, not a list.
    # But we need lists for the foreach here to work.
    if isinstance(args.input, str):
        args.input = [args.input]

    if 'name' not in args:
        LOG.debug("Generating name for analysis...")
        generated = __get_run_name(args.input)
        if generated:
            setattr(args, 'name', generated)
        else:
            LOG.error("No suitable name was found in the inputs for the "
                      "analysis run. Please specify one by passing argument "
                      "--name run_name in the invocation.")
            sys.exit(2)  # argparse returns error code 2 for bad invocations.

    LOG.info("Storing analysis results for run '" + args.name + "'")

    if 'force' in args:
        LOG.info("argument --force was specified: the run with name '" +
                 args.name + "' will be deleted.")

    protocol, host, port, product_name = split_product_url(args.product_url)

    # Before any transmission happens, check if we have the PRODUCT_STORE
    # permission to prevent a possibly long ZIP operation only to get an
    # error later on.
    product_client = libclient.setup_product_client(protocol,
                                                    host, port, product_name)
    product_id = product_client.getCurrentProduct().id

    auth_client, _ = libclient.setup_auth_client(protocol, host, port)
    has_perm = libclient.check_permission(
        auth_client, Permission.PRODUCT_STORE, {'productID': product_id})
    if not has_perm:
        LOG.error("You are not authorised to store analysis results in "
                  "product '%s'", product_name)
        sys.exit(1)

    # Setup connection to the remote server.
    client = libclient.setup_client(args.product_url, product_client=False)

    LOG.debug("Initializing client connecting to %s:%d/%s done.",
              host, port, product_name)

    _, zip_file = tempfile.mkstemp('.zip')
    LOG.debug("Will write mass store ZIP to '%s'...", zip_file)

    try:
        assemble_zip(args.input, zip_file, client)

        if os.stat(zip_file).st_size > MAX_UPLOAD_SIZE:
            LOG.error("The result list to upload is too big (max: %s).",
                      sizeof_fmt(MAX_UPLOAD_SIZE))
            sys.exit(1)

        with open(zip_file, 'rb') as zf:
            b64zip = base64.b64encode(zf.read())

        context = webserver_context.get_context()

        trim_path_prefixes = args.trim_path_prefix if \
            'trim_path_prefix' in args else None

        client.massStoreRun(args.name,
                            args.tag if 'tag' in args else None,
                            str(context.version),
                            b64zip,
                            'force' in args,
                            trim_path_prefixes)

        # Storing analysis statistics if the server allows them.
        if client.allowsStoringAnalysisStatistics():
            storing_analysis_statistics(client, args.input, args.name)

        LOG.info("Storage finished successfully.")
    except RequestFailed as reqfail:
        if reqfail.errorCode == ErrorCode.SOURCE_FILE:
            header = ['File', 'Line', 'Checker name']
            table = twodim_to_str('table',
                                  header,
                                  [c.split('|') for c in reqfail.extraInfo])
            LOG.warning("Setting the review statuses for some reports failed "
                        "because of invalid source code comments: "
                        "%s\n %s", reqfail.message, table)
        sys.exit(1)
    except Exception as ex:
        LOG.info("Storage failed: %s", str(ex))
        sys.exit(1)
    finally:
        os.remove(zip_file)
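
Before the upload, the store command checks the ZIP size against MAX_UPLOAD_SIZE with os.stat and base64-encodes the archive. A self-contained sketch of those steps (the size limit below is an arbitrary value chosen for the example):

import base64
import os
import tempfile
import zipfile

MAX_UPLOAD_SIZE = 500 * 1024 * 1024   # assumed limit, for the sketch only

fd, zip_file = tempfile.mkstemp('.zip')
os.close(fd)
with zipfile.ZipFile(zip_file, 'w') as archive:
    archive.writestr('reports/example.plist', '<plist/>')

if os.stat(zip_file).st_size > MAX_UPLOAD_SIZE:
    raise SystemExit("The result list to upload is too big.")

with open(zip_file, 'rb') as zf:
    b64zip = base64.b64encode(zf.read())

os.remove(zip_file)
print(len(b64zip), "base64 bytes ready to send")
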
Example #32
def main(args):
    """
    Perform analysis on the given logfiles and store the results in a machine-
    readable format.
    """
    logger.setup_logger(args.verbose if 'verbose' in args else None)

    if len(args.logfile) != 1:
        LOG.warning("Only one log file can be processed right now!")
        sys.exit(1)

    args.output_path = os.path.abspath(args.output_path)
    if os.path.exists(args.output_path) and \
            not os.path.isdir(args.output_path):
        LOG.error("The given output path is not a directory: " +
                  args.output_path)
        sys.exit(1)

    if 'enable_all' in args:
        LOG.info("'--enable-all' was supplied for this analysis.")

    # We clear the output directory in the following cases.
    ctu_dir = os.path.join(args.output_path, 'ctu-dir')
    if 'ctu_phases' in args and args.ctu_phases[0] and \
            os.path.isdir(ctu_dir):
        # Clear the CTU-dir if the user turned on the collection phase.
        LOG.debug("Previous CTU contents have been deleted.")
        shutil.rmtree(ctu_dir)

    if 'clean' in args and os.path.isdir(args.output_path):
        LOG.info("Previous analysis results in '%s' have been removed, "
                 "overwriting with current result", args.output_path)
        shutil.rmtree(args.output_path)

    if not os.path.exists(args.output_path):
        os.makedirs(args.output_path)

    LOG.debug("args: " + str(args))
    LOG.debug("Output will be stored to: '" + args.output_path + "'")

    # Process the skip list if present.
    skip_handler = __get_skip_handler(args)

    # Enable alpha uniqueing by default if ctu analysis is used.
    if 'none' in args.compile_uniqueing and 'ctu_phases' in args:
        args.compile_uniqueing = "alpha"

    compiler_info_file = None
    if 'compiler_info_file' in args:
        LOG.debug("Compiler info is read from: %s", args.compiler_info_file)
        if not os.path.exists(args.compiler_info_file):
            LOG.error("Compiler info file %s does not exist",
                      args.compiler_info_file)
            sys.exit(1)
        compiler_info_file = args.compiler_info_file

    report_dir = args.output_path

    # Parse the JSON CCDBs and retrieve the compile commands.
    actions = []
    for log_file in args.logfile:
        if not os.path.exists(log_file):
            LOG.error("The specified logfile '%s' does not exist!",
                      log_file)
            continue

        actions += log_parser.parse_unique_log(
            load_json_or_empty(log_file),
            report_dir,
            args.compile_uniqueing,
            skip_handler,
            compiler_info_file
            )

    if not actions:
        LOG.info("None of the specified build log files contained "
                 "valid compilation commands. No analysis needed...")
        sys.exit(1)

    uniqued_compilation_db_file = os.path.join(
        args.output_path, "unique_compile_commands.json")
    with open(uniqued_compilation_db_file, 'w') as f:
        json.dump(actions, f,
                  cls=log_parser.CompileCommandEncoder)

    context = analyzer_context.get_context()
    metadata = {'action_num': len(actions),
                'command': sys.argv,
                'versions': {
                    'codechecker': "{0} ({1})".format(
                        context.package_git_tag,
                        context.package_git_hash)},
                'working_directory': os.getcwd(),
                'output_path': args.output_path,
                'result_source_files': {}}

    if 'name' in args:
        metadata['name'] = args.name

    # Update metadata dictionary with old values.
    metadata_file = os.path.join(args.output_path, 'metadata.json')
    if os.path.exists(metadata_file):
        metadata_prev = load_json_or_empty(metadata_file)
        metadata['result_source_files'] = \
            metadata_prev['result_source_files']

    analyzer.perform_analysis(args, skip_handler, context, actions, metadata)

    __update_skip_file(args)

    LOG.debug("Analysis metadata written to '%s'", metadata_file)
    with open(metadata_file, 'w') as metafile:
        json.dump(metadata, metafile)

    # WARN: the store command will search for this file!
    compile_cmd_json = os.path.join(args.output_path, 'compile_cmd.json')
    try:
        source = os.path.abspath(args.logfile[0])
        target = os.path.abspath(compile_cmd_json)

        if source != target:
            shutil.copyfile(source, target)
    except shutil.Error:
        LOG.debug("Compilation database JSON file is the same.")
    except Exception:
        LOG.debug("Copying compilation database JSON file failed.")

    try:
        from codechecker_analyzer import analyzer_statistics
        analyzer_statistics.collect(metadata, "analyze")
    except Exception:
        pass
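
When metadata.json already exists from an earlier run, the command reloads it and carries 'result_source_files' over into the new metadata before writing the file back. A small load-or-empty and merge sketch (the helper name below is illustrative, not CodeChecker's API):

import json

def load_json_or_empty_sketch(path):
    """Return parsed JSON content, or an empty dict if missing/invalid."""
    try:
        with open(path, encoding="utf-8") as json_file:
            return json.load(json_file)
    except (OSError, ValueError):
        return {}

metadata = {'result_source_files': {}}
previous = load_json_or_empty_sketch('metadata.json')
metadata['result_source_files'] = previous.get('result_source_files', {})
print(metadata)
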
Example #33
def main(args):
    """
    List the checkers available in the specified (or all supported) analyzers
    along with their description or enabled status in various formats.
    """

    logger.setup_logger(args.verbose if 'verbose' in args else None)

    # If nothing is set, list checkers for all supported analyzers.
    analyzers = args.analyzers \
        if 'analyzers' in args \
        else analyzer_types.supported_analyzers

    context = analyzer_context.get_context()
    working, errored = analyzer_types.check_supported_analyzers(analyzers,
                                                                context)

    analyzer_environment = get_check_env(context.path_env_extra,
                                         context.ld_lib_path_extra)

    analyzer_config_map = analyzer_types.build_config_handlers(args,
                                                               context,
                                                               working)
    # List available checker profiles.
    if 'profile' in args and args.profile == 'list':
        if 'details' not in args:
            if args.output_format not in ['csv', 'json']:
                header = ['Profile name']
            else:
                header = ['profile_name']
        else:
            if args.output_format not in ['csv', 'json']:
                header = ['Profile name', 'Description']
            else:
                header = ['profile_name', 'description']

        rows = []
        for (profile, description) in context.available_profiles.items():
            if 'details' not in args:
                rows.append([profile])
            else:
                rows.append([profile, description])

        print(output_formatters.twodim_to_str(args.output_format,
                                              header, rows))
        return

    # Use different, human-readable headers depending on the output format.
    if 'details' not in args:
        if args.output_format not in ['csv', 'json']:
            header = ['Name']
        else:
            header = ['name']
    else:
        if args.output_format not in ['csv', 'json']:
            header = ['', 'Name', 'Analyzer', 'Severity', 'Description']
        else:
            header = ['enabled', 'name', 'analyzer', 'severity', 'description']

    rows = []
    for analyzer in working:
        config_handler = analyzer_config_map.get(analyzer)
        analyzer_class = \
            analyzer_types.supported_analyzers[analyzer]

        checkers = analyzer_class.get_analyzer_checkers(config_handler,
                                                        analyzer_environment)
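        # The package-level default checker configuration for this analyzer.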
        default_checker_cfg = context.checker_config.get(
            analyzer + '_checkers')

        profile_checkers = None
        if 'profile' in args:
            if args.profile not in context.available_profiles:
                LOG.error("Checker profile '%s' does not exist!",
                          args.profile)
                LOG.error("To list available profiles, use '--profile list'.")
                return

            profile_checkers = [(args.profile, True)]

        config_handler.initialize_checkers(context.available_profiles,
                                           context.package_root,
                                           checkers,
                                           default_checker_cfg,
                                           profile_checkers)

        for checker_name, value in config_handler.checks().items():
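            # Each value is an (enabled, description) pair.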
            enabled, description = value

            if not enabled and 'profile' in args:
                continue

            if enabled and 'only_disabled' in args:
                continue
            elif not enabled and 'only_enabled' in args:
                continue

            if args.output_format != 'json':
                enabled = '+' if enabled else '-'

            if 'details' not in args:
                rows.append([checker_name])
            else:
                severity = context.severity_map.get(checker_name)
                rows.append([enabled, checker_name, analyzer,
                             severity, description])

    if len(rows) > 0:
        print(output_formatters.twodim_to_str(args.output_format,
                                              header, rows))

    for analyzer_binary, reason in errored:
        LOG.error("Failed to get checkers for '%s'!"
                  "The error reason was: '%s'", analyzer_binary, reason)
        LOG.error("Please check your installation and the "
                  "'config/package_layout.json' file!")