Example #1
0
def main(args):
    """
    List basic information about the analyzers supported by CodeChecker.
    """
    logger.setup_logger(args.verbose if 'verbose' in args else None)

    context = generic_package_context.get_context()
    working, errored = \
        analyzer_types.check_supported_analyzers(
            analyzer_types.supported_analyzers,
            context)

    if args.dump_config:
        binary = context.analyzer_binaries.get(args.dump_config)

        if args.dump_config == 'clang-tidy':
            subprocess.call([binary, '-dump-config', '-checks=*'])
        elif args.dump_config == 'clangsa':
            # TODO: Not supported by ClangSA yet!
            LOG.warning("'--dump-config clangsa' is not supported yet.")

        return

    if args.output_format not in ['csv', 'json']:
        if 'details' not in args:
            header = ['Name']
        else:
            header = ['Name', 'Path', 'Version']
    else:
        if 'details' not in args:
            header = ['name']
        else:
            header = ['name', 'path', 'version_string']

    rows = []
    for analyzer in working:
        if 'details' not in args:
            rows.append([analyzer])
        else:
            binary = context.analyzer_binaries.get(analyzer)
            try:
                version = subprocess.check_output([binary, '--version'])
            except (subprocess.CalledProcessError, OSError):
                version = 'ERROR'

            rows.append([analyzer, binary, version])

    if 'all' in args:
        for analyzer, err_reason in errored:
            if 'details' not in args:
                rows.append([analyzer])
            else:
                rows.append([
                    analyzer,
                    context.analyzer_binaries.get(analyzer), err_reason
                ])

    if len(rows) > 0:
        print(output_formatters.twodim_to_str(args.output_format, header,
                                              rows))
Example #2
0
def main(args):
    """
    Get and print the version information from the version config
    file and Thrift API definition.
    """

    context = generic_package_context.get_context()

    rows = [
        ("Base package version", context.version),
        ("Package build date", context.package_build_date),
        ("Git commit ID (hash)", context.package_git_hash),
        ("Git tag information", context.package_git_tag),
        ("Database schema version", str(context.db_version_info)),
        ("Client API version (Thrift)", constants.API_VERSION)
    ]

    if args.output_format != "json":
        print(output_formatters.twodim_to_str(args.output_format,
                                              ["Kind", "Version"],
                                              rows))
    else:
        # Use a special JSON format here, instead of
        # [ {"kind": "something", "version": "0.0.0"}, {"kind": "foo", ... } ]
        # do
        # { "something": "0.0.0", "foo": ... }
        print(json.dumps(dict(rows)))
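
A minimal sketch of the flat JSON mapping described in the comment above. The row values here are made up for illustration; the point is that dict() applied to a list of (kind, version) pairs yields the compact {"kind": "version"} object instead of a list of records.

import json

# Illustrative rows in the same shape as the function builds.
rows = [("Base package version", "6.0.0"),
        ("Package build date", "2018-01-01 12:00")]

# dict() turns the pair list into a flat mapping, which json.dumps prints as
# {"Base package version": "6.0.0", "Package build date": "2018-01-01 12:00"}.
print(json.dumps(dict(rows)))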
Example #3
0
def is_statistics_capable():
    """ Detects if the current clang is Statistics compatible. """
    context = generic_package_context.get_context()

    analyzer = "clangsa"
    enabled_analyzers = [analyzer]
    cfg_handlers = analyzer_types.build_config_handlers({}, context,
                                                        enabled_analyzers)

    clangsa_cfg = cfg_handlers[analyzer]
    analyzer = analyzer_types.construct_analyzer_type(analyzer, clangsa_cfg,
                                                      None)

    check_env = analyzer_env.get_check_env(context.path_env_extra,
                                           context.ld_lib_path_extra)

    checkers = analyzer.get_analyzer_checkers(clangsa_cfg, check_env)

    stat_checkers_pattern = re.compile(r'.+statisticscollector.+')

    for checker_name, _ in checkers:
        if stat_checkers_pattern.match(checker_name):
            return True

    return False
Example #4
0
def handle_check(args):
    """
    Runs the original build and logs the buildactions.
    Based on the log runs the analysis.
    """
    log_file = None
    set_in_cmdline = False
    try:
        if not host_check.check_zlib():
            sys.exit(1)

        args.workspace = os.path.abspath(args.workspace)
        if not os.path.isdir(args.workspace):
            os.mkdir(args.workspace)

        context = generic_package_context.get_context()
        context.codechecker_workspace = args.workspace
        context.db_username = args.dbusername

        log_file, set_in_cmdline = build_manager.check_log_file(args, context)

        if not log_file:
            LOG.error("Failed to generate compilation command file: " +
                      log_file)
            sys.exit(1)

        actions = log_parser.parse_log(log_file, args.add_compiler_defaults)

        check_env = analyzer_env.get_check_env(context.path_env_extra,
                                               context.ld_lib_path_extra)

        sql_server = SQLServer.from_cmdline_args(args,
                                                 context.codechecker_workspace,
                                                 context.migration_root,
                                                 check_env)

        conn_mgr = client.ConnectionManager(sql_server, 'localhost',
                                            util.get_free_port())

        sql_server.start(context.db_version_info,
                         wait_for_start=True,
                         init=True)

        conn_mgr.start_report_server()

        LOG.debug("Checker server started.")

        analyzer.run_check(args, actions, context)

        LOG.info("Analysis has finished.")

        log_startserver_hint(args)

    except Exception as ex:
        LOG.error(ex)
        import traceback
        print(traceback.format_exc())
    finally:
        if not args.keep_tmp:
            if log_file and not set_in_cmdline:
                LOG.debug('Removing temporary log file: ' + log_file)
                os.remove(log_file)
Example #5
0
def get_argparser_ctor_args():
    """
    This method returns a dict containing the kwargs for constructing an
    argparse.ArgumentParser (either directly or as a subparser).
    """

    return {
        'prog':
        'CodeChecker checkers',
        'formatter_class':
        argparse.ArgumentDefaultsHelpFormatter,

        # Description is shown when the command's help is queried directly
        'description':
        "Get the list of checkers available and their enabled "
        "status in the supported analyzers. Currently "
        "supported analyzers are: " +
        ', '.join(analyzer_types.supported_analyzers) + ".",

        # Epilogue is shown after the arguments when the help is queried
        # directly.
        'epilog':
        "The list of checkers that are enabled of disabled by "
        "default can be edited by editing the file '" +
        os.path.join(generic_package_context.get_context().package_root,
                     'config', 'config.json') + "'.",

        # Help is shown when the "parent" CodeChecker command lists the
        # individual subcommands.
        'help':
        "List the checkers available for code analysis."
    }
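
A short sketch of how a kwargs dict like the one above can be consumed. The subparser wiring below is an assumption for illustration only, not the exact CodeChecker plumbing.

import argparse

# Register the 'checkers' subcommand using the returned kwargs.
main_parser = argparse.ArgumentParser(prog='CodeChecker')
subcommands = main_parser.add_subparsers(title='available actions')
checkers_parser = subcommands.add_parser('checkers',
                                         **get_argparser_ctor_args())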
Example #6
0
def _do_quickcheck(args):
    """
    Handles the "quickcheck" command.

    For arguments see main function in CodeChecker.py.
    It also requires an extra property in args object, namely workspace which
    is a directory path as a string.
    This function is called from handle_quickcheck.
    """

    log_file = None
    set_in_cmdline = False
    try:
        context = generic_package_context.get_context()

        context.codechecker_workspace = args.workspace
        args.name = "quickcheck"

        log_file, set_in_cmdline = build_manager.check_log_file(args, context)
        actions = log_parser.parse_log(log_file, args.add_compiler_defaults)
        analyzer.run_quick_check(args, context, actions)

    except Exception as ex:
        LOG.error("Running quickcheck failed.")
    finally:
        if not args.keep_tmp:
            if log_file and not set_in_cmdline:
                LOG.debug('Removing temporary log file: ' + log_file)
                os.remove(log_file)
Example #7
0
def addReport(session, run_id, file_id, main_section, bugpath, events,
              detection_status, detection_time):
    """
    """
    try:

        checker_name = main_section['check_name']
        context = generic_package_context.get_context()
        severity_name = context.severity_map.get(checker_name, 'UNSPECIFIED')
        severity = ttypes.Severity._NAMES_TO_VALUES[severity_name]

        report = Report(run_id,
                        main_section['issue_hash_content_of_line_in_context'],
                        file_id,
                        main_section['description'],
                        checker_name or 'NOT FOUND',
                        main_section['category'],
                        main_section['type'],
                        main_section['location']['line'],
                        main_section['location']['col'],
                        severity,
                        detection_status,
                        detection_time)

        session.add(report)
        session.flush()

        LOG.debug("storing bug path")
        store_bug_path(session, bugpath, report.id)
        LOG.debug("storing events")
        store_bug_events(session, events, report.id)

        return report.id

    except Exception as ex:
        raise shared.ttypes.RequestFailed(shared.ttypes.ErrorCode.GENERAL,
                                          str(ex))
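
The severity lookup above relies on the _NAMES_TO_VALUES map that Thrift generates for Python enums. A rough sketch of that structure (the member names and numbers below are made up, not CodeChecker's real severity levels):

class Severity(object):
    # Shape of a Thrift-generated Python "enum" class.
    UNSPECIFIED = 0
    LOW = 10
    HIGH = 30

    _VALUES_TO_NAMES = {0: "UNSPECIFIED", 10: "LOW", 30: "HIGH"}
    _NAMES_TO_VALUES = {"UNSPECIFIED": 0, "LOW": 10, "HIGH": 30}

# Mapping a severity name to its numeric value, as addReport does:
severity = Severity._NAMES_TO_VALUES["HIGH"]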
Example #8
0
def is_ctu_capable():
    """ Detects if the current clang is CTU compatible. """

    context = generic_package_context.get_context()
    ctu_func_map_cmd = context.ctu_func_map_cmd
    try:
        version = subprocess.check_output([ctu_func_map_cmd, '-version'])
    except (subprocess.CalledProcessError, OSError):
        version = 'ERROR'
    return version != 'ERROR'
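
The same probing pattern can be factored into a small helper; binary_responds below is a hypothetical name, not part of CodeChecker.

import subprocess

def binary_responds(binary, flag='-version'):
    """Return True if the given binary can be invoked with the given flag."""
    try:
        subprocess.check_output([binary, flag])
        return True
    except (subprocess.CalledProcessError, OSError):
        return False

# is_ctu_capable() then amounts to probing the CTU function-map tool:
#   binary_responds(context.ctu_func_map_cmd)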
Example #9
0
def handle_log(args):
    """
    Generates a build log by running the original build command.
    No analysis is done.
    """
    args.logfile = os.path.realpath(args.logfile)
    if os.path.exists(args.logfile):
        os.remove(args.logfile)

    context = generic_package_context.get_context()
    build_manager.perform_build_command(args.logfile, args.command, context)
Example #10
0
def add_arguments_to_parser(parser):
    """
    Add the subcommand's arguments to the given argparse.ArgumentParser.
    """

    context = generic_package_context.get_context()
    working, _ = analyzer_types.check_supported_analyzers(
        analyzer_types.supported_analyzers, context)

    parser.add_argument('--all',
                        dest="all",
                        action='store_true',
                        default=argparse.SUPPRESS,
                        required=False,
                        help="Show all supported analyzers, not just the "
                        "available ones.")

    parser.add_argument('--details',
                        dest="details",
                        action='store_true',
                        default=argparse.SUPPRESS,
                        required=False,
                        help="Show details about the analyzers, not just "
                        "their names.")

    parser.add_argument('--dump-config',
                        dest='dump_config',
                        required=False,
                        choices=list(working),
                        help="Dump the available checker options for the "
                        "given analyzer to the standard output. "
                        "Currently only clang-tidy supports this option. "
                        "The output can be redirected to a file named "
                        ".clang-tidy. If this file is placed to the "
                        "project directory then the options are applied "
                        "to the files under that directory. This config "
                        "file can also be provided via "
                        "'CodeChecker analyze' and 'CodeChecker check' "
                        "commands.")

    parser.add_argument('-o',
                        '--output',
                        dest='output_format',
                        required=False,
                        default='rows',
                        choices=output_formatters.USER_FORMATS,
                        help="Specify the format of the output list.")

    logger.add_verbose_arguments(parser)
    parser.set_defaults(func=main)
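
Because --all and --details default to argparse.SUPPRESS, the corresponding attribute only exists on the parsed namespace when the flag was actually given, which is why main() tests 'details' in args instead of reading args.details. A minimal stand-alone illustration:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--details', dest='details', action='store_true',
                    default=argparse.SUPPRESS)

args = parser.parse_args([])             # flag omitted
print('details' in args)                 # False: the attribute is suppressed

args = parser.parse_args(['--details'])  # flag given
print('details' in args)                 # True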
Example #11
0
def main(args):
    """
    List basic information about the analyzers supported by CodeChecker.
    """

    context = generic_package_context.get_context()
    working, errored = \
        analyzer_types.check_supported_analyzers(
            analyzer_types.supported_analyzers,
            context)

    if args.output_format not in ['csv', 'json']:
        if 'details' not in args:
            header = ['Name']
        else:
            header = ['Name', 'Path', 'Version']
    else:
        if 'details' not in args:
            header = ['name']
        else:
            header = ['name', 'path', 'version_string']

    rows = []
    for analyzer in working:
        if 'details' not in args:
            rows.append([analyzer])
        else:
            binary = context.analyzer_binaries.get(analyzer)
            try:
                version = subprocess.check_output([binary,
                                                   '--version'])
            except (subprocess.CalledProcessError, OSError):
                version = 'ERROR'

            rows.append([analyzer,
                         binary,
                         version])

    if 'all' in args:
        for analyzer, err_reason in errored:
            if 'details' not in args:
                rows.append([analyzer])
            else:
                rows.append([analyzer,
                             context.analyzer_binaries.get(analyzer),
                             err_reason])

    if len(rows) > 0:
        print(output_formatters.twodim_to_str(args.output_format,
                                              header, rows))
Example #12
0
def main(args):
    """
    Entry point for parsing analysis results and printing them to
    stdout in a human-readable format.
    """

    context = generic_package_context.get_context()

    # To ensure the help message prints the default folder properly,
    # the 'default' for 'args.input' is a string, not a list.
    # But we need lists for the foreach here to work.
    if isinstance(args.input, str):
        args.input = [args.input]

    original_cwd = os.getcwd()

    suppress_handler = None
    if 'suppress' in args:
        if not os.path.isfile(args.suppress):
            LOG.warning("Suppress file '" + args.suppress + "' given, but it "
                        "does not exist -- will not suppress anything.")
        else:
            suppress_handler = generic_package_suppress_handler.\
                GenericSuppressHandler(args.suppress)

    for input_path in args.input:
        os.chdir(original_cwd)
        LOG.debug("Parsing input argument: '" + input_path + "'")

        if os.path.isfile(input_path):
            parse(input_path, context, {}, suppress_handler, args.print_steps)
        elif os.path.isdir(input_path):
            metadata_file = os.path.join(input_path, "metadata.json")
            metadata_dict = {}
            if os.path.exists(metadata_file):
                with open(metadata_file, 'r') as metadata:
                    metadata_dict = json.load(metadata)
                    LOG.debug(metadata_dict)

                if 'working_directory' in metadata_dict:
                    os.chdir(metadata_dict['working_directory'])

            _, _, files = next(os.walk(input_path), ([], [], []))
            for f in files:
                parse(os.path.join(input_path, f), context, metadata_dict,
                      suppress_handler, args.print_steps)

    os.chdir(original_cwd)
Example #13
0
def main(args):
    """
    Generates a build log by running the original build command.
    No analysis is done.
    """
    logger.setup_logger(args.verbose if 'verbose' in args else None)

    args.logfile = os.path.realpath(args.logfile)
    if os.path.exists(args.logfile):
        os.remove(args.logfile)

    context = generic_package_context.get_context()
    build_manager.perform_build_command(args.logfile,
                                        args.command,
                                        context,
                                        silent='quiet' in args)
Example #14
0
def handle_list_checkers(args):
    """
    List the checkers supported by the analyzers and show which ones are
    enabled or disabled by default in the config.
    """
    context = generic_package_context.get_context()
    # If nothing is set, list checkers for all supported analyzers.
    analyzers = args.analyzers or analyzer_types.supported_analyzers
    enabled_analyzers, _ = analyzer_types\
        .check_supported_analyzers(analyzers, context)
    analyzer_environment = analyzer_env.get_check_env(
        context.path_env_extra,
        context.ld_lib_path_extra)

    for ea in enabled_analyzers:
        if ea not in analyzer_types.supported_analyzers:
            LOG.error('Unsupported analyzer ' + str(ea))
            sys.exit(1)

    analyzer_config_map = \
        analyzer_types.build_config_handlers(args,
                                             context,
                                             enabled_analyzers)

    for ea in enabled_analyzers:
        # Get the config.
        config_handler = analyzer_config_map.get(ea)
        source_analyzer = \
            analyzer_types.construct_analyzer_type(ea,
                                                   config_handler,
                                                   None)

        checkers = source_analyzer.get_analyzer_checkers(config_handler,
                                                         analyzer_environment)

        default_checker_cfg = context.default_checkers_config.get(
            ea + '_checkers')

        analyzer_types.initialize_checkers(config_handler,
                                           checkers,
                                           default_checker_cfg)
        for checker_name, value in config_handler.checks().items():
            enabled, description = value
            if enabled:
                print(' + {0:50} {1}'.format(checker_name, description))
            else:
                print(' - {0:50} {1}'.format(checker_name, description))
Example #15
0
def handle_plist(args):
    """
    Process the plist files in the given directory in parallel. Unless
    args.stdout is set, start the report server and store the results.
    """
    context = generic_package_context.get_context()
    context.codechecker_workspace = args.workspace
    context.db_username = args.dbusername

    if not args.stdout:
        args.workspace = os.path.realpath(args.workspace)
        if not os.path.isdir(args.workspace):
            os.mkdir(args.workspace)

        check_env = analyzer_env.get_check_env(context.path_env_extra,
                                               context.ld_lib_path_extra)

        sql_server = SQLServer.from_cmdline_args(args,
                                                 context.codechecker_workspace,
                                                 context.migration_root,
                                                 check_env)

        conn_mgr = client.ConnectionManager(sql_server, 'localhost',
                                            util.get_free_port())

        sql_server.start(context.db_version_info,
                         wait_for_start=True,
                         init=True)

        conn_mgr.start_report_server()

        with client.get_connection() as connection:
            context.run_id = connection.add_checker_run(
                ' '.join(sys.argv), args.name, context.version, args.force)

    pool = multiprocessing.Pool(args.jobs)

    try:
        items = [(plist, args, context)
                 for plist in os.listdir(args.directory)]
        pool.map_async(consume_plist, items, 1).get(float('inf'))
        pool.close()
    except Exception:
        pool.terminate()
        raise
    finally:
        pool.join()

    if not args.stdout:
        log_startserver_hint(args)
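
handle_plist() waits on map_async(...).get(float('inf')) instead of calling a plain Pool.map(). This is a common workaround for the fact that a blocking Pool.map() on Python 2 can ignore KeyboardInterrupt, while waiting on the async result with a timeout keeps Ctrl-C usable. A stripped-down sketch of the same pattern with a placeholder worker:

import multiprocessing

def consume_item(item):
    # Placeholder worker standing in for consume_plist.
    return item * 2

if __name__ == '__main__':
    pool = multiprocessing.Pool(4)
    try:
        # get() with a huge timeout instead of a bare map() so the main
        # process still reacts to KeyboardInterrupt.
        results = pool.map_async(consume_item, range(10), 1).get(float('inf'))
        pool.close()
    except Exception:
        pool.terminate()
        raise
    finally:
        pool.join()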
Example #16
0
def handle_version_info(args):
    """
    Get and print the version information from the
    version config file and the Thrift API versions.
    """

    context = generic_package_context.get_context()

    print(('Base package version: \t' + context.version).expandtabs(30))
    print(('Package build date: \t' +
           context.package_build_date).expandtabs(30))
    print(('Git hash: \t' + context.package_git_hash).expandtabs(30))
    print(('Git tag info: \t' + context.package_git_tag).expandtabs(30))
    print(('DB schema version: \t' +
           str(context.db_version_info)).expandtabs(30))

    # Thrift API version for the clients.
    from codeCheckerDBAccess import constants
    print(('Thrift client api version: \t' +
           constants.API_VERSION).expandtabs(30))
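
Each line above relies on str.expandtabs(30): the tab after the label is expanded with spaces up to the next multiple of 30 columns, so all values line up in one column. A tiny illustration:

line = 'Git hash: \t' + 'abc1234'
# Prints 'Git hash:' padded with spaces up to column 30, then 'abc1234'.
print(line.expandtabs(30))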
Example #17
0
def handle_debug(args):
    """
    Runs a debug command on the buildactions where the analysis
    failed for some reason.
    """
    context = generic_package_context.get_context()

    context.codechecker_workspace = args.workspace
    context.db_username = args.dbusername

    check_env = analyzer_env.get_check_env(context.path_env_extra,
                                           context.ld_lib_path_extra)

    sql_server = SQLServer.from_cmdline_args(args,
                                             context.migration_root,
                                             check_env)
    sql_server.start(context.db_version_info, wait_for_start=True, init=False)

    debug_reporter.debug(context, sql_server.get_connection_string(),
                         args.force)
Example #18
0
def main(args):
    """
    Get and print the version information from the version config
    file and Thrift API definition.
    """
    logger.setup_logger(args.verbose if 'verbose' in args else None)

    context = generic_package_context.get_context()

    server_versions = [
        '{0}.{1}'.format(major, minor)
        for major, minor in version.SUPPORTED_VERSIONS.items()
    ]

    if args.output_format != 'json':
        server_versions = ', '.join(server_versions)

    rows = [("Base package version", context.version),
            ("Package build date", context.package_build_date),
            ("Git commit ID (hash)", context.package_git_hash),
            ("Git tag information", context.package_git_tag),
            ("Configuration schema", str(context.product_db_version_info)),
            ("Database schema", str(context.run_db_version_info)),
            ("Server supported API (Thrift)", server_versions),
            ("Client API (Thrift)", version.CLIENT_API)]

    if args.output_format != "json":
        print(
            output_formatters.twodim_to_str(args.output_format,
                                            ["Kind", "Version"], rows))
    else:
        # Use a special JSON format here, instead of
        # [ {"kind": "something", "version": "0.0.0"}, {"kind": "foo", ... } ]
        # do
        # { "something": "0.0.0", "foo": ... }
        print(json.dumps(dict(rows)))
Example #19
0
def main(args):
    """
    Entry point for parsing analysis results and printing them to
    stdout in a human-readable format.
    """

    logger.setup_logger(args.verbose if 'verbose' in args else None)

    context = generic_package_context.get_context()

    # To ensure the help message prints the default folder properly,
    # the 'default' for 'args.input' is a string, not a list.
    # But we need lists for the foreach here to work.
    if isinstance(args.input, str):
        args.input = [args.input]

    original_cwd = os.getcwd()

    suppress_handler = None
    if 'suppress' in args:
        __make_handler = False
        if not os.path.isfile(args.suppress):
            if 'create_suppress' in args:
                with open(args.suppress, 'w') as _:
                    # Just create the file.
                    __make_handler = True
                    LOG.info("Will write source-code suppressions to "
                             "suppress file.")
            else:
                LOG.warning("Suppress file '" + args.suppress + "' given, but "
                            "it does not exist -- will not suppress anything.")
        else:
            __make_handler = True

        if __make_handler:
            suppress_handler = generic_package_suppress_handler.\
                GenericSuppressHandler(args.suppress,
                                       'create_suppress' in args)
    elif 'create_suppress' in args:
        LOG.error("Can't use '--export-source-suppress' unless '--suppress "
                  "SUPPRESS_FILE' is also given.")
        sys.exit(2)

    processed_path_hashes = set()

    def skip_html_report_data_handler(report_hash, source_file, report_line,
                                      checker_name, diag, files):
        """
        Report handler which skips bugs which were suppressed by source code
        comments.
        """
        report = Report(None, diag['path'], files)
        path_hash = get_report_path_hash(report, files)
        if path_hash in processed_path_hashes:
            LOG.debug("Skip report because it is a deduplication of an "
                      "already processed report!")
            LOG.debug("Path hash: %s", path_hash)
            LOG.debug(diag)
            return True

        skip = plist_parser.skip_report(report_hash, source_file, report_line,
                                        checker_name, suppress_handler)
        if not skip:
            processed_path_hashes.add(path_hash)

        return skip

    skip_handler = None
    if 'skip_file' in args:
        with open(args.skip_file, 'r') as skip_file:
            skip_handler = SkipListHandler(skip_file.read())

    html_builder = None

    for input_path in args.input:

        input_path = os.path.abspath(input_path)
        os.chdir(original_cwd)
        LOG.debug("Parsing input argument: '" + input_path + "'")

        export = args.export if 'export' in args else None
        if export == 'html':
            output_path = os.path.abspath(args.output_path)

            if not html_builder:
                html_builder = \
                    PlistToHtml.HtmlBuilder(context.path_plist_to_html_dist,
                                            context.checkers_severity_map_file)

            LOG.info("Generating html output files:")
            PlistToHtml.parse(input_path, output_path,
                              context.path_plist_to_html_dist,
                              skip_html_report_data_handler, html_builder)
            continue

        severity_stats = Counter({})
        file_stats = Counter({})
        report_count = Counter({})

        files = []
        metadata_dict = {}
        if os.path.isfile(input_path):
            files.append(input_path)

        elif os.path.isdir(input_path):
            metadata_file = os.path.join(input_path, "metadata.json")
            if os.path.exists(metadata_file):
                with open(metadata_file, 'r') as metadata:
                    metadata_dict = json.load(metadata)
                    LOG.debug(metadata_dict)

                if 'working_directory' in metadata_dict:
                    working_dir = metadata_dict['working_directory']
                    try:
                        os.chdir(working_dir)
                    except OSError as oerr:
                        LOG.debug(oerr)
                        LOG.error(
                            "Working directory %s is missing.\n"
                            "Can not parse reports safely.", working_dir)
                        sys.exit(1)

            _, _, file_names = next(os.walk(input_path), ([], [], []))
            files = [
                os.path.join(input_path, file_name) for file_name in file_names
            ]

        file_change = set()
        for file_path in files:
            report_stats, f_change = parse(file_path, context, metadata_dict,
                                           suppress_handler, skip_handler,
                                           'print_steps' in args,
                                           processed_path_hashes)
            file_change = file_change.union(f_change)

            severity_stats.update(Counter(report_stats.get('severity', {})))
            file_stats.update(Counter(report_stats.get('files', {})))
            report_count.update(Counter(report_stats.get('reports', {})))

        print("\n----==== Summary ====----")
        if file_stats:
            vals = [[os.path.basename(k), v]
                    for k, v in dict(file_stats).items()]
            keys = ['Filename', 'Report count']
            table = twodim_to_str('table', keys, vals, 1, True)
            print(table)

        if severity_stats:
            vals = [[k, v] for k, v in dict(severity_stats).items()]
            keys = ['Severity', 'Report count']
            table = twodim_to_str('table', keys, vals, 1, True)
            print(table)

        report_count = dict(report_count).get("report_count", 0)
        print("----=================----")
        print("Total number of reports: {}".format(report_count))
        print("----=================----")

        if file_change:
            changed_files = '\n'.join([' - ' + f for f in file_change])
            LOG.warning("The following source file contents changed since the "
                        "latest analysis:\n{0}\nMultiple reports were not "
                        "shown and skipped from the statistics. Please "
                        "analyze your project again to update the "
                        "reports!".format(changed_files))

    os.chdir(original_cwd)

    # Create index.html for the generated html files.
    if html_builder:
        html_builder.create_index_html(args.output_path)
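
The per-directory summary above accumulates per-file statistics with collections.Counter, which adds dictionaries key by key. A minimal sketch with made-up numbers:

from collections import Counter

severity_stats = Counter({})

# Per-file stats in the shape parse() is expected to return (values made up).
for report_stats in [{'severity': {'HIGH': 2, 'LOW': 1}},
                     {'severity': {'HIGH': 1}}]:
    severity_stats.update(Counter(report_stats.get('severity', {})))

print(dict(severity_stats))  # {'HIGH': 3, 'LOW': 1} (key order may vary)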
Example #20
0
def handle_server(args):
    """
    Starts the report viewer server.
    """
    if not host_check.check_zlib():
        sys.exit(1)

    workspace = args.workspace

    if (args.list or args.stop or args.stop_all) and \
            not (args.list ^ args.stop ^ args.stop_all):
        print("CodeChecker server: error: argument -l/--list and -s/--stop"
              "and --stop-all are mutually exclusive.")
        sys.exit(2)

    if args.list:
        instances = instance_manager.list()

        instances_on_multiple_hosts = any(True for inst in instances
                                          if inst['hostname'] !=
                                          socket.gethostname())
        if not instances_on_multiple_hosts:
            rows = [('Workspace', 'View port')]
        else:
            rows = [('Workspace', 'Computer host', 'View port')]

        for instance in instances:
            if not instances_on_multiple_hosts:
                rows.append((instance['workspace'], str(instance['port'])))
            else:
                rows.append((instance['workspace'],
                             instance['hostname']
                             if instance['hostname'] != socket.gethostname()
                             else '',
                             str(instance['port'])))

        print("Your running CodeChecker servers:")
        print(util.twodim_to_table(rows))
        sys.exit(0)
    elif args.stop or args.stop_all:
        for i in instance_manager.list():
            # A STOP only stops the server associated with the given workspace
            # and view-port.
            if i['hostname'] != socket.gethostname() or (
                args.stop and not (i['port'] == args.view_port and
                                   os.path.abspath(i['workspace']) ==
                                   os.path.abspath(workspace))):
                continue

            try:
                util.kill_process_tree(i['pid'])
                LOG.info("Stopped CodeChecker server running on port {0} "
                         "in workspace {1} (PID: {2})".
                         format(i['port'], i['workspace'], i['pid']))
            except Exception:
                # Let the exception propagate if stopping the process fails.
                LOG.error("Couldn't stop process PID #" + str(i['pid']))
                raise
        sys.exit(0)

    # WARNING
    # In the case of SQLite, the default value of args.dbaddress is used,
    # for which is_localhost() should return True.
    if util.is_localhost(args.dbaddress) and not os.path.exists(workspace):
        os.makedirs(workspace)

    suppress_handler = generic_package_suppress_handler.\
        GenericSuppressHandler(None)
    if args.suppress is None:
        LOG.warning('No suppress file was given, suppressed results will '
                    'be only stored in the database.')
    else:
        if not os.path.exists(args.suppress):
            LOG.error('Suppress file ' + args.suppress + ' not found!')
            sys.exit(1)

    context = generic_package_context.get_context()
    context.codechecker_workspace = workspace
    session_manager.SessionManager.CodeChecker_Workspace = workspace
    context.db_username = args.dbusername

    check_env = analyzer_env.get_check_env(context.path_env_extra,
                                           context.ld_lib_path_extra)

    sql_server = SQLServer.from_cmdline_args(args,
                                             context.migration_root,
                                             check_env)
    conn_mgr = client.ConnectionManager(sql_server, args.check_address,
                                        args.check_port)
    if args.check_port:
        LOG.debug('Starting CodeChecker server and database server.')
        sql_server.start(context.db_version_info, wait_for_start=True,
                         init=True)
        conn_mgr.start_report_server()
    else:
        LOG.debug('Starting database.')
        sql_server.start(context.db_version_info, wait_for_start=True,
                         init=True)

    # Start database viewer.
    db_connection_string = sql_server.get_connection_string()

    suppress_handler.suppress_file = args.suppress
    LOG.debug('Using suppress file: ' + str(suppress_handler.suppress_file))

    checker_md_docs = os.path.join(context.doc_root, 'checker_md_docs')
    checker_md_docs_map = os.path.join(checker_md_docs,
                                       'checker_doc_map.json')
    with open(checker_md_docs_map, 'r') as dFile:
        checker_md_docs_map = json.load(dFile)

    package_data = {'www_root': context.www_root,
                    'doc_root': context.doc_root,
                    'checker_md_docs': checker_md_docs,
                    'checker_md_docs_map': checker_md_docs_map,
                    'version': context.package_git_tag}

    try:
        client_db_access_server.start_server(package_data,
                                             args.view_port,
                                             db_connection_string,
                                             suppress_handler,
                                             args.not_host_only,
                                             context)
    except socket.error as err:
        if err.errno == errno.EADDRINUSE:
            LOG.error("Server can't be started, maybe the given port number "
                      "({}) is already used. Check the connection "
                      "parameters.".format(args.view_port))
            sys.exit(1)
        else:
            raise
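
A note on the --list/--stop/--stop-all check at the top of handle_server(): the XOR expression rejects any two of the flags being set together, but all three set at once also satisfies it (True ^ True ^ True is True), so that case slips through. Counting the set flags is an unambiguous alternative; this is only a sketch, not what handle_server() currently does:

# Assumes the same args namespace and sys module used by handle_server().
flags = [args.list, args.stop, args.stop_all]
if sum(1 for flag in flags if flag) > 1:
    print("CodeChecker server: error: the arguments -l/--list, -s/--stop "
          "and --stop-all are mutually exclusive.")
    sys.exit(2)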
Example #21
0
def handle_diff_results(args):
    context = generic_package_context.get_context()

    def get_diff_results(client, baseids, cmp_data):

        report_filter = ttypes.ReportFilter()
        add_filter_conditions(report_filter, args.filter)

        # Do not show resolved bugs in compare mode new.
        if cmp_data.diffType == ttypes.DiffType.NEW:
            report_filter.detectionStatus = [
                ttypes.DetectionStatus.NEW, ttypes.DetectionStatus.UNRESOLVED,
                ttypes.DetectionStatus.REOPENED
            ]

        sort_mode = [(ttypes.SortMode(ttypes.SortType.FILENAME,
                                      ttypes.Order.ASC))]
        limit = constants.MAX_QUERY_SIZE
        offset = 0

        all_results = []
        results = client.getRunResults(baseids, limit, offset, sort_mode,
                                       report_filter, cmp_data)

        while results:
            all_results.extend(results)
            offset += limit
            results = client.getRunResults(baseids, limit, offset, sort_mode,
                                           report_filter, cmp_data)
        return all_results

    def get_report_dir_results(reportdir):
        all_reports = []
        for filename in os.listdir(reportdir):
            if filename.endswith(".plist"):
                file_path = os.path.join(reportdir, filename)
                LOG.debug("Parsing:" + file_path)
                try:
                    files, reports = plist_parser.parse_plist(file_path)
                    for report in reports:
                        report.main['location']['file_name'] = \
                            files[int(report.main['location']['file'])]
                    all_reports.extend(reports)

                except Exception as ex:
                    LOG.error('The generated plist is not valid!')
                    LOG.error(ex)
        return all_reports

    def get_line_from_file(filename, lineno):
        with open(filename, 'r') as f:
            i = 1
            for line in f:
                if i == lineno:
                    return line
                i += 1
        return ""

    def get_line_from_remote_file(client, fid, lineno):
        # Thrift Python client cannot decode JSONs that contain non '\u00??'
        # characters, so we instead ask for a Base64-encoded version.
        source = client.getSourceFileData(fid, True, ttypes.Encoding.BASE64)
        lines = base64.b64decode(source.fileContent).split('\n')
        return "" if len(lines) < lineno else lines[lineno - 1]

    def get_diff_report_dir(client, baseids, report_dir, diff_type):

        report_filter = ttypes.ReportFilter()
        add_filter_conditions(report_filter, args.filter)

        sort_mode = [(ttypes.SortMode(ttypes.SortType.FILENAME,
                                      ttypes.Order.ASC))]
        limit = constants.MAX_QUERY_SIZE
        offset = 0

        base_results = []
        results = client.getRunResults(baseids, limit, offset, sort_mode,
                                       report_filter, None)
        while results:
            base_results.extend(results)
            offset += limit
            results = client.getRunResults(baseids, limit, offset, sort_mode,
                                           report_filter, None)
        base_hashes = {}
        for res in base_results:
            base_hashes[res.bugHash] = res

        filtered_reports = []
        new_results = get_report_dir_results(report_dir)
        new_hashes = {}
        suppressed_in_code = []

        for rep in new_results:
            bughash = rep.main['issue_hash_content_of_line_in_context']
            source_file = rep.main['location']['file_name']
            bug_line = rep.main['location']['line']
            new_hashes[bughash] = rep
            sp_handler = suppress_handler.SourceSuppressHandler(
                source_file, bug_line, bughash, rep.main['check_name'])
            if sp_handler.get_suppressed():
                suppressed_in_code.append(bughash)
                LOG.debug("Bug " + bughash + "is suppressed in code. file:" +
                          source_file + "Line " + str(bug_line))

        if diff_type == 'new':
            # Shows new reports from the report dir
            # which are not present in the baseline (server)
            # and not suppressed in the code.
            for result in new_results:
                if not (result.main['issue_hash_content_of_line_in_context']
                        in base_hashes) and \
                   not (result.main['issue_hash_content_of_line_in_context']
                        in suppressed_in_code):
                    filtered_reports.append(result)
        elif diff_type == 'resolved':
            # Show bugs in the baseline (server)
            # which are not present in the report dir
            # or suppressed.
            for result in base_results:
                if not (result.bugHash in new_hashes) or \
                        (result.bugHash in suppressed_in_code):
                    filtered_reports.append(result)
        elif diff_type == 'unresolved':
            # Shows bugs in the report dir
            # that are not suppressed and
            # which are also present in the baseline (server)

            for result in new_results:
                new_hash = result.main['issue_hash_content_of_line_in_context']
                if new_hash in base_hashes and \
                        not (new_hash in suppressed_in_code):
                    filtered_reports.append(result)
        return filtered_reports

    def cached_report_file_lookup(file_cache, file_id):
        """
        Get source file data for the given file and caches it in a file cache
        if file data is not found in the cache. Finally, it returns the source
        file data from the cache.
        """
        if file_id not in file_cache:
            source = client.getSourceFileData(file_id, True,
                                              ttypes.Encoding.BASE64)
            file_content = base64.b64decode(source.fileContent)
            file_cache[file_id] = {
                'id': file_id,
                'path': source.filePath,
                'content': file_content
            }

        return file_cache[file_id]

    def get_report_data(client, reports, file_cache):
        """
        Returns necessary report files and report data events for the HTML
        plist parser.
        """
        file_sources = {}
        report_data = []

        for report in reports:
            file_sources[report.fileId] = cached_report_file_lookup(
                file_cache, report.fileId)

            details = client.getReportDetails(report.reportId)
            events = []
            for index, event in enumerate(details.pathEvents):
                file_sources[event.fileId] = cached_report_file_lookup(
                    file_cache, event.fileId)

                events.append({
                    'line': event.startLine,
                    'col': event.startCol,
                    'file': event.fileId,
                    'msg': event.msg,
                    'step': index + 1
                })
            report_data.append(events)

        return {'files': file_sources, 'reports': report_data}

    def report_to_html(client, reports, output_dir):
        """
        Generate HTML output files for the given reports in the given output
        directory by using the Plist To HTML parser.
        """
        html_builder = PlistToHtml.HtmlBuilder(context.path_plist_to_html_dist)

        file_report_map = defaultdict(list)
        for report in reports:
            file_report_map[report.fileId].append(report)

        file_cache = {}
        for file_id, file_reports in file_report_map.items():
            checked_file = file_reports[0].checkedFile
            filename = os.path.basename(checked_file)

            report_data = get_report_data(client, file_reports, file_cache)

            output_path = os.path.join(output_dir,
                                       filename + '_' + str(file_id) + '.html')
            html_builder.create(output_path, report_data)
            print('Html file was generated for file://{0}: file://{1}'.format(
                checked_file, output_path))

    def print_reports(client, reports, output_format, diff_type):
        output_dir = args.export_dir if 'export_dir' in args else None
        if 'clean' in args and output_dir and os.path.isdir(output_dir):
            print("Previous analysis results in '{0}' have been removed, "
                  "overwriting with current results.".format(output_dir))
            shutil.rmtree(output_dir)

        if output_format == 'json':
            output = []
            for report in reports:
                if isinstance(report, Report):
                    output.append(report.main)
                else:
                    output.append(report)
            print(CmdLineOutputEncoder().encode(output))
            return

        if output_format == 'html':
            if len(reports) == 0:
                print('No {0} reports were found!'.format(diff_type))
                return

            output_dir = args.export_dir
            if not os.path.exists(output_dir):
                os.makedirs(output_dir)

            print("Generating HTML output files to file://{0} directory:\n".
                  format(output_dir))

            report_to_html(client, reports, output_dir)

            print('\nTo view the results in a browser run:\n'
                  '  $ firefox {0}'.format(args.export_dir))
            return

        header = ['File', 'Checker', 'Severity', 'Msg', 'Source']
        rows = []
        for report in reports:
            if type(report) is Report:
                bug_line = report.main['location']['line']
                bug_col = report.main['location']['col']
                sev = 'unknown'
                checked_file = report.main['location']['file_name']\
                    + ':' + str(bug_line) + ":" + str(bug_col)
                check_name = report.main['check_name']
                check_msg = report.main['description']
                source_line =\
                    get_line_from_file(report.main['location']['file_name'],
                                       bug_line)
            else:
                bug_line = report.line
                bug_col = report.column
                sev = ttypes.Severity._VALUES_TO_NAMES[report.severity]
                checked_file = report.checkedFile + ':' + str(bug_line) +\
                    ":" + str(bug_col)
                source_line =\
                    get_line_from_remote_file(client, report.fileId, bug_line)
                check_name = report.checkerId
                check_msg = report.checkerMsg
            rows.append(
                (checked_file, check_name, sev, check_msg, source_line))
        if output_format == 'plaintext':
            for row in rows:
                print("{0}: {1} [{2}]\n{3}\n".format(row[0], row[3], row[1],
                                                     row[4]))
        else:
            print(twodim_to_str(output_format, header, rows))

    client = setup_client(args.product_url)

    report_dir_mode = False
    if os.path.isdir(args.newname):
        # If newname is a valid directory we assume that it is a report dir and
        # we are in local compare mode.
        report_dir_mode = True
    else:
        run_info = check_run_names(client, [args.newname])
        newid = run_info[args.newname].runId

    try:
        basename_regex = '^' + args.basename + '$'
        base_runs = filter(lambda run: re.match(basename_regex, run.name),
                           client.getRunData(None))
        base_ids = map(lambda run: run.runId, base_runs)
    except re.error:
        LOG.error('Invalid regex format in ' + args.basename)
        sys.exit(1)

    if len(base_ids) == 0:
        LOG.warning("No run names match the given pattern: " + args.basename)
        sys.exit(1)

    LOG.info("Matching against runs: " +
             ', '.join(map(lambda run: run.name, base_runs)))

    diff_type = 'new'
    if 'unresolved' in args:
        diff_type = 'unresolved'
    elif 'resolved' in args:
        diff_type = 'resolved'

    results = []
    if report_dir_mode:
        results = get_diff_report_dir(client, base_ids,
                                      os.path.abspath(args.newname), diff_type)
    else:
        cmp_data = ttypes.CompareData(runIds=[newid])
        if 'new' in args:
            cmp_data.diffType = ttypes.DiffType.NEW
        elif 'unresolved' in args:
            cmp_data.diffType = ttypes.DiffType.UNRESOLVED
        elif 'resolved' in args:
            cmp_data.diffType = ttypes.DiffType.RESOLVED

        results = get_diff_results(client, base_ids, cmp_data)

    if len(results) == 0:
        LOG.info("No results.")
    else:
        print_reports(client, results, args.output_format, diff_type)
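
get_diff_results() and get_diff_report_dir() both page through client.getRunResults() with the same limit/offset loop, so the loop can be written once. A sketch using the same Thrift client API as above; fetch_all_results is a hypothetical helper name:

def fetch_all_results(client, baseids, report_filter, cmp_data,
                      limit=constants.MAX_QUERY_SIZE):
    """Page through getRunResults until an empty batch is returned."""
    sort_mode = [ttypes.SortMode(ttypes.SortType.FILENAME, ttypes.Order.ASC)]
    offset = 0
    all_results = []
    while True:
        results = client.getRunResults(baseids, limit, offset, sort_mode,
                                       report_filter, cmp_data)
        if not results:
            break
        all_results.extend(results)
        offset += limit
    return all_results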
Example #22
0
def main(args):
    """
    Perform analysis on the given logfiles and store the results in a machine-
    readable format.
    """

    context = generic_package_context.get_context()

    # Parse the JSON CCDBs and retrieve the compile commands.
    actions = []

    for log_file in args.logfile:
        if not os.path.exists(log_file):
            LOG.error("The specified logfile '" + log_file + "' does not "
                      "exist!")
            continue

        actions += log_parser.parse_log(log_file,
                                        args.add_compiler_defaults)

    if len(actions) == 0:
        LOG.info("None of the specified build log files contained "
                 "valid compilation commands. No analysis needed...")
        return

    if 'enable_all' in args:
        LOG.info("'--enable-all' was supplied for this analysis.")

    # Run the analysis.
    args.output_path = os.path.abspath(args.output_path)
    if os.path.isdir(args.output_path):
        LOG.info("Previous analysis results in '{0}' have been "
                 "removed, overwriting with current result".
                 format(args.output_path))
        shutil.rmtree(args.output_path)
    os.makedirs(args.output_path)

    LOG.debug("Output will be stored to: '" + args.output_path + "'")

    metadata = {'action_num': len(actions),
                'command': sys.argv,
                'versions': {
                    'codechecker': "{0} ({1})".format(context.package_git_tag,
                                                      context.package_git_hash)
                },
                'working_directory': os.getcwd(),
                'output_path': args.output_path}

    if 'name' in args:
        metadata['name'] = args.name

    if 'skipfile' in args:
        # Skip information needs to be saved because reports in a header
        # can only be skipped by the report server during a later 'store'
        # if this information is persisted.
        with open(args.skipfile, 'r') as skipfile:
            metadata['skip_data'] = [l.strip() for l in skipfile.readlines()]

    analyzer.perform_analysis(args, context, actions, metadata)

    metadata_path = os.path.join(args.output_path, 'metadata.json')
    LOG.debug("Analysis metadata write to '" + metadata_path + "'")
    with open(metadata_path, 'w') as metafile:
        json.dump(metadata, metafile)
Example #23
0
def main(args):
    """
    Perform analysis on the given logfiles and store the results in a machine-
    readable format.
    """
    logger.setup_logger(args.verbose if 'verbose' in args else None)

    if len(args.logfile) != 1:
        LOG.warning("Only one log file can be processed right now!")
        sys.exit(1)

    args.output_path = os.path.abspath(args.output_path)
    if os.path.exists(args.output_path) and \
            not os.path.isdir(args.output_path):
        LOG.error("The given output path is not a directory: " +
                  args.output_path)
        sys.exit(1)

    if 'enable_all' in args:
        LOG.info("'--enable-all' was supplied for this analysis.")

    # We clear the output directory in the following cases.
    ctu_dir = os.path.join(args.output_path, 'ctu-dir')
    if 'ctu_phases' in args and args.ctu_phases[0] and \
            os.path.isdir(ctu_dir):
        # Clear the CTU-dir if the user turned on the collection phase.
        LOG.debug("Previous CTU contents have been deleted.")
        shutil.rmtree(ctu_dir)

    if 'clean' in args and os.path.isdir(args.output_path):
        LOG.info("Previous analysis results in '{0}' have been removed, "
                 "overwriting with current result".format(args.output_path))
        shutil.rmtree(args.output_path)

    if not os.path.exists(args.output_path):
        os.makedirs(args.output_path)

    LOG.debug("args: " + str(args))
    LOG.debug("Output will be stored to: '" + args.output_path + "'")

    # Parse the JSON CCDBs and retrieve the compile commands.
    actions = []
    for log_file in args.logfile:
        if not os.path.exists(log_file):
            LOG.error("The specified logfile '" + log_file + "' does not "
                      "exist!")
            continue

        parseLogOptions = ParseLogOptions(args)
        actions += log_parser.parse_log(log_file, parseLogOptions)
    if len(actions) == 0:
        LOG.info("None of the specified build log files contained "
                 "valid compilation commands. No analysis needed...")
        sys.exit(1)

    context = generic_package_context.get_context()
    metadata = {
        'action_num': len(actions),
        'command': sys.argv,
        'versions': {
            'codechecker':
            "{0} ({1})".format(context.package_git_tag,
                               context.package_git_hash)
        },
        'working_directory': os.getcwd(),
        'output_path': args.output_path,
        'result_source_files': {}
    }

    if 'name' in args:
        metadata['name'] = args.name

    # Update metadata dictionary with old values.
    metadata_file = os.path.join(args.output_path, 'metadata.json')
    if os.path.exists(metadata_file):
        with open(metadata_file, 'r') as data:
            metadata_prev = json.load(data)
            metadata['result_source_files'] =\
                metadata_prev['result_source_files']

    analyzer.perform_analysis(args, context, actions, metadata)

    LOG.debug("Analysis metadata write to '" + metadata_file + "'")
    with open(metadata_file, 'w') as metafile:
        json.dump(metadata, metafile)

    # WARNING: the 'store' command will search for this file!
    compile_cmd_json = os.path.join(args.output_path, 'compile_cmd.json')
    try:
        source = os.path.abspath(args.logfile[0])
        target = os.path.abspath(compile_cmd_json)

        if source != target:
            shutil.copyfile(source, target)
    except shutil.Error:
        LOG.debug("Compilation database JSON file is the same.")
    except Exception:
        LOG.debug("Copying compilation database JSON file failed.")
Example #24
0
def main(args):
    """
    Store the defect results in the specified input list as bug reports in the
    database.
    """
    logger.setup_logger(args.verbose if 'verbose' in args else None)

    if not host_check.check_zlib():
        raise Exception("zlib is not available on the system!")

    # To ensure the help message prints the default folder properly,
    # the 'default' for 'args.input' is a string, not a list.
    # But we need lists for the foreach here to work.
    if isinstance(args.input, str):
        args.input = [args.input]

    if 'name' not in args:
        LOG.debug("Generating name for analysis...")
        generated = __get_run_name(args.input)
        if generated:
            setattr(args, 'name', generated)
        else:
            LOG.error("No suitable name was found in the inputs for the "
                      "analysis run. Please specify one by passing argument "
                      "--name run_name in the invocation.")
            sys.exit(2)  # argparse returns error code 2 for bad invocations.

    LOG.info("Storing analysis results for run '" + args.name + "'")

    if 'force' in args:
        LOG.info("argument --force was specified: the run with name '" +
                 args.name + "' will be deleted.")

    protocol, host, port, product_name = split_product_url(args.product_url)

    # Before any transmission happens, check if we have the PRODUCT_STORE
    # permission to prevent a possibly long ZIP operation only to get an
    # error later on.
    product_client = libclient.setup_product_client(protocol, host, port,
                                                    product_name)
    product_id = product_client.getCurrentProduct().id

    auth_client, _ = libclient.setup_auth_client(protocol, host, port)
    has_perm = libclient.check_permission(auth_client,
                                          Permission.PRODUCT_STORE,
                                          {'productID': product_id})
    if not has_perm:
        LOG.error("You are not authorised to store analysis results in "
                  "product '{0}'".format(product_name))
        sys.exit(1)

    # Setup connection to the remote server.
    client = libclient.setup_client(args.product_url, product_client=False)

    LOG.debug("Initializing client connecting to {0}:{1}/{2} done.".format(
        host, port, product_name))

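    # The analysis results are packed into a ZIP archive, base64 encoded and
    # uploaded to the server in a single massStoreRun call.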
    _, zip_file = tempfile.mkstemp('.zip')
    LOG.debug("Will write mass store ZIP to '{0}'...".format(zip_file))

    try:
        assemble_zip(args.input, zip_file, client)

        if os.stat(zip_file).st_size > MAX_UPLOAD_SIZE:
            LOG.error("The result list to upload is too big (max: {}).".format(
                sizeof_fmt(MAX_UPLOAD_SIZE)))
            sys.exit(1)

        with open(zip_file, 'rb') as zf:
            b64zip = base64.b64encode(zf.read())

        context = generic_package_context.get_context()

        trim_path_prefixes = args.trim_path_prefix if \
            'trim_path_prefix' in args else None

        client.massStoreRun(args.name, args.tag if 'tag' in args else None,
                            str(context.version), b64zip, 'force' in args,
                            trim_path_prefixes)

        LOG.info("Storage finished successfully.")
    except RequestFailed as reqfail:
        if reqfail.errorCode == ErrorCode.SOURCE_FILE:
            header = ['File', 'Line', 'Checker name']
            table = twodim_to_str('table', header,
                                  [c.split('|') for c in reqfail.extraInfo])
            LOG.warning("Setting the review statuses for some reports failed "
                        "because of non valid source code comments: "
                        "{0}\n {1}".format(reqfail.message, table))
        sys.exit(1)
    except Exception as ex:
        LOG.info("Storage failed: " + str(ex))
        sys.exit(1)
    finally:
        os.remove(zip_file)
Example #25
def server_init_start(args):
    """
    Start or manage a CodeChecker report server.
    """

    if 'list' in args or 'stop' in args or 'stop_all' in args:
        __instance_management(args)
        sys.exit(0)

    # Actual server starting from this point.
    if not host_check.check_zlib():
        raise Exception("zlib is not available on the system!")

    # WARNING
    # In the SQLite case the default value of args.dbaddress is used,
    # for which is_localhost() should return True.
    if util.is_localhost(args.dbaddress) and \
            not os.path.exists(args.config_directory):
        os.makedirs(args.config_directory)

    # Make sure the SQLite file can be created if it does not exist.
    if 'sqlite' in args and \
            not os.path.isdir(os.path.dirname(args.sqlite)):
        os.makedirs(os.path.dirname(args.sqlite))

    if 'reset_root' in args:
        try:
            os.remove(os.path.join(args.config_directory, 'root.user'))
            LOG.info("Master superuser (root) credentials invalidated and "
                     "deleted. New ones will be generated...")
        except OSError:
            # File doesn't exist.
            pass

    if 'force_auth' in args:
        LOG.info("'--force-authentication' was passed as a command-line "
                 "option. The server will ask for users to authenticate!")

    context = generic_package_context.get_context()
    context.codechecker_workspace = args.config_directory
    context.db_username = args.dbusername

    check_env = analyzer_env.get_check_env(context.path_env_extra,
                                           context.ld_lib_path_extra)

    cfg_sql_server = database.SQLServer.from_cmdline_args(
        vars(args),
        CONFIG_META,
        context.config_migration_root,
        interactive=True,
        env=check_env)

    LOG.info("Checking configuration database ...")
    db_status = cfg_sql_server.connect()
    db_status_msg = database_status.db_status_msg.get(db_status)
    LOG.info(db_status_msg)

    if db_status == DBStatus.SCHEMA_MISSING:
        LOG.debug("Config database schema is missing, initializing new.")
        db_status = cfg_sql_server.connect(init=True)
        if db_status != DBStatus.OK:
            LOG.error("Config database initialization failed!")
            LOG.error("Please check debug logs.")
            sys.exit(1)

    if db_status == DBStatus.SCHEMA_MISMATCH_NO:
        LOG.debug("Configuration database schema mismatch.")
        LOG.debug("No schema upgrade is possible.")
        sys.exit(1)

    if db_status == DBStatus.SCHEMA_MISMATCH_OK:
        LOG.debug("Configuration database schema mismatch.")
        LOG.debug("Schema upgrade is possible.")
        LOG.warning("Please note after migration only "
                    "newer CodeChecker versions can be used"
                    "to start the server")
        LOG.warning("It is advised to make a full backup of your "
                    "configuration database")

        LOG.warning(cfg_sql_server.get_db_location())

        question = 'Do you want to upgrade to the new schema?' \
                   ' Y(es)/n(o) '
        if util.get_user_input(question):
            print("Upgrading schema ...")
            ret = cfg_sql_server.upgrade()
            msg = database_status.db_status_msg.get(ret,
                                                    'Unknown database status')
            print(msg)
            if ret != DBStatus.OK:
                LOG.error("Schema migration failed")
                sys.exit(ret)
        else:
            LOG.info("No schema migration was done.")
            sys.exit(0)

    if db_status == DBStatus.MISSING:
        LOG.error("Missing configuration database.")
        LOG.error("Server can not be started.")
        sys.exit(1)

    # Configuration database setup and check is needed before database
    # statuses can be checked.
    try:
        if args.status:
            ret = __db_status_check(cfg_sql_server, context, args.status)
            sys.exit(ret)
    except AttributeError:
        LOG.debug('Status was not in the arguments.')

    try:
        if args.product_to_upgrade:
            ret = __db_migration(cfg_sql_server, context,
                                 args.product_to_upgrade)
            sys.exit(ret)
    except AttributeError:
        LOG.debug('Product upgrade was not in the arguments.')

    # Create the main database link from the arguments passed over the
    # command line.
    cfg_dir = os.path.abspath(args.config_directory)
    default_product_path = os.path.join(cfg_dir, 'Default.sqlite')
    create_default_product = 'sqlite' in args and \
                             not os.path.exists(default_product_path)

    if create_default_product:
        # Create a default product and add it to the configuration database.

        LOG.debug("Create default product...")
        LOG.debug("Configuring schema and migration...")

        prod_server = database.SQLiteDatabase(default_product_path, RUN_META,
                                              context.run_migration_root,
                                              check_env)

        LOG.debug("Checking 'Default' product database.")
        db_status = prod_server.connect()
        if db_status != DBStatus.MISSING:
            db_status = prod_server.connect(init=True)
            LOG.debug(database_status.db_status_msg.get(db_status))
            if db_status != DBStatus.OK:
                LOG.error("Failed to configure default product")
                sys.exit(1)

        product_conn_string = prod_server.get_connection_string()

        server.add_initial_run_database(cfg_sql_server, product_conn_string)

        LOG.info("Product 'Default' at '{0}' created and set up.".format(
            default_product_path))

    prod_statuses = check_product_db_status(cfg_sql_server, context)

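    # Collect the products whose database schema is missing or can be upgraded.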
    upgrade_available = {}
    for k, v in prod_statuses.items():
        db_status, _, _, _ = v
        if db_status == DBStatus.SCHEMA_MISMATCH_OK or \
                db_status == DBStatus.SCHEMA_MISSING:
            upgrade_available[k] = v

    if upgrade_available:
        print_prod_status(prod_statuses)
        LOG.warning("Multiple products can be upgraded, make a backup!")
        __db_migration(cfg_sql_server, context)

    prod_statuses = check_product_db_status(cfg_sql_server, context)
    print_prod_status(prod_statuses)

    non_ok_db = False
    for k, v in prod_statuses.items():
        db_status, _, _, _ = v
        if db_status != DBStatus.OK:
            non_ok_db = True
            break

    if non_ok_db:
        msg = "There are some database issues. " \
              "Do you want to start the " \
              "server? Y(es)/n(o) "
        if not util.get_user_input(msg):
            sys.exit(1)

    # Start database viewer.
    checker_md_docs = os.path.join(context.doc_root, 'checker_md_docs')
    checker_md_docs_map = os.path.join(checker_md_docs, 'checker_doc_map.json')
    checker_md_docs_map = util.load_json_or_empty(checker_md_docs_map, {})

    package_data = {
        'www_root': context.www_root,
        'doc_root': context.doc_root,
        'checker_md_docs': checker_md_docs,
        'checker_md_docs_map': checker_md_docs_map,
        'version': context.package_git_tag
    }

    suppress_handler = generic_package_suppress_handler. \
        GenericSuppressHandler(None, False)

    try:
        server.start_server(args.config_directory, package_data,
                            args.view_port, cfg_sql_server, suppress_handler,
                            args.listen_address, 'force_auth' in args,
                            args.skip_db_cleanup, context, check_env)
    except socket.error as err:
        if err.errno == errno.EADDRINUSE:
            LOG.error("Server can't be started, maybe the given port number "
                      "({}) is already used. Check the connection "
                      "parameters.".format(args.view_port))
            sys.exit(1)
        else:
            raise
Example #26
def main(args):
    """
    Entry point for parsing some analysis results and printing them to the
    stdout in a human-readable format.
    """

    logger.setup_logger(args.verbose if 'verbose' in args else None)

    context = generic_package_context.get_context()

    # To ensure the help message prints the default folder properly,
    # the 'default' for 'args.input' is a string, not a list.
    # But we need a list for the iteration below to work.
    if isinstance(args.input, str):
        args.input = [args.input]

    original_cwd = os.getcwd()

    suppress_handler = None
    if 'suppress' in args:
        __make_handler = False
        if not os.path.isfile(args.suppress):
            if 'create_suppress' in args:
                with open(args.suppress, 'w') as _:
                    # Just create the file.
                    __make_handler = True
                    LOG.info("Will write source-code suppressions to "
                             "suppress file.")
            else:
                LOG.warning("Suppress file '" + args.suppress + "' given, but "
                            "it does not exist -- will not suppress anything.")
        else:
            __make_handler = True

        if __make_handler:
            suppress_handler = generic_package_suppress_handler.\
                GenericSuppressHandler(args.suppress,
                                       'create_suppress' in args)
    elif 'create_suppress' in args:
        LOG.error("Can't use '--export-source-suppress' unless '--suppress "
                  "SUPPRESS_FILE' is also given.")
        sys.exit(2)

    skip_handler = None
    if 'skipfile' in args:
        skip_handler = SkipListHandler(args.skipfile)

    for input_path in args.input:

        input_path = os.path.abspath(input_path)
        os.chdir(original_cwd)
        LOG.debug("Parsing input argument: '" + input_path + "'")

        export = args.export if 'export' in args else None
        if export is not None and export == 'html':
            output_path = os.path.abspath(args.output_path)

            LOG.info("Generating html output files:")
            PlistToHtml.parse(input_path, output_path,
                              context.path_plist_to_html_dist, 'clean' in args)
            continue

        severity_stats = Counter({})
        file_stats = Counter({})
        report_count = Counter({})

        files = []
        metadata_dict = {}
        if os.path.isfile(input_path):
            files.append(input_path)

        elif os.path.isdir(input_path):
            metadata_file = os.path.join(input_path, "metadata.json")
            if os.path.exists(metadata_file):
                with open(metadata_file, 'r') as metadata:
                    metadata_dict = json.load(metadata)
                    LOG.debug(metadata_dict)

                if 'working_directory' in metadata_dict:
                    os.chdir(metadata_dict['working_directory'])

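            # os.walk is advanced only once, so just the files directly in the
            # input directory are collected; subdirectories are not traversed.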
            _, _, file_names = next(os.walk(input_path), ([], [], []))
            files = [
                os.path.join(input_path, file_name) for file_name in file_names
            ]

        for file_path in files:
            report_stats = parse(file_path, context, metadata_dict,
                                 suppress_handler, skip_handler, 'print_steps'
                                 in args)

            severity_stats.update(Counter(report_stats.get('severity', {})))
            file_stats.update(Counter(report_stats.get('files', {})))
            report_count.update(Counter(report_stats.get('reports', {})))

        print("\n----==== Summary ====----")

        if file_stats:
            vals = [[os.path.basename(k), v]
                    for k, v in dict(file_stats).items()]
            keys = ['Filename', 'Report count']
            table = twodim_to_str('table', keys, vals, 1, True)
            print(table)

        if severity_stats:
            vals = [[k, v] for k, v in dict(severity_stats).items()]
            keys = ['Severity', 'Report count']
            table = twodim_to_str('table', keys, vals, 1, True)
            print(table)

        report_count = dict(report_count).get("report_count", 0)
        print("----=================----")
        print("Total number of reports: {}".format(report_count))
        print("----=================----")

    os.chdir(original_cwd)
Example #27
def handle_diff_results(args):

    init_logger(args.verbose if 'verbose' in args else None)

    context = generic_package_context.get_context()

    def get_diff_results(client, baseids, cmp_data):

        report_filter = ttypes.ReportFilter()
        add_filter_conditions(report_filter, args.filter)

        # Do not show resolved bugs in compare mode new.
        if cmp_data.diffType == ttypes.DiffType.NEW:
            report_filter.detectionStatus = [
                ttypes.DetectionStatus.NEW, ttypes.DetectionStatus.UNRESOLVED,
                ttypes.DetectionStatus.REOPENED
            ]

        sort_mode = [(ttypes.SortMode(ttypes.SortType.FILENAME,
                                      ttypes.Order.ASC))]
        limit = constants.MAX_QUERY_SIZE
        offset = 0

        all_results = []
        results = client.getRunResults(baseids, limit, offset, sort_mode,
                                       report_filter, cmp_data)

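        # Page through the results until the server returns an empty list;
        # each request fetches at most MAX_QUERY_SIZE reports.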
        while results:
            all_results.extend(results)
            offset += limit
            results = client.getRunResults(baseids, limit, offset, sort_mode,
                                           report_filter, cmp_data)
        return all_results

    def get_report_dir_results(reportdir):
        all_reports = []
        for filename in os.listdir(reportdir):
            if filename.endswith(".plist"):
                file_path = os.path.join(reportdir, filename)
                LOG.debug("Parsing:" + file_path)
                try:
                    files, reports = plist_parser.parse_plist(file_path)
                    for report in reports:
                        report.main['location']['file_name'] = \
                            files[int(report.main['location']['file'])]
                    all_reports.extend(reports)

                except Exception as ex:
                    LOG.error('The generated plist is not valid!')
                    LOG.error(ex)
        return all_reports

    def get_line_from_file(filename, lineno):
        with open(filename, 'r') as f:
            i = 1
            for line in f:
                if i == lineno:
                    return line
                i += 1
        return ""

    def get_diff_base_results(client, baseids, base_hashes, suppressed_hashes):
        base_results = []
        report_filter = ttypes.ReportFilter()
        add_filter_conditions(report_filter, args.filter)

        sort_mode = [(ttypes.SortMode(ttypes.SortType.FILENAME,
                                      ttypes.Order.ASC))]
        limit = constants.MAX_QUERY_SIZE
        offset = 0

        report_filter.reportHash = base_hashes + suppressed_hashes
        results = client.getRunResults(baseids, limit, offset, sort_mode,
                                       report_filter, None)
        while results:
            base_results.extend(results)
            offset += limit
            results = client.getRunResults(baseids, limit, offset, sort_mode,
                                           report_filter, None)
        return base_results

    def get_diff_report_dir(client, baseids, report_dir, cmp_data):
        filtered_reports = []
        report_dir_results = get_report_dir_results(report_dir)
        new_hashes = {}
        suppressed_in_code = []

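        # Index the local reports by bug hash and collect the hashes of
        # reports that are suppressed with source code comments.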
        for rep in report_dir_results:
            bughash = rep.main['issue_hash_content_of_line_in_context']
            source_file = rep.main['location']['file_name']
            bug_line = rep.main['location']['line']
            checker_name = rep.main['check_name']

            new_hashes[bughash] = rep
            sc_handler = SourceCodeCommentHandler(source_file)
            src_comment_data = sc_handler.filter_source_line_comments(
                bug_line, checker_name)

            if len(src_comment_data) == 1:
                suppressed_in_code.append(bughash)
                LOG.debug("Bug " + bughash + "is suppressed in code. file:" +
                          source_file + "Line " + str(bug_line))
            elif len(src_comment_data) > 1:
                LOG.warning("Multiple source code comment can be found "
                            "for '{0}' checker in '{1}' at line {2}. "
                            "This bug will not be suppressed!".format(
                                checker_name, source_file, bug_line))

        base_hashes = client.getDiffResultsHash(baseids, new_hashes.keys(),
                                                cmp_data.diffType)

        if cmp_data.diffType == ttypes.DiffType.NEW or \
           cmp_data.diffType == ttypes.DiffType.UNRESOLVED:
            # Show reports from the report dir which are not present in the
            # baseline (NEW reports) or appear on both sides (UNRESOLVED
            # reports) and are not suppressed in the code.
            for result in report_dir_results:
                h = result.main['issue_hash_content_of_line_in_context']
                if h in base_hashes and h not in suppressed_in_code:
                    filtered_reports.append(result)
        elif cmp_data.diffType == ttypes.DiffType.RESOLVED:
            # Show bugs in the baseline (server) which are not present in the
            # report dir or suppressed.
            results = get_diff_base_results(client, baseids, base_hashes,
                                            suppressed_in_code)
            for result in results:
                filtered_reports.append(result)

        return filtered_reports

    def cached_report_file_lookup(file_cache, file_id):
        """
        Get source file data for the given file and caches it in a file cache
        if file data is not found in the cache. Finally, it returns the source
        file data from the cache.
        """
        if file_id not in file_cache:
            source = client.getSourceFileData(file_id, True,
                                              ttypes.Encoding.BASE64)
            file_content = base64.b64decode(source.fileContent)
            file_cache[file_id] = {
                'id': file_id,
                'path': source.filePath,
                'content': file_content
            }

        return file_cache[file_id]

    def get_report_data(client, reports, file_cache):
        """
        Returns necessary report files and report data events for the HTML
        plist parser.
        """
        file_sources = {}
        report_data = []

        for report in reports:
            file_sources[report.fileId] = cached_report_file_lookup(
                file_cache, report.fileId)

            details = client.getReportDetails(report.reportId)
            events = []
            for index, event in enumerate(details.pathEvents):
                file_sources[event.fileId] = cached_report_file_lookup(
                    file_cache, event.fileId)

                events.append({
                    'line': event.startLine,
                    'col': event.startCol,
                    'file': event.fileId,
                    'msg': event.msg,
                    'step': index + 1
                })
            report_data.append(events)

        return {'files': file_sources, 'reports': report_data}

    def reports_to_report_data(reports):
        """
        Converts Report objects parsed from one plist file into report data
        events for the HTML plist parser.
        """
        file_sources = {}
        fname_to_fid = {}
        report_data = []
        findex = 0

        for report in reports:
            # Not all reports in this list refer to the same files, so build a
            # single file list containing the files of every report.
            for f in report.files:
                if f not in fname_to_fid:
                    try:
                        content = open(f, 'r').read()
                    except (OSError, IOError):
                        content = f + " NOT FOUND."
                    file_sources[findex] = {
                        'id': findex,
                        'path': f,
                        'content': content
                    }
                    fname_to_fid[f] = findex
                    findex += 1

            events = []
            pathElements = report.bug_path
            index = 1
            for element in pathElements:
                if element['kind'] == 'event':
                    fname = report.files[element['location']['file']]
                    new_fid = fname_to_fid[fname]
                    events.append({
                        'line': element['location']['line'],
                        'col': element['location']['col'],
                        'file': new_fid,
                        'msg': element['message'],
                        'step': index
                    })
                    index += 1
            report_data.append(events)

        return {'files': file_sources, 'reports': report_data}

    def report_to_html(client, reports, output_dir):
        """
        Generate HTML output files for the given reports in the given output
        directory by using the Plist To HTML parser.
        """
        html_builder = PlistToHtml.HtmlBuilder(context.path_plist_to_html_dist)

        file_report_map = defaultdict(list)
        for report in reports:
            file_path = ""
            if isinstance(report, Report):
                file_path = report.main['location']['file_name']
            else:
                file_path = report.checkedFile
            file_report_map[file_path].append(report)

        file_cache = {}
        for file_path, file_reports in file_report_map.items():
            checked_file = file_path
            filename = os.path.basename(checked_file)
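            # Hash the full file path so output files for sources with the
            # same base name do not collide.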
            h = int(hashlib.md5(file_path).hexdigest(), 16) % (10**8)

            if isinstance(file_reports[0], Report):
                report_data = reports_to_report_data(file_reports)
            else:
                report_data = get_report_data(client, file_reports, file_cache)

            output_path = os.path.join(output_dir,
                                       filename + '_' + str(h) + '.html')
            html_builder.create(output_path, report_data)
            print('Html file was generated for file://{0}: file://{1}'.format(
                checked_file, output_path))

    def print_reports(client, reports, output_format):
        output_dir = args.export_dir if 'export_dir' in args else None
        if 'clean' in args and os.path.isdir(output_dir):
            print("Previous analysis results in '{0}' have been removed, "
                  "overwriting with current results.".format(output_dir))
            shutil.rmtree(output_dir)

        if output_format == 'json':
            output = []
            for report in reports:
                if isinstance(report, Report):
                    output.append(report.main)
                else:
                    output.append(report)
            print(CmdLineOutputEncoder().encode(output))
            return

        if output_format == 'html':
            output_dir = args.export_dir
            if not os.path.exists(output_dir):
                os.makedirs(output_dir)

            print("Generating HTML output files to file://{0} directory:\n".
                  format(output_dir))

            report_to_html(client, reports, output_dir)

            print('\nTo view the results in a browser run:\n'
                  '  $ firefox {0}'.format(args.export_dir))
            return

        header = ['File', 'Checker', 'Severity', 'Msg', 'Source']
        rows = []

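        # Collect the source lines referenced by server-side reports so their
        # contents can be fetched from the server in a single request.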
        source_lines = defaultdict(set)
        for report in reports:
            if not isinstance(report, Report):
                source_lines[report.fileId].add(report.line)

        lines_in_files_requested = []
        for key in source_lines:
            lines_in_files_requested.append(
                ttypes.LinesInFilesRequested(fileId=key,
                                             lines=source_lines[key]))

        source_line_contents = client.getLinesInSourceFileContents(
            lines_in_files_requested, ttypes.Encoding.BASE64)

        for report in reports:
            if isinstance(report, Report):
                # report is coming from a plist file.
                bug_line = report.main['location']['line']
                bug_col = report.main['location']['col']
                sev = 'unknown'
                checked_file = report.main['location']['file_name']\
                    + ':' + str(bug_line) + ":" + str(bug_col)
                check_name = report.main['check_name']
                check_msg = report.main['description']
                source_line =\
                    get_line_from_file(report.main['location']['file_name'],
                                       bug_line)
            else:
                # report is of ReportData type coming from CodeChecker server.
                bug_line = report.line
                bug_col = report.column
                sev = ttypes.Severity._VALUES_TO_NAMES[report.severity]
                checked_file = report.checkedFile + ':' + str(bug_line) +\
                    ":" + str(bug_col)
                source_line = base64.b64decode(
                    source_line_contents[report.fileId][bug_line])
                check_name = report.checkerId
                check_msg = report.checkerMsg
            rows.append(
                (checked_file, check_name, sev, check_msg, source_line))
        if output_format == 'plaintext':
            for row in rows:
                print("{0}: {1} [{2}]\n{3}\n".format(row[0], row[3], row[1],
                                                     row[4]))
        else:
            print(twodim_to_str(output_format, header, rows))

    client = setup_client(args.product_url)

    base_runs = get_runs(client, [args.basename])
    base_ids = map(lambda run: run.runId, base_runs)

    if len(base_ids) == 0:
        LOG.warning("No run names match the given pattern: " + args.basename)
        sys.exit(1)

    LOG.info("Matching base runs: " +
             ', '.join(map(lambda run: run.name, base_runs)))

    cmp_data = ttypes.CompareData()
    if 'new' in args:
        cmp_data.diffType = ttypes.DiffType.NEW
    elif 'unresolved' in args:
        cmp_data.diffType = ttypes.DiffType.UNRESOLVED
    elif 'resolved' in args:
        cmp_data.diffType = ttypes.DiffType.RESOLVED

    results = []
    if os.path.isdir(args.newname):
        # If newname is a valid directory we assume that it is a report dir and
        # we are in local compare mode.
        results = get_diff_report_dir(client, base_ids,
                                      os.path.abspath(args.newname), cmp_data)
    else:
        new_runs = get_runs(client, [args.newname])
        cmp_data.runIds = map(lambda run: run.runId, new_runs)

        if len(new_runs) == 0:
            LOG.warning("No run names match the given pattern: " +
                        args.newname)
            sys.exit(1)

        LOG.info("Matching new runs: " +
                 ', '.join(map(lambda run: run.name, new_runs)))

        results = get_diff_results(client, base_ids, cmp_data)

    if len(results) == 0:
        LOG.info("No results.")
    else:
        print_reports(client, results, args.output_format)
Example #28
def main(args):
    """
    Store the defect results in the specified input list as bug reports in the
    database.
    """

    if not host_check.check_zlib():
        raise Exception("zlib is not available on the system!")

    # To ensure the help message prints the default folder properly,
    # the 'default' for 'args.input' is a string, not a list.
    # But we need a list for the iteration below to work.
    if isinstance(args.input, str):
        args.input = [args.input]

    if 'name' not in args:
        LOG.debug("Generating name for analysis...")
        generated = __get_run_name(args.input)
        if generated:
            setattr(args, 'name', generated)
        else:
            LOG.error("No suitable name was found in the inputs for the "
                      "analysis run. Please specify one by passing argument "
                      "--name run_name in the invocation.")
            sys.exit(2)  # argparse returns error code 2 for bad invocations.

    LOG.info("Storing analysis results for run '" + args.name + "'")

    if 'force' in args:
        LOG.info("argument --force was specified: the run with name '" +
                 args.name + "' will be deleted.")

    protocol, host, port, product_name = split_product_url(args.product_url)

    # Before any transmission happens, check if we have the PRODUCT_STORE
    # permission to prevent a possibly long ZIP operation only to get an
    # error later on.
    product_client = libclient.setup_product_client(protocol,
                                                    host, port, product_name)
    product_id = product_client.getCurrentProduct().id

    auth_client, _ = libclient.setup_auth_client(protocol, host, port)
    has_perm = libclient.check_permission(
        auth_client, Permission.PRODUCT_STORE, {'productID': product_id})
    if not has_perm:
        LOG.error("You are not authorised to store analysis results in "
                  "product '{0}'".format(product_name))
        sys.exit(1)

    # Setup connection to the remote server.
    client = libclient.setup_client(args.product_url)

    LOG.debug("Initializing client connecting to {0}:{1}/{2} done."
              .format(host, port, product_name))

    _, zip_file = tempfile.mkstemp('.zip')
    LOG.debug("Will write mass store ZIP to '{0}'...".format(zip_file))

    try:
        assemble_zip(args.input, zip_file, client)
        with open(zip_file, 'rb') as zf:
            b64zip = base64.b64encode(zf.read())

        context = generic_package_context.get_context()

        client.massStoreRun(args.name,
                            args.tag if 'tag' in args else None,
                            str(context.version),
                            b64zip,
                            'force' in args)

        LOG.info("Storage finished successfully.")
    except Exception as ex:
        LOG.info("Storage failed: " + str(ex))
        sys.exit(1)
    finally:
        os.remove(zip_file)
Example #29
def handle_diff_results(args):

    init_logger(args.verbose if 'verbose' in args else None)
    check_deprecated_arg_usage(args)

    f_severities, f_checkers, f_file_path, _, _ = check_filter_values(args)

    context = generic_package_context.get_context()

    def skip_report_dir_result(report):
        """
        Returns True if the report should be skipped from the results based on
        the given filter set.
        """
        if f_severities:
            severity_name = context.severity_map.get(report.main['check_name'])
            if severity_name.lower() not in map(str.lower, f_severities):
                return True

        if f_checkers:
            checker_name = report.main['check_name']
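            # The checker name filters may contain '*' wildcards which are
            # translated to regular expressions for matching.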
            if not any([re.match(r'^' + c.replace("*", ".*") + '$',
                                 checker_name, re.IGNORECASE)
                        for c in f_checkers]):
                return True

        if f_file_path:
            file_path = report.files[int(report.main['location']['file'])]
            if not any([re.match(r'^' + f.replace("*", ".*") + '$',
                                 file_path, re.IGNORECASE)
                        for f in f_file_path]):
                return True

        if 'checker_msg' in args:
            checker_msg = report.main['description']
            if not any([re.match(r'^' + c.replace("*", ".*") + '$',
                                 checker_msg, re.IGNORECASE)
                        for c in args.checker_msg]):
                return True

        return False

    def get_report_dir_results(reportdir):
        all_reports = []
        processed_path_hashes = set()
        for filename in os.listdir(reportdir):
            if filename.endswith(".plist"):
                file_path = os.path.join(reportdir, filename)
                LOG.debug("Parsing:" + file_path)
                try:
                    files, reports = plist_parser.parse_plist(file_path)
                    for report in reports:
                        path_hash = get_report_path_hash(report, files)
                        if path_hash in processed_path_hashes:
                            LOG.debug("Not showing report because it is a "
                                      "deduplication of an already processed "
                                      "report!")
                            LOG.debug("Path hash: %s", path_hash)
                            LOG.debug(report)
                            continue

                        if skip_report_dir_result(report):
                            continue

                        processed_path_hashes.add(path_hash)
                        report.main['location']['file_name'] = \
                            files[int(report.main['location']['file'])]
                        all_reports.append(report)

                except Exception as ex:
                    LOG.error('The generated plist is not valid!')
                    LOG.error(ex)
        return all_reports

    def get_line_from_file(filename, lineno):
        with open(filename, 'r') as f:
            i = 1
            for line in f:
                if i == lineno:
                    return line
                i += 1
        return ""

    def get_diff_base_results(client, baseids, base_hashes, suppressed_hashes):
        base_results = []
        report_filter = ttypes.ReportFilter()
        add_filter_conditions(client, report_filter, args)

        sort_mode = [(ttypes.SortMode(
            ttypes.SortType.FILENAME,
            ttypes.Order.ASC))]
        limit = constants.MAX_QUERY_SIZE
        offset = 0

        report_filter.reportHash = base_hashes + suppressed_hashes
        results = client.getRunResults(baseids,
                                       limit,
                                       offset,
                                       sort_mode,
                                       report_filter,
                                       None)
        while results:
            base_results.extend(results)
            offset += limit
            results = client.getRunResults(baseids,
                                           limit,
                                           offset,
                                           sort_mode,
                                           report_filter,
                                           None)
        return base_results

    def get_suppressed_reports(reports):
        """
        Returns the hashes of reports suppressed by source code comments.
        """
        suppressed_in_code = []
        for rep in reports:
            bughash = rep.report_hash
            source_file = rep.main['location']['file_name']
            bug_line = rep.main['location']['line']
            checker_name = rep.main['check_name']

            sc_handler = SourceCodeCommentHandler(source_file)
            src_comment_data = sc_handler.filter_source_line_comments(
                bug_line,
                checker_name)

            if len(src_comment_data) == 1:
                suppressed_in_code.append(bughash)
                LOG.debug("Bug " + bughash +
                          "is suppressed in code. file:" + source_file +
                          "Line "+str(bug_line))
            elif len(src_comment_data) > 1:
                LOG.warning(
                    "Multiple source code comment can be found "
                    "for '{0}' checker in '{1}' at line {2}. "
                    "This bug will not be suppressed!".format(
                        checker_name, source_file, bug_line))
        return suppressed_in_code

    def get_diff_type():
        """
        Returns Thrift DiffType value by processing the arguments.
        """
        if 'new' in args:
            return ttypes.DiffType.NEW

        if 'unresolved' in args:
            return ttypes.DiffType.UNRESOLVED

        if 'resolved' in args:
            return ttypes.DiffType.RESOLVED

        return None

    def get_diff_local_dir_remote_run(client, report_dir, run_name):
        """
        Compares a local report directory with a remote run.
        """
        filtered_reports = []
        report_dir_results = get_report_dir_results(
            os.path.abspath(report_dir))
        suppressed_in_code = get_suppressed_reports(report_dir_results)

        diff_type = get_diff_type()
        run_ids, run_names, _ = process_run_arg(run_name)
        local_report_hashes = set([r.report_hash for r in report_dir_results])

        if diff_type == ttypes.DiffType.NEW:
            # Get report hashes which can be found only in the remote runs.
            remote_hashes = \
                client.getDiffResultsHash(run_ids,
                                          local_report_hashes,
                                          ttypes.DiffType.RESOLVED)

            results = get_diff_base_results(client, run_ids,
                                            remote_hashes,
                                            suppressed_in_code)
            for result in results:
                filtered_reports.append(result)
        elif diff_type == ttypes.DiffType.UNRESOLVED:
            # Get remote hashes which can be found in the remote run and in the
            # local report directory.
            remote_hashes = \
                client.getDiffResultsHash(run_ids,
                                          local_report_hashes,
                                          ttypes.DiffType.UNRESOLVED)
            for result in report_dir_results:
                rep_h = result.report_hash
                if rep_h in remote_hashes and rep_h not in suppressed_in_code:
                    filtered_reports.append(result)
        elif diff_type == ttypes.DiffType.RESOLVED:
            # Get remote hashes which can be found in the remote run and in the
            # local report directory.
            remote_hashes = \
                client.getDiffResultsHash(run_ids,
                                          local_report_hashes,
                                          ttypes.DiffType.UNRESOLVED)
            for result in report_dir_results:
                if result.report_hash not in remote_hashes:
                    filtered_reports.append(result)

        return filtered_reports, run_names

    def get_diff_remote_run_local_dir(client, run_name, report_dir):
        """
        Compares a remote run with a local report directory.
        """
        filtered_reports = []
        report_dir_results = get_report_dir_results(
            os.path.abspath(report_dir))
        suppressed_in_code = get_suppressed_reports(report_dir_results)

        diff_type = get_diff_type()
        run_ids, run_names, _ = process_run_arg(run_name)
        local_report_hashes = set([r.report_hash for r in report_dir_results])

        remote_hashes = client.getDiffResultsHash(run_ids,
                                                  local_report_hashes,
                                                  diff_type)

        if diff_type in [ttypes.DiffType.NEW, ttypes.DiffType.UNRESOLVED]:
            # Show reports from the report dir which are not present in
            # the baseline (NEW reports) or appear on both sides (UNRESOLVED
            # reports) and are not suppressed in the code.
            for result in report_dir_results:
                rep_h = result.report_hash
                if rep_h in remote_hashes and rep_h not in suppressed_in_code:
                    filtered_reports.append(result)
        elif diff_type == ttypes.DiffType.RESOLVED:
            # Show bugs in the baseline (server) which are not present in
            # the report dir or suppressed.
            results = get_diff_base_results(client,
                                            run_ids,
                                            remote_hashes,
                                            suppressed_in_code)
            for result in results:
                filtered_reports.append(result)

        return filtered_reports, run_names

    def get_diff_remote_runs(client, basename, newname):
        """
        Compares two remote runs and returns the filtered results.
        """
        report_filter = ttypes.ReportFilter()
        add_filter_conditions(client, report_filter, args)

        base_ids, base_run_names, base_run_tags = process_run_arg(basename)
        report_filter.runTag = base_run_tags

        cmp_data = ttypes.CompareData()
        cmp_data.diffType = get_diff_type()

        new_ids, new_run_names, new_run_tags = process_run_arg(newname)
        cmp_data.runIds = new_ids
        cmp_data.runTag = new_run_tags

        # Do not show resolved bugs in compare mode new.
        if cmp_data.diffType == ttypes.DiffType.NEW:
            report_filter.detectionStatus = [
                ttypes.DetectionStatus.NEW,
                ttypes.DetectionStatus.UNRESOLVED,
                ttypes.DetectionStatus.REOPENED]

        sort_mode = [(ttypes.SortMode(
            ttypes.SortType.FILENAME,
            ttypes.Order.ASC))]
        limit = constants.MAX_QUERY_SIZE
        offset = 0

        all_results = []
        results = client.getRunResults(base_ids,
                                       limit,
                                       offset,
                                       sort_mode,
                                       report_filter,
                                       cmp_data)

        while results:
            all_results.extend(results)
            offset += limit
            results = client.getRunResults(base_ids,
                                           limit,
                                           offset,
                                           sort_mode,
                                           report_filter,
                                           cmp_data)
        return all_results, base_run_names, new_run_names

    def get_diff_local_dirs(basename, newname):
        """
        Compares two report directories and returns the filtered results.
        """
        filtered_reports = []
        base_results = get_report_dir_results(os.path.abspath(basename))
        new_results = get_report_dir_results(os.path.abspath(newname))

        base_hashes = set([res.report_hash for res in base_results])
        new_hashes = set([res.report_hash for res in new_results])

        diff_type = get_diff_type()
        if diff_type == ttypes.DiffType.NEW:
            for res in new_results:
                if res.report_hash not in base_hashes:
                    filtered_reports.append(res)
        elif diff_type == ttypes.DiffType.UNRESOLVED:
            for res in new_results:
                if res.report_hash in base_hashes:
                    filtered_reports.append(res)
        elif diff_type == ttypes.DiffType.RESOLVED:
            for res in base_results:
                if res.report_hash not in new_hashes:
                    filtered_reports.append(res)

        return filtered_reports

    def cached_report_file_lookup(file_cache, file_id):
        """
        Get source file data for the given file and caches it in a file cache
        if file data is not found in the cache. Finally, it returns the source
        file data from the cache.
        """
        if file_id not in file_cache:
            source = client.getSourceFileData(file_id, True,
                                              ttypes.Encoding.BASE64)
            file_content = base64.b64decode(source.fileContent)
            file_cache[file_id] = {'id': file_id,
                                   'path': source.filePath,
                                   'content': file_content}

        return file_cache[file_id]

    def get_report_data(client, reports, file_cache):
        """
        Returns necessary report files and report data events for the HTML
        plist parser.
        """
        file_sources = {}
        report_data = []

        for report in reports:
            file_sources[report.fileId] = cached_report_file_lookup(
                file_cache, report.fileId)

            details = client.getReportDetails(report.reportId)
            events = []
            for index, event in enumerate(details.pathEvents):
                file_sources[event.fileId] = cached_report_file_lookup(
                    file_cache, event.fileId)

                events.append({'line': event.startLine,
                               'col': event.startCol,
                               'file': event.fileId,
                               'msg': event.msg,
                               'step': index + 1})

            report_data.append({
                'events': events,
                'path': report.checkedFile,
                'reportHash': report.bugHash,
                'checkerName': report.checkerId})

        return {'files': file_sources,
                'reports': report_data}

    def reports_to_report_data(reports):
        """
        Converts Report objects parsed from one plist file into report data
        events for the HTML plist parser.
        """
        file_sources = {}
        fname_to_fid = {}
        report_data = []
        findex = 0

        for report in reports:
            # Not all reports in this list refer to the same files, so build a
            # single file list containing the files of every report.
            for f in report.files:
                if f not in fname_to_fid:
                    try:
                        with codecs.open(f, 'r', 'UTF-8',
                                         errors='replace') as source_data:
                            content = source_data.read()
                    except (OSError, IOError):
                        content = f + " NOT FOUND."
                    file_sources[findex] = {'id': findex,
                                            'path': f,
                                            'content': content}
                    fname_to_fid[f] = findex
                    findex += 1

            events = []
            pathElements = report.bug_path
            index = 1
            for element in pathElements:
                if element['kind'] == 'event':
                    fname = report.files[element['location']['file']]
                    new_fid = fname_to_fid[fname]
                    events.append({'line': element['location']['line'],
                                   'col':  element['location']['col'],
                                   'file': new_fid,
                                   'msg':  element['message'],
                                   'step': index})
                    index += 1

            report_hash = report.main['issue_hash_content_of_line_in_context']
            report_data.append({
                'events': events,
                'path': report.main['location']['file_name'],
                'reportHash': report_hash,
                'checkerName': report.main['check_name']})

        return {'files': file_sources,
                'reports': report_data}

    def report_to_html(client, reports, output_dir):
        """
        Generate HTML output files for the given reports in the given output
        directory by using the Plist To HTML parser.
        """
        html_builder = PlistToHtml.HtmlBuilder(
            context.path_plist_to_html_dist,
            context.severity_map)

        file_report_map = defaultdict(list)
        for report in reports:
            file_path = ""
            if isinstance(report, Report):
                file_path = report.main['location']['file_name']
            else:
                file_path = report.checkedFile
            file_report_map[file_path].append(report)

        file_cache = {}
        for file_path, file_reports in file_report_map.items():
            checked_file = file_path
            filename = os.path.basename(checked_file)
            h = int(hashlib.md5(file_path).hexdigest(), 16) % (10 ** 8)

            if isinstance(file_reports[0], Report):
                report_data = reports_to_report_data(file_reports)
            else:
                report_data = get_report_data(client, file_reports, file_cache)

            output_path = os.path.join(output_dir,
                                       filename + '_' + str(h) + '.html')
            html_builder.create(output_path, report_data)
            print('Html file was generated for file://{0}: file://{1}'.format(
                checked_file, output_path))

        html_builder.create_index_html(output_dir)

    def print_reports(client, reports, output_format):
        output_dir = args.export_dir if 'export_dir' in args else None
        if 'clean' in args and os.path.isdir(output_dir):
            print("Previous analysis results in '{0}' have been removed, "
                  "overwriting with current results.".format(output_dir))
            shutil.rmtree(output_dir)

        if output_format == 'json':
            output = []
            for report in reports:
                if isinstance(report, Report):
                    output.append(report.main)
                else:
                    output.append(report)
            print(CmdLineOutputEncoder().encode(output))
            return

        if output_format == 'html':
            output_dir = args.export_dir
            if not os.path.exists(output_dir):
                os.makedirs(output_dir)

            print("Generating HTML output files to file://{0} directory:\n"
                  .format(output_dir))

            report_to_html(client, reports, output_dir)

            print('\nTo view the results in a browser run:\n'
                  '  $ firefox {0}'.format(os.path.join(args.export_dir,
                                                        'index.html')))
            return

        header = ['Severity', 'File', 'Msg', 'Checker', 'Source']
        rows = []

        source_lines = defaultdict(set)
        for report in reports:
            if not isinstance(report, Report):
                source_lines[report.fileId].add(report.line)

        lines_in_files_requested = []
        for key in source_lines:
            lines_in_files_requested.append(
                ttypes.LinesInFilesRequested(fileId=key,
                                             lines=source_lines[key]))

        for report in reports:
            if isinstance(report, Report):
                # report is coming from a plist file.
                bug_line = report.main['location']['line']
                bug_col = report.main['location']['col']
                checked_file = report.main['location']['file_name']\
                    + ':' + str(bug_line) + ":" + str(bug_col)
                check_name = report.main['check_name']
                sev = context.severity_map.get(check_name)
                check_msg = report.main['description']
                source_line =\
                    get_line_from_file(report.main['location']['file_name'],
                                       bug_line)
            else:
                source_line_contents = client.getLinesInSourceFileContents(
                    lines_in_files_requested, ttypes.Encoding.BASE64)

                # report is of ReportData type coming from CodeChecker server.
                bug_line = report.line
                bug_col = report.column
                sev = ttypes.Severity._VALUES_TO_NAMES[report.severity]
                checked_file = report.checkedFile + ':' + str(bug_line) +\
                    ":" + str(bug_col)
                source_line = base64.b64decode(
                    source_line_contents[report.fileId][bug_line])
                check_name = report.checkerId
                check_msg = report.checkerMsg
            rows.append(
                (sev, checked_file, check_msg, check_name, source_line))
        if output_format == 'plaintext':
            for row in rows:
                print("[{0}] {1}: {2} [{3}]\n{4}\n".format(
                    row[0], row[1], row[2], row[3], row[4]))
        else:
            print(twodim_to_str(output_format, header, rows))

    def get_run_tag(client, run_ids, tag_name):
        """
        Returns run tag information for the given tag name in the given runs.
        """
        run_history_filter = ttypes.RunHistoryFilter()
        run_history_filter.tagNames = [tag_name]
        run_histories = client.getRunHistory(run_ids, None, None,
                                             run_history_filter)

        return run_histories[0] if len(run_histories) else None

    def process_run_arg(run_arg_with_tag):
        """
        Process the argument and return run ids, run names and run tag ids.
        The argument has the following format: <run_name>:<run_tag>
        """
        run_with_tag = run_arg_with_tag.split(':')
        run_name = run_with_tag[0]

        runs = get_runs(client, [run_name])
        run_ids = map(lambda run: run.runId, runs)
        run_names = map(lambda run: run.name, runs)

        # Set base run tag if it is available.
        run_tag_name = run_with_tag[1] if len(run_with_tag) > 1 else None
        run_tags = None
        if run_tag_name:
            tag = get_run_tag(client, run_ids, run_tag_name)
            run_tags = [tag.id] if tag else None

        if not run_ids:
            LOG.warning(
                "No run names match the given pattern: " + run_arg_with_tag)
            sys.exit(1)

        LOG.info("Matching runs: %s",
                 ', '.join(map(lambda run: run.name, runs)))

        return run_ids, run_names, run_tags

    def print_diff_results(reports):
        """
        Print the results.
        """
        if reports:
            print_reports(client, reports, args.output_format)
        else:
            LOG.info("No results.")

    client = None

    # Set up the client only if we are not comparing two local report
    # directories.
    if not os.path.isdir(args.basename) or not os.path.isdir(args.newname):
        client = setup_client(args.product_url)

    if os.path.isdir(args.basename) and os.path.isdir(args.newname):
        reports = get_diff_local_dirs(args.basename, args.newname)
        print_diff_results(reports)
        LOG.info("Compared two local report directories %s and %s",
                 os.path.abspath(args.basename),
                 os.path.abspath(args.newname))
    elif os.path.isdir(args.newname):
        reports, base_run_names = \
            get_diff_remote_run_local_dir(client,
                                          args.basename,
                                          os.path.abspath(args.newname))
        print_diff_results(reports)
        LOG.info("Compared remote run(s) %s (matching: %s) and local report "
                 "directory %s",
                 args.basename,
                 ', '.join(base_run_names),
                 os.path.abspath(args.newname))
    elif os.path.isdir(args.basename):
        reports, new_run_names = \
            get_diff_local_dir_remote_run(client,
                                          os.path.abspath(args.basename),
                                          args.newname)
        print_diff_results(reports)
        LOG.info("Compared local report directory %s and remote run(s) %s "
                 "(matching: %s).",
                 os.path.abspath(args.basename),
                 args.newname,
                 ', '.join(new_run_names))
    else:
        reports, base_run_names, new_run_names = \
            get_diff_remote_runs(client, args.basename, args.newname)
        print_diff_results(reports)
        LOG.info("Compared multiple remote runs %s (matching: %s) and %s "
                 "(matching: %s)",
                 args.basename,
                 ', '.join(base_run_names),
                 args.newname,
                 ', '.join(new_run_names))
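A minimal, hypothetical invocation sketch for the diff handler above. It assumes the enclosing entry point is a main(args) function, as in the other examples, and that only the attributes read in this snippet (basename, newname, output_format, product_url) are needed; in a real run argparse fills these in from the CodeChecker command line, and the helpers called here may read further attributes.

from argparse import Namespace

# Compare two local report directories (hypothetical paths). In this case the
# code above never sets up a server client, so product_url stays unused.
args = Namespace(basename='./reports_base',
                 newname='./reports_new',
                 output_format='plaintext',
                 product_url='localhost:8001/Default')
main(args)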
Example #30
0
def main(args):
    """
    List the checkers available in the specified (or all supported) analyzers,
    along with their description or enabled status, in various formats.
    """

    logger.setup_logger(args.verbose if 'verbose' in args else None)

    # If nothing is set, list checkers for all supported analyzers.
    analyzers = args.analyzers \
        if 'analyzers' in args \
        else analyzer_types.supported_analyzers

    context = generic_package_context.get_context()
    working, errored = analyzer_types.check_supported_analyzers(
        analyzers, context)

    analyzer_environment = analyzer_env.get_check_env(
        context.path_env_extra, context.ld_lib_path_extra)

    analyzer_config_map = analyzer_types.build_config_handlers(
        args, context, working)
    # List available checker profiles.
    if 'profile' in args and args.profile == 'list':
        if 'details' not in args:
            if args.output_format not in ['csv', 'json']:
                header = ['Profile name']
            else:
                header = ['profile_name']
        else:
            if args.output_format not in ['csv', 'json']:
                header = ['Profile name', 'Description']
            else:
                header = ['profile_name', 'description']

        rows = []
        for (profile, description) in context.available_profiles.items():
            if 'details' not in args:
                rows.append([profile])
            else:
                rows.append([profile, description])

        print(output_formatters.twodim_to_str(args.output_format, header,
                                              rows))
        return

    # Use different, human-readable headers depending on the output format.
    if 'details' not in args:
        if args.output_format not in ['csv', 'json']:
            header = ['Name']
        else:
            header = ['name']
    else:
        if args.output_format not in ['csv', 'json']:
            header = ['', 'Name', 'Analyzer', 'Severity', 'Description']
        else:
            header = ['enabled', 'name', 'analyzer', 'severity', 'description']

    rows = []
    for analyzer in working:
        config_handler = analyzer_config_map.get(analyzer)
        source_analyzer = \
            analyzer_types.construct_analyzer_type(analyzer,
                                                   config_handler,
                                                   None)

        checkers = source_analyzer.get_analyzer_checkers(
            config_handler, analyzer_environment)
        default_checker_cfg = context.checker_config.get(analyzer +
                                                         '_checkers')

        if 'profile' in args:
            if args.profile not in context.available_profiles:
                LOG.error("Checker profile '" + args.profile +
                          "' does not exist!")
                LOG.error("To list available profiles, use '--profile list'.")
                return

            profile_checkers = [(args.profile, True)]
            analyzer_types.initialize_checkers(config_handler,
                                               context.available_profiles,
                                               context.package_root, checkers,
                                               default_checker_cfg,
                                               profile_checkers)
        else:
            analyzer_types.initialize_checkers(config_handler,
                                               context.available_profiles,
                                               context.package_root, checkers,
                                               default_checker_cfg)

        for checker_name, value in config_handler.checks().items():
            enabled, description = value

            if not enabled and 'profile' in args:
                continue

            if enabled and 'only_disabled' in args:
                continue
            elif not enabled and 'only_enabled' in args:
                continue

            if args.output_format != 'json':
                enabled = '+' if enabled else '-'

            if 'details' not in args:
                rows.append([checker_name])
            else:
                severity = context.severity_map.get(checker_name,
                                                    'UNSPECIFIED')
                rows.append(
                    [enabled, checker_name, analyzer, severity, description])

    if len(rows) > 0:
        print(output_formatters.twodim_to_str(args.output_format, header,
                                              rows))

    for analyzer_binary, reason in errored:
        LOG.error("Failed to get checkers for '" + analyzer_binary +
                  "'! The error reason was: '" + reason + "'")
        LOG.error("Please check your installation and the "
                  "'config/package_layout.json' file!")
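A minimal, hypothetical sketch of driving this checker-listing entry point programmatically. The attribute names mirror what main() reads above (output_format, details, only_enabled); building the Namespace by hand is an assumption, since argparse normally populates it from the CodeChecker checkers command line.

from argparse import Namespace

# List every enabled checker of all working analyzers in CSV format,
# including the analyzer, severity and description columns.
args = Namespace(output_format='csv', details=True, only_enabled=True)
main(args)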