Example #1
def handle_list_products(args):

    # If the given output format is not 'table', redirect the logger's output
    # to stderr.
    stream = None
    if 'output_format' in args and args.output_format != 'table':
        stream = 'stderr'

    init_logger(args.verbose if 'verbose' in args else None, stream)

    protocol, host, port = split_server_url(args.server_url)
    client = setup_product_client(protocol, host, port)
    products = client.getProducts(None, None)

    if args.output_format == 'json':
        results = []
        for product in products:
            results.append({product.endpoint: product})
        print(CmdLineOutputEncoder().encode(results))
    else:  # plaintext, csv
        header = ['Database status', 'Endpoint', 'Name', 'Description']
        rows = []
        for product in products:
            name = convert.from_b64(product.displayedName_b64) \
                if product.displayedName_b64 else ''
            description = convert.from_b64(product.description_b64) \
                if product.description_b64 else ''

            if not product.accessible:
                db_status_msg = 'No access.'
            else:
                db_status = product.databaseStatus
                db_status_msg = database_status.db_status_msg.get(
                    db_status, 'Unknown database status')

            rows.append((db_status_msg, product.endpoint, name, description))

        print(twodim.to_str(args.output_format, header, rows))
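
The handler above probes args with the in operator instead of getattr. That works because argparse.Namespace implements membership testing, and options declared with default=argparse.SUPPRESS are simply absent from the namespace when they are not given on the command line. A minimal, self-contained sketch of that pattern; the parser below is a hypothetical stand-in, not CodeChecker's real argument parser:

import argparse

def build_parser():
    # Hypothetical parser mirroring only the options the handler consumes;
    # the real CodeChecker parser defines many more flags.
    parser = argparse.ArgumentParser(prog='list-products')
    parser.add_argument('--output', dest='output_format', default='table',
                        choices=['table', 'plaintext', 'csv', 'json'])
    parser.add_argument('--verbose', choices=['info', 'debug'],
                        default=argparse.SUPPRESS)
    parser.add_argument('server_url')
    return parser

args = build_parser().parse_args(['--output', 'json', 'https://localhost:8001'])
# SUPPRESS keeps 'verbose' out of the namespace when the flag is unused,
# which is what makes checks like "'verbose' in args" meaningful.
print('verbose' in args)   # False: the flag was not passed
print(args.output_format)  # 'json'
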
Example #2
def main(args):
    """
    Entry point for parsing analysis results and printing them to stdout
    in a human-readable format.
    """

    logger.setup_logger(args.verbose if 'verbose' in args else None)

    try:
        cmd_config.check_config_file(args)
    except FileNotFoundError as fnerr:
        LOG.error(fnerr)
        sys.exit(1)

    export = args.export if 'export' in args else None
    if export == 'html' and 'output_path' not in args:
        LOG.error("Argument --export not allowed without argument --output "
                  "when exporting to HTML.")
        sys.exit(1)

    if export == 'gerrit' and not gerrit.mandatory_env_var_is_set():
        sys.exit(1)

    context = analyzer_context.get_context()

    # To ensure the help message prints the default folder properly,
    # the 'default' for 'args.input' is a string, not a list.
    # But the iteration below needs a list to work on.
    if isinstance(args.input, str):
        args.input = [args.input]

    original_cwd = os.getcwd()

    src_comment_status_filter = args.review_status

    suppr_handler = None
    if 'suppress' in args:
        __make_handler = False
        if not os.path.isfile(args.suppress):
            if 'create_suppress' in args:
                with open(args.suppress,
                          'w',
                          encoding='utf-8',
                          errors='ignore') as _:
                    # Just create the file.
                    __make_handler = True
                    LOG.info(
                        "Will write source-code suppressions to "
                        "suppress file: %s", args.suppress)
            else:
                LOG.warning(
                    "Suppress file '%s' given, but it does not exist"
                    " -- will not suppress anything.", args.suppress)
        else:
            __make_handler = True

        if __make_handler:
            suppr_handler = suppress_handler.GenericSuppressHandler(
                args.suppress, 'create_suppress' in args,
                src_comment_status_filter)
    elif 'create_suppress' in args:
        LOG.error("Can't use '--export-source-suppress' unless '--suppress "
                  "SUPPRESS_FILE' is also given.")
        sys.exit(2)

    processed_path_hashes = set()

    skip_handler = None
    if 'skipfile' in args:
        with open(args.skipfile, 'r', encoding='utf-8',
                  errors='ignore') as skip_file:
            skip_handler = SkipListHandler(skip_file.read())

    trim_path_prefixes = args.trim_path_prefix if \
        'trim_path_prefix' in args else None

    if export:
        if export not in EXPORT_TYPES:
            LOG.error(f"Unknown export format: {export}")
            return

        # The HTML part will be handled separately below.
        if export != 'html':
            try:
                res = parse_convert_reports(args.input, export,
                                            context.severity_map,
                                            trim_path_prefixes)
                if 'output_path' in args:
                    output_path = os.path.abspath(args.output_path)

                    if not os.path.exists(output_path):
                        os.mkdir(output_path)

                    reports_json = os.path.join(output_path, 'reports.json')
                    with open(reports_json,
                              mode='w',
                              encoding='utf-8',
                              errors="ignore") as output_f:
                        output_f.write(json.dumps(res))

                print(json.dumps(res))
                return
            except Exception as ex:
                LOG.error(ex)
                sys.exit(1)

    def trim_path_prefixes_handler(source_file):
        """
        Callback for util.trim_path_prefixes to avoid a module dependency
        on plist_to_html.
        """
        return util.trim_path_prefixes(source_file, trim_path_prefixes)

    html_builder = None

    def skip_html_report_data_handler(report_hash, source_file, report_line,
                                      checker_name, diag, files):
        """
        Report handler which skips bugs that were suppressed by source code
        comments. Returns a tuple: the first element decides whether the
        report should be skipped, and the second element is a list of source
        code comments related to the actual report.
        """
        files_dict = dict(enumerate(files))
        report = Report({'check_name': checker_name},
                        diag['path'],
                        files_dict,
                        metadata=None)
        path_hash = get_report_path_hash(report)
        if path_hash in processed_path_hashes:
            LOG.debug("Skip report because it is a deduplication of an "
                      "already processed report!")
            LOG.debug("Path hash: %s", path_hash)
            LOG.debug(diag)
            return True, []

        skip, source_code_comments = skip_report(report_hash, source_file,
                                                 report_line, checker_name,
                                                 suppr_handler,
                                                 src_comment_status_filter)

        if skip_handler:
            skip |= skip_handler.should_skip(source_file)

        if not skip:
            processed_path_hashes.add(path_hash)

        return skip, source_code_comments

    file_change = set()
    severity_stats = defaultdict(int)
    file_stats = defaultdict(int)
    report_count = 0

    for input_path in args.input:
        input_path = os.path.abspath(input_path)
        os.chdir(original_cwd)
        LOG.debug("Parsing input argument: '%s'", input_path)

        if export == 'html':
            output_path = os.path.abspath(args.output_path)

            if not html_builder:
                html_builder = \
                    PlistToHtml.HtmlBuilder(context.path_plist_to_html_dist,
                                            context.severity_map)

            LOG.info("Generating html output files:")
            PlistToHtml.parse(input_path, output_path,
                              context.path_plist_to_html_dist,
                              skip_html_report_data_handler, html_builder,
                              trim_path_prefixes_handler)
            continue

        files = []
        metadata_dict = {}
        if os.path.isfile(input_path):
            files.append(input_path)

        elif os.path.isdir(input_path):
            metadata_file = os.path.join(input_path, "metadata.json")
            if os.path.exists(metadata_file):
                metadata_dict = util.load_json_or_empty(metadata_file)
                LOG.debug(metadata_dict)

                if 'working_directory' in metadata_dict:
                    working_dir = metadata_dict['working_directory']
                    try:
                        os.chdir(working_dir)
                    except OSError as oerr:
                        LOG.debug(oerr)
                        LOG.error(
                            "Working directory %s is missing.\n"
                            "Can not parse reports safely.", working_dir)
                        sys.exit(1)

            _, _, file_names = next(os.walk(input_path), ([], [], []))
            files = [
                os.path.join(input_path, file_name) for file_name in file_names
            ]

        file_report_map = defaultdict(list)

        plist_pltf = PlistToPlaintextFormatter(suppr_handler, skip_handler,
                                               context.severity_map,
                                               processed_path_hashes,
                                               trim_path_prefixes,
                                               src_comment_status_filter)
        plist_pltf.print_steps = 'print_steps' in args

        for file_path in files:
            f_change = parse_with_plt_formatter(file_path, metadata_dict,
                                                plist_pltf, file_report_map)
            file_change = file_change.union(f_change)

        report_stats = plist_pltf.write(file_report_map)
        sev_stats = report_stats.get('severity')
        for severity in sev_stats:
            severity_stats[severity] += sev_stats[severity]

        f_stats = report_stats.get('files')
        for file_path in f_stats:
            file_stats[file_path] += f_stats[file_path]

        rep_stats = report_stats.get('reports')
        report_count += rep_stats.get("report_count", 0)

    # Create index.html and statistics.html for the generated html files.
    if html_builder:
        html_builder.create_index_html(args.output_path)
        html_builder.create_statistics_html(args.output_path)

        print('\nTo view statistics in a browser run:\n> firefox {0}'.format(
            os.path.join(args.output_path, 'statistics.html')))

        print('\nTo view the results in a browser run:\n> firefox {0}'.format(
            os.path.join(args.output_path, 'index.html')))
    else:
        print("\n----==== Summary ====----")
        if file_stats:
            vals = [[os.path.basename(k), v]
                    for k, v in dict(file_stats).items()]
            vals.sort(key=itemgetter(0))
            keys = ['Filename', 'Report count']
            table = twodim.to_str('table', keys, vals, 1, True)
            print(table)

        if severity_stats:
            vals = [[k, v] for k, v in dict(severity_stats).items()]
            vals.sort(key=itemgetter(0))
            keys = ['Severity', 'Report count']
            table = twodim.to_str('table', keys, vals, 1, True)
            print(table)

        print("----=================----")
        print("Total number of reports: {}".format(report_count))
        print("----=================----")

    if file_change:
        changed_files = '\n'.join([' - ' + f for f in file_change])
        LOG.warning(
            "The following source file contents changed since the "
            "latest analysis:\n%s\nMultiple reports were not "
            "shown and skipped from the statistics. Please "
            "analyze your project again to update the "
            "reports!", changed_files)

    os.chdir(original_cwd)

    if report_count != 0:
        sys.exit(2)
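
Example #2 leans on two small patterns worth isolating: per-severity and per-file counters built with defaultdict(int), and deduplication of repeated reports through a shared set of path hashes. A stripped-down sketch of both, using made-up report dictionaries instead of CodeChecker's Report objects and get_report_path_hash():

from collections import defaultdict

severity_stats = defaultdict(int)
file_stats = defaultdict(int)
processed_path_hashes = set()

# Hypothetical stand-ins for parsed reports; the real code derives the
# hash from the report's bug path.
reports = [
    {'hash': 'a1', 'severity': 'HIGH', 'file': 'main.cpp'},
    {'hash': 'a1', 'severity': 'HIGH', 'file': 'main.cpp'},  # duplicate
    {'hash': 'b2', 'severity': 'LOW', 'file': 'util.cpp'},
]

for report in reports:
    if report['hash'] in processed_path_hashes:
        continue  # the same deduplication the report handler performs
    processed_path_hashes.add(report['hash'])
    severity_stats[report['severity']] += 1
    file_stats[report['file']] += 1

print(dict(severity_stats))  # {'HIGH': 1, 'LOW': 1}
print(dict(file_stats))      # {'main.cpp': 1, 'util.cpp': 1}
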
Example #3
def main(args):
    """
    List the checkers available in the specified (or all supported) analyzers,
    along with their description or enabled status, in various formats.
    """

    # If the given output format is not 'table', redirect the logger's output
    # to stderr.
    logger.setup_logger(args.verbose if 'verbose' in args else None,
                        None if args.output_format == 'table' else 'stderr')

    context = analyzer_context.get_context()
    working_analyzers, errored = analyzer_types.check_supported_analyzers(
        args.analyzers, context)

    analyzer_environment = env.extend(context.path_env_extra,
                                      context.ld_lib_path_extra)

    analyzer_config_map = analyzer_types.build_config_handlers(
        args, context, working_analyzers)

    def uglify(text):
        """
        The CSV and JSON output formats use a machine-friendly header:
        lowercase, with spaces replaced by underscores.
        """
        return text.lower().replace(' ', '_')

    def match_guideline(checker_name, selected_guidelines):
        """
        Returns True if checker_name gives reports related to any of the
        selected guideline rules.
        checker_name -- A full checker name.
        selected_guidelines -- A list of guideline names or guideline rule IDs.
        """
        guideline = context.guideline_map.get(checker_name, {})
        guideline_set = set(guideline)
        for value in guideline.values():
            guideline_set |= set(value)

        return any(g in guideline_set for g in selected_guidelines)

    def format_guideline(guideline):
        """
        Convert guideline rules to human-readable format.
        guideline -- Dictionary in the following format:
                     {"guideline_1": ["rule_1", "rule_2"]}
        """
        return ' '.join('Related {} rules: {}'.format(g, ', '.join(r))
                        for g, r in guideline.items())

    # List available checker profiles.
    if 'profile' in args and args.profile == 'list':
        if 'details' in args:
            header = ['Profile name', 'Description']
            rows = context.profile_map.available_profiles().items()
        else:
            header = ['Profile name']
            rows = [(key, "")
                    for key in context.profile_map.available_profiles()]

        if args.output_format in ['csv', 'json']:
            header = list(map(uglify, header))

        print(twodim.to_str(args.output_format, header, rows))
        return

    # List checker config options.
    if 'checker_config' in args:
        if 'details' in args:
            header = ['Option', 'Description']
        else:
            header = ['Option']

        if args.output_format in ['csv', 'json']:
            header = list(map(uglify, header))

        rows = []
        for analyzer in working_analyzers:
            config_handler = analyzer_config_map.get(analyzer)
            analyzer_class = analyzer_types.supported_analyzers[analyzer]

            configs = analyzer_class.get_checker_config(
                config_handler, analyzer_environment)
            rows.extend(
                (':'.join((analyzer, c[0])),
                 c[1]) if 'details' in args else (':'.join((analyzer, c[0])), )
                for c in configs)

        print(twodim.to_str(args.output_format, header, rows))
        return

    if args.guideline is not None and len(args.guideline) == 0:
        result = defaultdict(set)

        for _, guidelines in context.guideline_map.items():
            for guideline, rules in guidelines.items():
                result[guideline] |= set(rules)

        header = ['Guideline', 'Rules']
        if args.output_format in ['csv', 'json']:
            header = list(map(uglify, header))

        if args.output_format == 'json':
            rows = [(g, sorted(list(r))) for g, r in result.items()]
        else:
            rows = [(g, ', '.join(sorted(r))) for g, r in result.items()]

        if args.output_format == 'rows':
            for row in rows:
                print('Guideline: {}'.format(row[0]))
                print('Rules: {}'.format(row[1]))
        else:
            print(twodim.to_str(args.output_format, header, rows))
        return

    # List available checkers.
    if 'details' in args:
        header = [
            'Enabled', 'Name', 'Analyzer', 'Severity', 'Guideline',
            'Description'
        ]
    else:
        header = ['Name']

    if args.output_format in ['csv', 'json']:
        header = list(map(uglify, header))

    rows = []
    for analyzer in working_analyzers:
        config_handler = analyzer_config_map.get(analyzer)
        analyzer_class = analyzer_types.supported_analyzers[analyzer]

        checkers = analyzer_class.get_analyzer_checkers(
            config_handler, analyzer_environment)

        profile_checkers = []
        if 'profile' in args:
            if args.profile not in context.profile_map.available_profiles():
                LOG.error("Checker profile '%s' does not exist!", args.profile)
                LOG.error("To list available profiles, use '--profile list'.")
                sys.exit(1)

            profile_checkers = [('profile:' + args.profile, True)]

        config_handler.initialize_checkers(context, checkers, profile_checkers)

        for checker_name, value in config_handler.checks().items():
            state, description = value

            if state != CheckerState.enabled and 'profile' in args:
                continue

            if state == CheckerState.enabled and 'only_disabled' in args:
                continue
            elif state != CheckerState.enabled and 'only_enabled' in args:
                continue

            if args.output_format == 'json':
                state = state == CheckerState.enabled
            else:
                state = '+' if state == CheckerState.enabled else '-'

            if args.guideline is not None:
                if not match_guideline(checker_name, args.guideline):
                    continue

            if 'details' in args:
                severity = context.severity_map.get(checker_name)
                guideline = context.guideline_map.get(checker_name, {})
                if args.output_format != 'json':
                    guideline = format_guideline(guideline)
                rows.append([
                    state, checker_name, analyzer, severity, guideline,
                    description
                ])
            else:
                rows.append([checker_name])

    if 'show_warnings' in args:
        severity = context.severity_map.get('clang-diagnostic-')
        for warning in get_warnings(analyzer_environment):
            warning = 'clang-diagnostic-' + warning[2:]

            if 'guideline' in args:
                if not match_guideline(warning, args.guideline):
                    continue

            guideline = context.guideline_map.get(warning, {})
            if args.output_format != 'json':
                guideline = format_guideline(guideline)

            if 'details' in args:
                rows.append(['', warning, '-', severity, guideline, '-'])
            else:
                rows.append([warning])

    if rows:
        print(twodim.to_str(args.output_format, header, rows))

    for analyzer_binary, reason in errored:
        LOG.error(
            "Failed to get checkers for '%s'!"
            "The error reason was: '%s'", analyzer_binary, reason)
        LOG.error("Please check your installation and the "
                  "'config/package_layout.json' file!")
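
The match_guideline closure above flattens a checker's guideline names and rule IDs into a single set and intersects it with the user's selection, so either a guideline name or one of its rule IDs matches. The same logic as a standalone function, with the guideline map passed in explicitly and hypothetical sample data:

def match_guideline(checker_name, selected_guidelines, guideline_map):
    # Collect guideline names plus all of their rule IDs into one set.
    guideline = guideline_map.get(checker_name, {})
    guideline_set = set(guideline)
    for rules in guideline.values():
        guideline_set |= set(rules)
    return any(g in guideline_set for g in selected_guidelines)

guideline_map = {  # made-up data; the real map comes from the context
    'cppcheck-arrayIndexOutOfBounds': {'sei-cert': ['arr30-c']},
}
print(match_guideline('cppcheck-arrayIndexOutOfBounds',
                      ['sei-cert'], guideline_map))  # True (guideline name)
print(match_guideline('cppcheck-arrayIndexOutOfBounds',
                      ['arr30-c'], guideline_map))   # True (rule ID)
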
Example #4
def main(args):
    """
    Store the defect results in the specified input list as bug reports in the
    database.
    """
    logger.setup_logger(args.verbose if 'verbose' in args else None)

    try:
        cmd_config.check_config_file(args)
    except FileNotFoundError as fnerr:
        LOG.error(fnerr)
        sys.exit(1)

    if not host_check.check_zlib():
        raise Exception("zlib is not available on the system!")

    # To ensure the help message prints the default folder properly,
    # the 'default' for 'args.input' is a string, not a list.
    # But the iteration below needs a list to work on.
    if isinstance(args.input, str):
        args.input = [args.input]

    if 'name' not in args:
        LOG.debug("Generating name for analysis...")
        generated = __get_run_name(args.input)
        if generated:
            setattr(args, 'name', generated)
        else:
            LOG.error("No suitable name was found in the inputs for the "
                      "analysis run. Please specify one by passing argument "
                      "--name run_name in the invocation.")
            sys.exit(2)  # argparse returns error code 2 for bad invocations.

    LOG.info("Storing analysis results for run '" + args.name + "'")

    if 'force' in args:
        LOG.info("argument --force was specified: the run with name '" +
                 args.name + "' will be deleted.")

    # Setup connection to the remote server.
    client = libclient.setup_client(args.product_url)

    _, zip_file = tempfile.mkstemp('.zip')
    LOG.debug("Will write mass store ZIP to '%s'...", zip_file)

    try:
        LOG.debug("Assembling zip file.")
        try:
            assemble_zip(args.input, zip_file, client)
        except Exception as ex:
            print(ex)
            import traceback
            traceback.print_exc()

        zip_size = os.stat(zip_file).st_size
        LOG.debug("Assembling zip done, size is %s", sizeof_fmt(zip_size))

        if zip_size > MAX_UPLOAD_SIZE:
            LOG.error("The result list to upload is too big (max: %s).",
                      sizeof_fmt(MAX_UPLOAD_SIZE))
            sys.exit(1)

        with open(zip_file, 'rb') as zf:
            b64zip = base64.b64encode(zf.read()).decode("utf-8")

        context = webserver_context.get_context()

        trim_path_prefixes = args.trim_path_prefix if \
            'trim_path_prefix' in args else None

        description = args.description if 'description' in args else None

        LOG.info("Storing results to the server...")
        client.massStoreRun(args.name, args.tag if 'tag' in args else None,
                            str(context.version), b64zip, 'force' in args,
                            trim_path_prefixes, description)

        # Store analysis statistics if the server allows it.
        if client.allowsStoringAnalysisStatistics():
            storing_analysis_statistics(client, args.input, args.name)

        LOG.info("Storage finished successfully.")
    except RequestFailed as reqfail:
        if reqfail.errorCode == ErrorCode.SOURCE_FILE:
            header = ['File', 'Line', 'Checker name']
            table = twodim.to_str('table', header,
                                  [c.split('|') for c in reqfail.extraInfo])
            LOG.warning(
                "Setting the review statuses for some reports failed "
                "because of non valid source code comments: "
                "%s\n %s", reqfail.message, table)
        sys.exit(1)
    except Exception as ex:
        import traceback
        traceback.print_exc()
        LOG.info("Storage failed: %s", str(ex))
        sys.exit(1)
    finally:
        os.remove(zip_file)
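
Example #4's upload path boils down to four steps: write a temporary ZIP, enforce a size cap, base64-encode the bytes for the RPC call, and always clean up in a finally block. A minimal sketch of that flow using only the standard library; the size cap and the archive contents are made-up stand-ins:

import base64
import os
import tempfile
import zipfile

MAX_UPLOAD_SIZE = 10 * 1024 * 1024  # assumed cap; the real limit is configured

fd, zip_path = tempfile.mkstemp('.zip')
os.close(fd)
try:
    with zipfile.ZipFile(zip_path, 'w') as archive:
        archive.writestr('reports/dummy.plist', '<plist/>')  # placeholder

    if os.stat(zip_path).st_size > MAX_UPLOAD_SIZE:
        raise RuntimeError("The result list to upload is too big.")

    with open(zip_path, 'rb') as zf:
        b64zip = base64.b64encode(zf.read()).decode('utf-8')

    # b64zip is the kind of payload massStoreRun() receives above.
    print(len(b64zip))
finally:
    os.remove(zip_path)  # mirrors the handler's cleanup in its finally block
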
Example #5
def main(args):
    """
    List basic information about the analyzers supported by CodeChecker.
    """
    # If the given output format is not 'table', redirect the logger's output
    # to stderr.
    stream = None
    if 'output_format' in args and args.output_format != 'table':
        stream = 'stderr'

    logger.setup_logger(args.verbose if 'verbose' in args else None, stream)

    context = analyzer_context.get_context()
    working_analyzers, errored = \
        analyzer_types.check_supported_analyzers(
            analyzer_types.supported_analyzers,
            context)

    if args.dump_config:
        binary = context.analyzer_binaries.get(args.dump_config)

        if args.dump_config == 'clang-tidy':
            subprocess.call([binary, '-dump-config', '-checks=*'],
                            encoding="utf-8",
                            errors="ignore")
        elif args.dump_config == 'clangsa':
            ret = subprocess.call([
                binary, '-cc1', '-analyzer-checker-option-help',
                '-analyzer-checker-option-help-alpha'
            ],
                                  stderr=subprocess.PIPE,
                                  encoding="utf-8",
                                  errors="ignore")

            if ret:
                # This flag is supported from Clang 9.
                LOG.warning("'--dump-config clangsa' is not supported yet. "
                            "Please make sure that you are using Clang 9 or "
                            "newer.")

        return

    analyzer_environment = env.extend(context.path_env_extra,
                                      context.ld_lib_path_extra)
    analyzer_config_map = analyzer_types.build_config_handlers(
        args, context, working_analyzers)

    def uglify(text):
        """
        The CSV and JSON output formats use a machine-friendly header:
        lowercase, with spaces replaced by underscores.
        """
        return text.lower().replace(' ', '_')

    if 'analyzer_config' in args:
        if 'details' in args:
            header = ['Option', 'Description']
        else:
            header = ['Option']

        if args.output_format in ['csv', 'json']:
            header = list(map(uglify, header))

        analyzer = args.analyzer_config
        config_handler = analyzer_config_map.get(analyzer)
        analyzer_class = analyzer_types.supported_analyzers[analyzer]

        configs = analyzer_class.get_analyzer_config(config_handler,
                                                     analyzer_environment)
        rows = [(':'.join((analyzer, c[0])), c[1]) if 'details' in args else
                (':'.join((analyzer, c[0])), ) for c in configs]

        print(twodim.to_str(args.output_format, header, rows))

        return

    if 'details' in args:
        header = ['Name', 'Path', 'Version']
    else:
        header = ['Name']

    if args.output_format in ['csv', 'json']:
        header = list(map(uglify, header))

    rows = []
    for analyzer in working_analyzers:
        if 'details' not in args:
            rows.append([analyzer])
        else:
            binary = context.analyzer_binaries.get(analyzer)
            try:
                version = subprocess.check_output([binary, '--version'],
                                                  encoding="utf-8",
                                                  errors="ignore")
            except (subprocess.CalledProcessError, OSError):
                version = 'ERROR'

            rows.append([analyzer, binary, version])

    if 'all' in args:
        for analyzer, err_reason in errored:
            if 'details' not in args:
                rows.append([analyzer])
            else:
                rows.append([
                    analyzer,
                    context.analyzer_binaries.get(analyzer), err_reason
                ])

    if rows:
        print(twodim.to_str(args.output_format, header, rows))
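
The version lookup in the details branch is a reusable pattern on its own: run the binary with --version, capture stdout as text, and degrade to a placeholder when the binary is missing or fails. A self-contained sketch; the probed binaries below are arbitrary examples:

import subprocess

def probe_version(binary):
    # Capture stdout as text and fall back to 'ERROR' on any failure,
    # exactly like the handler's details branch above.
    try:
        return subprocess.check_output([binary, '--version'],
                                       encoding='utf-8',
                                       errors='ignore').strip()
    except (subprocess.CalledProcessError, OSError):
        return 'ERROR'

print(probe_version('python3'))         # e.g. 'Python 3.11.4'
print(probe_version('no-such-binary'))  # 'ERROR'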