Example #1
def handle_list_run_histories(args):
    init_logger(args.verbose if 'verbose' in args else None)

    client = setup_client(args.product_url)
    run_ids = None
    if 'names' in args:
        runs = get_runs(client, args.names)
        run_ids = [r.runId for r in runs]

    run_history = client.getRunHistory(run_ids, None, None, None)

    if args.output_format == 'json':
        print(CmdLineOutputEncoder().encode(run_history))
    else:  # plaintext, csv
        header = [
            'Date', 'Run name', 'Version tag', 'User', 'CodeChecker version',
            'Analyzer statistics'
        ]
        rows = []
        for h in run_history:
            analyzer_statistics = []
            for analyzer in h.analyzerStatistics:
                stat = h.analyzerStatistics[analyzer]
                num_of_all_files = stat.successful + stat.failed
                analyzer_statistics.append(analyzer + ' (' +
                                           str(num_of_all_files) + '/' +
                                           str(stat.successful) + ')')

            rows.append(
                (h.time, h.runName, h.versionTag if h.versionTag else '',
                 h.user, h.codeCheckerVersion if h.codeCheckerVersion else '',
                 ', '.join(analyzer_statistics)))

        print(twodim_to_str(args.output_format, header, rows))
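
The handlers on this page receive an argparse-style namespace. A minimal sketch of invoking the handler above by hand; the product URL and run name are placeholders, not real values:

from argparse import Namespace

# argparse.Namespace defines __contains__, which is what makes the
# "'verbose' in args" style of checks above work.
args = Namespace(product_url='http://localhost:8001/Default',  # placeholder
                 names=['myrun'],                              # placeholder
                 output_format='plaintext',
                 verbose=None)
handle_list_run_histories(args)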
Example #2
def handle_add_component(args):
    init_logger(args.verbose if 'verbose' in args else None)

    client = setup_client(args.product_url)

    with open(args.component_file, 'r') as component_file:
        value = component_file.read().strip()

    description = args.description if 'description' in args else None

    check_unicode_string(args.name, "Component name")

    if description:
        check_unicode_string(args.description, "Component description")

    # Check whether the given source component already exists.
    source_component = client.getSourceComponents([args.name])

    if len(source_component):
        LOG.info("The source component '{0}' already exist!".format(args.name))

        question = 'Do you want to update? Y(es)/n(o) '
        if not get_user_input(question):
            LOG.info("No source component update was done.")
            sys.exit(0)

    success = client.addSourceComponent(args.name, value, description)
    if success:
        LOG.info("Source component added.")
    else:
        LOG.error("An error occurred when adding source component.")
        sys.exit(1)
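
For reference, a hypothetical component file for handle_add_component. Each line includes (+) or excludes (-) a path glob; the exact rule syntax is an assumption based on CodeChecker's source-component documentation:

# sample_component.txt (hypothetical)
+*/src/*
-*/src/test/*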
Example #3
def handle_suppress(args):
    def bug_hash_filter(bug_id, filepath):
        filepath = '%' + filepath
        # One filter matching both the bug hash and the (suffix-matched)
        # file path is sufficient; the original duplicated entry added
        # nothing.
        return [ttypes.ReportFilter(bugHash=bug_id, filepath=filepath)]

    limit = constants.MAX_QUERY_SIZE

    client = setup_client(args.product_url)

    run_info = check_run_names(client, [args.name])
    run = run_info.get(args.name)

    if 'input' in args:
        with open(args.input) as supp_file:
            suppress_data = suppress_file_handler.get_suppress_data(supp_file)

        for bug_id, file_name, comment in suppress_data:
            reports = client.getRunResults([run.runId], limit, 0, None,
                                           bug_hash_filter(bug_id, file_name),
                                           None)

            for report in reports:
                status = ttypes.ReviewStatus.FALSE_POSITIVE
                client.changeReviewStatus(report.reportId, status, comment)
Example #4
def handle_list_runs(args):

    init_logger(args.verbose if 'verbose' in args else None)

    client = setup_client(args.product_url)
    runs = client.getRunData(None)

    if args.output_format == 'json':
        results = []
        for run in runs:
            results.append({run.name: run})
        print(CmdLineOutputEncoder().encode(results))

    else:  # plaintext, csv
        header = [
            'Name', 'Number of reports', 'Storage date', 'Version tag',
            'Duration'
        ]
        rows = []
        for run in runs:
            duration = str(timedelta(seconds=run.duration)) \
                if run.duration > -1 else 'Not finished'
            rows.append((run.name, str(run.resultCount), run.runDate,
                         run.versionTag if run.versionTag else '', duration))

        print(twodim_to_str(args.output_format, header, rows))
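
The 'Duration' column relies on timedelta's string form, which is why the negative sentinel gets an explicit 'Not finished' guard above:

from datetime import timedelta

str(timedelta(seconds=3725))  # '1:02:05'
str(timedelta(seconds=-1))    # '-1 day, 23:59:59' -- not what we want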
Example #5
def handle_suppress(args):

    init_logger(args.verbose if 'verbose' in args else None)

    limit = constants.MAX_QUERY_SIZE

    client = setup_client(args.product_url)

    run_info = check_run_names(client, [args.name])
    run = run_info.get(args.name)

    if 'input' in args:
        with open(args.input) as supp_file:
            suppress_data = suppress_file_handler.get_suppress_data(supp_file)

        for bug_id, file_name, comment, status in suppress_data:
            file_name = '%' + file_name
            bug_hash_filter = ttypes.ReportFilter(filepath=[file_name],
                                                  reportHash=[bug_id])
            reports = client.getRunResults([run.runId], limit, 0, None,
                                           bug_hash_filter, None)

            for report in reports:
                rw_status = ttypes.ReviewStatus.FALSE_POSITIVE
                if status == 'confirmed':
                    rw_status = ttypes.ReviewStatus.CONFIRMED
                elif status == 'intentional':
                    rw_status = ttypes.ReviewStatus.INTENTIONAL

                client.changeReviewStatus(report.reportId, rw_status, comment)
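
get_suppress_data above yields (bug_id, file_name, comment, status) tuples. A hypothetical suppress-file line in a '||'-separated layout matching those fields; the real CodeChecker format may differ:

2f46c93f5b1f1a4bd3cbbc44387c1467||lib/parser.cpp||not our code||false_positive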
Example #6
def handle_remove_run_results(args):

    init_logger(args.verbose if 'verbose' in args else None)

    client = setup_client(args.product_url)

    def is_later(d1, d2):
        dateformat = '%Y-%m-%d %H:%M:%S.%f'

        if not isinstance(d1, datetime):
            d1 = datetime.strptime(d1, dateformat)
        if not isinstance(d2, datetime):
            d2 = datetime.strptime(d2, dateformat)

        return d1 > d2

    if 'name' in args:
        check_run_names(client, args.name)

        def condition(name, runid, date):
            return name in args.name
    elif 'all_after_run' in args:
        run_info = check_run_names(client, [args.all_after_run])
        run_date = run_info[args.all_after_run].runDate

        def condition(name, runid, date):
            return is_later(date, run_date)
    elif 'all_before_run' in args:
        run_info = check_run_names(client, [args.all_before_run])
        run_date = run_info[args.all_before_run].runDate

        def condition(name, runid, date):
            return is_later(run_date, date)
    elif 'all_after_time' in args:

        def condition(name, runid, date):
            return is_later(date, args.all_after_time)
    elif 'all_before_time' in args:

        def condition(name, runid, date):
            return is_later(args.all_before_time, date)
    else:

        def condition(name, runid, date):
            return False

    for run_id in [
            run.runId for run in client.getRunData(None)
            if condition(run.name, run.runId, run.runDate)
    ]:
        client.removeRun(run_id)

    LOG.info("Done.")
Example #7
def handle_list_results(args):
    init_logger(args.verbose if 'verbose' in args else None)
    check_deprecated_arg_usage(args)

    client = setup_client(args.product_url)

    run_names = map(lambda x: x.strip(), args.name.split(':'))
    run_ids = [run.runId for run in get_runs(client, run_names)]

    if not len(run_ids):
        LOG.warning("No runs were found!")
        sys.exit(1)

    limit = constants.MAX_QUERY_SIZE
    offset = 0

    report_filter = ttypes.ReportFilter()

    add_filter_conditions(client, report_filter, args)

    all_results = []
    results = client.getRunResults(run_ids, limit, offset, None, report_filter,
                                   None)

    while results:
        all_results.extend(results)
        offset += limit
        results = client.getRunResults(run_ids, limit, offset, None,
                                       report_filter, None)

    if args.output_format == 'json':
        print(CmdLineOutputEncoder().encode(all_results))
    else:
        header = [
            'File', 'Checker', 'Severity', 'Msg', 'Review status',
            'Detection status'
        ]

        rows = []
        for res in all_results:
            bug_line = res.line
            checked_file = res.checkedFile + ' @ ' + str(bug_line)
            sev = ttypes.Severity._VALUES_TO_NAMES[res.severity]
            rw_status = \
                ttypes.ReviewStatus._VALUES_TO_NAMES[res.reviewData.status]
            dt_status = \
                ttypes.DetectionStatus._VALUES_TO_NAMES[res.detectionStatus]

            rows.append((checked_file, res.checkerId, sev, res.checkerMsg,
                         rw_status, dt_status))

        print(twodim_to_str(args.output_format, header, rows))
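
The paginated getRunResults loop above recurs in most of these handlers. A hypothetical helper (not part of CodeChecker) could factor it out:

def fetch_all_results(client, run_ids, report_filter,
                      sort_mode=None, cmp_data=None):
    """Page through getRunResults until the server returns no more rows."""
    limit = constants.MAX_QUERY_SIZE
    offset = 0
    all_results = []
    while True:
        results = client.getRunResults(run_ids, limit, offset, sort_mode,
                                       report_filter, cmp_data)
        if not results:
            return all_results
        all_results.extend(results)
        offset += limit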
Example #8
def handle_list_results(args):
    client = setup_client(args.host, args.port, '/')

    run_info = check_run_names(client, [args.name])

    run_id, _ = run_info.get(args.name)

    limit = codeCheckerDBAccess.constants.MAX_QUERY_SIZE
    offset = 0

    filters = []
    if args.suppressed:
        report_filter = codeCheckerDBAccess.ttypes.ReportFilter(
            suppressed=True)
    else:
        report_filter = codeCheckerDBAccess.ttypes.ReportFilter(
            suppressed=False)

    add_filter_conditions(report_filter, args.filter)
    filters.append(report_filter)

    all_results = []
    results = client.getRunResults(run_id, limit, offset, None, filters)

    while results:
        all_results.extend(results)
        offset += limit
        results = client.getRunResults(run_id, limit, offset, None, filters)

    if args.output_format == 'json':
        print(CmdLineOutputEncoder().encode(all_results))
    else:

        if args.suppressed:
            header = ['File', 'Checker', 'Severity', 'Msg', 'Suppress comment']
        else:
            header = ['File', 'Checker', 'Severity', 'Msg']

        rows = []
        for res in all_results:
            bug_line = res.lastBugPosition.startLine
            checked_file = res.checkedFile + ' @ ' + str(bug_line)
            sev = shared.ttypes.Severity._VALUES_TO_NAMES[res.severity]

            if args.suppressed:
                rows.append((checked_file, res.checkerId, sev, res.checkerMsg,
                             res.suppressComment))
            else:
                rows.append((checked_file, res.checkerId, sev, res.checkerMsg))

        print(twodim_to_str(args.output_format, header, rows))
Example #9
def handle_remove_run_results(args):
    client = setup_client(args.host, args.port, '/')

    def is_later(d1, d2):
        dateformat = '%Y-%m-%d %H:%M:%S.%f'

        if not isinstance(d1, datetime):
            d1 = datetime.strptime(d1, dateformat)
        if not isinstance(d2, datetime):
            d2 = datetime.strptime(d2, dateformat)

        return d1 > d2

    run_info = get_run_ids(client)

    if 'name' in args:
        check_run_names(client, args.name)

        def condition(name, runid, date):
            return name in args.name
    elif 'all_after_run' in args and args.all_after_run in run_info:
        run_date = run_info[args.all_after_run][1]

        def condition(name, runid, date):
            return is_later(date, run_date)
    elif 'all_before_run' in args and args.all_before_run in run_info:
        run_date = run_info[args.all_before_run][1]

        def condition(name, runid, date):
            return is_later(run_date, date)
    elif 'all_after_time' in args:

        def condition(name, runid, date):
            return is_later(date, args.all_after_time)
    elif 'all_before_time' in args:

        def condition(name, runid, date):
            return is_later(args.all_before_time, date)
    else:

        def condition(name, runid, date):
            return False

    client.removeRunResults([
        runid for (name, (runid, date)) in run_info.items()
        if condition(name, runid, date)
    ])

    LOG.info("Done.")
Example #10
def handle_list_results(args):
    client = setup_client(args.product_url)

    run_info = check_run_names(client, [args.name])

    run = run_info.get(args.name)

    limit = constants.MAX_QUERY_SIZE
    offset = 0

    report_filter = ttypes.ReportFilter()

    add_filter_conditions(report_filter, args.filter)

    all_results = []
    results = client.getRunResults([run.runId], limit, offset, None,
                                   report_filter, None)

    while results:
        all_results.extend(results)
        offset += limit
        results = client.getRunResults([run.runId], limit, offset, None,
                                       report_filter, None)

    if args.output_format == 'json':
        print(CmdLineOutputEncoder().encode(all_results))
    else:

        if args.suppressed:
            header = ['File', 'Checker', 'Severity', 'Msg', 'Suppress comment']
        else:
            header = ['File', 'Checker', 'Severity', 'Msg']

        rows = []
        for res in all_results:
            bug_line = res.line
            checked_file = res.checkedFile + ' @ ' + str(bug_line)
            sev = ttypes.Severity._VALUES_TO_NAMES[res.severity]

            if args.suppressed:
                rows.append((checked_file, res.checkerId, sev,
                             res.checkerMsg, res.suppressComment))
            else:
                rows.append(
                    (checked_file, res.checkerId, sev, res.checkerMsg))

        print(twodim_to_str(args.output_format, header, rows))
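
add_filter_conditions is called throughout these examples but never shown. A minimal sketch, assuming the old 'severity:checker:path' --filter syntax; the real CodeChecker helper is more thorough:

def add_filter_conditions(report_filter, filter_str):
    # Expected shape: 'high,medium:unix,core:*.cpp' -- each part is
    # comma-separated and any part may be empty.
    if filter_str.count(':') != 2:
        LOG.error("Filter string must look like "
                  "'high,medium:unix,core:*.cpp'.")
        sys.exit(1)

    severities, checkers, paths = filter_str.split(':')
    if severities:
        report_filter.severity = [
            ttypes.Severity._NAMES_TO_VALUES[x.strip().upper()]
            for x in severities.split(',')]
    if checkers:
        report_filter.checkerName = ['*' + x.strip() + '*'
                                     for x in checkers.split(',')]
    if paths:
        report_filter.filepath = ['*' + x.strip() + '*'
                                  for x in paths.split(',')]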
Example #11
def handle_list_runs(args):

    init_logger(args.verbose if 'verbose' in args else None)

    client = setup_client(args.product_url)

    run_filter = None
    if 'names' in args:
        run_filter = ttypes.RunFilter()
        run_filter.names = args.names

    runs = client.getRunData(run_filter)

    if args.output_format == 'json':
        results = []
        for run in runs:
            results.append({run.name: run})
        print(CmdLineOutputEncoder().encode(results))

    else:  # plaintext, csv
        header = [
            'Name', 'Number of unresolved reports', 'Analyzer statistics',
            'Storage date', 'Version tag', 'Duration', 'CodeChecker version'
        ]
        rows = []
        for run in runs:
            duration = str(timedelta(seconds=run.duration)) \
                if run.duration > -1 else 'Not finished'

            analyzer_statistics = []
            for analyzer in run.analyzerStatistics:
                stat = run.analyzerStatistics[analyzer]
                num_of_all_files = stat.successful + stat.failed
                analyzer_statistics.append(analyzer + ' (' +
                                           str(num_of_all_files) + '/' +
                                           str(stat.successful) + ')')

            codechecker_version = run.codeCheckerVersion \
                if run.codeCheckerVersion else ''

            rows.append((run.name, str(run.resultCount),
                         ', '.join(analyzer_statistics), run.runDate,
                         run.versionTag if run.versionTag else '', duration,
                         codechecker_version))

        print(twodim_to_str(args.output_format, header, rows))
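
The analyzer-statistics formatting above duplicates Example #1; a hypothetical helper would remove the repetition:

def format_analyzer_statistics(stats):
    """Render an {analyzer: stat} map as 'name (total/successful), ...'."""
    parts = []
    for analyzer, stat in stats.items():
        total = stat.successful + stat.failed
        parts.append('{0} ({1}/{2})'.format(analyzer, total, stat.successful))
    return ', '.join(parts)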
Example #12
def handle_list_runs(args):
    client = setup_client(args.product_url)
    runs = client.getRunData(None)

    if args.output_format == 'json':
        results = []
        for run in runs:
            results.append({run.name: run})
        print(CmdLineOutputEncoder().encode(results))

    else:  # plaintext, csv
        header = ['Name', 'ResultCount', 'RunDate']
        rows = []
        for run in runs:
            rows.append((run.name, str(run.resultCount), run.runDate))

        print(twodim_to_str(args.output_format, header, rows))
Example #13
def handle_list_result_types(args):
    client = setup_client(args.host, args.port, '/')

    filters = []
    if args.suppressed:
        report_filter = codeCheckerDBAccess.ttypes.ReportFilter(
            suppressed=True)
    else:
        report_filter = codeCheckerDBAccess.ttypes.ReportFilter(
            suppressed=False)

    add_filter_conditions(report_filter, args.filter)
    filters.append(report_filter)

    if 'all_results' in args:
        items = check_run_names(client, None).items()
    else:
        items = []
        run_info = check_run_names(client, args.names)
        for name in args.names:
            items.append((name, run_info.get(name)))

    results_collector = []
    for name, run_info in items:
        run_id, run_date = run_info
        results = client.getRunResultTypes(run_id, filters)

        if args.output_format == 'json':
            for res in results:
                res.severity =\
                    shared.ttypes.Severity._VALUES_TO_NAMES[res.severity]
            results_collector.append({name: results})
        else:  # plaintext, csv
            print("Run '" + name + "', executed at '" + run_date + "'")
            rows = []
            header = ['Checker', 'Severity', 'Count']
            for res in results:
                sev = shared.ttypes.Severity._VALUES_TO_NAMES[res.severity]
                rows.append((res.checkerId, sev, str(res.count)))

            print(twodim_to_str(args.output_format, header, rows))

    if args.output_format == 'json':
        print(CmdLineOutputEncoder().encode(results_collector))
Example #14
def handle_list_components(args):
    init_logger(args.verbose if 'verbose' in args else None)

    client = setup_client(args.product_url)
    components = client.getSourceComponents(None)

    if args.output_format == 'json':
        print(CmdLineOutputEncoder().encode(components))
    else:  # plaintext, csv
        header = ['Name', 'Value', 'Description']
        rows = []
        for res in components:
            for idx, value in enumerate(res.value.split('\n')):
                name = res.name if idx == 0 else ''
                description = res.description \
                    if idx == 0 and res.description else ''
                rows.append((name, value, description))

        print(twodim_to_str(args.output_format, header, rows))
Example #15
def handle_del_component(args):

    init_logger(args.verbose if 'verbose' in args else None)

    client = setup_client(args.product_url)

    # Check whether the given source component exists.
    source_component = client.getSourceComponents([args.name])

    if not source_component:
        LOG.error("The source component '{0}' does not exist!"
                  .format(args.name))
        sys.exit(1)

    success = client.removeSourceComponent(args.name)
    if success:
        LOG.info("Source component removed.")
    else:
        LOG.error("An error occurred in source component removal.")
        sys.exit(1)
Example #16
def handle_list_run_histories(args):
    init_logger(args.verbose if 'verbose' in args else None)

    client = setup_client(args.product_url)
    run_ids = None
    if 'names' in args:
        runs = get_runs(client, args.names)
        run_ids = [r.runId for r in runs]

    run_history = client.getRunHistory(run_ids, None, None, None)

    if args.output_format == 'json':
        print(CmdLineOutputEncoder().encode(run_history))
    else:  # plaintext, csv
        header = ['Date', 'Run name', 'Version tag', 'User']
        rows = []
        for h in run_history:
            rows.append((h.time, h.runName,
                         h.versionTag if h.versionTag else '', h.user))

        print(twodim_to_str(args.output_format, header, rows))
Example #17
def handle_list_result_types(args):
    def get_statistics(client, run_ids, field, values):
        report_filter = ttypes.ReportFilter()
        report_filter.isUnique = True
        setattr(report_filter, field, values)
        checkers = client.getCheckerCounts(run_ids, report_filter, None)

        return dict((res.name, res.count) for res in checkers)

    def checker_count(checker_dict, key):
        return checker_dict.get(key, 0)

    client = setup_client(args.product_url)

    run_ids = None
    if 'all_results' not in args:
        items = check_run_names(client, args.names)
        run_ids = map(lambda run: run.runId, items.values())

    all_checkers_report_filter = ttypes.ReportFilter()
    all_checkers_report_filter.isUnique = True

    all_checkers = client.getCheckerCounts(run_ids, all_checkers_report_filter,
                                           None)
    all_checkers_dict = dict((res.name, res) for res in all_checkers)

    unrev_checkers = get_statistics(client, run_ids, 'reviewStatus',
                                    [ttypes.ReviewStatus.UNREVIEWED])

    confirmed_checkers = get_statistics(client, run_ids, 'reviewStatus',
                                        [ttypes.ReviewStatus.CONFIRMED])

    false_checkers = get_statistics(client, run_ids, 'reviewStatus',
                                    [ttypes.ReviewStatus.FALSE_POSITIVE])

    intentional_checkers = get_statistics(client, run_ids, 'reviewStatus',
                                          [ttypes.ReviewStatus.INTENTIONAL])

    resolved_checkers = get_statistics(client, run_ids, 'detectionStatus',
                                       [ttypes.DetectionStatus.RESOLVED])

    all_results = []
    for key, checker_data in sorted(all_checkers_dict.items(),
                                    key=lambda x: x[1].severity,
                                    reverse=True):
        all_results.append(
            dict(
                checker=key,
                severity=ttypes.Severity._VALUES_TO_NAMES[
                    checker_data.severity],
                reports=checker_data.count,
                unreviewed=checker_count(unrev_checkers, key),
                confirmed=checker_count(confirmed_checkers, key),
                false_positive=checker_count(false_checkers, key),
                intentional=checker_count(intentional_checkers, key),
                resolved=checker_count(resolved_checkers, key),
            ))

    if args.output_format == 'json':
        print(CmdLineOutputEncoder().encode(all_results))
    else:
        header = [
            'Checker', 'Severity', 'All reports', 'Resolved', 'Unreviewed',
            'Confirmed', 'False positive', "Intentional"
        ]

        rows = []
        for stat in all_results:
            rows.append(
                (stat['checker'], stat['severity'], str(stat['reports']),
                 str(stat['resolved']), str(stat['unreviewed']),
                 str(stat['confirmed']), str(stat['false_positive']),
                 str(stat['intentional'])))

        print(twodim_to_str(args.output_format, header, rows))
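
The _VALUES_TO_NAMES and _NAMES_TO_VALUES lookups used here are plain dicts that Thrift generates on every enum class, mapping wire integers to symbolic names and back:

ttypes.Severity._VALUES_TO_NAMES[checker_data.severity]  # e.g. 'HIGH'
ttypes.Severity._NAMES_TO_VALUES['HIGH']                 # the matching int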
Example #18
def handle_diff_results(args):
    context = generic_package_context.get_context()

    def get_diff_results(client, baseids, cmp_data):

        report_filter = ttypes.ReportFilter()
        add_filter_conditions(report_filter, args.filter)

        # Do not show resolved bugs in the 'new' compare mode.
        if cmp_data.diffType == ttypes.DiffType.NEW:
            report_filter.detectionStatus = [
                ttypes.DetectionStatus.NEW, ttypes.DetectionStatus.UNRESOLVED,
                ttypes.DetectionStatus.REOPENED
            ]

        sort_mode = [(ttypes.SortMode(ttypes.SortType.FILENAME,
                                      ttypes.Order.ASC))]
        limit = constants.MAX_QUERY_SIZE
        offset = 0

        all_results = []
        results = client.getRunResults(baseids, limit, offset, sort_mode,
                                       report_filter, cmp_data)

        while results:
            all_results.extend(results)
            offset += limit
            results = client.getRunResults(baseids, limit, offset, sort_mode,
                                           report_filter, cmp_data)
        return all_results

    def get_report_dir_results(reportdir):
        all_reports = []
        for filename in os.listdir(reportdir):
            if filename.endswith(".plist"):
                file_path = os.path.join(reportdir, filename)
                LOG.debug("Parsing:" + file_path)
                try:
                    files, reports = plist_parser.parse_plist(file_path)
                    for report in reports:
                        report.main['location']['file_name'] = \
                            files[int(report.main['location']['file'])]
                    all_reports.extend(reports)

                except Exception as ex:
                    LOG.error('The generated plist is not valid!')
                    LOG.error(ex)
        return all_reports

    def get_line_from_file(filename, lineno):
        with open(filename, 'r') as f:
            i = 1
            for line in f:
                if i == lineno:
                    return line
                i += 1
        return ""

    def get_line_from_remote_file(client, fid, lineno):
        # Thrift Python client cannot decode JSONs that contain non '\u00??'
        # characters, so we instead ask for a Base64-encoded version.
        source = client.getSourceFileData(fid, True, ttypes.Encoding.BASE64)
        lines = base64.b64decode(source.fileContent).split('\n')
        return "" if len(lines) < lineno else lines[lineno - 1]

    def get_diff_report_dir(client, baseids, report_dir, diff_type):

        report_filter = ttypes.ReportFilter()
        add_filter_conditions(report_filter, args.filter)

        sort_mode = [(ttypes.SortMode(ttypes.SortType.FILENAME,
                                      ttypes.Order.ASC))]
        limit = constants.MAX_QUERY_SIZE
        offset = 0

        base_results = []
        results = client.getRunResults(baseids, limit, offset, sort_mode,
                                       report_filter, None)
        while results:
            base_results.extend(results)
            offset += limit
            results = client.getRunResults(baseids, limit, offset, sort_mode,
                                           report_filter, None)
        base_hashes = {}
        for res in base_results:
            base_hashes[res.bugHash] = res

        filtered_reports = []
        new_results = get_report_dir_results(report_dir)
        new_hashes = {}
        suppressed_in_code = []

        for rep in new_results:
            bughash = rep.main['issue_hash_content_of_line_in_context']
            source_file = rep.main['location']['file_name']
            bug_line = rep.main['location']['line']
            new_hashes[bughash] = rep
            sp_handler = suppress_handler.SourceSuppressHandler(
                source_file, bug_line, bughash, rep.main['check_name'])
            if sp_handler.get_suppressed():
                suppressed_in_code.append(bughash)
                LOG.debug("Bug " + bughash + "is suppressed in code. file:" +
                          source_file + "Line " + str(bug_line))

        if diff_type == 'new':
            # Shows new reports from the report dir
            # which are not present in the baseline (server)
            # and not suppressed in the code.
            for result in new_results:
                if not (result.main['issue_hash_content_of_line_in_context']
                        in base_hashes) and \
                   not (result.main['issue_hash_content_of_line_in_context']
                        in suppressed_in_code):
                    filtered_reports.append(result)
        elif diff_type == 'resolved':
            # Show bugs in the baseline (server)
            # which are not present in the report dir
            # or suppressed.
            for result in base_results:
                if not (result.bugHash in new_hashes) or \
                        (result.bugHash in suppressed_in_code):
                    filtered_reports.append(result)
        elif diff_type == 'unresolved':
            # Shows bugs in the report dir
            # that are not suppressed and
            # which are also present in the baseline (server)

            for result in new_results:
                new_hash = result.main['issue_hash_content_of_line_in_context']
                if new_hash in base_hashes and \
                        not (new_hash in suppressed_in_code):
                    filtered_reports.append(result)
        return filtered_reports

    def cached_report_file_lookup(file_cache, file_id):
        """
        Get source file data for the given file and caches it in a file cache
        if file data is not found in the cache. Finally, it returns the source
        file data from the cache.
        """
        if file_id not in file_cache:
            source = client.getSourceFileData(file_id, True,
                                              ttypes.Encoding.BASE64)
            file_content = base64.b64decode(source.fileContent)
            file_cache[file_id] = {
                'id': file_id,
                'path': source.filePath,
                'content': file_content
            }

        return file_cache[file_id]

    def get_report_data(client, reports, file_cache):
        """
        Returns necessary report files and report data events for the HTML
        plist parser.
        """
        file_sources = {}
        report_data = []

        for report in reports:
            file_sources[report.fileId] = cached_report_file_lookup(
                file_cache, report.fileId)

            details = client.getReportDetails(report.reportId)
            events = []
            for index, event in enumerate(details.pathEvents):
                file_sources[event.fileId] = cached_report_file_lookup(
                    file_cache, event.fileId)

                events.append({
                    'line': event.startLine,
                    'col': event.startCol,
                    'file': event.fileId,
                    'msg': event.msg,
                    'step': index + 1
                })
            report_data.append(events)

        return {'files': file_sources, 'reports': report_data}

    def report_to_html(client, reports, output_dir):
        """
        Generate HTML output files for the given reports in the given output
        directory by using the Plist To HTML parser.
        """
        html_builder = PlistToHtml.HtmlBuilder(context.path_plist_to_html_dist)

        file_report_map = defaultdict(list)
        for report in reports:
            file_report_map[report.fileId].append(report)

        file_cache = {}
        for file_id, file_reports in file_report_map.items():
            checked_file = file_reports[0].checkedFile
            filename = os.path.basename(checked_file)

            report_data = get_report_data(client, file_reports, file_cache)

            output_path = os.path.join(output_dir,
                                       filename + '_' + str(file_id) + '.html')
            html_builder.create(output_path, report_data)
            print('Html file was generated for file://{0}: file://{1}'.format(
                checked_file, output_path))

    def print_reports(client, reports, output_format, diff_type):
        output_dir = args.export_dir if 'export_dir' in args else None
        if 'clean' in args and os.path.isdir(output_dir):
            print("Previous analysis results in '{0}' have been removed, "
                  "overwriting with current results.".format(output_dir))
            shutil.rmtree(output_dir)

        if output_format == 'json':
            output = []
            for report in reports:
                if isinstance(report, Report):
                    output.append(report.main)
                else:
                    output.append(report)
            print(CmdLineOutputEncoder().encode(output))
            return

        if output_format == 'html':
            if len(reports) == 0:
                print('No {0} reports were found!'.format(diff_type))
                return

            output_dir = args.export_dir
            if not os.path.exists(output_dir):
                os.makedirs(output_dir)

            print("Generating HTML output files to file://{0} directory:\n".
                  format(output_dir))

            report_to_html(client, reports, output_dir)

            print('\nTo view the results in a browser run:\n'
                  '  $ firefox {0}'.format(args.export_dir))
            return

        header = ['File', 'Checker', 'Severity', 'Msg', 'Source']
        rows = []
        for report in reports:
            if type(report) is Report:
                bug_line = report.main['location']['line']
                bug_col = report.main['location']['col']
                sev = 'unknown'
                checked_file = report.main['location']['file_name']\
                    + ':' + str(bug_line) + ":" + str(bug_col)
                check_name = report.main['check_name']
                check_msg = report.main['description']
                source_line =\
                    get_line_from_file(report.main['location']['file_name'],
                                       bug_line)
            else:
                bug_line = report.line
                bug_col = report.column
                sev = ttypes.Severity._VALUES_TO_NAMES[report.severity]
                checked_file = report.checkedFile + ':' + str(bug_line) +\
                    ":" + str(bug_col)
                source_line =\
                    get_line_from_remote_file(client, report.fileId, bug_line)
                check_name = report.checkerId
                check_msg = report.checkerMsg
            rows.append(
                (checked_file, check_name, sev, check_msg, source_line))
        if output_format == 'plaintext':
            for row in rows:
                print("{0}: {1} [{2}]\n{3}\n".format(row[0], row[3], row[1],
                                                     row[4]))
        else:
            print(twodim_to_str(output_format, header, rows))

    client = setup_client(args.product_url)

    report_dir_mode = False
    if os.path.isdir(args.newname):
        # If newname is a valid directory we assume that it is a report dir and
        # we are in local compare mode.
        report_dir_mode = True
    else:
        run_info = check_run_names(client, [args.newname])
        newid = run_info[args.newname].runId

    try:
        basename_regex = '^' + args.basename + '$'
        base_runs = filter(lambda run: re.match(basename_regex, run.name),
                           client.getRunData(None))
        base_ids = map(lambda run: run.runId, base_runs)
    except re.error:
        LOG.error('Invalid regex format in ' + args.basename)
        sys.exit(1)

    if len(base_ids) == 0:
        LOG.warning("No run names match the given pattern: " + args.basename)
        sys.exit(1)

    LOG.info("Matching against runs: " +
             ', '.join(map(lambda run: run.name, base_runs)))

    diff_type = 'new'
    if 'unresolved' in args:
        diff_type = 'unresolved'
    elif 'resolved' in args:
        diff_type = 'resolved'

    results = []
    if report_dir_mode:
        results = get_diff_report_dir(client, base_ids,
                                      os.path.abspath(args.newname), diff_type)
    else:
        cmp_data = ttypes.CompareData(runIds=[newid])
        if 'new' in args:
            cmp_data.diffType = ttypes.DiffType.NEW
        elif 'unresolved' in args:
            cmp_data.diffType = ttypes.DiffType.UNRESOLVED
        elif 'resolved' in args:
            cmp_data.diffType = ttypes.DiffType.RESOLVED

        results = get_diff_results(client, base_ids, cmp_data)

    if len(results) == 0:
        LOG.info("No results.")
    else:
        print_reports(client, results, args.output_format, diff_type)
Example #19
def main(args):
    """
    Store the defect results in the specified input list as bug reports in the
    database.
    """
    logger.setup_logger(args.verbose if 'verbose' in args else None)

    if not host_check.check_zlib():
        raise Exception("zlib is not available on the system!")

    # To ensure the help message prints the default folder properly,
    # the 'default' for 'args.input' is a string, not a list.
    # But we need lists for the foreach here to work.
    if isinstance(args.input, str):
        args.input = [args.input]

    if 'name' not in args:
        LOG.debug("Generating name for analysis...")
        generated = __get_run_name(args.input)
        if generated:
            setattr(args, 'name', generated)
        else:
            LOG.error("No suitable name was found in the inputs for the "
                      "analysis run. Please specify one by passing argument "
                      "--name run_name in the invocation.")
            sys.exit(2)  # argparse returns error code 2 for bad invocations.

    LOG.info("Storing analysis results for run '" + args.name + "'")

    if 'force' in args:
        LOG.info("argument --force was specified: the run with name '" +
                 args.name + "' will be deleted.")

    protocol, host, port, product_name = split_product_url(args.product_url)

    # Before any transmission happens, check if we have the PRODUCT_STORE
    # permission to prevent a possibly long ZIP operation only to get an
    # error later on.
    product_client = libclient.setup_product_client(protocol, host, port,
                                                    product_name)
    product_id = product_client.getCurrentProduct().id

    auth_client, _ = libclient.setup_auth_client(protocol, host, port)
    has_perm = libclient.check_permission(auth_client,
                                          Permission.PRODUCT_STORE,
                                          {'productID': product_id})
    if not has_perm:
        LOG.error("You are not authorised to store analysis results in "
                  "product '{0}'".format(product_name))
        sys.exit(1)

    # Setup connection to the remote server.
    client = libclient.setup_client(args.product_url, product_client=False)

    LOG.debug("Initializing client connecting to {0}:{1}/{2} done.".format(
        host, port, product_name))

    _, zip_file = tempfile.mkstemp('.zip')
    LOG.debug("Will write mass store ZIP to '{0}'...".format(zip_file))

    try:
        assemble_zip(args.input, zip_file, client)

        if os.stat(zip_file).st_size > MAX_UPLOAD_SIZE:
            LOG.error("The result list to upload is too big (max: {}).".format(
                sizeof_fmt(MAX_UPLOAD_SIZE)))
            sys.exit(1)

        with open(zip_file, 'rb') as zf:
            b64zip = base64.b64encode(zf.read())

        context = generic_package_context.get_context()

        trim_path_prefixes = args.trim_path_prefix if \
            'trim_path_prefix' in args else None

        client.massStoreRun(args.name, args.tag if 'tag' in args else None,
                            str(context.version), b64zip, 'force' in args,
                            trim_path_prefixes)

        LOG.info("Storage finished successfully.")
    except RequestFailed as reqfail:
        if reqfail.errorCode == ErrorCode.SOURCE_FILE:
            header = ['File', 'Line', 'Checker name']
            table = twodim_to_str('table', header,
                                  [c.split('|') for c in reqfail.extraInfo])
            LOG.warning("Setting the review statuses for some reports failed "
                        "because of non valid source code comments: "
                        "{0}\n {1}".format(reqfail.message, table))
        sys.exit(1)
    except Exception as ex:
        LOG.info("Storage failed: " + str(ex))
        sys.exit(1)
    finally:
        os.remove(zip_file)
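
split_product_url above is expected to break a product URL such as 'http://localhost:8001/Default' into (protocol, host, port, product_name). A minimal sketch of such a helper; the real implementation validates its input more carefully:

try:
    from urllib.parse import urlparse   # Python 3
except ImportError:
    from urlparse import urlparse       # Python 2

def split_product_url(product_url):
    parsed = urlparse(product_url)
    protocol = parsed.scheme or 'http'
    host = parsed.hostname or 'localhost'
    port = parsed.port or (443 if protocol == 'https' else 8001)
    product_name = parsed.path.strip('/')
    return protocol, host, port, product_name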
Example #20
def main(args):
    """
    Store the defect results in the specified input list as bug reports in the
    database.
    """

    if not host_check.check_zlib():
        raise Exception("zlib is not available on the system!")

    # To ensure the help message prints the default folder properly,
    # the 'default' for 'args.input' is a string, not a list.
    # But we need lists for the foreach here to work.
    if isinstance(args.input, str):
        args.input = [args.input]

    if 'name' not in args:
        LOG.debug("Generating name for analysis...")
        generated = __get_run_name(args.input)
        if generated:
            setattr(args, 'name', generated)
        else:
            LOG.error("No suitable name was found in the inputs for the "
                      "analysis run. Please specify one by passing argument "
                      "--name run_name in the invocation.")
            sys.exit(2)  # argparse returns error code 2 for bad invocations.

    LOG.info("Storing analysis results for run '" + args.name + "'")

    if 'force' in args:
        LOG.info("argument --force was specified: the run with name '" +
                 args.name + "' will be deleted.")

    protocol, host, port, product_name = split_product_url(args.product_url)

    # Before any transmission happens, check if we have the PRODUCT_STORE
    # permission to prevent a possibly long ZIP operation only to get an
    # error later on.
    product_client = libclient.setup_product_client(protocol,
                                                    host, port, product_name)
    product_id = product_client.getCurrentProduct().id

    auth_client, _ = libclient.setup_auth_client(protocol, host, port)
    has_perm = libclient.check_permission(
        auth_client, Permission.PRODUCT_STORE, {'productID': product_id})
    if not has_perm:
        LOG.error("You are not authorised to store analysis results in "
                  "product '{0}'".format(product_name))
        sys.exit(1)

    # Setup connection to the remote server.
    client = libclient.setup_client(args.product_url)

    LOG.debug("Initializing client connecting to {0}:{1}/{2} done."
              .format(host, port, product_name))

    _, zip_file = tempfile.mkstemp('.zip')
    LOG.debug("Will write mass store ZIP to '{0}'...".format(zip_file))

    try:
        assemble_zip(args.input, zip_file, client)
        with open(zip_file, 'rb') as zf:
            b64zip = base64.b64encode(zf.read())

        context = generic_package_context.get_context()

        client.massStoreRun(args.name,
                            args.tag if 'tag' in args else None,
                            str(context.version),
                            b64zip,
                            'force' in args)

        LOG.info("Storage finished successfully.")
    except Exception as ex:
        LOG.info("Storage failed: " + str(ex))
        sys.exit(1)
    finally:
        os.remove(zip_file)
Example #21
def handle_list_result_types(args):

    init_logger(args.verbose if 'verbose' in args else None)

    def get_statistics(client, run_ids, field, values):
        report_filter = ttypes.ReportFilter()
        report_filter.isUnique = True
        add_filter_conditions(report_filter, args.filter)
        setattr(report_filter, field, values)
        checkers = client.getCheckerCounts(run_ids, report_filter, None, None,
                                           0)

        return dict((res.name, res.count) for res in checkers)

    def checker_count(checker_dict, key):
        return checker_dict.get(key, 0)

    client = setup_client(args.product_url)

    run_ids = None
    if 'all_results' not in args:
        run_ids = [run.runId for run in get_runs(client, args.names)]
        if not len(run_ids):
            LOG.warning("No runs were found!")
            sys.exit(1)

    all_checkers_report_filter = ttypes.ReportFilter()
    all_checkers_report_filter.isUnique = True
    add_filter_conditions(all_checkers_report_filter, args.filter)

    all_checkers = client.getCheckerCounts(run_ids, all_checkers_report_filter,
                                           None, None, 0)
    all_checkers_dict = dict((res.name, res) for res in all_checkers)

    unrev_checkers = get_statistics(client, run_ids, 'reviewStatus',
                                    [ttypes.ReviewStatus.UNREVIEWED])

    confirmed_checkers = get_statistics(client, run_ids, 'reviewStatus',
                                        [ttypes.ReviewStatus.CONFIRMED])

    false_checkers = get_statistics(client, run_ids, 'reviewStatus',
                                    [ttypes.ReviewStatus.FALSE_POSITIVE])

    intentional_checkers = get_statistics(client, run_ids, 'reviewStatus',
                                          [ttypes.ReviewStatus.INTENTIONAL])

    resolved_checkers = get_statistics(client, run_ids, 'detectionStatus',
                                       [ttypes.DetectionStatus.RESOLVED])

    # Get severity counts
    report_filter = ttypes.ReportFilter()
    report_filter.isUnique = True
    add_filter_conditions(report_filter, args.filter)

    sev_count = client.getSeverityCounts(run_ids, report_filter, None)
    severities = []
    severity_total = 0
    for key, count in sorted(sev_count.items(), reverse=True):
        severities.append(
            dict(severity=ttypes.Severity._VALUES_TO_NAMES[key],
                 reports=count))
        severity_total += count

    all_results = []
    total = defaultdict(int)
    for key, checker_data in sorted(all_checkers_dict.items(),
                                    key=lambda x: x[1].severity,
                                    reverse=True):
        all_results.append(
            dict(
                checker=key,
                severity=ttypes.Severity._VALUES_TO_NAMES[
                    checker_data.severity],
                reports=checker_data.count,
                unreviewed=checker_count(unrev_checkers, key),
                confirmed=checker_count(confirmed_checkers, key),
                false_positive=checker_count(false_checkers, key),
                intentional=checker_count(intentional_checkers, key),
                resolved=checker_count(resolved_checkers, key),
            ))
        total['total_reports'] += checker_data.count
        total['total_resolved'] += checker_count(resolved_checkers, key)
        total['total_unreviewed'] += checker_count(unrev_checkers, key)
        total['total_confirmed'] += checker_count(confirmed_checkers, key)
        total['total_false_positive'] += checker_count(false_checkers, key)
        total['total_intentional'] += checker_count(intentional_checkers, key)

    if args.output_format == 'json':
        print(CmdLineOutputEncoder().encode(all_results))
    else:
        header = [
            'Checker', 'Severity', 'All reports', 'Resolved', 'Unreviewed',
            'Confirmed', 'False positive', "Intentional"
        ]

        rows = []
        for stat in all_results:
            rows.append(
                (stat['checker'], stat['severity'], str(stat['reports']),
                 str(stat['resolved']), str(stat['unreviewed']),
                 str(stat['confirmed']), str(stat['false_positive']),
                 str(stat['intentional'])))

        rows.append(
            ('Total', '-', str(total['total_reports']),
             str(total['total_resolved']), str(total['total_unreviewed']),
             str(total['total_confirmed']), str(total['total_false_positive']),
             str(total['total_intentional'])))

        print(
            twodim_to_str(args.output_format,
                          header,
                          rows,
                          separate_footer=True))

        # Print severity counts
        header = ['Severity', 'All reports']

        rows = []
        for stat in severities:
            rows.append((stat['severity'], str(stat['reports'])))

        rows.append(('Total', str(severity_total)))

        print(
            twodim_to_str(args.output_format,
                          header,
                          rows,
                          separate_footer=True))
Example #22
def handle_diff_results(args):

    init_logger(args.verbose if 'verbose' in args else None)

    context = generic_package_context.get_context()

    def get_diff_results(client, baseids, cmp_data):

        report_filter = ttypes.ReportFilter()
        add_filter_conditions(report_filter, args.filter)

        # Do not show resolved bugs in the 'new' compare mode.
        if cmp_data.diffType == ttypes.DiffType.NEW:
            report_filter.detectionStatus = [
                ttypes.DetectionStatus.NEW, ttypes.DetectionStatus.UNRESOLVED,
                ttypes.DetectionStatus.REOPENED
            ]

        sort_mode = [(ttypes.SortMode(ttypes.SortType.FILENAME,
                                      ttypes.Order.ASC))]
        limit = constants.MAX_QUERY_SIZE
        offset = 0

        all_results = []
        results = client.getRunResults(baseids, limit, offset, sort_mode,
                                       report_filter, cmp_data)

        while results:
            all_results.extend(results)
            offset += limit
            results = client.getRunResults(baseids, limit, offset, sort_mode,
                                           report_filter, cmp_data)
        return all_results

    def get_report_dir_results(reportdir):
        all_reports = []
        for filename in os.listdir(reportdir):
            if filename.endswith(".plist"):
                file_path = os.path.join(reportdir, filename)
                LOG.debug("Parsing:" + file_path)
                try:
                    files, reports = plist_parser.parse_plist(file_path)
                    for report in reports:
                        report.main['location']['file_name'] = \
                            files[int(report.main['location']['file'])]
                    all_reports.extend(reports)

                except Exception as ex:
                    LOG.error('The generated plist is not valid!')
                    LOG.error(ex)
        return all_reports

    def get_line_from_file(filename, lineno):
        with open(filename, 'r') as f:
            i = 1
            for line in f:
                if i == lineno:
                    return line
                i += 1
        return ""

    def get_diff_base_results(client, baseids, base_hashes, suppressed_hashes):
        base_results = []
        report_filter = ttypes.ReportFilter()
        add_filter_conditions(report_filter, args.filter)

        sort_mode = [(ttypes.SortMode(ttypes.SortType.FILENAME,
                                      ttypes.Order.ASC))]
        limit = constants.MAX_QUERY_SIZE
        offset = 0

        report_filter.reportHash = base_hashes + suppressed_hashes
        results = client.getRunResults(baseids, limit, offset, sort_mode,
                                       report_filter, None)
        while results:
            base_results.extend(results)
            offset += limit
            results = client.getRunResults(baseids, limit, offset, sort_mode,
                                           report_filter, None)
        return base_results

    def get_diff_report_dir(client, baseids, report_dir, cmp_data):
        filtered_reports = []
        report_dir_results = get_report_dir_results(report_dir)
        new_hashes = {}
        suppressed_in_code = []

        for rep in report_dir_results:
            bughash = rep.main['issue_hash_content_of_line_in_context']
            source_file = rep.main['location']['file_name']
            bug_line = rep.main['location']['line']
            checker_name = rep.main['check_name']

            new_hashes[bughash] = rep
            sc_handler = SourceCodeCommentHandler(source_file)
            src_comment_data = sc_handler.filter_source_line_comments(
                bug_line, checker_name)

            if len(src_comment_data) == 1:
                suppressed_in_code.append(bughash)
                LOG.debug("Bug " + bughash + "is suppressed in code. file:" +
                          source_file + "Line " + str(bug_line))
            elif len(src_comment_data) > 1:
                LOG.warning("Multiple source code comment can be found "
                            "for '{0}' checker in '{1}' at line {2}. "
                            "This bug will not be suppressed!".format(
                                checker_name, source_file, bug_line))

        base_hashes = client.getDiffResultsHash(baseids, new_hashes.keys(),
                                                cmp_data.diffType)

        if cmp_data.diffType == ttypes.DiffType.NEW or \
           cmp_data.diffType == ttypes.DiffType.UNRESOLVED:
            # Shows reports from the report dir which are not present in the
            # baseline (NEW reports) or appear in both side (UNRESOLVED
            # reports) and not suppressed in the code.
            for result in report_dir_results:
                h = result.main['issue_hash_content_of_line_in_context']
                if h in base_hashes and h not in suppressed_in_code:
                    filtered_reports.append(result)
        elif cmp_data.diffType == ttypes.DiffType.RESOLVED:
            # Show bugs in the baseline (server) which are not present in the
            # report dir or suppressed.
            results = get_diff_base_results(client, baseids, base_hashes,
                                            suppressed_in_code)
            for result in results:
                filtered_reports.append(result)

        return filtered_reports

    def cached_report_file_lookup(file_cache, file_id):
        """
        Get source file data for the given file and caches it in a file cache
        if file data is not found in the cache. Finally, it returns the source
        file data from the cache.
        """
        if file_id not in file_cache:
            source = client.getSourceFileData(file_id, True,
                                              ttypes.Encoding.BASE64)
            file_content = base64.b64decode(source.fileContent)
            file_cache[file_id] = {
                'id': file_id,
                'path': source.filePath,
                'content': file_content
            }

        return file_cache[file_id]

    def get_report_data(client, reports, file_cache):
        """
        Returns necessary report files and report data events for the HTML
        plist parser.
        """
        file_sources = {}
        report_data = []

        for report in reports:
            file_sources[report.fileId] = cached_report_file_lookup(
                file_cache, report.fileId)

            details = client.getReportDetails(report.reportId)
            events = []
            for index, event in enumerate(details.pathEvents):
                file_sources[event.fileId] = cached_report_file_lookup(
                    file_cache, event.fileId)

                events.append({
                    'line': event.startLine,
                    'col': event.startCol,
                    'file': event.fileId,
                    'msg': event.msg,
                    'step': index + 1
                })
            report_data.append(events)

        return {'files': file_sources, 'reports': report_data}

    def reports_to_report_data(reports):
        """
        Converts reports from Report class from one plist file
        to report data events for the HTML plist parser.
        """
        file_sources = {}
        fname_to_fid = {}
        report_data = []
        findex = 0

        for report in reports:
            # Not all reports in this list refer to the same files, so we
            # build a single file list containing every file from every
            # report.
            for f in report.files:
                if f not in fname_to_fid:
                    try:
                        with open(f, 'r') as source_file:
                            content = source_file.read()
                    except (OSError, IOError):
                        content = f + " NOT FOUND."
                    file_sources[findex] = {
                        'id': findex,
                        'path': f,
                        'content': content
                    }
                    fname_to_fid[f] = findex
                    findex += 1

            events = []
            pathElements = report.bug_path
            index = 1
            for element in pathElements:
                if element['kind'] == 'event':
                    fname = report.files[element['location']['file']]
                    new_fid = fname_to_fid[fname]
                    events.append({
                        'line': element['location']['line'],
                        'col': element['location']['col'],
                        'file': new_fid,
                        'msg': element['message'],
                        'step': index
                    })
                    index += 1
            report_data.append(events)

        return {'files': file_sources, 'reports': report_data}

    def report_to_html(client, reports, output_dir):
        """
        Generate HTML output files for the given reports in the given output
        directory by using the Plist To HTML parser.
        """
        html_builder = PlistToHtml.HtmlBuilder(context.path_plist_to_html_dist)

        file_report_map = defaultdict(list)
        for report in reports:
            file_path = ""
            if isinstance(report, Report):
                file_path = report.main['location']['file_name']
            else:
                file_path = report.checkedFile
            file_report_map[file_path].append(report)

        file_cache = {}
        for file_path, file_reports in file_report_map.items():
            checked_file = file_path
            filename = os.path.basename(checked_file)
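            # Hash the full path so identically named files from different
            # directories end up in distinct output files.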
            h = int(hashlib.md5(file_path.encode('utf-8')).hexdigest(),
                    16) % (10**8)

            if isinstance(file_reports[0], Report):
                report_data = reports_to_report_data(file_reports)
            else:
                report_data = get_report_data(client, file_reports, file_cache)

            output_path = os.path.join(output_dir,
                                       filename + '_' + str(h) + '.html')
            html_builder.create(output_path, report_data)
            print('An HTML file was generated for file://{0}: file://{1}'
                  .format(checked_file, output_path))

    def print_reports(client, reports, output_format):
        output_dir = args.export_dir if 'export_dir' in args else None
        if 'clean' in args and output_dir and os.path.isdir(output_dir):
            print("Previous analysis results in '{0}' have been removed, "
                  "overwriting with current results.".format(output_dir))
            shutil.rmtree(output_dir)

        if output_format == 'json':
            output = []
            for report in reports:
                if isinstance(report, Report):
                    output.append(report.main)
                else:
                    output.append(report)
            print(CmdLineOutputEncoder().encode(output))
            return

        if output_format == 'html':
            output_dir = args.export_dir
            if not os.path.exists(output_dir):
                os.makedirs(output_dir)

            print("Generating HTML output files to file://{0} directory:\n".
                  format(output_dir))

            report_to_html(client, reports, output_dir)

            print('\nTo view the results in a browser run:\n'
                  '  $ firefox {0}'.format(args.export_dir))
            return

        header = ['File', 'Checker', 'Severity', 'Msg', 'Source']
        rows = []

        source_lines = defaultdict(set)
        for report in reports:
            if not isinstance(report, Report):
                source_lines[report.fileId].add(report.line)

        lines_in_files_requested = []
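        # Batch the line numbers per file so every needed source line can be
        # fetched from the server in a single request.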
        for key in source_lines:
            lines_in_files_requested.append(
                ttypes.LinesInFilesRequested(fileId=key,
                                             lines=source_lines[key]))

        source_line_contents = client.getLinesInSourceFileContents(
            lines_in_files_requested, ttypes.Encoding.BASE64)

        for report in reports:
            if isinstance(report, Report):
                # report is coming from a plist file.
                bug_line = report.main['location']['line']
                bug_col = report.main['location']['col']
                sev = 'unknown'
                checked_file = report.main['location']['file_name']\
                    + ':' + str(bug_line) + ":" + str(bug_col)
                check_name = report.main['check_name']
                check_msg = report.main['description']
                source_line =\
                    get_line_from_file(report.main['location']['file_name'],
                                       bug_line)
            else:
                # report is of ReportData type coming from CodeChecker server.
                bug_line = report.line
                bug_col = report.column
                sev = ttypes.Severity._VALUES_TO_NAMES[report.severity]
                checked_file = report.checkedFile + ':' + str(bug_line) +\
                    ":" + str(bug_col)
                source_line = base64.b64decode(
                    source_line_contents[report.fileId][bug_line])
                check_name = report.checkerId
                check_msg = report.checkerMsg
            rows.append(
                (checked_file, check_name, sev, check_msg, source_line))
        if output_format == 'plaintext':
            for row in rows:
                print("{0}: {1} [{2}]\n{3}\n".format(row[0], row[3], row[1],
                                                     row[4]))
        else:
            print(twodim_to_str(output_format, header, rows))

    client = setup_client(args.product_url)

    base_runs = get_runs(client, [args.basename])
    base_ids = [run.runId for run in base_runs]

    if not base_ids:
        LOG.warning("No run names match the given pattern: " + args.basename)
        sys.exit(1)

    LOG.info("Matching base runs: " +
             ', '.join(run.name for run in base_runs))

    cmp_data = ttypes.CompareData()
    if 'new' in args:
        cmp_data.diffType = ttypes.DiffType.NEW
    elif 'unresolved' in args:
        cmp_data.diffType = ttypes.DiffType.UNRESOLVED
    elif 'resolved' in args:
        cmp_data.diffType = ttypes.DiffType.RESOLVED

    results = []
    if os.path.isdir(args.newname):
        # If newname is a valid directory we assume that it is a report dir and
        # we are in local compare mode.
        results = get_diff_report_dir(client, base_ids,
                                      os.path.abspath(args.newname), cmp_data)
    else:
        new_runs = get_runs(client, [args.newname])
        if not new_runs:
            LOG.warning("No run names match the given pattern: " +
                        args.newname)
            sys.exit(1)

        cmp_data.runIds = [run.runId for run in new_runs]

        LOG.info("Matching new runs: " +
                 ', '.join(run.name for run in new_runs))

        results = get_diff_results(client, base_ids, cmp_data)

    if not results:
        LOG.info("No results.")
    else:
        print_reports(client, results, args.output_format)
Example #23
def handle_diff_results(args):

    def getDiffResults(client, baseids, cmp_data):

        report_filter = ttypes.ReportFilter()
        add_filter_conditions(report_filter, args.filter)

        # Do not show resolved bugs in compare mode new.
        if cmp_data.diffType == ttypes.DiffType.NEW:
            report_filter.detectionStatus = [
                ttypes.DetectionStatus.NEW,
                ttypes.DetectionStatus.UNRESOLVED,
                ttypes.DetectionStatus.REOPENED]

        sort_mode = [(ttypes.SortMode(
            ttypes.SortType.FILENAME,
            ttypes.Order.ASC))]
        limit = constants.MAX_QUERY_SIZE
        offset = 0
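        # Page through the results MAX_QUERY_SIZE entries at a time until the
        # server returns an empty list.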

        all_results = []
        results = client.getRunResults(baseids,
                                       limit,
                                       offset,
                                       sort_mode,
                                       report_filter,
                                       cmp_data)

        while results:
            all_results.extend(results)
            offset += limit
            results = client.getRunResults(baseids,
                                           limit,
                                           offset,
                                           sort_mode,
                                           report_filter,
                                           cmp_data)
        return all_results

    def getReportDirResults(reportdir):
        all_reports = []
        for filename in os.listdir(reportdir):
            if filename.endswith(".plist"):
                file_path = os.path.join(reportdir, filename)
                LOG.debug("Parsing:" + file_path)
                try:
                    files, reports = plist_parser.parse_plist(file_path)
                    for report in reports:
                        report.main['location']['file_name'] = \
                            files[int(report.main['location']['file'])]
                    all_reports.extend(reports)

                except Exception as ex:
                    LOG.error('The generated plist is not valid!')
                    LOG.error(ex)
        return all_reports

    def getLineFromFile(filename, lineno):
        with open(filename, 'r') as f:
            i = 1
            for line in f:
                if i == lineno:
                    return line
                i += 1
        return ""

    def getLineFromRemoteFile(client, fid, lineno):
        # Thrift Python client cannot decode JSONs that contain non '\u00??'
        # characters, so we instead ask for a Base64-encoded version.
        source = client.getSourceFileData(fid, True, ttypes.Encoding.BASE64)
        lines = base64.b64decode(source.fileContent).split('\n')
        return "" if len(lines) < lineno else lines[lineno - 1]

    def getDiffReportDir(client, baseids, report_dir, diff_type):

        report_filter = ttypes.ReportFilter()
        add_filter_conditions(report_filter, args.filter)

        sort_mode = [(ttypes.SortMode(
            ttypes.SortType.FILENAME,
            ttypes.Order.ASC))]
        limit = constants.MAX_QUERY_SIZE
        offset = 0

        base_results = []
        results = client.getRunResults(baseids,
                                       limit,
                                       offset,
                                       sort_mode,
                                       report_filter,
                                       None)
        while results:
            base_results.extend(results)
            offset += limit
            results = client.getRunResults(baseids,
                                           limit,
                                           offset,
                                           sort_mode,
                                           report_filter,
                                           None)
        base_hashes = {}
        for res in base_results:
            base_hashes[res.bugHash] = res

        filtered_reports = []
        new_results = getReportDirResults(report_dir)
        new_hashes = {}
        suppressed_in_code = []

        for rep in new_results:
            bughash = rep.main['issue_hash_content_of_line_in_context']
            source_file = rep.main['location']['file_name']
            bug_line = rep.main['location']['line']
            new_hashes[bughash] = rep
            sp_handler = suppress_handler.SourceSuppressHandler(
                    source_file,
                    bug_line,
                    bughash,
                    rep.main['check_name'])
            if sp_handler.get_suppressed():
                suppressed_in_code.append(bughash)
                LOG.debug("Bug " + bughash +
                          "is suppressed in code. file:" + source_file +
                          "Line "+str(bug_line))

        if diff_type == 'new':
            # Shows new reports from the report dir
            # which are not present in the baseline (server)
            # and not suppressed in the code.
            for result in new_results:
                new_hash = result.main['issue_hash_content_of_line_in_context']
                if new_hash not in base_hashes and \
                        new_hash not in suppressed_in_code:
                    filtered_reports.append(result)
        elif diff_type == 'resolved':
            # Show bugs in the baseline (server)
            # which are not present in the report dir
            # or suppressed.
            for result in base_results:
                if result.bugHash not in new_hashes or \
                        result.bugHash in suppressed_in_code:
                    filtered_reports.append(result)
        elif diff_type == 'unresolved':
            # Show bugs in the report dir which are also present in the
            # baseline (server) and are not suppressed in the code.
            for result in new_results:
                new_hash = result.main['issue_hash_content_of_line_in_context']
                if new_hash in base_hashes and \
                        new_hash not in suppressed_in_code:
                    filtered_reports.append(result)
        return filtered_reports

    def printReports(client, reports, output_format):
        if output_format == 'json':
            output = []
            for report in reports:
                if isinstance(report, Report):
                    output.append(report.main)
                else:
                    output.append(report)
            print(CmdLineOutputEncoder().encode(output))
            return

        header = ['File', 'Checker', 'Severity', 'Msg', 'Source']
        rows = []
        for report in reports:
            if isinstance(report, Report):
                bug_line = report.main['location']['line']
                bug_col = report.main['location']['col']
                sev = 'unknown'
                checked_file = report.main['location']['file_name']\
                    + ':' + str(bug_line) + ":" + str(bug_col)
                check_name = report.main['check_name']
                check_msg = report.main['description']
                source_line =\
                    getLineFromFile(report.main['location']['file_name'],
                                    bug_line)
            else:
                bug_line = report.line
                bug_col = report.column
                sev = ttypes.Severity._VALUES_TO_NAMES[report.severity]
                checked_file = report.checkedFile + ':' + str(bug_line) +\
                    ":" + str(bug_col)
                source_line =\
                    getLineFromRemoteFile(client, report.fileId, bug_line)
                check_name = report.checkerId
                check_msg = report.checkerMsg
            rows.append(
                (checked_file, check_name, sev, check_msg, source_line))
        if output_format == 'plaintext':
            for row in rows:
                print("{0}: {1} [{2}]\n{3}\n".format(row[0],
                      row[3], row[1], row[4]))
        else:
            print(twodim_to_str(output_format, header, rows))

    client = setup_client(args.product_url)

    report_dir_mode = False
    if os.path.isdir(args.newname):
        # If newname is a valid directory we assume that it is a report dir and
        # we are in local compare mode.
        report_dir_mode = True
    else:
        run_info = check_run_names(client, [args.newname])
        newid = run_info[args.newname].runId

    try:
        basename_regex = '^' + args.basename + '$'
        base_runs = [run for run in client.getRunData(None)
                     if re.match(basename_regex, run.name)]
        base_ids = [run.runId for run in base_runs]
    except re.error:
        LOG.error('Invalid regex format in ' + args.basename)
        sys.exit(1)

    if not base_ids:
        LOG.warning("No run names match the given pattern: " + args.basename)
        sys.exit(1)

    LOG.info("Matching against runs: " +
             ', '.join(run.name for run in base_runs))

    results = []
    if report_dir_mode:
        diff_type = 'new'
        if 'unresolved' in args:
            diff_type = 'unresolved'
        elif 'resolved' in args:
            diff_type = 'resolved'
        results = getDiffReportDir(client, base_ids,
                                   os.path.abspath(args.newname),
                                   diff_type)
    else:
        cmp_data = ttypes.CompareData(runIds=[newid])
        if 'new' in args:
            cmp_data.diffType = ttypes.DiffType.NEW
        elif 'unresolved' in args:
            cmp_data.diffType = ttypes.DiffType.UNRESOLVED
        elif 'resolved' in args:
            cmp_data.diffType = ttypes.DiffType.RESOLVED

        results = getDiffResults(client, base_ids, cmp_data)

    printReports(client, results, args.output_format)
Example #24
def handle_diff_results(args):

    init_logger(args.verbose if 'verbose' in args else None)
    check_deprecated_arg_usage(args)

    f_severities, f_checkers, f_file_path, _, _ = check_filter_values(args)

    context = generic_package_context.get_context()

    def skip_report_dir_result(report):
        """
        Returns True if the report should be skipped from the results based on
        the given filter set.
        """
        if f_severities:
            severity_name = context.severity_map.get(
                report.main['check_name'], 'unspecified')
            if severity_name.lower() not in map(str.lower, f_severities):
                return True

        if f_checkers:
            checker_name = report.main['check_name']
            if not any([re.match(r'^' + c.replace("*", ".*") + '$',
                                 checker_name, re.IGNORECASE)
                        for c in f_checkers]):
                return True

        if f_file_path:
            file_path = report.files[int(report.main['location']['file'])]
            if not any([re.match(r'^' + f.replace("*", ".*") + '$',
                                 file_path, re.IGNORECASE)
                        for f in f_file_path]):
                return True

        if 'checker_msg' in args:
            checker_msg = report.main['description']
            if not any([re.match(r'^' + c.replace("*", ".*") + '$',
                                 checker_msg, re.IGNORECASE)
                        for c in args.checker_msg]):
                return True

        return False

    def get_report_dir_results(reportdir):
        all_reports = []
        processed_path_hashes = set()
        for filename in os.listdir(reportdir):
            if filename.endswith(".plist"):
                file_path = os.path.join(reportdir, filename)
                LOG.debug("Parsing:" + file_path)
                try:
                    files, reports = plist_parser.parse_plist(file_path)
                    for report in reports:
                        path_hash = get_report_path_hash(report, files)
                        if path_hash in processed_path_hashes:
                            LOG.debug("Not showing report because it is a "
                                      "deduplication of an already processed "
                                      "report!")
                            LOG.debug("Path hash: %s", path_hash)
                            LOG.debug(report)
                            continue

                        if skip_report_dir_result(report):
                            continue

                        processed_path_hashes.add(path_hash)
                        report.main['location']['file_name'] = \
                            files[int(report.main['location']['file'])]
                        all_reports.append(report)

                except Exception as ex:
                    LOG.error('The generated plist is not valid!')
                    LOG.error(ex)
        return all_reports

    def get_line_from_file(filename, lineno):
        with open(filename, 'r') as f:
            i = 1
            for line in f:
                if i == lineno:
                    return line
                i += 1
        return ""

    def get_diff_base_results(client, baseids, base_hashes, suppressed_hashes):
        base_results = []
        report_filter = ttypes.ReportFilter()
        add_filter_conditions(client, report_filter, args)

        sort_mode = [(ttypes.SortMode(
            ttypes.SortType.FILENAME,
            ttypes.Order.ASC))]
        limit = constants.MAX_QUERY_SIZE
        offset = 0

        report_filter.reportHash = base_hashes + suppressed_hashes
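        # Query the base runs for the given diff hashes together with the
        # hashes suppressed in the local code.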
        results = client.getRunResults(baseids,
                                       limit,
                                       offset,
                                       sort_mode,
                                       report_filter,
                                       None)
        while results:
            base_results.extend(results)
            offset += limit
            results = client.getRunResults(baseids,
                                           limit,
                                           offset,
                                           sort_mode,
                                           report_filter,
                                           None)
        return base_results

    def get_suppressed_reports(reports):
        """
        Returns suppressed reports.
        """
        suppressed_in_code = []
        for rep in reports:
            bughash = rep.report_hash
            source_file = rep.main['location']['file_name']
            bug_line = rep.main['location']['line']
            checker_name = rep.main['check_name']

            sc_handler = SourceCodeCommentHandler(source_file)
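            # Collect any suppression comments attached to the bug line for
            # this checker.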
            src_comment_data = sc_handler.filter_source_line_comments(
                bug_line,
                checker_name)

            if len(src_comment_data) == 1:
                suppressed_in_code.append(bughash)
                LOG.debug("Bug " + bughash +
                          " is suppressed in code. File: " + source_file +
                          " Line " + str(bug_line))
            elif len(src_comment_data) > 1:
                LOG.warning(
                    "Multiple source code comments were found "
                    "for the '{0}' checker in '{1}' at line {2}. "
                    "This bug will not be suppressed!".format(
                        checker_name, source_file, bug_line))
        return suppressed_in_code

    def get_diff_type():
        """
        Returns Thrift DiffType value by processing the arguments.
        """
        if 'new' in args:
            return ttypes.DiffType.NEW

        if 'unresolved' in args:
            return ttypes.DiffType.UNRESOLVED

        if 'resolved' in args:
            return ttypes.DiffType.RESOLVED

        return None

    def get_diff_local_dir_remote_run(client, report_dir, run_name):
        """
        Compares a local report directory with a remote run.
        """
        filtered_reports = []
        report_dir_results = get_report_dir_results(
            os.path.abspath(report_dir))
        suppressed_in_code = get_suppressed_reports(report_dir_results)

        diff_type = get_diff_type()
        run_ids, run_names, _ = process_run_arg(run_name)
        local_report_hashes = set([r.report_hash for r in report_dir_results])

        if diff_type == ttypes.DiffType.NEW:
            # Get report hashes which can be found only in the remote runs.
            remote_hashes = \
                client.getDiffResultsHash(run_ids,
                                          local_report_hashes,
                                          ttypes.DiffType.RESOLVED)

            results = get_diff_base_results(client, run_ids,
                                            remote_hashes,
                                            suppressed_in_code)
            for result in results:
                filtered_reports.append(result)
        elif diff_type == ttypes.DiffType.UNRESOLVED:
            # Get remote hashes which can be found in the remote run and in the
            # local report directory.
            remote_hashes = \
                client.getDiffResultsHash(run_ids,
                                          local_report_hashes,
                                          ttypes.DiffType.UNRESOLVED)
            for result in report_dir_results:
                rep_h = result.report_hash
                if rep_h in remote_hashes and rep_h not in suppressed_in_code:
                    filtered_reports.append(result)
        elif diff_type == ttypes.DiffType.RESOLVED:
            # Get hashes of reports which can be found both in the remote run
            # and in the local report directory; local reports whose hash is
            # not among these are the resolved ones.
            remote_hashes = \
                client.getDiffResultsHash(run_ids,
                                          local_report_hashes,
                                          ttypes.DiffType.UNRESOLVED)
            for result in report_dir_results:
                if result.report_hash not in remote_hashes:
                    filtered_reports.append(result)

        return filtered_reports, run_names

    def get_diff_remote_run_local_dir(client, run_name, report_dir):
        """
        Compares a remote run with a local report directory.
        """
        filtered_reports = []
        report_dir_results = get_report_dir_results(
            os.path.abspath(report_dir))
        suppressed_in_code = get_suppressed_reports(report_dir_results)

        diff_type = get_diff_type()
        run_ids, run_names, _ = process_run_arg(run_name)
        local_report_hashes = set([r.report_hash for r in report_dir_results])

        remote_hashes = client.getDiffResultsHash(run_ids,
                                                  local_report_hashes,
                                                  diff_type)

        if diff_type in [ttypes.DiffType.NEW, ttypes.DiffType.UNRESOLVED]:
            # Shows reports from the report dir which are not present in
            # the baseline (NEW reports) or appear in both side (UNRESOLVED
            # reports) and not suppressed in the code.
            for result in report_dir_results:
                rep_h = result.report_hash
                if rep_h in remote_hashes and rep_h not in suppressed_in_code:
                    filtered_reports.append(result)
        elif diff_type == ttypes.DiffType.RESOLVED:
            # Show bugs in the baseline (server) which are not present in
            # the report dir or suppressed.
            results = get_diff_base_results(client,
                                            run_ids,
                                            remote_hashes,
                                            suppressed_in_code)
            for result in results:
                filtered_reports.append(result)

        return filtered_reports, run_names

    def get_diff_remote_runs(client, basename, newname):
        """
        Compares two remote runs and returns the filtered results.
        """
        report_filter = ttypes.ReportFilter()
        add_filter_conditions(client, report_filter, args)

        base_ids, base_run_names, base_run_tags = process_run_arg(basename)
        report_filter.runTag = base_run_tags

        cmp_data = ttypes.CompareData()
        cmp_data.diffType = get_diff_type()

        new_ids, new_run_names, new_run_tags = process_run_arg(newname)
        cmp_data.runIds = new_ids
        cmp_data.runTag = new_run_tags

        # Do not show resolved bugs in compare mode new.
        if cmp_data.diffType == ttypes.DiffType.NEW:
            report_filter.detectionStatus = [
                ttypes.DetectionStatus.NEW,
                ttypes.DetectionStatus.UNRESOLVED,
                ttypes.DetectionStatus.REOPENED]

        sort_mode = [(ttypes.SortMode(
            ttypes.SortType.FILENAME,
            ttypes.Order.ASC))]
        limit = constants.MAX_QUERY_SIZE
        offset = 0

        all_results = []
        results = client.getRunResults(base_ids,
                                       limit,
                                       offset,
                                       sort_mode,
                                       report_filter,
                                       cmp_data)

        while results:
            all_results.extend(results)
            offset += limit
            results = client.getRunResults(base_ids,
                                           limit,
                                           offset,
                                           sort_mode,
                                           report_filter,
                                           cmp_data)
        return all_results, base_run_names, new_run_names

    def get_diff_local_dirs(basename, newname):
        """
        Compares two report directories and returns the filtered results.
        """
        filtered_reports = []
        base_results = get_report_dir_results(os.path.abspath(basename))
        new_results = get_report_dir_results(os.path.abspath(newname))

        base_hashes = set([res.report_hash for res in base_results])
        new_hashes = set([res.report_hash for res in new_results])

        diff_type = get_diff_type()
        if diff_type == ttypes.DiffType.NEW:
            for res in new_results:
                if res.report_hash not in base_hashes:
                    filtered_reports.append(res)
        elif diff_type == ttypes.DiffType.UNRESOLVED:
            for res in new_results:
                if res.report_hash in base_hashes:
                    filtered_reports.append(res)
        elif diff_type == ttypes.DiffType.RESOLVED:
            for res in base_results:
                if res.report_hash not in new_hashes:
                    filtered_reports.append(res)

        return filtered_reports

    def cached_report_file_lookup(file_cache, file_id):
        """
        Get source file data for the given file and caches it in a file cache
        if file data is not found in the cache. Finally, it returns the source
        file data from the cache.
        """
        if file_id not in file_cache:
            source = client.getSourceFileData(file_id, True,
                                              ttypes.Encoding.BASE64)
            file_content = base64.b64decode(source.fileContent)
            file_cache[file_id] = {'id': file_id,
                                   'path': source.filePath,
                                   'content': file_content}

        return file_cache[file_id]

    def get_report_data(client, reports, file_cache):
        """
        Returns necessary report files and report data events for the HTML
        plist parser.
        """
        file_sources = {}
        report_data = []

        for report in reports:
            file_sources[report.fileId] = cached_report_file_lookup(
                file_cache, report.fileId)

            details = client.getReportDetails(report.reportId)
            events = []
            for index, event in enumerate(details.pathEvents):
                file_sources[event.fileId] = cached_report_file_lookup(
                    file_cache, event.fileId)

                events.append({'line': event.startLine,
                               'col': event.startCol,
                               'file': event.fileId,
                               'msg': event.msg,
                               'step': index + 1})

            report_data.append({
                'events': events,
                'path': report.checkedFile,
                'reportHash': report.bugHash,
                'checkerName': report.checkerId})

        return {'files': file_sources,
                'reports': report_data}

    def reports_to_report_data(reports):
        """
        Converts reports from Report class from one plist file
        to report data events for the HTML plist parser.
        """
        file_sources = {}
        fname_to_fid = {}
        report_data = []
        findex = 0

        for report in reports:
            # Not all reports in this list may refer to the same files,
            # so we need to build a single file list containing
            # all files from all reports.
            for f in report.files:
                if f not in fname_to_fid:
                    try:
                        with codecs.open(f, 'r', 'UTF-8',
                                         errors='replace') as source_data:
                            content = source_data.read()
                    except (OSError, IOError):
                        content = f + " NOT FOUND."
                    file_sources[findex] = {'id': findex,
                                            'path': f,
                                            'content': content}
                    fname_to_fid[f] = findex
                    findex += 1

            events = []
            pathElements = report.bug_path
            index = 1
            for element in pathElements:
                if element['kind'] == 'event':
                    fname = report.files[element['location']['file']]
                    new_fid = fname_to_fid[fname]
                    events.append({'line': element['location']['line'],
                                   'col':  element['location']['col'],
                                   'file': new_fid,
                                   'msg':  element['message'],
                                   'step': index})
                    index += 1

            report_hash = report.main['issue_hash_content_of_line_in_context']
            report_data.append({
                'events': events,
                'path': report.main['location']['file_name'],
                'reportHash': report_hash,
                'checkerName': report.main['check_name']})

        return {'files': file_sources,
                'reports': report_data}

    def report_to_html(client, reports, output_dir):
        """
        Generate HTML output files for the given reports in the given output
        directory by using the Plist To HTML parser.
        """
        html_builder = PlistToHtml.HtmlBuilder(
            context.path_plist_to_html_dist,
            context.severity_map)

        file_report_map = defaultdict(list)
        for report in reports:
            file_path = ""
            if isinstance(report, Report):
                file_path = report.main['location']['file_name']
            else:
                file_path = report.checkedFile
            file_report_map[file_path].append(report)

        file_cache = {}
        for file_path, file_reports in file_report_map.items():
            checked_file = file_path
            filename = os.path.basename(checked_file)
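            # Hash the full path so identically named files from different
            # directories end up in distinct output files.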
            h = int(hashlib.md5(file_path.encode('utf-8')).hexdigest(),
                    16) % (10 ** 8)

            if isinstance(file_reports[0], Report):
                report_data = reports_to_report_data(file_reports)
            else:
                report_data = get_report_data(client, file_reports, file_cache)

            output_path = os.path.join(output_dir,
                                       filename + '_' + str(h) + '.html')
            html_builder.create(output_path, report_data)
            print('An HTML file was generated for file://{0}: file://{1}'
                  .format(checked_file, output_path))

        html_builder.create_index_html(output_dir)

    def print_reports(client, reports, output_format):
        output_dir = args.export_dir if 'export_dir' in args else None
        if 'clean' in args and output_dir and os.path.isdir(output_dir):
            print("Previous analysis results in '{0}' have been removed, "
                  "overwriting with current results.".format(output_dir))
            shutil.rmtree(output_dir)

        if output_format == 'json':
            output = []
            for report in reports:
                if isinstance(report, Report):
                    output.append(report.main)
                else:
                    output.append(report)
            print(CmdLineOutputEncoder().encode(output))
            return

        if output_format == 'html':
            output_dir = args.export_dir
            if not os.path.exists(output_dir):
                os.makedirs(output_dir)

            print("Generating HTML output files to file://{0} directory:\n"
                  .format(output_dir))

            report_to_html(client, reports, output_dir)

            print('\nTo view the results in a browser run:\n'
                  '  $ firefox {0}'.format(os.path.join(args.export_dir,
                                                        'index.html')))
            return

        header = ['Severity', 'File', 'Msg', 'Checker', 'Source']
        rows = []

        source_lines = defaultdict(set)
        for report in reports:
            if not isinstance(report, Report):
                source_lines[report.fileId].add(report.line)

        lines_in_files_requested = []
        for key in source_lines:
            lines_in_files_requested.append(
                ttypes.LinesInFilesRequested(fileId=key,
                                             lines=source_lines[key]))

        # Fetch all the requested source lines with a single API call instead
        # of querying the server once per report.
        source_line_contents = client.getLinesInSourceFileContents(
            lines_in_files_requested, ttypes.Encoding.BASE64)

        for report in reports:
            if isinstance(report, Report):
                # report is coming from a plist file.
                bug_line = report.main['location']['line']
                bug_col = report.main['location']['col']
                checked_file = report.main['location']['file_name']\
                    + ':' + str(bug_line) + ":" + str(bug_col)
                check_name = report.main['check_name']
                sev = context.severity_map.get(check_name, 'unspecified')
                check_msg = report.main['description']
                source_line =\
                    get_line_from_file(report.main['location']['file_name'],
                                       bug_line)
            else:

                # report is of ReportData type coming from CodeChecker server.
                bug_line = report.line
                bug_col = report.column
                sev = ttypes.Severity._VALUES_TO_NAMES[report.severity]
                checked_file = report.checkedFile + ':' + str(bug_line) +\
                    ":" + str(bug_col)
                source_line = base64.b64decode(
                    source_line_contents[report.fileId][bug_line])
                check_name = report.checkerId
                check_msg = report.checkerMsg
            rows.append(
                (sev, checked_file, check_msg, check_name, source_line))
        if output_format == 'plaintext':
            for row in rows:
                print("[{0}] {1}: {2} [{3}]\n{4}\n".format(
                    row[0], row[1], row[2], row[3], row[4]))
        else:
            print(twodim_to_str(output_format, header, rows))

    def get_run_tag(client, run_ids, tag_name):
        """
        Returns run tag information for the given tag name in the given runs.
        """
        run_history_filter = ttypes.RunHistoryFilter()
        run_history_filter.tagNames = [tag_name]
        run_histories = client.getRunHistory(run_ids, None, None,
                                             run_history_filter)

        return run_histories[0] if run_histories else None

    def process_run_arg(run_arg_with_tag):
        """
        Process the argument and returns run ids a run tag ids.
        The argument has the following format: <run_name>:<run_tag>
        """
        run_with_tag = run_arg_with_tag.split(':')
        run_name = run_with_tag[0]
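        # e.g. a run argument "my_run:v1.0" (illustrative values) yields the
        # run name "my_run" and the tag name "v1.0".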

        runs = get_runs(client, [run_name])
        run_ids = [run.runId for run in runs]
        run_names = [run.name for run in runs]

        if not run_ids:
            LOG.warning(
                "No run names match the given pattern: " + run_arg_with_tag)
            sys.exit(1)

        # Set the run tag if it is available.
        run_tag_name = run_with_tag[1] if len(run_with_tag) > 1 else None
        run_tags = None
        if run_tag_name:
            tag = get_run_tag(client, run_ids, run_tag_name)
            run_tags = [tag.id] if tag else None

        LOG.info("Matching runs: %s", ', '.join(run_names))

        return run_ids, run_names, run_tags

    def print_diff_results(reports):
        """
        Print the results.
        """
        if reports:
            print_reports(client, reports, args.output_format)
        else:
            LOG.info("No results.")

    client = None

    # We only set up the client if we are not comparing two local report
    # directories.
    if not os.path.isdir(args.basename) or not os.path.isdir(args.newname):
        client = setup_client(args.product_url)

    if os.path.isdir(args.basename) and os.path.isdir(args.newname):
        reports = get_diff_local_dirs(args.basename, args.newname)
        print_diff_results(reports)
        LOG.info("Compared two local report directories %s and %s",
                 os.path.abspath(args.basename),
                 os.path.abspath(args.newname))
    elif os.path.isdir(args.newname):
        reports, base_run_names = \
            get_diff_remote_run_local_dir(client,
                                          args.basename,
                                          os.path.abspath(args.newname))
        print_diff_results(reports)
        LOG.info("Compared remote run(s) %s (matching: %s) and local report "
                 "directory %s",
                 args.basename,
                 ', '.join(base_run_names),
                 os.path.abspath(args.newname))
    elif os.path.isdir(args.basename):
        reports, new_run_names = \
            get_diff_local_dir_remote_run(client,
                                          os.path.abspath(args.basename),
                                          args.newname)
        print_diff_results(reports)
        LOG.info("Compared local report directory %s and remote run(s) %s "
                 "(matching: %s).",
                 os.path.abspath(args.basename),
                 args.newname,
                 ', '.join(new_run_names))
    else:
        reports, base_run_names, new_run_names = \
            get_diff_remote_runs(client, args.basename, args.newname)
        print_diff_results(reports)
        LOG.info("Compared multiple remote runs %s (matching: %s) and %s "
                 "(matching: %s)",
                 args.basename,
                 ', '.join(base_run_names),
                 args.newname,
                 ', '.join(new_run_names))
Example #25
def handle_diff_results(args):
    def getDiffResults(getterFn, baseid, newid, suppr):
        report_filter = [
            codeCheckerDBAccess.ttypes.ReportFilter(suppressed=suppr)
        ]
        add_filter_conditions(report_filter[0], args.filter)
        sort_mode = [(ttypes.SortMode(ttypes.SortType.FILENAME,
                                      ttypes.Order.ASC))]
        limit = codeCheckerDBAccess.constants.MAX_QUERY_SIZE
        offset = 0

        all_results = []
        results = getterFn(baseid, newid, limit, offset, sort_mode,
                           report_filter)

        while results:
            all_results.extend(results)
            offset += limit
            results = getterFn(baseid, newid, limit, offset, sort_mode,
                               report_filter)
        return all_results

    def getReportDirResults(reportdir):
        all_reports = []
        for filename in os.listdir(reportdir):
            if filename.endswith(".plist"):
                file_path = os.path.join(reportdir, filename)
                LOG.debug("Parsing:" + file_path)
                try:
                    files, reports = plist_parser.parse_plist(file_path)
                    for report in reports:
                        report.main['location']['file_name'] = \
                            files[int(report.main['location']['file'])]
                    all_reports.extend(reports)

                except Exception as ex:
                    LOG.error('The generated plist is not valid!')
                    LOG.error(ex)
        return all_reports

    def getLineFromFile(filename, lineno):
        with open(filename, 'r') as f:
            i = 1
            for line in f:
                if i == lineno:
                    return line
                i += 1
        return ""

    def getLineFromRemoteFile(client, fid, lineno):
        # Thrift Python client cannot decode JSONs that contain non '\u00??'
        # characters, so we instead ask for a Base64-encoded version.
        source = client.getSourceFileData(fid, True, ttypes.Encoding.BASE64)
        lines = base64.b64decode(source.fileContent).split('\n')
        return "" if len(lines) < lineno else lines[lineno - 1]

    def getDiffReportDir(getterFn, baseid, report_dir, suppr, diff_type):
        report_filter = [
            codeCheckerDBAccess.ttypes.ReportFilter(suppressed=suppr)
        ]
        add_filter_conditions(report_filter[0], args.filter)

        sort_mode = [(ttypes.SortMode(ttypes.SortType.FILENAME,
                                      ttypes.Order.ASC))]
        limit = codeCheckerDBAccess.constants.MAX_QUERY_SIZE
        offset = 0

        base_results = []
        results = getterFn(baseid, limit, offset, sort_mode, report_filter)
        while results:
            base_results.extend(results)
            offset += limit
            results = getterFn(baseid, limit, offset, sort_mode, report_filter)
        base_hashes = {}
        for res in base_results:
            base_hashes[res.bugHash] = res

        filtered_reports = []
        new_results = getReportDirResults(report_dir)
        new_hashes = {}
        for rep in new_results:
            bughash = rep.main['issue_hash_content_of_line_in_context']
            new_hashes[bughash] = rep
        if diff_type == 'new':
            # Shows new reports from the report dir
            # which are not present in the baseline (server).
            for result in new_results:
                if result.main['issue_hash_content_of_line_in_context'] \
                        not in base_hashes:
                    filtered_reports.append(result)
        elif diff_type == 'resolved':
            # Show bugs in the baseline (server)
            # which are not present in the report dir.
            for result in base_results:
                if result.bugHash not in new_hashes:
                    filtered_reports.append(result)
        elif diff_type == 'unresolved':
            # Shows bugs in the report dir
            # which are also present in the baseline (server).
            for result in new_results:
                new_hash = result.main['issue_hash_content_of_line_in_context']
                if new_hash in base_hashes:
                    filtered_reports.append(result)
        return filtered_reports

    def printReports(client, reports, output_format):
        if output_format == 'json':
            output = []
            for report in reports:
                if isinstance(report, Report):
                    output.append(report.main)
                else:
                    output.append(report)
            print(CmdLineOutputEncoder().encode(output))
            return

        header = ['File', 'Checker', 'Severity', 'Msg', 'Source']
        rows = []
        for report in reports:
            if isinstance(report, Report):
                bug_line = report.main['location']['line']
                bug_col = report.main['location']['col']
                sev = 'unknown'
                checked_file = report.main['location']['file_name']\
                    + ':' + str(bug_line) + ":" + str(bug_col)
                check_name = report.main['check_name']
                check_msg = report.main['description']
                source_line =\
                    getLineFromFile(report.main['location']['file_name'],
                                    bug_line)
            else:
                bug_line = report.lastBugPosition.startLine
                bug_col = report.lastBugPosition.startCol
                sev = \
                    shared.ttypes.Severity._VALUES_TO_NAMES[report.severity]
                checked_file = report.checkedFile + ':' + str(bug_line) +\
                    ":" + str(bug_col)
                source_line =\
                    getLineFromRemoteFile(client, report.fileId, bug_line)
                check_name = report.checkerId
                check_msg = report.checkerMsg
            rows.append(
                (checked_file, check_name, sev, check_msg, source_line))
        if output_format == 'plaintext':
            for row in rows:
                print("{0}: {1} [{2}]\n{3}\n".format(row[0], row[3], row[1],
                                                     row[4]))
        else:
            print(twodim_to_str(output_format, header, rows))

    client = setup_client(args.host, args.port, '/')

    report_dir_mode = False
    if os.path.isdir(args.newname):
        # If newname is a valid directory
        # we assume that it is a report dir
        # and we are in local compare mode.
        run_info = check_run_names(client, [args.basename])
        report_dir_mode = True
    else:
        run_info = check_run_names(client, [args.basename, args.newname])
        newid = run_info[args.newname][0]

    baseid = run_info[args.basename][0]
    results = []
    if report_dir_mode:
        diff_type = 'new'
        if 'unresolved' in args:
            diff_type = 'unresolved'
        elif 'resolved' in args:
            diff_type = 'resolved'
        results = getDiffReportDir(client.getRunResults, baseid,
                                   os.path.abspath(args.newname),
                                   args.suppressed, diff_type)
    else:
        if 'new' in args:
            results = getDiffResults(client.getNewResults, baseid, newid,
                                     args.suppressed)
        elif 'unresolved' in args:
            results = getDiffResults(client.getUnresolvedResults, baseid,
                                     newid, args.suppressed)
        elif 'resolved' in args:
            results = getDiffResults(client.getResolvedResults, baseid, newid,
                                     args.suppressed)

    printReports(client, results, args.output_format)
Example #26
def handle_suppress(args):
    def update_suppression_comment(run_id, report_id, comment):
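        # Re-suppressing the bug replaces the previously stored suppression
        # comment.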
        client.unSuppressBug([run_id], report_id)
        client.suppressBug([run_id], report_id, comment)

    def bug_hash_filter(bug_id, filepath):
        filepath = '%' + filepath
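        # Prepend a wildcard so reports are matched by file path suffix.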
        return [
            codeCheckerDBAccess.ttypes.ReportFilter(bugHash=bug_id,
                                                    suppressed=True,
                                                    filepath=filepath),
            codeCheckerDBAccess.ttypes.ReportFilter(bugHash=bug_id,
                                                    suppressed=False,
                                                    filepath=filepath)
        ]

    already_suppressed = "Bug {} in file {} already suppressed. Use '--force'!"
    limit = codeCheckerDBAccess.constants.MAX_QUERY_SIZE

    client = setup_client(args.host, args.port, '/')

    run_info = check_run_names(client, [args.name])
    run_id, run_date = run_info.get(args.name)

    if 'output' in args:
        for suppression in client.getSuppressedBugs(run_id):
            suppress_file_handler.write_to_suppress_file(
                args.output, suppression.bug_hash, suppression.file_name,
                suppression.comment)

    elif 'input' in args:
        with open(args.input) as supp_file:
            suppress_data = suppress_file_handler.get_suppress_data(supp_file)

        for bug_id, file_name, comment in suppress_data:
            reports = client.getRunResults(run_id, limit, 0, None,
                                           bug_hash_filter(bug_id, file_name))

            for report in reports:
                if report.suppressed and 'force' not in args:
                    print(already_suppressed.format(bug_id, file_name))
                else:
                    update_suppression_comment(run_id, report.reportId,
                                               comment)

    elif 'bugid' in args:
        reports = client.getRunResults(
            run_id, limit, 0, None,
            bug_hash_filter(args.bugid, args.file if 'file' in args else ""))

        for report in reports:
            if 'unsuppress' in args:
                client.unSuppressBug([run_id], report.reportId)
            elif report.suppressed and 'force' not in args:
                print(
                    already_suppressed.format(
                        args.bugid, args.file if 'file' in args else ""))
            else:
                update_suppression_comment(
                    run_id, report.reportId,
                    args.comment if 'comment' in args else "")