def get_diff_remote_runs(client, basename, newname):
    """Compare two runs stored on the CodeChecker server.

    Resolves ``basename`` and ``newname`` to run ids / run tags, asks the
    server for the diff results page by page, and returns the collected
    reports together with the resolved run name lists.

    Returns:
        tuple: (all diff reports, base run names, new run names).
    """
    run_filter = ttypes.ReportFilter()
    # NOTE(review): 'args' is not a parameter of this function -- it is
    # presumably a module-level name in the original file; confirm before
    # reusing this function elsewhere.
    add_filter_conditions(client, run_filter, args)

    base_ids, base_run_names, base_run_tags = process_run_arg(basename)
    run_filter.runTag = base_run_tags

    compare_data = ttypes.CompareData()
    compare_data.diffType = get_diff_type()

    new_ids, new_run_names, new_run_tags = process_run_arg(newname)
    compare_data.runIds = new_ids
    compare_data.runTag = new_run_tags

    # When only NEW reports are requested, resolved bugs are never relevant,
    # so restrict the detection statuses up front.
    if compare_data.diffType == ttypes.DiffType.NEW:
        run_filter.detectionStatus = [ttypes.DetectionStatus.NEW,
                                      ttypes.DetectionStatus.UNRESOLVED,
                                      ttypes.DetectionStatus.REOPENED]

    sort_order = [ttypes.SortMode(ttypes.SortType.FILENAME, ttypes.Order.ASC)]
    page_size = constants.MAX_QUERY_SIZE

    # Page through the results until the server returns an empty page.
    collected = []
    page_start = 0
    while True:
        page = client.getRunResults(base_ids, page_size, page_start,
                                    sort_order, run_filter, compare_data)
        if not page:
            break
        collected.extend(page)
        page_start += page_size

    return collected, base_run_names, new_run_names
def handle_diff_results(args):
    """Entry point for the 'CodeChecker cmd diff' subcommand.

    Compares a baseline run on the server either against another remote run
    or against a local plist report directory, then prints the differing
    reports in the requested output format.
    """
    context = generic_package_context.get_context()

    def get_diff_results(client, baseids, cmp_data):
        """Fetch all diff results from the server, paging through them."""
        report_filter = ttypes.ReportFilter()
        add_filter_conditions(report_filter, args.filter)

        # Do not show resolved bugs in compare mode new.
        if cmp_data.diffType == ttypes.DiffType.NEW:
            report_filter.detectionStatus = [
                ttypes.DetectionStatus.NEW,
                ttypes.DetectionStatus.UNRESOLVED,
                ttypes.DetectionStatus.REOPENED]

        sort_mode = [(ttypes.SortMode(ttypes.SortType.FILENAME,
                                      ttypes.Order.ASC))]
        limit = constants.MAX_QUERY_SIZE
        offset = 0

        all_results = []
        results = client.getRunResults(baseids, limit, offset, sort_mode,
                                       report_filter, cmp_data)

        # The server caps each response at 'limit'; keep asking until an
        # empty page comes back.
        while results:
            all_results.extend(results)
            offset += limit
            results = client.getRunResults(baseids, limit, offset, sort_mode,
                                           report_filter, cmp_data)
        return all_results

    def get_report_dir_results(reportdir):
        """Parse every .plist file in 'reportdir' into Report objects."""
        all_reports = []
        for filename in os.listdir(reportdir):
            if filename.endswith(".plist"):
                file_path = os.path.join(reportdir, filename)
                LOG.debug("Parsing:" + file_path)
                try:
                    files, reports = plist_parser.parse_plist(file_path)
                    for report in reports:
                        # Resolve the file index stored in the plist to the
                        # actual file name for later display/suppression.
                        report.main['location']['file_name'] = \
                            files[int(report.main['location']['file'])]
                    all_reports.extend(reports)
                except Exception as ex:
                    # Best effort: a broken plist is logged and skipped so a
                    # single bad file does not abort the whole diff.
                    LOG.error('The generated plist is not valid!')
                    LOG.error(ex)
        return all_reports

    def get_line_from_file(filename, lineno):
        """Return line 'lineno' (1-based) of a local file, or ""."""
        with open(filename, 'r') as f:
            i = 1
            for line in f:
                if i == lineno:
                    return line
                i += 1
        return ""

    def get_line_from_remote_file(client, fid, lineno):
        """Return line 'lineno' (1-based) of a server-stored file, or ""."""
        # Thrift Python client cannot decode JSONs that contain non '\u00??'
        # characters, so we instead ask for a Base64-encoded version.
        source = client.getSourceFileData(fid, True, ttypes.Encoding.BASE64)
        lines = base64.b64decode(source.fileContent).split('\n')
        return "" if len(lines) < lineno else lines[lineno - 1]

    def get_diff_report_dir(client, baseids, report_dir, diff_type):
        """Diff the baseline runs on the server against a local report dir.

        'diff_type' is one of 'new', 'resolved', 'unresolved'.
        """
        report_filter = ttypes.ReportFilter()
        add_filter_conditions(report_filter, args.filter)

        sort_mode = [(ttypes.SortMode(ttypes.SortType.FILENAME,
                                      ttypes.Order.ASC))]
        limit = constants.MAX_QUERY_SIZE
        offset = 0

        # Download the full baseline result set from the server, page by
        # page, so it can be matched against the local reports by bug hash.
        base_results = []
        results = client.getRunResults(baseids, limit, offset, sort_mode,
                                       report_filter, None)
        while results:
            base_results.extend(results)
            offset += limit
            results = client.getRunResults(baseids, limit, offset, sort_mode,
                                           report_filter, None)
        base_hashes = {}
        for res in base_results:
            base_hashes[res.bugHash] = res

        filtered_reports = []
        new_results = get_report_dir_results(report_dir)
        new_hashes = {}
        suppressed_in_code = []

        for rep in new_results:
            bughash = rep.main['issue_hash_content_of_line_in_context']
            source_file = rep.main['location']['file_name']
            bug_line = rep.main['location']['line']
            new_hashes[bughash] = rep
            # Source-code suppression comments hide a bug from the diff.
            sp_handler = suppress_handler.SourceSuppressHandler(
                source_file,
                bug_line,
                bughash,
                rep.main['check_name'])
            if sp_handler.get_suppressed():
                suppressed_in_code.append(bughash)
                # NOTE(review): message is missing separator spaces around
                # "is suppressed" / "file:" -- kept as-is (runtime string).
                LOG.debug("Bug " + bughash +
                          "is suppressed in code. file:" + source_file +
                          "Line " + str(bug_line))

        if diff_type == 'new':
            # Shows new reports from the report dir
            # which are not present in the baseline (server)
            # and not suppressed in the code.
            for result in new_results:
                if not (result.main['issue_hash_content_of_line_in_context']
                        in base_hashes) and \
                   not (result.main['issue_hash_content_of_line_in_context']
                        in suppressed_in_code):
                    filtered_reports.append(result)
        elif diff_type == 'resolved':
            # Show bugs in the baseline (server)
            # which are not present in the report dir
            # or suppressed.
            for result in base_results:
                if not (result.bugHash in new_hashes) or \
                        (result.bugHash in suppressed_in_code):
                    filtered_reports.append(result)
        elif diff_type == 'unresolved':
            # Shows bugs in the report dir
            # that are not suppressed and
            # which are also present in the baseline (server)
            for result in new_results:
                new_hash = result.main['issue_hash_content_of_line_in_context']
                if new_hash in base_hashes and \
                        not (new_hash in suppressed_in_code):
                    filtered_reports.append(result)
        return filtered_reports

    def cached_report_file_lookup(file_cache, file_id):
        """
        Get source file data for the given file and caches it in a file cache
        if file data is not found in the cache. Finally, it returns the source
        file data from the cache.
        """
        # NOTE(review): 'client' is taken from the enclosing scope here,
        # unlike the sibling helpers which receive it as a parameter.
        if file_id not in file_cache:
            source = client.getSourceFileData(file_id, True,
                                              ttypes.Encoding.BASE64)
            file_content = base64.b64decode(source.fileContent)
            file_cache[file_id] = {'id': file_id,
                                   'path': source.filePath,
                                   'content': file_content}
        return file_cache[file_id]

    def get_report_data(client, reports, file_cache):
        """
        Returns necessary report files and report data events for the HTML
        plist parser.
        """
        file_sources = {}
        report_data = []

        for report in reports:
            file_sources[report.fileId] = cached_report_file_lookup(
                file_cache, report.fileId)

            details = client.getReportDetails(report.reportId)
            events = []
            for index, event in enumerate(details.pathEvents):
                file_sources[event.fileId] = cached_report_file_lookup(
                    file_cache, event.fileId)

                events.append({'line': event.startLine,
                               'col': event.startCol,
                               'file': event.fileId,
                               'msg': event.msg,
                               'step': index + 1})
            report_data.append(events)

        return {'files': file_sources,
                'reports': report_data}

    def report_to_html(client, reports, output_dir):
        """
        Generate HTML output files for the given reports in the given output
        directory by using the Plist To HTML parser.
        """
        html_builder = PlistToHtml.HtmlBuilder(context.path_plist_to_html_dist)

        # Group reports by file so one HTML page is emitted per source file.
        file_report_map = defaultdict(list)
        for report in reports:
            file_report_map[report.fileId].append(report)

        file_cache = {}
        for file_id, file_reports in file_report_map.items():
            checked_file = file_reports[0].checkedFile
            filename = os.path.basename(checked_file)

            report_data = get_report_data(client, file_reports, file_cache)

            # The file id suffix keeps pages unique for same-named files.
            output_path = os.path.join(output_dir,
                                       filename + '_' + str(file_id) +
                                       '.html')
            html_builder.create(output_path, report_data)
            print('Html file was generated for file://{0}: file://{1}'.format(
                checked_file, output_path))

    def print_reports(client, reports, output_format, diff_type):
        """Print the diff reports as json/html/plaintext/table output."""
        output_dir = args.export_dir if 'export_dir' in args else None
        if 'clean' in args and os.path.isdir(output_dir):
            print("Previous analysis results in '{0}' have been removed, "
                  "overwriting with current results.".format(output_dir))
            shutil.rmtree(output_dir)

        if output_format == 'json':
            output = []
            for report in reports:
                if isinstance(report, Report):
                    output.append(report.main)
                else:
                    output.append(report)
            print(CmdLineOutputEncoder().encode(output))
            return

        if output_format == 'html':
            if len(reports) == 0:
                print('No {0} reports was found!'.format(diff_type))
                return

            output_dir = args.export_dir
            if not os.path.exists(output_dir):
                os.makedirs(output_dir)

            print("Generating HTML output files to file://{0} directory:\n".
                  format(output_dir))

            report_to_html(client, reports, output_dir)

            print('\nTo view the results in a browser run:\n'
                  ' $ firefox {0}'.format(args.export_dir))
            return

        header = ['File', 'Checker', 'Severity', 'Msg', 'Source']
        rows = []
        for report in reports:
            # Local plist reports (Report) and server results (thrift
            # ReportData) expose the same information via different fields.
            if type(report) is Report:
                bug_line = report.main['location']['line']
                bug_col = report.main['location']['col']
                sev = 'unknown'
                checked_file = report.main['location']['file_name']\
                    + ':' + str(bug_line) + ":" + str(bug_col)
                check_name = report.main['check_name']
                check_msg = report.main['description']
                source_line =\
                    get_line_from_file(report.main['location']['file_name'],
                                       bug_line)
            else:
                bug_line = report.line
                bug_col = report.column
                sev = ttypes.Severity._VALUES_TO_NAMES[report.severity]
                checked_file = report.checkedFile + ':' + str(bug_line) +\
                    ":" + str(bug_col)
                source_line =\
                    get_line_from_remote_file(client, report.fileId, bug_line)
                check_name = report.checkerId
                check_msg = report.checkerMsg
            rows.append(
                (checked_file, check_name, sev, check_msg, source_line))

        if output_format == 'plaintext':
            for row in rows:
                print("{0}: {1} [{2}]\n{3}\n".format(row[0], row[3], row[1],
                                                     row[4]))
        else:
            print(twodim_to_str(output_format, header, rows))

    client = setup_client(args.product_url)

    report_dir_mode = False
    if os.path.isdir(args.newname):
        # If newname is a valid directory we assume that it is a report dir
        # and we are in local compare mode.
        report_dir_mode = True
    else:
        run_info = check_run_names(client, [args.newname])
        newid = run_info[args.newname].runId

    try:
        # Treat the basename as an (anchored) regex over run names.
        basename_regex = '^' + args.basename + '$'
        base_runs = filter(lambda run: re.match(basename_regex, run.name),
                           client.getRunData(None))
        base_ids = map(lambda run: run.runId, base_runs)
    except re.error:
        LOG.error('Invalid regex format in ' + args.basename)
        sys.exit(1)

    # NOTE(review): len() on filter/map results assumes Python 2 list
    # semantics; under Python 3 these are iterators -- TODO confirm the
    # interpreter version this module targets.
    if len(base_ids) == 0:
        LOG.warning("No run names match the given pattern: " + args.basename)
        sys.exit(1)

    LOG.info("Matching against runs: " +
             ', '.join(map(lambda run: run.name, base_runs)))

    diff_type = 'new'
    if 'unresolved' in args:
        diff_type = 'unresolved'
    elif 'resolved' in args:
        diff_type = 'resolved'

    results = []
    if report_dir_mode:
        # NOTE(review): diff_type is recomputed here identically to the
        # block above -- redundant but harmless.
        diff_type = 'new'
        if 'unresolved' in args:
            diff_type = 'unresolved'
        elif 'resolved' in args:
            diff_type = 'resolved'
        results = get_diff_report_dir(client, base_ids,
                                      os.path.abspath(args.newname),
                                      diff_type)
    else:
        cmp_data = ttypes.CompareData(runIds=[newid])
        if 'new' in args:
            cmp_data.diffType = ttypes.DiffType.NEW
        elif 'unresolved' in args:
            cmp_data.diffType = ttypes.DiffType.UNRESOLVED
        elif 'resolved' in args:
            cmp_data.diffType = ttypes.DiffType.RESOLVED
        results = get_diff_results(client, base_ids, cmp_data)

    if len(results) == 0:
        LOG.info("No results.")
    else:
        print_reports(client, results, args.output_format, diff_type)
def handle_diff_results(args):
    """Entry point for the 'CodeChecker cmd diff' subcommand.

    Compares baseline run(s) on the server against another set of remote
    runs or against a local plist report directory, then prints the
    differing reports in the requested output format.
    """
    init_logger(args.verbose if 'verbose' in args else None)

    context = generic_package_context.get_context()

    def get_diff_results(client, baseids, cmp_data):
        """Fetch all diff results from the server, paging through them."""
        report_filter = ttypes.ReportFilter()
        add_filter_conditions(report_filter, args.filter)

        # Do not show resolved bugs in compare mode new.
        if cmp_data.diffType == ttypes.DiffType.NEW:
            report_filter.detectionStatus = [
                ttypes.DetectionStatus.NEW,
                ttypes.DetectionStatus.UNRESOLVED,
                ttypes.DetectionStatus.REOPENED]

        sort_mode = [(ttypes.SortMode(ttypes.SortType.FILENAME,
                                      ttypes.Order.ASC))]
        limit = constants.MAX_QUERY_SIZE
        offset = 0

        all_results = []
        results = client.getRunResults(baseids, limit, offset, sort_mode,
                                       report_filter, cmp_data)

        # The server caps each response at 'limit'; keep asking until an
        # empty page comes back.
        while results:
            all_results.extend(results)
            offset += limit
            results = client.getRunResults(baseids, limit, offset, sort_mode,
                                           report_filter, cmp_data)
        return all_results

    def get_report_dir_results(reportdir):
        """Parse every .plist file in 'reportdir' into Report objects."""
        all_reports = []
        for filename in os.listdir(reportdir):
            if filename.endswith(".plist"):
                file_path = os.path.join(reportdir, filename)
                LOG.debug("Parsing:" + file_path)
                try:
                    files, reports = plist_parser.parse_plist(file_path)
                    for report in reports:
                        # Resolve the file index stored in the plist to the
                        # actual file name for later display/suppression.
                        report.main['location']['file_name'] = \
                            files[int(report.main['location']['file'])]
                    all_reports.extend(reports)
                except Exception as ex:
                    # Best effort: a broken plist is logged and skipped so a
                    # single bad file does not abort the whole diff.
                    LOG.error('The generated plist is not valid!')
                    LOG.error(ex)
        return all_reports

    def get_line_from_file(filename, lineno):
        """Return line 'lineno' (1-based) of a local file, or ""."""
        with open(filename, 'r') as f:
            i = 1
            for line in f:
                if i == lineno:
                    return line
                i += 1
        return ""

    def get_diff_base_results(client, baseids, base_hashes,
                              suppressed_hashes):
        """Fetch baseline results restricted to the given report hashes."""
        base_results = []
        report_filter = ttypes.ReportFilter()
        add_filter_conditions(report_filter, args.filter)

        sort_mode = [(ttypes.SortMode(ttypes.SortType.FILENAME,
                                      ttypes.Order.ASC))]
        limit = constants.MAX_QUERY_SIZE
        offset = 0

        # Ask only for reports whose hash is in either list; paged as above.
        report_filter.reportHash = base_hashes + suppressed_hashes
        results = client.getRunResults(baseids, limit, offset, sort_mode,
                                       report_filter, None)
        while results:
            base_results.extend(results)
            offset += limit
            results = client.getRunResults(baseids, limit, offset,
                                           sort_mode, report_filter, None)
        return base_results

    def get_diff_report_dir(client, baseids, report_dir, cmp_data):
        """Diff the baseline runs on the server against a local report dir."""
        filtered_reports = []
        report_dir_results = get_report_dir_results(report_dir)
        new_hashes = {}
        suppressed_in_code = []

        for rep in report_dir_results:
            bughash = rep.main['issue_hash_content_of_line_in_context']
            source_file = rep.main['location']['file_name']
            bug_line = rep.main['location']['line']
            checker_name = rep.main['check_name']

            new_hashes[bughash] = rep
            # A single matching source-code comment suppresses the bug;
            # multiple matching comments are ambiguous and ignored.
            sc_handler = SourceCodeCommentHandler(source_file)
            src_comment_data = sc_handler.filter_source_line_comments(
                bug_line,
                checker_name)

            if len(src_comment_data) == 1:
                suppressed_in_code.append(bughash)
                # NOTE(review): message is missing separator spaces around
                # "is suppressed" / "file:" -- kept as-is (runtime string).
                LOG.debug("Bug " + bughash +
                          "is suppressed in code. file:" + source_file +
                          "Line " + str(bug_line))
            elif len(src_comment_data) > 1:
                LOG.warning("Multiple source code comment can be found "
                            "for '{0}' checker in '{1}' at line {2}. "
                            "This bug will not be suppressed!".format(
                                checker_name, source_file, bug_line))

        # Let the server compute which of the local hashes are NEW /
        # UNRESOLVED / RESOLVED relative to the baseline runs.
        base_hashes = client.getDiffResultsHash(baseids,
                                                new_hashes.keys(),
                                                cmp_data.diffType)

        if cmp_data.diffType == ttypes.DiffType.NEW or \
                cmp_data.diffType == ttypes.DiffType.UNRESOLVED:
            # Shows reports from the report dir which are not present in the
            # baseline (NEW reports) or appear in both side (UNRESOLVED
            # reports) and not suppressed in the code.
            for result in report_dir_results:
                h = result.main['issue_hash_content_of_line_in_context']
                if h in base_hashes and h not in suppressed_in_code:
                    filtered_reports.append(result)
        elif cmp_data.diffType == ttypes.DiffType.RESOLVED:
            # Show bugs in the baseline (server) which are not present in the
            # report dir or suppressed.
            results = get_diff_base_results(client, baseids, base_hashes,
                                            suppressed_in_code)
            for result in results:
                filtered_reports.append(result)

        return filtered_reports

    def cached_report_file_lookup(file_cache, file_id):
        """
        Get source file data for the given file and caches it in a file cache
        if file data is not found in the cache. Finally, it returns the source
        file data from the cache.
        """
        # NOTE(review): 'client' is taken from the enclosing scope here,
        # unlike the sibling helpers which receive it as a parameter.
        if file_id not in file_cache:
            source = client.getSourceFileData(file_id, True,
                                              ttypes.Encoding.BASE64)
            file_content = base64.b64decode(source.fileContent)
            file_cache[file_id] = {'id': file_id,
                                   'path': source.filePath,
                                   'content': file_content}
        return file_cache[file_id]

    def get_report_data(client, reports, file_cache):
        """
        Returns necessary report files and report data events for the HTML
        plist parser.
        """
        file_sources = {}
        report_data = []

        for report in reports:
            file_sources[report.fileId] = cached_report_file_lookup(
                file_cache, report.fileId)

            details = client.getReportDetails(report.reportId)
            events = []
            for index, event in enumerate(details.pathEvents):
                file_sources[event.fileId] = cached_report_file_lookup(
                    file_cache, event.fileId)

                events.append({'line': event.startLine,
                               'col': event.startCol,
                               'file': event.fileId,
                               'msg': event.msg,
                               'step': index + 1})
            report_data.append(events)

        return {'files': file_sources,
                'reports': report_data}

    def reports_to_report_data(reports):
        """
        Converts reports from Report class from one plist file to report data
        events for the HTML plist parser.
        """
        file_sources = {}
        fname_to_fid = {}
        report_data = []
        findex = 0

        for report in reports:
            # Not all report in this list may refer to the same files
            # thus we need to create a single file list with
            # all files from all reports.
            for f in report.files:
                if f not in fname_to_fid:
                    try:
                        content = open(f, 'r').read()
                    except (OSError, IOError):
                        content = f + " NOT FOUND."
                    file_sources[findex] = {'id': findex,
                                            'path': f,
                                            'content': content}
                    fname_to_fid[f] = findex
                    findex += 1

            events = []
            pathElements = report.bug_path
            index = 1
            for element in pathElements:
                if element['kind'] == 'event':
                    # Remap the per-plist file index to the merged file list.
                    fname = report.files[element['location']['file']]
                    new_fid = fname_to_fid[fname]
                    events.append({'line': element['location']['line'],
                                   'col': element['location']['col'],
                                   'file': new_fid,
                                   'msg': element['message'],
                                   'step': index})
                    index += 1
            report_data.append(events)

        return {'files': file_sources,
                'reports': report_data}

    def report_to_html(client, reports, output_dir):
        """
        Generate HTML output files for the given reports in the given output
        directory by using the Plist To HTML parser.
        """
        html_builder = PlistToHtml.HtmlBuilder(context.path_plist_to_html_dist)

        # Group reports by checked file path; reports may be local (Report)
        # or remote (thrift ReportData) objects.
        file_report_map = defaultdict(list)
        for report in reports:
            file_path = ""
            if isinstance(report, Report):
                file_path = report.main['location']['file_name']
            else:
                file_path = report.checkedFile
            file_report_map[file_path].append(report)

        file_cache = {}
        for file_path, file_reports in file_report_map.items():
            checked_file = file_path
            filename = os.path.basename(checked_file)
            # Short path hash keeps output names unique for same-named files.
            # NOTE(review): hashlib.md5 on a str assumes Python 2; Python 3
            # would require bytes -- TODO confirm target interpreter.
            h = int(hashlib.md5(file_path).hexdigest(), 16) % (10**8)

            if isinstance(file_reports[0], Report):
                report_data = reports_to_report_data(file_reports)
            else:
                report_data = get_report_data(client, file_reports,
                                              file_cache)

            output_path = os.path.join(output_dir,
                                       filename + '_' + str(h) + '.html')
            html_builder.create(output_path, report_data)
            print('Html file was generated for file://{0}: file://{1}'.format(
                checked_file, output_path))

    def print_reports(client, reports, output_format):
        """Print the diff reports as json/html/plaintext/table output."""
        output_dir = args.export_dir if 'export_dir' in args else None
        if 'clean' in args and os.path.isdir(output_dir):
            print("Previous analysis results in '{0}' have been removed, "
                  "overwriting with current results.".format(output_dir))
            shutil.rmtree(output_dir)

        if output_format == 'json':
            output = []
            for report in reports:
                if isinstance(report, Report):
                    output.append(report.main)
                else:
                    output.append(report)
            print(CmdLineOutputEncoder().encode(output))
            return

        if output_format == 'html':
            output_dir = args.export_dir
            if not os.path.exists(output_dir):
                os.makedirs(output_dir)

            print("Generating HTML output files to file://{0} directory:\n".
                  format(output_dir))

            report_to_html(client, reports, output_dir)

            print('\nTo view the results in a browser run:\n'
                  ' $ firefox {0}'.format(args.export_dir))
            return

        header = ['File', 'Checker', 'Severity', 'Msg', 'Source']
        rows = []

        # Collect the source lines of all remote reports so they can be
        # fetched from the server in a single batched request.
        source_lines = defaultdict(set)
        for report in reports:
            if not isinstance(report, Report):
                source_lines[report.fileId].add(report.line)

        lines_in_files_requested = []
        for key in source_lines:
            lines_in_files_requested.append(
                ttypes.LinesInFilesRequested(fileId=key,
                                             lines=source_lines[key]))

        source_line_contents = client.getLinesInSourceFileContents(
            lines_in_files_requested, ttypes.Encoding.BASE64)

        for report in reports:
            if isinstance(report, Report):
                # report is coming from a plist file.
                bug_line = report.main['location']['line']
                bug_col = report.main['location']['col']
                sev = 'unknown'
                checked_file = report.main['location']['file_name']\
                    + ':' + str(bug_line) + ":" + str(bug_col)
                check_name = report.main['check_name']
                check_msg = report.main['description']
                source_line =\
                    get_line_from_file(report.main['location']['file_name'],
                                       bug_line)
            else:
                # report is of ReportData type coming from CodeChecker
                # server.
                bug_line = report.line
                bug_col = report.column
                sev = ttypes.Severity._VALUES_TO_NAMES[report.severity]
                checked_file = report.checkedFile + ':' + str(bug_line) +\
                    ":" + str(bug_col)
                source_line = base64.b64decode(
                    source_line_contents[report.fileId][bug_line])
                check_name = report.checkerId
                check_msg = report.checkerMsg
            rows.append(
                (checked_file, check_name, sev, check_msg, source_line))

        if output_format == 'plaintext':
            for row in rows:
                print("{0}: {1} [{2}]\n{3}\n".format(row[0], row[3], row[1],
                                                     row[4]))
        else:
            print(twodim_to_str(output_format, header, rows))

    client = setup_client(args.product_url)

    base_runs = get_runs(client, [args.basename])
    base_ids = map(lambda run: run.runId, base_runs)
    # NOTE(review): len() on a map result assumes Python 2 list semantics;
    # under Python 3 map returns an iterator -- TODO confirm interpreter.
    if len(base_ids) == 0:
        LOG.warning("No run names match the given pattern: " + args.basename)
        sys.exit(1)

    LOG.info("Matching base runs: " +
             ', '.join(map(lambda run: run.name, base_runs)))

    cmp_data = ttypes.CompareData()
    if 'new' in args:
        cmp_data.diffType = ttypes.DiffType.NEW
    elif 'unresolved' in args:
        cmp_data.diffType = ttypes.DiffType.UNRESOLVED
    elif 'resolved' in args:
        cmp_data.diffType = ttypes.DiffType.RESOLVED

    results = []
    if os.path.isdir(args.newname):
        # If newname is a valid directory we assume that it is a report dir
        # and we are in local compare mode.
        results = get_diff_report_dir(client, base_ids,
                                      os.path.abspath(args.newname),
                                      cmp_data)
    else:
        new_runs = get_runs(client, [args.newname])
        cmp_data.runIds = map(lambda run: run.runId, new_runs)
        if len(new_runs) == 0:
            LOG.warning("No run names match the given pattern: " +
                        args.newname)
            sys.exit(1)

        LOG.info("Matching new runs: " +
                 ', '.join(map(lambda run: run.name, new_runs)))

        results = get_diff_results(client, base_ids, cmp_data)

    if len(results) == 0:
        LOG.info("No results.")
    else:
        print_reports(client, results, args.output_format)
def handle_diff_results(args):
    """Entry point for the 'CodeChecker cmd diff' subcommand.

    Compares a baseline run on the server either against another remote run
    or against a local plist report directory, then prints the differing
    reports in the requested output format.
    """

    def getDiffResults(client, baseids, cmp_data):
        """Fetch all diff results from the server, paging through them."""
        report_filter = ttypes.ReportFilter()
        add_filter_conditions(report_filter, args.filter)

        # Do not show resolved bugs in compare mode new.
        if cmp_data.diffType == ttypes.DiffType.NEW:
            report_filter.detectionStatus = [
                ttypes.DetectionStatus.NEW,
                ttypes.DetectionStatus.UNRESOLVED,
                ttypes.DetectionStatus.REOPENED]

        sort_mode = [(ttypes.SortMode(
            ttypes.SortType.FILENAME,
            ttypes.Order.ASC))]
        limit = constants.MAX_QUERY_SIZE
        offset = 0

        all_results = []
        results = client.getRunResults(baseids, limit, offset, sort_mode,
                                       report_filter, cmp_data)

        # The server caps each response at 'limit'; keep asking until an
        # empty page comes back.
        while results:
            all_results.extend(results)
            offset += limit
            results = client.getRunResults(baseids, limit, offset, sort_mode,
                                           report_filter, cmp_data)
        return all_results

    def getReportDirResults(reportdir):
        """Parse every .plist file in 'reportdir' into Report objects."""
        all_reports = []
        for filename in os.listdir(reportdir):
            if filename.endswith(".plist"):
                file_path = os.path.join(reportdir, filename)
                LOG.debug("Parsing:" + file_path)
                try:
                    files, reports = plist_parser.parse_plist(file_path)
                    for report in reports:
                        # Resolve the file index stored in the plist to the
                        # actual file name for later display/suppression.
                        report.main['location']['file_name'] = \
                            files[int(report.main['location']['file'])]
                    all_reports.extend(reports)
                except Exception as ex:
                    # Best effort: a broken plist is logged and skipped so a
                    # single bad file does not abort the whole diff.
                    LOG.error('The generated plist is not valid!')
                    LOG.error(ex)
        return all_reports

    def getLineFromFile(filename, lineno):
        """Return line 'lineno' (1-based) of a local file, or ""."""
        with open(filename, 'r') as f:
            i = 1
            for line in f:
                if i == lineno:
                    return line
                i += 1
        return ""

    def getLineFromRemoteFile(client, fid, lineno):
        """Return line 'lineno' (1-based) of a server-stored file, or ""."""
        # Thrift Python client cannot decode JSONs that contain non '\u00??'
        # characters, so we instead ask for a Base64-encoded version.
        source = client.getSourceFileData(fid, True, ttypes.Encoding.BASE64)
        lines = base64.b64decode(source.fileContent).split('\n')
        return "" if len(lines) < lineno else lines[lineno - 1]

    def getDiffReportDir(client, baseids, report_dir, diff_type):
        """Diff the baseline runs on the server against a local report dir.

        'diff_type' is one of 'new', 'resolved', 'unresolved'.
        """
        report_filter = ttypes.ReportFilter()
        add_filter_conditions(report_filter, args.filter)

        sort_mode = [(ttypes.SortMode(
            ttypes.SortType.FILENAME,
            ttypes.Order.ASC))]
        limit = constants.MAX_QUERY_SIZE
        offset = 0

        # Download the full baseline result set from the server, page by
        # page, so it can be matched against the local reports by bug hash.
        base_results = []
        results = client.getRunResults(baseids, limit, offset, sort_mode,
                                       report_filter, None)
        while results:
            base_results.extend(results)
            offset += limit
            results = client.getRunResults(baseids, limit, offset, sort_mode,
                                           report_filter, None)
        base_hashes = {}
        for res in base_results:
            base_hashes[res.bugHash] = res

        filtered_reports = []
        new_results = getReportDirResults(report_dir)
        new_hashes = {}
        suppressed_in_code = []

        for rep in new_results:
            bughash = rep.main['issue_hash_content_of_line_in_context']
            source_file = rep.main['location']['file_name']
            bug_line = rep.main['location']['line']
            new_hashes[bughash] = rep
            # Source-code suppression comments hide a bug from the diff.
            sp_handler = suppress_handler.SourceSuppressHandler(
                source_file,
                bug_line,
                bughash,
                rep.main['check_name'])
            if sp_handler.get_suppressed():
                suppressed_in_code.append(bughash)
                # NOTE(review): message is missing separator spaces around
                # "is suppressed" / "file:" -- kept as-is (runtime string).
                LOG.debug("Bug " + bughash +
                          "is suppressed in code. file:" + source_file +
                          "Line " + str(bug_line))

        if diff_type == 'new':
            # Shows new reports from the report dir
            # which are not present in the baseline (server)
            # and not suppressed in the code.
            for result in new_results:
                if not (result.main['issue_hash_content_of_line_in_context']
                        in base_hashes) and\
                   not (result.main['issue_hash_content_of_line_in_context']
                        in suppressed_in_code):
                    filtered_reports.append(result)
        elif diff_type == 'resolved':
            # Show bugs in the baseline (server)
            # which are not present in the report dir
            # or suppressed.
            for result in base_results:
                if not (result.bugHash in new_hashes) or\
                        (result.bugHash in suppressed_in_code):
                    filtered_reports.append(result)
        elif diff_type == 'unresolved':
            # Shows bugs in the report dir
            # that are not suppressed and
            # which are also present in the baseline (server)
            for result in new_results:
                new_hash = result.main['issue_hash_content_of_line_in_context']
                if new_hash in base_hashes and\
                        not (new_hash in suppressed_in_code):
                    filtered_reports.append(result)
        return filtered_reports

    def printReports(client, reports, output_format):
        """Print the diff reports as json/plaintext/table output."""
        if output_format == 'json':
            output = []
            for report in reports:
                if isinstance(report, Report):
                    output.append(report.main)
                else:
                    output.append(report)
            print(CmdLineOutputEncoder().encode(output))
            return

        header = ['File', 'Checker', 'Severity', 'Msg', 'Source']
        rows = []
        for report in reports:
            # Local plist reports (Report) and server results (thrift
            # ReportData) expose the same information via different fields.
            if type(report) is Report:
                bug_line = report.main['location']['line']
                bug_col = report.main['location']['col']
                sev = 'unknown'
                checked_file = report.main['location']['file_name']\
                    + ':' + str(bug_line) + ":" + str(bug_col)
                check_name = report.main['check_name']
                check_msg = report.main['description']
                source_line =\
                    getLineFromFile(report.main['location']['file_name'],
                                    bug_line)
            else:
                bug_line = report.line
                bug_col = report.column
                sev = ttypes.Severity._VALUES_TO_NAMES[report.severity]
                checked_file = report.checkedFile + ':' + str(bug_line) +\
                    ":" + str(bug_col)
                source_line =\
                    getLineFromRemoteFile(client, report.fileId, bug_line)
                check_name = report.checkerId
                check_msg = report.checkerMsg
            rows.append(
                (checked_file, check_name, sev, check_msg, source_line))

        if output_format == 'plaintext':
            for row in rows:
                print("{0}: {1} [{2}]\n{3}\n".format(row[0], row[3], row[1],
                                                     row[4]))
        else:
            print(twodim_to_str(output_format, header, rows))

    client = setup_client(args.product_url)

    report_dir_mode = False
    if os.path.isdir(args.newname):
        # If newname is a valid directory we assume that it is a report dir
        # and we are in local compare mode.
        report_dir_mode = True
    else:
        run_info = check_run_names(client, [args.newname])
        newid = run_info[args.newname].runId

    try:
        # Treat the basename as an (anchored) regex over run names.
        basename_regex = '^' + args.basename + '$'
        base_runs = filter(lambda run: re.match(basename_regex, run.name),
                           client.getRunData(None))
        base_ids = map(lambda run: run.runId, base_runs)
    except re.error:
        LOG.error('Invalid regex format in ' + args.basename)
        sys.exit(1)

    # NOTE(review): len() on filter/map results assumes Python 2 list
    # semantics; under Python 3 these are iterators -- TODO confirm the
    # interpreter version this module targets.
    if len(base_ids) == 0:
        LOG.warning("No run names match the given pattern: " + args.basename)
        sys.exit(1)

    LOG.info("Matching against runs: " +
             ', '.join(map(lambda run: run.name, base_runs)))

    results = []
    if report_dir_mode:
        diff_type = 'new'
        if 'unresolved' in args:
            diff_type = 'unresolved'
        elif 'resolved' in args:
            diff_type = 'resolved'
        results = getDiffReportDir(client, base_ids,
                                   os.path.abspath(args.newname),
                                   diff_type)
    else:
        cmp_data = ttypes.CompareData(runIds=[newid])
        if 'new' in args:
            cmp_data.diffType = ttypes.DiffType.NEW
        elif 'unresolved' in args:
            cmp_data.diffType = ttypes.DiffType.UNRESOLVED
        elif 'resolved' in args:
            cmp_data.diffType = ttypes.DiffType.RESOLVED
        results = getDiffResults(client, base_ids, cmp_data)

    printReports(client, results, args.output_format)