def __test_html_builder(self, proj):
    """ Test building html file from the given proj's plist file. """
    project_dir = os.path.join(self.test_workspace, 'test_files', proj)
    plist_path = os.path.join(project_dir, proj + '.plist')

    # Convert the parsed plist content to report data for the HTML builder.
    report_data = PlistToHtml.get_report_data_from_plist(
        load_plist_data(plist_path))

    html_dir = os.path.join(project_dir, 'html')
    if not os.path.exists(html_dir):
        os.mkdir(html_dir)

    html_output = os.path.join(html_dir, proj + '.html')

    builder = PlistToHtml.HtmlBuilder(self.layout_dir)
    builder.create(html_output, report_data)
    self.assertTrue(os.path.exists(html_output))

    # Index and statistics pages are generated over the whole directory.
    builder.create_index_html(html_dir)
    builder.create_statistics_html(html_dir)

    self.assertTrue(os.path.exists(os.path.join(html_dir, 'index.html')))
def __test_html_builder(self, proj):
    """ Test building html file from the given proj's plist file.

    Reads <proj>.plist from the project's test directory, converts it to
    HTML and checks that the report, index and statistics files exist.
    """
    proj_dir = os.path.join(self.test_workspace, 'test_files', proj)
    plist_file = os.path.join(proj_dir, proj + '.plist')

    # plistlib.readPlist was deprecated in Python 3.4 and removed in
    # Python 3.9; use plistlib.load on a binary file handle instead.
    with open(plist_file, 'rb') as plist_f:
        plist = plistlib.load(plist_f)

    report_data = PlistToHtml.get_report_data_from_plist(plist)

    output_dir = os.path.join(proj_dir, 'html')
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)

    output_path = os.path.join(output_dir, proj + '.html')

    html_builder = PlistToHtml.HtmlBuilder(self.layout_dir)
    html_builder.create(output_path, report_data)
    self.assertTrue(os.path.exists(output_path))

    html_builder.create_index_html(output_dir)
    html_builder.create_statistics_html(output_dir)

    index_html = os.path.join(output_dir, 'index.html')
    self.assertTrue(os.path.exists(index_html))
def test_get_report_data_simple(self):
    """ Get report data for plist which contains simple reports. """
    project_dir = os.path.join(self.test_workspace, 'test_files', 'simple')
    plist_data = load_plist_data(os.path.join(project_dir, 'simple.plist'))

    result = PlistToHtml.get_report_data_from_plist(plist_data)

    self.assertEqual(len(result['files']), 1)

    reports = result['reports']
    self.assertEqual(len(reports), 2)

    # Index the two reports by checker name for easy lookup.
    by_checker = {r['checkerName']: r for r in reports}

    dead_stores = by_checker['deadcode.DeadStores']
    self.assertEqual(len(dead_stores['notes']), 0)
    self.assertEqual(len(dead_stores['macros']), 0)
    self.assertGreaterEqual(len(dead_stores['events']), 1)

    divide_zero = by_checker['core.DivideZero']
    self.assertEqual(len(divide_zero['notes']), 0)
    self.assertEqual(len(divide_zero['macros']), 0)
    self.assertGreaterEqual(len(divide_zero['events']), 1)
def report_to_html(client, reports, output_dir):
    """
    Generate HTML output files for the given reports in the given output
    directory by using the Plist To HTML parser.

    Accepts both local Report objects and server-side report instances;
    reports are grouped per source file and one HTML page is written for
    each file, followed by an index page for the whole directory.
    """
    html_builder = PlistToHtml.HtmlBuilder(context.path_plist_to_html_dist,
                                           context.severity_map)

    # Group reports by the source file they belong to.
    file_report_map = defaultdict(list)
    for report in reports:
        if isinstance(report, Report):
            file_path = report.main['location']['file_name']
        else:
            file_path = report.checkedFile
        file_report_map[file_path].append(report)

    file_cache = {}
    for file_path, file_reports in file_report_map.items():
        checked_file = file_path
        filename = os.path.basename(checked_file)
        # hashlib.md5 requires bytes: hashing the str directly raises
        # TypeError on Python 3, so encode first. The hash only serves to
        # distinguish same-named files in the output directory.
        h = int(hashlib.md5(
            file_path.encode('utf-8')).hexdigest(), 16) % (10**8)

        if isinstance(file_reports[0], Report):
            report_data = reports_to_report_data(file_reports)
        else:
            report_data = get_report_data(client, file_reports, file_cache)

        output_path = os.path.join(output_dir,
                                   filename + '_' + str(h) + '.html')
        html_builder.create(output_path, report_data)
        print('Html file was generated for file://{0}: file://{1}'.format(
            checked_file, output_path))

    html_builder.create_index_html(output_dir)
def test_get_report_data_macros(self):
    """ Get report data for plist which contains macro expansion. """
    macros_dir = os.path.join(self.test_workspace, 'test_files', 'macros')
    plist_data = load_plist_data(os.path.join(macros_dir, 'macros.plist'))

    result = PlistToHtml.get_report_data_from_plist(plist_data)
    self.assertEqual(len(result['files']), 1)

    reports = result['reports']
    self.assertEqual(len(reports), 1)

    # The single report must carry exactly one macro expansion.
    report = reports[0]
    self.assertEqual(len(report['notes']), 0)
    self.assertEqual(len(report['macros']), 1)
    self.assertGreaterEqual(len(report['events']), 1)
    self.assertEqual(report['checkerName'], 'core.NullDereference')
def test_get_report_data_notes(self):
    """ Get report data for plist which contains notes. """
    notes_dir = os.path.join(self.test_workspace, 'test_files', 'notes')
    plist_data = load_plist_data(os.path.join(notes_dir, 'notes.plist'))

    result = PlistToHtml.get_report_data_from_plist(plist_data)
    self.assertEqual(len(result['files']), 1)

    reports = result['reports']
    self.assertEqual(len(reports), 1)

    # The single report must carry exactly one note.
    report = reports[0]
    self.assertEqual(len(report['notes']), 1)
    self.assertEqual(len(report['macros']), 0)
    self.assertGreaterEqual(len(report['events']), 1)
    self.assertEqual(report['checkerName'], 'alpha.clone.CloneChecker')
def test_get_report_data_macros(self):
    """ Get report data for plist which contains macro expansion. """
    proj_macros = os.path.join(self.test_workspace, 'test_files', 'macros')
    plist_file = os.path.join(proj_macros, 'macros.plist')

    # plistlib.readPlist was deprecated in Python 3.4 and removed in
    # Python 3.9; use plistlib.load on a binary file handle instead.
    with open(plist_file, 'rb') as plist_f:
        plist = plistlib.load(plist_f)

    res = PlistToHtml.get_report_data_from_plist(plist)
    self.assertEqual(len(res['files']), 1)

    reports = res['reports']
    self.assertEqual(len(reports), 1)

    report = reports[0]
    self.assertEqual(len(report['notes']), 0)
    self.assertEqual(len(report['macros']), 1)
    self.assertGreaterEqual(len(report['events']), 1)
    self.assertEqual(report['checkerName'], 'core.NullDereference')
def test_get_report_data_notes(self):
    """ Get report data for plist which contains notes. """
    proj_notes = os.path.join(self.test_workspace, 'test_files', 'notes')
    plist_file = os.path.join(proj_notes, 'notes.plist')

    # plistlib.readPlist was deprecated in Python 3.4 and removed in
    # Python 3.9; use plistlib.load on a binary file handle instead.
    with open(plist_file, 'rb') as plist_f:
        plist = plistlib.load(plist_f)

    res = PlistToHtml.get_report_data_from_plist(plist)
    self.assertEqual(len(res['files']), 1)

    reports = res['reports']
    self.assertEqual(len(reports), 1)

    report = reports[0]
    self.assertEqual(len(report['notes']), 1)
    self.assertEqual(len(report['macros']), 0)
    self.assertGreaterEqual(len(report['events']), 1)
    self.assertEqual(report['checkerName'], 'alpha.clone.CloneChecker')
def report_to_html(client, reports, output_dir):
    """
    Generate HTML output files for the given reports in the given output
    directory by using the Plist To HTML parser.
    """
    builder = PlistToHtml.HtmlBuilder(context.path_plist_to_html_dist)

    # Group the reports by the id of the file they belong to.
    reports_by_file = defaultdict(list)
    for rep in reports:
        reports_by_file[rep.fileId].append(rep)

    source_cache = {}
    for fid, grouped in reports_by_file.items():
        source_path = grouped[0].checkedFile
        base_name = os.path.basename(source_path)

        data = get_report_data(client, grouped, source_cache)

        # The file id keeps same-named files from clobbering each other.
        html_path = os.path.join(output_dir,
                                 base_name + '_' + str(fid) + '.html')
        builder.create(html_path, data)
        print('Html file was generated for file://{0}: file://{1}'.format(
            source_path, html_path))
def test_get_report_data_simple(self):
    """ Get report data for plist which contains simple reports. """
    proj_simple = os.path.join(self.test_workspace, 'test_files', 'simple')
    plist_file = os.path.join(proj_simple, 'simple.plist')

    # plistlib.readPlist was deprecated in Python 3.4 and removed in
    # Python 3.9; use plistlib.load on a binary file handle instead.
    with open(plist_file, 'rb') as plist_f:
        plist = plistlib.load(plist_f)

    res = PlistToHtml.get_report_data_from_plist(plist)
    self.assertEqual(len(res['files']), 1)

    reports = res['reports']
    self.assertEqual(len(reports), 2)

    dead_stores = [r for r in reports
                   if r['checkerName'] == 'deadcode.DeadStores'][0]
    self.assertEqual(len(dead_stores['notes']), 0)
    self.assertEqual(len(dead_stores['macros']), 0)
    self.assertGreaterEqual(len(dead_stores['events']), 1)

    divide_zero = [r for r in reports
                   if r['checkerName'] == 'core.DivideZero'][0]
    self.assertEqual(len(divide_zero['notes']), 0)
    self.assertEqual(len(divide_zero['macros']), 0)
    self.assertGreaterEqual(len(divide_zero['events']), 1)
def main(args):
    """
    Entry point for parsing some analysis results and printing them to the
    stdout in a human-readable format.

    Depending on the arguments the reports are either converted to an
    export format (JSON, HTML, gerrit, ...) or pretty-printed to stdout
    together with summary tables. Exits non-zero on configuration errors
    or when any report was found.
    """
    logger.setup_logger(args.verbose if 'verbose' in args else None)

    # Bail out early if the given configuration file does not exist.
    try:
        cmd_config.check_config_file(args)
    except FileNotFoundError as fnerr:
        LOG.error(fnerr)
        sys.exit(1)

    export = args.export if 'export' in args else None
    if export == 'html' and 'output_path' not in args:
        LOG.error("Argument --export not allowed without argument --output "
                  "when exporting to HTML.")
        sys.exit(1)

    # Gerrit export needs mandatory environment variables to be set.
    if export == 'gerrit' and not gerrit.mandatory_env_var_is_set():
        sys.exit(1)

    context = analyzer_context.get_context()

    # To ensure the help message prints the default folder properly,
    # the 'default' for 'args.input' is a string, not a list.
    # But we need lists for the foreach here to work.
    if isinstance(args.input, str):
        args.input = [args.input]

    original_cwd = os.getcwd()

    src_comment_status_filter = args.review_status

    # Build a suppression handler if a suppress file was given; the file
    # is created on demand when --export-source-suppress is requested.
    suppr_handler = None
    if 'suppress' in args:
        __make_handler = False
        if not os.path.isfile(args.suppress):
            if 'create_suppress' in args:
                with open(args.suppress, 'w',
                          encoding='utf-8', errors='ignore') as _:
                    # Just create the file.
                    __make_handler = True
                    LOG.info("Will write source-code suppressions to "
                             "suppress file: %s", args.suppress)
            else:
                LOG.warning("Suppress file '%s' given, but it does not exist"
                            " -- will not suppress anything.", args.suppress)
        else:
            __make_handler = True
        if __make_handler:
            suppr_handler = suppress_handler.\
                GenericSuppressHandler(args.suppress,
                                       'create_suppress' in args,
                                       src_comment_status_filter)
    elif 'create_suppress' in args:
        LOG.error("Can't use '--export-source-suppress' unless '--suppress "
                  "SUPPRESS_FILE' is also given.")
        sys.exit(2)

    # Path hashes of reports already handled, used for deduplication.
    processed_path_hashes = set()

    skip_handler = None
    if 'skipfile' in args:
        with open(args.skipfile, 'r',
                  encoding='utf-8', errors='ignore') as skip_file:
            skip_handler = SkipListHandler(skip_file.read())

    trim_path_prefixes = args.trim_path_prefix if \
        'trim_path_prefix' in args else None

    if export:
        if export not in EXPORT_TYPES:
            LOG.error(f"Unknown export format: {export}")
            return

        # The HTML part will be handled separately below.
        if export != 'html':
            try:
                res = parse_convert_reports(args.input, export,
                                            context.severity_map,
                                            trim_path_prefixes)
                if 'output_path' in args:
                    output_path = os.path.abspath(args.output_path)

                    if not os.path.exists(output_path):
                        os.mkdir(output_path)

                    reports_json = os.path.join(output_path, 'reports.json')
                    with open(reports_json,
                              mode='w',
                              encoding='utf-8',
                              errors="ignore") as output_f:
                        output_f.write(json.dumps(res))
                    return

                print(json.dumps(res))
            except Exception as ex:
                LOG.error(ex)
                sys.exit(1)
            # NOTE(review): for non-HTML exports without --output the
            # converted reports are printed above and control falls
            # through to the plain-text parsing below -- confirm this
            # fall-through is intended.

    def trim_path_prefixes_handler(source_file):
        """
        Callback to util.trim_path_prefixes to prevent module dependency
        of plist_to_html
        """
        return util.trim_path_prefixes(source_file, trim_path_prefixes)

    html_builder = None

    def skip_html_report_data_handler(report_hash, source_file, report_line,
                                      checker_name, diag, files):
        """
        Report handler which skips bugs which were suppressed by source code
        comments. This function will return a tuple. The first element
        will decide whether the report should be skipped or not and the
        second element will be a list of source code comments related to
        the actual report.
        """
        files_dict = {k: v for k, v in enumerate(files)}
        report = Report({'check_name': checker_name},
                        diag['path'],
                        files_dict,
                        metadata=None)
        path_hash = get_report_path_hash(report)
        if path_hash in processed_path_hashes:
            LOG.debug("Skip report because it is a deduplication of an "
                      "already processed report!")
            LOG.debug("Path hash: %s", path_hash)
            LOG.debug(diag)
            return True, []

        skip, source_code_comments = skip_report(report_hash,
                                                 source_file,
                                                 report_line,
                                                 checker_name,
                                                 suppr_handler,
                                                 src_comment_status_filter)

        if skip_handler:
            skip |= skip_handler.should_skip(source_file)

        if not skip:
            processed_path_hashes.add(path_hash)

        return skip, source_code_comments

    # Aggregated statistics over all input paths.
    file_change = set()
    severity_stats = defaultdict(int)
    file_stats = defaultdict(int)
    report_count = 0

    for input_path in args.input:
        input_path = os.path.abspath(input_path)
        os.chdir(original_cwd)
        LOG.debug("Parsing input argument: '%s'", input_path)

        if export == 'html':
            output_path = os.path.abspath(args.output_path)

            if not html_builder:
                html_builder = \
                    PlistToHtml.HtmlBuilder(context.path_plist_to_html_dist,
                                            context.severity_map)

            LOG.info("Generating html output files:")
            PlistToHtml.parse(input_path,
                              output_path,
                              context.path_plist_to_html_dist,
                              skip_html_report_data_handler,
                              html_builder,
                              trim_path_prefixes_handler)
            continue

        # Collect the plist files to parse: either a single file or every
        # file in the given result directory.
        files = []
        metadata_dict = {}
        if os.path.isfile(input_path):
            files.append(input_path)
        elif os.path.isdir(input_path):
            metadata_file = os.path.join(input_path, "metadata.json")
            if os.path.exists(metadata_file):
                metadata_dict = util.load_json_or_empty(metadata_file)
                LOG.debug(metadata_dict)

                # Relative paths in the reports are resolved against the
                # analysis working directory recorded in the metadata.
                if 'working_directory' in metadata_dict:
                    working_dir = metadata_dict['working_directory']
                    try:
                        os.chdir(working_dir)
                    except OSError as oerr:
                        LOG.debug(oerr)
                        LOG.error("Working directory %s is missing.\n"
                                  "Can not parse reports safely.",
                                  working_dir)
                        sys.exit(1)

            _, _, file_names = next(os.walk(input_path), ([], [], []))
            files = [os.path.join(input_path, file_name)
                     for file_name in file_names]

        file_report_map = defaultdict(list)

        plist_pltf = PlistToPlaintextFormatter(suppr_handler,
                                               skip_handler,
                                               context.severity_map,
                                               processed_path_hashes,
                                               trim_path_prefixes,
                                               src_comment_status_filter)
        plist_pltf.print_steps = 'print_steps' in args

        for file_path in files:
            f_change = parse_with_plt_formatter(file_path,
                                                metadata_dict,
                                                plist_pltf,
                                                file_report_map)
            file_change = file_change.union(f_change)

        # Merge this input path's statistics into the global counters.
        report_stats = plist_pltf.write(file_report_map)
        sev_stats = report_stats.get('severity')
        for severity in sev_stats:
            severity_stats[severity] += sev_stats[severity]

        f_stats = report_stats.get('files')
        for file_path in f_stats:
            file_stats[file_path] += f_stats[file_path]

        rep_stats = report_stats.get('reports')
        report_count += rep_stats.get("report_count", 0)

    # Create index.html and statistics.html for the generated html files.
    if html_builder:
        html_builder.create_index_html(args.output_path)
        html_builder.create_statistics_html(args.output_path)

        print('\nTo view statistics in a browser run:\n> firefox {0}'.format(
            os.path.join(args.output_path, 'statistics.html')))

        print('\nTo view the results in a browser run:\n> firefox {0}'.format(
            os.path.join(args.output_path, 'index.html')))
    else:
        print("\n----==== Summary ====----")
        if file_stats:
            vals = [[os.path.basename(k), v] for k, v in
                    dict(file_stats).items()]
            vals.sort(key=itemgetter(0))
            keys = ['Filename', 'Report count']
            table = twodim.to_str('table', keys, vals, 1, True)
            print(table)

        if severity_stats:
            vals = [[k, v] for k, v in dict(severity_stats).items()]
            vals.sort(key=itemgetter(0))
            keys = ['Severity', 'Report count']
            table = twodim.to_str('table', keys, vals, 1, True)
            print(table)

        print("----=================----")
        print("Total number of reports: {}".format(report_count))
        print("----=================----")

    if file_change:
        changed_files = '\n'.join([' - ' + f for f in file_change])
        LOG.warning("The following source file contents changed since the "
                    "latest analysis:\n%s\nMultiple reports were not "
                    "shown and skipped from the statistics. Please "
                    "analyze your project again to update the "
                    "reports!", changed_files)

    os.chdir(original_cwd)

    # Non-zero exit when any report was found, so CI jobs can fail on it.
    if report_count != 0:
        sys.exit(2)
def main(args):
    """
    Entry point for parsing some analysis results and printing them to the
    stdout in a human-readable format.

    Reports are either converted to HTML (--export html) or pretty-printed
    to stdout with per-input summary tables.
    """
    logger.setup_logger(args.verbose if 'verbose' in args else None)

    context = package_context.get_context()

    # To ensure the help message prints the default folder properly,
    # the 'default' for 'args.input' is a string, not a list.
    # But we need lists for the foreach here to work.
    if isinstance(args.input, str):
        args.input = [args.input]

    original_cwd = os.getcwd()

    # Build a suppression handler if a suppress file was given; the file
    # is created on demand when --export-source-suppress is requested.
    suppr_handler = None
    if 'suppress' in args:
        __make_handler = False
        if not os.path.isfile(args.suppress):
            if 'create_suppress' in args:
                with open(args.suppress, 'w') as _:
                    # Just create the file.
                    __make_handler = True
                    LOG.info("Will write source-code suppressions to "
                             "suppress file.")
            else:
                LOG.warning("Suppress file '" + args.suppress + "' given, but "
                            "it does not exist -- will not suppress anything.")
        else:
            __make_handler = True
        if __make_handler:
            suppr_handler = suppress_handler.\
                GenericSuppressHandler(args.suppress,
                                       'create_suppress' in args)
    elif 'create_suppress' in args:
        LOG.error("Can't use '--export-source-suppress' unless '--suppress "
                  "SUPPRESS_FILE' is also given.")
        sys.exit(2)

    # Path hashes of reports already handled, used for deduplication.
    processed_path_hashes = set()

    def skip_html_report_data_handler(report_hash, source_file, report_line,
                                      checker_name, diag, files):
        """
        Report handler which skips bugs which were suppressed by source code
        comments. Returns True when the report should be skipped.
        """
        report = Report(None, diag['path'], files)
        path_hash = get_report_path_hash(report, files)
        if path_hash in processed_path_hashes:
            LOG.debug("Skip report because it is a deduplication of an "
                      "already processed report!")
            LOG.debug("Path hash: %s", path_hash)
            LOG.debug(diag)
            return True

        skip = plist_parser.skip_report(report_hash,
                                        source_file,
                                        report_line,
                                        checker_name,
                                        suppr_handler)
        if not skip:
            processed_path_hashes.add(path_hash)

        return skip

    skip_handler = None
    if 'skipfile' in args:
        with open(args.skipfile, 'r') as skip_file:
            skip_handler = SkipListHandler(skip_file.read())

    html_builder = None

    for input_path in args.input:
        input_path = os.path.abspath(input_path)
        os.chdir(original_cwd)
        LOG.debug("Parsing input argument: '" + input_path + "'")

        export = args.export if 'export' in args else None
        if export is not None and export == 'html':
            output_path = os.path.abspath(args.output_path)

            # One builder instance is shared so index.html can be created
            # for all inputs at the end.
            if not html_builder:
                html_builder = \
                    PlistToHtml.HtmlBuilder(context.path_plist_to_html_dist,
                                            context.severity_map)

            LOG.info("Generating html output files:")
            PlistToHtml.parse(input_path,
                              output_path,
                              context.path_plist_to_html_dist,
                              skip_html_report_data_handler,
                              html_builder)
            continue

        # Collect the plist files to parse: either a single file or every
        # file in the given result directory.
        files = []
        metadata_dict = {}
        if os.path.isfile(input_path):
            files.append(input_path)
        elif os.path.isdir(input_path):
            metadata_file = os.path.join(input_path, "metadata.json")
            if os.path.exists(metadata_file):
                metadata_dict = util.load_json_or_empty(metadata_file)
                LOG.debug(metadata_dict)

                # Relative paths in the reports are resolved against the
                # analysis working directory recorded in the metadata.
                if 'working_directory' in metadata_dict:
                    working_dir = metadata_dict['working_directory']
                    try:
                        os.chdir(working_dir)
                    except OSError as oerr:
                        LOG.debug(oerr)
                        LOG.error("Working directory %s is missing.\n"
                                  "Can not parse reports safely.", working_dir)
                        sys.exit(1)

            _, _, file_names = next(os.walk(input_path), ([], [], []))
            files = [os.path.join(input_path, file_name)
                     for file_name in file_names]

        file_change = set()
        file_report_map = defaultdict(list)

        rh = plist_parser.PlistToPlaintextFormatter(suppr_handler,
                                                    skip_handler,
                                                    context.severity_map,
                                                    processed_path_hashes)
        rh.print_steps = 'print_steps' in args

        for file_path in files:
            f_change = parse(file_path, metadata_dict, rh, file_report_map)
            file_change = file_change.union(f_change)

        # Print a per-input-path summary of the parsed reports.
        report_stats = rh.write(file_report_map)
        severity_stats = report_stats.get('severity')
        file_stats = report_stats.get('files')
        reports_stats = report_stats.get('reports')

        print("\n----==== Summary ====----")
        if file_stats:
            vals = [[os.path.basename(k), v] for k, v in
                    dict(file_stats).items()]
            keys = ['Filename', 'Report count']
            table = twodim_to_str('table', keys, vals, 1, True)
            print(table)

        if severity_stats:
            vals = [[k, v] for k, v in dict(severity_stats).items()]
            keys = ['Severity', 'Report count']
            table = twodim_to_str('table', keys, vals, 1, True)
            print(table)

        report_count = reports_stats.get("report_count", 0)
        print("----=================----")
        print("Total number of reports: {}".format(report_count))
        print("----=================----")

        if file_change:
            changed_files = '\n'.join([' - ' + f for f in file_change])
            LOG.warning("The following source file contents changed since the "
                        "latest analysis:\n{0}\nMultiple reports were not "
                        "shown and skipped from the statistics. Please "
                        "analyze your project again to update the "
                        "reports!".format(changed_files))

    os.chdir(original_cwd)

    # Create index.html for the generated html files.
    if html_builder:
        html_builder.create_index_html(args.output_path)
def main(args):
    """
    Entry point for parsing some analysis results and printing them to the
    stdout in a human-readable format.

    Reports are either converted to HTML (--export html) or pretty-printed
    to stdout with per-input summary tables.
    """
    logger.setup_logger(args.verbose if 'verbose' in args else None)

    context = generic_package_context.get_context()

    # To ensure the help message prints the default folder properly,
    # the 'default' for 'args.input' is a string, not a list.
    # But we need lists for the foreach here to work.
    if isinstance(args.input, str):
        args.input = [args.input]

    original_cwd = os.getcwd()

    # Build a suppression handler if a suppress file was given; the file
    # is created on demand when --export-source-suppress is requested.
    suppress_handler = None
    if 'suppress' in args:
        __make_handler = False
        if not os.path.isfile(args.suppress):
            if 'create_suppress' in args:
                with open(args.suppress, 'w') as _:
                    # Just create the file.
                    __make_handler = True
                    LOG.info("Will write source-code suppressions to "
                             "suppress file.")
            else:
                LOG.warning("Suppress file '" + args.suppress + "' given, but "
                            "it does not exist -- will not suppress anything.")
        else:
            __make_handler = True
        if __make_handler:
            suppress_handler = generic_package_suppress_handler.\
                GenericSuppressHandler(args.suppress,
                                       'create_suppress' in args)
    elif 'create_suppress' in args:
        LOG.error("Can't use '--export-source-suppress' unless '--suppress "
                  "SUPPRESS_FILE' is also given.")
        sys.exit(2)

    skip_handler = None
    if 'skipfile' in args:
        skip_handler = SkipListHandler(args.skipfile)

    for input_path in args.input:
        input_path = os.path.abspath(input_path)
        os.chdir(original_cwd)
        LOG.debug("Parsing input argument: '" + input_path + "'")

        export = args.export if 'export' in args else None
        if export is not None and export == 'html':
            output_path = os.path.abspath(args.output_path)
            LOG.info("Generating html output files:")
            PlistToHtml.parse(input_path,
                              output_path,
                              context.path_plist_to_html_dist,
                              'clean' in args)
            continue

        # Statistics are collected and printed per input path; the
        # counters are reset for every input argument.
        severity_stats = Counter({})
        file_stats = Counter({})
        report_count = Counter({})

        # Collect the plist files to parse: either a single file or every
        # file in the given result directory.
        files = []
        metadata_dict = {}
        if os.path.isfile(input_path):
            files.append(input_path)
        elif os.path.isdir(input_path):
            metadata_file = os.path.join(input_path, "metadata.json")
            if os.path.exists(metadata_file):
                with open(metadata_file, 'r') as metadata:
                    metadata_dict = json.load(metadata)
                LOG.debug(metadata_dict)

                # Relative paths in the reports are resolved against the
                # analysis working directory recorded in the metadata.
                if 'working_directory' in metadata_dict:
                    os.chdir(metadata_dict['working_directory'])

            _, _, file_names = next(os.walk(input_path), ([], [], []))
            files = [os.path.join(input_path, file_name)
                     for file_name in file_names]

        for file_path in files:
            report_stats = parse(file_path,
                                 context,
                                 metadata_dict,
                                 suppress_handler,
                                 skip_handler,
                                 'print_steps' in args)
            severity_stats.update(Counter(report_stats.get('severity', {})))
            file_stats.update(Counter(report_stats.get('files', {})))
            report_count.update(Counter(report_stats.get('reports', {})))

        print("\n----==== Summary ====----")
        if file_stats:
            vals = [[os.path.basename(k), v] for k, v in
                    dict(file_stats).items()]
            keys = ['Filename', 'Report count']
            table = twodim_to_str('table', keys, vals, 1, True)
            print(table)

        if severity_stats:
            vals = [[k, v] for k, v in dict(severity_stats).items()]
            keys = ['Severity', 'Report count']
            table = twodim_to_str('table', keys, vals, 1, True)
            print(table)

        # NOTE(review): report_count (a Counter) is rebound to an int
        # here; the Counter form is not used afterwards.
        report_count = dict(report_count).get("report_count", 0)
        print("----=================----")
        print("Total number of reports: {}".format(report_count))
        print("----=================----")

    os.chdir(original_cwd)
def main(args):
    """
    Entry point for parsing some analysis results and printing them to the
    stdout in a human-readable format.

    Reports are either converted to HTML (--export html) or pretty-printed
    to stdout with per-input summary tables.
    """
    logger.setup_logger(args.verbose if 'verbose' in args else None)

    context = analyzer_context.get_context()

    # To ensure the help message prints the default folder properly,
    # the 'default' for 'args.input' is a string, not a list.
    # But we need lists for the foreach here to work.
    if isinstance(args.input, str):
        args.input = [args.input]

    original_cwd = os.getcwd()

    # Build a suppression handler if a suppress file was given; the file
    # is created on demand when --export-source-suppress is requested.
    suppr_handler = None
    if 'suppress' in args:
        __make_handler = False
        if not os.path.isfile(args.suppress):
            if 'create_suppress' in args:
                with open(args.suppress, 'w') as _:
                    # Just create the file.
                    __make_handler = True
                    LOG.info("Will write source-code suppressions to "
                             "suppress file.")
            else:
                LOG.warning("Suppress file '%s' given, but it does not exist"
                            " -- will not suppress anything.", args.suppress)
        else:
            __make_handler = True
        if __make_handler:
            suppr_handler = suppress_handler.\
                GenericSuppressHandler(args.suppress,
                                       'create_suppress' in args)
    elif 'create_suppress' in args:
        LOG.error("Can't use '--export-source-suppress' unless '--suppress "
                  "SUPPRESS_FILE' is also given.")
        sys.exit(2)

    # Path hashes of reports already handled, used for deduplication.
    processed_path_hashes = set()

    skip_handler = None
    if 'skipfile' in args:
        with open(args.skipfile, 'r') as skip_file:
            skip_handler = SkipListHandler(skip_file.read())

    trim_path_prefixes = args.trim_path_prefix if \
        'trim_path_prefix' in args else None

    def trim_path_prefixes_handler(source_file):
        """
        Callback to util.trim_path_prefixes to prevent module dependency
        of plist_to_html
        """
        return util.trim_path_prefixes(source_file, trim_path_prefixes)

    html_builder = None

    def skip_html_report_data_handler(report_hash, source_file, report_line,
                                      checker_name, diag, files):
        """
        Report handler which skips bugs which were suppressed by source code
        comments. Returns True when the report should be skipped.
        """
        report = Report(None, diag['path'], files)
        path_hash = get_report_path_hash(report, files)
        if path_hash in processed_path_hashes:
            LOG.debug("Skip report because it is a deduplication of an "
                      "already processed report!")
            LOG.debug("Path hash: %s", path_hash)
            LOG.debug(diag)
            return True

        skip = plist_parser.skip_report(report_hash,
                                        source_file,
                                        report_line,
                                        checker_name,
                                        suppr_handler)
        if skip_handler:
            skip |= skip_handler.should_skip(source_file)

        if not skip:
            processed_path_hashes.add(path_hash)

        return skip

    for input_path in args.input:
        input_path = os.path.abspath(input_path)
        os.chdir(original_cwd)
        LOG.debug("Parsing input argument: '%s'", input_path)

        export = args.export if 'export' in args else None
        if export is not None and export == 'html':
            output_path = os.path.abspath(args.output_path)

            # One builder instance is shared so index.html and
            # statistics.html can be created for all inputs at the end.
            if not html_builder:
                html_builder = \
                    PlistToHtml.HtmlBuilder(context.path_plist_to_html_dist,
                                            context.severity_map)

            LOG.info("Generating html output files:")
            PlistToHtml.parse(input_path,
                              output_path,
                              context.path_plist_to_html_dist,
                              skip_html_report_data_handler,
                              html_builder,
                              trim_path_prefixes_handler)
            continue

        # Collect the plist files to parse: either a single file or every
        # file in the given result directory.
        files = []
        metadata_dict = {}
        if os.path.isfile(input_path):
            files.append(input_path)
        elif os.path.isdir(input_path):
            metadata_file = os.path.join(input_path, "metadata.json")
            if os.path.exists(metadata_file):
                metadata_dict = util.load_json_or_empty(metadata_file)
                LOG.debug(metadata_dict)

                # Relative paths in the reports are resolved against the
                # analysis working directory recorded in the metadata.
                if 'working_directory' in metadata_dict:
                    working_dir = metadata_dict['working_directory']
                    try:
                        os.chdir(working_dir)
                    except OSError as oerr:
                        LOG.debug(oerr)
                        LOG.error("Working directory %s is missing.\n"
                                  "Can not parse reports safely.", working_dir)
                        sys.exit(1)

            _, _, file_names = next(os.walk(input_path), ([], [], []))
            files = [os.path.join(input_path, file_name)
                     for file_name in file_names]

        file_change = set()
        file_report_map = defaultdict(list)

        rh = plist_parser.PlistToPlaintextFormatter(suppr_handler,
                                                    skip_handler,
                                                    context.severity_map,
                                                    processed_path_hashes,
                                                    trim_path_prefixes)
        rh.print_steps = 'print_steps' in args

        for file_path in files:
            f_change = parse(file_path, metadata_dict, rh, file_report_map)
            file_change = file_change.union(f_change)

        # Print a per-input-path summary of the parsed reports.
        report_stats = rh.write(file_report_map)
        severity_stats = report_stats.get('severity')
        file_stats = report_stats.get('files')
        reports_stats = report_stats.get('reports')

        print("\n----==== Summary ====----")
        if file_stats:
            vals = [[os.path.basename(k), v] for k, v in
                    dict(file_stats).items()]
            keys = ['Filename', 'Report count']
            table = twodim_to_str('table', keys, vals, 1, True)
            print(table)

        if severity_stats:
            vals = [[k, v] for k, v in dict(severity_stats).items()]
            keys = ['Severity', 'Report count']
            table = twodim_to_str('table', keys, vals, 1, True)
            print(table)

        report_count = reports_stats.get("report_count", 0)
        print("----=================----")
        print("Total number of reports: {}".format(report_count))
        print("----=================----")

        if file_change:
            changed_files = '\n'.join([' - ' + f for f in file_change])
            LOG.warning("The following source file contents changed since the "
                        "latest analysis:\n%s\nMultiple reports were not "
                        "shown and skipped from the statistics. Please "
                        "analyze your project again to update the "
                        "reports!", changed_files)

    os.chdir(original_cwd)

    # Create index.html and statistics.html for the generated html files.
    if html_builder:
        html_builder.create_index_html(args.output_path)
        html_builder.create_statistics_html(args.output_path)

        print('\nTo view statistics in a browser run:\n> firefox {0}'.format(
            os.path.join(args.output_path, 'statistics.html')))

        print('\nTo view the results in a browser run:\n> firefox {0}'.format(
            os.path.join(args.output_path, 'index.html')))