def report(manager, filename, sev_level, conf_level, lines=-1):
    """Prints discovered issues in the text format

    :param manager: the bandit manager object
    :param filename: The output file name, or None for stdout
    :param sev_level: Filtering severity level
    :param conf_level: Filtering confidence level
    :param lines: Number of lines to report, -1 for all
    """
    totals = manager.metrics.data['_totals']

    # Assemble the report one section at a time, then join at the end.
    sections = ["Run started:%s" % datetime.datetime.utcnow()]

    if manager.verbose:
        sections.append(get_verbose_details(manager))

    sections.append('\tTotal lines of code: %i' % totals['loc'])
    sections.append('\tTotal lines skipped (#nosec): %i' % totals['nosec'])
    sections.append(get_metrics(manager))

    sections.append("Files skipped (%i):" % len(manager.skipped))
    for skip in manager.skipped:
        # each entry is a (filename, reason) pair
        sections.append("\t%s (%s)" % skip)

    sections.append("\nTest results:")
    sections.append(get_results(manager, sev_level, conf_level, lines))

    text = '\n'.join(str(section) for section in sections) + '\n'

    with utils.output_file(filename, 'w') as fout:
        fout.write(text)

    if filename is not None:
        logger.info("Text output written to file: %s", filename)
def report(manager, filename, sev_level, conf_level, lines=-1):
    '''Prints issues in CSV format

    :param manager: the bandit manager object
    :param filename: The output file name, or None for stdout
    :param sev_level: Filtering severity level
    :param conf_level: Filtering confidence level
    :param lines: Number of lines to report, -1 for all
    '''
    results = manager.get_issue_list(sev_level=sev_level,
                                     conf_level=conf_level)

    with utils.output_file(filename, 'w') as fout:
        fieldnames = ['filename', 'test_name', 'test_id',
                      'issue_severity', 'issue_confidence',
                      'issue_text', 'line_number', 'line_range']

        # extrasaction='ignore' silently drops extra keys produced by
        # as_dict() (anything not in fieldnames) instead of raising.
        writer = csv.DictWriter(fout, fieldnames=fieldnames,
                                extrasaction='ignore')
        writer.writeheader()
        for result in results:
            writer.writerow(result.as_dict(with_code=False))

    if filename is not None:
        # Use lazy %-args so formatting is deferred to the logging
        # framework (consistent with the other formatters in this file).
        logger.info("CSV output written to file: %s", filename)
def report(manager, filename, sev_level, conf_level, lines=-1,
           out_format="csv"):
    """Prints issues in CSV format

    :param manager: the bandit manager object
    :param filename: The output file name, or None for stdout
    :param sev_level: Filtering severity level
    :param conf_level: Filtering confidence level
    :param lines: Number of lines to report, -1 for all
    :param out_format: The output format name
    """
    results = manager.get_issue_list(sev_level=sev_level,
                                     conf_level=conf_level)

    with utils.output_file(filename, "w") as fout:
        fieldnames = [
            "filename",
            "test_name",
            "issue_severity",
            "issue_confidence",
            "issue_text",
            "line_number",
            "line_range",
        ]

        # extrasaction="ignore" drops any keys from as_dict() that are not
        # listed in fieldnames instead of raising ValueError.
        writer = csv.DictWriter(fout, fieldnames=fieldnames,
                                extrasaction="ignore")
        writer.writeheader()
        for result in results:
            writer.writerow(result.as_dict(with_code=False))

    if filename is not None:
        # Lazy %-args: let logging do the interpolation only when emitted.
        logger.info("CSV output written to file: %s", filename)
def report(manager, filename, sev_level, conf_level, lines=-1):
    '''Prints issues in CSV format

    :param manager: the bandit manager object
    :param filename: The output file name, or None for stdout
    :param sev_level: Filtering severity level
    :param conf_level: Filtering confidence level
    :param lines: Number of lines to report, -1 for all
    '''
    issues = manager.get_issue_list(sev_level=sev_level,
                                    conf_level=conf_level)

    # one CSV column per issue attribute we want to export
    columns = ['filename', 'test_name', 'issue_severity',
               'issue_confidence', 'issue_text', 'line_number',
               'line_range']

    with utils.output_file(filename, 'w') as fout:
        csv_writer = csv.DictWriter(fout, fieldnames=columns,
                                    extrasaction='ignore')
        csv_writer.writeheader()
        for issue in issues:
            row = issue.as_dict(with_code=False)
            csv_writer.writerow(row)

    if filename is not None:
        logger.info("CSV output written to file: %s" % filename)
def report(manager, filename, sev_level, conf_level, lines=-1):
    '''Prints issues in JSON format

    :param manager: the bandit manager object
    :param filename: The output file name, or None for stdout
    :param sev_level: Filtering severity level
    :param conf_level: Filtering confidence level
    :param lines: Number of lines to report, -1 for all
    '''
    stats = dict(zip(manager.files_list, manager.scores))
    machine_output = {'results': [], 'errors': [], 'stats': []}

    for (fname, reason) in manager.skipped:
        machine_output['errors'].append({'filename': fname,
                                         'reason': reason})

    for filer, score in six.iteritems(stats):
        totals = {}
        rank = constants.RANKING
        # only report severities at or above the requested filter level
        sev_idx = rank.index(sev_level)
        for i in range(sev_idx, len(rank)):
            severity = rank[i]
            severity_value = constants.RANKING_VALUES[severity]
            try:
                # normalize the raw score back into an issue count
                sc = score['SEVERITY'][i] / severity_value
            except ZeroDivisionError:
                sc = 0
            totals[severity] = sc

        machine_output['stats'].append({
            'filename': filer,
            'score': {
                'SEVERITY': sum(score['SEVERITY']),
                'CONFIDENCE': sum(score['CONFIDENCE'])
            },
            'issue totals': totals
        })

    results = manager.get_issue_list(sev_level=sev_level,
                                     conf_level=conf_level)
    collector = [r.as_dict() for r in results]

    # sort key depends on how the run aggregated results
    if manager.agg_type == 'vuln':
        machine_output['results'] = sorted(collector,
                                           key=itemgetter('test_name'))
    else:
        machine_output['results'] = sorted(collector,
                                           key=itemgetter('filename'))

    machine_output['metrics'] = manager.metrics.data

    # timezone agnostic format
    TS_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
    time_string = datetime.datetime.utcnow().strftime(TS_FORMAT)
    machine_output['generated_at'] = time_string

    result = json.dumps(machine_output, sort_keys=True,
                        indent=2, separators=(',', ': '))

    with utils.output_file(filename, 'w') as fout:
        fout.write(result)

    if filename is not None:
        # lazy %-args so logging interpolates only when the record is emitted
        logger.info("JSON output written to file: %s", filename)
def report(manager, filename, sev_level, conf_level, lines=-1):
    """Writes issues to 'filename' in HTML format

    :param manager: the bandit manager object
    :param filename: output file name
    :param sev_level: Filtering severity level
    :param conf_level: Filtering confidence level
    :param lines: Number of lines to report, -1 for all
    """

    header_block = u"""
<!DOCTYPE html>
<html>
<head>

<title>
    Bandit Report
</title>

<style>

html * {
    font-family: "Arial", sans-serif;
}

pre {
    font-family: "Monaco", monospace;
}

.bordered-box {
    border: 1px solid black;
    padding-top:.5em;
    padding-bottom:.5em;
    padding-left:1em;
}

.metrics-box {
    font-size: 1.1em;
    line-height: 130%;
}

.metrics-title {
    font-size: 1.5em;
    font-weight: 500;
    margin-bottom: .25em;
}

.issue-description {
    font-size: 1.3em;
    font-weight: 500;
}

.candidate-issues {
    margin-left: 2em;
    border-left: solid 1px; LightGray;
    padding-left: 5%;
    margin-top: .2em;
    margin-bottom: .2em;
}

.issue-block {
    border: 1px solid LightGray;
    padding-left: .5em;
    padding-top: .5em;
    padding-bottom: .5em;
    margin-bottom: .5em;
}

.issue-sev-high {
    background-color: Pink;
}

.issue-sev-medium {
    background-color: NavajoWhite;
}

.issue-sev-low {
    background-color: LightCyan;
}

</style>
</head>
"""

    report_block = u"""
<body>
{metrics}
{skipped}

<br>
<span id='results'>
{results}
</span>

</body>
</html>
"""

    issue_block = u"""
<span id='issue-{issue_no}'>
<div class='issue-block {issue_class}'>
    <b>{test_name}: </b> {test_text}<br>
    <b>Test ID:</b> {test_id}<br>
    <b>Severity: </b>{severity}<br />
    <b>Confidence: </b>{confidence}<br />
    <b>File: </b><a href='{path}' target='_blank'>{path}</a> <br />
    <b>More info: </b><a href='{url}' target='_blank'>{url}</a><br />
{code}
{candidates}
</div>
</span>
"""

    code_block = u"""
<span id='code'>
<pre>
{code}
</pre>
</span>
"""

    candidate_block = u"""
<span id='candidates'>
<br>
<b>Candidates: </b>
{candidate_list}
</span>
"""

    candidate_issue = u"""
<span id='candidate'>
<div class='candidate-issues'>
<pre>{code}</pre>
</div>
</span>
"""

    skipped_block = u"""
<br>
<span id='skipped'>
<div class='bordered-box'>
<b>Skipped files:</b><br><br>
{files_list}
</div>
</span>
"""

    metrics_block = u"""
<span id='metrics'>
<div class='metrics-box bordered-box'>
<div class='metrics-title'>
Metrics:<br>
</div>
Total lines of code: <span id='loc'>{loc}</span><br>
Total lines skipped (#nosec): <span id='nosec'>{nosec}</span>
</div>
</span>
"""

    issues = manager.get_issue_list(sev_level=sev_level,
                                    conf_level=conf_level)
    # When comparing against a baseline the issue list is a mapping of
    # issue -> candidate issues rather than a plain list.
    baseline = not isinstance(issues, list)

    # build the skipped string to insert in the report
    skipped_str = ''.join('%s <b>reason:</b> %s<br>' % (fname, reason)
                          for fname, reason in manager.skipped)
    if skipped_str:
        skipped_text = skipped_block.format(files_list=skipped_str)
    else:
        skipped_text = ''

    # build the results string to insert in the report
    results_str = ''
    for index, issue in enumerate(issues):
        if not baseline or len(issues[issue]) == 1:
            # single known issue: render its code snippet directly
            candidates = ''
            code = code_block.format(
                code=issue.get_code(lines, True).strip('\n').lstrip(' '))
        else:
            # multiple candidates: render each candidate snippet instead
            candidates_str = ''
            code = ''
            for candidate in issues[issue]:
                candidate_code = (candidate.get_code(
                    lines, True).strip('\n').lstrip(' '))
                candidates_str += candidate_issue.format(code=candidate_code)

            candidates = candidate_block.format(candidate_list=candidates_str)

        url = docs_utils.get_url(issue.test_id)

        results_str += issue_block.format(issue_no=index,
                                          issue_class='issue-sev-{}'.format(
                                              issue.severity.lower()),
                                          test_name=issue.test,
                                          test_id=issue.test_id,
                                          test_text=issue.text,
                                          severity=issue.severity,
                                          confidence=issue.confidence,
                                          path=issue.fname, code=code,
                                          candidates=candidates,
                                          url=url)

    # build the metrics string to insert in the report
    metrics_summary = metrics_block.format(
        loc=manager.metrics.data['_totals']['loc'],
        nosec=manager.metrics.data['_totals']['nosec'])

    # build the report and output it
    report_contents = report_block.format(metrics=metrics_summary,
                                          skipped=skipped_text,
                                          results=results_str)

    with utils.output_file(filename, 'w') as fout:
        # BUG FIX: the previous str(text.encode('utf-8')) produced the
        # literal "b'...'" repr on Python 3 (and bytes cannot be written
        # to a text-mode file anyway); write the text directly and let
        # the file object handle encoding.
        fout.write(header_block)
        fout.write(report_contents)

    if filename is not None:
        logger.info("HTML output written to file: %s", filename)
def _b_tester(a, b):
    """Open ``a`` via b_utils.output_file in mode ``b`` and immediately
    close it (used only to exercise the context manager)."""
    manager = b_utils.output_file(a, b)
    with manager:
        pass
def report(manager, filename, sev_level, conf_level, lines=-1):
    '''Prints issues in JSON format

    :param manager: the bandit manager object
    :param filename: The output file name, or None for stdout
    :param sev_level: Filtering severity level
    :param conf_level: Filtering confidence level
    :param lines: Number of lines to report, -1 for all
    '''
    stats = dict(zip(manager.files_list, manager.scores))
    machine_output = {'results': [], 'errors': [], 'stats': []}

    for (fname, reason) in manager.skipped:
        machine_output['errors'].append({'filename': fname,
                                         'reason': reason})

    for filer, score in six.iteritems(stats):
        totals = {}
        rank = constants.RANKING
        # only report severities at or above the requested filter level
        sev_idx = rank.index(sev_level)
        for i in range(sev_idx, len(rank)):
            severity = rank[i]
            severity_value = constants.RANKING_VALUES[severity]
            try:
                # normalize the raw score back into an issue count
                sc = score['SEVERITY'][i] / severity_value
            except ZeroDivisionError:
                sc = 0
            totals[severity] = sc

        machine_output['stats'].append({
            'filename': filer,
            'score': {'SEVERITY': sum(score['SEVERITY']),
                      'CONFIDENCE': sum(score['CONFIDENCE'])},
            'issue totals': totals})

    results = manager.get_issue_list(sev_level=sev_level,
                                     conf_level=conf_level)
    collector = [r.as_dict() for r in results]

    # sort key depends on how the run aggregated results
    if manager.agg_type == 'vuln':
        machine_output['results'] = sorted(collector,
                                           key=itemgetter('test_name'))
    else:
        machine_output['results'] = sorted(collector,
                                           key=itemgetter('filename'))

    machine_output['metrics'] = manager.metrics.data

    # timezone agnostic format
    TS_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
    time_string = datetime.datetime.utcnow().strftime(TS_FORMAT)
    machine_output['generated_at'] = time_string

    result = json.dumps(machine_output, sort_keys=True,
                        indent=2, separators=(',', ': '))

    with utils.output_file(filename, 'w') as fout:
        fout.write(result)

    if filename is not None:
        # lazy %-args so logging interpolates only when the record is emitted
        logger.info("JSON output written to file: %s", filename)
def report(manager, filename, sev_level, conf_level, lines=-1,
           out_format='txt'):
    '''Prints issues in Text format

    :param manager: the bandit manager object
    :param filename: The output file name, or None for stdout
    :param sev_level: Filtering severity level
    :param conf_level: Filtering confidence level
    :param lines: Number of lines to report, -1 for all
    :param out_format: The output format name
    '''
    # pieces of the final report; joined with '' at the end
    tmpstr_list = []

    # use a defaultdict to default to an empty string
    color = collections.defaultdict(str)

    if out_format == 'txt':
        # get text colors from settings for TTY output
        get_setting = manager.b_conf.get_setting
        color = {'HEADER': get_setting('color_HEADER'),
                 'DEFAULT': get_setting('color_DEFAULT'),
                 'LOW': get_setting('color_LOW'),
                 'MEDIUM': get_setting('color_MEDIUM'),
                 'HIGH': get_setting('color_HIGH')
                 }

    # print header
    tmpstr_list.append("%sRun started:%s\n\t%s\n" % (
        color['HEADER'],
        color['DEFAULT'],
        datetime.datetime.utcnow()
    ))

    if manager.verbose:
        # print which files were inspected
        tmpstr_list.append("\n%sFiles in scope (%s):%s\n" % (
            color['HEADER'], len(manager.files_list),
            color['DEFAULT']
        ))

        for (item, score) in zip(manager.files_list, manager.scores):
            # collapse per-rank scores into a single total per criterion
            score_dict = {'SEVERITY': sum(i for i in score['SEVERITY']),
                          'CONFIDENCE': sum(i for i in score['CONFIDENCE'])}
            tmpstr_list.append("\t%s (score: %s)\n" % (item, score_dict))

        # print which files were excluded and why
        tmpstr_list.append("\n%sFiles excluded (%s):%s\n" %
                           (color['HEADER'], len(manager.excluded_files),
                            color['DEFAULT']))
        for fname in manager.excluded_files:
            tmpstr_list.append("\t%s\n" % fname)

    # print out basic metrics from run
    metrics_summary = ''
    for (label, metric) in [
        ('Total lines of code', 'loc'),
        ('Total lines skipped (#nosec)', 'nosec')
    ]:
        metrics_summary += "\t{0}: {1}\n".format(
            label, manager.metrics.data['_totals'][metric]
        )
    # per-criterion (e.g. severity/confidence) issue counts broken down
    # by rank
    for (criteria, default) in constants.CRITERIA:
        metrics_summary += "\tTotal issues (by {0}):\n".format(
            criteria.lower()
        )
        for rank in constants.RANKING:
            metrics_summary += "\t\t{0}: {1}\n".format(
                rank.capitalize(),
                manager.metrics.data['_totals']['{0}.{1}'.format(criteria,
                                                                 rank)]
            )
    tmpstr_list.append("\n%sRun metrics:%s\n%s" % (
        color['HEADER'],
        color['DEFAULT'],
        metrics_summary
    ))

    # print which files were skipped and why
    tmpstr_list.append("\n%sFiles skipped (%s):%s\n" % (
        color['HEADER'], len(manager.skipped),
        color['DEFAULT']
    ))

    for (fname, reason) in manager.skipped:
        tmpstr_list.append("\t%s (%s)\n" % (fname, reason))

    # print the results
    tmpstr_list.append("\n%sTest results:%s\n" % (
        color['HEADER'], color['DEFAULT']
    ))

    issues = manager.get_issue_list(sev_level=sev_level,
                                    conf_level=conf_level)
    if not len(issues):
        tmpstr_list.append("\tNo issues identified.\n")

    for issue in issues:
        # color by severity when a TTY color is configured for it
        tmpstr_list.append("\n%s>> Issue: [%s] %s\n" % (
            color.get(issue.severity, color['DEFAULT']),
            issue.test, issue.text
        ))
        tmpstr_list.append(" Severity: %s Confidence: %s\n" % (
            issue.severity.capitalize(),
            issue.confidence.capitalize()
        ))
        tmpstr_list.append(" Location: %s:%s\n" % (
            issue.fname, issue.lineno
        ))
        # reset color before emitting the code snippet
        tmpstr_list.append(color['DEFAULT'])
        tmpstr_list.append(
            issue.get_code(lines, True))

    result = ''.join(tmpstr_list)

    with utils.output_file(filename, 'w') as fout:
        fout.write(result)

    if filename is not None:
        logger.info("Text output written to file: %s", filename)
def report(manager, filename, sev_level, conf_level, lines=-1):
    """Writes issues to 'filename' in HTML format

    :param manager: the bandit manager object
    :param filename: output file name
    :param sev_level: Filtering severity level
    :param conf_level: Filtering confidence level
    :param lines: Number of lines to report, -1 for all
    """

    # static document head plus inline CSS for the report
    header_block = """
<!DOCTYPE html>
<html>
<head>

<title>
    Bandit Report
</title>

<style>

html * {
    font-family: "Arial", sans-serif;
}

pre {
    font-family: "Monaco", monospace;
}

.bordered-box {
    border: 1px solid black;
    padding-top:.5em;
    padding-bottom:.5em;
    padding-left:1em;
}

.metrics-box {
    font-size: 1.1em;
    line-height: 130%;
}

.metrics-title {
    font-size: 1.5em;
    font-weight: 500;
    margin-bottom: .25em;
}

.issue-description {
    font-size: 1.3em;
    font-weight: 500;
}

.candidate-issues {
    margin-left: 2em;
    border-left: solid 1px; LightGray;
    padding-left: 5%;
    margin-top: .2em;
    margin-bottom: .2em;
}

.issue-block {
    border: 1px solid LightGray;
    padding-left: .5em;
    padding-top: .5em;
    padding-bottom: .5em;
    margin-bottom: .5em;
}

.issue-sev-high {
    background-color: Pink;
}

.issue-sev-medium {
    background-color: NavajoWhite;
}

.issue-sev-low {
    background-color: LightCyan;
}

</style>
</head>
"""

    # document body; filled with metrics, skipped files and results
    report_block = """
<body>
{metrics}
{skipped}

<br>
<span id='results'>
{results}
</span>

</body>
</html>
"""

    # one rendered block per reported issue
    issue_block = """
<span id='issue-{issue_no}'>
<div class='issue-block {issue_class}'>
    <b>{test_name}: </b> {test_text}<br>
    <b>Severity: </b>{severity}<br />
    <b>Confidence: </b>{confidence}</br />
    <b>File: </b><a href='{path}' target='_blank'>{path}</a> <br />
{code}
{candidates}
</div>
</span>
"""

    code_block = """
<span id='code'>
<pre>
{code}
</pre>
</span>
"""

    candidate_block = """
<span id='candidates'>
<br>
<b>Candidates: </b>
{candidate_list}
</span>
"""

    candidate_issue = """
<span id='candidate'>
<div class='candidate-issues'>
<pre>{code}</pre>
</div>
</span>
"""

    skipped_block = """
<br>
<span id='skipped'>
<div class='bordered-box'>
<b>Skipped files:</b><br><br>
{files_list}
</div>
</span>
"""

    metrics_block = """
<span id='metrics'>
<div class='metrics-box bordered-box'>
<div class='metrics-title'>
Metrics:<br>
</div>
Total lines of code: <span id='loc'>{loc}</span><br>
Total lines skipped (#nosec): <span id='nosec'>{nosec}</span>
</div>
</span>
"""

    issues = manager.get_issue_list(sev_level=sev_level,
                                    conf_level=conf_level)
    # NOTE(review): when not a plain list, issues appears to be a mapping
    # of issue -> candidate issues from a baseline comparison (see the
    # issues[issue] lookups below) — confirm against the manager API.
    baseline = not isinstance(issues, list)

    # build the skipped string to insert in the report
    skipped_str = ''.join('%s - %s\n' % (fname, reason)
                          for fname, reason in manager.skipped)
    if skipped_str:
        skipped_text = skipped_block.format(files_list=skipped_str)
    else:
        skipped_text = ''

    # build the results string to insert in the report
    results_str = ''
    for index, issue in enumerate(issues):
        if not baseline or len(issues[issue]) == 1:
            # single known issue: embed its code snippet directly
            candidates = ''
            code = code_block.format(code=issue.get_code(lines, True).
                                     strip('\n').lstrip(' '))
        else:
            # several candidate matches: embed each candidate snippet
            candidates_str = ''
            code = ''
            for candidate in issues[issue]:
                candidate_code = (candidate.get_code(lines, True).strip('\n').
                                  lstrip(' '))
                candidates_str += candidate_issue.format(code=candidate_code)

            candidates = candidate_block.format(candidate_list=candidates_str)

        results_str += issue_block.format(issue_no=index,
                                          issue_class='issue-sev-{}'.
                                          format(issue.severity.lower()),
                                          test_name=issue.test,
                                          test_text=issue.text,
                                          severity=issue.severity,
                                          confidence=issue.confidence,
                                          path=issue.fname, code=code,
                                          candidates=candidates)

    # build the metrics string to insert in the report
    metrics_summary = metrics_block.format(
        loc=manager.metrics.data['_totals']['loc'],
        nosec=manager.metrics.data['_totals']['nosec'])

    # build the report and output it
    report_contents = report_block.format(metrics=metrics_summary,
                                          skipped=skipped_text,
                                          results=results_str)

    with utils.output_file(filename, 'w') as fout:
        fout.write(header_block)
        fout.write(report_contents)

    if filename is not None:
        logger.info("HTML output written to file: %s" % filename)
def report(manager, filename, sev_level, conf_level, lines=-1,
           out_format='txt'):
    """Prints baseline issues in the text format

    This is identical to normal text output except for each issue
    we're going to output the issue we've found and the candidate
    issues in the file.

    :param manager: the bandit manager object
    :param filename: The output file name, or None for stdout
    :param sev_level: Filtering severity level
    :param conf_level: Filtering confidence level
    :param lines: Number of lines to report, -1 for all
    :param out_format: The output format name
    """
    # pieces of the final report; joined with '' at the end
    tmpstr_list = []

    # use a defaultdict to default to an empty string
    color = collections.defaultdict(str)
    # indentation used when printing baseline candidate issues
    candidate_indent = ' ' * 10

    if out_format == 'txt':
        # get text colors from settings for TTY output
        get_setting = manager.b_conf.get_setting
        color = {'HEADER': get_setting('color_HEADER'),
                 'DEFAULT': get_setting('color_DEFAULT'),
                 'LOW': get_setting('color_LOW'),
                 'MEDIUM': get_setting('color_MEDIUM'),
                 'HIGH': get_setting('color_HIGH')
                 }

    # print header
    tmpstr_list.append("%sRun started:%s\n\t%s\n" % (
        color['HEADER'],
        color['DEFAULT'],
        datetime.datetime.utcnow()
    ))

    if manager.verbose:
        # print which files were inspected
        tmpstr_list.append("\n%sFiles in scope (%s):%s\n" % (
            color['HEADER'], len(manager.files_list),
            color['DEFAULT']
        ))

        for (item, score) in zip(manager.files_list, manager.scores):
            # collapse per-rank scores into a single total per criterion
            score_dict = {'SEVERITY': sum(score['SEVERITY']),
                          'CONFIDENCE': sum(score['CONFIDENCE'])}
            tmpstr_list.append("\t%s (score: %s)\n" % (item, score_dict))

        # print which files were excluded and why
        tmpstr_list.append("\n%sFiles excluded (%s):%s\n" %
                           (color['HEADER'], len(manager.excluded_files),
                            color['DEFAULT']))
        for fname in manager.excluded_files:
            tmpstr_list.append("\t%s\n" % fname)

    # print out basic metrics from run
    metrics_summary = ''
    for (label, metric) in [
        ('Total lines of code', 'loc'),
        ('Total lines skipped (#nosec)', 'nosec')
    ]:
        metrics_summary += "\t%s: %s\n" % (
            label, manager.metrics.data['_totals'][metric]
        )
    # per-criterion (e.g. severity/confidence) issue counts broken down
    # by rank
    for (criteria, default) in constants.CRITERIA:
        metrics_summary += "\tTotal issues (by %s):\n" % (
            criteria.lower()
        )
        for rank in constants.RANKING:
            metrics_summary += "\t\t%s: %s\n" % (
                rank.capitalize(),
                manager.metrics.data['_totals']['%s.%s' % (criteria, rank)]
            )
    tmpstr_list.append("\n%sRun metrics:%s\n%s" % (
        color['HEADER'],
        color['DEFAULT'],
        metrics_summary
    ))

    # print which files were skipped and why
    tmpstr_list.append("\n%sFiles skipped (%s):%s\n" % (
        color['HEADER'], len(manager.skipped),
        color['DEFAULT']
    ))

    for (fname, reason) in manager.skipped:
        tmpstr_list.append("\t%s (%s)\n" % (fname, reason))

    # print the results
    tmpstr_list.append("\n%sTest results:%s\n" % (
        color['HEADER'], color['DEFAULT']
    ))

    issues = manager.get_issue_list(sev_level=sev_level,
                                    conf_level=conf_level)
    if not len(issues):
        tmpstr_list.append("\tNo issues identified.\n")

    # NOTE(review): when not a plain list, issues appears to be a mapping
    # of issue -> candidate issues from a baseline comparison (see the
    # issues[issue] lookups below) — confirm against the manager API.
    baseline = not isinstance(issues, list)

    for issue in issues:
        # if not a baseline or only one candidate we know the issue
        if not baseline or len(issues[issue]) == 1:
            tmpstr_list += _output_issue_str(issue, color, "", lines=lines)

        # otherwise show the finding and the candidates
        else:
            tmpstr_list += _output_issue_str(issue, color, "",
                                             show_lineno=False,
                                             show_code=False)

            tmpstr_list.append('\n-- Candidate Issues --\n')
            for candidate in issues[issue]:
                tmpstr_list += _output_issue_str(candidate, color,
                                                 candidate_indent,
                                                 lines=lines)
                tmpstr_list.append('\n')

        # separator line between issues
        tmpstr_list.append(str('-' * 50 + '\n'))

    result = ''.join(tmpstr_list)

    with utils.output_file(filename, 'w') as fout:
        fout.write(result)

    if filename is not None:
        logger.info("Text output written to file: %s", filename)
def report(manager, filename, sev_level, conf_level, lines=-1,
           out_format='html'):
    '''Writes issues to 'filename' in HTML format

    :param manager: the bandit manager object
    :param filename: output file name
    :param sev_level: Filtering severity level
    :param conf_level: Filtering confidence level
    :param lines: Number of lines to report, -1 for all
    :param out_format: The output format name
    '''

    # whole-document template; doubled braces escape literal CSS braces
    # for str.format
    report_block = """
<!DOCTYPE html>
<html>
<head>

<style>

.metrics-main {{
    border: 1px solid black;
    padding-top:.5em;
    padding-bottom:.5em;
    padding-left:1em ;
    font-size: 1.1em;
    line-height: 130%;
}}

.metrics-title {{
    font-size: 1.5em;
    font-weight: 500;
}}

.issue-description {{
    font-size: 1.3em;
    font-weight: 500;
}}

</style>

<title>
    Bandit Report
</title>

</head>

<body>

<div class="metrics-main">
{metrics}
</div>

<br><br>

<div class="results">
{results}
</div>

</body>
</html>
"""

    # one rendered block per reported issue
    issue_block = """
<div class="issue-description"><b>{test_name}:</b> {test_text}</div><br>

<div class="details">
    <b>Severity: </b>
        <span class='severity severity_{severity}'>{severity}</span><br />
    <b>Confidence:</b>
        <span class='confidence confidence_{confidence}'>{confidence}</span>
        <br />
    <b>File:</b>
        <a class='file_link' href='{path}' target='_blank'>{path}</a> <br />
</div>

<div class="code">
<pre>
{code}
</pre>
</div>
"""

    # group rendered issue blocks per source file name
    results = {}
    issues = manager.get_issue_list(sev_level=sev_level,
                                    conf_level=conf_level)
    for issue in issues:
        if not results.get(issue.fname):
            results[issue.fname] = []

        code = issue.get_code(lines, True)
        temp_result = issue_block.format(
            test_name=issue.test,
            test_text=issue.text,
            severity=issue.severity,
            confidence=issue.confidence,
            path=issue.fname, code=code
        )
        results[issue.fname].append(temp_result)

    # flatten the per-file groups into a single results string
    results_str = ""
    for res in results:
        if results[res]:
            for result in results[res]:
                results_str += result + "\n"

    # print out basic metrics from run
    metrics_summary = '<div class=metrics-title>Metrics:</div><br>\n'
    for (label, metric) in [
        ('Total lines of code', 'loc'),
        ('Total lines skipped (#nosec)', 'nosec')
    ]:
        metrics_summary += "{0}: <span class='{1}'>{2}</span><br>\n".format(
            label, metric,
            manager.metrics.data['_totals'][metric]
        )

    report_contents = report_block.format(metrics=metrics_summary,
                                          results=results_str)

    with utils.output_file(filename, 'w') as fout:
        fout.write(report_contents)

    if filename is not None:
        logger.info("HTML output written to file: %s" % filename)