def main(argv=None):
    """Merge witness-validation results into a benchexec result file.

    Parses the results XML and each witness XML, merges the witness data
    into the results via merge(), and writes the merged document as
    '<results>.merged.xml.bz2' (into args.outputpath when given, otherwise
    next to the input file).
    """
    if argv is None:
        argv = sys.argv[1:]
    args = parse_args(argv)
    result_file = args.resultsXML
    witness_files = args.witnessXML
    overwrite_status = not args.no_overwrite_status_true
    out_dir = args.outputpath
    # Overwriting statuses only makes sense if there are witnesses to merge.
    assert witness_files or not overwrite_status
    if not os.path.exists(result_file) or not os.path.isfile(result_file):
        sys.exit(f"File {result_file!r} does not exist.")
    result_xml = tablegenerator.parse_results_file(result_file)
    witness_sets = []
    for witnessFile in witness_files:
        if not os.path.exists(witnessFile) or not os.path.isfile(witnessFile):
            sys.exit(f"File {witnessFile!r} does not exist.")
        witness_xml = tablegenerator.parse_results_file(witnessFile)
        witness_sets.append(get_witnesses(witness_xml))
    merge(result_xml, witness_sets, overwrite_status)
    filename = result_file + ".merged.xml.bz2"
    if out_dir is not None:
        outfile = os.path.join(out_dir, os.path.basename(filename))
    else:
        outfile = filename
    # BUGFIX: os.makedirs("") raises FileNotFoundError. When the result file
    # was given as a bare filename in the cwd, os.path.dirname(outfile) is
    # empty, so only create the parent directory if there actually is one.
    parent_dir = os.path.dirname(outfile)
    if parent_dir:
        os.makedirs(parent_dir, exist_ok=True)
    print(" " + outfile)
    with io.TextIOWrapper(bz2.BZ2File(outfile, "wb"), encoding="utf-8") as xml_file:
        # NOTE(review): the two replace() patterns read identically here; the
        # whitespace widths look mangled in this copy — confirm against the
        # original file before changing them.
        xml_file.write(
            xml_to_string(result_xml).replace(" \n", "").replace(" \n", ""))
def load_results(result_file, status_print):
    """Load one benchexec result XML file and aggregate its statistics.

    status_print: "full" groups per-status stats by the exact status string,
    "short" groups by the status with any parenthesized suffix stripped; any
    other value skips the per-status grouping entirely.
    Returns a StatsCollection with total, per-category, and per-status stats.
    """
    run_set_result = tablegenerator.RunSetResult.create_from_xml(
        result_file, tablegenerator.parse_results_file(result_file))
    run_set_result.collect_data(False)
    total_stats = StatAccumulator()
    category_stats = collections.defaultdict(StatAccumulator)
    status_stats = collections.defaultdict(
        lambda: collections.defaultdict(StatAccumulator))
    for run_result in run_set_result.results:
        total_stats.add(run_result)
        category_stats[run_result.category].add(run_result)
        if status_print == "full":
            status_stats[run_result.category][run_result.status].add(
                run_result)
        elif status_print == "short":
            # Strip a trailing " (...)" detail, e.g. "ERROR (recursion)" -> "ERROR"
            short_status = re.sub(r" *\(.*", "", run_result.status)
            status_stats[run_result.category][short_status].add(run_result)
    # Every result must have been counted exactly once in the totals.
    assert len(run_set_result.results) == total_stats.count
    basenames = [
        Util.prettylist(run_set_result.attributes.get("benchmarkname")),
        Util.prettylist(run_set_result.attributes.get("name"))
    ]
    # status_stats must be transformed to a dictionary to get rid of the lambda-factory used above (can't be pickled)
    return StatsCollection(basenames, total_stats, category_stats, dict(status_stats))
def load_results(result_file, status_print):
    """Read a benchexec result file and build aggregated statistics.

    Depending on status_print, statistics are additionally grouped by the
    full status string ("full") or by the status with any parenthesized
    suffix removed ("short"); other values skip per-status grouping.
    Returns a StatsCollection.
    """
    run_set_result = tablegenerator.RunSetResult.create_from_xml(
        result_file, tablegenerator.parse_results_file(result_file))
    run_set_result.collect_data(False)

    total_stats = StatAccumulator()
    category_stats = collections.defaultdict(StatAccumulator)
    status_stats = collections.defaultdict(
        lambda: collections.defaultdict(StatAccumulator))

    for run in run_set_result.results:
        total_stats.add(run)
        category_stats[run.category].add(run)
        if status_print == "short":
            trimmed = re.sub(r" *\(.*", "", run.status)
            status_stats[run.category][trimmed].add(run)
        elif status_print == "full":
            status_stats[run.category][run.status].add(run)

    # Sanity check: every run was accumulated exactly once.
    assert total_stats.count == len(run_set_result.results)

    basenames = [
        Util.prettylist(run_set_result.attributes.get(key))
        for key in ("benchmarkname", "name")
    ]
    # status_stats must be transformed to a dictionary to get rid of the lambda-factory used above (can't be pickled)
    return StatsCollection(basenames, total_stats, category_stats, dict(status_stats))
def main(args=None):
    """Write sorted, tab-separated quantile-plot data for benchexec results to stdout.

    Each output row is: quantile index, the relevant task-id columns, then the
    run's measurement values. Sorting uses the column named by --sort-by.
    """
    if args is None:
        args = sys.argv
    parser = argparse.ArgumentParser(
        fromfile_prefix_chars="@",
        description=
        """Create CSV tables for quantile plots with the results of a benchmark execution. The CSV tables are similar to those produced with table-generator, but have an additional first column with the index for the quantile plot, and they are sorted. The output is written to stdout. Part of BenchExec: https://github.com/sosy-lab/benchexec/""",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument(
        "result",
        metavar="RESULT",
        type=str,
        nargs="+",
        help="XML files with result produced by benchexec",
    )
    parser.add_argument(
        "--correct-only",
        action="store_true",
        dest="correct_only",
        help=
        "only use correct results (recommended, implied if --score-based is used)",
    )
    parser.add_argument(
        "--score-based",
        action="store_true",
        dest="score_based",
        help="create data for score-based quantile plot",
    )
    parser.add_argument(
        "--sort-by",
        metavar="SORT",
        default="cputime",
        dest="column_identifier",
        type=str,
        help="column identifier for sorting the values, e.g. 'cputime' or 'walltime'",
    )
    options = parser.parse_args(args[1:])

    # load results: first file creates the run set, later files are appended
    run_set_result = tablegenerator.RunSetResult.create_from_xml(
        options.result[0], tablegenerator.parse_results_file(options.result[0]))
    for results_file in options.result[1:]:
        run_set_result.append(results_file,
                              tablegenerator.parse_results_file(results_file))
    # --score-based implies correct-only for data collection
    run_set_result.collect_data(options.correct_only or options.score_based)

    # select appropriate results
    if options.score_based:
        start_index = 0
        # score-based plots advance the x-axis by each run's score
        index_increment = lambda run_result: run_result.score  # noqa: E731
        results = []
        for run_result in run_set_result.results:
            if run_result.score is None:
                sys.exit("No score available for task {0}, "
                         "cannot produce score-based quantile data.".format(
                             run_result.task_id[0]))
            if run_result.category == result.CATEGORY_WRONG:
                # wrong results only shift the starting index by their score
                start_index += run_result.score
            elif run_result.category == result.CATEGORY_MISSING:
                sys.exit("Property missing for task {0}, "
                         "cannot produce score-based quantile data.".format(
                             run_result.task_id[0]))
            elif run_result.category == result.CATEGORY_CORRECT:
                results.append(run_result)
            else:
                assert run_result.category in {
                    result.CATEGORY_ERROR,
                    result.CATEGORY_UNKNOWN,
                }
    else:
        start_index = 0
        # plain quantile plots advance by one per run
        index_increment = lambda run_result: 1  # noqa: E731
        if options.correct_only:
            results = [
                run_result for run_result in run_set_result.results
                if run_result.category == result.CATEGORY_CORRECT
            ]
        else:
            results = run_set_result.results

    # sort data for quantile plot
    results.sort(key=get_extract_value_function(options.column_identifier))

    # extract information which id columns should be shown
    for run_result in run_set_result.results:
        run_result.id = run_result.task_id
    relevant_id_columns = tablegenerator.select_relevant_id_columns(results)

    # write output
    index = start_index
    for run_result in results:
        index += index_increment(run_result)
        task_ids = (
            task_id
            for task_id, show in zip(run_result.id, relevant_id_columns)
            if show)
        result_values = (util.remove_unit(value or "")
                         for value in run_result.values)
        print(*itertools.chain([index], task_ids, result_values), sep="\t")
def main(argv=None):
    """Merge witness-validation results into a verification result XML.

    Usage: <results-xml> [<witness-xml>]* [--no-overwrite-status-true]
    For each run, picks the "best" witness result across all witness sets and
    (unless disabled) overwrites the run's status/category columns with it,
    then writes <results-xml>.merged.xml.bz2.
    """
    if argv is None:
        argv = sys.argv
    if len(argv) < 3:
        sys.exit(
            'Usage: ' + argv[0] +
            ' <results-xml> [<witness-xml>]* [--no-overwrite-status-true].\n')
    resultFile = argv[1]
    witnessFiles = []
    isOverwrite = True
    # Everything after the result file that is not a flag is a witness file.
    for i in range(2, len(argv)):
        if len(argv) > i and not argv[i].startswith('--'):
            witnessFiles.append(argv[i])
        if argv[i] == '--no-overwrite-status-true':
            isOverwrite = False
    if not os.path.exists(resultFile) or not os.path.isfile(resultFile):
        sys.exit('File {0} does not exist.'.format(repr(resultFile)))
    resultXML = TableGenerator.parse_results_file(resultFile)
    witnessSets = []
    for witnessFile in witnessFiles:
        if not os.path.exists(witnessFile) or not os.path.isfile(witnessFile):
            sys.exit('File {0} does not exist.'.format(repr(witnessFile)))
        witnessXML = TableGenerator.parse_results_file(witnessFile)
        witnessSets.append(getWitnesses(witnessXML))
    for result in resultXML.findall('run'):
        run = result.get('name')
        statusWit, categoryWit = (None, None)
        i = 0
        for witnessSet in witnessSets:
            i = i + 1
            witness = witnessSet.get(run, None)
            # copy data from witness
            if witness is not None:
                statusWitNew, categoryWitNew = getWitnessResult(
                    witness, result)
                # Keep the best witness verdict: a correct one, or an
                # "witness invalid" status, replaces whatever was found so far.
                if (categoryWit is None
                        or not categoryWit.startswith(Result.CATEGORY_CORRECT)
                        or categoryWitNew == Result.CATEGORY_CORRECT
                        or statusWitNew.startswith('witness invalid')):
                    statusWit, categoryWit = (statusWitNew, categoryWitNew)
        # Overwrite status with status from witness
        # (always for false results; for true results only when isOverwrite)
        if ((isOverwrite or Result.RESULT_CLASS_FALSE ==
             Result.get_result_classification(
                 result.findall('column[@title="status"]')[0].get('value')))
                and 'correct' == result.findall(
                    'column[@title="category"]')[0].get('value')
                and statusWit is not None and categoryWit is not None):
            #print(run, statusWit, categoryWit)
            result.findall('column[@title="status"]')[0].set(
                'value', statusWit)
            result.findall('column[@title="category"]')[0].set(
                'value', categoryWit)
        # Clean-up an entry that can be inferred by table-generator automatically, avoids path confusion
        del result.attrib['logfile']
    filename = resultFile + '.merged.xml.bz2'
    print(' ' + filename)
    # Fall back to the project's BZ2FileHack where BZ2File cannot be wrapped
    # by TextIOWrapper (old Pythons lacking the 'writable' attribute).
    open_func = bz2.BZ2File if hasattr(bz2.BZ2File, 'writable') else util.BZ2FileHack
    with io.TextIOWrapper(open_func(filename, 'wb'), encoding='utf-8') as xml_file:
        # NOTE(review): the two replace() patterns read identically here; the
        # whitespace widths look mangled in this copy — confirm.
        xml_file.write(
            xml_to_string(resultXML).replace(' \n', '').replace(' \n', ''))
def main(argv=None):
    """Merge validation results into a test-comp result XML, computing scores.

    Usage: <results-xml> [<witness-xml>]* [--no-overwrite-status-true]
    For coverage properties a 'score' column is added from the validator's
    data; for verification properties the best witness verdict may overwrite
    the run's status/category. Writes <results-xml>.merged.xml.bz2.
    """
    if argv is None:
        argv = sys.argv
    if len(argv) < 3:
        sys.exit(
            "Usage: " + argv[0]
            + " <results-xml> [<witness-xml>]* [--no-overwrite-status-true].\n"
        )
    resultFile = argv[1]
    witnessFiles = []
    isOverwrite = True
    # Everything after the result file that is not a flag is a witness file.
    for i in range(2, len(argv)):
        if len(argv) > i and not argv[i].startswith("--"):
            witnessFiles.append(argv[i])
        if argv[i] == "--no-overwrite-status-true":
            isOverwrite = False
    if not os.path.exists(resultFile) or not os.path.isfile(resultFile):
        sys.exit("File {0} does not exist.".format(repr(resultFile)))
    resultXML = tablegenerator.parse_results_file(resultFile)
    witnessSets = []
    for witnessFile in witnessFiles:
        if not os.path.exists(witnessFile) or not os.path.isfile(witnessFile):
            sys.exit("File {0} does not exist.".format(repr(witnessFile)))
        witnessXML = tablegenerator.parse_results_file(witnessFile)
        witnessSets.append(getWitnesses(witnessXML))
    for result_tag in resultXML.findall("run"):
        run = result_tag.get("name")
        # find() returns None for a missing column; .get on None raises
        # AttributeError, which is treated as "column not present".
        try:
            status_from_verification = result_tag.find(
                'column[@title="status"]').get("value")
            category_from_verification = result_tag.find(
                'column[@title="category"]').get("value")
        except AttributeError:
            status_from_verification = "not found"
            category_from_verification = "not found"
        print(run, status_from_verification, category_from_verification)
        statusWit, categoryWit = (None, None)
        for witnessSet in witnessSets:
            witness = witnessSet.get(run, None)
            # copy data from witness
            if witness is not None and len(witness) > 0:
                if result_tag.get("properties") == "coverage-error-call":
                    status_from_validation = witness.find(
                        'column[@title="status"]').get("value")
                    if status_from_validation == "true":
                        statusWit, categoryWit = (status_from_verification, "correct")
                        category_from_verification = "correct"
                        # full score for a confirmed error call
                        scoreColumn = ElementTree.Element(
                            "column", {"title": "score", "value": "1"}
                        )
                        result_tag.append(scoreColumn)
                elif result_tag.get("properties") == "coverage-branches":
                    try:
                        coverage_value = (
                            witness.find('column[@title="branches_covered"]')
                            .get("value")
                            .replace("%", "")
                        )
                    except AttributeError:
                        # missing coverage column counts as 0 %
                        coverage_value = "0.00"
                    statusWit, categoryWit = (status_from_verification, "correct")
                    category_from_verification = "correct"
                    try:
                        coverage_float = float(coverage_value)
                    except ValueError:
                        # unparsable coverage: skip this witness set
                        continue
                    # score is the covered fraction (percentage / 100)
                    scoreColumn = ElementTree.Element(
                        "column",
                        {"title": "score", "value": str(coverage_float / 100)},
                    )
                    result_tag.append(scoreColumn)
                else:
                    # For verification
                    statusWitNew, categoryWitNew = getWitnessResult(
                        witness, result_tag)
                    print(statusWitNew, categoryWitNew)
                    # Keep the best verdict found so far; a correct one wins.
                    if (
                        categoryWit is None
                        or not categoryWit.startswith(result.CATEGORY_CORRECT)
                        or categoryWitNew == result.CATEGORY_CORRECT
                    ):
                        statusWit, categoryWit = (statusWitNew, categoryWitNew)
        # Overwrite status with status from witness
        # (always for false results; for true results only when isOverwrite)
        if (
            (
                isOverwrite
                or result.RESULT_CLASS_FALSE
                == result.get_result_classification(status_from_verification)
            )
            and "correct" == category_from_verification
            and statusWit is not None
            and categoryWit is not None
        ):
            try:
                result_tag.find('column[@title="status"]').set("value", statusWit)
                result_tag.find('column[@title="category"]').set("value", categoryWit)
            except AttributeError:
                # column missing; nothing to overwrite
                pass
        # Clean-up an entry that can be inferred by table-generator automatically, avoids path confusion
        del result_tag.attrib["logfile"]
    filename = resultFile + ".merged.xml.bz2"
    print(" " + filename)
    with io.TextIOWrapper(bz2.BZ2File(filename, "wb"), encoding="utf-8") as xml_file:
        # NOTE(review): the two replace() patterns read identically here; the
        # whitespace widths look mangled in this copy — confirm.
        xml_file.write(
            xml_to_string(resultXML).replace(" \n", "").replace(" \n", "")
        )
def main(args=None):
    """Write sorted, tab-separated quantile-plot data for one benchexec result file to stdout.

    Each output row is: quantile index, the relevant task-id columns, then the
    run's measurement values. Rows are sorted by CPU time.
    """
    if args is None:
        args = sys.argv
    parser = argparse.ArgumentParser(
        fromfile_prefix_chars='@',
        description=
        """Create CSV tables for quantile plots with the results of a benchmark execution. The CSV tables are similar to those produced with table-generator, but have an additional first column with the index for the quantile plot, and they are sorted. The output is written to stdout. Part of BenchExec: https://github.com/sosy-lab/benchexec/""")
    parser.add_argument("result",
                        metavar="RESULT",
                        type=str,
                        help="XML file with result produced by benchexec")
    parser.add_argument(
        "--correct-only",
        action="store_true",
        dest="correct_only",
        help=
        "only use correct results (recommended, implied if --score-based is used)")
    parser.add_argument("--score-based",
                        action="store_true",
                        dest="score_based",
                        help="create data for score-based quantile plot")
    options = parser.parse_args(args[1:])

    # load results
    run_set_result = tablegenerator.RunSetResult.create_from_xml(
        options.result, tablegenerator.parse_results_file(options.result))
    # BUGFIX: --score-based implies correct-only (as its help text states and
    # as the multi-file variant of this script does), so data collection must
    # honor it as well; previously only options.correct_only was passed.
    run_set_result.collect_data(options.correct_only or options.score_based)

    # select appropriate results
    if options.score_based:
        start_index = 0

        def index_increment(run_result):
            # score-based plots advance the x-axis by each run's score
            return run_result.score

        results = []
        for run_result in run_set_result.results:
            if run_result.score is None:
                sys.exit('No score available for task {0}, '
                         'cannot produce score-based quantile data.'.format(
                             run_result.task_id[0]))
            if run_result.category == result.CATEGORY_WRONG:
                # wrong results only shift the starting index by their score
                start_index += run_result.score
            elif run_result.category == result.CATEGORY_MISSING:
                sys.exit('Property missing for task {0}, '
                         'cannot produce score-based quantile data.'.format(
                             run_result.task_id[0]))
            elif run_result.category == result.CATEGORY_CORRECT:
                results.append(run_result)
            else:
                assert run_result.category in {
                    result.CATEGORY_ERROR, result.CATEGORY_UNKNOWN
                }
    else:
        start_index = 0

        def index_increment(run_result):
            # plain quantile plots advance by one per run
            return 1

        if options.correct_only:
            results = [
                run_result for run_result in run_set_result.results
                if run_result.category == result.CATEGORY_CORRECT
            ]
        else:
            results = run_set_result.results

    # sort data for quantile plot
    results.sort(key=extract_cputime)

    # extract information which id columns should be shown
    for run_result in run_set_result.results:
        run_result.id = run_result.task_id
    relevant_id_columns = tablegenerator.select_relevant_id_columns(results)

    # write output
    index = start_index
    for run_result in results:
        index += index_increment(run_result)
        columns = itertools.chain(
            [index],
            # renamed from `id` to avoid shadowing the builtin
            (id_part for id_part, show in zip(run_result.id,
                                              relevant_id_columns) if show),
            map(Util.remove_unit, (value or '' for value in run_result.values)),
        )
        print(*columns, sep='\t')
def main(argv=None):
    """Merge witness-validation results into a verification result XML.

    Usage: <results-xml> [<witness-xml>]* [--no-overwrite-status]
    Copies every witness column (prefixed 'wit<i>_') into the matching run,
    chooses the best witness verdict w.r.t. the expected result, optionally
    overwrites the run's status/category with it, and writes
    <results-xml>.merged.xml.bz2.
    """
    if argv is None:
        argv = sys.argv
    if len(argv) < 3:
        sys.exit('Usage: ' + argv[0] +
                 ' <results-xml> [<witness-xml>]* [--no-overwrite-status].\n')
    resultFile = argv[1]
    witnessFiles = []
    isOverwrite = True
    # Everything after the result file that is not a flag is a witness file.
    for i in range(2, len(argv)):
        if len(argv) > i and not argv[i].startswith('--'):
            witnessFiles.append(argv[i])
        if argv[i] == '--no-overwrite-status':
            isOverwrite = False
    if not os.path.exists(resultFile) or not os.path.isfile(resultFile):
        sys.exit('File {0} does not exist.'.format(repr(resultFile)))
    resultXML = tablegenerator.parse_results_file(resultFile)
    witnessSets = []
    for witnessFile in witnessFiles:
        if not os.path.exists(witnessFile) or not os.path.isfile(witnessFile):
            sys.exit('File {0} does not exist.'.format(repr(witnessFile)))
        witnessXML = tablegenerator.parse_results_file(witnessFile)
        witnessSets.append(getWitnesses(witnessXML))
    # NOTE(review): witnessXML here is the one from the LAST loop iteration,
    # and this raises NameError when no witness files were given — confirm
    # that this matches the intended use of the script.
    resultXML.set('options',
                  '' + resultXML.get('options', default='') + ' [[ ' +
                  witnessXML.get('options', default='') + ' ]]')
    resultXML.set('date',
                  '' + resultXML.get('date', default='') + ' [[ ' +
                  witnessXML.get('date', default='') + ' ]]')
    for result in resultXML.findall('run'):
        run = result.get('name')
        basename = os.path.basename(run)
        # Only runs the verifier got right are candidates for merging.
        if 'correct' == result.findall('column[@title="category"]')[0].get('value'):
            properties = result.get('properties').split(' ')
            expected_result = benchexec.result.satisfies_file_property(
                basename, properties)
            statusWit, categoryWit = (None, None)
            i = 0
            for witnessSet in witnessSets:
                i = i + 1
                witness = witnessSet.get(run, None)
                # copy data from witness
                if witness is not None:
                    for column in witness:
                        newColumn = ET.Element('column', {
                            'title': 'wit' + str(i) + '_' + column.get('title'),
                            'value': column.get('value'),
                            'hidden': column.get('hidden', 'false')
                        })
                        result.append(newColumn)
                    witnessSet.pop(run)
                    statusWitNew, categoryWitNew = getWitnessResult(
                        witness, expected_result)
                    # IDIOM FIX: use `is False` / `is True` instead of
                    # `== False` / `== True` (E712); behavior is unchanged —
                    # a non-boolean expected_result still matches neither.
                    if expected_result is False:
                        if statusWitNew.startswith('false(') or statusWit is None:
                            statusWit, categoryWit = (statusWitNew, categoryWitNew)
                    if expected_result is True:
                        if statusWitNew.startswith('true') or statusWit is None:
                            statusWit, categoryWit = (statusWitNew, categoryWitNew)
            # Overwrite status with status from witness
            if isOverwrite and statusWit is not None and categoryWit is not None:
                result.findall('column[@title="status"]')[0].set('value', statusWit)
                result.findall('column[@title="category"]')[0].set('value', categoryWit)
        # Clean-up an entry that can be inferred by table-generator automatically, avoids path confusion
        del result.attrib['logfile']
    filename = resultFile + '.merged.xml.bz2'
    print(' ' + filename)
    # Fall back to the project's BZ2FileHack where BZ2File cannot be wrapped
    # by TextIOWrapper (old Pythons lacking the 'writable' attribute).
    open_func = bz2.BZ2File if hasattr(bz2.BZ2File, 'writable') else util.BZ2FileHack
    with io.TextIOWrapper(open_func(filename, 'wb'), encoding='utf-8') as xml_file:
        # NOTE(review): the two replace() patterns read identically here; the
        # whitespace widths look mangled in this copy — confirm.
        xml_file.write(
            xml_to_string(resultXML).replace(' \n', '').replace(' \n', ''))
def main(args=None):
    """Dump LaTeX commands summarizing a benchexec result table to stdout.

    Prints HEADER, then one LaTeX line for the totals, one per category, and
    (depending on --status) one per status within each category; missing
    correct/wrong counterparts are emitted as empty accumulators so that
    every status appears in both categories.
    """
    if args is None:
        args = sys.argv
    parser = argparse.ArgumentParser(
        fromfile_prefix_chars="@",
        description=
        """Dump LaTeX commands with summary values of the table. All the information from the footer of HTML tables is available. The output is written to stdout. Part of BenchExec: https://github.com/sosy-lab/benchexec/"""
    )
    parser.add_argument("result",
                        metavar="RESULT",
                        type=str,
                        help="XML file with result produced by benchexec")
    parser.add_argument(
        "--status",
        action="store",
        choices=["none", "short", "full"],
        default="short",
        help="whether to output statistics aggregated for each different status value, "
        "for each abbreviated status value, or not",
    )
    options = parser.parse_args(args[1:])

    # load results
    run_set_result = tablegenerator.RunSetResult.create_from_xml(
        options.result, tablegenerator.parse_results_file(options.result))
    run_set_result.collect_data(False)

    total_stats = StatAccumulator()
    category_stats = collections.defaultdict(StatAccumulator)
    status_stats = collections.defaultdict(
        lambda: collections.defaultdict(StatAccumulator))
    for run_result in run_set_result.results:
        total_stats.add(run_result)
        category_stats[run_result.category].add(run_result)
        if options.status == "full":
            status_stats[run_result.category][run_result.status].add(run_result)
        elif options.status == "short":
            # Strip a trailing " (...)" detail, e.g. "ERROR (recursion)" -> "ERROR"
            short_status = re.sub(r" *\(.*", "", run_result.status)
            status_stats[run_result.category][short_status].add(run_result)
    # Every result must have been counted exactly once in the totals.
    assert len(run_set_result.results) == total_stats.count
    basenames = [
        Util.prettylist(run_set_result.attributes.get("benchmarkname")),
        Util.prettylist(run_set_result.attributes.get("name"))
    ]

    print(HEADER)
    print(total_stats.to_latex(basenames + ["total"]))
    for (category, counts) in sorted(category_stats.items()):
        print(counts.to_latex(basenames + [category]))
        # skip entries with empty status strings
        categories = [(s, c) for (s, c) in status_stats[category].items() if s]
        for (status, counts2) in sorted(categories):
            print(counts2.to_latex(basenames + [category, status]))
            # Emit a zero-valued counterpart so each status exists for both
            # "correct" and "wrong".
            if category == "correct" and status_stats["wrong"].get(status) is None:
                print(StatAccumulator().to_latex(basenames + ["wrong", status]))
            elif category == "wrong" and status_stats["correct"].get(status) is None:
                print(StatAccumulator().to_latex(basenames + ["correct", status]))
def main(args=None):
    """Write sorted, tab-separated quantile-plot data for benchexec results to stdout.

    Accepts several result files; each output row is the quantile index, the
    relevant task-id columns, then the run's measurement values, sorted by
    CPU time.
    """
    if args is None:
        args = sys.argv
    parser = argparse.ArgumentParser(
        fromfile_prefix_chars='@',
        description=
        """Create CSV tables for quantile plots with the results of a benchmark execution. The CSV tables are similar to those produced with table-generator, but have an additional first column with the index for the quantile plot, and they are sorted. The output is written to stdout. Part of BenchExec: https://github.com/sosy-lab/benchexec/"""
    )
    parser.add_argument("result",
                        metavar="RESULT",
                        type=str,
                        nargs="+",
                        help="XML files with result produced by benchexec")
    parser.add_argument(
        "--correct-only",
        action="store_true",
        dest="correct_only",
        help="only use correct results (recommended, implied if --score-based is used)")
    parser.add_argument("--score-based",
                        action="store_true",
                        dest="score_based",
                        help="create data for score-based quantile plot")
    options = parser.parse_args(args[1:])

    # load results: first file creates the run set, later files are appended
    run_set_result = tablegenerator.RunSetResult.create_from_xml(
        options.result[0], tablegenerator.parse_results_file(options.result[0]))
    for results_file in options.result[1:]:
        run_set_result.append(results_file,
                              tablegenerator.parse_results_file(results_file))
    # --score-based implies correct-only for data collection
    run_set_result.collect_data(options.correct_only or options.score_based)

    # select appropriate results
    if options.score_based:
        start_index = 0
        # score-based plots advance the x-axis by each run's score
        index_increment = lambda run_result: run_result.score
        results = []
        for run_result in run_set_result.results:
            if run_result.score is None:
                sys.exit('No score available for task {0}, '
                         'cannot produce score-based quantile data.'
                         .format(run_result.task_id[0]))
            if run_result.category == result.CATEGORY_WRONG:
                # wrong results only shift the starting index by their score
                start_index += run_result.score
            elif run_result.category == result.CATEGORY_MISSING:
                sys.exit('Property missing for task {0}, '
                         'cannot produce score-based quantile data.'
                         .format(run_result.task_id[0]))
            elif run_result.category == result.CATEGORY_CORRECT:
                results.append(run_result)
            else:
                assert run_result.category in {result.CATEGORY_ERROR,
                                               result.CATEGORY_UNKNOWN}
    else:
        start_index = 0
        # plain quantile plots advance by one per run
        index_increment = lambda run_result: 1
        if options.correct_only:
            results = [run_result
                       for run_result in run_set_result.results
                       if run_result.category == result.CATEGORY_CORRECT]
        else:
            results = run_set_result.results

    # sort data for quantile plot
    results.sort(key=extract_cputime)

    # extract information which id columns should be shown
    for run_result in run_set_result.results:
        run_result.id = run_result.task_id
    relevant_id_columns = tablegenerator.select_relevant_id_columns(results)

    # write output
    index = start_index
    for run_result in results:
        index += index_increment(run_result)
        columns = itertools.chain(
            [index],
            (id for id, show in zip(run_result.id, relevant_id_columns) if show),
            map(Util.remove_unit, (value or '' for value in run_result.values)),
        )
        print(*columns, sep='\t')
def main(argv=None):
    """Merge validation results into a test-comp result XML, computing scores.

    Usage: <results-xml> [<witness-xml>]* [--no-overwrite-status-true]
    For coverage properties a 'score' column is added from the validator's
    data; for verification properties the best witness verdict may overwrite
    the run's status/category. Writes <results-xml>.merged.xml.bz2.
    """
    if argv is None:
        argv = sys.argv
    if len(argv) < 3:
        sys.exit(
            'Usage: ' + argv[0] +
            ' <results-xml> [<witness-xml>]* [--no-overwrite-status-true].\n')
    resultFile = argv[1]
    witnessFiles = []
    isOverwrite = True
    # Everything after the result file that is not a flag is a witness file.
    for i in range(2, len(argv)):
        if len(argv) > i and not argv[i].startswith('--'):
            witnessFiles.append(argv[i])
        if argv[i] == '--no-overwrite-status-true':
            isOverwrite = False
    if not os.path.exists(resultFile) or not os.path.isfile(resultFile):
        sys.exit('File {0} does not exist.'.format(repr(resultFile)))
    resultXML = TableGenerator.parse_results_file(resultFile)
    witnessSets = []
    for witnessFile in witnessFiles:
        if not os.path.exists(witnessFile) or not os.path.isfile(witnessFile):
            sys.exit('File {0} does not exist.'.format(repr(witnessFile)))
        witnessXML = TableGenerator.parse_results_file(witnessFile)
        witnessSets.append(getWitnesses(witnessXML))
    for result in resultXML.findall('run'):
        run = result.get('name')
        # BUGFIX: narrowed the bare `except:` to IndexError — findall()[0] is
        # the only failing operation here (missing column), and a bare except
        # would also swallow SystemExit/KeyboardInterrupt.
        try:
            status_from_verification = result.findall(
                'column[@title="status"]')[0].get('value')
            category_from_verification = result.findall(
                'column[@title="category"]')[0].get('value')
        except IndexError:
            status_from_verification = "not found"
            category_from_verification = "not found"
        statusWit, categoryWit = (None, None)
        for witnessSet in witnessSets:
            witness = witnessSet.get(run, None)
            # copy data from witness
            if witness is not None:
                if result.get('properties') == 'coverage-error-call':
                    status_from_validation = witness.findall(
                        'column[@title="status"]')[0].get('value')
                    if status_from_validation == "true":
                        statusWit, categoryWit = (status_from_verification,
                                                  'correct')
                        category_from_verification = 'correct'
                        # full score for a confirmed error call
                        scoreColumn = ET.Element('column', {
                            'title': 'score',
                            'value': '1'
                        })
                        result.append(scoreColumn)
                elif result.get('properties') == 'coverage-branches':
                    try:
                        coverage_value = witness.findall(
                            'column[@title="branches_covered"]')[0].get(
                                'value').replace("%", "")
                    except IndexError:
                        # missing coverage column counts as 0 %
                        coverage_value = '0.00'
                    statusWit, categoryWit = (status_from_verification,
                                              'correct')
                    category_from_verification = 'correct'
                    # score is the covered fraction (percentage / 100)
                    scoreColumn = ET.Element(
                        'column', {
                            'title': 'score',
                            'value': str(float(coverage_value) / 100)
                        })
                    result.append(scoreColumn)
                else:
                    # For verification
                    statusWitNew, categoryWitNew = getWitnessResult(
                        witness, result)
                    # Keep the best verdict found so far; a correct one or an
                    # "witness invalid" status wins.
                    if (categoryWit is None
                            or not categoryWit.startswith(Result.CATEGORY_CORRECT)
                            or categoryWitNew == Result.CATEGORY_CORRECT
                            or statusWitNew.startswith('witness invalid')):
                        statusWit, categoryWit = (statusWitNew, categoryWitNew)
        # Overwrite status with status from witness
        # (always for false results; for true results only when isOverwrite)
        if ((isOverwrite or Result.RESULT_CLASS_FALSE ==
             Result.get_result_classification(status_from_verification))
                and 'correct' == category_from_verification
                and statusWit is not None and categoryWit is not None):
            #print(run, statusWit, categoryWit)
            # BUGFIX: narrowed the bare `except:` to IndexError (raised by
            # findall()[0] when the column is missing).
            try:
                result.findall('column[@title="status"]')[0].set(
                    'value', statusWit)
                result.findall('column[@title="category"]')[0].set(
                    'value', categoryWit)
            except IndexError:
                pass
        # Clean-up an entry that can be inferred by table-generator automatically, avoids path confusion
        del result.attrib['logfile']
    filename = resultFile + '.merged.xml.bz2'
    print(' ' + filename)
    # Fall back to the project's BZ2FileHack where BZ2File cannot be wrapped
    # by TextIOWrapper (old Pythons lacking the 'writable' attribute).
    open_func = bz2.BZ2File if hasattr(bz2.BZ2File, 'writable') else util.BZ2FileHack
    with io.TextIOWrapper(open_func(filename, 'wb'), encoding='utf-8') as xml_file:
        # NOTE(review): the two replace() patterns read identically here; the
        # whitespace widths look mangled in this copy — confirm.
        xml_file.write(
            xml_to_string(resultXML).replace(' \n', '').replace(' \n', ''))
def main(argv=None):
    """Merge witness-validation results into a verification result XML.

    Usage: <results-xml> [<witness-xml>]* [--no-overwrite-status-true]
    For each run, picks the "best" witness result across all witness sets and
    (unless disabled) overwrites the run's status/category columns with it,
    then writes <results-xml>.merged.xml.bz2.
    """
    if argv is None:
        argv = sys.argv
    if len(argv) < 3:
        sys.exit('Usage: ' + argv[0] +
                 ' <results-xml> [<witness-xml>]* [--no-overwrite-status-true].\n')
    resultFile = argv[1]
    witnessFiles = []
    isOverwrite = True
    # Everything after the result file that is not a flag is a witness file.
    for i in range(2, len(argv)):
        if len(argv) > i and not argv[i].startswith('--'):
            witnessFiles.append(argv[i])
        if argv[i] == '--no-overwrite-status-true':
            isOverwrite = False
    if not os.path.exists(resultFile) or not os.path.isfile(resultFile):
        sys.exit('File {0} does not exist.'.format(repr(resultFile)))
    resultXML = TableGenerator.parse_results_file(resultFile)
    witnessSets = []
    for witnessFile in witnessFiles:
        if not os.path.exists(witnessFile) or not os.path.isfile(witnessFile):
            sys.exit('File {0} does not exist.'.format(repr(witnessFile)))
        witnessXML = TableGenerator.parse_results_file(witnessFile)
        witnessSets.append(getWitnesses(witnessXML))
    for result in resultXML.findall('run'):
        run = result.get('name')
        statusWit, categoryWit = (None, None)
        i = 0
        for witnessSet in witnessSets:
            i = i + 1
            witness = witnessSet.get(run, None)
            # copy data from witness
            if witness is not None:
                statusWitNew, categoryWitNew = getWitnessResult(witness, result)
                # Keep the best witness verdict: a correct one, or an
                # "witness invalid" status, replaces whatever was found so far.
                if (
                    categoryWit is None
                    or not categoryWit.startswith(Result.CATEGORY_CORRECT)
                    or categoryWitNew == Result.CATEGORY_CORRECT
                    or statusWitNew.startswith('witness invalid')
                ):
                    statusWit, categoryWit = (statusWitNew, categoryWitNew)
        # Overwrite status with status from witness
        # (always for false results; for true results only when isOverwrite)
        if (
            (
                isOverwrite
                or Result.RESULT_CLASS_FALSE == Result.get_result_classification(
                    result.findall('column[@title="status"]')[0].get('value'))
            )
            and 'correct' == result.findall('column[@title="category"]')[0].get('value')
            and statusWit is not None
            and categoryWit is not None
        ):
            #print(run, statusWit, categoryWit)
            result.findall('column[@title="status"]')[0].set('value', statusWit)
            result.findall('column[@title="category"]')[0].set('value', categoryWit)
        # Clean-up an entry that can be inferred by table-generator automatically, avoids path confusion
        del result.attrib['logfile']
    filename = resultFile + '.merged.xml.bz2'
    print (' ' + filename)
    # Fall back to the project's BZ2FileHack where BZ2File cannot be wrapped
    # by TextIOWrapper (old Pythons lacking the 'writable' attribute).
    open_func = bz2.BZ2File if hasattr(bz2.BZ2File, 'writable') else util.BZ2FileHack
    with io.TextIOWrapper(open_func(filename, 'wb'), encoding='utf-8') as xml_file:
        # NOTE(review): the two replace() patterns read identically here; the
        # whitespace widths look mangled in this copy — confirm.
        xml_file.write(xml_to_string(resultXML).replace(' \n','').replace(' \n',''))
def main(argv=None):
    """Merge witness-validation results into a verification result XML.

    Usage: <results-xml> [<witness-xml>]* [--no-overwrite-status]
    Copies every witness column (prefixed 'wit<i>_') into the matching run,
    chooses the best witness verdict w.r.t. the expected result, optionally
    overwrites the run's status/category with it, and writes
    <results-xml>.merged.xml.bz2.
    """
    if argv is None:
        argv = sys.argv
    if len(argv) < 3:
        sys.exit('Usage: ' + argv[0] +
                 ' <results-xml> [<witness-xml>]* [--no-overwrite-status].\n')
    resultFile = argv[1]
    witnessFiles = []
    isOverwrite = True
    # Everything after the result file that is not a flag is a witness file.
    for i in range(2, len(argv)):
        if len(argv) > i and not argv[i].startswith('--'):
            witnessFiles.append(argv[i])
        if argv[i] == '--no-overwrite-status':
            isOverwrite = False
    if not os.path.exists(resultFile) or not os.path.isfile(resultFile):
        sys.exit('File {0} does not exist.'.format(repr(resultFile)))
    resultXML = tablegenerator.parse_results_file(resultFile)
    witnessSets = []
    for witnessFile in witnessFiles:
        if not os.path.exists(witnessFile) or not os.path.isfile(witnessFile):
            sys.exit('File {0} does not exist.'.format(repr(witnessFile)))
        witnessXML = tablegenerator.parse_results_file(witnessFile)
        witnessSets.append(getWitnesses(witnessXML))
    # NOTE(review): witnessXML here is the one from the LAST loop iteration,
    # and this raises NameError when no witness files were given — confirm
    # that this matches the intended use of the script.
    resultXML.set(
        'options', '' + resultXML.get('options', default='') + ' [[ ' +
        witnessXML.get('options', default='') + ' ]]')
    resultXML.set(
        'date', '' + resultXML.get('date', default='') + ' [[ ' +
        witnessXML.get('date', default='') + ' ]]')
    for result in resultXML.findall('run'):
        run = result.get('name')
        basename = os.path.basename(run)
        # Only runs the verifier got right are candidates for merging.
        if 'correct' == result.findall('column[@title="category"]')[0].get(
                'value'):
            # NOTE(review): statusVer/categoryVer appear unused below.
            statusVer = result.findall('column[@title="status"]')[0]
            categoryVer = result.findall('column[@title="category"]')[0]
            properties = result.get('properties').split(' ')
            expected_result = benchexec.result.satisfies_file_property(
                basename, properties)
            statusWit, categoryWit = (None, None)
            i = 0
            for witnessSet in witnessSets:
                i = i + 1
                witness = witnessSet.get(run, None)
                # copy data from witness
                if witness is not None:
                    for column in witness:
                        newColumn = ET.Element(
                            'column', {
                                'title':
                                'wit' + str(i) + '_' + column.get('title'),
                                'value': column.get('value'),
                                'hidden': column.get('hidden', 'false')
                            })
                        result.append(newColumn)
                    witnessSet.pop(run)
                    statusWitNew, categoryWitNew = getWitnessResult(
                        witness, expected_result)
                    # Prefer a witness verdict that matches the expected
                    # result; otherwise keep the first one seen.
                    if expected_result == False:
                        if statusWitNew.startswith(
                                'false(') or statusWit is None:
                            statusWit, categoryWit = (statusWitNew,
                                                      categoryWitNew)
                    if expected_result == True:
                        if statusWitNew.startswith(
                                'true') or statusWit is None:
                            statusWit, categoryWit = (statusWitNew,
                                                      categoryWitNew)
            # Overwrite status with status from witness
            if isOverwrite and statusWit is not None and categoryWit is not None:
                result.findall('column[@title="status"]')[0].set(
                    'value', statusWit)
                result.findall('column[@title="category"]')[0].set(
                    'value', categoryWit)
        # Clean-up an entry that can be inferred by table-generator automatically, avoids path confusion
        del result.attrib['logfile']
    filename = resultFile + '.merged.xml.bz2'
    print(' ' + filename)
    # Fall back to the project's BZ2FileHack where BZ2File cannot be wrapped
    # by TextIOWrapper (old Pythons lacking the 'writable' attribute).
    open_func = bz2.BZ2File if hasattr(bz2.BZ2File, 'writable') else util.BZ2FileHack
    with io.TextIOWrapper(open_func(filename, 'wb'), encoding='utf-8') as xml_file:
        # NOTE(review): the two replace() patterns read identically here; the
        # whitespace widths look mangled in this copy — confirm.
        xml_file.write(
            xml_to_string(resultXML).replace(' \n', '').replace(' \n', ''))