def test_add_suite(self):
    """Adding two distinct suites to a JUnitXml yields a length of 2."""
    xml = JUnitXml()
    for label in ("suite1", "suite2"):
        xml.add_testsuite(TestSuite(label))
    self.assertEqual(len(xml), 2)
def analysis():
    """Merge all JUnit XML reports under ./test_results and collect failure info.

    Appends a FailureElement to each test case, then gathers a dict of
    (suite, case, decoded failure message) for every failure child found.

    Returns:
        list[dict]: one entry per failure element.
    """
    # Fix: removed the unused `skipped = SkippedElement()` local and the
    # no-op `testcase.child(FailureElement)` call whose result was discarded.
    failure = FailureElement()
    merged = JUnitXml()
    for filename in os.listdir('./test_results'):
        # Only get xml files
        if not filename.endswith('.xml'):
            continue
        fullname = os.path.join('./test_results', filename)
        merged += JUnitXml.fromfile(fullname)
    output = []
    for suite in merged:  # handle suites
        for testcase in suite:
            # NOTE(review): the same FailureElement instance is appended to
            # every test case — confirm this sharing is intentional.
            testcase.append(failure)
            for fail in testcase.iterchildren(FailureElement):
                detail = {
                    "test suite": testcase.classname,
                    "test case": testcase.name,
                    "failure message": html_decode(fail.message),
                }
                output.append(detail)
    return output
def parse_xml(xmlpath: Path) -> (str, float, float):
    """
    Load junit files from xmlpath, then parse them for point values
    and return simplified output.
    """
    xml = JUnitXml.fromfile(str(
        xmlpath.absolute() / "TEST-junit-vintage.xml")) + JUnitXml.fromfile(
        str(xmlpath.absolute() / "TEST-junit-jupiter.xml"))
    earned_points = 0
    total_points = 0
    output = ""
    for suite in xml:
        for case in suite:
            point_str = points_regex.search(case.system_out)
            point_out = ""
            # Bug fix: the original dereferenced point_str.groups() BEFORE
            # checking `if point_str`, crashing with AttributeError whenever
            # a case's system_out had no point annotation.
            if point_str:
                point_val = float(point_str.groups()[0])
                # add point attribute to case, then determine value of test
                # NOTE(review): assigning FloatAttr to the instance looks like
                # it should be a class-level attribute declaration — confirm.
                case.points = FloatAttr("points")
                total_points += point_val
                if case.result is None:
                    case.points = point_val
                    earned_points += point_val
                else:
                    case.points = 0
                point_out = f"{case.points:1g} points"
def add_junit_failure(xml: junitparser.JUnitXml, test: Path, message: str,
                      starttime: datetime.datetime):
    """Record a failed test binary as a one-case suite on the given report."""
    case = junitparser.TestCase(name=test.name)
    case.result = junitparser.Failure(message=str(message))
    case.time = (datetime.datetime.utcnow() - starttime).total_seconds()
    wrapper = junitparser.TestSuite(name=test.name)
    wrapper.add_testcase(case)
    xml.add_testsuite(wrapper)
def run_subdir(qemu: boot_cheribsd.CheriBSDInstance, subdir: Path,
               xml: junitparser.JUnitXml, successful_tests: list,
               failed_tests: list, build_dir: Path):
    """Run every Qt `tst_*` binary found under *subdir* inside the QEMU guest.

    Each binary writes its JUnit XML to /build/test.xml in the guest; the
    parsed suite is validated and appended to *xml*. The binary path is added
    to *successful_tests* or *failed_tests*, and failures additionally get a
    synthetic failure entry via add_junit_failure().
    """
    tests = []
    for root, dirs, files in os.walk(str(subdir), topdown=True):
        for name in files:
            # Only test binaries (tst_*); skip core dumps they may leave behind.
            if not name.startswith("tst_") or name.endswith(".core"):
                continue
            tests.append(Path(root, name))
        # Ignore .moc and .obj directories:
        dirs[:] = [d for d in dirs if not d.startswith(".")]
    # Ensure that we run the tests in a reproducible order
    for f in sorted(tests):
        starttime = datetime.datetime.utcnow()
        try:
            # TODO: -o /path/to/file -junitxml
            # Remove any stale report first so a crashed test can't reuse it.
            qemu.checked_run(
                "rm -f /build/test.xml && {} -o /build/test.xml,junitxml -o -,txt -v1"
                .format(f), timeout=10)
            endtime = datetime.datetime.utcnow()
            successful_tests.append(f)
            # Flush the guest file so the host-side build_dir copy is complete.
            qemu.checked_run("fsync /build/test.xml")
            test_xml = build_dir / "test.xml"
            qt_test = junitparser.JUnitXml.fromfile(str(test_xml))
            # A single-suite file parses to TestSuite; anything else is unexpected.
            if not isinstance(qt_test, junitparser.TestSuite):
                raise ValueError(
                    "Got unexpected parse result loading JUnit Xml: " +
                    qt_test.tostring())
            # Sanity-check that the report belongs to the binary we just ran.
            if qt_test.name.lower() != f.name:
                raise ValueError(
                    "Got unexpected test suite name: '{}' instead of '{}'".
                    format(qt_test.name, f.name))
            # Fall back to wall-clock duration if the suite reported none.
            if not qt_test.time:
                qt_test.time = (endtime - starttime).total_seconds()
            boot_cheribsd.info("Results for ", f.name, ": ", qt_test)
            xml.add_testsuite(qt_test)
        except Exception as e:
            if isinstance(e, boot_cheribsd.CheriBSDCommandFailed):
                boot_cheribsd.failure("Failed to run ", f.name, ": ", str(e),
                                      exit=False)
            else:
                boot_cheribsd.failure("Error loading JUnit result for",
                                      f.name, ": ", str(e), exit=False)
            failed_tests.append(f)
            add_junit_failure(xml, f, str(e), starttime)
            # Kill the process that timed out:
            qemu.sendintr()
            qemu.expect_prompt(timeout=60)
def compare_artifacts(dir1, dir2, run_name1, run_name2):
    """Compare artifacts from dir1 with dir2 and store results in out_dir.

    Returns:
        tuple: (JUnitXml reporter with one suite per sub-directory,
                pandas DataFrame of the per-metric comparison rows).
    """
    logger.info("Comparing artifacts from %s with %s", dir1, dir2)
    sub_dirs_1 = get_sub_dirs(dir1)
    over_all_pass = True
    aggregates = ["mean", "max", "min"]
    header = ["run_name1", "run_name2", "test_suite", "metric", "run1", "run2",
              "percentage_diff", "expected_diff", "result", "message"]
    rows = [header]
    reporter = JUnitXml()
    for sub_dir1 in sub_dirs_1:
        with Timer("Comparison test suite {} execution time".format(sub_dir1)) as t:
            # Bug fix: the suite label previously concatenated run_name1 with
            # itself ("run1 and run1"); it should name both runs.
            comp_ts = CompareTestSuite(sub_dir1, run_name1 + " and " + run_name2, t)
            metrics_file1, metrics_file2 = get_log_file(dir1, sub_dir1), get_log_file(dir2, sub_dir1)
            if not (metrics_file1 and metrics_file2):
                msg = "Metrics monitoring logs are not captured for {} in either " \
                      "of the runs.".format(sub_dir1)
                logger.info(msg)
                rows.append([run_name1, run_name2, sub_dir1,
                             "metrics_log_file_availability",
                             "NA", "NA", "NA", "NA", "pass", msg])
                comp_ts.add_test_case("metrics_log_file_availability", msg, "skip")
                continue
            metrics_from_file1 = pd.read_csv(metrics_file1)
            metrics_from_file2 = pd.read_csv(metrics_file2)
            metrics, diff_percents = taurus_reader.get_compare_metric_list(dir1, sub_dir1)
            for col, diff_percent in zip(metrics, diff_percents):
                for agg_func in aggregates:
                    name = "{}_{}".format(agg_func, str(col))
                    val1 = get_aggregate_val(metrics_from_file1, agg_func, col)
                    val2 = get_aggregate_val(metrics_from_file2, agg_func, col)
                    diff, pass_fail, msg = compare_values(val1, val2, diff_percent,
                                                          run_name1, run_name2)
                    # Once any metric fails, the overall result stays failed.
                    if over_all_pass:
                        over_all_pass = pass_fail == "pass"
                    result_row = [run_name1, run_name2, sub_dir1, name, val1, val2,
                                  diff, diff_percent, pass_fail, msg]
                    rows.append(result_row)
                    test_name = "{}: diff_percent < {}".format(name, diff_percent)
                    comp_ts.add_test_case(test_name, msg, pass_fail)
            comp_ts.ts.time = t.diff()
            comp_ts.ts.update_statistics()
            reporter.add_testsuite(comp_ts.ts)
    dataframe = pd.DataFrame(rows[1:], columns=rows[0])
    return reporter, dataframe
def generate_junitxml_merged_report(test_results_dir):
    """
    Merge all junitxml generated reports in a single one.

    :param test_results_dir: output dir containing the junitxml reports to merge.
    """
    merged = JUnitXml()
    for dirpath, _, filenames in os.walk(test_results_dir):
        for name in filenames:
            if name.endswith("results.xml"):
                merged += JUnitXml.fromfile(os.path.join(dirpath, name))
    merged.write("{0}/test_report.xml".format(test_results_dir), pretty=True)
def parse(path: str) -> Union[str, Any]:
    """Parse a JUnit XML file and return the result or the error object.

    Errors are returned rather than raised so the caller decides how to
    report them.
    """
    if not os.path.exists(path):
        return FileNotFoundError('File does not exist.')
    if os.stat(path).st_size == 0:
        return Exception('File is empty.')
    try:
        if drop_testcases:
            builder = DropTestCaseBuilder()
            return JUnitXml.fromfile(path, parse_func=builder.parse)
        return JUnitXml.fromfile(path)
    # Bug fix: `except BaseException` also captured KeyboardInterrupt and
    # SystemExit, turning Ctrl-C into a "parse result". Catch Exception only.
    except Exception as e:
        return e
def generate_junitxml_merged_report(test_results_dir):
    """
    Merge all junitxml generated reports in a single one.

    :param test_results_dir: output dir containing the junitxml reports to merge.
    """
    merged_xml = None
    for dir, _, files in os.walk(test_results_dir):
        for file in files:
            if file.endswith("results.xml"):
                if not merged_xml:
                    merged_xml = JUnitXml.fromfile(os.path.join(dir, file))
                else:
                    merged_xml += JUnitXml.fromfile(os.path.join(dir, file))
    # Bug fix: when no "results.xml" file was found, merged_xml stayed None
    # and `.write()` raised AttributeError. Skip writing in that case.
    if merged_xml is not None:
        merged_xml.write("{0}/test_report.xml".format(test_results_dir), pretty=True)
def test_construct_xml(self):
    """A suite with one case serializes into the expected element tree."""
    parent = TestSuite()
    parent.name = "suite1"
    child = TestCase()
    child.name = "case1"
    parent.add_testcase(child)
    report = JUnitXml()
    report.add_testsuite(parent)
    self.assertEqual(report._elem.tag, "testsuites")
    suites = report._elem.findall("testsuite")
    self.assertEqual(len(suites), 1)
    self.assertEqual(suites[0].attrib["name"], "suite1")
    cases = suites[0].findall("testcase")
    self.assertEqual(len(cases), 1)
    self.assertEqual(cases[0].attrib["name"], "case1")
def test_construct_xml(self):
    """Constructing JUnitXml from objects produces matching XML elements."""
    suite = TestSuite()
    suite.name = 'suite1'
    case = TestCase()
    case.name = 'case1'
    suite.add_testcase(case)
    report = JUnitXml()
    report.add_testsuite(suite)
    root = report._elem
    self.assertEqual(root.tag, 'testsuites')
    suite_elems = root.findall('testsuite')
    self.assertEqual(len(suite_elems), 1)
    self.assertEqual(suite_elems[0].attrib['name'], 'suite1')
    case_elems = suite_elems[0].findall('testcase')
    self.assertEqual(len(case_elems), 1)
    self.assertEqual(case_elems[0].attrib['name'], 'case1')
def test_fromfile_without_testsuites_tag(self):
    """fromfile() accepts a report whose root is <testsuite> (no wrapper)."""
    text = """<?xml version="1.0" encoding="UTF-8"?> <testsuite name="JUnitXmlReporter.constructor" errors="0" skipped="1" tests="3" failures="1" time="0.006" timestamp="2013-05-24T10:23:58"> <properties> <property name="java.vendor" value="Sun Microsystems Inc." /> <property name="compiler.debug" value="on" /> <property name="project.jdk.classpath" value="jdk.classpath.1.6" /> </properties> <testcase classname="JUnitXmlReporter.constructor" name="should default path to an empty string" time="0.006"> <failure message="test failure">Assertion failed</failure> </testcase> <testcase classname="JUnitXmlReporter.constructor" name="should default consolidate to true" time="0"> <skipped /> </testcase> <testcase classname="JUnitXmlReporter.constructor" name="should default useDotNotation to true" time="0" /> </testsuite>"""
    with open(self.tmp, 'w') as f:
        f.write(text)
    parsed = JUnitXml.fromfile(self.tmp)
    cases = list(iter(parsed))
    properties = list(iter(parsed.properties()))
    self.assertEqual(len(properties), 3)
    self.assertEqual(len(cases), 3)
    self.assertEqual(parsed.name, 'JUnitXmlReporter.constructor')
    self.assertEqual(parsed.tests, 3)
    expected_results = [Failure, Skipped, type(None)]
    for case, expected in zip(parsed, expected_results):
        self.assertIsInstance(case.result, expected)
def __init__(self, path):
    """Load a JUnit XML report from *path* into ``self.junit_results``.

    Args:
        path (str): Path to XML file
    """
    self.junit_results = JUnitXml.fromfile(path)
def subset(client, reports):
    """Feed class/testcase paths from JUnit reports to the subset client.

    Reads each report with the module-level parse_func and registers every
    case that has both a classname and a name.
    """
    for report in reports:
        suite = JUnitXml.fromfile(report, parse_func)
        for case in suite:
            cls_name = case._elem.attrib.get("classname")
            name = case._elem.attrib.get('name')
            # Bug fix: attrib.get() returns None for a missing attribute, and
            # None != '' is True, so cases without a classname/name slipped
            # through. Require both values to be truthy instead.
            if cls_name and name:
                client.test_path([{
                    'type': 'class',
                    'name': cls_name
                }, {
                    'type': 'testcase',
                    'name': name
                }])


def formatter(x: TestPath):
    """Render a TestPath as ``-s '<class>' -t '<case>'`` CLI arguments."""
    cls_name = ''
    case = ''
    for path in x:
        t = path['type']
        if t == 'class':
            cls_name = path['name']
        if t == 'testcase':
            case = path['name']
    if cls_name != '' and case != '':
        return "-s '{}' -t '{}'".format(cls_name, case)
    return ''
def parse_results(self, result_file):
    """Validate the path, parse the JUnit file, and transform its tests.

    Raises:
        JUnitHandlerException: if the file cannot be parsed.
    """
    validated = self._validate_path(result_file)
    try:
        xml = JUnitXml.fromfile(validated)
    except TypeError as err:
        raise JUnitHandlerException(err)
    return self._transform_tests(xml)
def testcases(reports: List[str]):
    """Yield JSON-encoded case events for every test case in *reports*.

    Per-report parse errors are collected and raised together at the end so
    one broken file does not hide results from the others.
    """
    exceptions = []
    for report in reports:
        try:
            # To understand JUnit XML format, https://llg.cubic.org/docs/junit/ is helpful
            # TODO: robustness: what's the best way to deal with broken XML file, if any?
            xml = JUnitXml.fromfile(
                report, self.junitxml_parse_func)
            if isinstance(xml, JUnitXml):
                testsuites = list(xml)
            elif isinstance(xml, TestSuite):
                testsuites = [xml]
            else:
                # Bug fix: was `assert False`, which is stripped under -O and
                # would leave `testsuites` unbound; raise explicitly instead.
                raise TypeError(
                    "Unexpected parse result type: {}".format(type(xml)))
            for suite in testsuites:
                for case in suite:
                    yield json.dumps(CaseEvent.from_case_and_suite(self.path_builder, case, suite, report))
        except Exception as e:
            exceptions.append(Exception(
                "Failed to process a report file: {}".format(report), e))
    if len(exceptions) > 0:
        # defer XML parsing exceptions
        raise Exception(exceptions)
def main():
    # Run pytest on each file in ./cases, writing JUnit XML into a temp dir,
    # then inspect the generated report.
    tmpdir = tempfile.mkdtemp(prefix='test_reports')
    print(tmpdir)
    path = 'cases'
    cases = os.listdir(path)
    for case in cases:
        #print(case)
        # NOTE(review): debug filter — only 'c.py' is processed; confirm this
        # is still intended before shipping.
        if case != 'c.py':
            continue
        xml_file = os.path.join(tmpdir, f"{case}.xml")
        cmd_list = ['pytest', os.path.join(path, case), '--junitxml', xml_file]
        code, stdout, stderr = qxx(cmd_list)
        if code != 0:
            print(f"Execution failed with {code}")
            print(stdout)
            print(stderr)
            exit(1)
        with open(xml_file) as fh:
            print(fh.read())
        return
        # NOTE(review): everything below is unreachable because of the
        # `return` above — presumably leftover from debugging.
        xml = JUnitXml.fromfile(xml_file)
        for suite in xml:
            #print(suite)
            for case in suite:
                #print(type(case))
                print(case.name)
                if case.result:
                    print(case.result)
                    if isinstance(case.result, Failure):
                        print(' failure ', case.result.message)
                    if isinstance(case.result, Skipped):
                        print(' skipped ', case.result.message)
def test_fromstring_no_testsuites(self):
    """Parsing a bare <testsuite> string yields one suite with zero time."""
    text = """<testsuite name="suitename1"> <testcase name="testname1"> </testcase></testsuite>"""
    parsed = JUnitXml.fromstring(text)
    self.assertEqual(parsed.time, 0)
    self.assertEqual(len(parsed), 1)
def process_xml(self, path: Path) -> None:
    """Parse *path* and register each contained suite via add_tests()."""
    print(f"Processing {str(path)!r}")
    parsed = JUnitXml.fromfile(path)
    # A single-suite file parses directly to TestSuite; normalize to iterable.
    suite_list = [parsed] if isinstance(parsed, TestSuite) else parsed
    for entry in suite_list:
        self.add_tests(entry)
def summarise_junit(file_path: str) -> bool:
    """Parse jUnit output and show a summary.

    Preprocesses input to increase the chances of getting valid XML. Returns
    True if there were no failures or errors, raises exception on IOError or
    XML parse errors."""
    with open(file_path, 'r') as file:
        # Skip log text before and after the XML, escape some of the test
        # output to get valid XML
        lines = StringIO()
        take = False
        for line in file:
            if line.strip() == '<testsuite>':
                take = True
            if take:
                # Bug fix: the original `replace('<<', '<<')` was a no-op;
                # escape '<<' sequences so raw test output can't break the XML.
                lines.write(line.replace('<<', '&lt;&lt;'))
            if line.strip() == "</testsuite>":
                break
    xml = JUnitXml.fromstring(strip_ansi(lines.getvalue()))
    print("")
    print("Test summary")
    print("------------")
    print(f"tests: {xml.tests}")
    print(f"skipped: {xml.skipped}")
    print(f"failures: {xml.failures}")
    print(f"errors: {xml.errors}\n")
    return xml.failures == 0 and xml.errors == 0
def pytest_collection_modifyitems(session, config, items):
    """
    Add Polarion ID property to test cases that are marked with one.

    Additionally, when --re-trigger-failed-tests points at a previous JUnit
    report, deselect every collected item that passed in that report.
    """
    re_trigger_failed_tests = ocsci_config.RUN.get("re_trigger_failed_tests")
    if re_trigger_failed_tests:
        junit_report = JUnitXml.fromfile(re_trigger_failed_tests)
        cases_to_re_trigger = []
        for suite in junit_report:
            # NOTE(review): a truthy `result` covers failures/errors and also
            # skipped cases — confirm skipped tests should be re-triggered too.
            cases_to_re_trigger += [_case.name for _case in suite if _case.result]
    # Iterate over a copy since items may be removed during iteration.
    for item in items[:]:
        if re_trigger_failed_tests and item.name not in cases_to_re_trigger:
            log.info(
                f"Test case: {item.name} will be removed from execution, "
                "because of you provided --re-trigger-failed-tests parameter "
                "and this test passed in previous execution from the report!"
            )
            items.remove(item)
        try:
            marker = item.get_closest_marker(name="polarion_id")
            if marker:
                # marker.args[0] may be absent — handled by the IndexError below.
                polarion_id = marker.args[0]
                if polarion_id:
                    item.user_properties.append(("polarion-testcase-id", polarion_id))
        except IndexError:
            log.warning(
                f"polarion_id marker found with no value for "
                f"{item.name} in {item.fspath}",
                exc_info=True,
            )
def test_write_results_vulns():
    """Test parsing results and writing test-output.xml"""
    # Open the Veracode SCA JSON results
    with open("example-dotnet.json", "r") as sca_results:
        data = json.load(sca_results)
    # Include all CVSS in the output
    parsed_data = parse_sca_json(data, 0)
    # Create the test-output.xml
    write_output("dotnet vulns", parsed_data, 0)
    # Get test-output.xml
    xml = JUnitXml.fromfile("test-output.xml")
    # Assert there are 15 vulnerabilities in the output
    total_cases = sum(1 for suite in xml for _case in suite)
    assert total_cases == 15
    # Assert there are 15 failures reported in the Test Suite
    assert xml.failures == 15
def observe_tests(self):
    """Collect test results from every JUnit XML report.

    Populates ``self.test_results`` keyed by lower-cased full test name,
    dumps all results to ``self.path_to_tests_results``, writes failing test
    names to ``self.trigger_tests_path``, and seeds
    ``self.tests_to_exclude_path`` if it does not exist yet.

    Returns:
        dict: the collected test results.
    """
    self.test_results = {}
    for report in self.get_xml_files():
        try:
            suite = JUnitXml.fromfile(report)
            for case in suite:
                test = TestResult(case, suite.name, report)
                self.test_results[test.full_name.lower()] = test
        except Exception as e:
            # Best-effort: a malformed report is logged and skipped.
            # (Removed the redundant `pass` that followed this print.)
            print(e, report)
    with open(self.path_to_tests_results, "w") as f:
        # Idiom: comprehension instead of map(lambda ...).
        json.dump([r.as_dict() for r in self.test_results.values()], f)
    # De-duplicated names of tests that did not pass.
    tests = list({
        t.full_name
        for t in self.test_results.values()
        if not t.is_passed()
    })
    with open(self.trigger_tests_path, 'w') as f:
        json.dump(tests, f)
    if not os.path.exists(self.tests_to_exclude_path):
        with open(self.tests_to_exclude_path, 'w') as f:
            json.dump(tests, f)
    return self.test_results
def parse(report: str) -> Generator[CaseEventType, None, None]:
    """Yield case events parsed from one JUnit XML *report* file.

    On a parse error, prints a warning and yields nothing for that file.
    """
    # To understand JUnit XML format, https://llg.cubic.org/docs/junit/ is helpful
    # TODO: robustness: what's the best way to deal with broken XML file, if any?
    try:
        xml = JUnitXml.fromfile(report, f)
    except Exception as e:
        # Bug fix: the message hard-coded "(unknown)" even though
        # filename=report was passed to .format(); interpolate the filename
        # so the warning is actionable.
        click.echo(click.style(
            "Warning: error reading JUnitXml file {filename}: {error}"
            .format(filename=report, error=e), fg="yellow"), err=True)
        # `JUnitXml.fromfile()` will raise `JUnitXmlError` and other lxml related errors if the file has wrong format.
        # https://github.com/weiwei/junitparser/blob/master/junitparser/junitparser.py#L321
        return
    if isinstance(xml, JUnitXml):
        testsuites = list(xml)
    elif isinstance(xml, TestSuite):
        testsuites = [xml]
    else:
        raise InvalidJUnitXMLException(filename=report)
    for suite in testsuites:
        for case in suite:
            yield CaseEvent.from_case_and_suite(
                self.path_builder, case, suite, report)
def test_iadd_same_suite(self):
    """In-place merge of two reports with equal empty suites keeps one."""
    left = JUnitXml()
    left.add_testsuite(TestSuite())
    right = JUnitXml()
    right.add_testsuite(TestSuite())
    left += right
    self.assertEqual(len(left), 1)
def test_add(self):
    """`+` on reports holding distinct suites yields both suites."""
    first = JUnitXml()
    first.add_testsuite(TestSuite("suite1"))
    second = JUnitXml()
    second.add_testsuite(TestSuite("suite2"))
    merged = first + second
    self.assertEqual(len(merged), 2)
def test_xml_statistics(self):
    """update_statistics() runs cleanly on a merged report."""
    left = JUnitXml()
    left.add_testsuite(TestSuite())
    right = JUnitXml()
    right.add_testsuite(TestSuite())
    merged = left + right
    merged.update_statistics()
def gen_results_summary(results_dir, output_fn=None, merge_fn=None,
                        verbose=False, print_section=False,
                        results_file='results.xml'):
    """Scan a results directory and generate a summary file.

    Writes the summary to *output_fn* (or stdout), optionally merges all
    reports into *merge_fn*, and returns the number of reports processed.
    """
    reports = []
    combined = JUnitXml()
    nr_files = 0
    for filename in get_results(results_dir, results_file):
        reports.append(JUnitXml.fromfile(filename))
    if len(reports) == 0:
        return 0
    out_f = sys.stdout
    try:
        if output_fn is not None:
            out_f = open(output_fn, "w")
        props = copy.deepcopy(reports[0].child(Properties))
        ltm = check_for_ltm(results_dir, props)
        print_header(out_f, props)
        # Idiom fix: named functions instead of lambda assignments (PEP 8).
        if ltm:
            def sort_by(ts):
                return ts.hostname
        else:
            def sort_by(ts):
                return parse_timestamp(ts.timestamp)
        # Small runs are always reported verbosely.
        if total_tests(reports) < 30:
            verbose = True
        for testsuite in sorted(reports, key=sort_by):
            print_summary(out_f, testsuite, verbose, print_section)
            combined.add_testsuite(testsuite)
            nr_files += 1
        out_f.write('Totals: %d tests, %d skipped, %d failures, %d errors, %ds\n' \
                    % sum_testsuites(reports))
        print_trailer(out_f, props)
    finally:
        # Bug fix: the summary file was opened but never closed (leak);
        # close it unless we are writing to stdout.
        if out_f is not sys.stdout:
            out_f.close()
    if merge_fn is not None:
        combined.update_statistics()
        # Write-then-rename so an existing merge file is never half-written.
        combined.write(merge_fn + '.new')
        if os.path.exists(merge_fn):
            os.rename(merge_fn, merge_fn + '.bak')
        os.rename(merge_fn + '.new', merge_fn)
    return nr_files
def parse_file(path: str) -> List[TestCase]:
    """Parse one JUnit XML file into test cases; warn and return [] on error."""
    try:
        return convert_junit_to_testcases(JUnitXml.fromfile(path))
    except Exception as err:
        # Best-effort: a broken report should not abort the whole run.
        # Bug fix: ":Warning:" is not a valid rich emoji shortcode (codes are
        # lowercase); ":warning:" renders the intended warning sign.
        rich.print(
            f":warning: [yellow]Warning[/yellow]: Failed to read {path}: {err}"
        )
        return []
def merge_xml_files(test_file_pattern):
    """Merge matching JUnit files into one report and stash the originals."""
    merged = JUnitXml()
    staging_dir = os.environ['BUILD_ARTIFACTSTAGINGDIRECTORY']
    for test_file in glob.glob(test_file_pattern):
        merged += JUnitXml.fromfile(test_file)
        # Move file to harvest dir to save state and not publish the same test twice
        shutil.move(
            test_file,
            os.path.join(staging_dir, "harvest", os.path.basename(test_file)))
    if merged.tests > 0:
        # Merge all files into a single file for cleaner output
        output_file_name = f"test-results-{os.environ['SCENARIONAME']}-{os.environ['DISTRONAME']}.xml"
        merged.write(os.path.join(staging_dir, output_file_name))
    else:
        logger.info(f"No test files found for pattern: {test_file_pattern}")
def parse_tests(xml_file_object):
    """Extract non-skipped test cases with a classname as name/status dicts."""
    report = JUnitXml.fromfile(xml_file_object)
    results = []
    for testcase in report:
        status = _get_status(testcase)
        # Guard clause: drop unclassed and skipped cases.
        if testcase.classname == "" or status == "skipped":
            continue
        results.append({"name": _get_name(testcase), "status": status})
    return results