def test_iadd_same_suite(self):
    """In-place merge of two results whose suites share a name keeps one suite."""
    left = JUnitXml()
    left.add_testsuite(TestSuite())
    right = JUnitXml()
    right.add_testsuite(TestSuite())
    left += right
    self.assertEqual(len(left), 1)
def test_xml_statistics(self):
    """update_statistics() on a merged result must not raise."""
    first = JUnitXml()
    first.add_testsuite(TestSuite())
    second = JUnitXml()
    second.add_testsuite(TestSuite())
    merged = first + second
    merged.update_statistics()
def test_add(self):
    """Adding results with differently named suites keeps both suites."""
    left = JUnitXml()
    left.add_testsuite(TestSuite("suite1"))
    right = JUnitXml()
    right.add_testsuite(TestSuite("suite2"))
    merged = left + right
    self.assertEqual(len(merged), 2)
def test_add_suite(self):
    """Each add_testsuite() call appends one suite to the result."""
    result = JUnitXml()
    for suite_name in ("suite1", "suite2"):
        result.add_testsuite(TestSuite(suite_name))
    self.assertEqual(len(result), 2)
def write_output(target: str, results: list, min_cvss: int) -> None:
    """Write scan results in junitxml format"""
    # Sentinel entries that signal a clean scan rather than a finding.
    benign_entries: List = [
        {
            "Results": "No vulnerabilities."
        },
        {
            "Results": f"No vulnerabilities >= the min CVSS score {min_cvss}."
        },
    ]
    suite = TestSuite(f"{target}")
    for entry in results:
        if entry in benign_entries:
            case = TestCase("No vulnerabilities")
            # NOTE(review): a plain dict is assigned here while the failure
            # branch assigns a list of Failure objects — confirm intended.
            case.result = entry
        else:
            case = TestCase(entry["Vulnerable Library"])
            case.name = (entry["Vulnerable Library"] + " - "
                         + entry["Vulnerability"] + " - "
                         + "CVSS " + str(entry["CVSS"]))
            case.result = [Failure(entry)]
        suite.add_testcase(case)
    report = JUnitXml()
    report.add_testsuite(suite)
    report.write("test-output.xml")
def analysis():
    """Merge every XML report under ./test_results and collect failure details.

    Returns:
        list[dict]: one entry per failure child on each test case, carrying the
        suite classname, case name and HTML-decoded failure message.
    """
    # FIX: removed the unused `skipped = SkippedElement()` local and the
    # no-op `testcase.child(FailureElement)` call whose result was discarded.
    failure = FailureElement()
    merged = JUnitXml()
    for filename in os.listdir('./test_results'):
        # Only get xml files
        if not filename.endswith('.xml'):
            continue
        merged += JUnitXml.fromfile(os.path.join('./test_results', filename))
    output = []
    for suite in merged:
        for testcase in suite:
            # NOTE(review): the same FailureElement instance is appended to
            # every test case — confirm a fresh element per case is not needed.
            testcase.append(failure)
            for fail in testcase.iterchildren(FailureElement):
                output.append({
                    "test suite": testcase.classname,
                    "test case": testcase.name,
                    "failure message": html_decode(fail.message),
                })
    return output
def test_write_noarg(self):
    """write() without a filepath raises when none was remembered."""
    case = TestCase()
    case.name = 'case1'
    suite = TestSuite()
    suite.name = 'suite1'
    suite.add_testcase(case)
    report = JUnitXml()
    report.add_testsuite(suite)
    with self.assertRaises(JUnitXmlError):
        report.write()
def compare_artifacts(dir1, dir2, run_name1, run_name2):
    """Compare monitoring artifacts of two runs and report the differences.

    Args:
        dir1: artifacts directory of the first run.
        dir2: artifacts directory of the second run.
        run_name1: display name of the first run.
        run_name2: display name of the second run.

    Returns:
        tuple: (JUnitXml report with one suite per sub-directory,
        pandas.DataFrame of all per-metric comparison rows).
    """
    logger.info("Comparing artifacts from %s with %s", dir1, dir2)
    sub_dirs_1 = get_sub_dirs(dir1)
    over_all_pass = True  # NOTE(review): tracked but not returned — confirm needed.
    aggregates = ["mean", "max", "min"]
    header = ["run_name1", "run_name2", "test_suite", "metric", "run1", "run2",
              "percentage_diff", "expected_diff", "result", "message"]
    rows = [header]
    reporter = JUnitXml()
    for sub_dir1 in sub_dirs_1:
        with Timer("Comparison test suite {} execution time".format(sub_dir1)) as t:
            # BUG FIX: the description previously read
            # `run_name1 + " and " + run_name1` — it should name both runs.
            comp_ts = CompareTestSuite(sub_dir1, run_name1 + " and " + run_name2, t)
            metrics_file1, metrics_file2 = get_log_file(dir1, sub_dir1), get_log_file(dir2, sub_dir1)
            if not (metrics_file1 and metrics_file2):
                # Missing logs are recorded as a skipped check, not a failure.
                msg = "Metrics monitoring logs are not captured for {} in either " \
                      "of the runs.".format(sub_dir1)
                logger.info(msg)
                rows.append([run_name1, run_name2, sub_dir1, "metrics_log_file_availability",
                             "NA", "NA", "NA", "NA", "pass", msg])
                comp_ts.add_test_case("metrics_log_file_availability", msg, "skip")
                continue
            metrics_from_file1 = pd.read_csv(metrics_file1)
            metrics_from_file2 = pd.read_csv(metrics_file2)
            metrics, diff_percents = taurus_reader.get_compare_metric_list(dir1, sub_dir1)
            for col, diff_percent in zip(metrics, diff_percents):
                for agg_func in aggregates:
                    name = "{}_{}".format(agg_func, str(col))
                    val1 = get_aggregate_val(metrics_from_file1, agg_func, col)
                    val2 = get_aggregate_val(metrics_from_file2, agg_func, col)
                    diff, pass_fail, msg = compare_values(val1, val2, diff_percent,
                                                          run_name1, run_name2)
                    # Once any metric fails, the overall verdict stays failed.
                    if over_all_pass:
                        over_all_pass = pass_fail == "pass"
                    result_row = [run_name1, run_name2, sub_dir1, name, val1, val2,
                                  diff, diff_percent, pass_fail, msg]
                    rows.append(result_row)
                    test_name = "{}: diff_percent < {}".format(name, diff_percent)
                    comp_ts.add_test_case(test_name, msg, pass_fail)
            comp_ts.ts.time = t.diff()
            comp_ts.ts.update_statistics()
            reporter.add_testsuite(comp_ts.ts)
    dataframe = pd.DataFrame(rows[1:], columns=rows[0])
    return reporter, dataframe
def __init__(self, json_or_xml):
    """Select the report formatter and file extension for the requested format."""
    if str(json_or_xml) == "xml":
        self.format = '.xml'
        self.formatter = JUnitXml()
    else:
        # TODO: Implement a JSON format compatible with SQUAD
        # This means using the python (l)xml ElementTree (<=> TestSuite())
        self.format = '.json'
        self.formatter = None
    self.test_factory = TestFactory()
def main():
    # Entry point: runs all compliance checks over the given commit range,
    # writes a junit report, and (optionally) reports results back to GitHub.
    args = parse_args()
    github_token = ''
    gh = None
    if args.github:
        # GH_TOKEN must be present in the environment when --github is used;
        # a missing variable raises KeyError here by design.
        github_token = os.environ['GH_TOKEN']
        gh = Github(github_token)
    # Status-only mode: set the commit status and exit immediately.
    if args.status and args.sha != None and args.repo and gh:
        set_status(gh, args.repo, args.sha)
        sys.exit(0)
    if not args.commits:
        sys.exit(1)
    suite = TestSuite("Compliance")
    docs = {}
    # Every ComplianceTest subclass is discovered and executed; each test
    # contributes one test case plus its documentation link.
    for Test in ComplianceTest.__subclasses__():
        t = Test(suite, args.commits)
        t.run()
        suite.add_testcase(t.case)
        docs[t.case.name] = t._doc
    xml = JUnitXml()
    xml.add_testsuite(suite)
    xml.update_statistics()
    xml.write('compliance.xml')
    if args.github:
        repo = gh.get_repo(args.repo)
        pr = repo.get_pull(int(args.pull_request))
        commit = repo.get_commit(args.sha)
        comment = "Found the following issues, please fix and resubmit:\n\n"
        comment_count = 0
        print("Processing results...")
        for case in suite:
            # Anything that neither passed nor was skipped is a finding.
            if case.result and case.result.type != 'skipped':
                comment_count += 1
                comment += ("## {}\n".format(case.result.message))
                comment += "\n"
                # These named checks produce prose, not code, so their
                # output is not wrapped in a fenced code block.
                if case.name not in ['Gitlint', 'Identity/Emails', 'License']:
                    comment += "```\n"
                comment += ("{}\n".format(case.result._elem.text))
                if case.name not in ['Gitlint', 'Identity/Emails', 'License']:
                    comment += "```\n"
                # One failing status per failing check, linking its docs.
                commit.create_status('failure', docs[case.name],
                                     'Verification failed', '{}'.format(case.name))
    # NOTE(review): within this view `pr`, `comment` and `comment_count` are
    # built but never consumed — the function likely continues past this
    # chunk (e.g. posting the PR comment); confirm against the full file.
def generate_junitxml_merged_report(test_results_dir):
    """
    Merge all junitxml generated reports in a single one.

    :param test_results_dir: output dir containing the junitxml reports to merge.
    """
    merged = JUnitXml()
    # Walk the whole tree; renamed loop vars so `dir`/`file` builtins
    # are no longer shadowed.
    for dirpath, _, filenames in os.walk(test_results_dir):
        for filename in filenames:
            if not filename.endswith("results.xml"):
                continue
            merged += JUnitXml.fromfile(os.path.join(dirpath, filename))
    merged.write("{0}/test_report.xml".format(test_results_dir), pretty=True)
def gen_results_summary(results_dir, output_fn=None, merge_fn=None,
                        verbose=False, print_section=False,
                        results_file='results.xml'):
    """Scan a results directory and generate a summary file.

    Args:
        results_dir: root directory scanned for result files.
        output_fn: optional path for the text summary (defaults to stdout).
        merge_fn: optional path for a merged junit XML of all reports.
        verbose: print per-test detail (forced on for small report sets).
        print_section: forwarded to print_summary().
        results_file: file name of the per-run report to look for.

    Returns:
        Number of report files processed (0 if none were found).
    """
    reports = [JUnitXml.fromfile(filename)
               for filename in get_results(results_dir, results_file)]
    if len(reports) == 0:
        return 0
    combined = JUnitXml()
    nr_files = 0
    out_f = sys.stdout
    if output_fn is not None:
        out_f = open(output_fn, "w")
    try:
        props = copy.deepcopy(reports[0].child(Properties))
        ltm = check_for_ltm(results_dir, props)
        print_header(out_f, props)
        # LTM runs are grouped by host; otherwise order chronologically.
        if ltm:
            def sort_by(ts):
                return ts.hostname
        else:
            def sort_by(ts):
                return parse_timestamp(ts.timestamp)
        # Small runs always get the verbose per-test listing.
        if total_tests(reports) < 30:
            verbose = True
        for testsuite in sorted(reports, key=sort_by):
            print_summary(out_f, testsuite, verbose, print_section)
            combined.add_testsuite(testsuite)
            nr_files += 1
        out_f.write('Totals: %d tests, %d skipped, %d failures, %d errors, %ds\n'
                    % sum_testsuites(reports))
        print_trailer(out_f, props)
    finally:
        # BUG FIX: the summary file was previously never closed (leaked
        # handle / potentially unflushed output). Never close stdout.
        if out_f is not sys.stdout:
            out_f.close()
    if merge_fn is not None:
        combined.update_statistics()
        # Write-then-rename so an existing merge file is replaced atomically
        # and the previous version is kept as a .bak.
        combined.write(merge_fn + '.new')
        if os.path.exists(merge_fn):
            os.rename(merge_fn, merge_fn + '.bak')
        os.rename(merge_fn + '.new', merge_fn)
    return nr_files
def test_write_nonascii(self):
    """Non-ASCII case names survive a write and read back as UTF-8 text."""
    case = TestCase()
    case.name = '用例1'
    suite = TestSuite()
    suite.name = 'suite1'
    suite.add_testcase(case)
    report = JUnitXml()
    report.add_testsuite(suite)
    report.write(self.tmp)
    with open(self.tmp, encoding='utf-8') as f:
        content = f.read()
    self.assertIn('suite1', content)
    self.assertIn('用例1', content)
def test_write_nonascii(self):
    """Writing a report with non-ASCII names produces readable UTF-8 output."""
    suite = TestSuite()
    suite.name = "suite1"
    case = TestCase()
    case.name = "用例1"
    suite.add_testcase(case)
    report = JUnitXml()
    report.add_testsuite(suite)
    report.write(self.tmp)
    with open(self.tmp, encoding="utf-8") as stream:
        data = stream.read()
    for expected in ("suite1", "用例1"):
        self.assertIn(expected, data)
def test_write_pretty(self):
    """Pretty-printed output can still be parsed back correctly."""
    case = TestCase()
    case.name = '用例1'
    suite = TestSuite()
    suite.name = 'suite1'
    suite.add_testcase(case)
    report = JUnitXml()
    report.add_testsuite(suite)
    report.write(self.tmp, pretty=True)
    parsed = JUnitXml.fromfile(self.tmp)
    first_suite = next(iter(parsed))
    first_case = next(iter(first_suite))
    self.assertEqual(first_case.name, '用例1')
def test_read_written_xml(self):
    """A written report parses back with identical case names."""
    suite = TestSuite()
    suite.name = "suite1"
    case = TestCase()
    case.name = "用例1"
    suite.add_testcase(case)
    report = JUnitXml()
    report.add_testsuite(suite)
    report.write(self.tmp)
    reread = JUnitXml.fromfile(self.tmp)
    reread_suite = next(iter(reread))
    reread_case = next(iter(reread_suite))
    self.assertEqual(reread_case.name, "用例1")
def test_write(self):
    """write() produces a file containing the suite and case names."""
    case = TestCase()
    case.name = 'case1'
    suite = TestSuite()
    suite.name = 'suite1'
    suite.add_testcase(case)
    report = JUnitXml()
    report.add_testsuite(suite)
    report.write(self.tmp)
    with open(self.tmp) as f:
        content = f.read()
    for token in ('suite1', 'case1'):
        self.assertIn(token, content)
def test_construct_xml(self):
    """Building a report creates the expected testsuites/testsuite/testcase tree."""
    case = TestCase()
    case.name = 'case1'
    suite = TestSuite()
    suite.name = 'suite1'
    suite.add_testcase(case)
    report = JUnitXml()
    report.add_testsuite(suite)
    self.assertEqual(report._elem.tag, 'testsuites')
    suites = report._elem.findall('testsuite')
    self.assertEqual(len(suites), 1)
    self.assertEqual(suites[0].attrib['name'], 'suite1')
    cases = suites[0].findall('testcase')
    self.assertEqual(len(cases), 1)
    self.assertEqual(cases[0].attrib['name'], 'case1')
def test_construct_xml(self):
    """The underlying element tree mirrors the suites and cases added."""
    suite = TestSuite()
    suite.name = "suite1"
    case = TestCase()
    case.name = "case1"
    suite.add_testcase(case)
    report = JUnitXml()
    report.add_testsuite(suite)
    self.assertEqual(report._elem.tag, "testsuites")
    suite_elems = report._elem.findall("testsuite")
    self.assertEqual(len(suite_elems), 1)
    self.assertEqual(suite_elems[0].attrib["name"], "suite1")
    case_elems = suite_elems[0].findall("testcase")
    self.assertEqual(len(case_elems), 1)
    self.assertEqual(case_elems[0].attrib["name"], "case1")
def __init__(self, agent, artifacts_dir, env, local_run, use=True,
             check_mms_server_status=False):
    """Store monitoring configuration and prepare the report generators."""
    self.monitoring_agent = agent
    self.artifacts_dir = artifacts_dir
    self.env = env
    self.local_run = local_run
    self.use = use
    self.check_mms_server_status = check_mms_server_status
    self.reporter = JUnitXml()
    self.compare_reporter_generator = CompareReportGenerator(
        self.artifacts_dir, self.env, self.local_run)
def write_output(target, results) -> None:
    """Write scan results in junitxml format"""
    case = TestCase(f"{target}")
    case.name = f"{target}"
    # A clean scan is recorded as-is; anything else becomes a Failure.
    if results["Results"] == ["No SSL/TLS Violations found."]:
        case.result = results
    else:
        case.result = [Failure(results)]
    suite = TestSuite("SSLChecker")
    suite.add_testcase(case)
    report = JUnitXml()
    report.add_testsuite(suite)
    report.write("test-output.xml")
def build(self) -> None:
    """Aggregate the final run plus reruns into a single junit file."""
    self.mainsuite = TestSuite("Drive")
    self.process_xml(self.folder / "final.xml")
    # First add the results from the reruns (suffixed with "2")
    # then the first runs, to add successes before failures.
    for idx in (2, 1):
        for results in Path(self.folder).glob(f"**/*.{idx}.xml"):
            self.process_xml(results)
    print("End of processing")
    print_suite(self.mainsuite)
    merged = JUnitXml()
    merged.add_testsuite(self.mainsuite)
    merged.write(self.folder / self.output)
def merge_xml_files(test_file_pattern):
    """Merge matching junit files into one report; stash originals in harvest/."""
    merged = JUnitXml()
    staging_dir = os.environ['BUILD_ARTIFACTSTAGINGDIRECTORY']
    harvest_dir = os.path.join(staging_dir, "harvest")
    for test_file in glob.glob(test_file_pattern):
        merged += JUnitXml.fromfile(test_file)
        # Move file to harvest dir to save state and not publish the same test twice
        shutil.move(test_file, os.path.join(harvest_dir, os.path.basename(test_file)))
    if merged.tests > 0:
        # Merge all files into a single file for cleaner output
        output_file_name = f"test-results-{os.environ['SCENARIONAME']}-{os.environ['DISTRONAME']}.xml"
        merged.write(os.path.join(staging_dir, output_file_name))
    else:
        logger.info(f"No test files found for pattern: {test_file_pattern}")
def test_merge_test_count(self):
    """Statistics (tests/failures/skipped) accumulate across merged suites."""
    suite_a = TestSuite.fromstring(
        """<testsuite name="suitename1" tests="2" failures="1">
        <testcase name="testname1"><failure message="failed"/></testcase>
        <testcase name="testname2"></testcase>
        </testsuite>"""
    )
    suite_b = TestSuite.fromstring(
        """<testsuite name="suitename2" tests="2" skipped="1">
        <testcase name="testname3"><skipped message="no reason given"/></testcase>
        <testcase name="testname4"></testcase>
        </testsuite>"""
    )
    combined = JUnitXml()
    combined += suite_a
    combined += suite_b
    self.assertEqual(combined.tests, 4)
    self.assertEqual(combined.failures, 1)
    self.assertEqual(combined.skipped, 1)
def test_combining_testsuite_should_keep_name(self):
    """Merging suites into a report preserves each suite's own name."""
    suite_a = TestSuite.fromstring(
        """<testsuite name="suitename1" tests="2" failures="1">
        <testcase name="testname1"><failure message="failed"/></testcase>
        <testcase name="testname2"></testcase>
        </testsuite>"""
    )
    suite_b = TestSuite.fromstring(
        """<testsuite name="suitename2" tests="2" skipped="1">
        <testcase name="testname3"><skipped message="no reason given"/></testcase>
        <testcase name="testname4"></testcase>
        </testsuite>"""
    )
    combined = JUnitXml()
    combined += suite_a
    combined += suite_b
    self.assertEqual([s.name for s in combined], ["suitename1", "suitename2"])
def test_is_compliant_suite_returns_false_WHEN_errors_in_JUnitXML():
    """A suite containing an errored case must be reported non-compliant."""
    erroring_case = TestCase('case1')
    erroring_case.result = [Error()]
    passing_case = TestCase('case2')
    suite = TestSuite('suite1')
    suite.add_property('build', '55')
    suite.add_testcase(erroring_case)
    suite.add_testcase(passing_case)
    report = JUnitXml()
    report.add_testsuite(suite)
    control_result, message = is_compliant_suite(report)
    assert control_result is False
    assert message == "Tests contain errors"
def test_is_compliant_suite_returns_true_WHEN_no_failures_AND_no_errors_in_JUnitXML(
):
    """Skipped and passing cases alone keep the suite compliant."""
    passing_case = TestCase('case1')
    skipped_case = TestCase('case2')
    skipped_case.result = [Skipped()]
    suite = TestSuite('suite1')
    suite.add_property('build', '55')
    suite.add_testcase(passing_case)
    suite.add_testcase(skipped_case)
    report = JUnitXml()
    report.add_testsuite(suite)
    control_result, message = is_compliant_suite(report)
    assert control_result is True
    assert message == "All tests passed"
def build(self) -> None:
    """Merge every per-run report into one suite and write the final file."""
    self.mainsuite = TestSuite(os.getenv("TEST_SUITE", "Project"))
    # Aggregate all reports in reverse order:
    # this is important for projects using "rerun" mechanism and where
    # reports are numbered so that report-2.xml should be processed
    # before report-1.xml in order to add successes before failures.
    for report in sorted(self.folder.glob("**/*.xml"), reverse=True):
        # Skip the final report, if present
        if report.name == self.output:
            continue
        self.process_xml(report)
    print("End of processing")
    print_suite(self.mainsuite)
    merged = JUnitXml()
    merged.add_testsuite(self.mainsuite)
    merged.write(self.folder / self.output)
def write_output(target: str, results: list) -> None:
    """
    Write scan results in junitxml format
    """
    suite = TestSuite(f"{target}")
    for entry in results:
        if entry == {"Results": "No vulnerabilities."}:
            case = TestCase("No vulnerabilities")
            case.result = entry
        else:
            case = TestCase(entry["Vulnerable Library"])
            case.name = (entry["Vulnerable Library"] + " - "
                         + entry["Vulnerability"] + " - "
                         + "CVSS " + str(entry["CVSS"]))
            case.result = [Failure(entry)]
        suite.add_testcase(case)
    report = JUnitXml()
    report.add_testsuite(suite)
    report.write('test-output.xml')
def parse_junit(junit_dir):
    """Combine per-shard junit files, write a merged report, compute failure rate.

    Args:
        junit_dir: directory containing junit_*.xml shard reports.

    Returns:
        Metric payload for CANARY_TEST_FAILURE_RATE (integer percent,
        rounded up).
    """
    combined_suite = TestSuite("Combined TestSuite")
    for junit_xml in glob.glob(os.path.join(junit_dir, "junit_*.xml")):
        # The runner's own report is not a test shard; leave it untouched.
        if "junit_runner.xml" in junit_xml:
            continue
        parsed = JUnitXml.fromfile(junit_xml)
        for testcase in parsed:
            # Nested suite entries and skipped cases do not count.
            if isinstance(testcase, TestSuite) or isinstance(
                    testcase.result, Skipped):
                continue
            combined_suite.add_testcase(testcase)
        os.remove(junit_xml)
    xml = JUnitXml()
    xml.add_testsuite(combined_suite)
    # BUG FIX: statistics were previously computed *after* writing, so the
    # merged file on disk carried stale tests/failures/errors counts.
    xml.update_statistics()
    xml.write(os.path.join(junit_dir, "junit_combined.xml"))
    test_failure_rate = 0
    if xml.tests != 0:
        # Percentage of failed or errored tests, rounded up.
        test_failure_rate = int(
            math.ceil(((xml.failures + xml.errors) * 100) / xml.tests))
    return utils.generate_payload(CANARY_TEST_FAILURE_RATE, test_failure_rate)