def write_output(target: str, results: list, min_cvss: int) -> None:
    """Write scan results in junitxml format"""
    suite = TestSuite(f"{target}")
    # Sentinel entries that mean "clean scan" rather than a real finding.
    no_vulns: List = [
        {"Results": "No vulnerabilities."},
        {"Results": f"No vulnerabilities >= the min CVSS score {min_cvss}."},
    ]
    for entry in results:
        if entry in no_vulns:
            # Clean scan: emit a passing-style case carrying the message.
            test_case = TestCase("No vulnerabilities")
            test_case.result = entry
        else:
            # Real finding: name the case after library / vulnerability / score.
            test_case = TestCase(entry["Vulnerable Library"])
            test_case.name = (
                f"{entry['Vulnerable Library']} - {entry['Vulnerability']}"
                f" - CVSS {entry['CVSS']}"
            )
            test_case.result = [Failure(entry)]
        suite.add_testcase(test_case)
    report = JUnitXml()
    report.add_testsuite(suite)
    report.write("test-output.xml")
def generate_test_case(name, duration, status, polarion_id=None):
    """Create test case object.

    Args:
        name: test case name
        duration: test run duration
        status: test status
        polarion_id: polarion Id (default: None)

    Returns:
        test_case: junit parser test case object
    """
    case = TestCase(name)
    # A timedelta is converted to seconds; anything else counts as 0.0.
    case.time = duration.total_seconds() if isinstance(duration, timedelta) else 0.0
    if status != "Pass":
        case.result = Failure("test failed")
    if polarion_id:
        # Attach the Polarion id as a case-level property.
        props = Properties()
        props.append(Property(name="polarion-testcase-id", value=polarion_id))
        case.append(props)
    return case
def run(self):
    """Parse the whole Kconfig tree and fail on undefined symbol references."""
    self.prepare()
    # Kconfig files are looked up relative to ZEPHYR_BASE.
    os.environ["srctree"] = zephyr_path
    # Widen the globs so the entire Kconfig tree is parsed and every
    # symbol is visible.
    os.environ["SOC_DIR"] = "soc/"
    os.environ["BOARD_DIR"] = "boards/*/*"
    os.environ["ARCH"] = "*"
    # Strict mode: Kconfiglib assumes a single Kconfig tree and warns for
    # every reference to an undefined symbol.
    os.environ["KCONFIG_STRICT"] = "y"
    undef_ref_warnings = [
        warning for warning in kconfiglib.Kconfig().warnings
        if "undefined symbol" in warning
    ]
    # Generating multiple JUnit <failure>s would be neater, but Shippable
    # only seems to display the first one.
    if undef_ref_warnings:
        self.case.result = Failure("undefined Kconfig symbols", "failure")
        self.case.result._elem.text = "\n\n\n".join(undef_ref_warnings)
def run(self):
    """Run scancode-toolkit over files newly added in the commit range and
    record license/copyright problems as a Failure."""
    self.prepare()
    scancode = "/opt/scancode-toolkit/scancode"
    if not os.path.exists(scancode):
        self.case.result = Skipped("scancode-toolkit not installed", "skipped")
        return
    # Stage a copy of every newly added file (--diff-filter=A) so scancode
    # scans only the additions, not the whole tree.
    os.makedirs("scancode-files", exist_ok=True)
    new_files = sh.git("diff", "--name-only", "--diff-filter=A",
                       self.commit_range, **sh_special_args)
    if len(new_files) == 0:
        return
    for newf in new_files:
        f = str(newf).rstrip()
        os.makedirs(os.path.join('scancode-files', os.path.dirname(f)),
                    exist_ok=True)
        copy = os.path.join("scancode-files", f)
        copyfile(f, copy)
    try:
        cmd = [scancode, '--verbose', '--copyright', '--license',
               '--license-diag', '--info', '--classify', '--summary',
               '--json', 'scancode.json', 'scancode-files/']
        cmd_str = " ".join(cmd)
        logging.info(cmd_str)
        # NOTE(review): shell=True with a joined command string; tolerable
        # here only because every argument is a fixed literal.
        out = subprocess.check_output(cmd_str, stderr=subprocess.STDOUT,
                                      shell=True)
    except subprocess.CalledProcessError as e:
        logging.error(e.output)
        self.case.result = Skipped("Exception when running scancode",
                                   "skipped")
        return
    report = ""
    with open('scancode.json', 'r') as json_fp:
        scancode_results = json.load(json_fp)
    for file in scancode_results['files']:
        if file['type'] == 'directory':
            continue
        # Map the staged path back to the original repository path.
        original_fp = str(file['path']).replace('scancode-files/', '')
        licenses = file['licenses']
        # Only check script/source files, excluding CMake and .yaml files.
        if (file['is_script'] or file['is_source']) and (file['programming_language'] not in ['CMake']) and (file['extension'] not in ['.yaml']):
            if len(file['licenses']) == 0:
                report += ("* {} missing license.\n".format(original_fp))
            else:
                for l in licenses:
                    if l['key'] != "apache-2.0":
                        report += ("* {} is not apache-2.0 licensed: {}\n".format(original_fp, l['key']))
                    if l['category'] != 'Permissive':
                        report += ("* {} has non-permissive license: {}\n".format(original_fp, l['key']))
            # NOTE(review): the nesting level of this copyright check is
            # ambiguous in the flattened source; placed as a sibling of the
            # license checks to match the parallel implementation later in
            # this file — confirm against the original layout.
            if len(file['copyrights']) == 0:
                report += ("* {} missing copyright.\n".format(original_fp))
    if report != "":
        self.case.result = Failure("License/Copyright issues", "failure")
        self.case.result._elem.text = report
def convert_csv_to_junit(csv_filename, junit_filename):
    """Convert a Gadgetron integration CSV stats file to a JUnit XML report.

    Args:
        csv_filename: path to a CSV with 'test', 'processing_time' and
            'status' columns.
        junit_filename: path the JUnit XML report is written to.
    """
    suite = TestSuite('Gadgetron Integration')
    with open(csv_filename) as csv_file:
        statsreader = csv.DictReader(csv_file)
        for row in statsreader:
            case = TestCase(name=row['test'], time=row['processing_time'])
            # Any status other than "Passed" is reported as a failure.
            if row['status'] != "Passed":
                case.result = [Failure()]
            suite.add_testcase(case)
    # BUG FIX: the suite was previously built and then discarded —
    # junit_filename was never used and the function had no effect.
    # Persist the report to the requested path.
    xml = JUnitXml()
    xml.add_testsuite(suite)
    xml.write(junit_filename)
def add_failure(self, msg):
    """
    Signals that the test failed, with message 'msg'. Can be called many
    times within the same test to report multiple failures.
    """
    if self.case.result:
        # Already failing: append this message to the existing Failure text.
        self.case.result._elem.text += "\n\n" + msg.rstrip()
    else:
        # First reported failure: create the Failure element.
        self.case.result = Failure(self.name + " issues", "failure")
        self.case.result._elem.text = msg.rstrip()
def run(self):
    """Run checkpatch.pl over the commit-range diff and record a Failure
    when checkpatch reports one or more errors."""
    self.prepare()
    diff = subprocess.Popen(('git', 'diff', '%s' % (self.commit_range)),
                            stdout=subprocess.PIPE)
    try:
        # BUG FIX: the original passed this argument *tuple* together with
        # shell=True; on POSIX the shell then runs only the first element
        # and '--mailback', '--no-tree', '-' become shell positional
        # parameters, i.e. they were silently dropped. Execute the
        # sequence directly (shell=False, the default) instead.
        subprocess.check_output(
            ('%s/scripts/checkpatch.pl' % repository_path,
             '--mailback', '--no-tree', '-'),
            stdin=diff.stdout, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as ex:
        # checkpatch exits non-zero when it finds problems; only report a
        # Failure when actual errors (not just warnings) are present.
        m = re.search("([1-9][0-9]*) errors,", ex.output.decode('utf8'))
        if m:
            self.case.result = Failure("Checkpatch issues", "failure")
            self.case.result._elem.text = (ex.output.decode('utf8'))
def run(self):
    """Run gitlint over the commit range and record any commit message
    syntax issues as a Failure."""
    self.prepare()
    proc = subprocess.Popen('gitlint --commits %s' % (self.commit_range),
                            shell=True, stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    msg = ""
    if proc.wait() != 0:
        # BUG FIX: proc.stdout.read() returns bytes, and the original
        # compared those bytes to the str "" — never equal in Python 3, so
        # a failing gitlint with no output still produced an (empty)
        # Failure. Decode before comparing.
        msg = proc.stdout.read().decode('utf8')
    if msg != "":
        self.case.result = Failure("commit message syntax issues", "failure")
        self.case.result._elem.text = msg
def write_output(target, results) -> None:
    """Write scan results in junitxml format"""
    case = TestCase(f"{target}")
    case.name = f"{target}"
    # A single sentinel entry means the scan came back clean.
    if results["Results"] == ["No SSL/TLS Violations found."]:
        case.result = results
    else:
        case.result = [Failure(results)]
    suite = TestSuite("SSLChecker")
    suite.add_testcase(case)
    report = JUnitXml()
    report.add_testsuite(suite)
    report.write("test-output.xml")
def test_add_case(self):
    # One passing case (no result) plus one of each result category.
    suite = TestSuite()
    for outcome in (None, Failure(), Error(), Skipped()):
        case = TestCase()
        if outcome is not None:
            case.result = outcome
        suite.add_testcase(case)
    suite.update_statistics()
    # Statistics reflect exactly one case of each kind.
    self.assertEqual(suite.tests, 4)
    self.assertEqual(suite.failures, 1)
    self.assertEqual(suite.errors, 1)
    self.assertEqual(suite.skipped, 1)
def test_is_compliant_suite_returns_fails_WHEN_failures_in_JUnitXML():
    # One failing and one passing case.
    failing = TestCase('case1')
    failing.result = [Failure()]
    passing = TestCase('case2')
    # Suite with a build property holding both cases.
    suite = TestSuite('suite1')
    suite.add_property('build', '55')
    suite.add_testcase(failing)
    suite.add_testcase(passing)
    # Wrap the suite in a JUnitXml document.
    xml = JUnitXml()
    xml.add_testsuite(suite)
    control_result, message = is_compliant_suite(xml)
    # A single failure is enough to make the suite non-compliant.
    assert control_result is False
    assert message == "Tests contain failures"
def run_test_and_get_tc(self, test_name, test_func) -> TestCase:
    """Execute test_func, capture its output/exception, and wrap the
    outcome in a junitparser TestCase.

    Args:
        test_name: display name for the test case.
        test_func: zero-argument callable; its return value is recorded
            as the case's system-out.

    Returns:
        TestCase with result, system_out and elapsed time filled in.
    """
    stdout = ""
    tc = TestCase(test_name, classname=os.environ['SCENARIONAME'])
    start_time = time.time()
    self.log.info("Execute Test: {0}".format(test_name))
    try:
        stdout = test_func()
        self.log.debug("[{0}] Debug Output: {1}".format(test_name, stdout))
    except Exception as err:
        # BUG FIX: the format string was "Error: {1}", which silently
        # dropped the test name passed as the first argument; include it.
        self.log.exception("[{0}] Error: {1}".format(test_name, err))
        stdout = str(err)
        tc.result = [
            Failure(f"Failure: {err}", type_=f"Stack: {traceback.format_exc()}")
        ]
    tc.system_out = stdout
    tc.time = (time.time() - start_time)
    return tc
def write_output(target: str, results: list) -> None:
    """ Write scan results in junitxml format """
    suite = TestSuite(f"{target}")
    clean = {"Results": "No vulnerabilities."}
    for finding in results:
        if finding == clean:
            # Clean scan: emit a single passing-style case.
            case = TestCase("No vulnerabilities")
            case.result = finding
        else:
            # Real finding: name the case after library / vulnerability / score.
            case = TestCase(finding["Vulnerable Library"])
            case.name = "{} - {} - CVSS {}".format(
                finding["Vulnerable Library"],
                finding["Vulnerability"],
                finding["CVSS"],
            )
            case.result = [Failure(finding)]
        suite.add_testcase(case)
    report = JUnitXml()
    report.add_testsuite(suite)
    report.write('test-output.xml')
def create_xunit_results(suite_name, test_cases, run_dir):
    """
    Create an xUnit result file for the test suite's executed test cases.

    Args:
        suite_name: the test suite name
        test_cases: the test cases objects
        run_dir: the run directory to store xUnit result files

    Returns:
        None
    """
    xml_file = f"{run_dir}/{suite_name}.xml"
    log.info(f"Creating xUnit result file for test suite: {suite_name}")
    suite = TestSuite(suite_name)
    for tc in test_cases:
        case = TestCase(tc["name"])
        # Any status other than "Pass" is recorded as a failure.
        if tc["status"] != "Pass":
            case.result = Failure("test failed")
    # NOTE(review): `case` is never added to `suite`, and nothing is ever
    # written to xml_file — this block appears truncated; confirm against
    # the complete source before relying on it.
def run(self):
    """Check each commit in the range: the author email must match a
    Signed-off-by entry, and the author must follow 'First Last <email>'."""
    self.prepare()
    for file in get_shas(self.commit_range):
        commit = sh.git("log", "--decorate=short", "-n 1", file,
                        **sh_special_args)
        signed = []
        author = ""
        sha = ""
        parsed_addr = None
        for line in commit.split("\n"):
            # FIX: raw strings for the regexes — "\s" inside a normal
            # string literal is an invalid escape sequence and raises a
            # DeprecationWarning on modern Python (the matched patterns
            # are unchanged).
            match = re.search(r"^commit\s([^\s]*)", line)
            if match:
                sha = match.group(1)
            match = re.search(r"^Author:\s(.*)", line)
            if match:
                author = match.group(1)
                parsed_addr = parseaddr(author)
            match = re.search(r"signed-off-by:\s(.*)", line, re.IGNORECASE)
            if match:
                signed.append(match.group(1))
        error1 = "%s: author email (%s) needs to match one of the signed-off-by entries." % (
            sha, author)
        error2 = "%s: author email (%s) does not follow the syntax: First Last <email>." % (
            sha, author)
        failure = None
        if author not in signed:
            failure = error1
        # parseaddr() must yield a display name of at least two words.
        if not parsed_addr or len(parsed_addr[0].split(" ")) < 2:
            if not failure:
                failure = error2
            else:
                failure = failure + "\n" + error2
        if failure:
            self.case.result = Failure("identity/email issues", "failure")
            self.case.result._elem.text = failure
def __exit__(self, type, value, traceback):
    """On context exit, fold per-scenario results from xunit.xml (if any)
    into this run's test suite and attach the suite to the report."""
    xunit_file = os.path.join(self.artifacts_dir, "xunit.xml")
    tests, failures, skipped, errors = 0, 0, 0, 0
    if os.path.exists(xunit_file):
        xml = JUnitXml.fromfile(xunit_file)
        for i, suite in enumerate(xml):
            for case in suite:
                # Prefix the case name with its scenario index.
                name = "scenario_{}: {}".format(i, case.name)
                result = case.result
                # NOTE(review): Error and Failure are swapped here — an
                # Error is re-recorded as a Failure (and counted in
                # `failures`) and vice versa. The swap is internally
                # consistent, so it looks deliberate; confirm the intent
                # before changing it.
                if isinstance(result, Error):
                    failures += 1
                    result = Failure(result.message, result.type)
                elif isinstance(result, Failure):
                    errors += 1
                    result = Error(result.message, result.type)
                elif isinstance(result, Skipped):
                    skipped += 1
                else:
                    # No result element means the case passed.
                    tests += 1
                tc = TestCase(name)
                tc.result = result
                self.ts.add_testcase(tc)
    else:
        # No xunit output at all: record one skipped placeholder case.
        tc = TestCase(self.name)
        tc.result = Skipped()
        self.ts.add_testcase(tc)
    self.ts.hostname = self.env_name
    self.ts.timestamp = self.timer.start
    self.ts.time = self.timer.diff()
    # NOTE(review): these manually-tallied counters (where `tests` counts
    # only passing cases) are set just before update_statistics(), which
    # may recompute them — confirm which values win.
    self.ts.tests = tests
    self.ts.failures = failures
    self.ts.skipped = skipped
    self.ts.errors = errors
    self.ts.update_statistics()
    self.junit_xml.add_testsuite(self.ts)
def run(self):
    """Run checkpatch.pl (Zephyr's copy when zephyr_base is set) over the
    commit-range diff and record a Failure when errors are reported."""
    self.prepare()
    # Default to Zephyr's checkpatch if ZEPHYR_BASE is set
    checkpatch = os.path.join(self.zephyr_base or self.repo_path, 'scripts',
                              'checkpatch.pl')
    if not os.path.exists(checkpatch):
        self.case.result = Skipped("checkpatch script not found", "skipped")
        # BUG FIX: the original fell through and tried to run the missing
        # script anyway; bail out once the skip is recorded.
        return
    diff = subprocess.Popen(('git', 'diff', '%s' % (self.commit_range)),
                            stdout=subprocess.PIPE)
    try:
        # BUG FIX: the original passed this argument tuple together with
        # shell=True; on POSIX the shell then runs only `checkpatch` and
        # the remaining arguments become shell positional parameters,
        # i.e. they were silently dropped. Execute the sequence directly
        # (shell=False, the default).
        subprocess.check_output(
            (checkpatch, '--mailback', '--no-tree', '-'),
            stdin=diff.stdout, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as ex:
        # Only report a Failure for actual errors, not warnings.
        match = re.search("([1-9][0-9]*) errors,", ex.output.decode('utf8'))
        if match:
            self.case.result = Failure("Checkpatch issues", "failure")
            self.case.result._elem.text = (ex.output.decode('utf8'))
def run(self):
    """Run scancode-toolkit over files newly added in the commit range and
    record license/copyright problems as a Failure."""
    self.prepare()
    scancode = "/opt/scancode-toolkit/scancode"
    if not os.path.exists(scancode):
        self.case.result = Skipped("scancode-toolkit not installed",
                                   "skipped")
        return
    # Stage a copy of every newly added file (--diff-filter=A) so scancode
    # scans only the additions, not the whole tree.
    os.makedirs("scancode-files", exist_ok=True)
    new_files = sh.git("diff", "--name-only", "--diff-filter=A",
                       self.commit_range, **sh_special_args)
    if not new_files:
        return
    for newf in new_files:
        file = str(newf).rstrip()
        os.makedirs(os.path.join('scancode-files',
                                 os.path.dirname(file)), exist_ok=True)
        copy = os.path.join("scancode-files", file)
        copyfile(file, copy)
    try:
        cmd = [
            scancode, '--verbose', '--copyright', '--license',
            '--license-diag', '--info', '--classify', '--summary',
            '--html', 'scancode.html', '--json', 'scancode.json',
            'scancode-files/'
        ]
        cmd_str = " ".join(cmd)
        logging.info(cmd_str)
        # NOTE(review): shell=True with a joined command string; tolerable
        # here only because every argument is a fixed literal.
        subprocess.check_output(cmd_str, stderr=subprocess.STDOUT,
                                shell=True)
    except subprocess.CalledProcessError as ex:
        logging.error(ex.output)
        self.case.result = Error("Exception when running scancode", "error")
        return
    report = ""
    # File types exempt from the license/copyright checks.
    whitelist_extensions = ['.yaml', '.html']
    whitelist_languages = ['CMake', 'HTML']
    with open('scancode.json', 'r') as json_fp:
        scancode_results = json.load(json_fp)
    for file in scancode_results['files']:
        if file['type'] == 'directory':
            continue
        # Map the staged path back to the original repository path.
        original_fp = str(file['path']).replace('scancode-files/', '')
        licenses = file['licenses']
        # Only check script/source files outside the whitelists.
        if (file['is_script'] or file['is_source']) and (
                file['programming_language'] not in whitelist_languages
        ) and (file['extension'] not in whitelist_extensions):
            if not file['licenses']:
                report += (
                    "* {} missing license.\n".format(original_fp))
            else:
                for lic in licenses:
                    if lic['key'] != "apache-2.0":
                        report += (
                            "* {} is not apache-2.0 licensed: {}\n".
                            format(original_fp, lic['key']))
                    if lic['category'] != 'Permissive':
                        report += (
                            "* {} has non-permissive license: {}\n".
                            format(original_fp, lic['key']))
                    if lic['key'] == 'unknown-spdx':
                        report += (
                            "* {} has unknown SPDX: {}\n".format(
                                original_fp, lic['key']))
            # NOTE(review): nesting of this copyright check relative to the
            # license checks is ambiguous in the flattened source; placed
            # as a sibling of the license if/else inside the source-file
            # filter — confirm against the original layout.
            if not file['copyrights']:
                report += (
                    "* {} missing copyright.\n".format(original_fp))
    if report != "":
        self.case.result = Failure("License/Copyright issues", "failure")
        preamble = "In most cases you do not need to do anything here, especially if the files reported below are going into ext/ and if license was approved for inclusion into ext/ already. Fix any missing license/copyright issues. The license exception if a JFYI for the maintainers and can be overriden when merging the pull request.\n"
        self.case.result._elem.text = preamble + report
def run(self):
    """Parse the full Zephyr Kconfig tree and fail on parse errors,
    undefined symbol references, or too many new top-level menu entries."""
    self.prepare()
    if not self.zephyr_base:
        self.case.result = Skipped("Not a Zephyr tree", "skipped")
        return
    # Put the Kconfiglib path first to make sure no local Kconfiglib version is
    # used
    kconfig_path = os.path.join(self.zephyr_base, "scripts", "kconfig")
    if not os.path.exists(kconfig_path):
        self.case.result = Error("Can't find Kconfig", "error")
        return
    sys.path.insert(0, kconfig_path)
    import kconfiglib
    # Look up Kconfig files relative to ZEPHYR_BASE
    os.environ["srctree"] = self.zephyr_base
    # Parse the entire Kconfig tree, to make sure we see all symbols
    os.environ["SOC_DIR"] = "soc/"
    os.environ["ARCH_DIR"] = "arch/"
    os.environ["BOARD_DIR"] = "boards/*/*"
    os.environ["ARCH"] = "*"
    os.environ["PROJECT_BINARY_DIR"] = tempfile.gettempdir()
    os.environ['GENERATED_DTS_BOARD_CONF'] = "dummy"
    # For multi repo support
    open(os.path.join(tempfile.gettempdir(), "Kconfig.modules"),
         'a').close()
    # Enable strict Kconfig mode in Kconfiglib, which assumes there's just a
    # single Kconfig tree and warns for all references to undefined symbols
    os.environ["KCONFIG_STRICT"] = "y"
    try:
        kconf = kconfiglib.Kconfig()
    except kconfiglib.KconfigError as e:
        self.case.result = Failure("error while parsing Kconfig files",
                                   "failure")
        self.case.result._elem.text = str(e)
        return
    #
    # Look for undefined symbols
    #
    undef_ref_warnings = [
        warning for warning in kconf.warnings
        if "undefined symbol" in warning
    ]
    # Generating multiple JUnit <failure>s would be neater, but Shippable only
    # seems to display the first one
    if undef_ref_warnings:
        self.case.result = Failure("undefined Kconfig symbols", "failure")
        self.case.result._elem.text = "\n\n\n".join(undef_ref_warnings)
        return
    #
    # Check for stuff being added to the top-level menu
    #
    max_top_items = 50
    n_top_items = 0
    node = kconf.top_node.list
    while node:
        # Only count items with prompts. Other items will never be
        # shown in the menuconfig (outside show-all mode).
        if node.prompt:
            n_top_items += 1
        node = node.next
    if n_top_items > max_top_items:
        self.case.result = Failure("new entries in top menu", "failure")
        # NOTE(review): line breaks inside this message were lost in the
        # flattened source; re-wrapped to plausible widths — confirm the
        # original wrapping if byte-exact output matters.
        self.case.result._elem.text = """
Expected no more than {} potentially visible items (items with prompts) in the
top-level Kconfig menu, found {} items. If you're deliberately adding new
entries, then bump the 'max_top_items' variable in {}.
""".format(max_top_items, n_top_items, __file__)
def test_result_attrs(self):
    failure = Failure("A")
    # NOTE: lxml gives spaceless result
    acceptable = [b'<failure message="A" />', b'<failure message="A"/>']
    self.assertIn(failure.tostring(), acceptable)
def test_result_eq(self):
    # TODO: Weird, need to think of a better API
    # Identical messages compare equal; differing messages do not.
    self.assertEqual(Failure("A"), Failure("A"))
    for result_cls, left, right in ((Skipped, "B", "A"), (Error, "C", "B")):
        self.assertNotEqual(result_cls(left), result_cls(right))
def test_case_output(self):
    # system_err / system_out can be set and then overwritten.
    case = TestCase()
    for err, out in (("error message", "out message"), ("error2", "out2")):
        case.system_err = err
        case.system_out = out
        self.assertEqual(case.system_err, err)
        self.assertEqual(case.system_out, out)

def test_update_results(self):
    # Assigning a new list replaces the previous results wholesale.
    tc = TestCase()
    tc.result = [Skipped()]
    tc.result = [Failure(), Skipped()]
    self.assertEqual(len(tc.result), 2)

def test_monkypatch(self):
    # Attributes can be grafted onto TestCase at runtime via Attr.
    TestCase.id = Attr("id")
    tc = TestCase()
    tc.id = "100"
    self.assertEqual(tc.id, "100")

def test_equal(self):
    first = TestCase()
    first.name = "test1"
    second = TestCase()
    second.name = "test1"
    # Cases with the same name compare equal.
    self.assertEqual(first, second)
def test_case_output(self):
    # system_err / system_out can be set and then overwritten.
    case = TestCase()
    for err, out in (('error message', 'out message'), ('error2', 'out2')):
        case.system_err = err
        case.system_out = out
        self.assertEqual(case.system_err, err)
        self.assertEqual(case.system_out, out)

def test_set_multiple_results(self):
    # The latest assignment to .result wins.
    tc = TestCase()
    tc.result = Skipped()
    tc.result = Failure()
    self.assertIsInstance(tc.result, Failure)

def test_monkypatch(self):
    # Attributes can be grafted onto TestCase at runtime via Attr.
    TestCase.id = Attr('id')
    tc = TestCase()
    tc.id = "100"
    self.assertEqual(tc.id, "100")

def test_equal(self):
    first = TestCase()
    first.name = 'test1'
    second = TestCase()
    second.name = 'test1'
    # Cases with the same name compare equal.
    self.assertEqual(first, second)
Returns: None """ _file = suite_name.split("/")[-1].strip(".yaml") xml_file = f"{run_dir}/xunit.xml" log.info(f"Creating xUnit result file for test suite: {_file}") suite = TestSuite(_file) for k, v in test_run_metadata.items(): suite.add_property(k, v if v else "--NA--") for tc in test_cases: case = TestCase(tc["name"]) elapsed = tc.get("duration") if isinstance(elapsed, timedelta): case.time = elapsed.total_seconds() else: case.time = 0.0 if tc["status"] != "Pass": case.result = Failure("test failed") suite.add_testcase(case) suite.update_statistics() xml = JUnitXml() xml.add_testsuite(suite) xml.write(xml_file, pretty=True) log.info(f"xUnit result file created: {xml_file}")
def test_case_output(self):
    # system_err / system_out can be set and then overwritten.
    case = TestCase()
    pairs = [("error message", "out message"), ("error2", "out2")]
    for err, out in pairs:
        case.system_err = err
        case.system_out = out
        self.assertEqual(case.system_err, err)
        self.assertEqual(case.system_out, out)

def test_set_multiple_results(self):
    # The latest assignment to .result wins.
    tc = TestCase()
    tc.result = Skipped()
    tc.result = Failure()
    self.assertIsInstance(tc.result, Failure)

def test_monkypatch(self):
    # Attributes can be grafted onto TestCase at runtime via Attr.
    TestCase.id = Attr("id")
    tc = TestCase()
    tc.id = "100"
    self.assertEqual(tc.id, "100")

def test_equal(self):
    first = TestCase()
    first.name = "test1"
    second = TestCase()
    second.name = "test1"
    # Cases with the same name compare equal.
    self.assertEqual(first, second)
def main(args=None):
    """Smoke-test the client site API (port availability, connectivity,
    and the resource endpoints) and write the outcome to junit_test.xml."""
    if args is None:
        args = sys.argv[1:]
    parser = argparse.ArgumentParser()
    parser.add_argument("--url", required=True)
    # NOTE(review): a hard-coded API key as the default of a *required*
    # argument is contradictory (the default can never be used) and leaks
    # a credential into the source — confirm and remove.
    parser.add_argument("--apikey", required=True,
                        default="cdeb2184-cb23-40a1-bdfd-d0fe2715547a")
    parser.add_argument("--port", type=int, default=4723)
    parsed_args = parser.parse_args(args)
    client_site_url = parsed_args.url
    # Normalize the URL to a trailing slash.
    if not client_site_url.endswith("/"):
        client_site_url = client_site_url + "/"
    apikey = parsed_args.apikey
    port = parsed_args.port
    # Probe the port by binding to it.
    s = socket.socket()
    try:
        s.bind(('localhost', port))
    except socket.error as err:
        # NOTE(review): errno 98 is EADDRINUSE on Linux only — not
        # portable; errno.EADDRINUSE would be. Confirm target platforms.
        if err.errno == 98:
            #Create Test Cases
            case1 = TestCase('Test1')
            case1.name = 'Test for get_resources'
            case1.result = Failure(
                'Test failed. Can not connect because port is actually used',
                err)
            #Create Test Suite
            suite = TestSuite('Suite1')
            suite.name = 'Test suite 1'
            suite.add_testcase(case1)
            #Add info into JunitXml
            xml = JUnitXml()
            xml.add_testsuite(suite)
            xml.write('junit_test.xml')
            sys.exit(
                "Port {port} is already in use.\n"
                "Is there another instance of {process} already running?\n"
                "To run multiple instances of {process} at once use the "
                "--port <num> option.".format(port=port,
                                              process=sys.argv[0]))
        else:
            raise
    try:
        response = requests.get(client_site_url,
                                headers=dict(Authorization=apikey))
    except requests.exceptions.RequestException as err:
        #Create Test Cases
        case1 = TestCase('Test1')
        case1.name = 'Test the connection to client_site_url'
        case1.result = Failure(
            'Test failed. Cannot connect to the client_site_url', err)
        #Create Test Suite
        suite = TestSuite('Suite1')
        suite.name = 'Test suite 1'
        suite.add_testcase(case1)
        #Add info into JunitXml
        xml = JUnitXml()
        xml.add_testsuite(suite)
        xml.write('junit_test.xml')
        sys.exit(
            "The client could not connect with the client site due to {error}".
            format(error=err))
    success, response = get_resources_to_check(client_site_url, apikey)
    data = response.json()
    if success:
        #Create Test Cases
        # NOTE(review): a passing check is recorded as Skipped — confirm
        # this stands in for "passed" intentionally.
        case1 = TestCase('Test1')
        case1.name = 'Test for get_resources'
        case1.result = Skipped(
            'Test passed successfully with 50 resources obtained')
    else:
        #Create Test Cases
        if not response.ok:
            case1 = TestCase('Test1')
            case1.name = 'Test for get_resources'
            case1.result = Failure(
                'Client could not get the list with code error {0} and reason {1}'
                .format(response.status_code, response.reason),
                'failure_of_connection')
        else:
            case1 = TestCase('Test1')
            case1.name = 'Test for get_resources'
            case1.result = Error(
                'Client could not get the list correctly, it only have got {0} resources'
                .format(len(data)), 'error_list')
    resource_id = data[0]
    success, response = get_url_for_id(client_site_url, apikey, resource_id)
    if success:
        #Create Test Cases
        case2 = TestCase('Test2')
        case2.name = 'Test for get_url_for_resource_id'
        case2.result = Skipped(
            'Test passed successfully with the url obtained correctly')
    else:
        #Create Test Cases
        if not response.ok:
            case2 = TestCase('Test2')
            case2.name = 'Test for get_url_for_resource_id'
            case2.result = Failure(
                'Client could not get the url for the resource with code error {0} and reason {1}'
                .format(response.status_code, response.reason),
                'failure_of_connection')
        else:
            case2 = TestCase('Test2')
            case2.name = 'Test for get_url_for_resource_id'
            case2.result = Error('Client could not get the url correctly',
                                 'the_error_type')
    #Create Test Suite
    suite = TestSuite('Suite1')
    suite.name = 'Test suite 1'
    suite.add_testcase(case1)
    suite.add_testcase(case2)
    #Add info into JunitXml
    xml = JUnitXml()
    xml.add_testsuite(suite)
    xml.write('junit_test.xml')
def test_result_eq(self):
    # TODO: Weird, need to think of a better API
    # Identical messages compare equal; differing messages do not.
    same = (Failure('A'), Failure('A'))
    self.assertEqual(*same)
    self.assertNotEqual(Skipped('B'), Skipped('A'))
    self.assertNotEqual(Error('C'), Error('B'))