import json
import logging
import os
import sys

from junit_xml import TestCase, TestSuite

logger = logging.getLogger(__name__)


def main():
    args = parse_args()
    clair_parsed_file = None
    clair_parsed_error_file = None
    setup_error_log = os.path.join("clair-scanner-logs",
                                   "clair_setup_errors.log")
    try:
        if os.path.exists(args.clairfile):
            with open(args.clairfile) as clairfile:
                clair_parsed_file = json.load(clairfile)
        if os.path.exists(setup_error_log):
            with open(setup_error_log, 'r') as clairfile_errors:
                clair_parsed_error_file = clairfile_errors.readlines()
    except Exception:
        logger.exception("Failed to parse clair / clair_error file. Exiting.")
        sys.exit(1)

    current_sorted_level = None
    current_suite = None
    test_suites = []

    if clair_parsed_error_file:
        # Surface setup errors as their own suite so they appear in the
        # JUnit report alongside the vulnerability results.
        current_suite = TestSuite("SetupError")
        new_step = TestCase(name="SetupError",
                            classname="SetupError",
                            status="unapproved",
                            stderr=clair_parsed_error_file)
        new_step.log = clair_parsed_error_file
        new_step.category = "SetupError"
        new_step.failure_type = "unapproved"
        new_step.failure_message = ("Please have the following setup errors "
                                    "reviewed by Splunk: {}".format(
                                        clair_parsed_error_file))
        new_step.failure_output = clair_parsed_error_file
        current_suite.test_cases.append(new_step)
        test_suites.append(current_suite)
        current_suite = None

    if clair_parsed_file:
        # Group vulnerabilities into one suite per severity level; this
        # assumes the list is already sorted by severity.
        for vuln in clair_parsed_file["vulnerabilities"]:
            if current_sorted_level != vuln["severity"]:
                if current_suite:
                    test_suites.append(current_suite)
                current_suite = TestSuite(name=vuln["severity"])
                current_sorted_level = vuln["severity"]
            new_step = TestCase(name=vuln["vulnerability"],
                                classname=vuln["severity"],
                                status="unapproved",
                                url=vuln["link"],
                                stderr=vuln["description"])
            new_step.log = vuln
            new_step.category = vuln["severity"]
            new_step.failure_type = "unapproved"
            new_step.failure_message = ("Please have the following security "
                                        "issue reviewed by Splunk: {}".format(
                                            vuln["link"]))
            new_step.failure_output = vuln["description"]
            current_suite.test_cases.append(new_step)
        if current_suite:
            # Don't drop the suite for the last severity level seen.
            test_suites.append(current_suite)

    # Try to write the new file.
    try:
        with open(args.output, 'w') as outfile:
            outfile.write(TestSuite.to_xml_string(test_suites))
    except Exception:
        logger.exception("Failed saving file.")
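
# A minimal sketch of the parse_args() helper that main() assumes above; the
# flag names and defaults here are assumptions, not the script's real CLI.
# main() only needs the 'clairfile' and 'output' attributes: 'clairfile' is a
# clair-scanner JSON report whose "vulnerabilities" entries carry
# "vulnerability", "severity", "link" and "description" fields.
import argparse


def parse_args():
    parser = argparse.ArgumentParser(
        description="Convert a clair-scanner JSON report to JUnit XML.")
    parser.add_argument("--clairfile",
                        default="clair_output.json",
                        help="path to the clair-scanner JSON report")
    parser.add_argument("--output",
                        default="clair_report.xml",
                        help="path of the JUnit XML file to write")
    return parser.parse_args()
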
def generate_junit_report(args, reports, start_time, end_time, total,
                          junit_file):
    from junit_xml import TestSuite, TestCase
    import sys
    junit_log = []
    # Record the run configuration and the result counters as suite
    # properties. 'host' is assumed to be imported by the enclosing module
    # (rtemstoolkit's host support).
    junit_prop = {}
    junit_prop['Command Line'] = ' '.join(args)
    junit_prop['Python'] = sys.version.replace('\n', '')
    junit_prop['test_groups'] = []
    junit_prop['Host'] = host.label(mode='all')
    junit_prop['passed_count'] = reports.passed
    junit_prop['failed_count'] = reports.failed
    junit_prop['user-input_count'] = reports.user_input
    junit_prop['expected-fail_count'] = reports.expected_fail
    junit_prop['indeterminate_count'] = reports.indeterminate
    junit_prop['benchmark_count'] = reports.benchmark
    junit_prop['timeout_count'] = reports.timeouts
    junit_prop['test-too-long_count'] = reports.test_too_long
    junit_prop['invalid_count'] = reports.invalids
    junit_prop['wrong-version_count'] = reports.wrong_version
    junit_prop['wrong-build_count'] = reports.wrong_build
    junit_prop['wrong-tools_count'] = reports.wrong_tools
    junit_prop['total_count'] = reports.total
    time_delta = end_time - start_time
    junit_prop['average_test_time'] = str(time_delta / total)
    junit_prop['testing_time'] = str(time_delta)
    for name in reports.results:
        result_type = reports.results[name]['result']
        # Result names look like '<...>/<category>/<test>.<ext>'; keep the
        # last two components as the category and the test case name.
        test_parts = name.split('/')
        test_category = test_parts[-2]
        test_name = test_parts[-1]
        junit_result = TestCase(test_name.split('.')[0])
        junit_result.category = test_category
        if result_type == 'failed' or result_type == 'timeout':
            junit_result.add_failure_info(None,
                                          reports.results[name]['output'],
                                          result_type)
        junit_log.append(junit_result)
    ts = TestSuite('RTEMS Test Suite', junit_log)
    ts.properties = junit_prop
    ts.hostname = host.label(mode='all')
    # Write out the JUnit log.
    with open(junit_file, 'w') as f:
        TestSuite.to_file(f, [ts], prettyprint=True)
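
# Hedged usage sketch, not part of the real tester: the _Reports class and the
# host stub below are stand-ins invented for illustration so the function can
# run in isolation. The real driver passes its own report accumulator, and the
# enclosing module normally provides 'host' via its rtemstoolkit import.
if __name__ == '__main__':
    import datetime

    class _HostStub(object):
        @staticmethod
        def label(mode='all'):
            return 'example-host'

    host = _HostStub()  # stands in for the module-level 'host' import

    class _Reports(object):
        # Minimal stand-in exposing only the fields the report reads.
        passed = 1
        failed = 1
        user_input = 0
        expected_fail = 0
        indeterminate = 0
        benchmark = 0
        timeouts = 0
        test_too_long = 0
        invalids = 0
        wrong_version = 0
        wrong_build = 0
        wrong_tools = 0
        total = 2
        results = {
            'sparc/samples/hello.exe': {'result': 'passed', 'output': ''},
            'sparc/samples/ticker.exe': {'result': 'failed',
                                         'output': 'stub failure output'},
        }

    start = datetime.datetime.now()
    end = start + datetime.timedelta(seconds=10)
    generate_junit_report(['rtems-test', '--jobs=2'], _Reports(),
                          start, end, _Reports.total, 'rtems-test.xml')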