def main():
    args = parse_args()
    spawn_func = None
    spawn_arg = None
    if args.port is not None:
        spawn_func = spawn_port
        spawn_arg = args.port
    elif args.executable is not None:
        spawn_func = spawn_exec
        spawn_arg = args.executable
    name = args.name or ""
    global debug
    if args.debug:
        debug = True
    if spawn_func is None:
        debug_print("Please specify port or executable", file=sys.stderr)
        return 1
    env_vars = []
    if args.env_file is not None:
        cfg = ConfigParser()
        cfg.optionxform = str
        with args.env_file as fp:
            cfg.readfp(fp)
        env_vars = cfg.items('global')
    mocks = {}
    if args.mock is not None:
        mocks_mod = imp.load_source('mocks', args.mock)
        mocks = mock_decorators.env
    with spawn_func(spawn_arg) as sp:
        ts = run_tests(sp, name, mocks, env_vars)
        if args.output:
            with open(args.output, "w") as f:
                TestSuite.to_file(f, [ts])
    return 0
def serialize_and_read(test_suites, to_file=False, prettyprint=None):
    """writes the test suite to an XML string and then re-reads it using minidom,
       returning => (test suite element, list of test case elements)"""
    try:
        iter(test_suites)
    except TypeError:
        test_suites = [test_suites]

    if to_file:
        fd, filename = tempfile.mkstemp(text=True)
        with os.fdopen(fd, 'w') as f:
            TestSuite.to_file(f, test_suites)
        print("Serialized XML to temp file [%s]" % filename)
        xmldoc = minidom.parse(filename)
        os.remove(filename)
    else:
        if prettyprint is not None:
            xml_string = TestSuite.to_xml_string(test_suites, prettyprint=prettyprint)
        else:
            xml_string = TestSuite.to_xml_string(test_suites)
        print("Serialized XML to string:\n%s" % xml_string)
        xmldoc = minidom.parseString(xml_string)

    ret = []
    suites = xmldoc.getElementsByTagName("testsuites")[0]
    for suite in suites.getElementsByTagName("testsuite"):
        cases = suite.getElementsByTagName("testcase")
        ret.append((suite, cases))
    return ret
def test_to_xml_string_test_suites_not_a_list(self):
    test_suites = TestSuite('suite1', [TestCase('Test1')])
    try:
        TestSuite.to_xml_string(test_suites)
    except Exception as exc:
        self.assertEqual(str(exc), 'test_suites must be a list of test suites')
def main():
    args = parse_args()
    spawn_func = None
    spawn_arg = None
    if args.port is not None:
        spawn_func = spawn_port
        spawn_arg = args.port
    elif args.executable is not None:
        spawn_func = spawn_exec
        spawn_arg = args.executable
    name = args.name or ""
    global debug
    if args.debug:
        debug = True
    if spawn_func is None:
        debug_print("Please specify port or executable", file=sys.stderr)
        return 1
    mocks = {}
    if args.mock is not None:
        mocks_mod = imp.load_source('mocks', args.mock)
        mocks = mock_decorators.env
    with spawn_func(spawn_arg) as sp:
        ts = run_tests(sp, name, mocks)
        if args.output:
            with open(args.output, "w") as f:
                TestSuite.to_file(f, [ts])
    return 0
def generateJUnitReport(self, lstRunResult, runResultDir):
    # create junit xml report file; uses junit-xml 1.4 (pip install junit-xml)
    resultFileName = runResultDir + os.path.sep + 'RunResult.xml'
    previousCaseModuleName = ''
    rowIndex = 0
    lstTestSuites = []
    testSuite = []
    for runResult in lstRunResult:
        # runResult = (sheetName, moduleName, testCaseID, runResult, timeElapsedSec, failureMessage)
        testCaseName = runResult[2]
        className = runResult[1] + '.' + runResult[2]
        timeElapsedSec = runResult[4]
        failureMessage = runResult[5]
        testCase = TestCase(testCaseName, className, timeElapsedSec)
        testCase.add_failure_info(None, failureMessage)
        currTestCaseModuleName = runResult[1]
        if not currTestCaseModuleName == previousCaseModuleName:
            # start a new suite whenever the module name changes
            testSuite = TestSuite(currTestCaseModuleName)
            lstTestSuites.append(testSuite)
            previousCaseModuleName = currTestCaseModuleName
        testSuite.test_cases.append(testCase)
    #print TestSuite.to_xml_string(lstTestSuites)
    # Write the xml content to the result file
    with open(resultFileName, 'w') as f:
        TestSuite.to_file(f, lstTestSuites)
def run_everything_else(xml = False):
    mega_suite = []
    tests = [
        run_test_arakoon_changes,
        run_tests_cli,
        run_test_big_object
    ]
    for x in tests:
        r = x()
        mega_suite.append(r)

    if is_true(xml):
        from junit_xml import TestSuite, TestCase
        test_cases = []
        for (suite, results) in mega_suite:
            for (name, result, delta) in results:
                test_case = TestCase(name, suite, elapsed_sec = delta)
                if not result:
                    test_case.add_error_info(message = "failed")
                test_cases.append(test_case)
        ts = [TestSuite("run_everything_else", test_cases)]
        with open('./testresults.xml', 'w') as f:
            TestSuite.to_file(f, ts)
    else:
        print mega_suite
def dump_junit_xml():
    from junit_xml import TestSuite, TestCase
    test_cases = [TestCase('testname', 'package.test', 123.345,
                           'I am stdout!', 'I am stderr!')]
    ts = [TestSuite("stress test suite", test_cases)]
    with open('./testresults.xml', mode='w') as f:
        TestSuite.to_file(f, ts)
def generate(self):
    """
    Generates the report
    """
    self._setup()
    for config_name in self.report_info.config_to_test_names_map.keys():
        config_dir = os.path.join(self.report_info.resource_dir, config_name)
        utils.makedirs(config_dir)
        testsuite = self._generate_junit_xml(config_name)
        with open(os.path.join(self.report_info.junit_xml_path,
                               'zopkio_junit_reports.xml'), 'w') as file:
            TestSuite.to_file(file, [testsuite], prettyprint=False)
def take_action(self, args):
    test_cases = []
    if args.playbook is not None:
        playbooks = args.playbook
        results = (models.TaskResult().query
                   .join(models.Task)
                   .filter(models.TaskResult.task_id == models.Task.id)
                   .filter(models.Task.playbook_id.in_(playbooks)))
    else:
        results = models.TaskResult().query.all()

    for result in results:
        task_name = result.task.name
        if not task_name:
            task_name = result.task.action
        additional_results = {
            'host': result.host.name,
            'playbook_path': result.task.playbook.path
        }
        result_str = jsonutils.dumps(additional_results)
        test_path = u'{playbook_file}.{play_name}'.format(
            playbook_file=os.path.basename(result.task.playbook.path),
            play_name=result.task.play.name)
        test_case = TestCase(
            name=task_name,
            classname=test_path,
            elapsed_sec=result.duration.seconds,
            stdout=result_str)
        if result.status == 'skipped':
            test_case.add_skipped_info(message=result.result)
        elif ((result.status in ('failed', 'unreachable') and
               result.ignore_errors is False and
               'EXPECTED FAILURE' not in task_name and
               'TOGGLE RESULT' not in task_name) or
              (result.status == 'ok' and 'TOGGLE RESULT' in task_name)):
            test_case.add_failure_info(message=result.result)
        test_cases.append(test_case)

    test_suite = TestSuite('Ansible Tasks', test_cases)

    # TODO: junit_xml doesn't order the TestCase parameters.
    # This makes it so the order of the parameters for the same exact
    # TestCase is not guaranteed to be the same and thus results in a
    # different stdout (or file). This is easily reproducible on Py3.
    xml_string = six.text_type(test_suite.to_xml_string([test_suite]))
    if args.output_file == '-':
        if six.PY2:
            sys.stdout.write(encodeutils.safe_encode(xml_string))
        else:
            sys.stdout.buffer.write(encodeutils.safe_encode(xml_string))
    else:
        with open(args.output_file, 'wb') as f:
            f.write(encodeutils.safe_encode(xml_string))
def Run(conf, xmldir):
    logfile = "%s/pacemaker.log" % xmldir
    cluster_env = readClusterConf(conf)
    testcases = []

    #Name of Test Suite
    TestSuiteName = "Running pacemaker-cts"
    #Name of junit xml file
    JunitXML = "junit-pacemakerCTS-ha.xml"

    #Define testcases
    #testcases = [(TestcaseName, TestcaseClass, TestcaseFunction)]
    #eg.
    # ('PacemakerService', 'SetupCluster.service', runPackmakerService)
    #Define function runPackmakerService before using
    cases_def = [("Test Flip", "Flip.PacemakerCTS.service", get_result),
                 ("Test Restart", "Restart.PacemakerCTS.service", get_result),
                 ("Test Stonithd", "Stonithd.PacemakerCTS.service", get_result),
                 ("Test StartOnebyOne", "StartOnebyOne.PacemakerCTS.service", get_result),
                 ("Test SimulStart", "SimulStart.PacemakerCTS.service", get_result),
                 ("Test SimulStop", "SimulStop.PacemakerCTS.service", get_result),
                 ("Test StopOnebyOne", "StopOnebyOne.PacemakerCTS.service", get_result),
                 ("Test RestartOnebyOne", "RestartOnebyOne.PacemakerCTS.service", get_result),
                 ("Test PartialStart", "PartialStart.PacemakerCTS.service", get_result),
                 ("Test Standby", "Standby.PacemakerCTS.service", get_result),
                 ("Test MaintenanceMode", "MaintenanceMode.PacemakerCTS.service", get_result),
                 ("Test ResourceRecover", "ResourceRecover.PacemakerCTS.service", get_result),
                 ("Test ComponentFail", "ComponentFail.PacemakerCTS.service", get_result),
                 ("Test Reattach", "Reattach.PacemakerCTS.service", get_result),
                 ("Test SpecialTest1", "SpecialTest1.PacemakerCTS.service", get_result),
                 ("Test NearQuorumPoint", "NearQuorumPoint.PacemakerCTS.service", get_result),
                 ("Test RemoteBasic", "RemoteBasic.PacemakerCTS.service", get_result),
                 ("Test RemoteStonithd", "RemoteStonithd.PacemakerCTS.service", get_result),
                 ("Test RemoteMigrate", "RemoteMigrate.PacemakerCTS.service", get_result),
                 ("Test RemoteRscFailure", "RemoteRscFailure.PacemakerCTS.service", get_result)]

    #Not necessary to modify the lines below!
    skip_flag = False
    for a_case in cases_def:
        case = TestCase(a_case[0], a_case[1])
        testcases.append(case)
        if skip_flag:
            skipCase(case, "Pacemaker service of the first node not started.")
            continue
        skip_flag = assertCase(case, a_case[2], cluster_env, a_case[0], logfile)

    ts = TestSuite(TestSuiteName, testcases)
    with open(xmldir+"/"+JunitXML, "w") as f:
        ts.to_file(f, [ts])
def exporter_junit(test_result_ext, test_suite_properties=None):
    """! Export test results in JUnit XML compliant format
    @details This function will import junit_xml library to perform report conversion
    @return String containing Junit XML formatted test result output
    """
    from junit_xml import TestSuite, TestCase

    test_suites = []
    test_cases = []

    targets = sorted(test_result_ext.keys())
    for target in targets:
        test_cases = []
        tests = sorted(test_result_ext[target].keys())
        for test in tests:
            test_results = test_result_ext[target][test]
            classname = 'test.%s.%s' % (target, test)
            elapsed_sec = test_results['elapsed_time']
            _stdout = test_results['single_test_output']
            _stderr = ''
            # Test case
            tc = TestCase(test, classname, elapsed_sec, _stdout, _stderr)
            # Test case extra failure / error info
            if test_results['single_test_result'] == 'FAIL':
                message = test_results['single_test_result']
                tc.add_failure_info(message, _stdout)
            elif test_results['single_test_result'] != 'OK':
                message = test_results['single_test_result']
                tc.add_error_info(message, _stdout)
            test_cases.append(tc)
        ts = TestSuite("test.suite.%s" % target, test_cases)
        test_suites.append(ts)
    return TestSuite.to_xml_string(test_suites)
def parse(infile, outfile, format_type, classname, suitename):
    testcases = list()
    testcase_logs = list()
    current = None
    test_block_delimiter = known_formats[format_type]['tb_delimiter']

    # separate log file into test blocks by test block delimiter
    for line in infile:
        if test_block_delimiter(line):
            if current:
                # non-empty list
                testcase_logs.append(current)
            current = list()
        if current is not None:
            current.append(line)
    # add last record if present
    if current not in testcase_logs:
        testcase_logs.append(current)

    # create test cases from test blocks
    for entry in testcase_logs:
        testcases.append(known_formats[format_type]['test_parser'](entry, classname))

    # generate test suite result using provided test cases
    test_suite = TestSuite(suitename, testcases)
    # get rid of unnecessary 'disabled' strings in formatted xml string
    s = TestSuite.to_xml_string([test_suite])
    s = s.replace(' disabled=\"0\"', '')
    # write xml to outfile
    outfile.write(s)
def exporter_junit_ioper(self, test_result_ext, test_suite_properties=None):
    from junit_xml import TestSuite, TestCase
    test_suites = []
    test_cases = []

    for platform in sorted(test_result_ext.keys()):
        # {platform : ['Platform', 'Result', 'Scope', 'Description'])
        test_cases = []
        for tr_result in test_result_ext[platform]:
            result, name, scope, description = tr_result
            classname = "test.ioper.%s.%s.%s" % (platform, name, scope)
            elapsed_sec = 0
            _stdout = description
            _stderr = ""
            # Test case
            tc = TestCase(name, classname, elapsed_sec, _stdout, _stderr)
            # Test case extra failure / error info
            if result == "FAIL":
                tc.add_failure_info(description, _stdout)
            elif result == "ERROR":
                tc.add_error_info(description, _stdout)
            elif result == "SKIP" or result == "NOT_SUPPORTED":
                tc.add_skipped_info(description, _stdout)
            test_cases.append(tc)
        ts = TestSuite("test.suite.ioper.%s" % (platform), test_cases)
        test_suites.append(ts)
    return TestSuite.to_xml_string(test_suites)
def _test():
    test_case1 = TestCase('Testname1', 'SetupCluster.Name')
    test_case2 = TestCase('Testname2', 'SetupCluster.Name')
    test_case3 = TestCase('Testname3', 'SetupCluster.Misc')
    test_case4 = TestCase('Testname4', 'SetupCluster.Misc')
    test_cases = [test_case1, test_case2, test_case3, test_case4]
    ts = TestSuite("My Test Suite", test_cases)

    #Run and verify test case
    assertCase(test_case1, _exampleFunc(True))
    assertCase(test_case2, _exampleFunc(False))
    assertCase(test_case3, _exampleFunc(True))

    #Skip test case
    skipCase(test_case4, "Skip Testname4.", "Testname2 is failed.")

    print(ts.to_xml_string([ts]))
def _output_normal(self, test_result):
    # Need refactor
    if test_result == {}:
        print '[what?!] there are not any test result, what is the test case id?'
    else:
        print
        xml_test_suites = []
        summary_dict = self._get_summary_dict(test_result)
        self.report_create_time = str(time.strftime('%Y%m%d_%H%M%S', time.localtime()))
        for case_classify in test_result.keys():
            xml_test_cases = []
            if 'result' in test_result[case_classify].keys():
                # Generate HTML report
                self._generate_html_file(
                    case_classify,
                    test_result[case_classify]['result'],
                    test_result[case_classify]['summary'])
                # Save the result into the CSV
                self._output_result_to_csv(test_result)
                # Show in Console
                print '{0} {1} {2}'.format('='*16, case_classify, '='*16)
                test_case_result = test_result[case_classify]['result']
                for case_id in test_case_result.keys():
                    print '[{0}][{1}] {2}, {3}, {4}'.format(case_classify, case_id,
                                                            test_case_result[case_id][0],
                                                            test_case_result[case_id][1],
                                                            str(test_case_result[case_id][2]))
                    # Produce xml file
                    test_case = TestCase(case_id, case_classify, int(test_case_result[case_id][2]))
                    if test_case_result[case_id][0] == 'Fail' or test_case_result[case_id][0] == 'Error':
                        try:
                            test_case.add_failure_info('msg' + test_case_result[case_id][1])
                        except:
                            test_case.add_failure_info('msg' + str(test_case_result[case_id]))
                    xml_test_cases.append(test_case)
                xml_test_suites.append(TestSuite(case_classify, xml_test_cases))
                with open(os.path.join(self.latest_reports_dir, case_classify + '.xml'), 'w') as f:
                    TestSuite.to_file(f, xml_test_suites, prettyprint=True)
        self._generate_summary_html_file(summary_dict)
        print '{0} {1} {2}'.format('='*16, 'Summary', '='*16)
        pprint.pprint(summary_dict)
def build_packages(self, packages):
    self._results = []
    for package, version in packages:
        if self._should_package_be_build(package, version):
            logger.info('Building %s %s', package, version)
            try:
                wheel_file = self._builder(package, version)
                self._upload_package(package, version, wheel_file)
                self._log_success(package, version)
            except wheeler.BuildError as e:
                self._log_fail(e, package, version)
    if self._junit_xml:
        with open(self._junit_xml, 'w') as output:
            test_suite = TestSuite('devpi-builder results', self._results)
            TestSuite.to_file(output, [test_suite])
def parseLog(TestSuiteName, xmlfile, caseset, test_results, cluster_env):
    testcases = []

    #Not necessary to modify the lines below!
    skip_flag = False
    for a_case in caseset:
        case = TestCase(a_case[0], a_case[1])
        testcases.append(case)
        if skip_flag:
            skipCase(case, "This case is not scheduled.")
            continue
        skip_flag = assertCase(case, a_case[2], cluster_env, a_case[0], test_results)

    ts = TestSuite(TestSuiteName, testcases)
    with open(xmlfile, "w") as f:
        ts.to_file(f, [ts])
def publish_result(file=None):
    ts = [TestSuite("Manifest launcher", test_cases)]
    if file:
        with open(file, 'w') as f:
            TestSuite.to_file(f, ts, prettyprint=True, encoding='utf-8')
    else:
        err = 0
        pas = 0
        for case in test_cases:
            if case.is_error() or case.is_failure():
                print case.name + ':' + case.classname + ' ... FAIL'
                print case.failure_message
                err += 1
            else:
                print case.name + ':' + case.classname + '... PASS'
                pas += 1
        print "Passed: %d, failed: %d\n" % (pas, err)
def execute(self, log, keyvals, testDef):
    testDef.logger.verbose_print("JunitXML Reporter")
    # pickup the options
    cmds = {}
    testDef.parseOptions(log, self.options, keyvals, cmds)
    if cmds['filename'] is not None:
        self.fh = open(cmds['filename'] if os.path.isabs(cmds['filename'])
                       else os.path.join(cmds['scratch'], cmds['filename']), 'w')
    if testDef.options['description'] is not None:
        print(testDef.options['description'], file=self.fh)
        print(file=self.fh)

    # Use the Junit classname field to store the list of inifiles
    try:
        classname = testDef.log['inifiles']
    except KeyError:
        classname = None

    # get the entire log of results
    fullLog = testDef.logger.getLog(None)
    testCases = []
    # TODO: ain't nobody got time for that. 8-).
    time = 0
    for lg in fullLog:
        if 'stdout' in lg and lg['stdout'] is not None:
            stdout = "\n".join(lg['stdout'])
        else:
            stdout = None
        if 'stderr' in lg and lg['stderr'] is not None:
            stderr = "\n".join(lg['stderr'])
        else:
            stderr = None
        if 'time' in lg and lg['time'] is not None:
            time = lg['time']
        else:
            time = 0
        tc = TestCase(lg['section'], classname, time, stdout, stderr)
        try:
            if 0 != lg['status']:
                # Find sections prefixed with 'TestRun'
                if re.match("TestRun", lg['section']):
                    tc.add_failure_info("Test reported failure")
                else:
                    tc.add_error_info("Test error")
        except KeyError:
            sys.exit(lg['section'] + " is missing status!")
        testCases.append(tc)

    # TODO: Pull in the resource manager jobid.
    jobid = "job1"
    ts = TestSuite(jobid, testCases)
    print(TestSuite.to_xml_string([ts]), file=self.fh)
    if cmds['filename'] is not None:
        self.fh.close()
    log['status'] = 0
    return
def Run(conf, xmldir):
    cluster_env = readClusterConf(conf)
    testcases = []

    #Name of Test Suite
    TestSuiteName = "Setup HA Cluster"
    #Name of junit xml file
    JunitXML = "junit-setup-ha.xml"

    #Define testcases
    #testcases = [(TestcaseName, TestcaseClass, TestcaseFunction)]
    #eg.
    # ('PacemakerService', 'SetupCluster.service', runPackmakerService)
    #Define function runPackmakerService before using
    cases_def = [('PacemakerService', 'SetupCluster.service', runPackmakerService),
                 ('NodesNumber', 'SetupCluster.nodes', runNodesNumber),
                 ('NodesStatus', 'SetupCluster.nodes', runNodesStatus)]
                 #('ConfigureRes', 'SetupCluster.resources', runConfigureRes)]

    #Not necessary to modify the lines below!
    skip_flag = False
    for a_case in cases_def:
        case = TestCase(a_case[0], a_case[1])
        testcases.append(case)
        if skip_flag:
            skipCase(case, "Can not test!", "Pacemaker service of the first node not started.")
            continue
        skip_flag = assertCase(case, a_case[2], cluster_env)

    ts = TestSuite(TestSuiteName, testcases)
    with open(xmldir+"/"+JunitXML, "w") as f:
        ts.to_file(f, [ts])

    lines = os.popen("ssh root@%s crm_mon -1r" % cluster_env["IP_NODE1"]).readlines()
    with open(xmldir+"/"+"crm_mon", "w") as p:
        p.writelines(lines)

    lines = os.popen("ssh root@%s cat /etc/YaST2/*build*" % cluster_env["IP_NODE1"]).readlines()
    with open(xmldir+"/"+"host-build", "w") as p:
        p.writelines(lines)
def serialize_and_read(test_suites, to_file=False, prettyprint=False, encoding=None):
    """writes the test suite to an XML string and then re-reads it using minidom,
       returning => (test suite element, list of test case elements)"""
    try:
        iter(test_suites)
    except TypeError:
        test_suites = [test_suites]

    if to_file:
        fd, filename = tempfile.mkstemp(text=True)
        os.close(fd)
        with codecs.open(filename, mode='w', encoding=encoding) as f:
            TestSuite.to_file(f, test_suites, prettyprint=prettyprint, encoding=encoding)
        print("Serialized XML to temp file [%s]" % filename)
        xmldoc = minidom.parse(filename)
        os.remove(filename)
    else:
        xml_string = TestSuite.to_xml_string(
            test_suites, prettyprint=prettyprint, encoding=encoding)
        if PY2:
            assert isinstance(xml_string, unicode)
        print("Serialized XML to string:\n%s" % xml_string)
        if encoding:
            xml_string = xml_string.encode(encoding)
        xmldoc = minidom.parseString(xml_string)

    def remove_blanks(node):
        for x in node.childNodes:
            if x.nodeType == minidom.Node.TEXT_NODE:
                if x.nodeValue:
                    x.nodeValue = x.nodeValue.strip()
            elif x.nodeType == minidom.Node.ELEMENT_NODE:
                remove_blanks(x)
    remove_blanks(xmldoc)
    xmldoc.normalize()

    ret = []
    suites = xmldoc.getElementsByTagName("testsuites")[0]
    for suite in suites.getElementsByTagName("testsuite"):
        cases = suite.getElementsByTagName("testcase")
        ret.append((suite, cases))
    return ret
def Run(conf, xmldir):
    cluster_env = readClusterConf(conf)
    testcases = []

    #Name of Test Suite
    TestSuiteName = "Setup HA Cluster"
    #Name of junit xml file
    JunitXML = "junit-drbd-pacemaker.xml"

    #Define testcases
    #testcases = [(TestcaseName, TestcaseClass, TestcaseFunction)]
    #eg.
    # ('PacemakerService', 'SetupCluster.service', runPackmakerService)
    #Define function runPackmakerService before using
    cases_def = [('drbdPacemakerRes', 'SetupCluster.drbd', configurePacemaker),
                 ('drbdUpToDateBefore', 'DRBD.disks', checkDRBDState),
                 ('drbdPrimaryBefore', 'DRBD.state', checkDRBDRole),
                 ('drbdShowInPacemaker', 'DRBD.pacemaker', checkPacemakerStatus),
                 ('drbdSwitchMaster', 'DRBD.pacemaker', switchDRBD),
                 ('drbdUpToDateAfter', 'DRBD.disks', checkDRBDState),
                 ('drbdPrimaryAfter', 'DRBD.state', checkDRBDRole),
                 ('drbdShowInPacemakerAfter', 'DRBD.pacemaker', checkPacemakerStatus)]
                 #('ConfigureRes', 'SetupCluster.resources', runConfigureRes)]

    #Not necessary to modify the lines below!
    skip_flag = False
    for a_case in cases_def:
        case = TestCase(a_case[0], a_case[1])
        testcases.append(case)
        if skip_flag:
            skipCase(case, "Can not test!",
                     "Pacemaker service of the first node not started or didn't configure DRBD.")
            continue
        skip_flag = assertCase(case, a_case[2], cluster_env)
        sleep(3)

    ts = TestSuite(TestSuiteName, testcases)
    with open(xmldir+"/"+JunitXML, "w") as f:
        ts.to_file(f, [ts])
def test_init_classname_time(self):
    (ts, tcs) = serialize_and_read(
        TestSuite('test', [
            TestCase(name='Test1', classname='some.class.name', elapsed_sec=123.345)
        ]))[0]
    verify_test_case(
        self, tcs[0],
        {'name': 'Test1',
         'classname': 'some.class.name',
         'time': ("%f" % 123.345)})
def write_xml_file(build_event):
    from junit_xml import TestSuite, TestCase

    build_id = build_event["id"]
    steps = build_event["steps"]
    outputs = build_event["results"]["buildStepOutputs"]
    test_cases = []

    for x in range(len(steps)):
        step = steps[x]
        output = outputs[x]
        status = step["status"]
        start_time, elapsed = get_elapsed_time(step)
        if status in ("FAILURE", "INTERNAL_ERROR", "TIMEOUT", "EXPIRED"):
            failure = outputs[x] or status
            test = TestCase(
                name=step.get("id") or step.get("name"),
                stderr=failure,
                timestamp=start_time,
                elapsed_sec=elapsed
            )
            test.add_failure_info(build_event.get("logUrl"))
        else:
            test = TestCase(
                name=step.get("id") or step.get("name"),
                stdout=outputs[x] or status,
                timestamp=start_time,
                elapsed_sec=elapsed,
            )
        test_cases.append(test)

    ts = TestSuite("Cloud Build Suite", test_cases)

    # create a new XML file with the results (with-block ensures the file is closed)
    with open("/tmp/sponge_log.xml", "w") as sponge_log:
        sponge_log.write(TestSuite.to_xml_string([ts]))
def v2_playbook_on_stats(self, stats):
    """
    Implementation of the callback endpoint to be fired
    when a playbook is finished. As we are only running
    one playbook at a time, we know we are done logging
    and can aggregate the jUnit test suite and serialize it.

    :param stats: statistics about the run
    """
    suites = []
    for play_name in self.test_suites:
        suites.append(TestSuite(play_name, self.test_suites[play_name]))

    if 'ANSIBLE_JUNIT_DIR' in os.environ:
        log_dir = abspath(getenv('ANSIBLE_JUNIT_DIR'))
    else:
        if 'OCT_CONFIG_HOME' in os.environ:
            base_dir = getenv('OCT_CONFIG_HOME')
        else:
            base_dir = abspath(join(expanduser('~'), '.config'))
        log_dir = abspath(join(base_dir, 'origin-ci-tool', 'logs', 'junit'))

    if not exists(log_dir):
        makedirs(log_dir)

    log_filename = ''
    for _ in range(10):
        log_basename = '{}.xml'.format(''.join(choice(ascii_letters) for _ in range(10)))
        log_filename = join(log_dir, log_basename)
        if not exists(log_filename):
            # TODO: determine a better way to do this
            break

    contents = TestSuite.to_xml_string(suites, 'utf-8')
    with codec_open(log_filename, 'w', 'utf-8') as result_file:
        result_file.write(contents)
def run_api_tests(args, data_format):
    endpoints = []
    for i in range(len(args.host)):
        if args.port[i] == 0:
            args.port[i] = None
        endpoints.append({"host": args.host[i],
                          "port": args.port[i],
                          "version": args.version[i]})
    results = run_tests(args.suite, endpoints, [args.selection])
    if data_format == "xml":
        formatted_test_results = format_test_results(results, endpoints, "junit", args)
        return TestSuite.to_xml_string([formatted_test_results], prettyprint=True)
    else:
        formatted_test_results = format_test_results(results, endpoints, "json", args)
        return json.loads(formatted_test_results)
def createTestSuits(scriptFiles, root):
    lenRootParts = len(Path(root).parts)
    testSuits = []
    for scriptFile in map(Path, scriptFiles):
        tc = TestCase(scriptFile.parent.name, file=scriptFile)
        tsName = scriptFile.parts[-3] if len(scriptFile.parts) - lenRootParts > 2 else ""
        ts = next((testSuit for testSuit in testSuits if testSuit.name == tsName), None)
        if ts is None:
            testSuits.append(TestSuite(tsName, [tc]))
        else:
            ts.test_cases.append(tc)
    return testSuits
def createJunitTestResults(boardToResults, fileName):
    """Create junit xml test result.

    Args:
        boardToResults(dict[str:obj(OtaTestResult)]): Dictionary of the board name to its OtaTestResult.
        fileName: The name of the junit test file to create.
    """
    testSuites = []
    for board in boardToResults.keys():
        testCases = []
        for otaTestResult in boardToResults[board]:
            testCase = TestCase(otaTestResult.testName, classname=board + '.OTAEndToEndTests')
            testCases.append(testCase)
            if otaTestResult.result == OtaTestResult.FAIL:
                testCases[-1].add_failure_info(message=otaTestResult.summary)
            elif otaTestResult.result == OtaTestResult.ERROR:
                testCases[-1].add_skipped_info(message=otaTestResult.summary)
        testSuites.append(TestSuite(board, test_cases=testCases, package=board))

    with open(fileName, 'w') as f:
        TestSuite.to_file(f, testSuites)
def test_init_stderr(self):
    (ts, tcs) = serialize_and_read(
        TestSuite('test', [
            TestCase('Test1', 'some.class.name', 123.345, stderr='I am stderr!')
        ]))[0]
    verify_test_case(self, tcs[0],
                     {'name': 'Test1',
                      'classname': 'some.class.name',
                      'time': ("%f" % 123.345)},
                     stderr='I am stderr!')
def teardown(self):
    if len(self.failed_test):
        test_cases = self.failed_test
    else:
        test_cases = list()
        test_cases.append(TestCase(name='Fuzz test succeed', status='Pass'))
    if self.junit_report_path:
        with open(self.junit_report_path, 'w') as report_file:
            to_xml_report_file(report_file, [TestSuite("API Fuzzer", test_cases)],
                               prettyprint=True)
    super(ServerTarget, self).teardown()  # pylint: disable=E1003
def pytest_runtest_teardown(item):
    # called for running each test in 'a' directory
    localPath = item.fspath
    baseScript = os.path.basename(item.fspath.strpath)
    tsObj = next((x for x in testSuiteList if x.name == baseScript), None)
    # print("runtest teardown for " + baseScript + " : " + str(len(testCaseList)))
    if tsObj is None:
        # logging.getLogger().info(' Tear down Test suite ' + baseScript + ' with test cases ' + str(len(testCaseList)))
        ts = TestSuite(baseScript, testCaseDictList[item.fspath.strpath])
        testSuiteList.append(ts)
def exporter_junit(self, test_result_ext, test_suite_properties=None):
    """ Export test results in JUnit XML compliant format
    """
    from junit_xml import TestSuite, TestCase

    test_suites = []
    test_cases = []

    targets = sorted(test_result_ext.keys())
    for target in targets:
        toolchains = sorted(test_result_ext[target].keys())
        for toolchain in toolchains:
            test_cases = []
            tests = sorted(test_result_ext[target][toolchain].keys())
            for test in tests:
                test_results = test_result_ext[target][toolchain][test]
                for test_res in test_results:
                    test_ids = sorted(test_res.keys())
                    for test_no in test_ids:
                        test_result = test_res[test_no]
                        name = test_result["description"]
                        classname = "%s.%s.%s.%s" % (self.package, target, toolchain, test_result["id"])
                        elapsed_sec = test_result["elapsed_time"]
                        _stdout = test_result["output"]
                        if "target_name_unique" in test_result:
                            _stderr = test_result["target_name_unique"]
                        else:
                            _stderr = test_result["target_name"]
                        # Test case
                        tc = TestCase(name, classname, elapsed_sec, _stdout, _stderr)
                        # Test case extra failure / error info
                        message = test_result["result"]
                        if test_result["result"] == "FAIL":
                            tc.add_failure_info(message, _stdout)
                        elif test_result["result"] == "SKIP" or test_result["result"] == "NOT_SUPPORTED":
                            tc.add_skipped_info(message, _stdout)
                        elif test_result["result"] != "OK":
                            tc.add_error_info(message, _stdout)
                        test_cases.append(tc)
            ts = TestSuite(
                "test.suite.%s.%s" % (target, toolchain),
                test_cases,
                properties=test_suite_properties[target][toolchain],
            )
            test_suites.append(ts)
    return TestSuite.to_xml_string(test_suites)
def main(mode, ratelimit, projects, age, artifacts, filt):
    """Run janitor for each project."""
    if mode == 'pr':
        check_predefine_jobs(PR_PROJECTS, ratelimit)
    elif mode == 'custom':
        projs = str.split(projects, ',')
        for proj in projs:
            clean_project(proj.strip(), hours=age, ratelimit=ratelimit, filt=filt)
    else:
        check_ci_jobs()

    # Summary
    print 'Janitor checked %d projects, %d failed to clean up.' % (len(CHECKED), len(FAILED))
    print HAS_JUNIT
    if artifacts:
        output = os.path.join(artifacts, 'junit_janitor.xml')
        if not HAS_JUNIT:
            print 'Please install junit-xml (https://pypi.org/project/junit-xml/)'
        else:
            print 'Generating junit output:'
            tcs = []
            for project in CHECKED:
                tc = TestCase(project, 'kubernetes_janitor')
                if project in FAILED:
                    # TODO(krzyzacy): pipe down stdout here as well
                    tc.add_failure_info('failed to clean up gcp project')
                tcs.append(tc)
            ts = TestSuite('janitor', tcs)
            with open(output, 'w') as f:
                TestSuite.to_file(f, [ts])
    if FAILED:
        print >> sys.stderr, 'Failed projects: %r' % FAILED
        exit(1)
def exporter_junit(self, test_result_ext, test_suite_properties=None):
    """ Export test results in JUnit XML compliant format
    """
    from junit_xml import TestSuite, TestCase

    test_suites = []
    test_cases = []

    toolchains = sorted(test_result_ext.keys())
    for toolchain in toolchains:
        targets = sorted(test_result_ext[toolchain].keys())
        for target in targets:
            test_cases = []
            tests = sorted(test_result_ext[toolchain][target].keys())
            for test in tests:
                test_results = test_result_ext[toolchain][target][test]
                for test_res in test_results:
                    test_ids = sorted(test_res.keys())
                    for test_no in test_ids:
                        test_result = test_res[test_no]
                        name = test_result['test_description']
                        classname = 'test.%s.%s.%s' % (target, toolchain, test_result['test_id'])
                        elapsed_sec = test_result['elapsed_time']
                        _stdout = test_result['single_test_output']
                        _stderr = test_result['target_name_unique']
                        # Test case
                        tc = TestCase(name, classname, elapsed_sec, _stdout, _stderr)
                        # Test case extra failure / error info
                        if test_result['single_test_result'] == 'FAIL':
                            message = test_result['single_test_result']
                            tc.add_failure_info(message, _stdout)
                        elif test_result['single_test_result'] != 'OK':
                            message = test_result['single_test_result']
                            tc.add_error_info(message, _stdout)
                        test_cases.append(tc)
            ts = TestSuite("test.suite.%s.%s" % (target, toolchain), test_cases,
                           properties=test_suite_properties[target][toolchain])
            test_suites.append(ts)
    return TestSuite.to_xml_string(test_suites)
def generate_test_report(cfn_guard_output_list):
    """
    Generates a test report that contains the collection of test cases generated
    for each CloudFormation Guard execution and writes it to a file: "output.xml".

    Parameters
    ----------
    cfn_guard_output_list : list
        List of evaluations from the `validate_cloudformation_template` function.

    Returns
    -------
    None
    """
    test_cases = []
    for output in cfn_guard_output_list:
        test_cases.append(
            generate_test_case(output["Output"], output["Template"], output["RuleSet"]))
    test_suite = TestSuite("cloudformation-guard tests", test_cases)
    print(TestSuite.to_xml_string([test_suite]))
    with open("output.xml", "w") as f:
        TestSuite.to_file(f, [test_suite], prettyprint=False)
    return None
def cli(infile, outfile, debug):
    if debug:
        logger.setLevel(logging.DEBUG)
    logger.info("infile: {inf} outfile: {outf}".format(inf=infile.name, outf=outfile.name))
    tree = ET.parse(infile)
    root = tree.getroot()
    test_cases = []
    files = root.findall("file")
    for f in files:
        for b in f.findall("BugInstance"):
            classname = f.get("classname")
            tc = TestCase(
                b.get("type"),  # name
                classname,      # classname
            )
            # mark test case as a failure
            tc.add_failure_info("{0} line:{1}".format(b.get("message"), b.get("lineNumber")))
            test_cases.append(tc)
    ts = TestSuite("Findbugs", test_cases)
    ts.to_file(outfile, [ts])
def __build_test_suite(json_result, kind, sla_json):
    status = sla_json['status']
    category = sla_json['element']['category']
    user_path = sla_json['element']['userpath']
    suite_name = 'com.neotys.%s.%s%s' % (category, kind.replace(' ', ''),
                                         ('' if user_path == '' else '.%s' % user_path))
    test_name = sla_json['kpi']
    tc = TestCase(test_name, suite_name)
    if status == "FAILED" or status == "WARNING":
        txt = __build_unit_test(json_result, kind, sla_json)
        tc.add_failure_info("SLA failed", txt, 'NeoLoad SLA')
    return TestSuite(suite_name, [tc])
def print_result_cache_junitxml(dict_synonyms, suspicious_policy, untested_policy):
    test_cases = []
    mutant_list = list(select(x for x in Mutant))
    for filename, mutants in groupby(mutant_list, key=lambda x: x.line.sourcefile.filename):
        for mutant in mutants:
            tc = TestCase("Mutant #{}".format(mutant.id), file=filename,
                          line=mutant.line.line_number, stdout=mutant.line.line)
            if mutant.status == BAD_SURVIVED:
                tc.add_failure_info(message=mutant.status,
                                    output=get_unified_diff(mutant.id, dict_synonyms))
            if mutant.status == BAD_TIMEOUT:
                tc.add_error_info(message=mutant.status, error_type="timeout",
                                  output=get_unified_diff(mutant.id, dict_synonyms))
            if mutant.status == OK_SUSPICIOUS:
                if suspicious_policy != 'ignore':
                    func = getattr(tc, 'add_{}_info'.format(suspicious_policy))
                    func(message=mutant.status, output=get_unified_diff(mutant.id, dict_synonyms))
            if mutant.status == UNTESTED:
                if untested_policy != 'ignore':
                    func = getattr(tc, 'add_{}_info'.format(untested_policy))
                    func(message=mutant.status, output=get_unified_diff(mutant.id, dict_synonyms))
            test_cases.append(tc)

    ts = TestSuite("mutmut", test_cases)
    print(TestSuite.to_xml_string([ts]))
async def trigger_test_run(self, ev=None):
    try:
        await asyncio.wait_for(self.platform_event.wait(), self.discover_dependencies_timeout)
    except asyncio.exceptions.TimeoutError as e:
        self.logger.error('Timeout to gather dependencies expired. There are unmet dependencies.')

    unmet_dependencies = self.dependencies.check_all()
    if unmet_dependencies:
        self.logger.error("Can't start tests because of not connected "
                          'TestSuiteTalent(s): %s', unmet_dependencies)
        test_suites = []
        for unmet_dep in unmet_dependencies:
            test_suites.append(TestSuite(unmet_dep, [],
                                         stderr=f'Can not start tests because of not connected'
                                                f' TestSuiteTalent(s): {unmet_dep}'))
        self.create_test_output(test_suites)
        result = False
    else:
        self.logger.info('Start Integration Tests')
        initial_event = {
            'returnTopic': INGESTION_TOPIC,
            'subject': INTEGRATION_TEST_SUBJECT
        }
        # TODO Even though we're waiting for the platform events of TestSuites to be
        # discovered before sending method invocations, there's a timeout calling the
        # function getTestSuiteInfo. The problem happens only if
        # TestRunner.registerTestSuite is invoked ???. If the suite is in config.json -
        # there's no such issue.
        if not self.config_json.get('testSuites', []):
            await asyncio.sleep(15)
        result = await self.run_test_suites(initial_event)
        self.logger.info('Overall test result is %s', result)

    result_event = {
        'subject': ev.get('subject') if ev is not None else INTEGRATION_TEST_SUBJECT,
        'type': ev.get('type') if ev is not None else 'default',
        'instance': ev.get('instance') if ev is not None else INTEGRATION_TEST_INSTANCE,
        'feature': 'testResultsHandler.test-result',
        'value': {'id': ev['value']['id'] if ev is not None else 1, 'result': result},
        'whenMs': round(time.time()*1000)
    }
    await self.pg.publish_json(INGESTION_TOPIC, result_event)

    if ev is None or ev['value']['exit']:
        # give time to publish the result before exiting
        await asyncio.sleep(5)
        if result:
            os._exit(0)
        else:
            # signal that tests have failed
            os._exit(1)
def _generate_report(self):
    """Generate a TestSuite report.

    Generate a TestSuite report from the collected TaskData and HostData.
    """
    test_cases = []

    for task_uuid, task_data in self._task_data.items():
        if task_data.action == 'setup' and \
                self._include_setup_tasks_in_report == 'false':
            continue
        for host_uuid, host_data in task_data.host_data.items():
            test_cases.append(self._build_test_case(task_data, host_data))

    test_suite = TestSuite(self._playbook_name, test_cases)
    report = TestSuite.to_xml_string([test_suite])

    output_file = os.path.join(
        self._output_dir, '%s-%s.xml' % (self._playbook_name, time.time()))
    with open(output_file, 'wb') as xml:
        xml.write(to_bytes(report, errors='surrogate_or_strict'))
def main():
    args = parse_args()
    spawn_func = None
    spawn_arg = None
    if args.port is not None:
        spawn_func = spawn_port
        spawn_arg = args.port
    elif args.executable is not None:
        spawn_func = spawn_exec
        spawn_arg = args.executable
    name = args.name or ""
    global debug
    if args.debug:
        debug = True
    if spawn_func is None:
        debug_print("Please specify port or executable", file=sys.stderr)
        return 1
    with spawn_func(spawn_arg) as sp:
        ts = run_tests(sp, name)
        if args.output:
            with open(args.output, "w") as f:
                TestSuite.to_file(f, [ts])
    return 0
def format_test_results(results, format):
    formatted = None
    if format == "json":
        formatted = {
            "suite": results["suite"],
            "url": results["base_url"],
            "timestamp": time.time(),
            "results": []
        }
        for test_result in results["result"]:
            formatted["results"].append({
                "name": test_result.name,
                "state": str(test_result.state),
                "detail": test_result.detail
            })
        formatted = json.dumps(formatted, sort_keys=True, indent=4)
    elif format == "junit":
        test_cases = []
        for test_result in results["result"]:
            test_case = TestCase(test_result.name, classname=results["suite"],
                                 elapsed_sec=test_result.elapsed_time,
                                 timestamp=test_result.timestamp)
            # NB: 'args' is not a parameter here; it must be available from the
            # enclosing module scope.
            if test_result.name in args.ignore or test_result.state in [
                    TestStates.DISABLED, TestStates.UNCLEAR, TestStates.MANUAL,
                    TestStates.NA, TestStates.OPTIONAL]:
                test_case.add_skipped_info(test_result.detail)
            elif test_result.state in [TestStates.WARNING, TestStates.FAIL]:
                test_case.add_failure_info(test_result.detail,
                                           failure_type=str(test_result.state))
            elif test_result.state != TestStates.PASS:
                test_case.add_error_info(test_result.detail,
                                         error_type=str(test_result.state))
            test_cases.append(test_case)
        formatted = TestSuite(results["def"]["name"] + ": " + results["base_url"], test_cases)
    elif format == "console":
        formatted = "\r\nPrinting test results for suite '{}' using API '{}'\r\n" \
            .format(results["suite"], results["base_url"])
        formatted += "----------------------------\r\n"
        total_time = 0
        for test_result in results["result"]:
            formatted += "{} ... {}\r\n".format(test_result.name, str(test_result.state))
            total_time += test_result.elapsed_time
        formatted += "----------------------------\r\n"
        formatted += "Ran {} tests in ".format(len(results["result"])) + \
                     "{0:.3f}s".format(total_time) + "\r\n"
    return formatted
def generate_junit_xml(file_name='junit.xml'):
    results = monitor_runner.get_latest_status()
    print(results)
    test_suites = []
    # test_cases = [TestCase('Test1', 'some.class.name', 123.345, 'I am stdout!', 'I am stderr!')]
    # ts = TestSuite("my test suite", test_cases)
    for testsuite_name in results:
        test_cases = []
        for test_case_name in results[testsuite_name]:
            try:
                name = results[testsuite_name][test_case_name]['name']
            except:
                name = '.'
            success = results[testsuite_name][test_case_name]['success']
            try:
                elapsed_sec = results[testsuite_name][test_case_name]['response_time'].total_seconds()
            except:
                elapsed_sec = -1
            tc = TestCase(
                name=name,
                classname='{}.{}'.format(testsuite_name, test_case_name),
                elapsed_sec=elapsed_sec,
                stdout='{}'.format(success),
            )
            if success is False:
                tc.add_failure_info('Failed')
            test_cases.append(tc)
        ts = TestSuite(testsuite_name, test_cases)
        test_suites.append(ts)

    with open(file_name, "w", encoding='utf-8-sig') as f:
        TestSuite.to_file(f, test_suites, prettyprint=True)
def Run(conf, xmldir):
    cluster_env = readClusterConf(conf)
    testcases = []

    #Name of Test Suite
    TestSuiteName = "Linbit DRBD Test"
    #Name of junit xml file
    JunitXML = "junit-linbit-drbd-test.xml"

    yml_file = "%s/Linbit-drbd-test.yml" % xmldir
    results = readFromYaml(yml_file)

    #Define testcases
    #testcases = [(TestcaseName, TestcaseClass, TestcaseFunction)]
    #eg.
    # ('PacemakerService', 'SetupCluster.service', runPackmakerService)
    #Define function runPackmakerService before using
    cases_def = []
    for c_name in results.keys():
        cases_def.append((c_name, TESTCASES.get(c_name, CLASSIFY[0]), parseResult))

    #Not necessary to modify the lines below!
    skip_flag = False
    for a_case in cases_def:
        case = TestCase(a_case[0], a_case[1])
        testcases.append(case)
        if skip_flag:
            skipCase(case, "Can not test!", "Case is skipped due to previous errors.")
            continue
        skip_flag = assertCase(case, a_case[2], cluster_env, a_case[0], results[a_case[0]])

    ts = TestSuite(TestSuiteName, testcases)
    with open(xmldir+"/"+JunitXML, "w") as f:
        ts.to_file(f, [ts])
def write_xml_file(self):
    test_cases = []
    if os.path.isfile(self.output):
        logging.warn("File exists, deleting...")
        os.remove(self.output)
    with open(self.output, "a") as f:
        for _, elements in self.log.items():
            for j in elements.viewitems():
                if j[0] == "date" or j[0] == "profile" or j[0] == "score":
                    # we really don't care
                    pass
                else:
                    try:
                        test_case = TestCase(j[0], j[1]["descr"], "", "", "")
                        if j[1]["status"] == "Fail":
                            test_case.add_failure_info(j[1]["output"])
                        else:
                            test_case = TestCase(j[0], "", "", "", "")
                        test_cases.append(test_case)
                    except KeyError:
                        # the world's smallest violin playin' for KeyError
                        pass
        ts = [TestSuite("Docker Security Benchmarks", test_cases)]
        TestSuite.to_file(f, ts)
def _generate_junit_xml(self, config_name):
    testcases = []
    tests = self.data_source.get_test_results(config_name)
    for test in tests:
        test_time = 0
        if test.func_end_time is not None and test.func_start_time is not None:
            test_time = test.func_end_time - test.func_start_time
        tc = TestCase(test.name, config_name, test_time, test.description, test.message)
        if 'failed' in test.result:
            tc.add_failure_info(test.result)
        elif 'skipped' in test.result:
            tc.add_skipped_info(test.result)
        testcases.append(tc)
    testsuite = TestSuite(config_name + '_' + self.name, testcases)
    return testsuite
def _generate_report(self):
    """ generate a TestSuite report from the collected TaskData and HostData """
    test_cases = []

    for task_uuid, task_data in self._task_data.items():
        for host_uuid, host_data in task_data.host_data.items():
            test_cases.append(self._build_test_case(task_data, host_data))

    test_suite = TestSuite(self._playbook_name, test_cases)
    report = TestSuite.to_xml_string([test_suite])

    output_file = os.path.join(self._output_dir,
                               '%s-%s.xml' % (self._playbook_name, time.time()))
    with open(output_file, 'wb') as xml:
        xml.write(to_bytes(report, errors='strict'))
def on_set_current_module(self, module, filepath):
    if (self.current_module is not None and
            self.items[self.current_module].test_cases is not None):
        stdout_line = "All checks passed for: {0}".format(self.current_filepath)
        testcase_name = "{0}:0:0".format(self.current_module)
        testcase = TestCase(testcase_name, stdout=stdout_line,
                            file=self.current_filepath, line=0)
        self.items[self.current_module].test_cases.append(testcase)
    self.current_module = module
    self.current_filepath = filepath
    if module not in self.items:
        self.items[module] = TestSuite(module)
def test_to_xml_string(self):
    test_suites = [TestSuite('suite1', [TestCase('Test1')]),
                   TestSuite('suite2', [TestCase('Test2')])]
    xml_string = TestSuite.to_xml_string(test_suites)
    expected_xml_string = textwrap.dedent("""
        <?xml version="1.0" ?>
        <testsuites>
        \t<testsuite errors="0" failures="0" name="suite1" skipped="0" tests="1" time="0">
        \t\t<testcase name="Test1"/>
        \t</testsuite>
        \t<testsuite errors="0" failures="0" name="suite2" skipped="0" tests="1" time="0">
        \t\t<testcase name="Test2"/>
        \t</testsuite>
        </testsuites>
    """.strip("\n"))
    self.assertEqual(xml_string, expected_xml_string)
def exporter_junit(self, test_result_ext, test_suite_properties=None):
    """ Export test results in JUnit XML compliant format
    """
    from junit_xml import TestSuite, TestCase

    test_suites = []
    test_cases = []

    targets = sorted(test_result_ext.keys())
    for target in targets:
        toolchains = sorted(test_result_ext[target].keys())
        for toolchain in toolchains:
            test_cases = []
            tests = sorted(test_result_ext[target][toolchain].keys())
            for test in tests:
                test_results = test_result_ext[target][toolchain][test]
                for test_res in test_results:
                    test_ids = sorted(test_res.keys())
                    for test_no in test_ids:
                        test_result = test_res[test_no]
                        name = test_result['description']
                        classname = '%s.%s.%s.%s' % (self.package, target, toolchain, test_result['id'])
                        elapsed_sec = test_result['elapsed_time']
                        _stdout = test_result['output']
                        if 'target_name_unique' in test_result:
                            _stderr = test_result['target_name_unique']
                        else:
                            _stderr = test_result['target_name']
                        # Test case
                        tc = TestCase(name, classname, elapsed_sec, _stdout, _stderr)
                        # Test case extra failure / error info
                        message = test_result['result']
                        if test_result['result'] == 'FAIL':
                            tc.add_failure_info(message, _stdout)
                        elif test_result['result'] == 'SKIP' or test_result["result"] == 'NOT_SUPPORTED':
                            tc.add_skipped_info(message, _stdout)
                        elif test_result['result'] != 'OK':
                            tc.add_error_info(message, _stdout)
                        test_cases.append(tc)
            ts = TestSuite("test.suite.%s.%s" % (target, toolchain), test_cases,
                           properties=test_suite_properties[target][toolchain])
            test_suites.append(ts)
    return TestSuite.to_xml_string(test_suites)
def main():
    """
    Create a "testcase" for each invocation of the script, and output the results
    of the test case to an XML file within $IMPALA_HOME/logs/extra_junit_xml_logs.
    The log file name will use "phase" and "step" values provided on the command
    line to structure the report. The XML report filename will follow the form:

        junitxml_logger.<phase>.<step>.<time_stamp>.xml

    Phase can be repeated in a given test run, but the step leaf node, which is
    equivalent to a "test case", must be unique within each phase.
    """
    junitxml_logdir = os.path.join(IMPALA_HOME, 'logs', 'extra_junit_xml_logs')

    # The equivalent of mkdir -p
    try:
        os.makedirs(junitxml_logdir)
    except OSError as e:
        if e.errno == errno.EEXIST and os.path.isdir(junitxml_logdir):
            pass
        else:
            raise

    options = get_options()
    root_name, _ = os.path.splitext(os.path.basename(__file__))

    tc = TestCase(classname='{}.{}'.format(root_name, options.phase),
                  name=options.step,
                  elapsed_sec=options.time,
                  stdout=get_xml_content(options.stdout),
                  stderr=get_xml_content(options.stderr))

    # Specifying an error message for any step causes the build to be marked as invalid.
    if options.error:
        tc.add_error_info(get_xml_content(options.error))
        assert tc.is_error()

    testsuite = TestSuite(name='{}.{}.{}'.format(root_name, options.phase, options.step),
                          timestamp=dt.utcnow().replace(tzinfo=pytz.UTC),
                          test_cases=[tc])
    xml_report = generate_xml_file(testsuite, junitxml_logdir)
    print("Generated: {}".format(xml_report))
def generate_junit_report(args, reports, start_time, end_time, total, junit_file):
    from junit_xml import TestSuite, TestCase
    import sys

    junit_log = []
    junit_prop = {}
    junit_prop['Command Line'] = ' '.join(args)
    junit_prop['Python'] = sys.version.replace('\n', '')
    junit_prop['test_groups'] = []
    junit_prop['Host'] = host.label(mode='all')
    junit_prop['passed_count'] = reports.passed
    junit_prop['failed_count'] = reports.failed
    junit_prop['user-input_count'] = reports.user_input
    junit_prop['expected-fail_count'] = reports.expected_fail
    junit_prop['indeterminate_count'] = reports.indeterminate
    junit_prop['benchmark_count'] = reports.benchmark
    junit_prop['timeout_count'] = reports.timeouts
    junit_prop['test-too-long_count'] = reports.test_too_long
    junit_prop['invalid_count'] = reports.invalids
    junit_prop['wrong-version_count'] = reports.wrong_version
    junit_prop['wrong-build_count'] = reports.wrong_build
    junit_prop['wrong-tools_count'] = reports.wrong_tools
    junit_prop['total_count'] = reports.total
    time_delta = end_time - start_time
    junit_prop['average_test_time'] = str(time_delta / total)
    junit_prop['testing_time'] = str(time_delta)

    for name in reports.results:
        result_type = reports.results[name]['result']
        test_parts = name.split('/')
        test_category = test_parts[-2]
        test_name = test_parts[-1]
        junit_result = TestCase(test_name.split('.')[0])
        junit_result.category = test_category
        if result_type == 'failed' or result_type == 'timeout':
            junit_result.add_failure_info(None, reports.results[name]['output'], result_type)
        junit_log.append(junit_result)

    ts = TestSuite('RTEMS Test Suite', junit_log)
    ts.properties = junit_prop
    ts.hostname = host.label(mode='all')

    # write out junit log
    with open(junit_file, 'w') as f:
        TestSuite.to_file(f, [ts], prettyprint=True)
def test_single_suite_no_test_cases(self):
    properties = {'foo': 'bar'}
    package = 'mypackage'
    timestamp = 1398382805

    (ts, tcs) = serialize_and_read(
        TestSuite('test', [],
                  hostname='localhost',
                  id=1,
                  properties=properties,
                  package=package,
                  timestamp=timestamp),
        to_file=True)[0]
    self.assertEqual(ts.tagName, 'testsuite')
    self.assertEqual(ts.attributes['package'].value, package)
    self.assertEqual(ts.attributes['timestamp'].value, str(timestamp))
    self.assertEqual(ts.childNodes[1].childNodes[1].attributes['name'].value, 'foo')
    self.assertEqual(ts.childNodes[1].childNodes[1].attributes['value'].value, 'bar')
def get_junit_items(self, new_items=''):
    """
    Convert from canonical data model to junit test suite
    :param new_items:
    :return:
    """
    test_cases = []
    if not self.test_items and not new_items:
        raise ValueError('There are no test items')
    data = self.test_items if not new_items else new_items
    for item in data:
        tc = TestCase(item.issue, classname=item.confidence)
        message = ''
        for msg in item.msgs:
            message = message + msg.message + "\n\n"
        tc.add_error_info(message=message, error_type=item.severity)
        test_cases.append(tc)
    ts = TestSuite(self.report_name, test_cases)
    return ts
def output_test_summary(errors_total):
    """Prints summary of script output in form of junit-xml

    Args:
        errors_total (int): Total number of broken links
    """
    if not os.path.isdir("test-summary"):
        os.mkdir("test-summary")
    with open("test-summary/junit-xml-report.xml", "w") as test_summary:
        time_taken = time.time() - START_TIME
        test_case = TestCase("Broken links checker", "License files", time_taken)
        if errors_total != 0:
            test_case.add_failure_info(
                f"{errors_total} broken links found",
                f"Number of error links: {errors_total}\nNumber of unique"
                f" broken links: {len(MAP_BROKEN_LINKS.keys())}",
            )
        ts = TestSuite("cc-link-checker", [test_case])
        to_xml_report_file(test_summary, [ts])
def getSLATestSuites(test, group, sla):
    #pprint(vars(sla))
    slaprofile = sla.element.category  #"SLANAME"
    userpath = "" if sla.element.userpath is None else sla.element.userpath
    suitename = "com.neotys." + slaprofile + "." + group + (
        "" if userpath == "" else "." + userpath)
    testname = sla.kpi
    tc = TestCase(testname, suitename)
    if sla.status == "PASSED":
        tc.stdout = ""  #"Value is " + str(sla.value)
    elif sla.status == "FAILED":
        txt = getSLAJUnitText(test, group.lower(), sla, slaprofile, userpath)
        tc.add_error_info("SLA failed", txt, 'NeoLoad SLA')
    elif sla.status == "WARNING":
        txt = getSLAJUnitText(test, group.lower(), sla, slaprofile, userpath)
        #tc.add_error_info("SLA failed",txt,'NeoLoad SLA')
    else:
        logging.warning("Unknown sla.status value: " + sla.status)
    return TestSuite(suitename, [tc])
def _to_junitxml(self, notebook_path, test_result):
    tsuite = TestSuite("nutter")
    for t_result in test_result.results:
        fail_error = None
        tc_result = 'PASSED'
        if not t_result.passed:
            fail_error = 'Exception: {} \n Stack: {}'.format(
                t_result.exception, t_result.stack_trace)
            tc_result = 'FAILED'
        t_case = TestCase(t_result.test_name,
                          classname=notebook_path,
                          elapsed_sec=t_result.execution_time,
                          stderr=fail_error,
                          stdout=tc_result)
        if tc_result == 'FAILED':
            t_case.add_failure_info(tc_result, fail_error)
        tsuite.test_cases.append(t_case)
    return tsuite
def test_init_utf8(self):
    tc = TestCase('Test äöü', 'some.class.name.äöü', 123.345,
                  'I am stdöüt!', 'I am stdärr!')
    tc.add_skipped_info(message='Skipped äöü', output="I skippäd!")
    tc.add_error_info(message='Skipped error äöü', output="I skippäd with an error!")
    test_suite = TestSuite('Test UTF-8', [tc])
    (ts, tcs) = serialize_and_read(test_suite, encoding='utf-8')[0]
    verify_test_case(
        self, tcs[0],
        {'name': decode('Test äöü', 'utf-8'),
         'classname': decode('some.class.name.äöü', 'utf-8'),
         'time': ("%f" % 123.345)},
        stdout=decode('I am stdöüt!', 'utf-8'),
        stderr=decode('I am stdärr!', 'utf-8'),
        skipped_message=decode('Skipped äöü', 'utf-8'),
        skipped_output=decode('I skippäd!', 'utf-8'),
        error_message=decode('Skipped error äöü', 'utf-8'),
        error_output=decode('I skippäd with an error!', 'utf-8'))