def merge(*result_files):
    """Combine several Robot Framework result files into one ExecutionResult.

    The first file seeds the merged result; every remaining file is folded
    into it via ResultMerger. At least one file is required.
    """
    assert result_files
    merged = ExecutionResult(result_files[0])
    combiner = ResultMerger(merged)
    for extra in result_files[1:]:
        combiner.merge(ExecutionResult(extra))
    return merged
def home(request):
    """Render the dashboard for the most recent results folder.

    Resolves TEST_PATH from settings.yml on first use, then tallies
    critical pass/fail counts from every per-test output.xml in the newest
    results subfolder. Any failure degrades to an empty dashboard.
    """
    # Lazily seed TEST_PATH from local config the first time it is missing.
    if 'TEST_PATH' not in os.environ:
        with open('settings.yml', 'r') as outfile:
            # NOTE(review): yaml.load without an explicit Loader is unsafe on
            # untrusted input; settings.yml is local config, but safe_load
            # would be preferable.
            stngs = yaml.load(outfile)
        os.environ["TEST_PATH"] = str(stngs["TestPath"])
        print("Path updated!")
    try:
        path = os.path.join(os.environ['TEST_PATH'], "results")
        # Newest results folder = last one in sorted order.
        folder = sorted(next(os.walk(path))[1])[-1]
        run_results_path = os.path.join(path, folder)
        results_files = {folder: {}}
        passed = 0
        failed = 0
        if os.path.isdir(run_results_path):
            for result_file in os.listdir(run_results_path):
                name, ext = os.path.splitext(result_file)
                if ext == ".html" and "Report" not in name:
                    xml_file_path = os.path.join(path, folder, name + '.xml')
                    # Parse each output.xml ONCE (the original re-parsed the
                    # same file four times per entry).
                    stats = ExecutionResult(
                        xml_file_path).statistics.total.critical
                    results_files[folder][name] = [
                        {'failed': stats.failed},
                        {'passed': stats.passed},
                    ]
                    failed += stats.failed
                    passed += stats.passed
        sorted_results = reversed(sorted(results_files.items()))
    except Exception:
        # Best-effort: show an empty dashboard when results are missing or
        # unparsable (narrowed from a bare ``except:``).
        sorted_results = []
        failed = 0
        passed = 0
    return render(request, 'dashboard.html',
                  {"folders": sorted_results,
                   "failed": failed,
                   "passed": passed, })
def __downruninfo(self, args):
    """Serve a run-info download for the task identified by ``args["key"]``.

    Walks the per-user job tree looking at task directories with result XML
    files, records the download in the DB log, and streams the requested
    file back to the caller.
    """
    # charis added :
    user_path = args["key"]
    project = get_projectnamefromkey(user_path)
    self.log.info("下载请求 runinfo:" + user_path)
    job_path = self.app.config["AUTO_HOME"] + "/jobs/%s/%s" % (
        session['username'], project)
    # BUG FIX: os.listdir() yields bare names, so the original isdir()
    # checks ran against the CWD, and the inner ``for x in <str>`` loops
    # iterated over the *characters* of directory names. Join each level
    # onto its parent before testing or descending.
    for user in os.listdir(job_path):
        user_dir = os.path.join(job_path, user)
        if os.path.isdir(user_dir):
            for prj in os.listdir(user_dir):
                prj_dir = os.path.join(user_dir, prj)
                if os.path.isdir(prj_dir) and prj == project:
                    for task in os.listdir(prj_dir):
                        task_dir = os.path.join(prj_dir, task)
                        if os.path.isdir(task_dir):
                            for f in os.listdir(task_dir):
                                if f.endswith('.xml'):
                                    suite = ExecutionResult(
                                        os.path.join(task_dir, f)).suite
    # BUG FIX: the original overwrote user_path with a hard-coded developer
    # machine path, so every download served that local file. Send the file
    # the caller actually asked for.
    self.app.config['DB'].insert_loginfo(session['username'], 'runinfo',
                                         'download', project)
    return self.__sendfile(user_path)
def __init__(self, original_output_xml):
    """Parse *original_output_xml* and remember which tests failed.

    Raises FileNotFoundError when the given path is not an existing file.
    """
    if not os.path.isfile(original_output_xml):
        raise FileNotFoundError(f"{original_output_xml} is no file")
    visitor = DataDriverResultsVisitor()
    ExecutionResult(original_output_xml).visit(visitor)
    self._failed_tests = visitor.failed_tests
def RunTest(request, pk=None):
    """Run the requested test on a free environment and record its outcome.

    Marks the TestRequest as TESTING, executes it on the first free
    environment (redirecting away if none exist), then stores the log path,
    pass/fail verdict from output.xml, and completion timestamp.
    """
    test_record = TestRequest.objects.get(pk=pk)
    test_record.status = TestRequest.TESTING
    test_record.save()
    free_envs = get_free_envs()
    # Guard clause: nothing free, bail out to the status page.
    if not free_envs:
        return HttpResponseRedirect(reverse('status_sort', args=('C', )))
    env = free_envs[0]
    logpath = get_output_dir((env, test_record.test_file.file.name))
    print(logpath + 'from views')
    test_record.status = TestRequest.COMPLETED
    test_record.envt = env.split('/')[::-1][1]
    print(env.split('/')[::-1])
    log_dir = logpath.split('/')[::-1][0]
    test_record.log_path = '/media/' + log_dir + '/log.html'
    project_root = os.path.dirname(os.path.abspath(__file__)).split(
        'NUTAS', 1)[0]
    xml_path = (project_root + 'NUTAS/UI/USAS/media/' + log_dir +
                '/output.xml')
    stats = ExecutionResult(xml_path).statistics
    test_record.result = 'Failed' if stats.total.critical.failed else 'Passed'
    test_record.execution = datetime.now()
    test_record.save()
    return HttpResponseRedirect(reverse('status_sort', args=('C', )))
def run(self, args, opts):
    """Sub command 'check' runner.

    Dry-runs the given cases to syntax-check them, optionally cross-checks
    case ids against a TCMS plan, and prints where the details landed.
    Raises UsageError when required options are missing.
    """
    if not opts.cases:
        raise UsageError("case path must be set with -c or --cases!")
    print(" Syntax Checking ".center(70, '*'))
    print('...')
    log_level = 'TRACE'
    cases_path = opts.cases
    tmp_path = tempfile.gettempdir()
    xml_result = os.path.join(tmp_path, "check_result.xml")
    output_file = os.path.join(tmp_path, "stdout.txt")
    with open(output_file, 'w') as stdout:
        # ``run`` here is the module-level robot runner, not this method.
        dryrun_result = run(cases_path, dryrun=True, loglevel=log_level,
                            log='NONE', report='NONE', output=xml_result,
                            stdout=stdout)
    detail_result = ExecutionResult(xml_result)
    if opts.is_tcms:
        if not opts.plan_id:
            raise UsageError("plan id must be set with -p or --planid!")
        plan_id = opts.plan_id
        tcms = TCMS()
        ids = tcms.get_plan_case_ids(plan_id)
        detail_result.visit(ResultChecker(ids, plan_id))
    elif dryrun_result == 0:
        # BUG FIX: typo "Contratulations" in the user-facing message.
        print('Congratulations, no syntax error')
    else:
        detail_result.visit(DryRunChecker())
    print('\n( No news is good news:) )')
    # BUG FIX: typo "checing" in the user-facing message.
    print('checking result is in the file: %s' % output_file)
    print(' DONE '.center(70, '*'))
def xml_to_db(self, xml_file):
    """Parse *xml_file* and persist its run, errors, statistics and suites.

    Inserts a test_runs row keyed by a hash of the file; if the row already
    exists (IntegrityError), looks up its id instead. Then parses errors,
    statistics and the suite tree under that run id.
    """
    self._verbose('- Parsing %s' % xml_file)
    test_run = ExecutionResult(xml_file,
                               include_keywords=self._include_keywords)
    # Renamed from ``hash`` to avoid shadowing the builtin.
    file_hash = self._hash(xml_file)
    try:
        test_run_id = self._db.insert('test_runs', {
            'hash': file_hash,
            'imported_at': datetime.utcnow().strftime(
                '%Y-%m-%d %H:%M:%S.%f'),
            'source_file': test_run.source,
            'started_at': self._format_robot_timestamp(
                test_run.suite.starttime)
            if test_run.suite.starttime else 'NULL',
            # BUG FIX: the original guarded finished_at on *starttime*
            # (copy-paste), so a run with a start but no end stored a
            # malformed timestamp instead of 'NULL'.
            'finished_at': self._format_robot_timestamp(
                test_run.suite.endtime)
            if test_run.suite.endtime else 'NULL'
        })
    except IntegrityError:
        # Row already imported: recover its id by its natural key.
        test_run_id = self._db.fetch_id('test_runs', {
            'source_file': test_run.source,
            'started_at': self._format_robot_timestamp(
                test_run.suite.starttime),
            'finished_at': self._format_robot_timestamp(
                test_run.suite.endtime)
        })
    self._parse_errors(test_run.errors.messages, test_run_id)
    self._parse_statistics(test_run.statistics, test_run_id)
    self._parse_suite(test_run.suite, test_run_id)
def parseRFreport(self, RFreport):
    """Parse a Robot Framework report and resolve TestLink case ids.

    Visits *RFreport* with PrintTestInfo (which appears to populate the
    module-level ``testcase_id_result_dict`` — TODO confirm), then replaces
    each entry's second element with the TestLink internal id looked up by
    case name. Returns False on TestLink errors; re-raises anything
    unexpected.
    """
    try:
        project_prefix_name = self.robotI.getTestProjectPrefixByName(
            self.project_name)
        tcIntID = ""
        result = ExecutionResult(RFreport)
        result.visit(PrintTestInfo())
        pass_index = 0
        fail_index = 0
        # NOTE(review): ``result`` is rebound by this loop, shadowing the
        # ExecutionResult above; entries look like [name, result] pairs.
        for name, result in testcase_id_result_dict['p']:
            tcIntID = self.robotI.getTestCaseIDByName(
                name, self.project_name)
            testcase_id_result_dict['p'][pass_index][1] = tcIntID
            pass_index += 1
        for name, result in testcase_id_result_dict['f']:
            tcIntID = self.robotI.getTestCaseIDByName(
                name, self.project_name)
            testcase_id_result_dict['f'][fail_index][1] = tcIntID
            fail_index += 1
    except IOError as e:
        # NOTE(review): this local ``errno`` shadows the stdlib module name.
        errno, strerror = e.args
        print "parseRFreport I/O error({0}): {1}".format(errno, strerror)
    except testlink.testlinkerrors.TLResponseError as e:
        print "TestLink error({0}): {1}".format(e.code, e.message)
        return False
    except testlink.testlinkerrors.TLConnectionError as e:
        print "TestLink error:({0})".format(e.message)
        return False
    except:
        # Deliberate catch-all: report then re-raise unexpected failures.
        print "Unexpected error:", sys.exc_info()[0]
        raise
def get_last_task(app, username, project):
    """Return the status-icon URL for the newest build of *project*.

    running  = output.xml not parseable yet (build in progress)
    success  = no critical failures (or no job directory at all)
    fail     = at least one critical failure
    exception= job dir exists but the last build directory is missing
    """
    icons = {
        "running": url_for('static', filename='img/running.gif'),
        "success": url_for('static', filename='img/success.png'),
        "fail": url_for('static', filename='img/fail.png'),
        "exception": url_for('static', filename='img/exception.png')
    }
    job_path = app.config["AUTO_HOME"] + "/jobs/%s/%s" % (username, project)
    status = icons["running"]
    if exists_path(job_path):
        last_job = get_next_build_number(job_path) - 1
        if exists_path(job_path + "/%s" % last_job):
            try:
                suite = ExecutionResult(
                    job_path + "/%s/output.xml" % last_job).suite
                stat = suite.statistics.critical
                status = icons["fail"] if stat.failed != 0 else icons["success"]
            except Exception:
                # Narrowed from a bare ``except:`` so KeyboardInterrupt /
                # SystemExit still propagate; a missing/partial output.xml
                # means the build is still running.
                status = icons["running"]
        else:
            status = icons["exception"]
    else:
        status = icons["success"]
    return status
def run(self):
    """Build and execute the suite, then regenerate report/log artifacts.

    Resolves the next build directory, runs the suite into it, re-parses
    the written output.xml, records the final status, and writes the HTML
    report and log from the parsed result.
    """
    if not exists_path(self.output):
        mk_dirs(self.output)
    self.suite = TestSuiteBuilder().build(self.project)
    (output, index) = self.reset_next_build_numb()
    self.setName(output)
    self.result = self.suite.run(output_directory=output,
                                 output=output + "/output.xml",
                                 debugfile=output + "/debug.txt",
                                 loglevel="TRACE")
    # BUG FIX: the original referenced an undefined name ``out`` from here
    # on, raising NameError at runtime; the build directory is ``output``.
    self.result = ExecutionResult(output + "/output.xml")
    self.reset_last_status(self.result, output, index)
    # Report and log files are generated from the re-parsed result object.
    ResultWriter(self.result).write_results(report=output + '/report.html',
                                            log=output + '/log.html')
def robot_run(username, name, project, output):
    """Execute *project* into the next build directory and publish results.

    Runs the built suite, re-parses its output.xml, records the last-run
    status, writes report/log HTML, and sends the report notification.
    """
    if not exists_path(output):
        mk_dirs(output)
    built_suite = TestSuiteBuilder().build(project)
    out, index = reset_next_build_numb(output)
    built_suite.run(output_directory=out,
                    output=out + "/output.xml",
                    debugfile=out + "/debug.txt",
                    loglevel="TRACE")
    detail_result = ExecutionResult(out + "/output.xml")
    reset_last_status(detail_result, output, index)
    # Report and log HTML are derived from the parsed result object.
    ResultWriter(detail_result).write_results(report=out + '/report.html',
                                              log=out + '/log.html')
    send_robot_report(username, name, index, detail_result, out)
def main(paths, outpath='times2csv.csv'):
    """Write per-suite timing data for each output.xml in *paths* to CSV."""
    with open(outpath, 'wb') as handle:
        writer = csv.writer(handle)
        header = ['TYPE', 'NAME']
        header.extend(paths)
        writer.writerow(header)
        parsed_suites = [ExecutionResult(p).suite for p in paths]
        Times(writer).process_suites(parsed_suites)
def check_tests(robot_file):
    """Run *robot_file*, verify statuses, and exit with the error count."""
    xml_output = _run_tests_and_process_output(robot_file)
    checker = StatusCheckerChecker()
    ExecutionResult(xml_output).suite.visit(checker)
    checker.print_status()
    sys.exit(len(checker.errors))
def _parse_tests(self, job, build, log):
    """Process data from robot output.xml file and return JSON structured
    data.

    :param job: The name of job which build output data will be processed.
    :param build: The build which output data will be processed.
    :param log: List of log messages.
    :type job: str
    :type build: dict
    :type log: list of tuples (severity, msg)
    :returns: JSON data structure, or None when the file cannot be parsed.
    :rtype: dict
    """
    metadata = {"job": job, "build": build}
    with open(build["file-name"], 'r') as data_file:
        try:
            result = ExecutionResult(data_file)
        except errors.DataError as err:
            log.append(
                ("ERROR", "Error occurred while parsing output.xml: "
                          "{0}".format(err)))
            return None
    checker = ExecutionChecker(metadata, self._cfg.mapping, self._cfg.ignore)
    result.visit(checker)
    return checker.data
def group_by_root(results, critical_tags, non_critical_tags):
    """Group parsed results by root suite name.

    Parses each source in *results*, applies the criticality tags, and
    returns a dict mapping root-suite name -> list of ExecutionResults.
    """
    groups = {}
    for src in results:
        res = ExecutionResult(src)
        res.suite.set_criticality(critical_tags, non_critical_tags)
        # setdefault + append replaces the original's get() + list
        # concatenation, which rebuilt the group list on every hit.
        groups.setdefault(res.suite.name, []).append(res)
    return groups
def _combine_suite_xmls(self):
    """Merge every result file into one combined output.xml in outputdir."""
    combined = Result()
    combined.suite.name = self.name
    for result_path in self.result_paths:
        combined.suite.suites.append(ExecutionResult(result_path).suite)
    combined.save(path=path.join(self.config['outputdir'], 'output.xml'))
def get_stats(xml_file):
    '''
    This function does the separation of passed suites and
    failed suites and returns a dictionary containing them as keys.
    passed suites key contains value as list of dictionaries which consist
    of suite name as key and testcases as values.
    failed suites key contains value as list of dictionaries which consist
    of suite name as key and testcases as values.

    *Parameters:*

    xml_file is a output.xml file generated by robot framework.

    *Return:*

    Returns a dictionary containing passed suites and failed suites

    Example:
    {'passed_suites': [{'suite1': [('test1', 'PASS'),('test2', 'PASS')]}],
    'failed_suites': [{'suite2': [('test1', 'FAIL', 'device not responding'),
    ('test2', 'PASS')]}]}
    '''
    result = ExecutionResult(xml_file)
    # NOTE(review): SuiteResults appears to populate the module-level
    # globals test_list / res_dict / passed_suites / failed_suites used
    # below — confirm it resets them between calls.
    result.visit(SuiteResults())
    temp_passed_suites = []
    temp_failed_suites = []
    # item layout appears to be (suite, test, status[, message]) — TODO
    # confirm against SuiteResults.
    for item in test_list:
        if res_dict[item[0]] == 'PASS':
            if passed_suites:
                # Collect already-seen passed suite names so a repeat suite
                # appends to its existing entry instead of adding a new one.
                for each in passed_suites:
                    temp_passed_suites.append(list(each.keys())[0])
                if item[0] in temp_passed_suites:
                    for each in passed_suites:
                        if item[0] == list(each.keys())[0]:
                            each[item[0]].append((item[1], item[2]))
                else:
                    passed_suites.append({item[0]: [(item[1], item[2])]})
            else:
                passed_suites.append({item[0]: [(item[1], item[2])]})
        else:
            if failed_suites:
                for each in failed_suites:
                    temp_failed_suites.append(list(each.keys())[0])
                if item[0] in temp_failed_suites:
                    for each in failed_suites:
                        if item[0] == list(each.keys())[0]:
                            # Failing tests carry the failure message as a
                            # third element; passing tests do not.
                            if item[2] == 'PASS':
                                each[item[0]].append((item[1], item[2]))
                            else:
                                each[item[0]].append(
                                    (item[1], item[2], item[3]))
                else:
                    if item[2] == 'PASS':
                        failed_suites.append({item[0]: [(item[1], item[2])]})
                    else:
                        failed_suites.append(
                            {item[0]: [(item[1], item[2], item[3])]})
            else:
                if item[2] == 'PASS':
                    failed_suites.append({item[0]: [(item[1], item[2])]})
                else:
                    failed_suites.append(
                        {item[0]: [(item[1], item[2], item[3])]})
    return {'passed_suites': passed_suites, 'failed_suites': failed_suites}
def process(infile="output.xml"):
    """Parse *infile*, collect timing data, then emit the results report.

    Warns when the run is missing start/end timestamps (tracked by the
    module-level ``corrections`` flag populated by TimeVisitor).
    """
    parsed = ExecutionResult(infile)
    parsed.visit(TimeVisitor())
    if corrections:
        logging.warning("{0} is missing some of its starttime/endtime. "
                        "This might cause inconsistencies with your "
                        "duration report.".format(infile))
    parsed.visit(RobotResultsVisitor())
def check_tests(robot_file):
    """Run *robot_file*, verify statuses, print the RF version, and exit
    with the number of status errors found."""
    xml_output = _run_tests_and_process_output(robot_file)
    checker = StatusCheckerChecker()
    ExecutionResult(xml_output).suite.visit(checker)
    checker.print_status()
    print(f"Robot Framework version: {VERSION}")
    sys.exit(len(checker.errors))
def xml_to_db(self, xml_file, base_dir=''):
    """Parse *xml_file* (keywords included) and persist its suite tree."""
    self._logger.info('- Parsing %s' % xml_file)
    self.base_dir = base_dir
    parsed_run = ExecutionResult(xml_file, include_keywords=True)
    root_suite = parsed_run.suite
    self._find_start_end_time(root_suite)
    self._parse_suite(root_suite)
def _suites_from_outputxml(outputxml):
    """Rebuild SuiteItems from a previous run's output.xml.

    Suites are ordered by reversing the sorted (not-passing, time, suite)
    tuples collected by the visitor.
    """
    parsed = ExecutionResult(outputxml)
    collector = SuiteNotPassingsAndTimes()
    parsed.visit(collector)
    ordered = reversed(sorted(collector.suites))
    return [SuiteItem(entry[2]) for entry in ordered]
def stats_by_keywords(inpath): result = ExecutionResult(inpath) visitor = ExecutionKeywordStats() result.visit(visitor) for kw in sorted(visitor.elapsed_by_kw, key=visitor.elapsed_by_kw.get, reverse=True): print kw, visitor.elapsed_by_kw[kw]
def parse_result_file(self):
    """Parse the robot output file and publish summary counters.

    Stores per-test results on the instance and mirrors the totals onto
    the shared Params object.
    """
    parsed = ExecutionResult(ROBOT_OUT_FILE_WITH_PTAH)
    summary = ResultSummary()
    parsed.visit(summary)
    self.results = summary.results
    Params.total_run = summary.total_run
    Params.total_pass = summary.total_pass
    Params.total_errors = summary.total_errors
    Params.total_failures = summary.total_failures
def _combine_all_devices(self):
    """Stitch every device's result file into one devices.xml.

    Each sub-suite is renamed to its source filename so devices stay
    distinguishable in the combined output.
    """
    merged = Result()
    merged.suite.name = self.name
    outdir = self.config['outputdir']
    for device_file in self.device_filenames:
        device_result = ExecutionResult(path.join(outdir, device_file))
        device_result.suite.name = device_file
        merged.suite.suites.append(device_result.suite)
    merged.save(path=path.join(outdir, 'devices.xml'))
def stats_by_keywords(path):
    """Print keyword name, elapsed time, and call count, slowest first."""
    result = ExecutionResult(path)
    visitor = ExecutionKeywordStats()
    result.visit(visitor)
    stats = visitor.stats_by_kw
    ordered = sorted(stats, key=lambda name: stats[name]['elapsedtime'],
                     reverse=True)
    for kw in ordered:
        # Drop microseconds from the timedelta's string form.
        elapsed = str(datetime.timedelta(
            milliseconds=stats[kw]['elapsedtime']))[:-7]
        print('{0}\t{1}\t{2}'.format(kw, elapsed, stats[kw]['count']))
def stats_by_keywords(path): result = ExecutionResult(path) visitor = ExecutionKeywordStats() result.visit(visitor) for kw in sorted(visitor.elapsed_by_kw, key=visitor.elapsed_by_kw.get, reverse=True): print kw, str( datetime.timedelta(milliseconds=visitor.elapsed_by_kw[kw]))[:-7]
def get_suite_names(output_file):
    """Return the names of all suites found in *output_file*.

    Best-effort: returns [] when the path is not a file or the XML cannot
    be parsed.
    """
    if not os.path.isfile(output_file):
        return []
    try:
        execution = ExecutionResult(output_file)
        gatherer = GatherSuiteNames()
        execution.visit(gatherer)
        return gatherer.result
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # still propagate; parse failures still degrade to an empty list.
        return []
def rfhistoric_reparser(opts):
    """Re-parse robot output.xml file(s) and push results into RobotHistoric.

    Honors ``opts.ignoreresult`` as an early exit, resolves the output file
    list (glob-all or explicit comma list), verifies they exist, parses the
    combined result, and updates the project and root databases for the
    chosen execution id.
    """
    if opts.ignoreresult == "True":
        print("Ignoring execution results...")
        return
    path = os.path.abspath(os.path.expanduser(opts.path))
    # output.xml files
    output_names = []
    # support "*.xml" of output files
    if (opts.output == "*.xml"):
        for item in os.listdir(path):
            # BUG FIX: os.listdir() yields bare names; the original tested
            # isfile() against the CWD and collected un-joined names, so
            # parsing later failed unless CWD == path. Join onto *path*.
            full_item = os.path.join(path, item)
            if os.path.isfile(full_item) and item.endswith('.xml'):
                output_names.append(full_item)
    else:
        for curr_name in opts.output.split(","):
            curr_path = os.path.join(path, curr_name)
            output_names.append(curr_path)
    required_files = list(output_names)
    missing_files = [
        filename for filename in required_files
        if not os.path.exists(filename)
    ]
    if missing_files:
        # We have files missing.
        exit("output.xml file is missing: {}".format(
            ", ".join(missing_files)))
    # Read output.xml file
    result = ExecutionResult(*output_names)
    result.configure(stat_config={
        'suite_stat_level': 2,
        'tag_stat_combine': 'tagANDanother'
    })
    print("Capturing execution results, This may take few minutes...")
    # connect to database
    mydb = connect_to_mysql_db(opts.host, opts.username, opts.password,
                               opts.projectname)
    rootdb = connect_to_mysql_db(opts.host, opts.username, opts.password,
                                 'robothistoric')
    # get latest execution id
    if opts.executionid == "latest":
        result_id = get_latest_execution_id(mydb)
    else:
        result_id = opts.executionid
    print("INFO: Updating test results")
    result.visit(TestMetrics(mydb, result_id, opts.fullsuitename))
    print("INFO: Updating execution table")
    update_execution_table(mydb, rootdb, opts.projectname, result_id)
    print("INFO: Updating execution results")
    commit_and_close_db(mydb)
def robot_run(app, case_key, args='', user='', driver='USER'):
    """Run the robot suite for *case_key* via a subprocess and publish results.

    Resolves the next build directory under the user's project job folder,
    records the command line, runs ``robot`` (2h timeout), captures console
    output to debug.txt, then parses output.xml to update last-run status
    and regenerate report/log HTML. Returns early (logging an error) when
    output.xml cannot be parsed.
    """
    username = user if user != '' else session['username']
    project = get_projectnamefromkey(case_key)
    output = app.config["AUTO_HOME"] + "/jobs/%s/%s" % (username, project)
    if not exists_path(output):
        mk_dirs(output)
    (out, index) = reset_next_build_numb(output)
    mk_dirs(out) if not exists_path(out) else None
    # NOTE(review): the command is built by string concatenation and run
    # with shell=True — if ``args``/``case_key`` can contain user-supplied
    # text this is shell-injectable; consider a list argv with shell=False.
    cmd = 'robot ' + args + ' --outputdir=' + out + ' ' + case_key
    log.info("Robot_run CMD:{}".format(cmd))
    # Persist the exact invocation so the run can be replayed/inspected.
    with open(out + "/cmd.txt", 'w') as f:
        f.write("{}|robot|{}|--outputdir={}|{}\n".format(
            driver, args, out, case_key))
    cp = subRun(cmd,
                shell=True,
                stdout=PIPE,
                stderr=STDOUT,
                text=True,
                timeout=7200)  # timeout: sec 2hrs
    with open(out + "/debug.txt", 'w') as f:
        f.write(cp.stdout)
    app.config['DB'].insert_loginfo(username, 'task', 'run', case_key, 'OK')
    try:
        detail_result = ExecutionResult(out + "/output.xml")
    except Exception as e:
        # A missing/unparsable output.xml usually means robot itself failed;
        # log the captured console output for diagnosis and bail out.
        log.error(
            "Open output.xml Exception:{},\n May robot run fail, console:{}".
            format(e, cp.stdout))
        return
    reset_last_status(detail_result, output, index)
    # Report and log HTML are generated from the parsed result object.
    ResultWriter(detail_result).write_results(report=out + '/report.html',
                                              log=out + '/log.html')
    s = detail_result.suite
    dealwith_source(app, username, s)
def output_file(self, path: str) -> None:
    """Called when writing to an output file is ready.

    Re-parses the finished output file, injects Report Portal links via
    the report modifier, and saves the file in place.

    Args:
        path: absolute path to output file.
    """
    parsed = ExecutionResult(path)
    modifier = RobotFrameworkReportModifier(robot_service=RobotService)
    parsed.visit(modifier)
    parsed.save()