def merge(args, logger):
    if resultutils.is_url(args.target_results) or os.path.isdir(args.target_results):
        results = resultutils.load_resultsdata(args.target_results, configmap=resultutils.store_map)
        resultutils.append_resultsdata(results, args.base_results, configmap=resultutils.store_map)
        resultutils.save_resultsdata(results, args.target_results)
    else:
        results = resultutils.load_resultsdata(args.base_results, configmap=resultutils.flatten_map)
        if os.path.exists(args.target_results):
            resultutils.append_resultsdata(results, args.target_results, configmap=resultutils.flatten_map)
        resultutils.save_resultsdata(results, os.path.dirname(args.target_results), fn=os.path.basename(args.target_results))
    return 0
def view_test_report(self, logger, source_dir, branch, commit, tag):
    test_count_reports = []
    if commit:
        if tag:
            logger.warning("Ignoring --tag as --commit was specified")
        tag_name = "{branch}/{commit_number}-g{commit}/{tag_number}"
        repo = GitRepo(source_dir)
        revs = gitarchive.get_test_revs(logger, repo, tag_name, branch=branch)
        rev_index = gitarchive.rev_find(revs, 'commit', commit)
        testresults = resultutils.git_get_result(repo, revs[rev_index][2])
    elif tag:
        repo = GitRepo(source_dir)
        testresults = resultutils.git_get_result(repo, [tag])
    else:
        testresults = resultutils.load_resultsdata(source_dir)
    for testsuite in testresults:
        for resultid in testresults[testsuite]:
            result = testresults[testsuite][resultid]
            test_count_report = self.get_aggregated_test_result(logger, result)
            test_count_report['testseries'] = result['configuration']['TESTSERIES']
            test_count_report['result_id'] = resultid
            test_count_reports.append(test_count_report)
    self.print_test_report('test_report_full_text.txt', test_count_reports)
def log(args, logger):
    results = resultutils.load_resultsdata(args.source)
    ptest_count = sum(1 for _, _, _, r in resultutils.test_run_results(results) if 'ptestresult.sections' in r)
    if ptest_count > 1 and not args.prepend_run:
        print("%i ptest sections found. '--prepend-run' is required" % ptest_count)
        return 1
    for _, run_name, _, r in resultutils.test_run_results(results):
        if args.dump_ptest:
            if 'ptestresult.sections' in r:
                for name, ptest in r['ptestresult.sections'].items():
                    if 'log' in ptest:
                        dest_dir = args.dump_ptest
                        if args.prepend_run:
                            dest_dir = os.path.join(dest_dir, run_name)
                        os.makedirs(dest_dir, exist_ok=True)
                        dest = os.path.join(dest_dir, '%s.log' % name)
                        print(dest)
                        with open(dest, 'w') as f:
                            f.write(ptest['log'])
        if args.raw:
            if 'ptestresult.rawlogs' in r:
                print(r['ptestresult.rawlogs']['log'])
            else:
                print('Raw logs not found')
                return 1
        for ptest in args.ptest:
            if not show_ptest(r, ptest, logger):
                return 1
def merge(args, logger):
    configvars = {}
    if not args.not_add_testseries:
        configvars = resultutils.extra_configvars.copy()
    if args.executed_by:
        configvars['EXECUTED_BY'] = args.executed_by
    if resultutils.is_url(args.target_results) or os.path.isdir(args.target_results):
        results = resultutils.load_resultsdata(args.target_results, configmap=resultutils.store_map, configvars=configvars)
        resultutils.append_resultsdata(results, args.base_results, configmap=resultutils.store_map, configvars=configvars)
        resultutils.save_resultsdata(results, args.target_results)
    else:
        results = resultutils.load_resultsdata(args.base_results, configmap=resultutils.flatten_map, configvars=configvars)
        if os.path.exists(args.target_results):
            resultutils.append_resultsdata(results, args.target_results, configmap=resultutils.flatten_map, configvars=configvars)
        resultutils.save_resultsdata(results, os.path.dirname(args.target_results), fn=os.path.basename(args.target_results))
    logger.info('Merged results to %s' % os.path.dirname(args.target_results))
    return 0
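# The merge() variant above reads its inputs from an argparse namespace
# (args.base_results, args.target_results, args.not_add_testseries,
# args.executed_by). A minimal sketch of how such a subcommand could be
# registered; the subcommand name, help strings and option spellings below
# are assumptions for illustration, not the tool's actual CLI definition.
import argparse
import logging
import sys

def register_merge_command(subparsers):
    parser = subparsers.add_parser('merge', help='merge a base results set into a target results set')
    parser.add_argument('base_results', help='results file/directory to merge from')
    parser.add_argument('target_results', help='results file/directory/URL to merge into')
    parser.add_argument('--not-add-testseries', action='store_true',
                        help='do not add a TESTSERIES configuration value')
    parser.add_argument('--executed-by', default='',
                        help='record who or what executed the tests')
    parser.set_defaults(func=merge)

if __name__ == '__main__':
    top = argparse.ArgumentParser(description='resulttool-style CLI sketch')
    subparsers = top.add_subparsers(dest='command', required=True)
    register_merge_command(subparsers)
    args = top.parse_args()
    sys.exit(args.func(args, logging.getLogger('resulttool')))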
def merge(args, logger):
    if os.path.isdir(args.target_results):
        results = resultutils.load_resultsdata(args.target_results, configmap=resultutils.store_map)
        resultutils.append_resultsdata(results, args.base_results, configmap=resultutils.store_map)
        resultutils.save_resultsdata(results, args.target_results)
    else:
        results = resultutils.load_resultsdata(args.base_results, configmap=resultutils.flatten_map)
        if os.path.exists(args.target_results):
            resultutils.append_resultsdata(results, args.target_results, configmap=resultutils.flatten_map)
        resultutils.save_resultsdata(results, os.path.dirname(args.target_results), fn=os.path.basename(args.target_results))
    return 0
def view_test_report(self, logger, source_dir, branch, commit, tag):
    test_count_reports = []
    if commit:
        if tag:
            logger.warning("Ignoring --tag as --commit was specified")
        tag_name = "{branch}/{commit_number}-g{commit}/{tag_number}"
        repo = GitRepo(source_dir)
        revs = gitarchive.get_test_revs(logger, repo, tag_name, branch=branch)
        rev_index = gitarchive.rev_find(revs, 'commit', commit)
        testresults = resultutils.git_get_result(repo, revs[rev_index][2])
    elif tag:
        repo = GitRepo(source_dir)
        testresults = resultutils.git_get_result(repo, [tag])
    else:
        testresults = resultutils.load_resultsdata(source_dir)
    for testsuite in testresults:
        for resultid in testresults[testsuite]:
            skip = False
            result = testresults[testsuite][resultid]
            machine = result['configuration']['MACHINE']
            # Check to see if there are already results for these kinds of tests for the machine
            for key in result['result'].keys():
                testtype = str(key).split('.')[0]
                if ((machine in self.ptests and testtype == "ptestresult" and self.ptests[machine]) or
                        (machine in self.ltptests and testtype == "ltpiresult" and self.ltptests[machine]) or
                        (machine in self.ltpposixtests and testtype == "ltpposixresult" and self.ltpposixtests[machine])):
                    print("Already have test results for %s on %s, skipping %s" % (str(key).split('.')[0], machine, resultid))
                    skip = True
                    break
            if skip:
                break
            test_count_report = self.get_aggregated_test_result(logger, result, machine)
            test_count_report['machine'] = machine
            test_count_report['testseries'] = result['configuration']['TESTSERIES']
            test_count_report['result_id'] = resultid
            test_count_reports.append(test_count_report)
    self.print_test_report('test_report_full_text.txt', test_count_reports)
def view_test_report(self, logger, source_dir, tag):
    test_count_reports = []
    if tag:
        repo = GitRepo(source_dir)
        testresults = resultutils.git_get_result(repo, [tag])
    else:
        testresults = resultutils.load_resultsdata(source_dir)
    for testsuite in testresults:
        for resultid in testresults[testsuite]:
            result = testresults[testsuite][resultid]
            test_count_report = self.get_aggregated_test_result(logger, result)
            test_count_report['testseries'] = result['configuration']['TESTSERIES']
            test_count_report['result_id'] = resultid
            test_count_reports.append(test_count_report)
    self.print_test_report('test_report_full_text.txt', test_count_reports)
def log(args, logger):
    results = resultutils.load_resultsdata(args.source)
    ptest_count = sum(1 for _, _, _, r in resultutils.test_run_results(results) if 'ptestresult.sections' in r)
    if ptest_count > 1 and not args.prepend_run:
        print("%i ptest sections found. '--prepend-run' is required" % ptest_count)
        return 1
    for _, run_name, _, r in resultutils.test_run_results(results):
        if args.dump_ptest and 'ptestresult.sections' in r:
            for name, ptest in r['ptestresult.sections'].items():
                logdata = resultutils.ptestresult_get_log(r, name)
                if logdata is not None:
                    dest_dir = args.dump_ptest
                    if args.prepend_run:
                        dest_dir = os.path.join(dest_dir, run_name)
                    os.makedirs(dest_dir, exist_ok=True)
                    dest = os.path.join(dest_dir, '%s.log' % name)
                    print(dest)
                    with open(dest, 'w') as f:
                        f.write(logdata)
        if args.raw_ptest:
            rawlog = resultutils.ptestresult_get_rawlogs(r)
            if rawlog is not None:
                print(rawlog)
            else:
                print('Raw ptest logs not found')
                return 1
        if args.raw_reproducible:
            if 'reproducible.rawlogs' in r:
                print(r['reproducible.rawlogs']['log'])
            else:
                print('Raw reproducible logs not found')
                return 1
        for ptest in args.ptest:
            if not show_ptest(r, ptest, logger):
                return 1
        for reproducible in args.reproducible:
            if not show_reproducible(r, reproducible, logger):
                return 1
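# show_ptest() and show_reproducible() are called above but not defined in
# this section. Plausible sketches, assuming the 'ptestresult.sections' /
# 'reproducible' result layouts and the resultutils.ptestresult_get_log()
# helper already used above; the real helpers may differ in detail.
def show_ptest(result, ptest, logger):
    if 'ptestresult.sections' in result and ptest in result['ptestresult.sections']:
        logdata = resultutils.ptestresult_get_log(result, ptest)
        if logdata is not None:
            print(logdata)
            return True
    logger.error("Ptest '%s' log not found" % ptest)
    return False

def show_reproducible(result, reproducible, logger):
    try:
        print(result['reproducible'][reproducible]['diffoscope.text'])
        return True
    except KeyError:
        logger.error("Reproducible test '%s' not found" % reproducible)
        return False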
def view_test_report(self, logger, source_dir, tag):
    test_count_reports = []
    if tag:
        repo = GitRepo(source_dir)
        testresults = resultutils.git_get_result(repo, [tag])
    else:
        testresults = resultutils.load_resultsdata(source_dir)
    for testsuite in testresults:
        for resultid in testresults[testsuite]:
            result = testresults[testsuite][resultid]
            test_count_report = self.get_aggregated_test_result(logger, result)
            test_count_report['testseries'] = result['configuration']['TESTSERIES']
            test_count_report['result_id'] = resultid
            test_count_reports.append(test_count_report)
    self.print_test_report('test_report_full_text.txt', test_count_reports)
def log(args, logger):
    results = resultutils.load_resultsdata(args.source)
    for path in results:
        for res in results[path]:
            if 'result' not in results[path][res]:
                continue
            r = results[path][res]['result']
            if args.raw:
                if 'ptestresult.rawlogs' in r:
                    print(r['ptestresult.rawlogs']['log'])
                else:
                    print('Raw logs not found')
                    return 1
            for ptest in args.ptest:
                if not show_ptest(r, ptest, logger):
                    return 1
def view_test_report(self, logger, source_dir, branch, commit, tag):
    test_count_reports = []
    if commit:
        if tag:
            logger.warning("Ignoring --tag as --commit was specified")
        tag_name = "{branch}/{commit_number}-g{commit}/{tag_number}"
        repo = GitRepo(source_dir)
        revs = gitarchive.get_test_revs(logger, repo, tag_name, branch=branch)
        rev_index = gitarchive.rev_find(revs, 'commit', commit)
        testresults = resultutils.git_get_result(repo, revs[rev_index][2])
    elif tag:
        repo = GitRepo(source_dir)
        testresults = resultutils.git_get_result(repo, [tag])
    else:
        testresults = resultutils.load_resultsdata(source_dir)
    for testsuite in testresults:
        for resultid in testresults[testsuite]:
            result = testresults[testsuite][resultid]
            test_count_report = self.get_aggregated_test_result(logger, result)
            test_count_report['testseries'] = result['configuration']['TESTSERIES']
            test_count_report['result_id'] = resultid
            test_count_reports.append(test_count_report)
    self.print_test_report('test_report_full_text.txt', test_count_reports)
def get_results(logger, source):
    return resultutils.load_resultsdata(source, configmap=resultutils.regression_map)
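# A brief usage sketch for get_results(): because both sides are loaded with
# resultutils.regression_map, their top-level keys are directly comparable.
# The helper below is illustrative only and not part of this section.
def list_common_result_sets(logger, base_source, target_source):
    base_results = get_results(logger, base_source)
    target_results = get_results(logger, target_source)
    common = sorted(set(base_results) & set(target_results))
    for key in common:
        logger.info('Result set present in both base and target: %s' % key)
    return common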
def view_test_report(self, logger, source_dir, branch, commit, tag, use_regression_map, raw_test, selected_test_case_only):
    def print_selected_testcase_result(testresults, selected_test_case_only):
        for testsuite in testresults:
            for resultid in testresults[testsuite]:
                result = testresults[testsuite][resultid]['result']
                test_case_result = result.get(selected_test_case_only, {})
                if test_case_result.get('status'):
                    print('Found selected test case result for %s from %s' % (selected_test_case_only, resultid))
                    print(test_case_result['status'])
                else:
                    print('Could not find selected test case result for %s from %s' % (selected_test_case_only, resultid))
                if test_case_result.get('log'):
                    print(test_case_result['log'])
    test_count_reports = []
    configmap = resultutils.store_map
    if use_regression_map:
        configmap = resultutils.regression_map
    if commit:
        if tag:
            logger.warning("Ignoring --tag as --commit was specified")
        tag_name = "{branch}/{commit_number}-g{commit}/{tag_number}"
        repo = GitRepo(source_dir)
        revs = gitarchive.get_test_revs(logger, repo, tag_name, branch=branch)
        rev_index = gitarchive.rev_find(revs, 'commit', commit)
        testresults = resultutils.git_get_result(repo, revs[rev_index][2], configmap=configmap)
    elif tag:
        repo = GitRepo(source_dir)
        testresults = resultutils.git_get_result(repo, [tag], configmap=configmap)
    else:
        testresults = resultutils.load_resultsdata(source_dir, configmap=configmap)
    if raw_test:
        raw_results = {}
        for testsuite in testresults:
            result = testresults[testsuite].get(raw_test, {})
            if result:
                raw_results[testsuite] = {raw_test: result}
        if raw_results:
            if selected_test_case_only:
                print_selected_testcase_result(raw_results, selected_test_case_only)
            else:
                print(json.dumps(raw_results, sort_keys=True, indent=4))
        else:
            print('Could not find raw test result for %s' % raw_test)
        return 0
    if selected_test_case_only:
        print_selected_testcase_result(testresults, selected_test_case_only)
        return 0
    for testsuite in testresults:
        for resultid in testresults[testsuite]:
            skip = False
            result = testresults[testsuite][resultid]
            machine = result['configuration']['MACHINE']
            # Check to see if there are already results for these kinds of tests for the machine
            for key in result['result'].keys():
                testtype = str(key).split('.')[0]
                if ((machine in self.ltptests and testtype == "ltpiresult" and self.ltptests[machine]) or
                        (machine in self.ltpposixtests and testtype == "ltpposixresult" and self.ltpposixtests[machine])):
                    print("Already have test results for %s on %s, skipping %s" % (str(key).split('.')[0], machine, resultid))
                    skip = True
                    break
            if skip:
                break
            test_count_report = self.get_aggregated_test_result(logger, result, machine)
            test_count_report['machine'] = machine
            test_count_report['testseries'] = result['configuration']['TESTSERIES']
            test_count_report['result_id'] = resultid
            test_count_reports.append(test_count_report)
    self.print_test_report('test_report_full_text.txt', test_count_reports)
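# get_aggregated_test_result() is called by the view_test_report() variants
# above but not shown in this section. A minimal sketch of such an aggregator,
# matching the (logger, result, machine) call used in the last variant and
# assuming each entry in result['result'] carries a 'status' of
# PASSED/FAILED/ERROR/SKIPPED; the dictionary fields are illustrative.
def get_aggregated_test_result(self, logger, testresult, machine):
    test_count_report = {'passed': 0, 'failed': 0, 'skipped': 0, 'failed_testcases': []}
    result = testresult.get('result', {})
    for k in result:
        test_status = result[k].get('status', '')
        if test_status == 'PASSED':
            test_count_report['passed'] += 1
        elif test_status in ('FAILED', 'ERROR'):
            test_count_report['failed'] += 1
            test_count_report['failed_testcases'].append(k)
        elif test_status == 'SKIPPED':
            test_count_report['skipped'] += 1
    return test_count_report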