def main():
    """Run every ComplianceTest over the commit range from the command line.

    Writes the combined JUnit results to compliance.xml. When --github is
    given, each failed case is also reported back to GitHub as a commit
    status, and a markdown comment body is accumulated for the pull request.
    """
    args = parse_args()

    github_token = ''
    gh = None
    if args.github:
        # A missing GH_TOKEN raises KeyError here -- GitHub reporting is
        # impossible without credentials, so failing early is intended.
        github_token = os.environ['GH_TOKEN']
        gh = Github(github_token)

    # --status mode: only set the commit status, then stop.
    if args.status and args.sha is not None and args.repo and gh:
        set_status(gh, args.repo, args.sha)
        sys.exit(0)

    if not args.commits:
        sys.exit(1)

    suite = TestSuite("Compliance")
    docs = {}
    for test_cls in ComplianceTest.__subclasses__():
        test = test_cls(suite, args.commits)
        test.run()
        suite.add_testcase(test.case)
        # Remember each test's doc link/text, keyed by case name, for the
        # GitHub status reporting below.
        docs[test.case.name] = test._doc

    xml = JUnitXml()
    xml.add_testsuite(suite)
    xml.update_statistics()
    xml.write('compliance.xml')

    if args.github:
        repo = gh.get_repo(args.repo)
        pr = repo.get_pull(int(args.pull_request))
        commit = repo.get_commit(args.sha)

        comment = "Found the following issues, please fix and resubmit:\n\n"
        comment_count = 0

        print("Processing results...")

        for case in suite:
            if case.result and case.result.type != 'skipped':
                comment_count += 1
                comment += ("## {}\n".format(case.result.message))
                comment += "\n"
                # These checks already emit markdown-formatted output; all
                # other output is wrapped in a literal code fence. Computing
                # the condition once replaces the original duplicated
                # membership test for the opening and closing fence.
                fenced = case.name not in ['Gitlint', 'Identity/Emails',
                                           'License']
                if fenced:
                    comment += "```\n"
                comment += ("{}\n".format(case.result._elem.text))
                if fenced:
                    comment += "```\n"
                commit.create_status('failure', docs[case.name],
                                     'Verification failed', case.name)
def gen_results_summary(results_dir, output_fn=None, merge_fn=None,
                        verbose=False, print_section=False,
                        results_file='results.xml'):
    """Scan a results directory and generate a summary file.

    Args:
        results_dir: directory tree to scan for results files.
        output_fn: write the summary to this file instead of stdout.
        merge_fn: if given, also write a merged JUnit XML file here; an
            existing file at that path is preserved as <merge_fn>.bak.
        verbose: print per-test detail (forced on for runs of < 30 tests).
        print_section: passed through to print_summary().
        results_file: name of the per-run results file to look for.

    Returns:
        The number of results files that were summarized.
    """
    reports = []
    combined = JUnitXml()
    nr_files = 0
    out_f = sys.stdout

    for filename in get_results(results_dir, results_file):
        reports.append(JUnitXml.fromfile(filename))

    if not reports:
        return 0

    if output_fn is not None:
        out_f = open(output_fn, "w")

    try:
        # Properties are assumed identical across reports; use the first.
        props = copy.deepcopy(reports[0].child(Properties))
        ltm = check_for_ltm(results_dir, props)

        print_header(out_f, props)

        # LTM runs are ordered by host name; others chronologically.
        if ltm:
            def sort_by(ts):
                return ts.hostname
        else:
            def sort_by(ts):
                return parse_timestamp(ts.timestamp)

        if total_tests(reports) < 30:
            verbose = True

        for testsuite in sorted(reports, key=sort_by):
            print_summary(out_f, testsuite, verbose, print_section)
            combined.add_testsuite(testsuite)
            nr_files += 1

        out_f.write('Totals: %d tests, %d skipped, %d failures, %d errors, %ds\n'
                    % sum_testsuites(reports))

        print_trailer(out_f, props)
    finally:
        # Bug fix: the summary file was previously never closed, leaking the
        # handle (and possibly losing buffered output on interpreter exit).
        if out_f is not sys.stdout:
            out_f.close()

    if merge_fn is not None:
        combined.update_statistics()
        # Write to a temporary name first so the existing merge file is only
        # replaced once the new one is fully written; keep the old as .bak.
        combined.write(merge_fn + '.new')
        if os.path.exists(merge_fn):
            os.rename(merge_fn, merge_fn + '.bak')
        os.rename(merge_fn + '.new', merge_fn)

    return nr_files
def parse_junit(junit_dir):
    """Merge the junit_*.xml files in junit_dir into junit_combined.xml and
    compute the overall test failure rate.

    The individual junit_*.xml files are deleted after merging. The runner's
    own junit_runner.xml, nested suites, and skipped testcases are excluded.

    Args:
        junit_dir: directory containing the per-test junit_*.xml files.

    Returns:
        The payload generated for CANARY_TEST_FAILURE_RATE, carrying the
        failure rate as an integer percentage rounded up.
    """
    test_suite = TestSuite("Combined TestSuite")

    for junit_xml in glob.glob(os.path.join(junit_dir, "junit_*.xml")):
        # The runner's own results file is not a test result; skip it.
        if "junit_runner.xml" in junit_xml:
            continue
        parsed = JUnitXml.fromfile(junit_xml)
        for testcase in parsed:
            # Nested suites and skipped cases don't count toward the rate.
            if isinstance(testcase, TestSuite) or isinstance(testcase.result,
                                                             Skipped):
                continue
            test_suite.add_testcase(testcase)
        os.remove(junit_xml)

    xml = JUnitXml()
    xml.add_testsuite(test_suite)
    # Bug fix: statistics must be computed *before* writing; previously the
    # combined file was written without its tests/failures/errors counts.
    xml.update_statistics()
    xml.write(os.path.join(junit_dir, "junit_combined.xml"))

    test_failure_rate = 0
    if xml.tests != 0:
        # Percentage of failed + errored tests, rounded up to a whole number.
        test_failure_rate = int(
            math.ceil(((xml.failures + xml.errors) * 100) / xml.tests))

    return utils.generate_payload(CANARY_TEST_FAILURE_RATE, test_failure_rate)
def _main(args):
    """Run the compliance tests and write JUnit results to args.output.

    Returns 0 in --list mode, 1 if a requested previous-results file is
    missing. NOTE(review): the visible body ends after scanning results
    without an explicit return -- presumably the failure count is returned
    further down; confirm against the full file.
    """
    # The "real" main(), which is wrapped to catch exceptions and report them
    # to GitHub. Returns the number of test failures.

    # The absolute path of the top-level git directory. Initialize it here so
    # that issues running Git can be reported to GitHub.
    global GIT_TOP
    GIT_TOP = git("rev-parse", "--show-toplevel")

    # The commit range passed in --commit, e.g. "HEAD~3"
    global COMMIT_RANGE
    COMMIT_RANGE = args.commits

    init_logs(args.loglevel)

    # --list mode: print the available test names and stop.
    if args.list:
        for testcase in ComplianceTest.__subclasses__():
            print(testcase.name)
        return 0

    # Load saved test results from an earlier run, if requested
    if args.previous_run:
        if not os.path.exists(args.previous_run):
            # This probably means that an earlier pass had an internal error
            # (the script is currently run multiple times by the ci-pipelines
            # repo). Since that earlier pass might've posted an error to
            # GitHub, avoid generating a GitHub comment here, by avoiding
            # sys.exit() (which gets caught in main()).
            print("error: '{}' not found".format(args.previous_run),
                  file=sys.stderr)
            return 1

        logging.info("Loading previous results from " + args.previous_run)
        # Only the first suite in the saved file is used.
        for loaded_suite in JUnitXml.fromfile(args.previous_run):
            suite = loaded_suite
            break
    else:
        suite = TestSuite("Compliance")

    for testcase in ComplianceTest.__subclasses__():
        # "Modules" and "testcases" are the same thing. Better flags would have
        # been --tests and --exclude-tests or the like, but it's awkward to
        # change now.
        if args.module and testcase.name not in args.module:
            continue
        if testcase.name in args.exclude_module:
            print("Skipping " + testcase.name)
            continue

        test = testcase()
        try:
            # '<git-top>' is a placeholder path_hint meaning "run from the
            # repository root".
            print(
                f"Running {test.name:16} tests in "
                f"{GIT_TOP if test.path_hint == '<git-top>' else test.path_hint} ..."
            )
            test.run()
        # EndTest is the tests' way of aborting early; the partial result in
        # test.case is still recorded.
        except EndTest:
            pass
        suite.add_testcase(test.case)

    # Write the combined results for later passes / CI consumption.
    xml = JUnitXml()
    xml.add_testsuite(suite)
    xml.update_statistics()
    xml.write(args.output, pretty=True)

    failed_cases = []
    # Map each test name to its documentation, for failure reporting.
    name2doc = {
        testcase.name: testcase.doc
        for testcase in ComplianceTest.__subclasses__()
    }

    for case in suite:
        if case.result:
            if case.result.type == 'skipped':
                logging.warning("Skipped %s, %s",
                                case.name, case.result.message)
            else:
                failed_cases.append(case)
        else:
            # Some checks like codeowners can produce no .result
            logging.info("No JUnit result for %s", case.name)
def main():
    """Command-line entry point: run the compliance tests over the given
    commit range, write JUnit results to args.output, and either report
    failures to GitHub or count them locally.

    Exits 0 after --list or --status handling, 1 when no commit range is
    given. NOTE(review): the visible body ends at the `errors` assignment
    without using it -- presumably the caller/tail exits with that count;
    confirm against the full file.
    """
    args = parse_args()

    init_logs(args.loglevel)

    # --list mode: print the available test names and stop.
    if args.list:
        for testcase in ComplianceTest.__subclasses__():
            # Tests are instantiated with dummy arguments just to read _name.
            test = testcase(None, "")
            print("{}".format(test._name))
        sys.exit(0)

    # --status mode: only set the commit status, then stop.
    if args.status and args.sha is not None and args.repo:
        set_status(args.repo, args.sha)
        sys.exit(0)

    if not args.commits:
        print("No commit range given.")
        sys.exit(1)

    # Reuse saved results from an earlier run when available; only the first
    # suite in the file is used.
    if args.previous_run and os.path.exists(args.previous_run) and args.module:
        junit_xml = JUnitXml.fromfile(args.previous_run)
        logging.info("Loaded previous results from %s", args.previous_run)
        for loaded_suite in junit_xml:
            suite = loaded_suite
            break
    else:
        suite = TestSuite("Compliance")

    # Map each test name to its documentation, for failure reporting.
    docs = {}
    for testcase in ComplianceTest.__subclasses__():
        test = testcase(None, "")
        docs[test._name] = test._doc

    for testcase in ComplianceTest.__subclasses__():
        test = testcase(suite, args.commits)
        if args.module:
            # --module acts as an include list.
            if test._name in args.module:
                test.run()
                suite.add_testcase(test.case)
        else:
            # Otherwise run everything except the excluded modules.
            if test._name in args.exclude_module:
                print("Skipping {}".format(test._name))
                continue
            test.run()
            suite.add_testcase(test.case)

    # Write the combined results for later passes / CI consumption.
    xml = JUnitXml()
    xml.add_testsuite(suite)
    xml.update_statistics()
    xml.write(args.output)

    failed_cases = []

    # TODO maybe: move all the github-related code to a different .py
    # file to draw a better line between developer code versus
    # infrastructure-specific code, in other words keep this file
    # 100% testable and maintainable by non-admins developers.
    if args.github and 'GH_TOKEN' in os.environ:
        errors = report_to_github(args.repo, args.pull_request, args.sha,
                                  suite, docs)
    else:
        for case in suite:
            if case.result:
                if case.result.type == 'skipped':
                    logging.warning("Skipped %s, %s",
                                    case.name, case.result.message)
                else:
                    failed_cases.append(case)
            else:
                # Some checks like codeowners can produce no .result
                logging.info("No JUnit result for %s", case.name)
        errors = len(failed_cases)
def _main(args):
    """Run the compliance tests and write JUnit results to args.output.

    Returns 0 after --list or --status handling, 1 when a requested
    previous-results file is missing. NOTE(review): the visible body ends at
    the `n_fails` assignment without using it -- presumably the failure count
    is returned further down; confirm against the full file.
    """
    # The "real" main(), which is wrapped to catch exceptions and report them
    # to GitHub. Returns the number of test failures.

    init_logs(args.loglevel)

    # --list mode: print the available test names and stop.
    if args.list:
        for testcase in ComplianceTest.__subclasses__():
            print(testcase._name)
        return 0

    # --status mode: mark the commit status "pending" and stop.
    if args.status:
        set_pending()
        return 0

    if not args.commits:
        err("No commit range given")

    # Load saved test results from an earlier run, if requested
    if args.previous_run:
        if not os.path.exists(args.previous_run):
            # This probably means that an earlier pass had an internal error
            # (the script is currently run multiple times by the ci-pipelines
            # repo). Since that earlier pass might've posted an error to
            # GitHub, avoid generating a GitHub comment here, by avoiding
            # sys.exit() (which gets caught in main()).
            print("error: '{}' not found".format(args.previous_run),
                  file=sys.stderr)
            return 1

        logging.info("Loading previous results from " + args.previous_run)
        # Only the first suite in the saved file is used.
        for loaded_suite in JUnitXml.fromfile(args.previous_run):
            suite = loaded_suite
            break
    else:
        suite = TestSuite("Compliance")

    for testcase in ComplianceTest.__subclasses__():
        test = testcase(suite, args.commits)
        # --module acts as an include list; --exclude-module only applies
        # when no include list was given.
        if args.module:
            if test._name not in args.module:
                continue
        elif test._name in args.exclude_module:
            print("Skipping " + test._name)
            continue
        try:
            test.run()
        # EndTest is the tests' way of aborting early; the partial result in
        # test.case is still recorded.
        except EndTest:
            pass
        suite.add_testcase(test.case)

    # Write the combined results for later passes / CI consumption.
    xml = JUnitXml()
    xml.add_testsuite(suite)
    xml.update_statistics()
    xml.write(args.output, pretty=True)

    failed_cases = []

    # TODO maybe: move all the github-related code to a different .py
    # file to draw a better line between developer code versus
    # infrastructure-specific code, in other words keep this file
    # 100% testable and maintainable by non-admins developers.
    if args.github:
        n_fails = report_test_results_to_github(suite)
    else:
        for case in suite:
            if case.result:
                if case.result.type == 'skipped':
                    logging.warning("Skipped %s, %s",
                                    case.name, case.result.message)
                else:
                    failed_cases.append(case)
            else:
                # Some checks like codeowners can produce no .result
                logging.info("No JUnit result for %s", case.name)
        n_fails = len(failed_cases)