def publish_comment(self, title: str, stats: UnitTestRunResults, pull_request: PullRequest, check_run: Optional[CheckRun] = None, cases: Optional[UnitTestCaseResults] = None) -> PullRequest:
    """Publish the test results as a comment on the pull request and return it.

    When configured to compare against earlier results, fetches the check run of
    the PR's base commit and annotates the stats with deltas against it.
    """
    # Look up the check run on the base commit so we can report deltas.
    earlier_check_run = None
    if self._settings.compare_earlier:
        base_commit_sha = self.get_base_commit_sha(pull_request)
        logger.debug(f'comparing against base={base_commit_sha}')
        earlier_check_run = self.get_check_run(base_commit_sha)

    earlier_stats = self.get_stats_from_check_run(earlier_check_run) if earlier_check_run is not None else None
    stats_with_delta = get_stats_delta(stats, earlier_stats, 'base') if earlier_stats is not None else stats
    logger.debug(f'stats with delta: {stats_with_delta}')

    # Derive added/removed/skipped test changes between the earlier check run and current cases.
    before_all, before_skipped = self.get_test_lists_from_check_run(earlier_check_run)
    current_all = get_all_tests_list(cases)
    current_skipped = get_skipped_tests_list(cases)
    test_changes = SomeTestChanges(before_all, current_all, before_skipped, current_skipped)

    details_url = check_run.html_url if check_run else None
    summary = get_long_summary_md(stats_with_delta, details_url, test_changes, self._settings.test_changes_limit)
    body = f'## {title}\n{summary}'

    # Reuse an existing comment when comment_mode == comment_mode_update;
    # otherwise (or when no comment could be reused) create a new one.
    if self._settings.comment_mode != comment_mode_update or not self.reuse_comment(pull_request, body):
        logger.info('creating comment')
        pull_request.create_issue_comment(body)

    return pull_request
def publish_check(self, stats: UnitTestRunResults, cases: UnitTestCaseResults, compare_earlier: bool, conclusion: str) -> CheckRun:
    """Publish the test results as a check run on the commit; returns the last created CheckRun.

    When compare_earlier is set, stats of the event's 'before' commit are fetched
    and the published summary includes deltas against them.
    """
    # Fetch stats from the 'before' commit so deltas can be computed.
    before_stats = None
    if compare_earlier:
        before_commit_sha = self._settings.event.get('before')
        logger.debug(f'comparing against before={before_commit_sha}')
        before_stats = self.get_stats_from_commit(before_commit_sha)

    stats_with_delta = get_stats_delta(stats, before_stats, 'earlier') if before_stats is not None else stats
    logger.debug(f'stats with delta: {stats_with_delta}')

    all_annotations = (get_error_annotations(stats.errors)
                       + get_case_annotations(cases, self._settings.report_individual_runs)
                       + self.get_test_list_annotations(cases))

    # Only 50 annotations can be sent at once, so chunk them; an empty
    # annotation list still produces one (annotation-free) check run.
    chunks = [all_annotations[start:start + 50]
              for start in range(0, len(all_annotations), 50)] or [[]]

    check_run = None
    for chunk in chunks:
        output = dict(title=get_short_summary(stats),
                      summary=get_long_summary_with_digest_md(stats_with_delta, stats),
                      annotations=[annotation.to_dict() for annotation in chunk])
        logger.info('creating check')
        check_run = self._repo.create_check_run(name=self._settings.check_name,
                                                head_sha=self._settings.commit,
                                                status='completed',
                                                conclusion=conclusion,
                                                output=output)
        logger.debug(f'created check {check_run}')

    return check_run
def test_get_stats_delta(self):
    """get_stats_delta yields per-field deltas of current stats against a reference."""
    current = UnitTestRunResults(
        files=1, errors=errors, suites=2, duration=3,
        tests=20, tests_succ=2, tests_skip=5, tests_fail=6, tests_error=7,
        runs=40, runs_succ=12, runs_skip=8, runs_fail=9, runs_error=10,
        commit='commit'
    )
    reference = UnitTestRunResults(
        files=3, errors=[ParseError('other file', 'other error', None, None)], suites=5, duration=7,
        tests=41, tests_succ=5, tests_skip=11, tests_fail=13, tests_error=15,
        runs=81, runs_succ=25, runs_skip=17, runs_fail=19, runs_error=21,
        commit='ref'
    )
    expected = UnitTestRunDeltaResults(
        files=n(1, -2), errors=errors, suites=n(2, -3), duration=d(3, -4),
        tests=n(20, -21), tests_succ=n(2, -3), tests_skip=n(5, -6), tests_fail=n(6, -7), tests_error=n(7, -8),
        runs=n(40, -41), runs_succ=n(12, -13), runs_skip=n(8, -9), runs_fail=n(9, -10), runs_error=n(10, -11),
        commit='commit', reference_commit='ref', reference_type='type'
    )
    self.assertEqual(get_stats_delta(current, reference, 'type'), expected)