    def test_publish_comment_with_check_run_with_annotations(self):
        settings = self.create_settings()
        base_commit = 'base-commit'

        gh, gha, req, repo, commit = self.create_mocks(
            digest=self.digest, check_names=[settings.check_name])
        pr = self.create_github_pr(settings.repo, base_commit)
        cr = mock.MagicMock(html_url='http://check-run.url')
        publisher = Publisher(settings, gh, gha)

        publisher.publish_comment(settings.comment_title, self.stats, pr, cr)

        repo.get_commit.assert_called_once_with(base_commit)
        pr.create_issue_comment.assert_called_once_with(
            '## Comment Title\n'
            '\u205f\u20041 files\u2004 ±0\u2002\u20032 suites\u2004 ±0\u2002\u2003\u20023s :stopwatch: ±0s\n'
            '22 tests +1\u2002\u20034 :heavy_check_mark: \u2006-\u200a\u205f\u20048\u2002\u20035 :zzz: +1\u2002\u2003\u205f\u20046 :x: +4\u2002\u2003\u205f\u20047 :fire: +\u205f\u20044\u2002\n'
            '38 runs\u2006 +1\u2002\u20038 :heavy_check_mark: \u2006-\u200a17\u2002\u20039 :zzz: +2\u2002\u200310 :x: +6\u2002\u200311 :fire: +10\u2002\n'
            '\n'
            'For more details on these failures and errors, see [this check](http://check-run.url).\n'
            '\n'
            'Results for commit commit.\u2003± Comparison against base commit base.\n'
        )

    def test_publish_comment_with_check_run_without_annotations(self):
        settings = self.create_settings()
        base_commit = 'base-commit'

        gh, gha, req, repo, commit = self.create_mocks(
            digest=self.base_digest, check_names=[settings.check_name])
        pr = self.create_github_pr(settings.repo, base_commit)
        cr = mock.MagicMock(html_url='http://check-run.url')
        publisher = Publisher(settings, gh, gha)

        stats = dict(self.stats.to_dict())
        stats.update(tests_fail=0, tests_error=0, runs_fail=0, runs_error=0)
        stats = UnitTestRunResults.from_dict(stats)
        publisher.publish_comment(settings.comment_title, stats, pr, cr)

        repo.get_commit.assert_called_once_with(base_commit)
        pr.create_issue_comment.assert_called_once_with(
            '## Comment Title\n'
            '\u205f\u20041 files\u2004 ±0\u2002\u20032 suites\u2004 ±0\u2002\u2003\u20023s :stopwatch: ±0s\n'
            '22 tests +1\u2002\u20034 :heavy_check_mark: \u2006-\u200a\u205f\u20048\u2002\u20035 :zzz: +1\u2002\u20030 :x: \u2006-\u200a2\u2002\n'
            '38 runs\u2006 +1\u2002\u20038 :heavy_check_mark: \u2006-\u200a17\u2002\u20039 :zzz: +2\u2002\u20030 :x: \u2006-\u200a4\u2002\n'
            '\n'
            'Results for commit commit.\u2003± Comparison against base commit base.\n'
        )
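
    # The tests above rely on fixture helpers from their base class. Below is a
    # minimal, hypothetical sketch of create_mocks (the real helper lives in the
    # project's test suite): it wires up mocked PyGithub objects so Publisher can
    # resolve the base commit and read the digest an earlier check run embedded
    # in its summary.
    def create_mocks(self, commit=None, digest=None, check_names=None):
        gh = mock.MagicMock(name='github')          # mocked github.Github client
        gha = mock.MagicMock(name='github-action')  # mocked GithubAction logger
        req = mock.MagicMock(name='requester')      # mocked low-level requester
        repo = mock.MagicMock(name='repository')

        # an earlier check run whose summary carries the gzipped results digest
        check_run = mock.MagicMock()
        check_run.name = check_names[0] if check_names else None
        check_run.output.summary = f'[test-results]:data:application/gzip;base64,{digest}'

        commit = commit or mock.MagicMock(name='commit')
        commit.get_check_runs.return_value = [check_run]
        repo.get_commit.return_value = commit
        gh.get_repo.return_value = repo
        return gh, gha, req, repo, commit
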
def main(settings: Settings) -> None:
    gha = GithubAction()

    # we cannot create a check run or pull request comment
    # when running on pull_request event from a fork
    if settings.event_name == 'pull_request' and \
            settings.event.get('pull_request', {}).get('head', {}).get('repo', {}).get('full_name') != settings.repo:
        gha.warning(
            f'This action is running on a pull_request event for a fork repository. '
            f'It cannot do anything useful like creating check runs or pull request comments.'
        )
        return

    # resolve the files_glob to files
    files = [str(file) for file in pathlib.Path().glob(settings.files_glob)]
    if len(files) == 0:
        gha.warning(f'Could not find any files for {settings.files_glob}')
    else:
        logger.info(f'reading {settings.files_glob}')
        logger.debug(f'reading {list(files)}')

    # get the unit test results
    parsed = parse_junit_xml_files(files).with_commit(settings.commit)
    for error in parsed.errors:
        gha.error(message=f'Error processing result file: {error.message}',
                  file=error.file,
                  line=error.line,
                  column=error.column)

    # process the parsed results
    results = get_test_results(parsed, settings.dedup_classes_by_file_name)

    # turn them into stats
    stats = get_stats(results)

    # derive check run conclusion from files
    conclusion = get_conclusion(parsed,
                                fail_on_failures=settings.fail_on_failures,
                                fail_on_errors=settings.fail_on_errors)

    # publish the delta stats
    gh = get_github(token=settings.token,
                    url=settings.api_url,
                    retries=10,
                    backoff_factor=1)
    Publisher(settings, gh, gha).publish(stats, results.case_results,
                                         settings.compare_earlier, conclusion)
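
# A hedged sketch of how main() above might be driven inside the GitHub Actions
# container. Settings is assumed to accept exactly the attributes main() reads;
# the real constructor and input names may differ.
import json
import os

def settings_from_env() -> Settings:
    with open(os.environ['GITHUB_EVENT_PATH']) as f:
        event = json.load(f)  # the webhook payload that triggered the workflow
    return Settings(
        token=os.environ['GITHUB_TOKEN'],
        api_url=os.environ.get('GITHUB_API_URL', 'https://api.github.com'),
        event=event,
        event_name=os.environ['GITHUB_EVENT_NAME'],
        repo=os.environ['GITHUB_REPOSITORY'],
        commit=os.environ['GITHUB_SHA'],
        files_glob=os.environ.get('INPUT_FILES', '*.xml'),
        check_name=os.environ.get('INPUT_CHECK_NAME', 'Unit Test Results'),
        comment_title=os.environ.get('INPUT_COMMENT_TITLE', 'Unit Test Results'),
        fail_on_failures=True,
        fail_on_errors=True,
        dedup_classes_by_file_name=False,
        compare_earlier=True)

if __name__ == '__main__':
    main(settings_from_env())
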
def main(settings: Settings) -> None:
    files = [str(file) for file in pathlib.Path().glob(settings.files_glob)]
    logger.info('reading {}: {}'.format(settings.files_glob, list(files)))

    # get the unit test results
    parsed = parse_junit_xml_files(files).with_commit(settings.commit)

    # process the parsed results
    results = get_test_results(parsed, settings.dedup_classes_by_file_name)

    # turn them into stats
    stats = get_stats(results)

    # publish the delta stats
    gh = Github(settings.token)
    Publisher(settings, gh).publish(stats, results.case_results)
Example #5
def main(settings: Settings, gha: GithubAction) -> None:
    # we cannot create a check run or pull request comment when running on pull_request event from a fork
    # when event_file is given we assume proper setup as in README.md#support-fork-repositories-and-dependabot-branches
    if settings.event_file is None and \
            settings.event_name == 'pull_request' and \
            settings.event.get('pull_request', {}).get('head', {}).get('repo', {}).get('full_name') != settings.repo:
        # bump the version if you change the target of this link (if it did not exist already) or change the section
        gha.warning(f'This action is running on a pull_request event for a fork repository. '
                    f'It cannot do anything useful like creating check runs or pull request comments. '
                    f'To run the action on fork repository pull requests, see '
                    f'https://github.com/EnricoMi/publish-unit-test-result-action/blob/v1.20/README.md#support-fork-repositories-and-dependabot-branches')
        return

    # resolve the files_glob to files
    files = get_files(settings.files_glob)
    if len(files) == 0:
        gha.warning(f'Could not find any files for {settings.files_glob}')
    else:
        logger.info(f'reading {settings.files_glob}')
        logger.debug(f'reading {list(files)}')

    # get the unit test results
    parsed = parse_junit_xml_files(files, settings.time_factor, settings.ignore_runs).with_commit(settings.commit)
    for error in parsed.errors:
        gha.error(message=f'Error processing result file: {error.message}',
                  file=error.file, line=error.line, column=error.column)

    # process the parsed results
    results = get_test_results(parsed, settings.dedup_classes_by_file_name)

    # turn them into stats
    stats = get_stats(results)

    # derive check run conclusion from files
    conclusion = get_conclusion(parsed, fail_on_failures=settings.fail_on_failures, fail_on_errors=settings.fail_on_errors)

    # publish the delta stats
    backoff_factor = max(settings.seconds_between_github_reads, settings.seconds_between_github_writes)
    gh = get_github(token=settings.token, url=settings.api_url, retries=settings.api_retries, backoff_factor=backoff_factor, gha=gha)
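    # monkey-patch PyGithub's private raw-request method (reaching into its
    # name-mangled internals) so that every API call goes through the throttle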
    gh._Github__requester._Requester__requestRaw = throttle_gh_request_raw(
        settings.seconds_between_github_reads,
        settings.seconds_between_github_writes,
        gh._Github__requester._Requester__requestRaw
    )
    Publisher(settings, gh, gha).publish(stats, results.case_results, conclusion)
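
# A plausible sketch of the throttle wrapper used above (hypothetical
# implementation; the parameter list mirrors PyGithub's Requester.__requestRaw,
# which it decorates). It spaces out consecutive API calls, allowing a longer
# gap after writes than after reads.
import time

def throttle_gh_request_raw(seconds_between_reads, seconds_between_writes, request_raw):
    next_allowed = [0.0]  # closure state: earliest permitted time for the next call

    def throttled_request_raw(cnx, verb, url, requestHeaders, input):
        wait = next_allowed[0] - time.monotonic()
        if wait > 0:
            time.sleep(wait)
        try:
            return request_raw(cnx, verb, url, requestHeaders, input)
        finally:
            # GET requests are reads; every other verb mutates state
            seconds = seconds_between_reads if verb == 'GET' else seconds_between_writes
            next_allowed[0] = time.monotonic() + seconds

    return throttled_request_raw
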
Example #6
def main(settings: Settings) -> None:
    gha = GithubAction()

    # resolve the files_glob to files
    files = [str(file) for file in pathlib.Path().glob(settings.files_glob)]
    if len(files) == 0:
        gha.warning(f'Could not find any files for {settings.files_glob}')
    else:
        logger.info(f'reading {settings.files_glob}')
        logger.debug(f'reading {list(files)}')

    # get the unit test results
    parsed = parse_junit_xml_files(files).with_commit(settings.commit)
    for error in parsed.errors:
        gha.error(message=f'Error processing result file: {error.message}',
                  file=error.file,
                  line=error.line,
                  column=error.column)

    # process the parsed results
    results = get_test_results(parsed, settings.dedup_classes_by_file_name)

    # turn them into stats
    stats = get_stats(results)

    # derive check run conclusion from files
    conclusion = get_conclusion(parsed,
                                fail_on_failures=settings.fail_on_failures,
                                fail_on_errors=settings.fail_on_errors)

    # publish the delta stats
    gh = get_github(token=settings.token,
                    url=settings.api_url,
                    retries=10,
                    backoff_factor=1)
    Publisher(settings, gh, gha).publish(stats, results.case_results,
                                         settings.compare_earlier, conclusion)
    def publish(self):
        self.publisher = Publisher(classification=self.classification)

    def test_publish_check_with_multiple_annotation_pages(self):
        earlier_commit = 'past'
        settings = self.create_settings(before=earlier_commit)
        gh, gha, req, repo, commit = self.create_mocks(
            commit=mock.Mock(),
            digest=self.past_digest,
            check_names=[settings.check_name])
        publisher = Publisher(settings, gh, gha)

        # generate a lot of cases
        cases = UnitTestCaseResults([
            ((None, 'class', f'test{i}'),
             dict(failure=[
                 UnitTestCase(result_file='result file',
                              test_file='test file',
                              line=i,
                              class_name='class',
                              test_name=f'test{i}',
                              result='failure',
                              message=f'message{i}',
                              content=f'content{i}',
                              time=1.234 + i / 1000)
             ])) for i in range(1, 151)
        ])

        # makes gzipped digest deterministic
        with mock.patch('gzip.time.time', return_value=0):
            check_run = publisher.publish_check(self.stats, cases,
                                                'conclusion')

        repo.get_commit.assert_called_once_with(earlier_commit)
        # we expect multiple calls to create_check_run
        create_check_run_kwargss = [
            dict(
                name=settings.check_name,
                head_sha=settings.commit,
                status='completed',
                conclusion='conclusion',
                output={
                    'title': '7 errors, 6 fail, 5 skipped, 4 pass in 3s',
                    'summary':
                    '\u205f\u20041 files\u2004 ±0\u2002\u20032 suites\u2004 ±0\u2002\u2003\u20023s :stopwatch: ±0s\n'
                    '22 tests +1\u2002\u20034 :heavy_check_mark: \u2006-\u200a\u205f\u20048\u2002\u20035 :zzz: +1\u2002\u2003\u205f\u20046 :x: +4\u2002\u2003\u205f\u20047 :fire: +\u205f\u20044\u2002\n'
                    '38 runs\u2006 +1\u2002\u20038 :heavy_check_mark: \u2006-\u200a17\u2002\u20039 :zzz: +2\u2002\u200310 :x: +6\u2002\u200311 :fire: +10\u2002\n'
                    '\n'
                    'Results for commit commit.\u2003± Comparison against earlier commit past.\n'
                    '\n'
                    '[test-results]:data:application/gzip;base64,'
                    'H4sIAAAAAAAC/0WOSQqEMBBFryJZu+g4tK2XkRAVCoc0lWQl3t'
                    '3vULqr9z48alUDTb1XTaLTRPlI4YQM0EU2gdwCzIEYwjllAq2P'
                    '1sIUrxjpD1E+YjA0QXwf0TM7hqlgOC5HMP/dt/RevnK18F3THx'
                    'FS08fz1s0zBZBc2w5zHdX73QAAAA==',
                    'annotations': ([{
                        'path': '.github',
                        'start_line': 0,
                        'end_line': 0,
                        'annotation_level': 'notice',
                        'message': 'There are 150 tests, see "Raw output" for the full list of tests.',
                        'title': '150 tests found',
                        'raw_details': '\n'.join(sorted([f'class ‑ test{i}' for i in range(1, 151)]))
                    }] if start == 1 else []) + [
                        {
                            'path': 'test file',
                            'start_line': i,
                            'end_line': i,
                            'annotation_level': 'warning',
                            'message': 'result file',
                            'title': f'test{i} (class) failed',
                            'raw_details': f'content{i}'
                        }
                        # each call carries at most 50 annotations: the first batch
                        # pairs the notice with 49 failures, the last holds only one
                        for i in range(
                            start, start +
                            (49 if start == 1 else 50 if start < 150 else 1))
                    ]
                })
            # we expect four calls to create_check_run, one batch starting at each of these offsets
            for start in [1, 50, 100, 150]
        ]
        repo.create_check_run.assert_has_calls(
            [mock.call(**create_check_run_kwargs)
             for create_check_run_kwargs in create_check_run_kwargss],
            any_order=False)

        # this checks that publisher.publish_check returned
        # the result of the last call to repo.create_check_run
        self.assertEqual(
            {'check_run_for_kwargs': create_check_run_kwargss[-1]}, check_run)
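
    # The test above asserts paged publishing: the GitHub checks API accepts at
    # most 50 annotations per create/update request. The first page pairs the
    # "150 tests found" notice with 49 failures, so later pages start at offsets
    # 50, 100 and 150. A minimal sketch of that batching (hypothetical helper
    # name, not necessarily the publisher's actual method; assumes
    # `from typing import Any, Dict, Iterator, List`):
    @staticmethod
    def paged(annotations: List[Dict[str, Any]], page_size: int = 50) -> Iterator[List[Dict[str, Any]]]:
        # yield successive slices of at most page_size annotations
        for start in range(0, len(annotations), page_size):
            yield annotations[start:start + page_size]
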
    def do_test_publish_check_with_base_stats(self, errors: List[ParseError]):
        earlier_commit = 'past'
        settings = self.create_settings(before=earlier_commit)
        gh, gha, req, repo, commit = self.create_mocks(
            commit=mock.Mock(),
            digest=self.past_digest,
            check_names=[settings.check_name])
        publisher = Publisher(settings, gh, gha)

        # makes gzipped digest deterministic
        with mock.patch('gzip.time.time', return_value=0):
            check_run = publisher.publish_check(self.stats.with_errors(errors),
                                                self.cases, 'conclusion')

        repo.get_commit.assert_called_once_with(earlier_commit)
        error_annotations = [
            get_error_annotation(error).to_dict() for error in errors
        ]
        create_check_run_kwargs = dict(
            name=settings.check_name,
            head_sha=settings.commit,
            status='completed',
            conclusion='conclusion',
            output={
                'title': '{}7 errors, 6 fail, 5 skipped, 4 pass in 3s'.format(
                    '{} parse errors, '.format(len(errors)) if len(errors) > 0 else ''),
                'summary':
                '\u205f\u20041 files\u2004 ±0\u2002\u2003{errors}2 suites\u2004 ±0\u2002\u2003\u20023s :stopwatch: ±0s\n'
                '22 tests +1\u2002\u20034 :heavy_check_mark: \u2006-\u200a\u205f\u20048\u2002\u20035 :zzz: +1\u2002\u2003\u205f\u20046 :x: +4\u2002\u2003\u205f\u20047 :fire: +\u205f\u20044\u2002\n'
                '38 runs\u2006 +1\u2002\u20038 :heavy_check_mark: \u2006-\u200a17\u2002\u20039 :zzz: +2\u2002\u200310 :x: +6\u2002\u200311 :fire: +10\u2002\n'
                '\n'
                'Results for commit commit.\u2003± Comparison against earlier commit past.\n'
                '\n'
                '[test-results]:data:application/gzip;base64,'
                'H4sIAAAAAAAC/0WOSQqEMBBFryJZu+g4tK2XkRAVCoc0lWQl3t'
                '3vULqr9z48alUDTb1XTaLTRPlI4YQM0EU2gdwCzIEYwjllAq2P'
                '1sIUrxjpD1E+YjA0QXwf0TM7hqlgOC5HMP/dt/RevnK18F3THx'
                'FS08fz1s0zBZBc2w5zHdX73QAAAA=='.format(
                    errors='{} errors\u2004\u2003'.format(len(errors)) if len(errors) > 0 else ''),
                'annotations': error_annotations + [{
                    'path': '.github',
                    'start_line': 0,
                    'end_line': 0,
                    'annotation_level': 'notice',
                    'message': 'There is 1 skipped test, see "Raw output" for the name of the skipped test.',
                    'title': '1 skipped test found',
                    'raw_details': 'class ‑ test3'
                }, {
                    'path': '.github',
                    'start_line': 0,
                    'end_line': 0,
                    'annotation_level': 'notice',
                    'message': 'There are 3 tests, see "Raw output" for the full list of tests.',
                    'title': '3 tests found',
                    'raw_details': 'class ‑ test\nclass ‑ test2\nclass ‑ test3'
                }, {
                    'path': 'test file',
                    'start_line': 0,
                    'end_line': 0,
                    'annotation_level': 'warning',
                    'message': 'result file',
                    'title': '1 out of 2 runs failed: test (class)',
                    'raw_details': 'content'
                }, {
                    'path': 'test file',
                    'start_line': 0,
                    'end_line': 0,
                    'annotation_level': 'failure',
                    'message': 'result file',
                    'title': '1 out of 2 runs with error: test2 (class)',
                    'raw_details': 'error content'
                }]
            })
        repo.create_check_run.assert_called_once_with(**create_check_run_kwargs)

        # this checks that publisher.publish_check returned
        # the result of the last call to repo.create_check_run
        self.assertEqual({'check_run_for_kwargs': create_check_run_kwargs},
                         check_run)
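
# A hedged sketch of the get_error_annotation helper referenced above
# (hypothetical reconstruction; the field names follow the annotation dicts
# these tests assert, and the real Annotation type may differ).
from dataclasses import asdict, dataclass
from typing import Any, Dict, Optional

@dataclass(frozen=True)
class Annotation:
    path: str
    start_line: int
    end_line: int
    annotation_level: str
    message: str
    title: str
    raw_details: Optional[str] = None

    def to_dict(self) -> Dict[str, Any]:
        # drop absent fields so the dict matches what the checks API expects
        return {k: v for k, v in asdict(self).items() if v is not None}

def get_error_annotation(error: ParseError) -> Annotation:
    # surface a parse failure as a failure-level annotation on the result file
    return Annotation(path=error.file,
                      start_line=error.line or 0,
                      end_line=error.line or 0,
                      annotation_level='failure',
                      message=error.message,
                      title='Error processing result file')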