def _cleanup_prs(cls, to_cleanup, gh_client, config, db_session):
    """Run the retrier's cleanup on every PR in `to_cleanup`, then drop each
    one from the DB session."""
    for pull_request in to_cleanup:
        pr_processor = PullRequestProcessor(pull_request, gh_client, config)
        pr_retrier = cls._build_retrier(config, pull_request)
        pr_retrier.cleanup(pr_processor)
        # once cleaned up, the PR no longer needs to be tracked
        db_session.delete(pull_request)
def test_retrier_can_alter_db_objects(db_session, default_config):
    """Mutations made by the retry callback must end up persisted in the DB."""
    pr = PullRequest('moby/moby', 34567)
    github = FakeGithubClient(
        pr, ['1' * 40],
        [[('coucou', 'pending', 12), ('blah', 'error', 28)]])
    pr_processor = PullRequestProcessor(pr, github, default_config)

    def mutate_db_objects(processor, checks_status):
        # alter both the PR itself and one of the retried checks
        processor.pull_request.last_processed_sha = '3' * 40
        assert len(checks_status.retrying) == 1
        checks_status.retrying[0].last_errored_id = 82

    fake_retrier = FakeRetrier(pr_processor, retry_func=mutate_db_objects)
    fake_notifier = FakeNotifier(pr_processor)
    pr_processor.run(db_session, fake_retrier, fake_notifier)
    github.assert_exhausted()

    # the callback's mutations should be visible in the DB
    assert_pr_equal(pr, db_session.query(PullRequest).all()[0],
                    ('3' * 40, 'pending'))
    assert_checks_equal(pr, db_session.query(Check).all(),
                        ['coucou', ('blah', 82, 1)])
def test_it_ignores_checks_marked_as_such(db_session, default_config):
    """Checks the config marks as ignored (e.g. codecov/patch) are never
    retried, and the PR is considered successful without them."""
    pr = PullRequest('moby/moby', 34567)
    github = FakeGithubClient(
        pr, ['1' * 40, '1' * 40],
        [[('coucou', 'success', 12), ('codecov/patch', 'error', 28)]])
    pr_processor = PullRequestProcessor(pr, github, default_config)
    fake_retrier = FakeRetrier(pr_processor)
    fake_notifier = FakeNotifier(pr_processor)

    pr_processor.run(db_session, fake_retrier, fake_notifier)

    assert_pr_equal(pr, pr, ('1' * 40, 'successful'))
    # nothing should have been retried...
    assert len(fake_retrier.retried) == 0
    # ...but cleanup should have happened
    assert fake_retrier.cleanup_count == 1
    # and the DB should reflect the success, with the ignored check absent
    assert_pr_equal(pr, db_session.query(PullRequest).all()[0],
                    ('1' * 40, 'successful'))
    assert_checks_equal(pr, db_session.query(Check).all(), ['coucou'])

    # a second run should be a no-op
    pr_processor.run(db_session, fake_retrier, fake_notifier)
    github.assert_exhausted()
def test_pending_retry_checks_are_retriggered_after_a_while(
        db_session, default_config):
    """A check stuck in retry-pending long enough gets re-triggered."""
    with patch.object(Datetime, 'now') as patched_now:
        # two timestamps 6 minutes apart, one per processor run
        clock = Generator(datetime.datetime(2019, 1, 1, 12, 12),
                          datetime.datetime(2019, 1, 1, 12, 18))
        patched_now.side_effect = clock.next

        pr = PullRequest('moby/moby', 34567)
        github = FakeGithubClient(
            pr, ['1' * 40, '1' * 40],
            [[('coucou', 'pending', 12), ('blah', 'error', 28)], []])
        pr_processor = PullRequestProcessor(pr, github, default_config)
        fake_retrier = FakeRetrier(pr_processor)
        fake_notifier = FakeNotifier(pr_processor)

        # run twice; the second run happens "6 minutes later"
        pr_processor.run(db_session, fake_retrier, fake_notifier)
        pr_processor.run(db_session, fake_retrier, fake_notifier)
        github.assert_exhausted()
        clock.assert_exhausted()

        # both runs should have retried the same errored check
        assert len(fake_retrier.retried) == 2
        for checks_status in fake_retrier.retried:
            assert_checks_equal(pr, checks_status.retrying, [('blah', 28, 1)])
            assert_checks_equal(pr, checks_status.pending, ['coucou'])
        assert len(fake_retrier.retried[0]) == 2
        assert fake_retrier.cleanup_count == 0
def test_basic_retry(db_session, default_config):
    """A single errored check triggers one retry and one notification."""
    pr = PullRequest('moby/moby', 34567)
    github = FakeGithubClient(
        pr, ['1' * 40],
        [[('coucou', 'pending', 12), ('blah', 'error', 28)]])
    pr_processor = PullRequestProcessor(pr, github, default_config)
    fake_retrier = FakeRetrier(pr_processor)
    fake_notifier = FakeNotifier(pr_processor)

    pr_processor.run(db_session, fake_retrier, fake_notifier)
    github.assert_exhausted()

    assert_pr_equal(pr, pr, ('1' * 40, 'pending'))

    # exactly one retry, covering both checks
    assert len(fake_retrier.retried) == 1
    checks_status = fake_retrier.retried[0]
    assert_checks_equal(pr, checks_status.retrying, [('blah', 28, 1)])
    assert_checks_equal(pr, checks_status.pending, ['coucou'])
    assert len(checks_status) == 2
    assert fake_retrier.cleanup_count == 0

    # exactly one notification, about that same retry
    assert len(fake_notifier.retrying()) == 1
    assert fake_notifier.retrying()[0] is checks_status
    assert len(fake_notifier) == 1

    # and the DB should reflect the retry
    assert_pr_equal(pr, db_session.query(PullRequest).all()[0],
                    ('1' * 40, 'pending'))
    assert_checks_equal(pr, db_session.query(Check).all(),
                        ['coucou', ('blah', 28, 1)])
def test_pending_retry_checks_are_left_alone(db_session, default_config):
    """A check already being retried is not re-triggered while still pending,
    but a newly errored check still gets its own retry."""
    pr = PullRequest('moby/moby', 34567)
    github = FakeGithubClient(
        pr, ['1' * 40, '1' * 40, '1' * 40],
        [[('coucou', 'pending', 12), ('blah', 'error', 28)],
         [],
         [('coucou', 'error', 12)]])
    pr_processor = PullRequestProcessor(pr, github, default_config)
    fake_retrier = FakeRetrier(pr_processor)
    fake_notifier = FakeNotifier(pr_processor)

    # two back-to-back runs
    pr_processor.run(db_session, fake_retrier, fake_notifier)
    pr_processor.run(db_session, fake_retrier, fake_notifier)
    assert_pr_equal(pr, pr, ('1' * 40, 'pending'))

    # everything should look exactly as after a single run
    assert len(fake_retrier.retried) == 1
    first_status = fake_retrier.retried[0]
    assert_checks_equal(pr, first_status.retrying, [('blah', 28, 1)])
    assert_checks_equal(pr, first_status.pending, ['coucou'])
    assert len(first_status) == 2
    assert fake_retrier.cleanup_count == 0
    assert len(fake_notifier.retrying()) == 1
    assert fake_notifier.retrying()[0] is first_status
    assert len(fake_notifier) == 1

    # and so should the DB
    assert_pr_equal(pr, db_session.query(PullRequest).all()[0],
                    ('1' * 40, 'pending'))
    assert_checks_equal(pr, db_session.query(Check).all(),
                        ['coucou', ('blah', 28, 1)])

    # 3rd run: 'coucou' now errors and gets retried in turn
    pr_processor.run(db_session, fake_retrier, fake_notifier)
    github.assert_exhausted()
    assert len(fake_retrier.retried) == 2
    second_status = fake_retrier.retried[1]
    assert_checks_equal(pr, second_status.retrying, [('coucou', 12, 1)])
    assert_checks_equal(pr, second_status.retry_pending, [('blah', 28, 1)])
    assert len(second_status) == 2
    assert fake_retrier.cleanup_count == 0
def test_resume_after_failure_if_new_patch(db_session, default_config):
    """A PR that exhausted its retries is processed again once a new SHA
    ('2' * 40) shows up."""
    pull_request = PullRequest('moby/moby', 34567)
    gh_client = FakeGithubClient(
        pull_request, ['1' * 40, '1' * 40, '2' * 40],
        [[('coucou', 'pending', 12), ('fast_fail', 'error', 28)],
         [('fast_fail', 'error', 82)],
         [('coucou', 'pending', 13), ('fast_fail', 'error', 93)]])
    processor = PullRequestProcessor(pull_request, gh_client, default_config)
    retrier = FakeRetrier(processor)
    notifier = FakeNotifier(processor)

    # run twice on the same SHA: the 2nd failure exceeds the retry budget
    processor.run(db_session, retrier, notifier)
    processor.run(db_session, retrier, notifier)

    # we should only have retried once
    assert len(retrier.retried) == 1
    assert_checks_equal(pull_request, retrier.retried[0].retrying,
                        [('fast_fail', 28, 1)])
    assert_checks_equal(pull_request, retrier.retried[0].pending, ['coucou'])
    assert len(retrier.retried[0]) == 2
    # and we should have cleaned up
    assert retrier.cleanup_count == 1
    assert_pr_equal(pull_request, db_session.query(PullRequest).all()[0],
                    ('1' * 40, 'failed'))

    # now let's run again, it's a new patch
    processor.run(db_session, retrier, notifier)
    gh_client.assert_exhausted()

    # we should have retried again
    assert len(retrier.retried) == 2
    assert_checks_equal(pull_request, retrier.retried[1].retrying,
                        [('fast_fail', 93, 1)])
    assert_checks_equal(pull_request, retrier.retried[1].pending, ['coucou'])
    # fixed: this assertion previously re-checked retried[0] (copy-paste slip);
    # the retry under test here is retried[1]
    assert len(retrier.retried[1]) == 2
    assert_pr_equal(pull_request, db_session.query(PullRequest).all()[0],
                    ('2' * 40, 'pending'))
def test_too_many_failures(db_session, default_config):
    """After the retry budget is exhausted the PR is failed and reported."""
    pr = PullRequest('moby/moby', 34567)
    github = FakeGithubClient(
        pr, ['1' * 40, '1' * 40, '1' * 40],
        [[('coucou', 'pending', 12), ('fast_fail', 'error', 28)],
         [('fast_fail', 'error', 82)]])
    pr_processor = PullRequestProcessor(pr, github, default_config)
    fake_retrier = FakeRetrier(pr_processor)
    fake_notifier = FakeNotifier(pr_processor)

    # two runs: the 2nd failure blows the retry budget
    pr_processor.run(db_session, fake_retrier, fake_notifier)
    pr_processor.run(db_session, fake_retrier, fake_notifier)

    # only one actual retry should have happened
    assert len(fake_retrier.retried) == 1
    first_status = fake_retrier.retried[0]
    assert_checks_equal(pr, first_status.retrying, [('fast_fail', 28, 1)])
    assert_checks_equal(pr, first_status.pending, ['coucou'])
    assert len(first_status) == 2
    # and cleanup should have run
    assert fake_retrier.cleanup_count == 1

    # one "retrying" notification plus one "too many failures" notification
    assert len(fake_notifier.retrying()) == 1
    assert fake_notifier.retrying()[0] is first_status
    assert len(fake_notifier.too_many_failures()) == 1
    failure_status = fake_notifier.too_many_failures()[0]
    assert_checks_equal(pr, failure_status.too_many_failures,
                        [('fast_fail', 82, 2)])
    assert_checks_equal(pr, failure_status.pending, ['coucou'])
    assert len(failure_status) == 2
    assert len(fake_notifier) == 2
    assert_pr_equal(pr, db_session.query(PullRequest).all()[0],
                    ('1' * 40, 'failed'))

    # a 3rd run should be a no-op
    pr_processor.run(db_session, fake_retrier, fake_notifier)
    github.assert_exhausted()
def _process_prs(cls, to_process, gh_client, config, db_session):
    """Run the full processing pipeline on every PR in `to_process`."""
    # a single notifier instance is shared across all PRs
    mail_notifier = MailgunNotifier()
    for pull_request in to_process:
        pr_processor = PullRequestProcessor(pull_request, gh_client, config)
        pr_processor.run(db_session,
                         cls._build_retrier(config, pull_request),
                         mail_notifier)
# NOTE(review): orphaned fragment — the enclosing loop/function starts before
# this chunk, so its real indentation is unknown; re-join with its context.
# It appears to skip multi-word contexts and delete comments for contexts no
# longer retry-pending — TODO confirm against the enclosing method.
if ' ' in context:
    continue
if retry_pending is None or context not in retry_pending:
    comment.delete()


# Ad-hoc manual smoke tests against the live GitHub API; the `if False:`
# guards are toggled by hand when debugging a specific code path.
if __name__ == '__main__':
    from github import Github
    from pr_processor import PullRequestProcessor

    pull_request = PullRequest('kubernetes/kubernetes', 77953)
    # pull_request = PullRequest('moby/moby', 38349)
    config = Config()
    gh_client = Github(config.get('github', 'api_token'))
    processor = PullRequestProcessor(pull_request, gh_client, config)

    if False:
        # exercise the amend-and-push retry path
        GitAmendPushRetrier().retry(processor, None)
        print(pull_request.last_processed_sha)

    if False:
        # exercise comment listing / posting / deletion
        print(CommentsRetrier._get_all_comments_by_user(processor))
        new_comment = CommentsRetrier._post_comment(processor, 'coucou')
        print(new_comment)
        new_comment.delete()

    if False:
        # NOTE(review): this guarded block may continue past this chunk
        from models import Check
        from pr_processor import PullRequestChecksStatus