def handle_pushes(repo_handler, payload, headers):
    """
    Handle GitHub ``push`` webhook events.

    Dispatches the payload to every registered handler in ``PUSH_HANDLERS``,
    unless the plugin is disabled in the repository configuration.

    Parameters
    ----------
    repo_handler : RepoHandler
        Handler for the repository that was pushed to.
    payload : dict
        Parsed webhook payload; ``payload['ref']`` names the pushed ref.
    headers : dict
        Webhook HTTP headers; ``X-GitHub-Event`` identifies the event type.

    Returns
    -------
    str
        A short human-readable status message.
    """
    event = headers['X-GitHub-Event']

    # BUGFIX: the original used ``event not in ('push')``. ``('push')`` is
    # just the string 'push', so this was a substring test — e.g. an event
    # named 'p' or 'us' would incorrectly be treated as a push. Compare for
    # equality instead.
    if event != 'push':
        return "Not a push event"

    # Get the ref for the push - could be e.g. a branch or a tag
    git_ref = payload['ref']

    # If we are on a branch, make a new repo handler with the correct branch
    if git_ref.startswith('refs/heads/'):
        branch = git_ref.replace('refs/heads/', '')
        repo_handler = RepoHandler(repo_handler.repo, branch,
                                   repo_handler.installation)

    # Get configuration for this plugin
    push_config = repo_handler.get_config_value("pushes", {})
    if not push_config.get("enabled", False):
        return "Skipping commit handlers, disabled in configuration file"

    for function in PUSH_HANDLERS:
        function(repo_handler, git_ref)

    return 'Finished handling push event'
def process_pull_requests(repository, installation, warn_seconds=None, close_seconds=None): now = time.time() # Find app name bot_name = get_app_name() # Get issues labeled as 'Close?' repo = RepoHandler(repository, 'master', installation) pull_requests = repo.open_pull_requests() for n in pull_requests: print(f'Checking {n}') pr = PullRequestHandler(repository, n, installation) if 'keep-open' in pr.labels: print('-> PROTECTED by label, skipping') continue commit_time = pr.last_commit_date time_since_last_commit = now - commit_time # Note: if warning time is before commit time, it's as if the warning # didn't exist since it's no longer relevant. warning_time = pr.last_comment_date(f'{bot_name}[bot]', filter_keep=is_close_warning) if warning_time is None or warning_time < commit_time: time_since_last_warning = -1. else: # We use max() here to make sure that the value is positive time_since_last_warning = max(0, now - warning_time) # We only close pull requests if there has been a warning before, and # the time since the warning exceeds the threshold specified by # close_seconds. if time_since_last_warning > close_seconds: comment_ids = pr.find_comments(f'{bot_name}[bot]', filter_keep=is_close_epilogue) if len(comment_ids) == 0: print(f'-> CLOSING pull request {n}') pr.set_labels(['closed-by-bot']) pr.submit_comment(PULL_REQUESTS_CLOSE_EPILOGUE) pr.close() else: print(f'-> Skipping pull request {n} (already closed)') elif time_since_last_commit > warn_seconds: # A negative time_since_last_warning means no warning since last commit. if time_since_last_warning < 0.: print(f'-> WARNING pull request {n}') pr.submit_comment(PULL_REQUESTS_CLOSE_WARNING.format(pasttime=naturaldelta(time_since_last_commit), futuretime=naturaldelta(close_seconds))) else: print(f'-> Skipping pull request {n} (already warned)') else: print(f'-> OK pull request {n}') print('Finished checking for stale pull requests')
def process_issues(repository, installation, warn_seconds=None, close_seconds=None): now = time.time() # Find app name bot_name = get_app_name() # Get issues labeled as 'Close?' repo = RepoHandler(repository, 'master', installation) issuelist = repo.get_issues('open', 'Close?') for n in issuelist: print(f'Checking {n}') issue = IssueHandler(repository, n, installation) labeled_time = issue.get_label_added_date('Close?') if labeled_time is None: continue dt = now - labeled_time if dt > close_seconds: comment_ids = issue.find_comments(f'{bot_name}[bot]', filter_keep=is_close_epilogue) if len(comment_ids) == 0: print(f'-> CLOSING issue {n}') issue.set_labels(['closed-by-bot']) issue.submit_comment(ISSUE_CLOSE_EPILOGUE) issue.close() else: print(f'-> Skipping issue {n} (already closed)') elif dt > warn_seconds: comment_ids = issue.find_comments(f'{bot_name}[bot]', filter_keep=is_close_warning) if len(comment_ids) == 0: print(f'-> WARNING issue {n}') issue.submit_comment( ISSUE_CLOSE_WARNING.format( pasttime=naturaltime(dt), futuretime=naturaldelta(close_seconds - warn_seconds))) else: print(f'-> Skipping issue {n} (already warned)') else: print(f'-> OK issue {n}') print('Finished checking for stale issues')
def circleci_handler():
    """
    Flask view that receives CircleCI webhook notifications.

    Validates the payload, resolves the GitHub App installation for the
    repository the build belongs to, and dispatches to every registered
    handler in ``CIRCLECI_WEBHOOK_HANDLERS``.

    Returns
    -------
    str
        A short human-readable status message.
    """
    if not request.data:
        return "No payload received"

    payload = json.loads(request.data)['payload']

    # Validate we have the keys we need, otherwise ignore the push
    required_keys = {'vcs_revision', 'username', 'reponame', 'status',
                     'build_num'}
    if not required_keys.issubset(payload.keys()):
        return 'Payload missing {}'.format(' '.join(required_keys - payload.keys()))

    # Get installation id
    repos = repo_to_installation_id_mapping()
    repo = f"{payload['username']}/{payload['reponame']}"

    if repo not in repos:
        return f"circleci: Not installed for {repo}"

    repo_handler = RepoHandler(repo, branch="master", installation=repos[repo])

    for handler in CIRCLECI_WEBHOOK_HANDLERS:
        handler(repo_handler, payload, request.headers)

    # BUGFIX: response message was misspelled ("CirleCI Webhook Finsihed").
    return "CircleCI Webhook Finished"
def autolabel(pr_handler, repo_handler):
    """Automatically label a pull request based on the files it modifies.

    Reads the ``autolabel`` section of the upstream repository's config,
    derives candidate labels (currently subpackage-based ones) from the
    modified files, and sets the union of existing and new labels on the
    pull request. Does nothing if the plugin is disabled in config.
    """
    print(f'Running auto-labeller for {pr_handler.repo}#{pr_handler.number}')

    # Note: repo_handler is the fork, we need to use the upstream repository
    # for some tasks below.
    upstream = RepoHandler(pr_handler.repo,
                           installation=pr_handler.installation)

    config = upstream.get_config_value("autolabel", {})
    if not config.get("enabled", True):
        print("Skipping PR autolabel, disabled in config.")
        return

    modified = pr_handler.get_modified_files()
    print(' Modified files:')
    for path in modified:
        print(f' - {path}')

    available = upstream.get_all_labels()
    print(' All labels: ' + ', '.join(available))

    current = set(pr_handler.labels)
    print(' Pull request labels: ' + ', '.join(current))

    additions = set()
    if config.get('subpackages', True):
        additions = additions.union(get_subpackage_labels(modified, available))

    # TODO: add other auto-labeling logic here

    if additions:
        combined = list(current.union(additions))
        print(' Final labels to set: ' + ', '.join(combined))
        pr_handler.set_labels(combined)

    return None
class TestRepoHandler:
    """Unit tests for ``RepoHandler`` with all HTTP access mocked out."""

    def setup_class(self):
        # Shared handler pointing at a repository that does not exist; safe
        # because every request in these tests is intercepted by a mock.
        self.repo = RepoHandler('fakerepo/doesnotexist', branch='awesomebot')

    @patch('requests.get')
    def test_get_issues(self, mock_get):
        # http://engineroom.trackmaven.com/blog/real-life-mocking/
        # Issue 55 carries a 'pull_request' key, i.e. it is really a PR and
        # should be excluded unless exclude_pr=False is passed.
        mock_response = Mock()
        mock_response.json.return_value = [
            {'number': 42, 'state': 'open'},
            {'number': 55, 'state': 'open',
             'pull_request': {'diff_url': 'blah'}}]
        mock_get.return_value = mock_response
        assert self.repo.get_issues('open', 'Close?') == [42]
        assert self.repo.get_issues('open', 'Close?',
                                    exclude_pr=False) == [42, 55]

    @patch('requests.get')
    def test_get_all_labels(self, mock_get):
        # The label listing should preserve the API's ordering of names.
        mock_response = Mock()
        mock_response.json.return_value = [
            {'name': 'io.fits'},
            {'name': 'Documentation'}]
        # Empty headers: no pagination 'Link' header, so a single page.
        mock_response.headers = {}
        mock_get.return_value = mock_response
        assert self.repo.get_all_labels() == ['io.fits', 'Documentation']

    def test_urls(self):
        # Sanity-check the derived API URLs and the (unauthenticated) headers.
        assert self.repo._url_contents == 'https://api.github.com/repos/fakerepo/doesnotexist/contents/'
        assert self.repo._url_pull_requests == 'https://api.github.com/repos/fakerepo/doesnotexist/pulls'
        assert self.repo._headers == {}
def setup_method(self, method):
    """Install the mocks these tests rely on and reset the file cache."""
    contents_target = 'baldrick.github.github_api.GitHubHandler.get_file_contents'
    status_target = 'baldrick.github.github_api.RepoHandler.set_status'
    artifacts_target = 'baldrick.plugins.circleci_artifacts.get_artifacts_from_build'

    self.get_file_contents_mock = patch(contents_target)

    self.set_status_mock = patch(status_target)
    self.set_status = self.set_status_mock.start()

    self.get_artifacts_mock = patch(artifacts_target)
    self.get_artifacts = self.get_artifacts_mock.start()
    # No artifacts by default; individual tests override as needed.
    self.get_artifacts.return_value = []

    self.repo_handler = RepoHandler("nota/repo", "1234")

    self.get_file_contents = self.get_file_contents_mock.start()

    # File contents are cached globally; start each test from a clean slate.
    FILE_CACHE.clear()
def setup_method(self, method):
    """Patch PR config/branch/milestone access and reset the file cache."""
    self.get_file_contents_mock = patch(
        'baldrick.github.github_api.PullRequestHandler.get_file_contents')

    self.get_base_branch_mock = patch(
        'baldrick.github.github_api.PullRequestHandler.base_branch')
    base_branch = self.get_base_branch_mock.start()
    base_branch.return_value = "master"

    # milestone is a property, so it needs a PropertyMock to be patched.
    self.milestone_mock = patch(
        'baldrick.github.github_api.PullRequestHandler.milestone',
        new_callable=PropertyMock)

    self.repo_handler = RepoHandler("nota/repo", "1234")
    self.pr_handler = PullRequestHandler("nota/repo", "1234")

    self.milestone = self.milestone_mock.start()
    # No milestone assigned by default; tests override as needed.
    self.milestone.return_value = None

    self.get_file_contents = self.get_file_contents_mock.start()

    # File contents are cached globally; start each test from a clean slate.
    FILE_CACHE.clear()
def setup_method(self, method):
    """Patch PR config/branch/modified-files access and reset the cache."""
    self.get_file_contents_mock = patch(
        'baldrick.github.github_api.PullRequestHandler.get_file_contents')

    self.get_base_branch_mock = patch(
        'baldrick.github.github_api.PullRequestHandler.base_branch')
    base_branch = self.get_base_branch_mock.start()
    base_branch.return_value = "master"

    self.modified_files_mock = patch(
        'baldrick.github.github_api.PullRequestHandler.get_modified_files')

    self.repo_handler = RepoHandler("nota/repo", "1234")
    self.pr_handler = PullRequestHandler("nota/repo", "1234")

    self.get_file_contents = self.get_file_contents_mock.start()
    self.modified_files = self.modified_files_mock.start()

    # File contents are cached globally; start each test from a clean slate.
    FILE_CACHE.clear()
def github_webhook():
    """Flask view: dispatch a GitHub webhook to every registered handler.

    Requires an ``installation`` key in the payload to resolve the GitHub
    App installation; otherwise the event is ignored.
    """
    if not request.data:
        return "No payload received"

    # Parse the JSON sent by GitHub
    payload = json.loads(request.data)

    if 'installation' not in payload:
        return "No installation key found in payload"
    installation = payload['installation']['id']

    repo_name = payload['repository']['full_name']
    repo = RepoHandler(repo_name, installation=installation)

    for handler in GITHUB_WEBHOOK_HANDLERS:
        handler(repo, payload, request.headers)

    return "GitHub Webhook Finished"
def process_issues(repository, installation, warn_seconds=None, close_seconds=None): now = time.time() # Find app name bot_name = get_app_name() # Get issues labeled as 'Close?' repo = RepoHandler(repository, 'master', installation) issuelist = repo.get_issues('open', 'Close?') for n in issuelist: print(f'Checking {n}') issue = IssueHandler(repository, n, installation) labeled_time = issue.get_label_added_date('Close?') if labeled_time is None: continue time_since_close_label = now - labeled_time # Note: if warning time is before label time, it's as if the warning # didn't exist since it's no longer relevant. warning_time = issue.last_comment_date(f'{bot_name}[bot]', filter_keep=is_close_warning) if warning_time is None or warning_time < labeled_time: time_since_last_warning = -1. else: # We use max() here to make sure that the value is positive time_since_last_warning = max(0, now - warning_time) # We only close issues if there has been a warning before, and # the time since the warning exceeds the threshold specified by # close_seconds. if time_since_last_warning > close_seconds: comment_ids = issue.find_comments(f'{bot_name}[bot]', filter_keep=is_close_epilogue) if len(comment_ids) == 0: print(f'-> CLOSING issue {n}') issue.set_labels(['closed-by-bot']) issue.submit_comment(ISSUE_CLOSE_EPILOGUE) issue.close() else: print(f'-> Skipping issue {n} (already closed)') elif time_since_close_label > warn_seconds: comment_ids = issue.find_comments(f'{bot_name}[bot]', filter_keep=is_close_warning) if len(comment_ids) == 0: print(f'-> WARNING issue {n}') issue.submit_comment( ISSUE_CLOSE_WARNING.format( pasttime=naturaltime(time_since_close_label), futuretime=naturaldelta(close_seconds))) else: print(f'-> Skipping issue {n} (already warned)') else: print(f'-> OK issue {n}') print('Finished checking for stale issues')
def setup_class(self):
    """Create the repository handler shared by all tests in this class."""
    repo_name = 'astropy/astropy-bot'
    self.repo = RepoHandler(repo_name)
class TestRealRepoHandler:
    """Tests for ``RepoHandler`` configuration and file-content access,
    with ``get_file_contents`` patched to return fixture YAML/TOML."""

    def setup_method(self, method):
        # Config lookups are cached; clear between tests for isolation.
        cfg_cache.clear()

    def setup_class(self):
        self.repo = RepoHandler('astropy/astropy-bot')

    def test_get_config(self, app):
        with app.app_context():
            with patch.object(self.repo, 'get_file_contents') as mock_get:  # noqa
                mock_get.return_value = TEST_CONFIG
                # Values must come from the fixture config, not from any
                # defaults.
                assert self.repo.get_config_value('pr')['setting1'] == 2
                assert self.repo.get_config_value('pr')['setting2'] == 3

    def test_get_fallback_config(self, app):
        with app.app_context():
            # The fallback tool section should be consulted when the app is
            # configured with a fall_back_config name.
            app.fall_back_config = "nottestbot"
            with patch.object(self.repo, 'get_file_contents') as mock_get:  # noqa
                mock_get.return_value = TEST_FALLBACK_CONFIG
                # Values must come from the fixture config, not from any
                # defaults.
                assert self.repo.get_config_value('pr')['setting1'] == 2
                assert self.repo.get_config_value('pr')['setting2'] == 3
                assert self.repo.get_config_value('pr')['setting3'] == 4

    def test_get_config_with_app_defaults(self, app):
        with app.app_context():
            with patch.object(self.repo, 'get_file_contents') as mock_get:  # noqa
                mock_get.return_value = TEST_CONFIG
                # Without app-level defaults, only the repo config is seen.
                assert self.repo.get_config_value('pr') == {
                    'setting1': 2, 'setting2': 3}
                assert self.repo.get_config_value('other') is None
                # With app-level defaults loaded, repo values win and the
                # defaults fill in the missing keys/sections.
                app.conf = loads(TEST_GLOBAL_CONFIG, tool='testbot')
                assert self.repo.get_config_value('pr') == {
                    'setting1': 2, 'setting2': 3, 'setting3': 6}
                assert self.repo.get_config_value('other') == {'setting4': 5}

    @patch('requests.get')
    def test_get_file_contents(self, mock_get):
        # The GitHub contents API returns base64-encoded content; the
        # handler is expected to decode it to str.
        content = b"I, for one, welcome our new robot overlords"
        mock_response = Mock()
        mock_response.ok = True
        mock_response.json.return_value = {
            'content': base64.b64encode(content)}
        mock_get.return_value = mock_response
        result = self.repo.get_file_contents('some/file/here.txt')
        assert result == content.decode('utf-8')

    @patch('requests.get')
    def test_missing_file_contents(self, mock_get):
        # A non-ok response with a 'Not Found' message should surface as
        # FileNotFoundError rather than a raw HTTP error.
        mock_response = Mock()
        mock_response.ok = False
        mock_response.json.return_value = {'message': 'Not Found'}
        mock_get.return_value = mock_response
        with pytest.raises(FileNotFoundError):
            self.repo.get_file_contents('some/file/here.txt')
def setup_class(self):
    """Create the shared handler; the repo is fake since tests are mocked."""
    repo_name = 'fakerepo/doesnotexist'
    self.repo = RepoHandler(repo_name, branch='awesomebot')
def process_pull_request(repository, number, installation):
    """
    Run all registered pull request checks and report the results.

    Depending on the ``post_pr_comment`` configuration, results are either
    aggregated into a single bot comment plus one combined commit status,
    or posted as one commit status per check.

    Parameters
    ----------
    repository : str
        The ``owner/name`` of the repository containing the pull request.
    number : int
        The pull request number.
    installation : int
        GitHub App installation id used for authentication.
    """
    # TODO: cache handlers and invalidate the internal cache of the handlers on
    # certain events.
    pr_handler = PullRequestHandler(repository, number, installation)

    pr_config = pr_handler.get_config_value("pull_requests", {})
    post_comment = pr_config.get("post_pr_comment", False)
    pull_request_substring = pr_config.get('pull_request_substring', '')

    # Disable if the config is not present
    # NOTE(review): pr_config comes from get_config_value with a {} default
    # and has already been used above, so this check looks unreachable —
    # confirm before relying on it.
    if pr_config is None:
        return

    # Don't comment on closed PR
    if pr_handler.is_closed:
        return "Pull request already closed, no need to check"

    # Use the head (fork) repository/branch for check functions.
    repo_handler = RepoHandler(pr_handler.head_repo_name,
                               pr_handler.head_branch, installation)

    def is_previous_comment(message):
        # When a substring is configured, only comments containing it count
        # as previous bot comments; otherwise any bot comment matches.
        if len(pull_request_substring) > 0:
            return pull_request_substring in message
        else:
            return True

    # Find previous comments by this app
    comment_ids = pr_handler.find_comments(f'{current_app.bot_username}[bot]',
                                           filter_keep=is_previous_comment)

    # Re-use the most recent previous comment (edit in place) if one exists.
    if len(comment_ids) == 0:
        comment_id = None
    else:
        comment_id = comment_ids[-1]

    # First check whether there are labels that indicate the checks should be
    # skipped
    skip_labels = pr_config.get("skip_labels", [])
    skip_fails = pr_config.get("skip_fails", True)

    for label in pr_handler.labels:
        if label in skip_labels:
            skip_message = pr_config.get(
                "skip_message", "Pull request checks have "
                "been skipped as this pull request has been "
                f"labelled as **{label}**")
            skip_message = skip_message.format(pr_handler=pr_handler,
                                               repo_handler=repo_handler)
            pr_handler.submit_comment(skip_message, comment_id=comment_id)
            # Optionally mark the combined status as failed when skipping.
            if skip_fails:
                pr_handler.set_status(
                    'failure',
                    "Skipping checks due to {0} label".format(label),
                    current_app.bot_username)
            return

    # Run every registered check and merge their result dictionaries.
    results = {}
    for function in PULL_REQUEST_CHECKS:
        result = function(pr_handler, repo_handler)
        results.update(result)

    failures = [
        details['description'] for details in results.values()
        if details['state'] in ('error', 'failure')
    ]

    if post_comment:

        # Post all failures in a comment, and have a single status check

        if failures:

            pull_request_prologue = pr_config.get('fail_prologue', '')
            pull_request_epilogue = pr_config.get('fail_epilogue', '')
            fail_status = pr_config.get('fail_status', 'Failed some checks')

            # Build the comment: prologue, bullet list of failures, epilogue.
            message = pull_request_prologue.format(pr_handler=pr_handler,
                                                   repo_handler=repo_handler)
            for failure in failures:
                message += f'* {failure}\n'
            message += pull_request_epilogue.format(pr_handler=pr_handler,
                                                    repo_handler=repo_handler)

            comment_url = pr_handler.submit_comment(message,
                                                    comment_id=comment_id,
                                                    return_url=True)

            # Single combined failure status linking to the comment.
            pr_handler.set_status('failure', fail_status,
                                  current_app.bot_username,
                                  target_url=comment_url)

        else:

            pass_status = pr_config.get('pass_status', 'Passed all checks')

            all_passed_message = pr_config.get('all_passed_message', '')
            all_passed_message = all_passed_message.format(
                pr_handler=pr_handler, repo_handler=repo_handler)

            # Only post a comment on success if one is configured.
            if all_passed_message:
                pr_handler.submit_comment(all_passed_message,
                                          comment_id=comment_id)

            pr_handler.set_status('success', pass_status,
                                  current_app.bot_username)

    else:

        # Post each failure as a status

        existing_statuses = pr_handler.list_statuses()

        for context, details in sorted(results.items()):

            full_context = current_app.bot_username + ':' + context

            # Don't post again if status hasn't changed
            if full_context in existing_statuses:
                existing_details = existing_statuses[full_context]
                if (details['state'] == existing_details['state'] and
                        details['description'] == existing_details['description'] and
                        details.get('target_url') == existing_details.get('target_url')):
                    continue

            pr_handler.set_status(details['state'], details['description'],
                                  full_context,
                                  target_url=details.get('target_url'))

        # For statuses that have been skipped this time but existed before, set
        # status to pass and set message to say skipped
        for full_context in existing_statuses:
            if full_context.startswith(current_app.bot_username + ':'):
                context = full_context[len(current_app.bot_username) + 1:]
                if context not in results:
                    pr_handler.set_status(
                        'success', 'This check has been skipped',
                        current_app.bot_username + ':' + context)

    return 'Finished pull requests checks'
def process_pull_request(repository, number, installation, is_new=False):
    """
    Run all registered pull request checks and post one status per check.

    Parameters
    ----------
    repository : str
        The ``owner/name`` of the repository containing the pull request.
    number : int
        The pull request number.
    installation : int
        GitHub App installation id used for authentication.
    is_new : bool, optional
        Whether the pull request was just opened; controls the frequency
        of the optional "not boring" comment.
    """
    # TODO: cache handlers and invalidate the internal cache of the handlers on
    # certain events.
    pr_handler = PullRequestHandler(repository, number, installation)

    pr_config = pr_handler.get_config_value("pull_requests", {})
    if not pr_config.get("enabled", False):
        return "Skipping PR checks, disabled in config."

    # Disable if the config is not present
    # NOTE(review): pr_config comes from get_config_value with a {} default
    # and has already been used above, so this check looks unreachable —
    # confirm before relying on it.
    if pr_config is None:
        return

    # Don't comment on closed PR
    if pr_handler.is_closed:
        return "Pull request already closed, no need to check"

    # Use the head (fork) repository/branch for check functions.
    repo_handler = RepoHandler(pr_handler.head_repo_name,
                               pr_handler.head_branch, installation)

    # First check whether there are labels that indicate the checks should be
    # skipped
    skip_labels = pr_config.get("skip_labels", [])
    skip_fails = pr_config.get("skip_fails", True)

    for label in pr_handler.labels:
        if label in skip_labels:
            # Optionally mark the combined status as failed when skipping.
            if skip_fails:
                pr_handler.set_status(
                    'failure',
                    "Skipping checks due to {0} label".format(label),
                    current_app.bot_username)
            return

    # Run every registered check and merge their result dictionaries.
    results = {}
    for function in PULL_REQUEST_CHECKS:
        result = function(pr_handler, repo_handler)
        # Ignore skipped checks
        if result is not None:
            results.update(result)

    # Special message for a special day
    not_boring = pr_handler.get_config_value('not_boring', cfg_default=True)
    if not_boring:  # pragma: no cover
        special_msg = ''
        if is_new:  # Always be snarky for new PR
            special_msg = insert_special_message('')
        else:
            import random
            tensided_dice_roll = random.randrange(10)
            if tensided_dice_roll == 9:  # 1 out of 10 for subsequent remarks
                special_msg = insert_special_message('')
        if special_msg:
            pr_handler.submit_comment(special_msg)

    # Post each failure as a status

    existing_statuses = pr_handler.list_statuses()

    for context, details in sorted(results.items()):

        full_context = current_app.bot_username + ':' + context

        # NOTE: we could in principle check if the status has been posted
        # before, and if so not post it again, but we had this in the past
        # and there were some strange caching issues where GitHub would
        # return old status messages, so we avoid doing that.

        pr_handler.set_status(details['state'], details['description'],
                              full_context,
                              target_url=details.get('target_url'))

    # For statuses that have been skipped this time but existed before, set
    # status to pass and set message to say skipped
    for full_context in existing_statuses:
        if full_context.startswith(current_app.bot_username + ':'):
            context = full_context[len(current_app.bot_username) + 1:]
            if context not in results:
                pr_handler.set_status('success',
                                      'This check has been skipped',
                                      current_app.bot_username + ':' + context)

        # Also set the general 'single' status check as a skipped check if it
        # is present
        if full_context == current_app.bot_username:
            pr_handler.set_status('success',
                                  'This check has been skipped',
                                  current_app.bot_username)

    return 'Finished pull requests checks'
def process_pull_request(repository, number, installation, action,
                         is_new=False):
    """
    Run all registered pull request checks and report them via the GitHub
    Checks API.

    Check hooks may still return the legacy status-style keys
    (``description``/``state``); these are mapped to the check-run keys
    (``title``/``conclusion``) with a deprecation warning.

    Parameters
    ----------
    repository : str
        The ``owner/name`` of the repository containing the pull request.
    number : int
        The pull request number.
    installation : int
        GitHub App installation id used for authentication.
    action : str
        The webhook action that triggered processing; each check in
        ``PULL_REQUEST_CHECKS`` declares which actions it runs for.
    is_new : bool, optional
        Whether the pull request was just opened; controls the frequency
        of the optional "not boring" comment.
    """
    # TODO: cache handlers and invalidate the internal cache of the handlers on
    # certain events.
    pr_handler = PullRequestHandler(repository, number, installation)

    pr_config = pr_handler.get_config_value("pull_requests", {})
    if not pr_config.get("enabled", False):
        msg = "Skipping PR checks, disabled in config."
        logger.debug(msg)
        return msg

    # Don't comment on closed PR
    if pr_handler.is_closed:
        return "Pull request already closed, no need to check"

    # Use the head (fork) repository/branch for check functions.
    repo_handler = RepoHandler(pr_handler.head_repo_name,
                               pr_handler.head_branch, installation)

    # First check whether there are labels that indicate the checks should be
    # skipped
    skip_labels = pr_config.get("skip_labels", [])
    skip_fails = pr_config.get("skip_fails", True)

    for label in pr_handler.labels:
        if label in skip_labels:
            # Optionally post a failed check run explaining the skip.
            if skip_fails:
                pr_handler.set_check(
                    current_app.bot_username,
                    title="Skipping checks due to {0} label".format(label),
                    name=current_app.bot_username,
                    status='completed', conclusion='failure')
            return

    results = {}
    # Each check declares the webhook actions it applies to (None = all).
    for function, actions in PULL_REQUEST_CHECKS.items():
        if actions is None or action in actions:

            result = function(pr_handler, repo_handler)

            # Ignore skipped checks
            if result is not None:

                # Map old plugin keys to new checks names.
                # It's possible that the hook returns {}
                for context, check in result.items():
                    if check is not None:
                        # Legacy 'description' key becomes the check 'title'.
                        title = check.pop('description', None)
                        if title:
                            logger.warning(
                                f"'description' is deprecated as a key in the return value from {function},"
                                " it will be interpreted as 'title'")
                            check['title'] = title
                        check['title'] = check.pop('title', title)

                        # Legacy 'state' key becomes the check 'conclusion'.
                        conclusion = check.pop('state', None)
                        if conclusion:
                            logger.warning(
                                f"'state' is deprecated as a key in the return value from {function},"
                                "it will be interpreted as 'conclusion'.")
                            check['conclusion'] = conclusion
                        check['conclusion'] = check.pop(
                            'conclusion', conclusion)

                        result[context] = check

                results.update(result)

    # Get existing checks from our app, for the 'head' commit
    existing_checks = pr_handler.list_checks(only_ours=True)

    # For each existing check, see if it needs updating or skipping
    new_results = copy.copy(results)
    for external_id, check in existing_checks.items():
        if external_id in results.keys():
            details = new_results.pop(external_id)
            # Remove skip key.
            details.pop("skip_if_missing", False)
            # Update the previous check with the new check (this includes the check_id to update)
            check.update(details)
            # Send the check to be updated
            pr_handler.set_check(**check)
        else:
            # If check is in existing_checks but not results mark it as skipped.
            check.update({
                'title': 'This check has been skipped.',
                'status': 'completed',
                'conclusion': 'neutral'
            })
            pr_handler.set_check(**check)

    # Any keys left in results are new checks we haven't sent on this commit yet.
    for external_id, details in sorted(new_results.items()):
        # Checks flagged skip_if_missing are only ever updates, never created.
        skip = details.pop("skip_if_missing", False)
        logger.trace(f"{details} skip is {skip}")
        if not skip:
            pr_handler.set_check(external_id, status="completed", **details)

    # Also set the general 'single' status check as a skipped check if it
    # is present
    # NOTE(review): entries popped from new_results above have already been
    # sent, so this branch only fires if the bot-level check was never an
    # existing check — confirm this is the intended condition.
    if current_app.bot_username in new_results.keys():
        check = new_results[current_app.bot_username]
        check.update({
            'title': 'This check has been skipped.',
            'commit_hash': 'head',
            'status': 'completed',
            'conclusion': 'neutral'
        })
        pr_handler.set_check(**check)

    # Special message for a special day
    not_boring = pr_handler.get_config_value('not_boring', cfg_default=True)
    if not_boring:  # pragma: no cover
        special_msg = ''
        if is_new:  # Always be snarky for new PR
            special_msg = insert_special_message('')
        else:
            import random
            tensided_dice_roll = random.randrange(10)
            if tensided_dice_roll == 9:  # 1 out of 10 for subsequent remarks
                special_msg = insert_special_message('')
        if special_msg:
            pr_handler.submit_comment(special_msg)

    return 'Finished pull requests checks'