def github_pull_request_review():
    d = request.json
    if 'zen' in d:
        log.info(f'received zen: {d["zen"]}')
        return '', 200
    action = d['action']
    gh_pr = GitHubPR.from_gh_json(d['pull_request'])
    if action == 'submitted':
        state = d['review']['state'].lower()
        if state == 'changes_requested':
            prs.review(gh_pr, state)
        else:
            # FIXME: track all reviewers, then we don't need to talk to github
            prs.review(
                gh_pr,
                overall_review_state(
                    get_reviews(gh_pr.target_ref.repo,
                                gh_pr.number))['state'])
    elif action == 'dismissed':
        # FIXME: track all reviewers, then we don't need to talk to github
        prs.review(
            gh_pr,
            overall_review_state(
                get_reviews(gh_pr.target_ref.repo, gh_pr.number))['state'])
    else:
        log.info(f'ignoring pull_request_review with action {action}')
    return '', 200

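# `overall_review_state` and `get_reviews` are referenced above but not
# defined in this section. A minimal sketch of the aggregation the handler
# seems to rely on, assuming `get_reviews` yields GitHub review JSON in
# chronological order (all of this is an assumption, not the real helper):
def overall_review_state(reviews):
    latest_state_by_login = {}
    for review in reviews:
        state = review['state'].lower()
        # comments never change a reviewer's standing; later reviews
        # (including dismissals) override earlier ones
        if state != 'commented':
            latest_state_by_login[review['user']['login']] = state
    states = latest_state_by_login.values()
    if 'changes_requested' in states:
        state = 'changes_requested'
    elif 'approved' in states:
        state = 'approved'
    else:
        state = 'pending'
    return {'state': state, 'reviews': latest_state_by_login}
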
def refresh_ci_build_jobs(jobs):
    jobs = [(FQSHA.from_json(json.loads(job.attributes['source'])),
             FQSHA.from_json(json.loads(job.attributes['target'])),
             job)
            for job in jobs]
    jobs = [(s, t, j) for (s, t, j) in jobs if prs.exists(s, t)]
    latest_jobs = {}
    for (source, target, job) in jobs:
        key = (source, target)
        job2 = latest_jobs.get(key, None)
        if job2 is None:
            latest_jobs[key] = job
        else:
            if job_ordering(job, job2) > 0:
                log.info(f'cancelling {job2.id}, preferring {job.id}')
                try_to_cancel_job(job2)
                latest_jobs[key] = job
            else:
                log.info(f'cancelling {job.id}, preferring {job2.id}')
                try_to_cancel_job(job)
    prs.refresh_from_ci_jobs(latest_jobs)

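# `job_ordering` decides which of two jobs for the same key survives. A
# plausible sketch, assuming integer batch job ids that increase
# monotonically with creation time (an assumption, not confirmed here):
def job_ordering(job1, job2):
    # positive means job1 is preferred (newer), negative means job2 is
    return job1.id - job2.id
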
def refresh_deploy_jobs(jobs):
    jobs = [(FQSHA.from_json(json.loads(job.attributes['target'])), job)
            for job in jobs
            if 'target' in job.attributes]
    jobs = [(target, job)
            for (target, job) in jobs
            if target.ref in prs.deploy_jobs]
    latest_jobs = {}
    for (target, job) in jobs:
        job2 = latest_jobs.get(target, None)
        if job2 is None:
            latest_jobs[target] = job
        else:
            if job_ordering(job, job2) > 0:
                log.info(f'cancelling {job2.id}, preferring {job.id}')
                try_to_cancel_job(job2)
                latest_jobs[target] = job
            else:
                log.info(f'cancelling {job.id}, preferring {job2.id}')
                try_to_cancel_job(job)
    prs.refresh_from_deploy_jobs(latest_jobs)

def refresh_from_deploy_jobs(self, jobs):
    lost_jobs = set(self.deploy_jobs.keys())
    for (target, job) in jobs.items():
        lost_jobs.discard(target.ref)
        self.refresh_from_deploy_job(target, job)
    for target_ref in lost_jobs:
        log.info(f'{target_ref.short_str()} was not found in batch refresh '
                 f'and will be treated like a cancelled job')
        del self.deploy_jobs[target_ref]

def update_from_github_review_state(self, review):
    if self.review != review:
        log.info(f'review state changing from {self.review} to {review} '
                 f'{self.short_str()}')
        # FIXME: start merge flow if approved and success
        return self.copy(review=review)
    else:
        return self

def transition(self, other):
    if isinstance(other, Merged):
        raise ValueError(f'bad transition {self} to {other}')
    if (not isinstance(other, Failure) and
            not isinstance(other, Mergeable) and
            not isinstance(other, NoMergeSHA)):
        log.info(f'cancelling unneeded job {self.job.id} {self} {other}')
        try_to_cancel_job(self.job)
    return other

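# The build states used above (Merged, Failure, Mergeable, NoMergeSHA, and
# Building elsewhere) are not defined in this section. An illustrative
# sketch of the shape that `transition`, `update_from_completed_batch_job`,
# and `notify_github` assume; the real classes surely carry more:
class Mergeable:
    def __init__(self, target_sha):
        self.target_sha = target_sha

    def gh_state(self):
        # GitHub commit-status state for a passing build
        return 'success'


class Failure:
    def __init__(self, exit_code, image, target_sha):
        self.exit_code = exit_code
        self.image = image
        self.target_sha = target_sha

    def gh_state(self):
        return 'failure'
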
def refresh_from_ci_jobs(self, jobs):
    lost_jobs = {(pr.source, pr.target) for pr in self.building()}
    for ((source, target), job) in jobs.items():
        lost_jobs.discard((source, target))
        self.refresh_from_ci_job(source, target, job)
    for (source, target) in lost_jobs:
        log.info(f'{source.short_str()} {target.short_str()} were not '
                 f'found in batch refresh, so they will be reset to '
                 f'the buildable state')
        pr = self._get(source.ref, target.ref)
        self._set(source.ref, target.ref, pr.refresh_from_missing_job())

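# `refresh_from_missing_job` is not shown in this section. Given the log
# message above, a hypothetical sketch that drops the vanished job and
# returns the PR to the buildable state (`Buildable` is inferred from the
# wording, not confirmed):
def refresh_from_missing_job(self):
    assert isinstance(self.build, Building)
    return self._new_build(Buildable(self.target.sha))
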
def _maybe_new_shas(self, new_source=None, new_target=None):
    assert new_source is not None or new_target is not None
    assert new_source is None or isinstance(new_source, FQSHA)
    assert new_target is None or isinstance(new_target, FQSHA)
    if new_source and self.source != new_source:
        assert not self.is_merged()
        if new_target and self.target != new_target:
            log.info(f'new source and target sha '
                     f'{new_target.short_str()} {new_source.short_str()} '
                     f'{self.short_str()}')
            return self._new_target_and_source(new_target, new_source)
        else:
            log.info(f'new source sha {new_source.short_str()} '
                     f'{self.short_str()}')
            return self._new_source(new_source)
    else:
        if new_target and self.target != new_target:
            if self.is_merged():
                log.info(f'ignoring new target sha for merged PR '
                         f'{self.short_str()}')
                return self
            else:
                log.info(f'new target sha {new_target.short_str()} '
                         f'{self.short_str()}')
                return self._new_target(new_target)
        else:
            return self

def update_from_completed_batch_job(self, job):
    assert isinstance(job, Job)
    job_status = job.cached_status()
    exit_code = job_status['exit_code']
    job_source = FQSHA.from_json(json.loads(job.attributes['source']))
    job_target = FQSHA.from_json(json.loads(job.attributes['target']))
    assert job_source.ref == self.source.ref
    assert job_target.ref == self.target.ref
    if job_target.sha != self.target.sha:
        log.info(
            f'notified of job for old target {job.id}'
            # too noisy: f' {job.attributes} {self.short_str()}'
        )
        x = self
    elif job_source.sha != self.source.sha:
        log.info(
            f'notified of job for old source {job.id}'
            # too noisy: f' {job.attributes} {self.short_str()}'
        )
        x = self
    elif exit_code == 0:
        log.info(f'job finished success {short_str_build_job(job)} '
                 f'{self.short_str()}')
        x = self._new_build(Mergeable(self.target.sha))
    else:
        log.info(f'job finished failure {short_str_build_job(job)} '
                 f'{self.short_str()}')
        x = self._new_build(
            Failure(exit_code, job.attributes['image'], self.target.sha))
    job.delete()
    return x

def push(self, new_target):
    assert isinstance(new_target, FQSHA), new_target
    if self.is_watched_target_ref(new_target.ref):
        if self.is_deployable_target_ref(new_target.ref):
            self.try_deploy(new_target.ref)
        else:
            log.info(f'not deploying target {new_target.short_str()}')
    prs = self._get(target=new_target.ref).values()
    if len(prs) == 0:
        log.info(f'no PRs for target {new_target.ref.short_str()}')
    else:
        for pr in prs:
            self._set(pr.source.ref, pr.target.ref,
                      pr.update_from_github_push(new_target))
        self.heal_target(new_target.ref)

def refresh_pulls(target_repo, pulls_by_target):
    dead_targets = (set(prs.live_target_refs_for_repo(target_repo)) -
                    set(pulls_by_target.keys()))
    for dead_target_ref in dead_targets:
        prs.forget_target(dead_target_ref)
    for (target_ref, pulls) in pulls_by_target.items():
        for gh_pr in pulls:
            prs.pr_push(gh_pr)
        dead_prs = ({x.source.ref for x in prs.for_target(target_ref)} -
                    {x.source.ref for x in pulls})
        if len(dead_prs) != 0:
            log.info(f'for {target_ref.short_str()}, forgetting '
                     f'{[x.short_str() for x in dead_prs]}')
            for source_ref in dead_prs:
                prs.forget(source_ref, target_ref)

def github_push():
    d = request.json
    if 'zen' in d:
        log.info(f'received zen: {d["zen"]}')
        return '', 200
    ref = d['ref']
    if ref.startswith('refs/heads/'):
        target_ref = FQRef(Repo.from_gh_json(d['repository']), ref[11:])
        target = FQSHA(target_ref, d['after'])
        prs.push(target)
    else:
        log.info(f'ignoring ref push {ref} because it does not start with '
                 '"refs/heads/"')
    return '', 200

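# For reference, the only fields of the GitHub push event that the handler
# above reads. A trimmed, illustrative payload (real events carry many more
# fields, and the exact shape Repo.from_gh_json expects is an assumption):
EXAMPLE_PUSH_EVENT = {
    'ref': 'refs/heads/master',
    'after': 'da39a3ee5e6b4b0d3255bfef95601890afd80709',  # sha after the push
    'repository': {
        'full_name': 'example-owner/example-repo'
    }
}
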
def update_from_github_pr(self, gh_pr):
    assert isinstance(gh_pr, GitHubPR)
    assert self.target.ref == gh_pr.target_ref
    assert self.source.ref == gh_pr.source.ref
    # this will build new PRs when the server restarts
    if gh_pr.target_sha:
        result = self._maybe_new_shas(
            new_source=gh_pr.source,
            new_target=FQSHA(gh_pr.target_ref, gh_pr.target_sha))
    else:
        result = self._maybe_new_shas(new_source=gh_pr.source)
    if self.title != gh_pr.title:
        log.info(f'found new title from github {gh_pr.title} '
                 f'{self.short_str()}')
        result = result.copy(title=gh_pr.title)
    if self.number != gh_pr.number:
        log.info(f'found new PR number from github {gh_pr.number} '
                 f'{self.short_str()}')
        result = result.copy(number=gh_pr.number)
    return result

def build_next(self, target):
    approved = [pr for pr in self.for_target(target) if pr.is_approved()]
    building = [x for x in approved if x.is_building()]
    if len(building) != 0:
        to_build = []
    else:
        approved_and_need_status = [
            x for x in approved if x.is_pending_build()
        ]
        if len(approved_and_need_status) != 0:
            to_build = [approved_and_need_status[-1]]
        else:
            all_pending_prs = [
                x for x in self.for_target(target) if x.is_pending_build()
            ]
            to_build = all_pending_prs
    log.info(f'next to build for {target.short_str()}: '
             f'{[x.short_str() for x in to_build]}')
    for pr in to_build:
        self._set(pr.source.ref, pr.target.ref, pr.build_it())

def deploy_build_finished(self, target, job):
    assert isinstance(target, FQSHA)
    assert isinstance(job, Job), f'{job.id} {job.attributes}'
    expected_job = self.deploy_jobs.get(target.ref, None)
    if expected_job is None:
        log.error(f'notified of unexpected deploy job {job.id} '
                  f'(I am not waiting for any for {target.short_str()})')
        return
    if expected_job.id != job.id:
        log.error(f'notified of unexpected deploy job {job.id}, expected '
                  f'{expected_job.id} for {target.short_str()}')
        return
    assert job.cached_status()['state'] == 'Complete'
    exit_code = job.cached_status()['exit_code']
    del self.deploy_jobs[target.ref]
    if exit_code != 0:
        log.error(f'deploy job {job.id} failed for {target.short_str()}')
    else:
        log.info(f'deploy job {job.id} succeeded for {target.short_str()}')
        self.latest_deployed[target.ref] = target.sha
    job.delete()

def merge(self, pr):
    assert isinstance(pr, PR)
    log.info(f'merging {pr.short_str()}')
    (gh_response, status_code) = put_repo(
        pr.target.ref.repo.qname,
        f'pulls/{pr.number}/merge',
        json={
            'merge_method': 'squash',
            'sha': pr.source.sha
        },
        status_code=[200, 409])
    if status_code == 200:
        log.info(f'successful merge of {pr.short_str()}')
        self._set(pr.source.ref, pr.target.ref, pr.merged())
    else:
        assert status_code == 409, f'{status_code} {gh_response}'
        log.warning(
            f'failure to merge {pr.short_str()} due to {status_code} '
            f'{gh_response}, removing PR, github state refresh will '
            f'recover and retest if necessary')
        self.forget(pr.source.ref, pr.target.ref)

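# `put_repo` above and `post_repo` in notify_github wrap the GitHub REST
# API (merging uses PUT /repos/{owner}/{repo}/pulls/{number}/merge). A
# minimal sketch under the assumption that `oauth_token` (read during
# startup below) authenticates the calls; the real helpers may differ:
import requests


class BadStatus(Exception):
    def __init__(self, message, status_code):
        super().__init__(message)
        self.status_code = status_code


def put_repo(repo_qname, path, json=None, status_code=None):
    response = requests.put(
        f'https://api.github.com/repos/{repo_qname}/{path}',
        json=json,
        headers={'Authorization': f'token {oauth_token}'})
    # callers declare which status codes they are prepared to handle
    if status_code and response.status_code not in status_code:
        raise BadStatus(f'unexpected {response.status_code} from PUT {path}',
                        response.status_code)
    return (response.json(), response.status_code)
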
def refresh_from_batch_job(self, job):
    state = job.cached_status()['state']
    if state == 'Complete':
        return self.update_from_completed_batch_job(job)
    elif state == 'Cancelled':
        log.error(f'a job for me was cancelled '
                  f'{short_str_build_job(job)} {self.short_str()}')
        job.delete()
        return self._new_build(try_new_build(self.source, self.target))
    else:
        assert state == 'Created', \
            f'{state} {job.id} {job.attributes} {self.short_str()}'
        assert 'target' in job.attributes, job.attributes
        assert 'image' in job.attributes, job.attributes
        target = FQSHA.from_json(json.loads(job.attributes['target']))
        image = job.attributes['image']
        if target == self.target:
            return self._new_build(Building(job, image, target.sha))
        else:
            log.info(f'found job {job.id} for wrong target {target}, '
                     f'should be {self.target}')
            job.delete()
            return self

def heal_target(self, target):
    assert isinstance(target, FQRef)
    ready_to_merge = self.ready_to_merge(target)
    if len(ready_to_merge) != 0:
        log.info(f'merging {ready_to_merge[-1].short_str()}')
        pr = ready_to_merge[-1]
        self.merge(pr)
    else:
        log.info(f'nothing ready to merge for {target.short_str()}')
        self.build_next(target)
    if self.is_deployable_target_ref(target):
        log.info(f'deploying {target.short_str()}')
        self.try_deploy(target)
    else:
        log.info(f'not deploying target {target.short_str()}')

def notify_github(self, build):
    log.info(f'notifying github of {build} for {self.short_str()}')
    json = {
        'state': build.gh_state(),
        'description': str(build),
        'context': CONTEXT
    }
    if isinstance(build, Failure) or isinstance(build, Mergeable):
        json['target_url'] = \
            f'https://storage.googleapis.com/{GCS_BUCKET}/ci/' \
            f'{self.source.sha}/{self.target.sha}/index.html'
    try:
        post_repo(self.target.ref.repo.qname,
                  'statuses/' + self.source.sha,
                  json=json,
                  status_code=201)
    except BadStatus as e:
        if e.status_code == 422:
            log.exception(
                f'Too many statuses applied to {self.source.sha}! This is a '
                f'dangerous situation because I can no longer block merging '
                f'of failing PRs.')
        else:
            raise e

def update_from_github_status(self, build):
    if isinstance(self.build, Unknown):
        if self.target.sha == build.target_sha:
            log.info(f'recovering from unknown build state via github. '
                     f'{build} {self.short_str()}')
            return self.copy(build=build)
        else:
            log.info('ignoring github build state for wrong target. '
                     f'{build} {self.short_str()}')
            return self
    else:
        log.info(f'ignoring github build state. {build} {self.short_str()}')
        return self

def github_pull_request():
    d = request.json
    if 'zen' in d:
        log.info(f'received zen: {d["zen"]}')
        return '', 200
    assert 'action' in d, d
    assert 'pull_request' in d, d
    action = d['action']
    if action in ('opened', 'synchronize'):
        target_sha = FQSHA.from_gh_json(d['pull_request']['base']).sha
        gh_pr = GitHubPR.from_gh_json(d['pull_request'], target_sha)
        prs.pr_push(gh_pr)
    elif action == 'closed':
        gh_pr = GitHubPR.from_gh_json(d['pull_request'])
        log.info(f'forgetting closed pr {gh_pr.short_str()}')
        prs.forget(gh_pr.source.ref, gh_pr.target_ref)
    else:
        log.info(f'ignoring pull_request with action {action}')
    return '', 200

def refresh_from_deploy_job(self, target, job):
    assert isinstance(job, Job), job
    assert isinstance(target, FQSHA), target
    state = job.cached_status()['state']
    log.info(f'refreshing from deploy job {job.id} {state}')
    if state == 'Complete':
        self.deploy_build_finished(target, job)
    elif state == 'Cancelled':
        log.info(f'refreshing from cancelled deploy job {job.id} '
                 f'{job.attributes}')
        del self.deploy_jobs[target.ref]
        job.delete()
    else:
        assert state == 'Created', f'{state} {job.id} {job.attributes}'
        existing_job = self.deploy_jobs.get(target.ref, None)
        if existing_job is None:
            self.deploy_jobs[target.ref] = job
        elif existing_job.id != job.id:
            log.info(f'found deploy job {job.id} other than mine '
                     f'{existing_job.id}, deleting')
            job.delete()

def try_deploy(self, target_ref):
    assert isinstance(target_ref, FQRef)
    assert self.is_deployable_target_ref(target_ref), \
        f'{target_ref} is non-deployable {[(ref.short_str(), deployable) for ref, deployable in self._watched_targets.items()]}'
    old_job = self.deploy_jobs.get(target_ref, None)
    if old_job is not None:
        log.info(f'will not deploy while deploy job {old_job.id} is running')
        return
    latest_sha = latest_sha_for_ref(target_ref)
    if latest_sha == self.latest_deployed[target_ref]:
        log.info(f'already deployed {latest_sha}')
        return
    try:
        img = get_image_for_target(target_ref)
        attributes = {
            'target': json.dumps(FQSHA(target_ref, latest_sha).to_json()),
            'image': img,
            'type': DEPLOY_JOB_TYPE
        }
        env = {
            'DEPLOY_REPO_URL': target_ref.repo.url,
            'DEPLOY_BRANCH': target_ref.name,
            'DEPLOY_SHA': latest_sha
        }
        volumes = [{
            'volume': {
                'name': 'docker-sock-volume',
                'hostPath': {
                    'path': '/var/run/docker.sock',
                    'type': 'File'
                }
            },
            'volume_mount': {
                'mountPath': '/var/run/docker.sock',
                'name': 'docker-sock-volume'
            }
        }]
        if target_ref.repo.owner == "hail-ci-test":
            # special case for test repos
            deploy_secret = f'ci-deploy-{VERSION}--hail-is-ci-test-service-account-key'
        else:
            deploy_secret = PRS._deploy_secrets.get(target_ref.repo, None)
        if deploy_secret:
            volumes.append({
                'volume': {
                    'name': f'{deploy_secret}',
                    'secret': {
                        'optional': False,
                        'secretName': f'{deploy_secret}'
                    }
                },
                'volume_mount': {
                    'mountPath': '/secrets',
                    'name': f'{deploy_secret}',
                    'readOnly': True
                }
            })
        job = batch_client.create_job(
            img,
            command=['/bin/bash', '-c', PR_DEPLOY_SCRIPT],
            env=env,
            resources={'requests': {
                'cpu': '3.7',
                'memory': '4G'
            }},
            volumes=volumes,
            tolerations=[{
                'key': 'preemptible',
                'value': 'true'
            }],
            security_context={
                'fsGroup': 412,
            },
            service_account_name='deploy-svc',
            attributes=attributes,
            callback=SELF_HOSTNAME + '/deploy_build_done')
        log.info(f'deploying {target_ref.short_str()}:{latest_sha} '
                 f'in job {job.id}')
        self.deploy_jobs[target_ref] = job
    except Exception as e:
        log.exception(f'could not start deploy job due to {e}')

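# `try_to_cancel_job` is called throughout but not defined in this section.
# A plausible sketch, assuming batch jobs expose a cancel() method and that
# cancelling an already-finished job can raise (both are assumptions):
def try_to_cancel_job(job):
    try:
        job.cancel()
    except Exception:
        # the job may have completed or been deleted in the meantime
        log.exception(f'could not cancel job {job.id}, ignoring')
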
try:
    with open('pr-build-script', 'r') as f:
        PR_BUILD_SCRIPT = f.read()
except FileNotFoundError as e:
    raise ValueError(
        "working directory must contain a file called `pr-build-script' "
        "containing a string that is passed to `/bin/sh -c'") from e

try:
    with open('pr-deploy-script', 'r') as f:
        PR_DEPLOY_SCRIPT = f.read()
except FileNotFoundError as e:
    raise ValueError(
        "working directory must contain a file called `pr-deploy-script' "
        "containing a string that is passed to `/bin/sh -c'") from e

try:
    with open('oauth-token/oauth-token', 'r') as f:
        oauth_token = f.read().strip()
except FileNotFoundError as e:
    raise ValueError(
        "working directory must contain `oauth-token/oauth-token' "
        "containing a valid GitHub oauth token") from e

log.info(f'BATCH_SERVER_URL {BATCH_SERVER_URL}')
log.info(f'SELF_HOSTNAME {SELF_HOSTNAME}')
log.info(f'REFRESH_INTERVAL_IN_SECONDS {REFRESH_INTERVAL_IN_SECONDS}')
log.info(f'WATCHED_TARGETS '
         f'{[(ref.short_str(), deployable) for (ref, deployable) in WATCHED_TARGETS]}')
log.info(f'INSTANCE_ID = {INSTANCE_ID}')
log.info(f'CONTEXT = {CONTEXT}')

batch_client = BatchClient(url=BATCH_SERVER_URL, api=API(timeout=5))

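# WATCHED_TARGETS, logged above, is evidently a list of (FQRef, deployable)
# pairs. A purely hypothetical example of its shape (constructor arguments
# are assumptions, not confirmed by this section):
#
#     WATCHED_TARGETS = [(FQRef(Repo('hail-is', 'hail'), 'master'), True)]
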