def post(self, step_id):
    to_deallocate = JobStep.query.get(step_id)

    if to_deallocate is None:
        return '', 404

    if to_deallocate.status != Status.allocated:
        return {
            # format the placeholder so the client sees the required status
            "error": "Only {0} job steps may be deallocated.".format(
                Status.allocated.name),
            "actual_status": to_deallocate.status.name
        }, 400

    to_deallocate.status = Status.pending_allocation
    to_deallocate.date_started = None
    to_deallocate.date_finished = None
    db.session.add(to_deallocate)
    db.session.commit()

    sync_job_step.delay(
        step_id=to_deallocate.id.hex,
        task_id=to_deallocate.id.hex,
        parent_task_id=to_deallocate.job_id.hex,
    )

    return self.respond(to_deallocate)
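# --- Hypothetical usage sketch (not part of the source) ---
# The handler above only deallocates a JobStep in the `allocated` state;
# anything else yields a 400 carrying the step's actual status. A minimal
# client call, assuming the endpoint is mounted at a URL like the one below
# (host and path are illustrative):
import requests

step_id = 'c0ffee00c0ffee00c0ffee00c0ffee00'  # placeholder JobStep ID
resp = requests.post(
    'https://changes.example.com/api/0/jobsteps/{0}/deallocate/'.format(step_id))
if resp.status_code == 400:
    # e.g. {"error": "Only allocated job steps may be deallocated.",
    #       "actual_status": "finished"}
    print(resp.json()['actual_status'])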
def create_job(self, job, replaces=None):
    """
    Creates a job within Jenkins.

    Due to the way the API works, this consists of two steps:

    - Submitting the job
    - Polling for the newly created job to associate either a queue ID
      or a finalized build number.
    """
    phase, created = get_or_create(JobPhase, where={
        'job': job,
        'label': self.get_default_job_phase_label(job, self.job_name),
        'project': job.project,
    }, defaults={
        'status': job.status,
    })
    assert not created or not replaces

    step = self._create_job_step(
        phase=phase,
        data={'job_name': self.job_name},
        status=job.status,
        force_create=bool(replaces),
        cluster=self.cluster,
    )
    if replaces:
        replaces.replacement_id = step.id
        db.session.add(replaces)

    db.session.commit()

    # now create the jenkins build
    # we don't commit immediately because we also want to update the job
    # and jobstep using the job_data we get from jenkins
    job_data = self.create_jenkins_build(step, commit=False)
    if job_data['queued']:
        job.status = Status.queued
    else:
        job.status = Status.in_progress
    db.session.add(job)

    assert 'master' in step.data
    assert 'job_name' in step.data
    assert 'build_no' in step.data or 'item_id' in step.data

    # now we have the build_no/item_id and can set the full jobstep label
    step.label = '{0} #{1}'.format(
        step.data['job_name'], step.data['build_no'] or step.data['item_id'])
    db.session.add(step)

    db.session.commit()

    sync_job_step.delay(
        step_id=step.id.hex,
        task_id=step.id.hex,
        parent_task_id=job.id.hex,
    )

    return step
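# --- Minimal sketch of get_or_create (an assumption, not the project's code) ---
# Every snippet in this section leans on get_or_create(Model, where=..., defaults=...).
# The semantics assumed throughout: look the row up by the `where` columns; if it
# is missing, create it from `where` merged with `defaults` and report created=True.
# A simplified Flask-SQLAlchemy-style version of that contract:
def get_or_create(model, where, defaults=None):
    instance = model.query.filter_by(**where).first()
    if instance is not None:
        return instance, False
    kwargs = dict(where)
    kwargs.update(defaults or {})
    instance = model(**kwargs)
    db.session.add(instance)
    return instance, True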
def execute(self, job):
    job.status = Status.queued
    db.session.add(job)

    phase, created = get_or_create(
        JobPhase,
        where={"job": job, "label": job.label},
        defaults={"status": Status.queued, "project": job.project},
    )

    step, created = get_or_create(
        JobStep,
        where={"phase": phase, "label": job.label},
        defaults={"status": Status.pending_allocation, "job": phase.job, "project": phase.project},
    )

    for index, command in enumerate(self.iter_all_commands(job)):
        command_model, created = get_or_create(
            CommandModel,
            where={"jobstep": step, "order": index},
            defaults={
                "label": command.script.splitlines()[0][:128],
                "status": Status.queued,
                "script": command.script,
                "env": command.env,
                "cwd": command.path,
                "artifacts": command.artifacts,
            },
        )

    db.session.commit()

    sync_job_step.delay(step_id=step.id.hex, task_id=step.id.hex, parent_task_id=job.id.hex)
def create_job(self, job):
    """
    Creates a job within Jenkins.

    Due to the way the API works, this consists of two steps:

    - Submitting the job
    - Polling for the newly created job to associate either a queue ID
      or a finalized build number.
    """
    # We want to use the JobStep ID for the CHANGES_BID so that JobSteps can be easily
    # associated with Jenkins builds, but the JobStep probably doesn't exist yet and
    # requires information about the Jenkins build to be created.
    # So, we generate the JobStep ID before we create the build on Jenkins, and we
    # deterministically derive it from the Job ID to be sure that we don't create new
    # builds/JobSteps when this method is retried.
    # TODO(kylec): The process described above seems too complicated. Try to fix that
    # so we can delete the comment.
    jobstep_id = uuid.uuid5(JOB_NAMESPACE_UUID, job.id.hex)

    params = self.get_job_parameters(job, changes_bid=jobstep_id.hex)
    is_diff = not job.source.is_commit()
    job_data = self.create_job_from_params(
        changes_bid=jobstep_id.hex,
        params=params,
        is_diff=is_diff,
    )

    if job_data['queued']:
        job.status = Status.queued
    else:
        job.status = Status.in_progress
    db.session.add(job)

    phase, created = get_or_create(JobPhase, where={
        'job': job,
        'label': self.get_default_job_phase_label(job, job_data),
        'project': job.project,
    }, defaults={
        'status': job.status,
    })

    if not created:
        return

    # TODO(dcramer): due to no unique constraints this section of code
    # presents a race condition when run concurrently
    step = self._create_job_step(
        id=jobstep_id,
        phase=phase,
        status=job.status,
        data=job_data,
    )

    db.session.commit()

    sync_job_step.delay(
        step_id=step.id.hex,
        task_id=step.id.hex,
        parent_task_id=job.id.hex,
    )
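# --- Illustration (not part of the source) ---
# uuid.uuid5 builds a deterministic, name-based UUID: hashing the same
# namespace and name always yields the same ID, which is what makes the retry
# behavior described in the comment above safe. JOB_NAMESPACE_UUID here is a
# stand-in value; the real constant lives elsewhere in the source tree.
import uuid

JOB_NAMESPACE_UUID = uuid.UUID('00000000-0000-0000-0000-000000000000')  # placeholder
job_id_hex = uuid.uuid4().hex

first = uuid.uuid5(JOB_NAMESPACE_UUID, job_id_hex)
retry = uuid.uuid5(JOB_NAMESPACE_UUID, job_id_hex)
assert first == retry  # a retried create_job derives the same JobStep ID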
def create_job(self, job, replaces=None):
    """
    Creates a job within Jenkins.

    Due to the way the API works, this consists of two steps:

    - Submitting the job
    - Polling for the newly created job to associate either a queue ID
      or a finalized build number.
    """
    phase, created = get_or_create(JobPhase, where={
        'job': job,
        'label': self.get_default_job_phase_label(job, self.job_name),
        'project': job.project,
    }, defaults={
        'status': job.status,
    })
    assert not created or not replaces

    step = self._create_job_step(
        phase=phase,
        data={'job_name': self.job_name},
        status=job.status,
        force_create=bool(replaces),
    )
    if replaces:
        replaces.replacement_id = step.id
        db.session.add(replaces)

    db.session.commit()

    # now create the jenkins build
    # we don't commit immediately because we also want to update the job
    # and jobstep using the job_data we get from jenkins
    job_data = self.create_jenkins_build(step, commit=False)
    if job_data['queued']:
        job.status = Status.queued
    else:
        job.status = Status.in_progress
    db.session.add(job)

    assert 'master' in step.data
    assert 'job_name' in step.data
    assert 'build_no' in step.data or 'item_id' in step.data

    # now we have the build_no/item_id and can set the full jobstep label
    step.label = '{0} #{1}'.format(
        step.data['job_name'], step.data['build_no'] or step.data['item_id'])
    db.session.add(step)

    db.session.commit()

    sync_job_step.delay(
        step_id=step.id.hex,
        task_id=step.id.hex,
        parent_task_id=job.id.hex,
    )

    return step
def create_job(self, job):
    """
    Creates a job within Jenkins.

    Due to the way the API works, this consists of two steps:

    - Submitting the job
    - Polling for the newly created job to associate either a queue ID
      or a finalized build number.
    """
    params = self.get_job_parameters(job)

    job_data = self.create_job_from_params(
        target_id=job.id.hex,
        params=params,
    )

    if job_data['queued']:
        job.status = Status.queued
    else:
        job.status = Status.in_progress
    db.session.add(job)

    phase, created = get_or_create(JobPhase, where={
        'job': job,
        'label': self.get_default_job_phase_label(job, job_data),
        'project': job.project,
    }, defaults={
        'status': job.status,
    })

    if not created:
        return

    # TODO(dcramer): due to no unique constraints this section of code
    # presents a race condition when run concurrently
    step = self._create_job_step(
        phase=phase,
        status=job.status,
        data=job_data,
    )

    db.session.commit()

    sync_job_step.delay(
        step_id=step.id.hex,
        task_id=step.id.hex,
        parent_task_id=job.id.hex,
    )
def execute(self, job):
    job.status = Status.pending_allocation
    db.session.add(job)

    phase, created = get_or_create(JobPhase, where={
        'job': job,
        'label': job.label,
    }, defaults={
        'status': Status.pending_allocation,
        'project': job.project,
    })

    step, created = get_or_create(JobStep, where={
        'phase': phase,
        'label': job.label,
    }, defaults={
        'status': Status.pending_allocation,
        'job': phase.job,
        'project': phase.project,
        'data': {
            'release': self.release,
            'max_executors': self.max_executors,
            'cpus': self.resources['cpus'],
            'mem': self.resources['mem'],
        },
    })

    # HACK(dcramer): we need to filter out non-setup commands
    # if we're running a snapshot build
    is_snapshot = job.build.cause == Cause.snapshot

    index = 0
    for future_command in self.iter_all_commands(job):
        if is_snapshot and future_command.type not in (CommandType.setup, CommandType.teardown):
            continue
        index += 1
        command = future_command.as_command(
            jobstep=step,
            order=index,
        )
        db.session.add(command)

    # TODO(dcramer): improve error handling here
    assert index != 0, "No commands were registered for build plan"

    db.session.commit()

    sync_job_step.delay(
        step_id=step.id.hex,
        task_id=step.id.hex,
        parent_task_id=job.id.hex,
    )
def create_job(self, job):
    """
    Creates a job within Jenkins.

    Due to the way the API works, this consists of two steps:

    - Submitting the job
    - Polling for the newly created job to associate either a queue ID
      or a finalized build number.
    """
    params = self.get_job_parameters(job)

    job_data = self.create_job_from_params(
        target_id=job.id.hex,
        params=params,
    )

    if job_data['queued']:
        job.status = Status.queued
    else:
        job.status = Status.in_progress
    db.session.add(job)

    phase, created = get_or_create(JobPhase, where={
        'job': job,
        'label': self.get_default_job_phase_label(job, job_data),
        'project': job.project,
    }, defaults={
        'status': job.status,
    })

    if not created:
        return

    # TODO(dcramer): due to no unique constraints this section of code
    # presents a race condition when run concurrently
    step = self._create_job_step(
        phase=phase,
        status=job.status,
        data=job_data,
    )

    db.session.commit()

    sync_job_step.delay(
        step_id=step.id.hex,
        task_id=step.id.hex,
        parent_task_id=job.id.hex,
    )
def execute(self, job):
    job.status = Status.queued
    db.session.add(job)

    phase, created = get_or_create(JobPhase, where={
        'job': job,
        'label': job.label,
    }, defaults={
        'status': Status.queued,
        'project': job.project,
    })

    step, created = get_or_create(JobStep, where={
        'phase': phase,
        'label': job.label,
    }, defaults={
        'status': Status.pending_allocation,
        'job': phase.job,
        'project': phase.project,
    })

    for index, command in enumerate(self.iter_all_commands(job)):
        command_model, created = get_or_create(CommandModel, where={
            'jobstep': step,
            'order': index,
        }, defaults={
            'label': command.script.splitlines()[0][:128],
            'status': Status.queued,
            'script': command.script,
            'env': command.env,
            'cwd': command.path,
            'artifacts': command.artifacts,
        })

    db.session.commit()

    sync_job_step.delay(
        step_id=step.id.hex,
        task_id=step.id.hex,
        parent_task_id=job.id.hex,
    )
def execute(self, job):
    job.status = Status.queued
    db.session.add(job)

    phase, created = get_or_create(JobPhase, where={
        'job': job,
        'label': job.label,
    }, defaults={
        'status': Status.queued,
        'project': job.project,
    })

    step, created = get_or_create(JobStep, where={
        'phase': phase,
        'label': job.label,
    }, defaults={
        'status': Status.pending_allocation,
        'job': phase.job,
        'project': phase.project,
        'data': {
            'release': self.release,
        },
    })

    for index, command in enumerate(self.iter_all_commands(job)):
        command_model, created = get_or_create(CommandModel, where={
            'jobstep': step,
            'order': index,
        }, defaults={
            'label': command.script.splitlines()[0][:128],
            'status': Status.queued,
            'script': command.script,
            'env': command.env,
            'cwd': command.path,
            'artifacts': command.artifacts,
        })

    db.session.commit()

    sync_job_step.delay(
        step_id=step.id.hex,
        task_id=step.id.hex,
        parent_task_id=job.id.hex,
    )
def _setup_jobstep(self, phase, job, replaces=None):
    """
    Does the work of setting up (or recreating) the single jobstep for
    a build.

    Args:
        phase (JobPhase): phase this JobStep will be part of
        job (Job): the job this JobStep will be part of
        replaces (JobStep): None for new builds, otherwise the (failed)
            JobStep that this JobStep will replace.

    Returns:
        The newly created JobStep
    """
    where = {"phase": phase, "label": job.label}
    if replaces:
        # if we're replacing an old jobstep, we specify new id in the where
        # clause to ensure we create a new jobstep, not just get the old one
        where["id"] = uuid.uuid4()

    step, _ = get_or_create(
        JobStep,
        where=where,
        defaults={
            "status": Status.pending_allocation,
            "job": phase.job,
            "project": phase.project,
            "data": {
                "release": self.release,
                "max_executors": self.max_executors,
                "cpus": self.resources["cpus"],
                "mem": self.resources["mem"],
            },
        },
    )

    BuildStep.handle_debug_infra_failures(step, self.debug_config, "primary")

    all_commands = list(self.iter_all_commands(job))

    # we skip certain commands for e.g. collection JobSteps.
    valid_command_pred = CommandType.is_valid_for_default
    if job.build.cause == Cause.snapshot:
        valid_command_pred = CommandType.is_valid_for_snapshot
    elif any(fc.type.is_collector() for fc in all_commands):
        valid_command_pred = CommandType.is_valid_for_collection

    index = 0
    for future_command in all_commands:
        if not valid_command_pred(future_command.type):
            continue
        index += 1
        command = future_command.as_command(jobstep=step, order=index)
        db.session.add(command)

    # TODO(dcramer): improve error handling here
    assert index != 0, "No commands were registered for build plan"

    if replaces:
        replaces.replacement_id = step.id
        db.session.add(replaces)

    db.session.commit()

    sync_job_step.delay(step_id=step.id.hex, task_id=step.id.hex, parent_task_id=job.id.hex)

    return step
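# --- Illustration of the predicate selection above (rule bodies are assumptions) ---
# The three CommandType predicates act as interchangeable filters, and either the
# build cause (snapshot) or the presence of a collector command picks which filter
# applies to the whole plan. A toy enum whose rules match the older HACK versions
# further down (snapshot builds keep setup/teardown, collection jobsteps keep
# collector commands); the real membership rules may differ:
from enum import Enum

class CommandType(Enum):
    setup = 1
    teardown = 2
    default = 3
    collect_tests = 4

    def is_collector(self):
        return self is CommandType.collect_tests

    def is_valid_for_default(self):
        return True

    def is_valid_for_snapshot(self):
        return self in (CommandType.setup, CommandType.teardown)

    def is_valid_for_collection(self):
        return self.is_collector()

# valid_command_pred = CommandType.is_valid_for_snapshot
# valid_command_pred(CommandType.default)  # -> False; the command is skipped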
def _setup_jobstep(self, phase, job, replaces=None):
    """
    Does the work of setting up (or recreating) the single jobstep for
    a build.

    Args:
        phase (JobPhase): phase this JobStep will be part of
        job (Job): the job this JobStep will be part of
        replaces (JobStep): None for new builds, otherwise the (failed)
            JobStep that this JobStep will replace.

    Returns:
        The newly created JobStep
    """
    where = {
        'phase': phase,
        'label': phase.label,
    }
    if replaces:
        # if we're replacing an old jobstep, we specify new id in the where
        # clause to ensure we create a new jobstep, not just get the old one
        where['id'] = uuid.uuid4()

    step, _ = get_or_create(JobStep, where=where, defaults={
        'status': Status.pending_allocation,
        'job': phase.job,
        'project': phase.project,
        'cluster': self.cluster,
        'data': {
            'release': self.release,
            'max_executors': self.max_executors,
            'cpus': self.resources['cpus'],
            'mem': self.resources['mem'],
        },
    })

    BuildStep.handle_debug_infra_failures(step, self.debug_config, 'primary')

    all_commands = list(self.iter_all_commands(job))

    # we skip certain commands for e.g. collection JobSteps.
    valid_command_pred = CommandType.is_valid_for_default
    if job.build.cause == Cause.snapshot:
        valid_command_pred = CommandType.is_valid_for_snapshot
    elif any(fc.type.is_collector() for fc in all_commands):
        valid_command_pred = CommandType.is_valid_for_collection

    # count the commands we actually register so the assertion below checks
    # what its message claims; the unfiltered list can be non-empty even when
    # every command gets skipped
    index = 0
    for future_command in all_commands:
        if not valid_command_pred(future_command.type):
            continue
        index += 1
        command = future_command.as_command(
            jobstep=step,
            order=index,
        )
        db.session.add(command)

    # TODO(dcramer): improve error handling here
    assert index != 0, "No commands were registered for build plan"

    if replaces:
        replaces.replacement_id = step.id
        if replaces.node:
            step.data['avoid_node'] = replaces.node.label
        db.session.add(replaces)
        db.session.add(step)

    db.session.commit()

    sync_job_step.delay(
        step_id=step.id.hex,
        task_id=step.id.hex,
        parent_task_id=job.id.hex,
    )

    return step
def create_job(self, job):
    """
    Creates a job within Jenkins.

    Due to the way the API works, this consists of two steps:

    - Submitting the job
    - Polling for the newly created job to associate either a queue ID
      or a finalized build number.
    """
    # We want to use the JobStep ID for the CHANGES_BID so that JobSteps can be easily
    # associated with Jenkins builds, but the JobStep probably doesn't exist yet and
    # requires information about the Jenkins build to be created.
    # So, we generate the JobStep ID before we create the build on Jenkins, and we
    # deterministically derive it from the Job ID to be sure that we don't create new
    # builds/JobSteps when this method is retried.
    # TODO(kylec): The process described above seems too complicated. Try to fix that
    # so we can delete the comment.
    jobstep_id = uuid.uuid5(JOB_NAMESPACE_UUID, job.id.hex)

    params = self.get_job_parameters(job, changes_bid=jobstep_id.hex)
    is_diff = not job.source.is_commit()
    job_data = self.create_job_from_params(
        changes_bid=jobstep_id.hex,
        params=params,
        is_diff=is_diff,
    )

    if job_data['queued']:
        job.status = Status.queued
    else:
        job.status = Status.in_progress
    db.session.add(job)

    phase, created = get_or_create(JobPhase, where={
        'job': job,
        'label': self.get_default_job_phase_label(job, job_data),
        'project': job.project,
    }, defaults={
        'status': job.status,
    })

    if not created:
        return

    # TODO(dcramer): due to no unique constraints this section of code
    # presents a race condition when run concurrently
    step = self._create_job_step(
        id=jobstep_id,
        phase=phase,
        status=job.status,
        data=job_data,
    )

    # Hook that allows other builders to add commands for the jobstep
    # which tells changes-client what to run
    self.create_commands(step, params)

    db.session.commit()

    sync_job_step.delay(
        step_id=step.id.hex,
        task_id=step.id.hex,
        parent_task_id=job.id.hex,
    )
def create_job(self, job):
    """
    Creates a job within Jenkins.

    Due to the way the API works, this consists of two steps:

    - Submitting the job
    - Polling for the newly created job to associate either a queue ID
      or a finalized build number.
    """
    job_name = self.job_name
    if not job_name:
        raise UnrecoverableException('Missing Jenkins project configuration')

    json_data = {
        'parameter': [
            {'name': 'CHANGES_BID', 'value': job.id.hex},
        ]
    }

    if job.build.source.revision_sha:
        json_data['parameter'].append(
            {'name': 'REVISION', 'value': job.build.source.revision_sha},
        )

    if job.build.source.patch:
        # XXX(dcramer): Jenkins cannot pass file parameters to downstream
        # builds which prevents us from simply uploading a file
        # json_data['parameter'].append(
        #     {'name': 'PATCH', 'file': 'patch'}
        # )
        # files = {
        #     'patch': job.build.source.patch.diff,
        # }
        json_data['parameter'].append(
            {
                'name': 'PATCH_URL',
                'value': build_uri('/api/0/patches/{0}/?raw=1'.format(
                    job.build.source.patch.id.hex)),
            }
        )

    # TODO: Jenkins will return a 302 if it cannot queue the job which I
    # believe implies that there is already a job with the same parameters
    # queued.
    self._get_response('/job/{}/build'.format(job_name), method='POST', data={
        'json': json.dumps(json_data),
    })

    # we retry for a period of time as Jenkins doesn't have strong consistency
    # guarantees and the job may not show up right away
    t = time.time() + 5
    job_data = None
    while time.time() < t:
        job_data = self._find_job(job_name, job.id.hex)
        if job_data:
            break
        time.sleep(0.3)

    if job_data is None:
        raise Exception('Unable to find matching job after creation. GLHF')

    if job_data['queued']:
        job.status = Status.queued
    else:
        job.status = Status.in_progress
    db.session.add(job)

    phase, created = get_or_create(JobPhase, where={
        'job': job,
        'label': job_data['job_name'],
        'project': job.project,
    }, defaults={
        'status': Status.in_progress,
    })

    step = self._create_job_step(
        phase=phase,
        status=Status.in_progress,
        data=job_data,
    )

    db.session.commit()

    sync_job_step.delay(
        step_id=step.id.hex,
        task_id=step.id.hex,
        parent_task_id=job.id.hex,
    )
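# --- Reusable sketch of the polling pattern above (hypothetical helper) ---
# Jenkins gives no read-after-write guarantee for freshly submitted builds, so
# the code above polls _find_job against a deadline rather than reading once.
# The same shape factored out, with the 5s/0.3s values from the source as
# defaults:
import time

def poll_until(fetch, timeout=5.0, interval=0.3):
    """Call `fetch` until it returns a truthy value or `timeout` seconds pass."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        result = fetch()
        if result:
            return result
        time.sleep(interval)
    return None

# job_data = poll_until(lambda: self._find_job(job_name, job.id.hex))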
def _setup_jobstep(self, phase, job, replaces=None):
    """
    Does the work of setting up (or recreating) the single jobstep for
    a build.

    Args:
        phase (JobPhase): phase this JobStep will be part of
        job (Job): the job this JobStep will be part of
        replaces (JobStep): None for new builds, otherwise the (failed)
            JobStep that this JobStep will replace.

    Returns:
        The newly created JobStep
    """
    where = {
        'phase': phase,
        'label': job.label,
    }
    if replaces:
        # if we're replacing an old jobstep, we specify new id in the where
        # clause to ensure we create a new jobstep, not just get the old one
        where['id'] = uuid.uuid4()

    step, created = get_or_create(JobStep, where=where, defaults={
        'status': Status.pending_allocation,
        'job': phase.job,
        'project': phase.project,
        'data': {
            'release': self.release,
            'max_executors': self.max_executors,
            'cpus': self.resources['cpus'],
            'mem': self.resources['mem'],
        },
    })

    BuildStep.handle_debug_infra_failures(step, self.debug_config, 'primary')

    all_commands = list(self.iter_all_commands(job))

    # HACK(dcramer): we dont want to run setup on collect jobs
    # ideally the abstraction would be cleaner and it break out of
    # the commands array (same for setup commands)
    has_collect = any(fc.type.is_collector() for fc in all_commands)

    # HACK(dcramer): we need to filter out non-setup commands
    # if we're running a snapshot build
    is_snapshot = job.build.cause == Cause.snapshot

    index = 0
    for future_command in all_commands:
        if is_snapshot:
            if future_command.type not in (CommandType.setup, CommandType.teardown):
                continue
        elif has_collect and not future_command.type.is_collector():
            continue
        index += 1
        command = future_command.as_command(
            jobstep=step,
            order=index,
        )
        db.session.add(command)

    # TODO(dcramer): improve error handling here
    assert index != 0, "No commands were registered for build plan"

    if replaces:
        replaces.replacement_id = step.id
        db.session.add(replaces)

    db.session.commit()

    sync_job_step.delay(
        step_id=step.id.hex,
        task_id=step.id.hex,
        parent_task_id=job.id.hex,
    )

    return step
def execute(self, job):
    job.status = Status.pending_allocation
    db.session.add(job)

    phase, created = get_or_create(JobPhase, where={
        'job': job,
        'label': job.label,
    }, defaults={
        'status': Status.pending_allocation,
        'project': job.project,
    })

    step, created = get_or_create(JobStep, where={
        'phase': phase,
        'label': job.label,
    }, defaults={
        'status': Status.pending_allocation,
        'job': phase.job,
        'project': phase.project,
        'data': {
            'release': self.release,
            'max_executors': self.max_executors,
            'cpus': self.resources['cpus'],
            'mem': self.resources['mem'],
        },
    })

    all_commands = list(self.iter_all_commands(job))

    # HACK(dcramer): we dont want to run setup on collect jobs
    # ideally the abstraction would be cleaner and it break out of
    # the commands array (same for setup commands)
    has_collect = any(fc.type.is_collector() for fc in all_commands)

    # HACK(dcramer): we need to filter out non-setup commands
    # if we're running a snapshot build
    is_snapshot = job.build.cause == Cause.snapshot

    index = 0
    for future_command in all_commands:
        if is_snapshot:
            if future_command.type not in (CommandType.setup, CommandType.teardown):
                continue
        elif has_collect and not future_command.type.is_collector():
            continue
        index += 1
        command = future_command.as_command(
            jobstep=step,
            order=index,
        )
        db.session.add(command)

    # TODO(dcramer): improve error handling here
    assert index != 0, "No commands were registered for build plan"

    db.session.commit()

    sync_job_step.delay(
        step_id=step.id.hex,
        task_id=step.id.hex,
        parent_task_id=job.id.hex,
    )
def _setup_jobstep(self, phase, job, replaces=None):
    """
    Does the work of setting up (or recreating) the single jobstep for
    a build.

    Args:
        phase (JobPhase): phase this JobStep will be part of
        job (Job): the job this JobStep will be part of
        replaces (JobStep): None for new builds, otherwise the (failed)
            JobStep that this JobStep will replace.

    Returns:
        The newly created JobStep
    """
    where = {
        'phase': phase,
        'label': job.label,
    }
    if replaces:
        # if we're replacing an old jobstep, we specify new id in the where
        # clause to ensure we create a new jobstep, not just get the old one
        where['id'] = uuid.uuid4()

    step, _ = get_or_create(JobStep, where=where, defaults={
        'status': Status.pending_allocation,
        'job': phase.job,
        'project': phase.project,
        'data': {
            'release': self.release,
            'max_executors': self.max_executors,
            'cpus': self.resources['cpus'],
            'mem': self.resources['mem'],
        },
    })

    BuildStep.handle_debug_infra_failures(step, self.debug_config, 'primary')

    all_commands = list(self.iter_all_commands(job))

    # we skip certain commands for e.g. collection JobSteps.
    valid_command_pred = CommandType.is_valid_for_default
    if job.build.cause == Cause.snapshot:
        valid_command_pred = CommandType.is_valid_for_snapshot
    elif any(fc.type.is_collector() for fc in all_commands):
        valid_command_pred = CommandType.is_valid_for_collection

    index = 0
    for future_command in all_commands:
        if not valid_command_pred(future_command.type):
            continue
        index += 1
        command = future_command.as_command(
            jobstep=step,
            order=index,
        )
        db.session.add(command)

    # TODO(dcramer): improve error handling here
    assert index != 0, "No commands were registered for build plan"

    if replaces:
        replaces.replacement_id = step.id
        db.session.add(replaces)

    db.session.commit()

    sync_job_step.delay(
        step_id=step.id.hex,
        task_id=step.id.hex,
        parent_task_id=job.id.hex,
    )

    return step
def create_job(self, job):
    """
    Creates a job within Jenkins.

    Due to the way the API works, this consists of two steps:

    - Submitting the job
    - Polling for the newly created job to associate either a queue ID
      or a finalized build number.
    """
    job_name = self.job_name
    if not job_name:
        raise UnrecoverableException('Missing Jenkins project configuration')

    params = self.get_job_parameters(job)

    json_data = {
        'parameter': params,
    }

    # TODO: Jenkins will return a 302 if it cannot queue the job which I
    # believe implies that there is already a job with the same parameters
    # queued.
    self._get_response('/job/{}/build'.format(job_name), method='POST', data={
        'json': json.dumps(json_data),
    })

    # we retry for a period of time as Jenkins doesn't have strong consistency
    # guarantees and the job may not show up right away
    t = time.time() + 5
    job_data = None
    while time.time() < t:
        job_data = self._find_job(job_name, job.id.hex)
        if job_data:
            break
        time.sleep(0.3)

    if job_data is None:
        raise Exception('Unable to find matching job after creation. GLHF')

    if job_data['queued']:
        job.status = Status.queued
    else:
        job.status = Status.in_progress
    db.session.add(job)

    phase, created = get_or_create(JobPhase, where={
        'job': job,
        'label': job_data['job_name'],
        'project': job.project,
    }, defaults={
        'status': Status.in_progress,
    })

    if not created:
        return

    # TODO(dcramer): due to no unique constraints this section of code
    # presents a race condition when run concurrently
    step = self._create_job_step(
        phase=phase,
        status=Status.in_progress,
        data=job_data,
    )

    db.session.commit()

    sync_job_step.delay(
        step_id=step.id.hex,
        task_id=step.id.hex,
        parent_task_id=job.id.hex,
    )
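# --- Context sketch for the recurring sync_job_step.delay call (assumptions) ---
# Every snippet above ends by scheduling sync_job_step asynchronously, passing
# task_id equal to the JobStep ID and parent_task_id equal to the Job ID so the
# task tracker can tie the polling task back to its rows. The real queue is the
# project's own task machinery; this Celery-style stand-in only mirrors the
# call shape:
from celery import Celery

app = Celery('changes', broker='memory://')  # broker value is illustrative

@app.task
def sync_job_step(step_id, task_id, parent_task_id):
    # would poll the build system and update the JobStep row (elided)
    pass

# sync_job_step.delay(step_id=step.id.hex, task_id=step.id.hex,
#                     parent_task_id=job.id.hex)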