def _create_job_step(self, phase, data, force_create=False, **defaults):
    """Gets or creates the primary JobStep for a Jenkins Job.

    Args:
        phase (JobPhase): JobPhase the JobStep should be part of.
        data (dict): JSON-serializable data associated with the Jenkins build.
        force_create (bool): Force this JobStep to be created (rather
            than retrieved). This is used when replacing a JobStep to
            make sure we don't just get the old one.

    Returns:
        JobStep: The JobStep that was retrieved or created.
    """
    # TODO(kylec): Get rid of the kwargs.
    defaults['data'] = data
    if not defaults.get('label'):
        # Placeholder; updated once the build_no for this jobstep is known.
        defaults['label'] = '<Creating Jenkins build>'

    where = {
        'job': phase.job,
        'project': phase.project,
        'phase': phase,
    }
    if force_create:
        # A fresh uuid can never match an existing row, which forces
        # get_or_create down the "create" path.
        where['id'] = uuid.uuid4()

    step, created = get_or_create(JobStep, where=where, defaults=defaults)
    assert created or not force_create

    BuildStep.handle_debug_infra_failures(step, self.debug_config, 'primary')
    return step
def _expand_job(self, phase, label, cmd, replaces=None):
    """Gets or creates an expanded JobStep and launches its Jenkins build.

    Args:
        phase (JobPhase): phase the expanded JobStep belongs to.
        label (str): label for the JobStep.
        cmd (str): command the Jenkins build should run.
        replaces (Optional[JobStep]): when set, the failed JobStep this
            one replaces; forces a brand-new row to be created.

    Returns:
        JobStep: the (possibly newly created) JobStep.
    """
    where = {
        'job': phase.job,
        'project': phase.project,
        'phase': phase,
        'label': label,
    }
    if replaces:
        # A random uuid never matches an existing row, forcing creation.
        where['id'] = uuid.uuid4()

    initial_data = {
        'cmd': cmd,
        'job_name': self.job_name,
        'build_no': None,
        'expanded': True,
    }
    step, created = get_or_create(JobStep, where=where, defaults={
        'data': initial_data,
        'status': Status.queued,
    })
    assert created or not replaces

    BuildStep.handle_debug_infra_failures(step, self.debug_config, 'expanded')

    if replaces:
        replaces.replacement_id = step.id
        db.session.add(replaces)

    self.get_builder().create_jenkins_build(
        step,
        job_name=step.data['job_name'],
        script=step.data['cmd'],
    )

    sync_job_step.delay_if_needed(
        step_id=step.id.hex,
        task_id=step.id.hex,
        parent_task_id=phase.job.id.hex,
    )
    return step
def create_expanded_jobstep(self, base_jobstep, new_jobphase, future_jobstep, skip_setup_teardown=False):
    """Converts an expanded FutureJobstep into a JobStep and sets up its
    commands accordingly.

    Args:
        base_jobstep: The base JobStep to copy data attributes from.
        new_jobphase: The JobPhase for the new JobStep
        future_jobstep: the FutureJobstep to convert from.
        skip_setup_teardown: if True, don't add setup and teardown commands
            to the new JobStep (e.g., if future_jobstep already has them)

    Returns the newly created JobStep (uncommitted).
    """
    new_jobstep = future_jobstep.as_jobstep(new_jobphase)

    # Inherit whitelisted data attributes from the parent jobstep without
    # clobbering anything the expansion already set.
    inherited = deepcopy(base_jobstep.data)
    for key in inherited:
        if key in JOBSTEP_DATA_COPY_WHITELIST and key not in new_jobstep.data:
            new_jobstep.data[key] = inherited[key]

    new_jobstep.status = Status.pending_allocation
    new_jobstep.cluster = self.cluster
    new_jobstep.data['expanded'] = True
    BuildStep.handle_debug_infra_failures(new_jobstep, self.debug_config, 'expanded')
    db.session.add(new_jobstep)

    # Expanded commands must be bracketed by all setup and teardown commands.
    # TODO(nate): skip_setup_teardown really means "we're whitewashing this jobstep"
    # since we also don't set the command's path in those cases.
    setup_commands, teardown_commands = [], []
    if not skip_setup_teardown:
        for fc in self.iter_all_commands(base_jobstep.job):
            if fc.type.is_setup():
                setup_commands.append(fc)
            elif fc.type == CommandType.teardown:
                teardown_commands.append(fc)

    # Set any needed defaults for the expanded commands.
    for fc in future_jobstep.commands:
        self._set_command_defaults(fc)

    # Order: setup -> newly generated commands from expander -> teardown.
    ordered = chain(setup_commands, future_jobstep.commands, teardown_commands)
    for index, fc in enumerate(ordered):
        db.session.add(fc.as_command(new_jobstep, index))

    self._create_targets_for_jobstep(new_jobstep)

    return new_jobstep
def _create_jobstep(self, phase, phase_cmd, phase_path, weight, test_list, shard_count=1, force_create=False):
    """Create a JobStep in the database for a single shard.

    This creates the JobStep, but does not commit the transaction.

    Args:
        phase (JobPhase): The phase this step will be part of.
        phase_cmd (str): Command configured for the collection step.
        phase_path (str): Path configured for the collection step.
        weight (int): The weight of this shard.
        test_list (list): The list of tests names for this shard.
        shard_count (int): The total number of shards in this JobStep's phase.
        force_create (bool): Force this JobStep to be created (rather
            than retrieved). This is used when replacing a JobStep to
            make sure we don't just get the old one.

    Returns:
        JobStep: the (possibly-newly-created) JobStep.
    """
    # The shard label is a stable digest of its space-joined test names.
    label = md5(' '.join(test_list)).hexdigest()

    where = {
        'job': phase.job,
        'project': phase.project,
        'phase': phase,
        'label': label,
    }
    if force_create:
        # A fresh uuid can't match an existing row, so a JobStep is
        # always created.
        where['id'] = uuid.uuid4()

    shard_data = {
        'cmd': phase_cmd,
        'path': phase_path,
        'tests': test_list,
        'expanded': True,
        'shard_count': shard_count,
        'job_name': self.job_name,
        'build_no': None,
        'weight': weight,
    }
    step, created = get_or_create(JobStep, where=where, defaults={
        'data': shard_data,
        'status': Status.queued,
    })
    assert created or not force_create

    BuildStep.handle_debug_infra_failures(step, self.debug_config, 'expanded')

    db.session.add(step)
    return step
def create_expanded_jobstep(self, base_jobstep, new_jobphase, future_jobstep, skip_setup_teardown=False):
    """Converts an expanded FutureJobstep into a JobStep and sets up its
    commands accordingly.

    Args:
        base_jobstep: The base JobStep to copy data attributes from.
        new_jobphase: The JobPhase for the new JobStep
        future_jobstep: the FutureJobstep to convert from.
        skip_setup_teardown: if True, don't add setup and teardown commands
            to the new JobStep (e.g., if future_jobstep already has them)

    Returns the newly created JobStep (uncommitted).
    """
    new_jobstep = future_jobstep.as_jobstep(new_jobphase)

    # Inherit whitelisted data attributes from the parent jobstep without
    # clobbering anything the expansion already set.
    inherited = deepcopy(base_jobstep.data)
    for key in inherited:
        if key in JOBSTEP_DATA_COPY_WHITELIST and key not in new_jobstep.data:
            new_jobstep.data[key] = inherited[key]

    new_jobstep.status = Status.pending_allocation
    new_jobstep.data['expanded'] = True
    BuildStep.handle_debug_infra_failures(new_jobstep, self.debug_config, 'expanded')
    db.session.add(new_jobstep)

    # Expanded commands must be bracketed by all setup and teardown commands.
    # TODO(nate): skip_setup_teardown really means "we're whitewashing this jobstep"
    # since we also don't set the command's path in those cases.
    setup_commands, teardown_commands = [], []
    if not skip_setup_teardown:
        for fc in self.iter_all_commands(base_jobstep.job):
            if fc.type.is_setup():
                setup_commands.append(fc)
            elif fc.type == CommandType.teardown:
                teardown_commands.append(fc)

    for fc in future_jobstep.commands:
        # TODO(dcramer): we need to remove path as an end-user option
        if not fc.path:
            fc.path = self.path

    # Order: setup -> newly generated commands from expander -> teardown.
    ordered = chain(setup_commands, future_jobstep.commands, teardown_commands)
    for index, fc in enumerate(ordered):
        new_command = fc.as_command(new_jobstep, index)
        # TODO(dcramer): this API isn't really ideal. Future command should
        # set things to NoneType and we should deal with unset values
        if not new_command.artifacts:
            new_command.artifacts = self.artifacts
        db.session.add(new_command)

    return new_jobstep
def _create_jobstep(self, phase, phase_cmd, phase_path, weight, test_list, shard_count=1, force_create=False, cluster=None):
    """Create a JobStep in the database for a single shard.

    This creates the JobStep, but does not commit the transaction.

    Args:
        phase (JobPhase): The phase this step will be part of.
        phase_cmd (str): Command configured for the collection step.
        phase_path (str): Path configured for the collection step.
        weight (int): The weight of this shard.
        test_list (list): The list of tests names for this shard.
        shard_count (int): The total number of shards in this JobStep's phase.
        force_create (bool): Force this JobStep to be created (rather
            than retrieved). This is used when replacing a JobStep to
            make sure we don't just get the old one.
        cluster (Optional[str]): The value to be used for JobStep.cluster.

    Returns:
        JobStep: the (possibly-newly-created) JobStep.
    """
    # The shard label is a stable digest of its space-joined test names.
    label = md5(' '.join(test_list)).hexdigest()

    where = {
        'job': phase.job,
        'project': phase.project,
        'phase': phase,
        'label': label,
    }
    if force_create:
        # A fresh uuid can't match an existing row, so a JobStep is
        # always created.
        where['id'] = uuid.uuid4()

    shard_data = {
        'cmd': phase_cmd,
        'path': phase_path,
        'tests': test_list,
        'expanded': True,
        'shard_count': shard_count,
        'job_name': self.job_name,
        'build_no': None,
        'weight': weight,
    }
    step, created = get_or_create(JobStep, where=where, defaults={
        'data': shard_data,
        'status': Status.queued,
        'cluster': cluster,
    })
    assert created or not force_create

    BuildStep.handle_debug_infra_failures(step, self.debug_config, 'expanded')

    db.session.add(step)
    return step
def _create_jobstep(self, phase, phase_cmd, phase_path, weight, test_list, shard_count=1, force_create=False):
    """Create a JobStep in the database for a single shard.

    This creates the JobStep, but does not commit the transaction.

    Args:
        phase (JobPhase): The phase this step will be part of.
        phase_cmd (str): Command configured for the collection step.
        phase_path (str): Path configured for the collection step.
        weight (int): The weight of this shard.
        test_list (list): The list of tests names for this shard.
        shard_count (int): The total number of shards in this JobStep's phase.
        force_create (bool): Force this JobStep to be created (rather
            than retrieved). This is used when replacing a JobStep to
            make sure we don't just get the old one.

    Returns:
        JobStep: the (possibly-newly-created) JobStep.
    """
    # The shard label is a stable digest of its space-joined test names.
    joined_names = " ".join(test_list)
    label = md5(joined_names).hexdigest()

    where = {
        "job": phase.job,
        "project": phase.project,
        "phase": phase,
        "label": label,
    }
    if force_create:
        # A fresh uuid can't match an existing row, so a JobStep is
        # always created.
        where["id"] = uuid.uuid4()

    defaults = {
        "data": {
            "cmd": phase_cmd,
            "path": phase_path,
            "tests": test_list,
            "expanded": True,
            "shard_count": shard_count,
            "job_name": self.job_name,
            "build_no": None,
            "weight": weight,
        },
        "status": Status.queued,
    }
    step, created = get_or_create(JobStep, where=where, defaults=defaults)
    assert created or not force_create

    BuildStep.handle_debug_infra_failures(step, self.debug_config, "expanded")

    db.session.add(step)
    return step
def _setup_jobstep(self, phase, job, replaces=None):
    """Does the work of setting up (or recreating) the single jobstep for a build.

    Args:
        phase (JobPhase): phase this JobStep will be part of
        job (Job): the job this JobStep will be part of
        replaces (JobStep): None for new builds, otherwise the (failed)
            JobStep that this JobStep will replace.

    Returns:
        The newly created JobStep
    """
    where = {'phase': phase, 'label': job.label}
    if replaces:
        # A fresh id in the where clause guarantees we create a new
        # jobstep rather than fetch the old (replaced) one.
        where['id'] = uuid.uuid4()

    step, _ = get_or_create(JobStep, where=where, defaults={
        'status': Status.pending_allocation,
        'job': phase.job,
        'project': phase.project,
        'data': {
            'release': self.release,
            'max_executors': self.max_executors,
            'cpus': self.resources['cpus'],
            'mem': self.resources['mem'],
        },
    })
    BuildStep.handle_debug_infra_failures(step, self.debug_config, 'primary')

    all_commands = list(self.iter_all_commands(job))

    # Certain commands are skipped for e.g. collection JobSteps.
    if job.build.cause == Cause.snapshot:
        valid_command_pred = CommandType.is_valid_for_snapshot
    elif any(fc.type.is_collector() for fc in all_commands):
        valid_command_pred = CommandType.is_valid_for_collection
    else:
        valid_command_pred = CommandType.is_valid_for_default

    index = 0
    for future_command in all_commands:
        if not valid_command_pred(future_command.type):
            continue
        index += 1
        db.session.add(future_command.as_command(jobstep=step, order=index))

    # TODO(dcramer): improve error handling here
    assert index != 0, "No commands were registered for build plan"

    if replaces:
        replaces.replacement_id = step.id
        db.session.add(replaces)

    db.session.commit()

    sync_job_step.delay(
        step_id=step.id.hex,
        task_id=step.id.hex,
        parent_task_id=job.id.hex,
    )
    return step
def get_buildstep(self):
    """Return a freshly constructed BuildStep with default settings."""
    buildstep = BuildStep()
    return buildstep
def _setup_jobstep(self, phase, job, replaces=None):
    """Does the work of setting up (or recreating) the single jobstep for a build.

    Args:
        phase (JobPhase): phase this JobStep will be part of
        job (Job): the job this JobStep will be part of
        replaces (JobStep): None for new builds, otherwise the (failed)
            JobStep that this JobStep will replace.

    Returns:
        The newly created JobStep
    """
    where = {
        "phase": phase,
        "label": job.label,
    }
    if replaces:
        # A fresh id in the where clause guarantees we create a new
        # jobstep rather than fetch the old (replaced) one.
        where["id"] = uuid.uuid4()

    defaults = {
        "status": Status.pending_allocation,
        "job": phase.job,
        "project": phase.project,
        "data": {
            "release": self.release,
            "max_executors": self.max_executors,
            "cpus": self.resources["cpus"],
            "mem": self.resources["mem"],
        },
    }
    step, _ = get_or_create(JobStep, where=where, defaults=defaults)
    BuildStep.handle_debug_infra_failures(step, self.debug_config, "primary")

    all_commands = list(self.iter_all_commands(job))

    # Certain commands are skipped for e.g. collection JobSteps.
    if job.build.cause == Cause.snapshot:
        valid_command_pred = CommandType.is_valid_for_snapshot
    elif any(fc.type.is_collector() for fc in all_commands):
        valid_command_pred = CommandType.is_valid_for_collection
    else:
        valid_command_pred = CommandType.is_valid_for_default

    index = 0
    for future_command in all_commands:
        if not valid_command_pred(future_command.type):
            continue
        index += 1
        db.session.add(future_command.as_command(jobstep=step, order=index))

    # TODO(dcramer): improve error handling here
    assert index != 0, "No commands were registered for build plan"

    if replaces:
        replaces.replacement_id = step.id
        db.session.add(replaces)

    db.session.commit()

    sync_job_step.delay(
        step_id=step.id.hex,
        task_id=step.id.hex,
        parent_task_id=job.id.hex,
    )
    return step
def _setup_jobstep(self, phase, job, replaces=None):
    """Does the work of setting up (or recreating) the single jobstep for a build.

    Args:
        phase (JobPhase): phase this JobStep will be part of
        job (Job): the job this JobStep will be part of
        replaces (JobStep): None for new builds, otherwise the (failed)
            JobStep that this JobStep will replace.

    Returns:
        The newly created JobStep
    """
    where = {
        'phase': phase,
        'label': job.label,
    }
    if replaces:
        # if we're replacing an old jobstep, we specify new id in the where
        # clause to ensure we create a new jobstep, not just get the old one
        where['id'] = uuid.uuid4()

    # Fix: the created flag was previously bound to an unused local
    # ("created"); discard it like the sibling implementations do.
    step, _ = get_or_create(JobStep, where=where, defaults={
        'status': Status.pending_allocation,
        'job': phase.job,
        'project': phase.project,
        'data': {
            'release': self.release,
            'max_executors': self.max_executors,
            'cpus': self.resources['cpus'],
            'mem': self.resources['mem'],
        },
    })
    BuildStep.handle_debug_infra_failures(step, self.debug_config, 'primary')

    all_commands = list(self.iter_all_commands(job))

    # HACK(dcramer): we dont want to run setup on collect jobs
    # ideally the abstraction would be cleaner and it break out of
    # the commands array (same for setup commands)
    has_collect = any(fc.type.is_collector() for fc in all_commands)
    # HACK(dcramer): we need to filter out non-setup commands
    # if we're running a snapshot build
    is_snapshot = job.build.cause == Cause.snapshot
    index = 0
    for future_command in all_commands:
        if is_snapshot:
            if future_command.type not in (CommandType.setup, CommandType.teardown):
                continue
        elif has_collect and not future_command.type.is_collector():
            continue

        index += 1
        command = future_command.as_command(
            jobstep=step,
            order=index,
        )
        db.session.add(command)

    # TODO(dcramer): improve error handling here
    assert index != 0, "No commands were registered for build plan"

    if replaces:
        replaces.replacement_id = step.id
        db.session.add(replaces)

    db.session.commit()

    sync_job_step.delay(
        step_id=step.id.hex,
        task_id=step.id.hex,
        parent_task_id=job.id.hex,
    )

    return step
def _setup_jobstep(self, phase, job, replaces=None):
    """Does the work of setting up (or recreating) the single jobstep for a build.

    Args:
        phase (JobPhase): phase this JobStep will be part of
        job (Job): the job this JobStep will be part of
        replaces (JobStep): None for new builds, otherwise the (failed)
            JobStep that this JobStep will replace.

    Returns:
        The newly created JobStep
    """
    where = {
        'phase': phase,
        # NOTE(review): sibling implementations key off job.label here;
        # presumably phase.label matches it for single-jobstep builds —
        # confirm before relying on it.
        'label': phase.label,
    }
    if replaces:
        # if we're replacing an old jobstep, we specify new id in the where
        # clause to ensure we create a new jobstep, not just get the old one
        where['id'] = uuid.uuid4()

    step, _ = get_or_create(JobStep, where=where, defaults={
        'status': Status.pending_allocation,
        'job': phase.job,
        'project': phase.project,
        'cluster': self.cluster,
        'data': {
            'release': self.release,
            'max_executors': self.max_executors,
            'cpus': self.resources['cpus'],
            'mem': self.resources['mem'],
        },
    })
    BuildStep.handle_debug_infra_failures(step, self.debug_config, 'primary')

    all_commands = list(self.iter_all_commands(job))

    # we skip certain commands for e.g. collection JobSteps.
    valid_command_pred = CommandType.is_valid_for_default
    if job.build.cause == Cause.snapshot:
        valid_command_pred = CommandType.is_valid_for_snapshot
    elif any(fc.type.is_collector() for fc in all_commands):
        valid_command_pred = CommandType.is_valid_for_collection

    registered = 0
    for index, future_command in enumerate(all_commands):
        if not valid_command_pred(future_command.type):
            continue
        command = future_command.as_command(
            jobstep=step,
            order=index,
        )
        db.session.add(command)
        registered += 1

    # TODO(dcramer): improve error handling here
    # Bug fix: this previously asserted len(all_commands) != 0, which
    # passes even when valid_command_pred filters out every command and
    # nothing is registered, silently producing a command-less JobStep.
    assert registered != 0, "No commands were registered for build plan"

    if replaces:
        replaces.replacement_id = step.id
        if replaces.node:
            # steer the rescheduled step away from the node that failed
            step.data['avoid_node'] = replaces.node.label
        db.session.add(replaces)

    db.session.add(step)
    db.session.commit()

    sync_job_step.delay(
        step_id=step.id.hex,
        task_id=step.id.hex,
        parent_task_id=job.id.hex,
    )
    return step