def post(self, step_id):
    """Update a JobStep's status/result from an external (push) reporter.

    Looks up the step by id, applies any of ``result``, ``status``, ``date``
    and ``node`` from the parsed request args, keeps ``date_started`` /
    ``date_finished`` consistent with the new status, commits if anything
    changed, and kicks the job-level sync task.

    Returns the serialized jobstep, or ('', 404) when the step is unknown.
    """
    jobstep = JobStep.query.options(
        joinedload('project', innerjoin=True),
    ).get(step_id)
    if jobstep is None:
        return '', 404

    args = self.post_parser.parse_args()

    # NOTE(review): utcnow() is naive — assumes the DB stores naive UTC
    # timestamps; confirm before switching to aware datetimes.
    current_datetime = args.date or datetime.utcnow()

    if args.result:
        jobstep.result = Result[args.result]

    if args.status:
        jobstep.status = Status[args.status]

    # if we've finished this job, lets ensure we have set date_finished;
    # conversely, clear a stale date_finished if the step was re-opened
    if jobstep.status == Status.finished and jobstep.date_finished is None:
        jobstep.date_finished = current_datetime
    elif jobstep.status != Status.finished and jobstep.date_finished:
        jobstep.date_finished = None

    # anything past "queued" counts as started; a step moved back to
    # queued should not claim a start time
    if jobstep.status != Status.queued and jobstep.date_started is None:
        jobstep.date_started = current_datetime
    elif jobstep.status == Status.queued and jobstep.date_started:
        jobstep.date_started = None

    if args.node:
        node, _ = get_or_create(Node, where={
            'label': args.node,
        })
        jobstep.node_id = node.id

    db.session.add(jobstep)
    if db.session.is_modified(jobstep):
        db.session.commit()

    # TODO(dcramer): this is a little bit hacky, but until we can entirely
    # move to push APIs we need a good way to handle the existing sync
    job = jobstep.job
    # BUGFIX: the kwargs were swapped — job_id must be the job's own id
    # (sync_job looks the job up by it) and parent_task_id the build's id;
    # previously job_id received build_id and the task was parented to itself.
    sync_job.delay_if_needed(
        task_id=job.id.hex,
        parent_task_id=job.build_id.hex,
        job_id=job.id.hex,
    )

    return self.respond(jobstep)
def post(self, step_id):
    """Apply a pushed status/result update to a JobStep.

    Fetches the step (with its project eagerly joined), merges the request
    args into it, normalizes the start/finish timestamps against the new
    status, persists only when the row actually changed, and schedules the
    parent job's sync task.

    Returns the serialized jobstep, or ('', 404) if no such step exists.
    """
    jobstep = JobStep.query.options(
        joinedload('project', innerjoin=True),
    ).get(step_id)
    if jobstep is None:
        return '', 404

    args = self.post_parser.parse_args()

    # Reporter-supplied timestamp wins; otherwise use "now" (naive UTC —
    # NOTE(review): assumes the schema stores naive UTC, confirm).
    current_datetime = args.date or datetime.utcnow()

    if args.result:
        jobstep.result = Result[args.result]

    if args.status:
        jobstep.status = Status[args.status]

    # A finished step must carry a finish time; a non-finished step must not.
    if jobstep.status == Status.finished and jobstep.date_finished is None:
        jobstep.date_finished = current_datetime
    elif jobstep.status != Status.finished and jobstep.date_finished:
        jobstep.date_finished = None

    # Any status beyond queued implies the step has started; a step pushed
    # back to queued must not keep a start time.
    if jobstep.status != Status.queued and jobstep.date_started is None:
        jobstep.date_started = current_datetime
    elif jobstep.status == Status.queued and jobstep.date_started:
        jobstep.date_started = None

    if args.node:
        node, _ = get_or_create(Node, where={
            'label': args.node,
        })
        jobstep.node_id = node.id

    db.session.add(jobstep)
    if db.session.is_modified(jobstep):
        db.session.commit()

    # TODO(dcramer): this is a little bit hacky, but until we can entirely
    # move to push APIs we need a good way to handle the existing sync
    job = jobstep.job
    # BUGFIX: kwargs were swapped — sync_job needs job_id=job.id (the job it
    # syncs) and parent_task_id=build_id; before, job_id got the build id and
    # the task was parented to itself.
    sync_job.delay_if_needed(
        task_id=job.id.hex,
        parent_task_id=job.build_id.hex,
        job_id=job.id.hex,
    )

    return self.respond(jobstep)
def post(self, step_id):
    """Update a JobStep from a pushed report, verifying claimed success.

    In addition to merging ``result``/``status``/``date``/``node`` from the
    request args and normalizing the start/finish timestamps, this variant
    cross-checks a reported "passed + finished" state against the step's
    last command: an unfinished last command, a non-zero return code, or a
    collector command in the final phase (i.e. an expansion that never ran)
    downgrades the result to failed, recording a FailureReason in the last
    case. Finally schedules the parent job's sync task.

    Returns the serialized jobstep, or ('', 404) when the step is unknown.
    """
    jobstep = JobStep.query.options(
        joinedload('project', innerjoin=True),
    ).get(step_id)
    if jobstep is None:
        return '', 404

    args = self.post_parser.parse_args()

    # NOTE(review): utcnow() is naive — assumes naive-UTC storage; confirm.
    current_datetime = args.date or datetime.utcnow()

    if args.result:
        jobstep.result = Result[args.result]

    if args.status:
        jobstep.status = Status[args.status]

    # if we've finished this job, lets ensure we have set date_finished
    if jobstep.status == Status.finished and jobstep.date_finished is None:
        jobstep.date_finished = current_datetime
    elif jobstep.status != Status.finished and jobstep.date_finished:
        jobstep.date_finished = None

    # anything past queued counts as started; a re-queued step loses its
    # stale start time
    if jobstep.status != Status.queued and jobstep.date_started is None:
        jobstep.date_started = current_datetime
    elif jobstep.status == Status.queued and jobstep.date_started:
        jobstep.date_started = None

    if args.node:
        node, _ = get_or_create(Node, where={
            'label': args.node,
        })
        jobstep.node_id = node.id

    # we want to guarantee that even if the jobstep seems to succeed, that
    # we accurately reflect what we internally would consider a success state
    if jobstep.result == Result.passed and jobstep.status == Status.finished:
        last_command = Command.query.filter(
            Command.jobstep_id == jobstep.id,
        ).order_by(Command.order.desc()).first()

        if not last_command:
            # no commands recorded — nothing to verify against
            pass
        elif last_command.status != Status.finished:
            jobstep.result = Result.failed
        elif last_command.return_code != 0:
            jobstep.result = Result.failed
        # are we missing an expansion step? it must happen before reporting
        # the result, and would falsely give us a success metric
        elif last_command.type.is_collector() and self._is_final_jobphase(jobstep.phase):
            jobstep.result = Result.failed
            job = jobstep.job
            # TODO(dcramer): we should add a better failure reason
            db.session.add(FailureReason(
                step_id=jobstep.id,
                job_id=job.id,
                build_id=job.build_id,
                project_id=job.project_id,
                reason='missing_artifact',
            ))

    db.session.add(jobstep)
    if db.session.is_modified(jobstep):
        db.session.commit()

    # TODO(dcramer): this is a little bit hacky, but until we can entirely
    # move to push APIs we need a good way to handle the existing sync
    job = jobstep.job
    # BUGFIX: the kwargs were swapped — job_id must be the job's own id
    # (sync_job looks the job up by it) and parent_task_id the build's id;
    # previously job_id received build_id and the task was parented to itself.
    sync_job.delay_if_needed(
        task_id=job.id.hex,
        parent_task_id=job.build_id.hex,
        job_id=job.id.hex,
    )

    return self.respond(jobstep)
def post(self, step_id):
    """Apply a pushed JobStep update, auditing any claimed success.

    Merges ``result``/``status``/``date``/``node`` from the request args,
    keeps ``date_started``/``date_finished`` consistent with the resulting
    status, and — when the step claims "passed + finished" — validates that
    claim against the step's last command (must be finished, exit 0, and not
    be a collector stranded in the final phase, which indicates a missing
    expansion; that case also records a ``missing_artifact`` FailureReason).
    Commits only if the row changed, then schedules the job sync task.

    Returns the serialized jobstep, or ('', 404) for an unknown step.
    """
    jobstep = JobStep.query.options(
        joinedload('project', innerjoin=True),
    ).get(step_id)
    if jobstep is None:
        return '', 404

    args = self.post_parser.parse_args()

    # Reporter timestamp wins over server clock (naive UTC —
    # NOTE(review): assumes naive-UTC storage, confirm).
    current_datetime = args.date or datetime.utcnow()

    if args.result:
        jobstep.result = Result[args.result]

    if args.status:
        jobstep.status = Status[args.status]

    # A finished step needs a finish time; any other status must not keep one.
    if jobstep.status == Status.finished and jobstep.date_finished is None:
        jobstep.date_finished = current_datetime
    elif jobstep.status != Status.finished and jobstep.date_finished:
        jobstep.date_finished = None

    # Anything beyond queued has started; a step back in queued has not.
    if jobstep.status != Status.queued and jobstep.date_started is None:
        jobstep.date_started = current_datetime
    elif jobstep.status == Status.queued and jobstep.date_started:
        jobstep.date_started = None

    if args.node:
        node, _ = get_or_create(Node, where={
            'label': args.node,
        })
        jobstep.node_id = node.id

    # we want to guarantee that even if the jobstep seems to succeed, that
    # we accurately reflect what we internally would consider a success state
    if jobstep.result == Result.passed and jobstep.status == Status.finished:
        last_command = Command.query.filter(
            Command.jobstep_id == jobstep.id,
        ).order_by(Command.order.desc()).first()

        if not last_command:
            # no commands to audit — accept the reported result as-is
            pass
        elif last_command.status != Status.finished:
            jobstep.result = Result.failed
        elif last_command.return_code != 0:
            jobstep.result = Result.failed
        # are we missing an expansion step? it must happen before reporting
        # the result, and would falsely give us a success metric
        elif last_command.type.is_collector() and self._is_final_jobphase(jobstep.phase):
            jobstep.result = Result.failed
            job = jobstep.job
            # TODO(dcramer): we should add a better failure reason
            db.session.add(FailureReason(
                step_id=jobstep.id,
                job_id=job.id,
                build_id=job.build_id,
                project_id=job.project_id,
                reason='missing_artifact',
            ))

    db.session.add(jobstep)
    if db.session.is_modified(jobstep):
        db.session.commit()

    # TODO(dcramer): this is a little bit hacky, but until we can entirely
    # move to push APIs we need a good way to handle the existing sync
    job = jobstep.job
    # BUGFIX: kwargs were swapped — sync_job needs job_id=job.id (the job it
    # syncs) and parent_task_id=build_id; before, job_id got the build id and
    # the task was parented to itself.
    sync_job.delay_if_needed(
        task_id=job.id.hex,
        parent_task_id=job.build_id.hex,
        job_id=job.id.hex,
    )

    return self.respond(jobstep)