def update_existing_entry(project):
    """Finish one in-progress job with a randomized result.

    Picks any job currently in progress; when none exists, falls back to
    creating a brand-new entry for *project*.  If the job has a JobStep,
    50 mock test results are recorded against it.  Finally the owning
    build is synchronized with the job's terminal state.

    :param project: project used only when falling back to a new entry
    :return: the finished ``Job`` (or whatever ``create_new_entry`` returns)
    """
    # .first() issues a LIMIT 1 and returns None when empty — cleaner than
    # indexing the query and catching IndexError.
    job = Job.query.filter(
        Job.status == Status.in_progress,
    ).first()
    if job is None:
        return create_new_entry(project)

    job.status = Status.finished
    # ~25% chance the job as a whole fails.
    job.result = Result.failed if random.randint(0, 3) == 1 else Result.passed
    job.date_finished = datetime.utcnow()
    db.session.add(job)
    publish_job_update(job)

    jobstep = JobStep.query.filter(JobStep.job == job).first()
    if jobstep:
        test_results = []
        for _ in xrange(50):
            if job.result == Result.failed:
                # Even a failed job passes most of its individual tests.
                result = Result.failed if random.randint(0, 3) == 1 else Result.passed
            else:
                result = Result.passed
            test_results.append(mock.test_result(jobstep, result=result))
        TestResultManager(jobstep).save(test_results)

    if job.status == Status.finished:
        # Propagate the terminal state up to the owning build.
        job.build.status = job.status
        job.build.result = job.result
        job.build.date_finished = job.date_finished
        db.session.add(job.build)
        publish_build_update(job.build)

    return job
def update_existing_entry(project):
    """Finish one in-progress job, creating a fresh entry when none exist."""
    in_progress = Job.query.filter(
        Job.status == Status.in_progress,
    )
    try:
        job = in_progress[0]
    except IndexError:
        return create_new_entry(project)

    job.status = Status.finished
    if random.randint(0, 3) == 1:
        job.result = Result.failed
    else:
        job.result = Result.passed
    job.date_finished = datetime.utcnow()
    db.session.add(job)
    publish_job_update(job)

    test_results = []
    for _ in xrange(50):
        # Individual tests only fail (with 1-in-4 odds) when the job failed.
        result = Result.passed
        if job.result == Result.failed and random.randint(0, 3) == 1:
            result = Result.failed
        test_results.append(mock.test_result(job, result=result))
    TestResultManager(job).save(test_results)

    if job.status == Status.finished:
        # Mirror the job's terminal state onto its build.
        build = job.build
        build.status = job.status
        build.result = job.result
        build.date_finished = job.date_finished
        db.session.add(build)
        publish_build_update(build)

    return job
def execute_build(build):
    """Fan *build* out into one numbered job per project plan and queue the work."""
    # TODO(dcramer): most of this should be abstracted into sync_build as if it
    # were a "im on step 0, create step 1"
    project = build.project

    created_jobs = []
    for plan in project.plans:
        # Next sequential job number within this build (max existing + 1,
        # coalesced to 0 when the build has no jobs yet).
        max_number = db.session.query(
            coalesce(func.max(Job.number), 0)
        ).filter(
            Job.build_id == build.id,
        ).scalar()

        new_job = Job(
            build=build,
            build_id=build.id,
            number=max_number + 1,
            project=project,
            project_id=project.id,
            source=build.source,
            source_id=build.source_id,
            status=build.status,
            label=plan.label,
        )
        db.session.add(new_job)
        db.session.add(JobPlan(
            project=project,
            job=new_job,
            build=build,
            plan=plan,
        ))
        created_jobs.append(new_job)

    db.session.commit()

    publish_build_update(build)
    for new_job in created_jobs:
        publish_job_update(new_job)
        create_job.delay(
            job_id=new_job.id.hex,
            task_id=new_job.id.hex,
            parent_task_id=new_job.build_id.hex,
        )

    db.session.commit()

    # NOTE(review): relies on the loop variable leaking out of the for-loop,
    # exactly as the original did.
    sync_build.delay(
        build_id=new_job.build_id.hex,
        task_id=new_job.build_id.hex,
    )

    return build
def execute_build(build):
    """Create one job (and its JobPlan link) per plan on the build's project."""
    # TODO(dcramer): most of this should be abstracted into sync_build as if it
    # were a "im on step 0, create step 1"
    project = build.project

    jobs = []
    for plan in project.plans:
        job = Job(
            build=build,
            build_id=build.id,
            project=project,
            project_id=project.id,
            source=build.source,
            source_id=build.source_id,
            status=build.status,
            label=plan.label,
        )
        jobplan = JobPlan(
            project=project,
            job=job,
            build=build,
            plan=plan,
        )
        db.session.add(job)
        db.session.add(jobplan)
        jobs.append(job)

    db.session.commit()

    publish_build_update(build)
    for job in jobs:
        publish_job_update(job)
        create_job.delay(
            job_id=job.id.hex,
            task_id=job.id.hex,
            parent_task_id=job.build_id.hex,
        )

    db.session.commit()

    # Uses the leaked loop variable, matching the original behavior.
    sync_build.delay(
        build_id=job.build_id.hex,
        task_id=job.build_id.hex,
    )

    return build
def test_simple(self, publish):
    """publish_job_update emits the same payload on both the job channel
    and the build's jobs channel."""
    build = self.create_build(self.project)
    job = self.create_job(build=build)

    # Serialize before publishing, matching the original ordering.
    expected = {
        'data': as_json(job),
        'event': 'job.update',
    }

    publish_job_update(job)

    publish.assert_any_call('jobs:{0}'.format(job.id.hex), expected)
    publish.assert_any_call(
        'builds:{0}:jobs'.format(job.build_id.hex), expected)
def execute_build(build):
    """Queue a job and plan link for every plan configured on the project."""
    # TODO(dcramer): most of this should be abstracted into sync_build as if it
    # were a "im on step 0, create step 1"
    project = build.project

    pending = []
    for plan in project.plans:
        entry = Job(
            build=build,
            build_id=build.id,
            project=project,
            project_id=project.id,
            source=build.source,
            source_id=build.source_id,
            status=build.status,
            label=plan.label,
        )
        db.session.add(entry)
        db.session.add(JobPlan(project=project, job=entry, build=build, plan=plan))
        pending.append(entry)

    db.session.commit()

    publish_build_update(build)
    for entry in pending:
        publish_job_update(entry)
        create_job.delay(job_id=entry.id.hex, task_id=entry.id.hex,
                         parent_task_id=entry.build_id.hex)

    db.session.commit()

    # Leaked loop variable — identical to the original's use of `job`.
    sync_build.delay(build_id=entry.build_id.hex, task_id=entry.build_id.hex)

    return build
def sync_job(job_id):
    """Synchronize a job's status/result from its step implementation and
    its phases; re-raises NotFinished until the job reaches a terminal state.

    Aggregation rules (in order):
      - any failed phase, or any failed TestCase, fails the job;
      - otherwise, when finished, the "worst" phase result wins;
      - otherwise the result is unknown.
    """
    job = Job.query.get(job_id)
    if not job:
        # Unknown job id — nothing to sync.
        return

    if job.status == Status.finished:
        # Already terminal; the post-finish tasks were queued on a prior run.
        return

    # TODO(dcramer): we make an assumption that there is a single step
    job_plan = JobPlan.query.options(
        subqueryload_all('plan.steps')
    ).filter(
        JobPlan.job_id == job.id,
    ).join(Plan).first()
    try:
        if not job_plan:
            raise UnrecoverableException('Got sync_job task without job plan: %s' % (job.id,))

        try:
            step = job_plan.plan.steps[0]
        except IndexError:
            raise UnrecoverableException('Missing steps for plan')

        # Delegate the actual sync to the step's build implementation.
        implementation = step.get_implementation()
        implementation.update(job=job)
    except UnrecoverableException:
        # Abort the job rather than retrying forever.
        job.status = Status.finished
        job.result = Result.aborted
        current_app.logger.exception('Unrecoverable exception syncing %s', job.id)

    is_finished = sync_job.verify_all_children() == Status.finished
    if is_finished:
        job.status = Status.finished

    all_phases = list(job.phases)

    # Earliest phase start / latest phase finish define the job's window.
    job.date_started = safe_agg(
        min, (j.date_started for j in all_phases if j.date_started))

    if is_finished:
        job.date_finished = safe_agg(
            max, (j.date_finished for j in all_phases if j.date_finished))
    else:
        job.date_finished = None

    if job.date_started and job.date_finished:
        # Duration in milliseconds.
        job.duration = int((job.date_finished - job.date_started).total_seconds() * 1000)
    else:
        job.duration = None

    # if any phases are marked as failing, fail the build
    if any(j.result is Result.failed for j in all_phases):
        job.result = Result.failed
    # if any test cases were marked as failing, fail the build
    elif TestCase.query.filter(TestCase.result == Result.failed, TestCase.job_id == job.id).first():
        job.result = Result.failed
    # if we've finished all phases, use the best result available
    elif is_finished:
        job.result = safe_agg(
            max, (j.result for j in all_phases), Result.unknown)
    else:
        job.result = Result.unknown

    if is_finished:
        job.status = Status.finished
    elif any(j.status is Status.in_progress for j in all_phases):
        job.status = Status.in_progress
    else:
        job.status = Status.queued

    if db.session.is_modified(job):
        # NOTE(review): commit + publish grouped under the is_modified guard —
        # nesting reconstructed from flattened source, confirm against history.
        job.date_modified = datetime.utcnow()
        db.session.add(job)
        db.session.commit()

        publish_job_update(job)

    if not is_finished:
        # Signal the task queue to reschedule this sync.
        raise sync_job.NotFinished

    _record_tests_missing(job)

    queue.delay('notify_job_finished', kwargs={
        'job_id': job.id.hex,
    })

    if job_plan:
        queue.delay('update_project_plan_stats', kwargs={
            'project_id': job.project_id.hex,
            'plan_id': job_plan.plan_id.hex,
        }, countdown=1)
def sync_job(job_id):
    """Synchronize a job's status/result from its step implementation and
    its phases; re-raises NotFinished until the job reaches a terminal state.

    Failure propagation order: failed phase > failed TestCase > best phase
    result when finished > unknown.
    """
    job = Job.query.get(job_id)
    if not job:
        # Unknown job id — nothing to sync.
        return

    if job.status == Status.finished:
        # Already terminal; nothing left to do.
        return

    # TODO(dcramer): we make an assumption that there is a single step
    job_plan = JobPlan.query.options(subqueryload_all('plan.steps')).filter(
        JobPlan.job_id == job.id,
    ).join(Plan).first()
    try:
        if not job_plan:
            raise UnrecoverableException(
                'Got sync_job task without job plan: %s' % (job.id, ))

        try:
            step = job_plan.plan.steps[0]
        except IndexError:
            raise UnrecoverableException('Missing steps for plan')

        # Delegate the actual sync work to the step's implementation.
        implementation = step.get_implementation()
        implementation.update(job=job)
    except UnrecoverableException:
        # Abort instead of retrying forever.
        job.status = Status.finished
        job.result = Result.aborted
        current_app.logger.exception('Unrecoverable exception syncing %s', job.id)

    is_finished = sync_job.verify_all_children() == Status.finished
    if is_finished:
        job.status = Status.finished

    all_phases = list(job.phases)

    # The job's time window spans its phases.
    job.date_started = safe_agg(min, (j.date_started
                                      for j in all_phases if j.date_started))

    if is_finished:
        job.date_finished = safe_agg(max, (j.date_finished
                                           for j in all_phases if j.date_finished))
    else:
        job.date_finished = None

    if job.date_started and job.date_finished:
        # Milliseconds.
        job.duration = int(
            (job.date_finished - job.date_started).total_seconds() * 1000)
    else:
        job.duration = None

    # if any phases are marked as failing, fail the build
    if any(j.result is Result.failed for j in all_phases):
        job.result = Result.failed
    # if any test cases were marked as failing, fail the build
    elif TestCase.query.filter(TestCase.result == Result.failed,
                               TestCase.job_id == job.id).first():
        job.result = Result.failed
    # if we've finished all phases, use the best result available
    elif is_finished:
        job.result = safe_agg(max, (j.result for j in all_phases),
                              Result.unknown)
    else:
        job.result = Result.unknown

    if is_finished:
        job.status = Status.finished
    elif any(j.status is Status.in_progress for j in all_phases):
        job.status = Status.in_progress
    else:
        job.status = Status.queued

    if db.session.is_modified(job):
        # NOTE(review): commit + publish nested under the is_modified guard —
        # reconstructed from flattened source, confirm against history.
        job.date_modified = datetime.utcnow()
        db.session.add(job)
        db.session.commit()

        publish_job_update(job)

    if not is_finished:
        # Ask the task queue to reschedule this sync.
        raise sync_job.NotFinished

    _record_tests_missing(job)

    queue.delay('notify_job_finished', kwargs={
        'job_id': job.id.hex,
    })

    if job_plan:
        queue.delay('update_project_plan_stats', kwargs={
            'project_id': job.project_id.hex,
            'plan_id': job_plan.plan_id.hex,
        }, countdown=1)
def create_new_entry(project):
    """Simulate activity: start an in-progress build against a Change.

    Roughly a third of the time a brand-new Change is created; otherwise an
    existing one is reused and its modified date touched.  The build gets
    1-3 in-progress jobs, and each existing JobStep of each job gets a log
    source filled with 30 mock log chunks.

    :param project: the project to create the mock build under
    :return: the in-progress ``build``
    """
    new_change = (random.randint(0, 2) == 1)
    if not new_change:
        # .first() fetches a single row; the previous Change.query.all()[0]
        # loaded the entire table just to take the first element.
        change = Change.query.first()
        if change is None:
            new_change = True

    if new_change:
        author = mock.author()
        revision = mock.revision(project.repository, author)
        change = create_new_change(
            project=project,
            author=author,
            message=revision.message,
        )
    else:
        change.date_modified = datetime.utcnow()
        db.session.add(change)
        revision = mock.revision(project.repository, change.author)

    # Half of the builds carry a patch (i.e. a diff build).
    if random.randint(0, 1) == 1:
        patch = mock.patch(project)
    else:
        patch = None

    source = mock.source(
        project.repository, revision_sha=revision.sha, patch=patch)

    date_started = datetime.utcnow()

    build = mock.build(
        author=change.author,
        project=project,
        source=source,
        message=change.message,
        result=Result.unknown,
        status=Status.in_progress,
        date_started=date_started,
    )
    db.session.commit()
    publish_build_update(build)

    for _ in xrange(0, random.randint(1, 3)):
        job = mock.job(
            build=build,
            change=change,
            status=Status.in_progress,
        )
        db.session.commit()
        publish_job_update(job)

        for step in JobStep.query.filter(JobStep.job == job):
            logsource = LogSource(
                job=job,
                project=job.project,
                step=step,
                name=step.label,
            )
            db.session.add(logsource)
            db.session.commit()

            offset = 0
            # Previously both loops used `x`, shadowing the outer index.
            for _ in xrange(30):
                lc = mock.logchunk(source=logsource, offset=offset)
                db.session.commit()
                offset += lc.size

    return build
def create_new_entry(project):
    """Simulate activity: start an in-progress build with three mock jobs.

    Roughly a third of the time a brand-new Change is created; otherwise an
    existing one is reused and its modified date touched.  Each job gets a
    single 'console' log source filled with 30 mock log chunks.

    :param project: the project to create the mock build under
    :return: the in-progress ``build``
    """
    new_change = (random.randint(0, 2) == 1)
    if not new_change:
        # .first() fetches one row; Change.query.all()[0] loaded the whole
        # table just to take the first element.
        change = Change.query.first()
        if change is None:
            new_change = True

    if new_change:
        author = mock.author()
        revision = mock.revision(project.repository, author)
        change = create_new_change(
            project=project,
            author=author,
            message=revision.message,
        )
    else:
        change.date_modified = datetime.utcnow()
        db.session.add(change)
        revision = mock.revision(project.repository, change.author)

    # Half of the builds carry a patch (i.e. a diff build).
    if random.randint(0, 1) == 1:
        patch = mock.patch(project)
    else:
        patch = None
    # Removed leftover debug statement `print patch` (Python 2 print
    # statement; also a syntax error under Python 3).

    source = mock.source(
        project.repository, revision_sha=revision.sha, patch=patch)

    date_started = datetime.utcnow()

    build = mock.build(
        author=change.author,
        project=project,
        source=source,
        message=change.message,
        result=Result.unknown,
        status=Status.in_progress,
        date_started=date_started,
    )
    publish_build_update(build)

    for _ in xrange(3):
        job = mock.job(
            build=build,
            change=change,
            status=Status.in_progress,
        )
        publish_job_update(job)

        logsource = LogSource(
            job=job,
            project=job.project,
            name='console',
        )
        db.session.add(logsource)
        db.session.commit()

        offset = 0
        for _ in xrange(30):
            lc = mock.logchunk(source=logsource, offset=offset)
            db.session.commit()
            offset += lc.size

    return build