def update_existing_entry(project):
    # Pick an arbitrary in-progress job; if none exists, fall back to
    # creating a fresh mock build instead.
    try:
        job = Job.query.filter(
            Job.status == Status.in_progress,
        )[0]
    except IndexError:
        return create_new_entry(project)

    # Finish the job with a randomized result (~25% chance of failure).
    job.status = Status.finished
    job.result = Result.failed if random.randint(0, 3) == 1 else Result.passed
    job.date_finished = datetime.utcnow()
    db.session.add(job)

    publish_job_update(job)

    jobstep = JobStep.query.filter(JobStep.job == job).first()
    if jobstep:
        # Attach 50 mock test results to the job's first step; failed jobs
        # get a mix of failing and passing tests.
        test_results = []
        for _ in xrange(50):
            if job.result == Result.failed:
                result = Result.failed if random.randint(0, 3) == 1 else Result.passed
            else:
                result = Result.passed
            test_results.append(mock.test_result(jobstep, result=result))
        TestResultManager(jobstep).save(test_results)

    if job.status == Status.finished:
        # Propagate the job's outcome up to its build.
        job.build.status = job.status
        job.build.result = job.result
        job.build.date_finished = job.date_finished
        db.session.add(job.build)

        publish_build_update(job.build)

    return job
def update_existing_entry(project):
    try:
        job = Job.query.filter(
            Job.status == Status.in_progress,
        )[0]
    except IndexError:
        return create_new_entry(project)

    job.status = Status.finished
    job.result = Result.failed if random.randint(0, 3) == 1 else Result.passed
    job.date_finished = datetime.utcnow()
    db.session.add(job)

    publish_job_update(job)

    # This variant records the mock test results against the job itself
    # rather than one of its steps.
    test_results = []
    for _ in xrange(50):
        if job.result == Result.failed:
            result = Result.failed if random.randint(0, 3) == 1 else Result.passed
        else:
            result = Result.passed
        test_results.append(mock.test_result(job, result=result))
    TestResultManager(job).save(test_results)

    if job.status == Status.finished:
        job.build.status = job.status
        job.build.result = job.result
        job.build.date_finished = job.date_finished
        db.session.add(job.build)

        publish_build_update(job.build)

    return job
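# For illustration only: a minimal driver for the mock stream, assuming it
# is acceptable to call the entry points above in a plain polling loop. The
# name stream_mock_data and the one-second cadence are inventions for this
# sketch; update_existing_entry() already falls back to create_new_entry()
# when nothing is in progress, so repeatedly invoking it keeps a steady
# stream of mock builds, jobs, and test results flowing.
import time


def stream_mock_data(project):
    while True:
        update_existing_entry(project)
        db.session.commit()
        time.sleep(1)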
def execute_build(build):
    # TODO(dcramer): most of this should be abstracted into sync_build as if it
    # were a "im on step 0, create step 1"
    project = build.project

    jobs = []
    for plan in project.plans:
        # Next per-build job number: COALESCE(MAX(number), 0) + 1.
        cur_no_query = db.session.query(
            coalesce(func.max(Job.number), 0)
        ).filter(
            Job.build_id == build.id,
        ).scalar()

        job = Job(
            build=build,
            build_id=build.id,
            number=cur_no_query + 1,
            project=project,
            project_id=project.id,
            source=build.source,
            source_id=build.source_id,
            status=build.status,
            label=plan.label,
        )
        db.session.add(job)

        jobplan = JobPlan(
            project=project,
            job=job,
            build=build,
            plan=plan,
        )
        db.session.add(jobplan)

        jobs.append(job)

    db.session.commit()

    publish_build_update(build)

    for job in jobs:
        publish_job_update(job)

        create_job.delay(
            job_id=job.id.hex,
            task_id=job.id.hex,
            parent_task_id=job.build_id.hex,
        )

    db.session.commit()

    # Keyed off the build itself so this also works when project.plans is empty.
    sync_build.delay(
        build_id=build.id.hex,
        task_id=build.id.hex,
    )

    return build
def execute_build(build):
    # TODO(dcramer): most of this should be abstracted into sync_build as if it
    # were a "im on step 0, create step 1"
    project = build.project

    jobs = []
    for plan in project.plans:
        job = Job(
            build=build,
            build_id=build.id,
            project=project,
            project_id=project.id,
            source=build.source,
            source_id=build.source_id,
            status=build.status,
            label=plan.label,
        )
        db.session.add(job)

        jobplan = JobPlan(
            project=project,
            job=job,
            build=build,
            plan=plan,
        )
        db.session.add(jobplan)

        jobs.append(job)

    db.session.commit()

    publish_build_update(build)

    for job in jobs:
        publish_job_update(job)

        create_job.delay(
            job_id=job.id.hex,
            task_id=job.id.hex,
            parent_task_id=job.build_id.hex,
        )

    db.session.commit()

    # Keyed off the build itself so this also works when project.plans is empty.
    sync_build.delay(
        build_id=build.id.hex,
        task_id=build.id.hex,
    )

    return build
def test_simple(self, publish):
    build = self.create_build(self.project)

    json = as_json(build)

    publish_build_update(build)

    publish.assert_any_call('builds:{0}'.format(build.id.hex), {
        'data': json,
        'event': 'build.update',
    })
    publish.assert_any_call('projects:{0}:builds'.format(build.project_id.hex), {
        'data': json,
        'event': 'build.update',
    })
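# A sketch of the publisher that test_simple above pins down: both
# assertions pass if publish_build_update() serializes the build once and
# sends the same payload to the per-build channel and the project's builds
# channel. The publish() transport is assumed here (the test patches it in),
# so treat this as a reading of the test, not the project's actual code.
def publish_build_update(build):
    json = as_json(build)
    payload = {
        'data': json,
        'event': 'build.update',
    }
    publish('builds:{0}'.format(build.id.hex), payload)
    publish('projects:{0}:builds'.format(build.project_id.hex), payload)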
def sync_build(build_id):
    """
    Synchronizing the build happens continuously until all jobs have reported in
    as finished or have failed/aborted.

    This task is responsible for:
    - Checking in with jobs
    - Aborting/retrying them if they're beyond limits
    - Aggregating the results from jobs into the build itself
    """
    build = Build.query.get(build_id)
    if not build:
        return

    if build.status == Status.finished:
        return

    all_jobs = list(Job.query.filter(
        Job.build_id == build_id,
    ))

    is_finished = sync_build.verify_all_children() == Status.finished

    build.date_started = safe_agg(
        min, (j.date_started for j in all_jobs if j.date_started))

    if is_finished:
        build.date_finished = safe_agg(
            max, (j.date_finished for j in all_jobs if j.date_finished))
    else:
        build.date_finished = None

    if build.date_started and build.date_finished:
        build.duration = int(
            (build.date_finished - build.date_started).total_seconds() * 1000)
    else:
        build.duration = None

    if any(j.result is Result.failed for j in all_jobs):
        build.result = Result.failed
    elif is_finished:
        build.result = safe_agg(
            max, (j.result for j in all_jobs), Result.unknown)
    else:
        build.result = Result.unknown

    if is_finished:
        build.status = Status.finished
    elif any(j.status is Status.in_progress for j in all_jobs):
        build.status = Status.in_progress
    else:
        build.status = Status.queued

    if db.session.is_modified(build):
        build.date_modified = datetime.utcnow()
        db.session.add(build)
        db.session.commit()

        publish_build_update(build)

    if not is_finished:
        raise sync_build.NotFinished

    _record_tests_missing(build)

    queue.delay('notify_build_finished', kwargs={
        'build_id': build.id.hex,
    })

    queue.delay('update_project_stats', kwargs={
        'project_id': build.project_id.hex,
    }, countdown=1)
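# sync_build() aggregates job fields through safe_agg(), which is not
# defined in this file. Judging from its call sites, it applies an aggregate
# (min/max) to a sequence and falls back to a default (None unless given)
# when the sequence is empty, so unfinished builds keep unset dates. This is
# an assumed reconstruction, not the project's actual helper.
def safe_agg(func, sequence, default=None):
    values = [v for v in sequence if v is not None]
    if not values:
        return default
    return func(values)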
def create_new_entry(project):
    # Roughly one time in three, start a brand new change; otherwise reuse
    # the first existing change (creating one if none exist yet).
    new_change = (random.randint(0, 2) == 1)
    if not new_change:
        try:
            change = Change.query.all()[0]
        except IndexError:
            new_change = True

    if new_change:
        author = mock.author()
        revision = mock.revision(project.repository, author)
        change = create_new_change(
            project=project,
            author=author,
            message=revision.message,
        )
    else:
        change.date_modified = datetime.utcnow()
        db.session.add(change)
        revision = mock.revision(project.repository, change.author)

    # Half of the builds get a mock patch attached.
    if random.randint(0, 1) == 1:
        patch = mock.patch(project)
    else:
        patch = None

    source = mock.source(
        project.repository, revision_sha=revision.sha, patch=patch)

    date_started = datetime.utcnow()

    build = mock.build(
        author=change.author,
        project=project,
        source=source,
        message=change.message,
        result=Result.unknown,
        status=Status.in_progress,
        date_started=date_started,
    )
    db.session.commit()

    publish_build_update(build)

    for _ in xrange(random.randint(1, 3)):
        job = mock.job(
            build=build,
            change=change,
            status=Status.in_progress,
        )
        db.session.commit()

        publish_job_update(job)

        # Give every step of the job a log source seeded with 30 chunks.
        for step in JobStep.query.filter(JobStep.job == job):
            logsource = LogSource(
                job=job,
                project=job.project,
                step=step,
                name=step.label,
            )
            db.session.add(logsource)
            db.session.commit()

            offset = 0
            for _ in xrange(30):
                lc = mock.logchunk(source=logsource, offset=offset)
                db.session.commit()
                offset += lc.size

    return build
def create_new_entry(project):
    new_change = (random.randint(0, 2) == 1)
    if not new_change:
        try:
            change = Change.query.all()[0]
        except IndexError:
            new_change = True

    if new_change:
        author = mock.author()
        revision = mock.revision(project.repository, author)
        change = create_new_change(
            project=project,
            author=author,
            message=revision.message,
        )
    else:
        change.date_modified = datetime.utcnow()
        db.session.add(change)
        revision = mock.revision(project.repository, change.author)

    if random.randint(0, 1) == 1:
        patch = mock.patch(project)
    else:
        patch = None

    source = mock.source(
        project.repository, revision_sha=revision.sha, patch=patch)

    date_started = datetime.utcnow()

    build = mock.build(
        author=change.author,
        project=project,
        source=source,
        message=change.message,
        result=Result.unknown,
        status=Status.in_progress,
        date_started=date_started,
    )

    publish_build_update(build)

    # This variant always creates three jobs, each with a single 'console'
    # log source rather than one per step.
    for _ in xrange(3):
        job = mock.job(
            build=build,
            change=change,
            status=Status.in_progress,
        )

        publish_job_update(job)

        logsource = LogSource(
            job=job,
            project=job.project,
            name='console',
        )
        db.session.add(logsource)
        db.session.commit()

        offset = 0
        for _ in xrange(30):
            lc = mock.logchunk(source=logsource, offset=offset)
            db.session.commit()
            offset += lc.size

    return build