def test_empty_cobertura_file(self):
    """An empty coverage payload yields zero results instead of crashing."""
    step = JobStep(
        id=uuid.uuid4(),
        job=Job(id=uuid.uuid4(), project_id=uuid.uuid4()),
    )
    handler = CoverageHandler(step)

    # most importantly, it shouldn't crash
    results = handler.get_coverage(StringIO(''))
    assert len(results) == 0
def test_jacoco_result_generation(self):
    """A JaCoCo XML fixture produces exactly one FileCoverage result."""
    step = JobStep(
        id=uuid.uuid4(),
        job=Job(id=uuid.uuid4(), project_id=uuid.uuid4()),
    )
    handler = CoverageHandler(step)

    fixture = os.path.join(
        os.path.dirname(__file__), 'fixtures', 'jacoco-coverage.xml')
    with open(fixture) as fp:
        results = handler.get_coverage(fp)

    assert len(results) == 1

    coverage = results[0]
    assert type(coverage) == FileCoverage
    assert coverage.job_id == step.job.id
    assert coverage.project_id == step.job.project_id
    assert coverage.filename == 'src/main/java/com/dropbox/apx/onyx/api/resource/stats/StatsResource.java'
    assert coverage.data == 'NNNNCCCCNNCCUU'
def test_simple():
    """serialize() exposes name, id, ISO timestamps and a ms duration."""
    build_id = UUID('1e7958a368f44b0eb5a57372a9910d50')
    job = Job(
        id=UUID(hex='33846695b2774b29a71795a009e8168a'),
        label='Hello world',
        project=Project(slug='test', name='test'),
        date_created=datetime(2013, 9, 19, 22, 15, 22),
        date_started=datetime(2013, 9, 19, 22, 15, 23),
        date_finished=datetime(2013, 9, 19, 22, 15, 33),
        build=Build(id=build_id),
        build_id=build_id,
        change=Change(
            id=UUID(hex='2e18a7cbc0c24316b2ef9d41fea191d6'),
            label='Hello world',
        ),
    )

    data = serialize(job)

    assert data['name'] == 'Hello world'
    assert data['id'] == '33846695b2774b29a71795a009e8168a'
    assert data['dateCreated'] == '2013-09-19T22:15:22'
    assert data['dateStarted'] == '2013-09-19T22:15:23'
    assert data['dateFinished'] == '2013-09-19T22:15:33'
    # started -> finished is 10 seconds, reported in milliseconds
    assert data['duration'] == 10000
def create_job(self, build, **kwargs):
    """Create, persist and return a Job mirroring the given build.

    Unspecified fields default to the build's own values.
    """
    project = build.project
    inherited = {
        'label': build.label,
        'status': build.status,
        'result': build.result,
        'duration': build.duration,
        'date_started': build.date_started,
        'date_finished': build.date_finished,
        'source': build.source,
    }
    for field, value in inherited.items():
        kwargs.setdefault(field, value)

    # Callers may pass change=None explicitly to opt out; only fabricate a
    # change when the keyword was not supplied at all (False sentinel).
    if kwargs.get('change', False) is False:
        kwargs['change'] = self.create_change(project)

    job = Job(
        build=build,
        build_id=build.id,
        project=project,
        project_id=project.id,
        **kwargs
    )
    db.session.add(job)
    db.session.commit()
    return job
def test_cobertura_result_generation(self):
    """A Cobertura sample produces two FileCoverage results with C/U/N data."""
    step = JobStep(
        id=uuid.uuid4(),
        job=Job(id=uuid.uuid4(), project_id=uuid.uuid4()),
    )
    handler = CoverageHandler(step)

    results = handler.get_coverage(StringIO(SAMPLE_COVERAGE))
    assert len(results) == 2

    first = results[0]
    assert type(first) == FileCoverage
    assert first.job_id == step.job.id
    assert first.project_id == step.job.project_id
    assert first.filename == 'setup.py'
    assert first.data == 'NUNNNNNNNNNUCCNU'

    second = results[1]
    assert type(second) == FileCoverage
    assert second.job_id == step.job.id
    assert second.project_id == step.job.project_id
    # partial branch coverage is considered uncovered
    assert second.data == 'CUCNNNU'
def execute_build(build, snapshot_id, no_snapshot):
    """Create a Job (plus JobPlan) for each of the build's plans, then queue
    the per-job ``create_job`` tasks and a ``sync_build`` task.

    :param build: the Build to execute.
    :param snapshot_id: explicit snapshot to use, or None to pick the
        project's current snapshot (unless ``no_snapshot``).
    :param no_snapshot: when True, run without any snapshot; incompatible
        with an explicit ``snapshot_id``.
    :return: the same ``build``.
    """
    if no_snapshot:
        assert snapshot_id is None, 'Cannot specify snapshot with no_snapshot option'
    # TODO(dcramer): most of this should be abstracted into sync_build as if it
    # were a "im on step 0, create step 1"
    project = build.project

    # We choose a snapshot before creating jobplans. This is so that different
    # jobplans won't end up using different snapshots in a build.
    if snapshot_id is None and not no_snapshot:
        snapshot = Snapshot.get_current(project.id)
        if snapshot:
            snapshot_id = snapshot.id

    # Materialize the plan list once: the original called get_build_plans()
    # a second time for the loop below, doing the same query work twice.
    plans = list(get_build_plans(project))

    options = ItemOptionsHelper.get_options([p.id for p in plans], ['snapshot.require'])

    jobs = []
    for plan in plans:
        # Plans that require a snapshot are skipped (not failed) until a
        # snapshot image exists for them.
        if (options[plan.id].get('snapshot.require', '0') == '1' and
                not no_snapshot and
                SnapshotImage.get(plan, snapshot_id) is None):
            logging.warning(
                'Skipping plan %r (%r) because no snapshot exists yet',
                plan.label, project.slug)
            continue

        job = Job(
            build=build,
            build_id=build.id,
            project=project,
            project_id=project.id,
            source=build.source,
            source_id=build.source_id,
            status=build.status,
            label=plan.label,
        )
        db.session.add(job)

        jobplan = JobPlan.build_jobplan(plan, job, snapshot_id=snapshot_id)
        db.session.add(jobplan)

        jobs.append(job)

    # Persist jobs/jobplans before queueing tasks that reference them.
    db.session.commit()

    for job in jobs:
        create_job.delay(
            job_id=job.id.hex,
            task_id=job.id.hex,
            parent_task_id=job.build_id.hex,
        )

    db.session.commit()

    sync_build.delay(
        build_id=build.id.hex,
        task_id=build.id.hex,
    )

    return build
def job(build, change=None, **kwargs):
    """Fixture factory: create a Job for *build* with three phases
    (Setup, Compile, Test) and four JobSteps, each with one shell Command.

    Defaults mirror the build where sensible; status/result default to a
    finished, passed run. Adds everything to the session but does not commit.
    """
    kwargs.setdefault('project', build.project)
    kwargs.setdefault('label', get_sentences(1)[0][:128])
    kwargs.setdefault('status', Status.finished)
    kwargs.setdefault('result', Result.passed)
    kwargs.setdefault('duration', random.randint(10000, 100000))
    # These are forced (not setdefault): a job always belongs to its build's
    # source/project.
    kwargs['source'] = build.source
    kwargs['source_id'] = kwargs['source'].id
    kwargs['project_id'] = kwargs['project'].id
    kwargs['build_id'] = build.id
    if change:
        kwargs['change_id'] = change.id

    job = Job(
        build=build,
        change=change,
        **kwargs
    )
    db.session.add(job)

    # Reuse an existing node with this random label if present; only a newly
    # created node gets attached to a (new) cluster.
    node, created = get_or_create(Node, where={
        'label': get_sentences(1)[0][:32],
    })

    if created:
        cluster, _ = get_or_create(Cluster, where={
            'label': get_sentences(1)[0][:32],
        })

        clusternode = ClusterNode(cluster=cluster, node=node)
        db.session.add(clusternode)

    jobplan = JobPlan.build_jobplan(plan(build.project), job)
    db.session.add(jobplan)

    # Setup and Compile phases always pass; only the Test phase inherits the
    # job's (possibly failing) status/result.
    phase1_setup = JobPhase(
        project=job.project, job=job,
        date_started=job.date_started,
        date_finished=job.date_finished,
        status=Status.finished, result=Result.passed, label='Setup',
    )
    db.session.add(phase1_setup)

    phase1_compile = JobPhase(
        project=job.project, job=job,
        date_started=job.date_started,
        date_finished=job.date_finished,
        status=Status.finished, result=Result.passed, label='Compile',
    )
    db.session.add(phase1_compile)

    phase1_test = JobPhase(
        project=job.project, job=job,
        date_started=job.date_started,
        date_finished=job.date_finished,
        status=kwargs['status'], result=kwargs['result'], label='Test',
    )
    db.session.add(phase1_test)

    # One step per non-test phase, each with a trivial echo command.
    step = JobStep(
        project=job.project, job=job,
        phase=phase1_setup, status=phase1_setup.status, result=phase1_setup.result,
        label='Setup',
        node=node,
    )
    db.session.add(step)

    command = Command(
        jobstep=step,
        script="echo 1",
        label="echo 1",
    )
    db.session.add(command)

    step = JobStep(
        project=job.project, job=job,
        phase=phase1_compile, status=phase1_compile.status, result=phase1_compile.result,
        label='Compile',
        node=node,
    )
    db.session.add(step)

    command = Command(
        jobstep=step,
        script="echo 2",
        label="echo 2",
    )
    db.session.add(command)

    # Two test steps, labels drawn from the TEST_STEP_LABELS iterator
    # (Python 2 .next()).
    step = JobStep(
        project=job.project, job=job,
        phase=phase1_test, status=phase1_test.status, result=phase1_test.result,
        label=TEST_STEP_LABELS.next(),
        node=node,
    )
    db.session.add(step)

    command = Command(
        jobstep=step,
        script="echo 3",
        label="echo 3",
    )
    db.session.add(command)

    step = JobStep(
        project=job.project, job=job,
        phase=phase1_test, status=phase1_test.status, result=phase1_test.result,
        label=TEST_STEP_LABELS.next(),
        node=node,
    )
    db.session.add(step)

    command = Command(
        jobstep=step,
        script="echo 4",
        label="echo 4",
    )
    db.session.add(command)

    # A failed test phase is recorded as a FailureReason against the last step.
    if phase1_test.result == Result.failed:
        db.session.add(FailureReason(
            reason='test_failures',
            build_id=build.id,
            job_id=job.id,
            step_id=step.id,
            project_id=job.project_id
        ))

    return job
def post(self, project_id):
    """Initiates a new snapshot for this project.

    Resolves the requested sha to a revision, creates a snapshot Build with
    one Job per snapshottable plan (each paired with a SnapshotImage), then
    queues the create_job/sync_build tasks. Responds with the new Snapshot,
    404 for an unknown project, or an error for a bad revision / a project
    with no snapshottable plans.
    """
    project = Project.get(project_id)
    if not project:
        return '', 404

    args = self.post_parser.parse_args()

    repository = project.repository

    try:
        revision = identify_revision(repository, args.sha)
    except MissingRevision:
        # if the default fails, we absolutely can't continue and the
        # client should send a valid revision
        return error("Unable to find a matching revision.")

    if revision:
        sha = revision.sha
    else:
        sha = args.sha

    plan_list = get_snapshottable_plans(project)

    if not plan_list:
        return error("No snapshottable plans associated with project.")

    source, _ = get_or_create(Source, where={
        'repository': repository,
        'revision_sha': sha,
        'patch_id': None,
    })

    build = Build(
        source_id=source.id,
        source=source,
        project_id=project.id,
        project=project,
        label='Create Snapshot',
        status=Status.queued,
        cause=Cause.snapshot,
        target=sha[:12],
        tags=['snapshot'],
        # Snapshot builds are often part of the solution to queueing, so we make them
        # high priority to schedule them sooner.
        priority=BuildPriority.high,
    )
    db.session.add(build)

    # TODO(dcramer): this needs to update with the build result
    snapshot = Snapshot(
        project_id=project.id,
        source_id=source.id,
        build_id=build.id,
        status=SnapshotStatus.pending,
    )
    db.session.add(snapshot)

    jobs = []
    for plan in plan_list:
        job = Job(
            build=build,
            build_id=build.id,
            project=project,
            project_id=project.id,
            source=build.source,
            source_id=build.source_id,
            status=build.status,
            label='Create Snapshot: %s' % (plan.label, ),
        )
        db.session.add(job)

        jobplan = JobPlan.build_jobplan(plan, job)
        db.session.add(jobplan)

        # Ties this job's output to the pending snapshot.
        image = SnapshotImage(
            job=job,
            snapshot=snapshot,
            plan=plan,
        )
        db.session.add(image)

        jobs.append(job)

    # Persist everything before queueing tasks that look the rows up by id.
    db.session.commit()

    for job in jobs:
        create_job.delay(
            job_id=job.id.hex,
            task_id=job.id.hex,
            parent_task_id=job.build_id.hex,
        )

    db.session.commit()

    sync_build.delay(
        build_id=build.id.hex,
        task_id=build.id.hex,
    )

    return self.respond(snapshot)