Example #1
def sync_artifact(artifact_id=None, **kwargs):
    with RCount('sync_artifact'):
        artifact = Artifact.query.get(artifact_id)
        if artifact is None:
            return

        step = artifact.step

        if step.result == Result.aborted:
            return

        # TODO(dcramer): we eventually want to abstract the entirety of Jenkins
        # artifact syncing so that we pull files and then process them
        if artifact.file:
            try:
                manager.process(artifact)
            except Exception:
                current_app.logger.exception(
                    'Unrecoverable exception processing artifact %s: %s',
                    artifact.step_id, artifact)
        else:
            jobplan, implementation = JobPlan.get_build_step_for_job(
                job_id=step.job_id)

            try:
                implementation.fetch_artifact(artifact=artifact, **kwargs)

            except UnrecoverableException:
                current_app.logger.exception(
                    'Unrecoverable exception fetching artifact %s: %s',
                    artifact.step_id, artifact)
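Every example on this page wraps the task body in RCount(...), whose definition is not shown. A minimal sketch, assuming RCount is simply a context manager that bumps a named counter on entry (incr here is a toy stand-in for the real metrics client):

from contextlib import contextmanager

_counters = {}

def incr(name, delta=1):
    # toy in-memory counter standing in for a real statsd-style client
    _counters[name] = _counters.get(name, 0) + delta

@contextmanager
def RCount(name):
    # assumed behavior: record one hit for `name`, then run the wrapped body
    incr(name)
    yield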
Example #2
def cleanup_tasks():
    """
    Find any tasks which haven't checked in within a reasonable time period and
    requeue them if necessary.

    Additionally remove any old Task entries which are completed.
    """
    with RCount('cleanup_tasks'):
        now = datetime.utcnow()

        pending_tasks = Task.query.filter(
            Task.status != Status.finished,
            Task.date_modified < now - CHECK_TIME,
        )

        for task in pending_tasks:
            incr('cleanup_unfinished')
            task_func = TrackedTask(queue.get_task(task.task_name))
            task_func.delay(
                task_id=task.task_id.hex,
                parent_task_id=task.parent_id.hex if task.parent_id else None,
                **task.data['kwargs']
            )

        Task.query.filter(
            Task.status == Status.finished,
            Task.date_modified < now - EXPIRE_TIME,
        ).delete()
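The snippet relies on CHECK_TIME and EXPIRE_TIME, which are defined elsewhere. A minimal sketch with hypothetical windows (the real values are not shown in this excerpt):

from datetime import timedelta

# hypothetical values; the snippet does not show the real constants
CHECK_TIME = timedelta(minutes=5)   # max silence before an unfinished task is requeued
EXPIRE_TIME = timedelta(days=7)     # retention period for finished Task rows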
Example #3
def run_event_listener(listener, signal, kwargs):
    with RCount('run_event_listener'):
        # simple check to make sure this is registered
        if not any(l == listener for l, _ in current_app.config['EVENT_LISTENERS']):
            raise SuspiciousOperation('%s is not a registered event listener' % (listener,))

        func = import_string(listener)
        func(**kwargs)
Example #4
def fire_signal(signal, kwargs):
    with RCount('fire_signal'):
        for listener, l_signal in current_app.config['EVENT_LISTENERS']:
            if l_signal == signal:
                run_event_listener.delay(
                    listener=listener,
                    signal=signal,
                    kwargs=kwargs,
                )
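Taken together, fire_signal and run_event_listener imply that current_app.config['EVENT_LISTENERS'] is a sequence of (dotted import path, signal name) pairs: the path is resolved with import_string and called with the signal's kwargs. A hypothetical configuration matching that shape (the listener paths below are made up for illustration; the signal names appear in the examples on this page):

# hypothetical configuration; each entry pairs a dotted import path with
# the signal it subscribes to, matching how the tuples are unpacked above
EVENT_LISTENERS = (
    ('myapp.listeners.on_revision_created', 'revision.created'),
    ('myapp.listeners.on_build_finished', 'build.finished'),
)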
Example #5
def import_repo(repo_id, parent=None):
    with RCount('import_repo'):
        repo = Repository.query.get(repo_id)
        if not repo:
            logger.error('Repository %s not found', repo_id)
            return

        vcs = repo.get_vcs()
        if vcs is None:
            logger.warning('Repository %s has no VCS backend set', repo.id)
            return

        if repo.status == RepositoryStatus.inactive:
            logger.info('Repository %s is inactive', repo.id)
            return

        Repository.query.filter(
            Repository.id == repo.id,
        ).update({
            'last_update_attempt': datetime.utcnow(),
        }, synchronize_session=False)
        db.session.commit()

        if vcs.exists():
            vcs.update()
        else:
            vcs.clone()

        for commit in vcs.log(parent=parent):
            revision, created = commit.save(repo)
            db.session.commit()
            parent = commit.id

        Repository.query.filter(
            Repository.id == repo.id,
        ).update({
            'last_update': datetime.utcnow(),
            'status': RepositoryStatus.active,
        }, synchronize_session=False)
        db.session.commit()

        if parent:
            import_repo.delay(
                repo_id=repo.id.hex,
                task_id=repo.id.hex,
                parent=parent,
            )
Example #6
def create_job(job_id):
    with RCount('create_job'):
        job = Job.query.get(job_id)
        if not job:
            return

        if job.project.status == ProjectStatus.inactive:
            current_app.logger.warning('Project is not active: %s', job.project.slug)
            job.status = Status.finished
            job.result = Result.failed
            db.session.add(job)
            db.session.flush()
            return

        # we might already be marked as finished for various reasons
        # (such as aborting the task)
        if job.status == Status.finished:
            return

        jobplan, implementation = JobPlan.get_build_step_for_job(job_id=job.id)
        if implementation is None:
            # TODO(dcramer): record a FailureReason?
            job.status = Status.finished
            job.result = Result.failed
            db.session.add(job)
            db.session.flush()
            current_app.logger.exception('No build plan set %s', job_id)
            return

        try:
            implementation.execute(job=job)
        except UnrecoverableException:
            job.status = Status.finished
            job.result = Result.aborted
            db.session.add(job)
            db.session.flush()
            current_app.logger.exception('Unrecoverable exception creating %s', job_id)
            return

        sync_job.delay(
            job_id=job.id.hex,
            task_id=job.id.hex,
            parent_task_id=job.build_id.hex,
        )
Example #7
def sync_repo(repo_id, continuous=True):
    with RCount('sync_repo'):
        repo = Repository.query.get(repo_id)
        if not repo:
            logger.error('Repository %s not found', repo_id)
            return

        vcs = repo.get_vcs()
        if vcs is None:
            logger.warning('Repository %s has no VCS backend set', repo.id)
            return

        if repo.status != RepositoryStatus.active:
            logger.info('Repository %s is not active', repo.id)
            return

        Repository.query.filter(
            Repository.id == repo.id,
        ).update({
            'last_update_attempt': datetime.utcnow(),
        }, synchronize_session=False)
        db.session.commit()

        if vcs.exists():
            vcs.update()
        else:
            vcs.clone()

        # TODO(dcramer): this doesn't scrape everything, and really we wouldn't
        # want to do this all in a single job so we should split this into a
        # backfill task
        # TODO(dcramer): this doesn't collect commits in non-default branches
        might_have_more = True
        parent = None
        while might_have_more:
            might_have_more = False
            for commit in vcs.log(parent=parent):
                revision, created = commit.save(repo)
                db.session.commit()
                if not created:
                    break

                might_have_more = True
                parent = commit.id

                fire_signal.delay(
                    signal='revision.created',
                    kwargs={
                        'repository_id': repo.id.hex,
                        'revision_sha': revision.sha
                    },
                )

        Repository.query.filter(
            Repository.id == repo.id,
        ).update({
            'last_update': datetime.utcnow(),
        }, synchronize_session=False)
        db.session.commit()

        if continuous:
            raise sync_repo.NotFinished
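Raising sync_repo.NotFinished implies a retry contract: the task machinery is assumed to catch the exception and requeue the task rather than record a failure (example #8 below also passes a retry_after delay). A rough sketch of that contract, with reschedule as a made-up callable:

# hypothetical sketch of the polling contract implied by NotFinished;
# `reschedule` is a made-up callable standing in for the real queue
class NotFinished(Exception):
    def __init__(self, retry_after=None):
        super().__init__()
        self.retry_after = retry_after

def run_tracked(task_func, reschedule, **kwargs):
    # run one pass of the task; a NotFinished escape means "poll again later"
    try:
        task_func(**kwargs)
    except NotFinished as exc:
        reschedule(countdown=exc.retry_after or 60, **kwargs)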
Example #8
def sync_job_step(step_id):
    with RCount('sync_job_step'):
        step = JobStep.query.get(step_id)
        if not step:
            return

        jobplan, implementation = JobPlan.get_build_step_for_job(
            job_id=step.job_id)

        # only synchronize if upstream hasn't suggested we're finished
        if step.status != Status.finished:
            implementation.update_step(step=step)

        db.session.flush()

        if step.status != Status.finished:
            is_finished = False
        else:
            is_finished = sync_job_step.verify_all_children() == Status.finished

        if not is_finished:
            if has_timed_out(step, jobplan):
                implementation.cancel_step(step=step)

                step.result = Result.failed
                db.session.add(step)

                job = step.job
                try_create(FailureReason, {
                    'step_id': step.id,
                    'job_id': job.id,
                    'build_id': job.build_id,
                    'project_id': job.project_id,
                    'reason': 'timeout',
                })

                db.session.flush()
            if step.status != Status.in_progress:
                retry_after = QUEUED_RETRY_DELAY
            else:
                retry_after = None
            raise sync_job_step.NotFinished(retry_after=retry_after)

        # ignore any 'failures' if it's aborted
        if step.result == Result.aborted:
            return

        try:
            record_coverage_stats(step)
        except Exception:
            current_app.logger.exception(
                'Failed recording coverage stats for step %s', step.id)

        missing_tests = is_missing_tests(step, jobplan)

        try_create(ItemStat,
                   where={
                       'item_id': step.id,
                       'name': 'tests_missing',
                   },
                   defaults={'value': int(missing_tests)})

        if missing_tests:
            if step.result != Result.failed:
                step.result = Result.failed
                db.session.add(step)

            try_create(FailureReason, {
                'step_id': step.id,
                'job_id': step.job_id,
                'build_id': step.job.build_id,
                'project_id': step.project_id,
                'reason': 'missing_tests',
            })
            db.session.commit()

        db.session.flush()

        if has_test_failures(step):
            if step.result != Result.failed:
                step.result = Result.failed
                db.session.add(step)

            try_create(FailureReason, {
                'step_id': step.id,
                'job_id': step.job_id,
                'build_id': step.job.build_id,
                'project_id': step.project_id,
                'reason': 'test_failures',
            })
            db.session.commit()
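try_create, used throughout this example, is another external helper. A plausible sketch, assuming get-or-create semantics that tolerate duplicate-key races (db is the application's SQLAlchemy handle, as in the examples above):

from sqlalchemy.exc import IntegrityError

# hypothetical sketch; the real helper may differ in detail
def try_create(model, where, defaults=None):
    # build the row from the lookup keys plus any defaults
    instance = model(**dict(where, **(defaults or {})))
    db.session.add(instance)
    try:
        db.session.flush()
    except IntegrityError:
        # another worker won the race; treat as "already exists"
        db.session.rollback()
        return None
    return instance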
Example #9
def sync_job(job_id):
    with RCount('sync_job'):
        job = Job.query.get(job_id)
        if not job:
            return

        if job.status == Status.finished:
            return

        # TODO(dcramer): we make an assumption that there is a single step
        jobplan, implementation = JobPlan.get_build_step_for_job(job_id=job.id)

        try:
            implementation.update(job=job)

        except UnrecoverableException:
            job.status = Status.finished
            job.result = Result.aborted
            current_app.logger.exception('Unrecoverable exception syncing %s',
                                         job.id)

        all_phases = list(job.phases)

        # propagate changes to any phases as they live outside of the
        # normal synchronization routines
        sync_job_phases(job, all_phases)

        is_finished = sync_job.verify_all_children() == Status.finished
        if any(p.status != Status.finished for p in all_phases):
            is_finished = False

        job.date_started = safe_agg(
            min, (j.date_started for j in all_phases if j.date_started))

        if is_finished:
            job.date_finished = safe_agg(
                max, (j.date_finished for j in all_phases if j.date_finished))
        else:
            job.date_finished = None

        if job.date_started and job.date_finished:
            job.duration = int(
                (job.date_finished - job.date_started).total_seconds() * 1000)
        else:
            job.duration = None

        # if any phases are marked as failing, fail the build
        if any(j.result is Result.failed for j in all_phases):
            job.result = Result.failed
        # if any test cases were marked as failing, fail the build
        elif TestCase.query.filter(TestCase.result == Result.failed,
                                   TestCase.job_id == job.id).first():
            job.result = Result.failed
        # if we've finished all phases, use the best result available
        elif is_finished:
            job.result = aggregate_result((j.result for j in all_phases))
        else:
            job.result = Result.unknown

        if is_finished:
            job.status = Status.finished
        else:
            # ensure we don't set the status to finished unless it actually is
            new_status = aggregate_status((j.status for j in all_phases))
            if new_status != Status.finished:
                job.status = new_status
            elif job.status == Status.finished:
                job.status = Status.in_progress
                current_app.logger.exception(
                    'Job incorrectly marked as finished: %s', job.id)

        if db.session.is_modified(job):
            job.date_modified = datetime.utcnow()

            db.session.add(job)
            db.session.commit()

        if not is_finished:
            raise sync_job.NotFinished

        try:
            aggregate_job_stat(job, 'test_count')
            aggregate_job_stat(job, 'test_duration')
            aggregate_job_stat(job, 'test_failures')
            aggregate_job_stat(job, 'test_rerun_count')
            aggregate_job_stat(job, 'tests_missing')
            aggregate_job_stat(job, 'lines_covered')
            aggregate_job_stat(job, 'lines_uncovered')
            aggregate_job_stat(job, 'diff_lines_covered')
            aggregate_job_stat(job, 'diff_lines_uncovered')
        except Exception:
            current_app.logger.exception(
                'Failed recording aggregate stats for job %s', job.id)

        fire_signal.delay(
            signal='job.finished',
            kwargs={'job_id': job.id.hex},
        )

        if jobplan:
            queue.delay('update_project_plan_stats',
                        kwargs={
                            'project_id': job.project_id.hex,
                            'plan_id': jobplan.plan_id.hex,
                        },
                        countdown=1)
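safe_agg and aggregate_result are also external helpers. Plausible sketches, assuming safe_agg tolerates empty sequences and aggregate_result picks the most severe Result across children (the severity ordering below is a guess; Result is the enum used throughout these examples):

def safe_agg(func, seq, default=None):
    # apply func (min/max above) while tolerating an empty sequence
    seq = list(seq)
    return func(seq) if seq else default

# assumed severity ordering, worst first
_RESULT_PRIORITY = (Result.failed, Result.aborted, Result.unknown, Result.passed)

def aggregate_result(results):
    seen = set(results)
    for result in _RESULT_PRIORITY:
        if result in seen:
            return result
    return Result.unknown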
Example #10
def sync_build(build_id):
    """
    Synchronizing the build happens continuously until all jobs have reported in
    as finished or have failed/aborted.

    This task is responsible for:
    - Checking in with jobs
    - Aborting/retrying them if they're beyond limits
    - Aggregating the results from jobs into the build itself
    """
    with RCount('sync_build'):
        build = Build.query.get(build_id)
        if not build:
            return

        if build.status == Status.finished:
            return

        all_jobs = list(Job.query.filter(
            Job.build_id == build_id,
        ))

        is_finished = sync_build.verify_all_children() == Status.finished
        if any(p.status != Status.finished for p in all_jobs):
            is_finished = False

        build.date_started = safe_agg(
            min, (j.date_started for j in all_jobs if j.date_started))

        if is_finished:
            build.date_finished = safe_agg(
                max, (j.date_finished for j in all_jobs if j.date_finished))
        else:
            build.date_finished = None

        if build.date_started and build.date_finished:
            build.duration = int((build.date_finished - build.date_started).total_seconds() * 1000)
        else:
            build.duration = None

        if any(j.result is Result.failed for j in all_jobs):
            build.result = Result.failed
        elif is_finished:
            build.result = aggregate_result((j.result for j in all_jobs))
        else:
            build.result = Result.unknown

        if is_finished:
            build.status = Status.finished
        else:
            # ensure we don't set the status to finished unless it actually is
            new_status = aggregate_status((j.status for j in all_jobs))
            if new_status != Status.finished:
                build.status = new_status

        if db.session.is_modified(build):
            build.date_modified = datetime.utcnow()
            db.session.add(build)
            db.session.commit()

        if not is_finished:
            raise sync_build.NotFinished

        try:
            aggregate_build_stat(build, 'test_count')
            aggregate_build_stat(build, 'test_duration')
            aggregate_build_stat(build, 'test_failures')
            aggregate_build_stat(build, 'test_rerun_count')
            aggregate_build_stat(build, 'tests_missing')
            aggregate_build_stat(build, 'lines_covered')
            aggregate_build_stat(build, 'lines_uncovered')
            aggregate_build_stat(build, 'diff_lines_covered')
            aggregate_build_stat(build, 'diff_lines_uncovered')
        except Exception:
            current_app.logger.exception('Failed recording aggregate stats for build %s', build.id)

        fire_signal.delay(
            signal='build.finished',
            kwargs={'build_id': build.id.hex},
        )

        queue.delay('update_project_stats', kwargs={
            'project_id': build.project_id.hex,
        }, countdown=1)