Example #1
    def process_aggregates(self, data, many, **kwargs):
        if not many:
            items = [data]
        else:
            items = data
        if "origin_build" in self.exclude or "build" not in self.context:
            failure_origins = {}
        else:
            # TODO(dcramer): technically this could support multiple builds,
            # or identify the referenced build
            failure_origins = find_failure_origins(
                self.context["build"],
                [
                    i.hash
                    for i in items
                    if any(Result(int(e[3])) == Result.failed for e in i.runs)
                ],
            )

        if "build" in self.exclude or not hasattr(items[0], "build_id"):
            builds = {}
        else:
            builds = {
                b.id: b
                for b in Build.query.filter(
                    Build.id.in_(i.build_id for i in items))
            }

        if failure_origins:
            origin_builds = {
                b.id: b
                for b in Build.query.filter(
                    Build.id.in_(frozenset(failure_origins.values())))
            }
        else:
            origin_builds = {}

        results = [
            {
                "hash": i.hash,
                "name": i.name,
                "runs": [
                    {
                        "id": UUID(e[0]),
                        "job_id": UUID(e[1]),
                        "duration": int(e[2]),
                        "result": Result(int(e[3])),
                    }
                    for e in i.runs
                ],
                "build": builds.get(getattr(i, "build_id", None)),
                "origin_build": origin_builds.get(failure_origins.get(i.hash)),
                "result": aggregate_result(Result(int(e[3])) for e in i.runs),
            }
            for i in items
        ]
        if many:
            return results
        return results[0]
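
All of these examples lean on an aggregate_result helper that reduces a
collection of per-run results to a single value. The project's actual
implementation isn't shown here; the following is a minimal sketch, assuming
a worst-result-wins ordering, with a hypothetical Result enum whose member
values are invented for illustration:

from enum import Enum


class Result(Enum):
    # hypothetical severity ordering; the real enum's members may differ
    unknown = 0
    passed = 1
    errored = 2
    failed = 3


def aggregate_result(results):
    # worst-result-wins: a single failure dominates any number of passes
    return max(results, key=lambda r: r.value, default=Result.unknown)
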
Example #2
from uuid import UUID

# Note: Build, Job, Status, Result, db, safe_agg, aggregate_result,
# aggregate_status, and STATS are project-internal names assumed to be
# imported from the surrounding application.
def aggregate_build_stats(build_id: UUID):
    """
    Updates various denormalized / aggregate attributes on Build per its
    jobs. These attributes include start and completion dates, as well as
    the status and result.
    """
    # now we pull in the entirety of the build's data to aggregate state upward
    build = Build.query.get(build_id)
    if not build:
        raise ValueError(
            "Unable to find build with id = {}".format(build_id))

    # materialize once so repeated iteration doesn't re-run the query
    job_list = list(Job.query.filter(Job.build_id == build.id))

    # the build is only finished once every job has finished
    is_finished = all(j.status == Status.finished for j in job_list)

    # ensure build's dates are reflective of jobs
    build.date_started = safe_agg(
        min, (j.date_started for j in job_list if j.date_started)
    )

    if is_finished:
        build.date_finished = safe_agg(
            max, (j.date_finished for j in job_list if j.date_finished)
        )
    else:
        build.date_finished = None

    # if there's any failure, the build failed
    if any(j.result is Result.failed for j in job_list):
        build.result = Result.failed
    # else, if we're finished, we can aggregate from results
    elif is_finished:
        build.result = aggregate_result((j.result for j in job_list))
    # we should never get here as long as we've got jobs and correct data
    else:
        build.result = Result.unknown

    if is_finished:
        build.status = Status.finished
    else:
        # ensure we don't set the status to finished unless it actually is
        new_status = aggregate_status((j.status for j in job_list))
        if build.status != new_status:
            build.status = new_status

    if db.session.is_modified(build):
        db.session.add(build)
        db.session.commit()

    # we don't bother aggregating stats unless we're finished
    if is_finished:
        for stat in STATS:
            aggregate_stat_for_build(build, stat)
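
Both this example and example #5 use safe_agg to compute min/max dates
without blowing up on an empty sequence (e.g. a build whose jobs have no
date_started yet). A plausible reconstruction, not the project's code:

def safe_agg(func, iterable, default=None):
    # apply an aggregate such as min or max, but return a default instead
    # of raising ValueError when the iterable is empty
    items = list(iterable)
    return func(items) if items else default
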
Example #3
    def process_aggregates(self, data):
        return {
            "name": data.name,
            "runs": [
                {
                    "id": UUID(e[0]),
                    "job_id": UUID(e[1]),
                    "duration": int(e[2]),
                    "result": Result(int(e[3])),
                }
                for e in data.runs
            ],
            "result": aggregate_result(Result(int(e[3])) for e in data.runs),
        }
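
The serializer above indexes each run positionally, which implies data.runs
is a sequence of (run_id, job_id, duration, result_code) string tuples. A
made-up input of that shape, using invented values:

from types import SimpleNamespace
from uuid import uuid4

sample = SimpleNamespace(
    name="test_login",
    runs=[(str(uuid4()), str(uuid4()), "1250", "3")],
)
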
Example #4
    def process_aggregates(self, data):
        return {
            'name': data.name,
            'runs': [
                {
                    'id': UUID(e[0]),
                    'job_id': UUID(e[1]),
                    'duration': int(e[2]),
                    'result': Result(int(e[3])),
                }
                for e in data.runs
            ],
            'result': aggregate_result(Result(int(e[3])) for e in data.runs),
        }
Example #5
def aggregate_build_stats(build_id: UUID):
    """
    Updates various denormalized / aggregate attributes on Build per its
    jobs. These attributes include start and completion dates, as well as
    the status and result.
    """
    # now we pull in the entirety of the build's data to aggregate state upward
    lock_key = "build:{build_id}".format(build_id=build_id)
    with redis.lock(lock_key):
        build = (
            Build.query.unrestricted_unsafe()
            .with_for_update(nowait=True)
            .get(build_id)
        )
        if not build:
            raise ValueError(
                "Unable to find build with id = {}".format(build_id))

        auth.set_current_tenant(
            auth.RepositoryTenant(repository_id=build.repository_id))

        record_coverage_stats(build.id)

        # materialize once: repeated iteration won't re-run the query, and
        # the emptiness check below works on a list (a Query is always truthy)
        job_list = list(Job.query.filter(Job.build_id == build.id))

        was_finished = build.status == Status.finished
        is_finished = all(p.status == Status.finished for p in job_list)

        # ensure build's dates are reflective of jobs
        build.date_started = safe_agg(
            min, (j.date_started for j in job_list if j.date_started)
        )

        if is_finished:
            build.date_finished = safe_agg(
                max, (j.date_finished for j in job_list if j.date_finished))
        else:
            build.date_finished = None

        # if there's any failure, the build failed
        if any(j.result is Result.failed for j in job_list
               if not j.allow_failure):
            build.result = Result.failed
        # else, if we're finished, we can aggregate from results
        elif is_finished:
            if not job_list:
                build.result = Result.errored
            elif not any(j for j in job_list if not j.allow_failure):
                build.result = Result.passed
            else:
                build.result = aggregate_result(
                    (j.result for j in job_list if not j.allow_failure))
        # we should never get here as long as we've got jobs and correct data
        else:
            build.result = Result.unknown

        if is_finished:
            build.status = Status.finished
        else:
            # ensure we don't set the status to finished unless it actually is
            new_status = aggregate_status((j.status for j in job_list))
            if build.status != new_status:
                build.status = new_status

        db.session.add(build)
        db.session.commit()

        # we don't bother aggregating stats unless we're finished
        if build.status == Status.finished and not was_finished:
            for stat in AGGREGATED_BUILD_STATS:
                aggregate_stat_for_build(build, stat)
            db.session.commit()
            send_build_notifications.delay(build_id=build.id)
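
Example #5 wraps the whole aggregation in a Redis lock and additionally takes
a row-level lock via with_for_update(nowait=True), so two workers can neither
start aggregating the same build concurrently nor clobber each other's
commit. A minimal sketch of that double-locking pattern using redis-py; the
client setup, key format, and timeout are assumptions:

import redis

client = redis.Redis()


def with_build_lock(build_id, fn):
    # the Redis lock serializes workers before they touch the database;
    # SELECT ... FOR UPDATE (NOWAIT) then guards the row itself
    with client.lock("build:{}".format(build_id), timeout=60):
        return fn()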