Example #1
def test_tenant_queries_repo_with_tenant(default_repo):
    auth.set_current_tenant(auth.Tenant(access={default_repo.id: Permission.read}))

    assert list(Repository.query.all()) == [default_repo]
Example #2
def test_tenant_allows_public_repos(default_repo):
    auth.set_current_tenant(auth.Tenant())
    repo = factories.RepositoryFactory(name="public", public=True)
    assert list(Repository.query.all()) == [repo]
Example #3
def test_tenant_limits_to_access(default_repo):
    auth.set_current_tenant(
        auth.Tenant(access={default_repo.id: Permission.read}))
    build = factories.BuildFactory(repository=default_repo)
    assert list(Build.query.all()) == [build]
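
The three tests above rely on set_current_tenant installing the tenant somewhere request-local, so that Repository.query and Build.query can filter rows implicitly. A minimal sketch of that pattern, assuming a thread-local store (only Tenant, Permission, and set_current_tenant appear in the examples; everything else here is hypothetical):

import threading

_state = threading.local()

class Tenant:
    def __init__(self, access=None):
        # maps repository id -> Permission; an empty Tenant can still
        # see repositories flagged as public (see Example #2)
        self.access = access or {}

def set_current_tenant(tenant):
    # stash the tenant for the current thread; tenant-aware query
    # classes read it back when building their WHERE clauses
    _state.tenant = tenant

def get_current_tenant():
    return getattr(_state, "tenant", None)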
Example #4
def sync_repo(repo_id, max_log_passes=10, force=False, time_limit=300):
    auth.set_current_tenant(auth.Tenant(access={repo_id: Permission.admin}))

    repo = Repository.query.get(repo_id)
    if not repo:
        current_app.logger.error("Repository %s not found", repo_id)
        return

    if (not force and repo.last_update_attempt and repo.last_update_attempt >
        (timezone.now() - current_app.config["REPO_SYNC_INTERVAL"])):
        current_app.logger.warning(
            "Repository %s was synced recently, refusing to sync", repo.id)
        return

    try:
        vcs = repo.get_vcs()
    except UnknownRepositoryBackend:
        current_app.logger.warning("Repository %s has no VCS backend set",
                                   repo.id)
        return

    if repo.status != RepositoryStatus.active:
        current_app.logger.info("Repository %s is not active", repo.id)
        return

    Repository.query.filter(Repository.id == repo.id).update(
        {"last_update_attempt": timezone.now()})
    db.session.commit()

    try:
        if vcs.exists():
            vcs.update(allow_cleanup=True)
        else:
            vcs.clone()
    except InvalidPublicKey:
        # TODO(dcramer): this is a quick short-circuit for repo syncing, which will
        # at least prevent workers from endlessly querying repos which were revoked.
        # Ideally this would be implemented in a larger number of places (maybe via
        # a context manager?)
        repo.status = RepositoryStatus.inactive
        ItemOption.query.filter(
            ItemOption.item_id == repo.id,
            ItemOption.name == "auth.private-key").delete()
        db.session.add(repo)
        db.session.commit()
        return

    # TODO(dcramer): this doesn't collect commits in non-default branches
    might_have_more = True
    parent = None
    while might_have_more and max_log_passes:
        might_have_more = False
        for commit in vcs.log(parent=parent):
            revision, created = commit.save(repo)
            db.session.commit()
            if not created:
                break

            current_app.logger.info("Created revision {}".format(repo.id))
            might_have_more = True
            parent = commit.sha
        max_log_passes -= 1

    Repository.query.filter(Repository.id == repo.id).update(
        {"last_update": datetime.now(timezone.utc)})
    db.session.commit()

    # is there more data to sync?
    return might_have_more
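
Because sync_repo returns might_have_more, a caller that wants a full backfill can keep invoking it until it reports nothing new. An illustrative driver (sync_repo_until_drained is hypothetical; force=True bypasses the REPO_SYNC_INTERVAL guard between passes):

def sync_repo_until_drained(repo_id):
    # keep taking log passes until sync_repo reports no more commits
    while sync_repo(repo_id, force=True):
        pass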
Example #5
def test_tenant_allows_public_repos(default_repo):
    auth.set_current_tenant(
        auth.Tenant(access={default_repo.id: Permission.read}))
    repo = factories.RepositoryFactory(name="public", public=True)
    build = factories.BuildFactory(repository=repo)
    assert list(Build.query.all()) == [build]
Example #6
def test_tenant_allows_public_repos_with_access(default_repo):
    auth.set_current_tenant(auth.Tenant())
    repo = factories.RepositoryFactory(name="public", public=True)
    build = factories.BuildFactory(repository=repo)
    assert list(Build.query.all()) == [build]
Example #7
def aggregate_build_stats(build_id: UUID):
    """
    Updates various denormalized / aggregate attributes on Build per its
    jobs. These attributes include start and completion dates, as well as
    the status and result.
    """
    # now we pull in the entirety of the build's data to aggregate state upward
    lock_key = 'build:{build_id}'.format(build_id=build_id)
    with redis.lock(lock_key):
        build = Build.query.unrestricted_unsafe().with_for_update(
            nowait=True).get(build_id)
        if not build:
            raise ValueError(
                'Unable to find build with id = {}'.format(build_id))

        auth.set_current_tenant(
            auth.Tenant(repository_ids=[build.repository_id]))

        record_coverage_stats(build.id)

        # materialize the query so repeated iteration and the emptiness
        # check below behave as intended (a Query object is always truthy)
        job_list = list(Job.query.filter(Job.build_id == build.id))

        was_finished = build.status == Status.finished
        is_finished = all(j.status == Status.finished for j in job_list)

        # ensure build's dates are reflective of jobs
        build.date_started = safe_agg(
            min, (j.date_started for j in job_list if j.date_started))

        if is_finished:
            build.date_finished = safe_agg(
                max, (j.date_finished for j in job_list if j.date_finished))
        else:
            build.date_finished = None

        # if there's any failure, the build failed
        if any(j.result is Result.failed for j in job_list
               if not j.allow_failure):
            build.result = Result.failed
        # else, if we're finished, we can aggregate from results
        elif is_finished:
            if not job_list:
                build.result = Result.errored
            elif not any(j for j in job_list if not j.allow_failure):
                build.result = Result.passed
            else:
                build.result = aggregate_result(
                    (j.result for j in job_list if not j.allow_failure))
        # we should never get here as long as we've got jobs and correct data
        else:
            build.result = Result.unknown

        if is_finished:
            build.status = Status.finished
        else:
            # ensure we don't set the status to finished unless it actually is
            new_status = aggregate_status((j.status for j in job_list))
            if build.status != new_status:
                build.status = new_status

        db.session.add(build)
        db.session.commit()

        # we don't bother aggregating stats unless we're finished
        if build.status == Status.finished and not was_finished:
            for stat in AGGREGATED_BUILD_STATS:
                aggregate_stat_for_build(build, stat)
            db.session.commit()
            send_build_notifications.delay(build_id=build.id)
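
The date handling above leans on safe_agg, which must tolerate an empty iterable (a build whose jobs have no dates yet). A plausible implementation, reconstructed from the call sites here rather than taken from the project:

def safe_agg(func, values, default=None):
    # apply func (min or max above) to values, returning default instead
    # of raising ValueError when the iterable is empty
    values = list(values)
    return func(values) if values else default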
Example #8
def test_full(default_job):
    auth.set_current_tenant(
        auth.Tenant(repository_ids=[default_job.repository_id]))

    results = [
        ZeusTestResult(
            job=default_job,
            name='test_bar',
            package='tests.changes.handlers.test_xunit',
            result=Result.failed,
            message='collection failed',
            duration=156,
            artifacts=[{
                'name': 'artifact_name',
                'type': 'text',
                'base64': b64encode('sample content'.encode('utf-8')),
            }],
        ),
        ZeusTestResult(
            job=default_job,
            name='test_foo',
            package='tests.changes.handlers.test_coverage',
            result=Result.passed,
            message='foobar failed',
            duration=12,
        ),
    ]
    manager = ZeusTestResultManager(default_job)
    manager.save(results)

    testcase_list = sorted(ZeusTestCase.query.all(), key=lambda x: x.name)

    assert len(testcase_list) == 2

    for test in testcase_list:
        assert test.job_id == default_job.id

    assert testcase_list[0].name == 'tests.changes.handlers.test_coverage.test_foo'
    assert testcase_list[0].result == Result.passed
    assert testcase_list[0].message == 'foobar failed'
    assert testcase_list[0].duration == 12

    assert testcase_list[1].name == 'tests.changes.handlers.test_xunit.test_bar'
    assert testcase_list[1].result == Result.failed
    assert testcase_list[1].message == 'collection failed'
    assert testcase_list[1].duration == 156

    artifacts = list(Artifact.query.unrestricted_unsafe().filter(
        Artifact.testcase_id == testcase_list[1].id))
    assert len(artifacts) == 1
    assert artifacts[0].file.get_file().read() == b'sample content'

    teststat = ItemStat.query.filter(
        ItemStat.name == 'tests.count',
        ItemStat.item_id == default_job.id,
    )[0]
    assert teststat.value == 2

    teststat = ItemStat.query.filter(
        ItemStat.name == 'tests.failures',
        ItemStat.item_id == default_job.id,
    )[0]
    assert teststat.value == 1

    teststat = ItemStat.query.filter(
        ItemStat.name == 'tests.duration',
        ItemStat.item_id == default_job.id,
    )[0]
    assert teststat.value == 168
Example #9
def process_travis_webhook(hook_id: str, payload: dict, timestamp_ms: int):
    # TODO(dcramer): we want to utilize timestamp_ms to act as a version and
    # ensure we don't process older updates after newer updates are already present
    with nplusone.ignore("eager_load"):
        hook = (Hook.query.unrestricted_unsafe().options(
            joinedload("repository")).get(hook_id))

    auth.set_current_tenant(
        auth.Tenant(access={hook.repository_id: Permission.admin}))

    data = {"ref": payload["commit"], "url": payload["build_url"]}

    domain = urlparse(data["url"]).netloc

    try:
        if payload["pull_request"]:
            data["label"] = "PR #{} - {}".format(
                payload["pull_request_number"], payload["pull_request_title"])

            upsert_change_request(
                repository=hook.repository,
                provider="github",
                external_id=str(payload["pull_request_number"]),
                data={
                    "parent_revision_sha": payload["base_commit"],
                    "head_revision_sha": payload["head_commit"],
                    "message": payload["pull_request_title"],
                },
            )

        response = upsert_build(hook=hook,
                                external_id=str(payload["id"]),
                                data=data)
        build = Build.query.get(response.json()["id"])
        for job_payload in payload["matrix"]:
            upsert_job(
                build=build,
                hook=hook,
                external_id=str(job_payload["id"]),
                data={
                    "status": ("finished" if job_payload["status"] is not None
                               else "in_progress"),
                    "result": get_result(job_payload["state"]),
                    "allow_failure": bool(job_payload["allow_failure"]),
                    "label": get_job_label(job_payload),
                    "url": "https://{domain}/{owner}/{name}/jobs/{job_id}".format(
                        domain=domain,
                        owner=payload["repository"]["owner_name"],
                        name=payload["repository"]["name"],
                        job_id=job_payload["id"],
                    ),
                    "started_at": (dateutil.parser.parse(job_payload["started_at"])
                                   if job_payload["started_at"] else None),
                    "finished_at": (dateutil.parser.parse(job_payload["finished_at"])
                                    if job_payload["finished_at"] else None),
                },
            )
    except ApiError:
        current_app.logger.error("travis.webhook-unexpected-error",
                                 exc_info=True)
        raise
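
For reference, the fields this handler reads imply a Travis payload shaped roughly like the one below; the values are illustrative, not a captured webhook:

example_payload = {
    "id": 1001,
    "commit": "0f1e2d3c",
    "build_url": "https://travis-ci.org/acme/widget/builds/1001",
    "pull_request": True,
    "pull_request_number": 42,
    "pull_request_title": "Fix flaky test",
    "base_commit": "aaaa1111",
    "head_commit": "bbbb2222",
    "repository": {"owner_name": "acme", "name": "widget"},
    "matrix": [{
        "id": 2001,
        "status": 0,  # non-None is treated as "finished"
        "state": "passed",
        "allow_failure": False,
        "started_at": "2018-01-01T00:00:00Z",
        "finished_at": "2018-01-01T00:05:00Z",
    }],
}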
Example #10
def test_full(default_job):
    auth.set_current_tenant(
        auth.Tenant(access={default_job.repository_id: Permission.read}))

    results = [
        ZeusTestResult(
            job=default_job,
            name="test_bar",
            package="tests.changes.handlers.test_xunit",
            result=Result.failed,
            message="collection failed",
            duration=156,
            artifacts=[{
                "name": "artifact_name",
                "type": "text",
                "base64": b64encode("sample content".encode("utf-8")),
            }],
        ),
        ZeusTestResult(
            job=default_job,
            name="test_foo",
            package="tests.changes.handlers.test_coverage",
            result=Result.passed,
            message="foobar failed",
            duration=12,
        ),
    ]
    manager = ZeusTestResultManager(default_job)
    manager.save(results)

    testcase_list = sorted(ZeusTestCase.query.all(), key=lambda x: x.name)

    assert len(testcase_list) == 2

    for test in testcase_list:
        assert test.job_id == default_job.id

    assert testcase_list[0].name == "tests.changes.handlers.test_coverage.test_foo"
    assert testcase_list[0].result == Result.passed
    assert testcase_list[0].message == "foobar failed"
    assert testcase_list[0].duration == 12

    assert testcase_list[1].name == "tests.changes.handlers.test_xunit.test_bar"
    assert testcase_list[1].result == Result.failed
    assert testcase_list[1].message == "collection failed"
    assert testcase_list[1].duration == 156

    artifacts = list(Artifact.query.unrestricted_unsafe().filter(
        Artifact.testcase_id == testcase_list[1].id))
    assert len(artifacts) == 1
    assert artifacts[0].file.get_file().read() == b"sample content"

    teststat = ItemStat.query.filter(ItemStat.name == "tests.count",
                                     ItemStat.item_id == default_job.id)[0]
    assert teststat.value == 2

    teststat = ItemStat.query.filter(ItemStat.name == "tests.failures",
                                     ItemStat.item_id == default_job.id)[0]
    assert teststat.value == 1

    teststat = ItemStat.query.filter(ItemStat.name == "tests.duration",
                                     ItemStat.item_id == default_job.id)[0]
    assert teststat.value == 168
Example #11
def test_failure_origin(default_repo):
    auth.set_current_tenant(
        auth.Tenant(access={default_repo.id: Permission.read}))

    new_revision = factories.RevisionFactory(repository=default_repo)
    new_source = factories.SourceFactory(revision=new_revision)
    new_build = factories.BuildFactory(source=new_source, failed=True)
    new_job = factories.JobFactory(build=new_build, failed=True)
    new_testcase = factories.TestCaseFactory(job=new_job, failed=True)
    new_testcase2 = factories.TestCaseFactory(job=new_job, passed=True)

    old_revision = factories.RevisionFactory(
        repository=default_repo,
        date_created=new_revision.date_created - timedelta(hours=1),
    )
    old_source = factories.SourceFactory(
        revision=old_revision,
        date_created=new_source.date_created - timedelta(hours=1),
    )
    old_build = factories.BuildFactory(
        source=old_source,
        failed=True,
        date_created=new_build.date_created - timedelta(hours=1),
    )
    old_job = factories.JobFactory(
        build=old_build,
        failed=True,
        date_created=new_job.date_created - timedelta(hours=1),
    )
    factories.TestCaseFactory(job=old_job, failed=True, name=new_testcase.name)

    oldold_revision = factories.RevisionFactory(
        repository=default_repo,
        date_created=old_revision.date_created - timedelta(hours=1),
    )
    oldold_source = factories.SourceFactory(
        revision=oldold_revision,
        date_created=old_source.date_created - timedelta(hours=1),
    )
    factories.BuildFactory(
        source=oldold_source,
        passed=True,
        date_created=old_build.date_created - timedelta(hours=1),
    )

    schema = AggregateTestCaseSummarySchema(many=True,
                                            strict=True,
                                            context={"build": new_build})
    result = schema.dump([
        AggregateTestCase(
            hash=new_testcase.hash,
            name=new_testcase.name,
            runs=[[
                str(new_testcase.id),
                str(new_testcase.job_id),
                new_testcase.duration,
                int(new_testcase.result),
            ]],
        ),
        AggregateTestCase(
            hash=new_testcase2.hash,
            name=new_testcase2.name,
            runs=[[
                str(new_testcase2.id),
                str(new_testcase2.job_id),
                new_testcase2.duration,
                int(new_testcase2.result),
            ]],
        ),
    ]).data
    assert len(result) == 2
    assert result[0]["hash"] == new_testcase.hash
    assert result[0]["name"] == new_testcase.name
    assert result[0]["origin_build"]["id"] == str(old_build.id)
    assert result[1]["hash"] == new_testcase2.hash
    assert result[1]["name"] == new_testcase2.name
    assert result[1]["origin_build"] is None
Example #12
def default_repo_write_tenant(default_repo):
    auth.set_current_tenant(
        auth.Tenant(access={default_repo.id: Permission.write}))
Example #13
def default_repo_tenant(default_repo):
    auth.set_current_tenant(
        auth.Tenant(access={default_repo.id: Permission.read}))