def dispatch_request(self, hook_id, signature=None, *args, **kwargs) -> Response:
    """Validate an incoming webhook and dispatch it to the HTTP-method handler.

    Loads the hook (eagerly joining its repository), verifies the signature
    unless the endpoint is public, resolves a handler named after the HTTP
    method, and runs it under an admin tenant for the hook's repository.
    """
    current_app.logger.info("received webhook id=%s", hook_id)
    # joinedload avoids a second query for hook.repository; nplusone is told
    # to ignore the deliberate eager load so it does not flag it
    with nplusone.ignore("eager_load"):
        hook = Hook.query.unrestricted_unsafe().options(
            joinedload("repository")).get(hook_id)
    # fix: a missing hook previously crashed with AttributeError (HTTP 500)
    # on is_valid_signature; respond with a clean not-found instead
    if hook is None:
        current_app.logger.warning("unknown webhook id=%s", hook_id)
        return self.respond({"message": "resource not found"}, 404)
    if not self.public and not hook.is_valid_signature(signature):
        # fix: logger.warn is a deprecated alias of logger.warning
        current_app.logger.warning("invalid webhook signature id=%s", hook_id)
        return "", 403
    try:
        method = getattr(self, request.method.lower())
    except AttributeError:
        current_app.logger.warning("invalid webhook method id=%s, method=%s",
                                   hook_id, request.method)
        return self.respond({"message": "resource not found"}, 405)
    # webhook handlers act with admin rights on the hook's repository
    auth.set_current_tenant(
        auth.RepositoryTenant(repository_id=hook.repository_id,
                              permission=Permission.admin))
    try:
        resp = method(hook, *args, **kwargs)
    except ApiError as exc:
        return self.respond(exc.json, exc.code)
    if isinstance(resp, Response):
        return resp
    return self.respond(resp)
def resolve_ref_for_change_request(change_request_id: UUID):
    """Resolve a change request's parent/head refs into revision SHAs.

    Holds a redis lock so concurrent workers do not resolve the same change
    request twice. Raises ValueError if the change request does not exist.
    """
    lock_key = f"resolve-cr-ref:{change_request_id}"
    with redis.lock(lock_key, timeout=60.0, nowait=True):
        cr = ChangeRequest.query.unrestricted_unsafe().get(change_request_id)
        if not cr:
            raise ValueError(
                "Unable to find change request with id = {}".format(
                    change_request_id))
        auth.set_current_tenant(
            auth.RepositoryTenant(repository_id=cr.repository_id))
        if not cr.parent_revision_sha and cr.parent_ref:
            # fix: the original wrapped this call in
            # `except InvalidPublicKey: raise`, which is a no-op — the
            # exception propagates identically without the try/except
            revision = revisions.identify_revision(cr.repository,
                                                   cr.parent_ref,
                                                   with_vcs=True)
            cr.parent_revision_sha = revision.sha
            db.session.add(cr)
            db.session.commit()
        if not cr.head_revision_sha and cr.head_ref:
            revision = revisions.identify_revision(cr.repository, cr.head_ref,
                                                   with_vcs=True)
            cr.head_revision_sha = revision.sha
            # backfill authorship from the resolved head revision when absent
            if not cr.authors and revision.authors:
                cr.authors = revision.authors
            db.session.add(cr)
            db.session.commit()
def delete_repo(repo_id: UUID):
    """Permanently delete an inactive repository plus its stats and options."""
    auth.set_current_tenant(auth.RepositoryTenant(repo_id, Permission.admin))
    repo = Repository.query.unrestricted_unsafe().get(repo_id)
    if repo is None:
        current_app.logger.error("Repository %s not found", repo_id)
        return
    if repo.status != RepositoryStatus.inactive:
        current_app.logger.error("Repository %s not marked as inactive", repo_id)
        return

    # remove abstract entities attached directly to the repository
    ItemOption.query.filter_by(item_id=repo.id).delete()
    ItemStat.query.filter_by(item_id=repo.id).delete()

    # remove abstract entities attached to the repository's builds and jobs
    for model in (ItemStat, ItemOption):
        for owner in (Build, Job):
            owned_ids = db.session.query(owner.id).filter(
                owner.repository_id == repo.id).subquery()
            model.query.filter(model.item_id.in_(owned_ids)).delete(
                synchronize_session=False)

    db.session.delete(repo)
    db.session.commit()
def test_failing_tests_duplicate_reason(mocker, db_session, default_source):
    """Aggregation must not duplicate an existing failing_tests reason."""
    auth.set_current_tenant(
        auth.RepositoryTenant(repository_id=default_source.repository_id))
    build = factories.BuildFactory(source=default_source, in_progress=True)
    db_session.add(build)
    job = factories.JobFactory(build=build, passed=True)
    db_session.add(job)
    factories.TestCaseFactory(job=job, failed=True)
    # seed the reason that aggregation would otherwise insert itself
    db_session.add(
        FailureReason(
            reason=FailureReason.Reason.failing_tests,
            job_id=job.id,
            repository_id=job.repository_id,
        ))

    aggregate_build_stats_for_job(job.id)

    assert job.result == Result.failed
    reasons = FailureReason.query.filter(FailureReason.job_id == job.id).all()
    assert len(reasons) == 1
    assert reasons[0].reason == FailureReason.Reason.failing_tests
async def load_all(repos=3, commits_per_repo=10):
    """Seed mock repositories with revisions and builds for all known users."""
    db_pool = await create_db_pool()
    user_ids = [uid for uid, in db.session.query(models.User.id)]
    for _ in range(repos):
        repo = await mock_single_repository(user_ids=user_ids)
        async with db_pool.acquire() as conn:
            vcs = await get_vcs(conn, repo.id)
            auth.set_current_tenant(
                auth.RepositoryTenant(repository_id=repo.id))
            file_list = find_files_in_repo(vcs)
            await load_revisions(vcs, repo)
            # most recent revisions first; builds chain onto each other
            recent = (
                models.Revision.query.unrestricted_unsafe()
                .filter(models.Revision.repository_id == repo.id)
                .order_by(models.Revision.date_created.desc())
                .limit(commits_per_repo)
            )
            revision_iter = iter(list(recent))
            parent_revision = None
            for _ in range(commits_per_repo):
                revision = next(revision_iter)
                build = await mock_build(
                    repo,
                    parent_revision=parent_revision,
                    revision=revision,
                    user_ids=user_ids,
                    file_list=file_list,
                )
                parent_revision = build.revision
def process_artifact(artifact_id, manager=None, force=False, **kwargs):
    """Process one uploaded artifact and mark it finished.

    Idempotent: already-finished artifacts are skipped unless ``force`` is
    set. Artifacts for aborted jobs are closed without processing. Always
    schedules job-level stat aggregation at the end.
    """
    artifact = Artifact.query.unrestricted_unsafe().get(artifact_id)
    if artifact is None:
        current_app.logger.error("Artifact %s not found", artifact_id)
        return
    if artifact.status == Status.finished and not force:
        current_app.logger.info(
            "Skipping artifact processing (%s) - already marked as finished",
            artifact_id,
        )
        return

    artifact.status = Status.in_progress
    artifact.date_started = timezone.now()
    db.session.add(artifact)
    db.session.flush()

    auth.set_current_tenant(
        auth.RepositoryTenant(repository_id=artifact.repository_id))

    job = Job.query.get(artifact.job_id)
    if job.result == Result.aborted:
        # nothing to process for aborted jobs; just close the artifact out
        current_app.logger.info(
            "Skipping artifact processing (%s) - Job aborted", artifact_id)
        artifact.status = Status.finished
        db.session.add(artifact)
        db.session.commit()
        return

    if artifact.file:
        if manager is None:
            manager = default_manager
        try:
            # nested transaction so a failed processor rolls back its own
            # writes without losing the status bookkeeping below
            with db.session.begin_nested():
                manager.process(artifact)
        except Exception:
            # fix: the original message said "artifact %s" but passed
            # artifact.job_id; log both ids so the message is truthful
            current_app.logger.exception(
                "Unrecoverable exception processing artifact %s (job %s): %s",
                artifact.id,
                artifact.job_id,
                artifact,
            )
    else:
        current_app.logger.info(
            "Skipping artifact processing (%s) due to missing file",
            artifact_id)

    artifact.status = Status.finished
    artifact.date_finished = timezone.now()
    db.session.add(artifact)
    db.session.commit()

    # we always aggregate results to avoid locking here
    aggregate_build_stats_for_job.delay(job_id=job.id)
def ssh_connect(args, repository):
    """Spawn an ssh process for a repository using its stored private key.

    ``repository`` may be either a raw id or a ``provider/owner/name`` path.
    Exits the process with ssh's exit code (or 1 if the repo is unknown).
    """
    if "/" in repository:
        r_provider, r_owner, r_name = repository.split("/", 2)
        repo = Repository.query.unrestricted_unsafe().filter(
            Repository.provider == RepositoryProvider(r_provider),
            Repository.owner_name == r_owner,
            Repository.name == r_name,
        ).first()
    else:
        repo = Repository.query.unrestricted_unsafe().get(repository)

    if repo is None:
        click.echo("Unable to find repository", err=True)
        sys.exit(1)

    auth.set_current_tenant(auth.RepositoryTenant(repo.id))

    options = dict(
        db.session.query(ItemOption.name, ItemOption.value).filter(
            ItemOption.item_id == repo.id,
            ItemOption.name.in_(["auth.private-key", "auth.private-key-file"]),
        ))

    command = [
        "ssh",
        # Not supported in all ssh client versions
        # '-oUserAuthorizedKeysFile=/dev/null',
        "-oLogLevel=ERROR",
        "-oStrictHostKeyChecking=no",
        "-oUserKnownHostsFile=/dev/null",
    ]

    tmp_file = None
    inline_key = options.get("auth.private-key")
    key_file = options.get("auth.private-key-file")
    if inline_key:
        # materialize the inline key on disk so the ssh client can read it
        tmp_file = NamedTemporaryFile(delete=False)
        tmp_file.write(inline_key.encode("utf-8"))
        tmp_file.close()
        command.append("-i{0}".format(tmp_file.name))
    elif key_file:
        command.append("-i{0}".format(key_file))

    command.append("--")
    command.extend(args)

    try:
        exit_code = subprocess.call(
            command,
            cwd=os.getcwd(),
            env=os.environ,
            stdout=sys.stdout,
            stderr=sys.stderr,
        )
    finally:
        # always clean up the on-disk key material
        if tmp_file:
            os.unlink(tmp_file.name)

    sys.exit(exit_code)
def test_sends_failure(mocker, db_session, default_source):
    """A failed build triggers exactly one email notification."""
    auth.set_current_tenant(
        auth.RepositoryTenant(repository_id=default_source.repository_id))
    email_mock = mocker.patch(
        "zeus.notifications.email.send_email_notification")
    failed_build = factories.BuildFactory(source=default_source, failed=True)
    db_session.add(failed_build)

    send_build_notifications(failed_build.id)

    email_mock.assert_called_once_with(build=failed_build)
def test_no_repo_access(
    mocker, db_session, default_user, default_repo, default_source, outbox
):
    """No mail is delivered when the user has no access to the repository."""
    auth.set_current_tenant(
        auth.RepositoryTenant(repository_id=default_source.repository_id)
    )
    failing_build = factories.BuildFactory(source=default_source, failed=True)
    db_session.add(failing_build)

    send_email_notification(failing_build)

    assert not outbox
def test_does_not_send_passing(mocker, db_session, default_source):
    """Passing builds never generate notification emails."""
    auth.set_current_tenant(
        auth.RepositoryTenant(repository_id=default_source.repository_id))
    email_mock = mocker.patch(
        "zeus.notifications.email.send_email_notification")
    green_build = factories.BuildFactory(source=default_source, passed=True)
    db_session.add(green_build)

    send_build_notifications(green_build.id)

    assert email_mock.mock_calls == []
def send_build_notifications(build_id: UUID):
    """Email out notifications for a build, but only if it finished failing."""
    build = Build.query.unrestricted_unsafe().get(build_id)
    if build is None:
        raise ValueError("Unable to find build with id = {}".format(build_id))
    auth.set_current_tenant(
        auth.RepositoryTenant(repository_id=build.repository_id))
    # double check that the build is still finished and only send when
    # its failing
    if build.result == Result.failed and build.status == Status.finished:
        email.send_email_notification(build=build)
def test_newly_unfinished_job(mocker, db_session, default_source):
    """A still-running job pulls a finished build back to in_progress."""
    auth.set_current_tenant(
        auth.RepositoryTenant(repository_id=default_source.repository_id))
    finished_build = factories.BuildFactory(source=default_source, finished=True)
    db_session.add(finished_build)
    running_job = factories.JobFactory(build=finished_build, in_progress=True)
    db_session.add(running_job)

    aggregate_build_stats(finished_build.id)

    assert finished_build.status == Status.in_progress
    assert finished_build.result == Result.unknown
def test_failure_with_allow_failure(mocker, db_session, default_source):
    """A failing job flagged allow_failure does not fail the build."""
    auth.set_current_tenant(
        auth.RepositoryTenant(repository_id=default_source.repository_id))
    build = factories.BuildFactory(source=default_source, in_progress=True)
    db_session.add(build)
    tolerated_job = factories.JobFactory(
        build=build, failed=True, allow_failure=True)
    db_session.add(tolerated_job)

    aggregate_build_stats(build.id)

    assert build.status == Status.finished
    assert build.result == Result.passed
def aggregate_build_stats_for_job(job_id: UUID):
    """
    Given a job, aggregate its data upwards into the Build.abs

    This should generally be fired upon a job's completion, or alternatively
    it can be used to repair aggregate data.
    """
    # serialize per-job aggregation via redis; the row itself is additionally
    # locked FOR UPDATE (nowait) below
    lock_key = "job:{job_id}".format(job_id=job_id)
    with redis.lock(lock_key):
        job = (Job.query.unrestricted_unsafe().with_for_update(
            nowait=True).filter(Job.id == job_id).first())
        if not job:
            raise ValueError
        auth.set_current_tenant(
            auth.RepositoryTenant(repository_id=job.repository_id))
        # we need to handle the race between when the mutations were made to <Job> and
        # when the only remaining artifact may have finished processing
        if job.status == Status.collecting_results:
            if not has_unprocessed_artifacts(job):
                # all artifacts are done: close the job out, backfilling the
                # finish timestamp if it was never set
                job.status = Status.finished
                if not job.date_finished:
                    job.date_finished = timezone.now()
                db.session.add(job)
                db.session.commit()
            else:
                # re-enqueue every pending artifact that maps onto this job
                # (matched by provider + external build/job ids)
                pending_artifact_ids = db.session.query(
                    PendingArtifact.id).filter(
                        PendingArtifact.repository_id == job.repository_id,
                        PendingArtifact.provider == job.provider,
                        PendingArtifact.external_build_id == job.build.external_id,
                        PendingArtifact.external_job_id == job.external_id,
                    )
                for pa_id in pending_artifact_ids:
                    process_pending_artifact.delay(pending_artifact_id=pa_id)
        # record any job-specific stats that might not have been taken care elsewhere
        if job.status == Status.finished:
            record_test_stats(job.id)
            record_style_violation_stats(job.id)
            record_bundle_stats(job.id)
            record_failure_reasons(job)
            db.session.commit()
    # hand off build-level aggregation under a separate per-build lock
    lock_key = "aggstatsbuild:{build_id}".format(build_id=job.build_id.hex)
    with redis.lock(lock_key):
        aggregate_build_stats.delay(build_id=job.build_id)
def resolve_ref_for_build(build_id: UUID):
    """Resolve ``build.ref`` into a revision SHA and publish the updated build.

    No-ops when the build already has a resolved SHA. An unresolvable ref
    errors the build and records a FailureReason; duplicate reasons are
    tolerated.
    """
    # redis lock guards against two workers resolving the same build
    lock_key = f"resolve-build-ref:{build_id}"
    with redis.lock(lock_key, timeout=60.0, nowait=True):
        build = Build.query.unrestricted_unsafe().get(build_id)
        if not build:
            raise ValueError(
                "Unable to find build with id = {}".format(build_id))
        if build.revision_sha:
            # already resolved; nothing to do
            return
        auth.set_current_tenant(
            auth.RepositoryTenant(repository_id=build.repository_id))
        revision: Optional[Revision] = None
        try:
            revision = revisions.identify_revision(build.repository,
                                                   build.ref,
                                                   with_vcs=True)
        except UnknownRevision:
            # the ref cannot be resolved: fail the build and record why
            build.result = Result.errored
            build.status = Status.finished
            try:
                # nested transaction so a duplicate-key failure only rolls
                # back the FailureReason insert, not the build mutations
                with db.session.begin_nested():
                    db.session.add(
                        FailureReason(
                            repository_id=build.repository_id,
                            build_id=build.id,
                            reason=FailureReason.Reason.unresolvable_ref,
                        ))
                    db.session.flush()
            except IntegrityError as exc:
                # an already-recorded reason is fine; anything else is real
                if "duplicate" not in str(exc):
                    raise
        except InvalidPublicKey:
            # NOTE(review): key failures are deliberately swallowed, leaving
            # the build unresolved rather than errored — confirm intent
            pass
        if revision:
            build.revision_sha = revision.sha
            # backfill authors and label from the revision when absent
            if not build.authors and revision.authors:
                build.authors = revision.authors
            if not build.label:
                # default label is the first line of the commit message
                build.label = revision.message.split("\n")[0]
        db.session.add(build)
        db.session.commit()
        data = build_schema.dump(build)
        publish("builds", "build.update", data)
def get(self, path: str, repo_id: UUID, **params):
    """GET an API path using a repository-scoped tenant bearer token.

    An explicit ``tenant`` keyword (possibly None) overrides the default
    repository tenant; remaining keywords become query-string parameters.
    """
    if "tenant" in params:
        tenant = params.pop("tenant", None)
    else:
        tenant = auth.RepositoryTenant(repository_id=repo_id)

    headers = {}
    if tenant:
        token = auth.generate_token(tenant).decode("utf-8")
        headers["Authorization"] = "Bearer zeus-t-{}".format(token)

    query = "&".join("{}={}".format(key, value)
                     for key, value in params.items())
    return self.client.get(
        f"{path}?repo_id={repo_id}&{query}",
        headers=headers,
    )
def test_test_stats(mocker, db_session, default_source):
    """Test counts/failures/durations aggregate per job and per build."""
    auth.set_current_tenant(
        auth.RepositoryTenant(repository_id=default_source.repository_id))
    build = factories.BuildFactory(source=default_source, in_progress=True)
    db_session.add(build)
    first_job = factories.JobFactory(build=build, passed=True)
    db_session.add(first_job)
    second_job = factories.JobFactory(build=build, passed=True)
    db_session.add(second_job)
    # "bar" appears in both jobs, so unique counts differ from raw counts
    for case in (
        factories.TestCaseFactory(job=first_job, name="foo", failed=True, duration=8),
        factories.TestCaseFactory(job=first_job, name="bar", passed=True, duration=2),
        factories.TestCaseFactory(job=second_job, name="bar", failed=True, duration=2),
    ):
        db_session.add(case)

    aggregate_build_stats_for_job(first_job.id)
    aggregate_build_stats_for_job(second_job.id)
    aggregate_build_stats(build.id)

    build_stats = {
        stat.name: stat.value
        for stat in ItemStat.query.filter(ItemStat.item_id == build.id)
    }
    assert build_stats["tests.count"] == 3
    assert build_stats["tests.count_unique"] == 2
    assert build_stats["tests.failures"] == 2
    assert build_stats["tests.failures_unique"] == 2
    assert build_stats["tests.duration"] == 12

    job_stats = {
        stat.name: stat.value
        for stat in ItemStat.query.filter(ItemStat.item_id == first_job.id)
    }
    assert job_stats["tests.count"] == 2
    assert job_stats["tests.failures"] == 1
    assert job_stats["tests.duration"] == 10
def test_coverage_stats(mocker, db_session, default_source):
    """File coverage rows sum into build-level coverage stats."""
    auth.set_current_tenant(
        auth.RepositoryTenant(repository_id=default_source.repository_id))
    build = factories.BuildFactory(source=default_source)
    db_session.add(build)
    job = factories.JobFactory(build=build, passed=True)
    db_session.add(job)
    coverage_rows = [
        factories.FileCoverageFactory(
            build=build,
            lines_covered=20,
            lines_uncovered=50,
            diff_lines_covered=5,
            diff_lines_uncovered=2,
        ),
        factories.FileCoverageFactory(
            build=build,
            lines_covered=10,
            lines_uncovered=10,
            diff_lines_covered=5,
            diff_lines_uncovered=0,
        ),
    ]
    for row in coverage_rows:
        db_session.add(row)

    aggregate_build_stats(build.id)

    wanted_names = [
        "coverage.lines_covered",
        "coverage.lines_uncovered",
        "coverage.diff_lines_covered",
        "coverage.diff_lines_uncovered",
    ]
    stats = {
        stat.name: stat.value
        for stat in ItemStat.query.filter(
            ItemStat.item_id == build.id,
            ItemStat.name.in_(wanted_names),
        )
    }
    assert stats["coverage.lines_covered"] == 30
    assert stats["coverage.lines_uncovered"] == 60
    assert stats["coverage.diff_lines_covered"] == 10
    assert stats["coverage.diff_lines_uncovered"] == 2
def test_finished_job(mocker, db_session, default_source):
    """When every job is done, the build finishes and notifications fire."""
    auth.set_current_tenant(
        auth.RepositoryTenant(repository_id=default_source.repository_id))
    build = factories.BuildFactory(source=default_source, in_progress=True)
    db_session.add(build)
    failed_job = factories.JobFactory(build=build, failed=True)
    db_session.add(failed_job)
    notify_mock = mocker.patch("zeus.tasks.send_build_notifications.delay")

    aggregate_build_stats(build.id)

    assert build.status == Status.finished
    assert build.result == Result.failed
    notify_mock.assert_called_once_with(build_id=build.id)
def send_build_notifications(build_id: UUID, time_limit=30):
    """Send failure notification emails for a finished build.

    Skips builds that are missing timing data, are not failed+finished, or
    are too old to be worth notifying about.

    NOTE(review): ``time_limit`` is unused in the body — presumably consumed
    by the task decorator; confirm before removing.
    """
    build = Build.query.unrestricted_unsafe().get(build_id)
    if not build:
        raise ValueError("Unable to find build with id = {}".format(build_id))
    # fix throughout: logger.warn is a deprecated alias of logger.warning
    if not build.date_started:
        current_app.logger.warning(
            "send_build_notifications: build %s missing date_started",
            build_id)
        return
    if not build.date_finished:
        current_app.logger.warning(
            "send_build_notifications: build %s missing date_finished",
            build_id)
        return
    auth.set_current_tenant(
        auth.RepositoryTenant(repository_id=build.repository_id))
    # double check that the build is still finished and only send when
    # its failing
    if build.result != Result.failed or build.status != Status.finished:
        current_app.logger.warning(
            "send_build_notifications: build %s not marked as failed",
            build_id)
        return
    # stale builds are not worth notifying about
    if build.date_finished < timezone.now() - timedelta(days=1):
        # fix: original message had a typo ("fimished")
        current_app.logger.warning(
            "send_build_notifications: build %s finished a long time ago",
            build_id)
        return
    if build.date_started < timezone.now() - timedelta(days=7):
        current_app.logger.warning(
            "send_build_notifications: build %s started a long time ago",
            build_id)
        return
    email.send_email_notification(build=build)
async def log(repository, parent, local, limit):
    """Print the revision log, from a local checkout or the vcs server."""
    if local:
        db_pool = await create_db_pool()
        async with db_pool.acquire() as conn:
            vcs = await get_vcs(conn, repository.id)
            results = vcs.log(parent=parent, limit=limit)
            for entry in results:
                click.echo(f"{entry.sha}\n {entry.author}")
    else:
        tenant = auth.RepositoryTenant(repository.id)
        remote_entries = vcs_client.log(
            repository.id, parent=parent, limit=limit, tenant=tenant
        )
        for entry in remote_entries:
            click.echo(
                f"{entry['sha']}\n {entry['authors'][0][0]} <{entry['authors'][0][1]}>"
            )
def test_success(
    mocker,
    db_session,
    default_user,
    default_repo,
    default_repo_access,
    default_source,
    outbox,
):
    """With repo access, a failed build emails its author."""
    auth.set_current_tenant(
        auth.RepositoryTenant(repository_id=default_source.repository_id)
    )
    broken_build = factories.BuildFactory(source=default_source, failed=True)
    db_session.add(broken_build)

    send_email_notification(broken_build)

    assert len(outbox) == 1
    message = outbox[0]
    assert message.subject == "Build Failed - getsentry/zeus #1"
    assert message.recipients == [default_user.email]
def test_record_bundle_stats(mocker, db_session, default_source):
    """Bundle asset sizes roll up into a per-job total stat."""
    auth.set_current_tenant(
        auth.RepositoryTenant(repository_id=default_source.repository_id))
    build = factories.BuildFactory(source=default_source, in_progress=True)
    db_session.add(build)
    job = factories.JobFactory(build=build, passed=True)
    db_session.add(job)
    bundle = factories.BundleFactory(job=job)
    # a second, asset-less bundle should not affect the total
    db_session.add(factories.BundleFactory(job=job))
    for asset_size in (1000, 1500):
        db_session.add(
            factories.BundleAssetFactory(bundle=bundle, size=asset_size))

    record_bundle_stats(job.id)

    job_stats = {
        stat.name: stat.value
        for stat in ItemStat.query.filter(ItemStat.item_id == job.id)
    }
    assert job_stats["bundle.total_asset_size"] == 2500
def test_disabled(
    mocker,
    db_session,
    default_user,
    default_repo,
    default_repo_access,
    default_source,
    outbox,
):
    """No email is sent when the author disabled mail.notify_author."""
    auth.set_current_tenant(
        auth.RepositoryTenant(repository_id=default_source.repository_id)
    )
    broken_build = factories.BuildFactory(source=default_source, failed=True)
    db_session.add(broken_build)
    # opt the author out of notifications
    db_session.add(
        ItemOption(item_id=default_user.id, name="mail.notify_author", value="0")
    )
    db_session.flush()

    send_email_notification(broken_build)

    assert not outbox
def load_all(repos=3, commits_per_repo=10):
    """Seed mock repositories with revisions and builds for all known users."""
    user_ids = [uid for uid, in db.session.query(models.User.id)]
    for _ in range(repos):
        repo = mock_single_repository(user_ids=user_ids)
        auth.set_current_tenant(auth.RepositoryTenant(repository_id=repo.id))
        file_list = find_files_in_repo(repo)
        load_revisions(repo)
        # most recent revisions first; builds chain onto each other
        recent = (
            models.Revision.query.unrestricted_unsafe()
            .filter(models.Revision.repository_id == repo.id)
            .order_by(models.Revision.date_created.desc())
            .limit(commits_per_repo)
        )
        revision_iter = iter(list(recent))
        parent_revision = None
        for _ in range(commits_per_repo):
            revision = next(revision_iter)
            build = mock_build(
                repo,
                parent_revision=parent_revision,
                revision=revision,
                user_ids=user_ids,
                file_list=file_list,
            )
            parent_revision = build.source.revision
def process_pending_artifact(pending_artifact_id, **kwargs):
    """Promote a PendingArtifact into a real Artifact on its job and queue it.

    Raises UnknownBuild/UnknownJob when the external identifiers cannot be
    matched to local rows.
    """
    pending_artifact = PendingArtifact.query.unrestricted_unsafe().get(
        pending_artifact_id)
    if pending_artifact is None:
        current_app.logger.error("PendingArtifact %s not found",
                                 pending_artifact_id)
        return
    # delete is flushed but not committed, so a rollback later in this
    # function restores the pending row
    db.session.delete(pending_artifact)
    db.session.flush()
    auth.set_current_tenant(
        auth.RepositoryTenant(repository_id=pending_artifact.repository_id))
    # match the pending upload to its build/job via provider + external ids
    build = Build.query.filter(
        Build.repository_id == pending_artifact.repository_id,
        Build.provider == pending_artifact.provider,
        Build.external_id == pending_artifact.external_build_id,
    ).first()
    if not build:
        raise UnknownBuild
    job = Job.query.filter(
        Job.repository_id == pending_artifact.repository_id,
        Job.build_id == build.id,
        Job.provider == pending_artifact.provider,
        Job.external_id == pending_artifact.external_job_id,
    ).first()
    if not job:
        raise UnknownJob
    artifact = Artifact(
        job_id=job.id,
        repository_id=pending_artifact.repository_id,
        name=pending_artifact.name,
        status=Status.queued,
    )
    try:
        db.session.add(artifact)
        db.session.flush()
    except IntegrityError:
        current_app.logger.error(
            "Skipping pending artifact processing (%s) - duplicate key",
            pending_artifact_id,
        )
        # XXX(dcramer): this is more of an error but we make an assumption
        # that this happens because it was already sent
        db.session.rollback()
        # the rollback undid the earlier delete; remove the pending row again
        db.session.delete(pending_artifact)
        db.session.commit()
        return
    artifact.file.save(
        pending_artifact.file,
        # XXX(dcramer): we reference the same file, so it lives in the old path
        # "{0}/{1}/{2}_{3}".format(
        #     job.id.hex[:4], job.id.hex[4:], artifact.id.hex, artifact.name
        # ),
    )
    db.session.add(artifact)
    # a job that thought it was done must reopen to collect this artifact
    if job.status == Status.finished and job.result != Result.aborted:
        job.status = Status.collecting_results
        db.session.add(job)
    db.session.commit()
    process_artifact.delay(artifact_id=artifact.id)
def aggregate_build_stats(build_id: UUID):
    """
    Updates various denormalized / aggregate attributes on Build per its jobs. These
    attributes include start and completion dates, as well as the status and result.
    """
    # now we pull in the entirety of the build's data to aggregate state upward
    lock_key = "build:{build_id}".format(build_id=build_id)
    with redis.lock(lock_key):
        # row lock (FOR UPDATE nowait) in addition to the redis lock
        build = (Build.query.unrestricted_unsafe().with_for_update(
            nowait=True).get(build_id))
        if not build:
            raise ValueError(
                "Unable to find build with id = {}".format(build_id))
        auth.set_current_tenant(
            auth.RepositoryTenant(repository_id=build.repository_id))
        record_coverage_stats(build.id)
        # NOTE(review): job_list is a Query object, so each `for j in job_list`
        # below re-executes the SELECT — consider materializing with .all()
        job_list = Job.query.filter(Job.build_id == build.id)
        was_finished = build.status == Status.finished
        is_finished = all(p.status == Status.finished for p in job_list)
        # ensure build's dates are reflective of jobs
        build.date_started = safe_agg(min, (j.date_started for j in job_list
                                            if j.date_started))
        if is_finished:
            build.date_finished = safe_agg(
                max, (j.date_finished for j in job_list if j.date_finished))
        else:
            build.date_finished = None
        # if theres any failure, the build failed
        if any(j.result is Result.failed for j in job_list
               if not j.allow_failure):
            build.result = Result.failed
        # else, if we're finished, we can aggregate from results
        elif is_finished:
            if not job_list:
                build.result = Result.errored
            elif not any(j for j in job_list if not j.allow_failure):
                # every job tolerates failure, so the build passes outright
                build.result = Result.passed
            else:
                build.result = aggregate_result(
                    (j.result for j in job_list if not j.allow_failure))
        # we should never get here as long we've got jobs and correct data
        else:
            build.result = Result.unknown
        if is_finished:
            build.status = Status.finished
        else:
            # ensure we dont set the status to finished unless it actually is
            new_status = aggregate_status((j.status for j in job_list))
            if build.status != new_status:
                build.status = new_status
        db.session.add(build)
        db.session.commit()
        # we dont bother aggregating stats unless we're finished
        if build.status == Status.finished and not was_finished:
            for stat in AGGREGATED_BUILD_STATS:
                aggregate_stat_for_build(build, stat)
            db.session.commit()
            send_build_notifications.delay(build_id=build.id)
def default_tenant(default_repo):
    """Scope auth to the default repository for the duration of a test."""
    tenant = auth.RepositoryTenant(repository_id=default_repo.id)
    auth.set_current_tenant(tenant)