def test_manifest_labels(registry_model):
    """Exercise create/lookup/list/filter/delete of a manifest label."""
    repo_obj = model.repository.get_repository("devtable", "simple")
    repo_ref = RepositoryReference.for_repo_obj(repo_obj)
    tag = registry_model.find_matching_tag(repo_ref, ["latest"])
    manifest = registry_model.get_manifest_for_tag(tag)

    # Create a new label and verify the returned view.
    label = registry_model.create_manifest_label(manifest, "foo", "bar", "api")
    assert label.key == "foo"
    assert label.value == "bar"
    assert label.source_type_name == "api"
    assert label.media_type_name == "text/plain"

    # The label can be retrieved directly by UUID.
    assert registry_model.get_manifest_label(manifest, label.uuid) == label

    # It shows up in the full and prefix-filtered listings.
    assert label in registry_model.list_manifest_labels(manifest)
    assert label in registry_model.list_manifest_labels(manifest, key_prefix="fo")

    # It is *not* returned for a non-matching prefix.
    assert label not in registry_model.list_manifest_labels(manifest, key_prefix="ba")

    # Deleting removes it from lookup and listing.
    assert registry_model.delete_manifest_label(manifest, label.uuid)
    assert registry_model.get_manifest_label(manifest, label.uuid) is None
    assert label not in registry_model.list_manifest_labels(manifest)
def test_manifest_labels(registry_model):
    """Round-trip a manifest label: create, fetch, list, filter, delete."""
    repository = model.repository.get_repository('devtable', 'simple')
    ref = RepositoryReference.for_repo_obj(repository)
    matched_tag = registry_model.find_matching_tag(ref, ['latest'])
    target_manifest = registry_model.get_manifest_for_tag(matched_tag)

    # Create the label and verify each of its fields.
    new_label = registry_model.create_manifest_label(target_manifest, 'foo', 'bar', 'api')
    for attr, expected in (
        ('key', 'foo'),
        ('value', 'bar'),
        ('source_type_name', 'api'),
        ('media_type_name', 'text/plain'),
    ):
        assert getattr(new_label, attr) == expected

    # Direct lookup by UUID returns the same label.
    assert registry_model.get_manifest_label(target_manifest, new_label.uuid) == new_label

    # Present in the full listing and under a matching key prefix.
    assert new_label in registry_model.list_manifest_labels(target_manifest)
    assert new_label in registry_model.list_manifest_labels(target_manifest, key_prefix='fo')

    # Absent from a non-matching prefix filter.
    assert new_label not in registry_model.list_manifest_labels(target_manifest, key_prefix='ba')

    # Delete and verify it is fully gone.
    assert registry_model.delete_manifest_label(target_manifest, new_label.uuid)
    assert registry_model.get_manifest_label(target_manifest, new_label.uuid) is None
    assert new_label not in registry_model.list_manifest_labels(target_manifest)
def move_tag(repository, tag, image_ids, expect_gc=True):
    """Build a schema1 manifest chain for `image_ids` and retarget `tag` at it,
    optionally asserting that garbage collection then removes something."""
    namespace = repository.namespace_user.username
    name = repository.name
    repo_ref = RepositoryReference.for_repo_obj(repository)
    builder = DockerSchema1ManifestBuilder(namespace, name, tag)

    # NOTE: Building root to leaf.
    parent_id = None
    for image_id in image_ids:
        layer_config = {"id": image_id, "config": {"Labels": {"foo": "bar", "meh": "grah",}}}
        if parent_id:
            layer_config["parent"] = parent_id

        # Back each layer with a real blob row so there is storage to collect.
        _, blob_digest = _populate_blob(repository, image_id.encode("ascii"))
        builder.insert_layer(blob_digest, json.dumps(layer_config))
        parent_id = image_id

    # Sign, store, and point the tag at the new manifest.
    built_manifest = builder.build(docker_v2_signing_key)
    registry_model.create_manifest_and_retarget_tag(
        repo_ref, built_manifest, tag, storage, raise_on_error=True
    )

    if expect_gc:
        assert model.gc.garbage_collect_repo(repository) == expect_gc
def _create_tag(repo, name):
    """Create tag `name` in `repo`, pointing at a fresh schema2 manifest whose
    single layer is a remote (URL-backed) blob."""
    repo_ref = RepositoryReference.for_repo_obj(repo)

    with upload_blob(repo_ref, storage, BlobUploadSettings(500, 500)) as upload:
        app_config = {"TESTING": True}

        # Minimal image config blob for the manifest to reference.
        config_json = json.dumps(
            {
                "config": {
                    "author": "Repo Mirror",
                },
                "rootfs": {"type": "layers", "diff_ids": []},
                "history": [
                    {
                        "created": "2019-07-30T18:37:09.284840891Z",
                        "created_by": "base",
                        "author": "Repo Mirror",
                    },
                ],
            }
        )
        upload.upload_chunk(app_config, BytesIO(config_json.encode("utf-8")))
        committed_blob = upload.commit_to_blob(app_config)

        # Assemble a schema2 manifest around the config blob plus one remote layer.
        schema2 = DockerSchema2ManifestBuilder()
        schema2.set_config_digest(committed_blob.digest, committed_blob.compressed_size)
        schema2.add_layer("sha256:abcd", 1234, urls=["http://hello/world"])
        built = schema2.build()

        manifest, tag = registry_model.create_manifest_and_retarget_tag(
            repo_ref, built, name, storage)
def test_get_most_recent_tag(repo_namespace, repo_name, expected, registry_model):
    """The most recent tag's name is among `expected`, or None when expected is None."""
    repository = model.repository.get_repository(repo_namespace, repo_name)
    ref = RepositoryReference.for_repo_obj(repository)
    recent = registry_model.get_most_recent_tag(ref)
    if expected is None:
        assert recent is None
    else:
        assert recent.name in expected
def test_image_with_cas(default_tag_policy, initialized_db):
    """
    A repository with a tag pointing to an image backed by CAS. Deleting and GCing the tag
    should result in the storage and its CAS data being removed.
    """
    with assert_gc_integrity(expect_storage_removed=True):
        repository = create_repository()

        # Create an image storage record under CAS.
        content = b"hello world"
        digest = "sha256:" + hashlib.sha256(content).hexdigest()
        preferred = storage.preferred_locations[0]
        storage.put_content({preferred}, storage.blob_path(digest), content)

        image_storage = database.ImageStorage.create(content_checksum=digest)
        location = database.ImageStorageLocation.get(name=preferred)
        database.ImageStoragePlacement.create(location=location, storage=image_storage)

        # Temp link so its available.
        model.blob.store_blob_record_and_temp_link_in_repo(
            repository, digest, location, len(content), 120
        )

        # Ensure the CAS path exists.
        assert storage.exists({preferred}, storage.blob_path(digest))

        # Store a manifest pointing to that path.
        builder = DockerSchema1ManifestBuilder(
            repository.namespace_user.username, repository.name, "first"
        )
        builder.insert_layer(
            digest,
            json.dumps(
                {
                    "id": "i1",
                }
            ),
        )

        # Store the manifest.
        manifest = builder.build(docker_v2_signing_key)
        repo_ref = RepositoryReference.for_repo_obj(repository)
        registry_model.create_manifest_and_retarget_tag(
            repo_ref, manifest, "first", storage, raise_on_error=True
        )

        # Delete the temp reference so only the tag keeps the blob alive.
        _delete_temp_links(repository)

        # Delete the tag; GC (via assert_gc_integrity) should now reap the image.
        delete_tag(repository, "first")
        assert_deleted(repository, "i1")

        # Ensure the CAS path is gone.
        assert not storage.exists({preferred}, storage.blob_path(digest))
def lookup_repository(self, namespace_name, repo_name, kind_filter=None,
                      raise_on_error=False, manifest_ref=None):
    """
    Looks up and returns a reference to the repository with the given namespace
    and name, or None if none. If the repository does not exist and the given
    manifest_ref exists upstream, creates the repository.
    """
    def _make_ref(repo_obj):
        # Wrap the row in a reference; the fourth argument flags a free namespace.
        return RepositoryReference.for_repo_obj(
            repo_obj,
            namespace_name,
            repo_name,
            repo_obj.namespace_user.stripe_id is None if repo_obj else None,
            state=repo_obj.state if repo_obj is not None else None,
        )

    repo = get_repository(namespace_name, repo_name)
    if repo is not None:
        return _make_ref(repo)

    # we only create a repository for images that exist upstream, and if
    # we're not given a manifest reference then we can't check whether the
    # image exists upstream or not, so we refuse to create the repo.
    if manifest_ref is None:
        return None

    try:
        self._proxy.manifest_exists(manifest_ref, ACCEPTED_MEDIA_TYPES)
    except UpstreamRegistryError as e:
        if raise_on_error:
            raise RepositoryDoesNotExist(str(e))
        return None

    repo = create_repository(namespace_name, repo_name, self._user)
    return _make_ref(repo)
def test_find_matching_tag(names, expected, registry_model):
    """find_matching_tag returns a tag whose name is in `expected`, or None."""
    repository = model.repository.get_repository("devtable", "simple")
    ref = RepositoryReference.for_repo_obj(repository)
    match = registry_model.find_matching_tag(ref, names)
    if expected is None:
        assert match is None
        return
    assert match.name in expected
    assert match.repository.name == "simple"
def test_get_most_recent_tag_lifetime_start(repositories, expected_tag_count, registry_model):
    """
    The map from get_most_recent_tag_lifetime_start should have one entry per
    expected tag, each matching its repository's most recent tag's lifetime
    start at seconds resolution.
    """
    # NOTE(review): the pairs are unpacked as (name, namespace) but passed in
    # that order to lookup_repository(namespace_name, repo_name) -- confirm the
    # fixture actually supplies (namespace, name) tuples.
    last_modified_map = registry_model.get_most_recent_tag_lifetime_start(
        [registry_model.lookup_repository(name, namespace) for name, namespace in repositories]
    )
    assert len(last_modified_map) == expected_tag_count

    # Iterate the items view directly; no defensive list() copy is needed since
    # the map is not mutated during iteration.
    for repo_id, last_modified in last_modified_map.items():
        tag = registry_model.get_most_recent_tag(RepositoryReference.for_id(repo_id))
        # The map stores seconds while tags record milliseconds.
        assert last_modified == tag.lifetime_start_ms // 1000
def get_repo(self, namespace_name, repository_name, user, include_tags=True, max_tags=500):
    """
    Return a view object for the named repository, or None if it does not exist.

    For 'application' (appr) repositories an ApplicationRepository with channels
    and releases is returned; otherwise an ImageRepositoryRepository with (up to
    max_tags) active tags and 3 months of action counts.
    """
    repo = model.repository.get_repository(namespace_name, repository_name)
    if repo is None:
        return None

    is_starred = model.repository.repository_is_starred(
        user, repo) if user else False
    is_public = model.repository.is_repository_public(repo)
    kind_name = RepositoryTable.kind.get_name(repo.kind_id)
    base = RepositoryBaseElement(
        namespace_name, repository_name, is_starred, is_public, kind_name,
        repo.description, repo.namespace_user.organization,
        repo.namespace_user.removed_tag_expiration_s, None, None, False, False, False,
        repo.namespace_user.stripe_id is None, repo.state)

    if base.kind_name == 'application':
        # Appr repository: surface its channels and releases instead of tags.
        channels = channel_model.get_repo_channels(repo, appr_model.models_ref)
        releases = release_model.get_release_objs(repo, appr_model.models_ref)
        releases_channels_map = defaultdict(list)
        return ApplicationRepository(
            base,
            [_create_channel(channel, releases_channels_map) for channel in channels],
            [
                Release(release.name, release.lifetime_start, releases_channels_map)
                for release in releases
            ],
            repo.state)

    tags = None
    repo_ref = RepositoryReference.for_repo_obj(repo)
    if include_tags:
        # Only the first page of active tags is fetched (capped at max_tags).
        tags, _ = registry_model.list_repository_tag_history(
            repo_ref, page=1, size=max_tags, active_tags_only=True)
        tags = [
            Tag(
                tag.name,
                # Legacy (v1) image data may be absent for newer manifests.
                tag.legacy_image.docker_image_id if tag.legacy_image_if_present else None,
                tag.legacy_image.aggregate_size if tag.legacy_image_if_present else None,
                tag.lifetime_start_ts,
                tag.manifest_digest,
                tag.lifetime_end_ts)
            for tag in tags
        ]

    start_date = datetime.now() - timedelta(days=MAX_DAYS_IN_3_MONTHS)
    counts = model.log.get_repository_action_counts(repo, start_date)

    assert repo.state is not None
    return ImageRepositoryRepository(
        base, tags,
        [Count(count.date, count.count) for count in counts],
        repo.badge_token, repo.trust_enabled, repo.state)
def test_lookup_manifests(repo_namespace, repo_name, registry_model):
    """Digest lookup returns the same manifest attached to the latest tag."""
    repository = model.repository.get_repository(repo_namespace, repo_name)
    ref = RepositoryReference.for_repo_obj(repository)
    tag = registry_model.find_matching_tag(ref, ["latest"])
    tag_manifest = registry_model.get_manifest_for_tag(tag)

    by_digest = registry_model.lookup_manifest_by_digest(ref, tag_manifest.digest)
    assert by_digest._db_id == tag_manifest._db_id
    assert by_digest.digest == tag_manifest.digest

    # The manifest can also be rendered as a signed schema1 manifest.
    converted = registry_model.get_schema1_parsed_manifest(by_digest, "foo", "bar", "baz", storage)
    assert converted is not None
def lookup_repository(self, namespace_name, repo_name, kind_filter=None):
    """
    Looks up and returns a reference to the repository with the given namespace
    and name, or None if none.
    """
    repo = model.repository.get_repository(namespace_name, repo_name, kind_filter=kind_filter)
    if repo is not None:
        return RepositoryReference.for_repo_obj(
            repo,
            namespace_name,
            repo_name,
            repo.namespace_user.stripe_id is None,
            state=repo.state,
        )

    # No matching row: return a reference wrapping None with no state.
    return RepositoryReference.for_repo_obj(repo, namespace_name, repo_name, None, state=None)
def test_batch_labels(registry_model):
    """Labels added through the batch context manager are all persisted."""
    repository = model.repository.get_repository("devtable", "history")
    ref = RepositoryReference.for_repo_obj(repository)
    tag = registry_model.find_matching_tag(ref, ["latest"])
    manifest = registry_model.get_manifest_for_tag(tag)

    with registry_model.batch_create_manifest_labels(manifest) as add_label:
        for key, value in (("foo", "1"), ("bar", "2"), ("baz", "3")):
            add_label(key, value, "api")

    # All three labels should now be attached to the manifest.
    assert len(registry_model.list_manifest_labels(manifest)) == 3
def test_tag_names_for_manifest(initialized_db, registry_model):
    """Every active tag appears in tag_names_for_manifest for its manifest,
    and every reported name resolves back to that manifest."""
    saw_tag = False
    for repository in Repository.select():
        ref = RepositoryReference.for_repo_obj(repository)
        for active_tag in registry_model.list_all_active_repository_tags(ref):
            manifest = registry_model.get_manifest_for_tag(active_tag)
            names = set(registry_model.tag_names_for_manifest(manifest, 1000))
            assert active_tag.name in names
            saw_tag = True

            # Each reported name must point at the same manifest.
            for name in names:
                resolved = registry_model.get_repo_tag(ref, name)
                assert registry_model.get_manifest_for_tag(resolved) == manifest

    assert saw_tag
def test_batch_labels(registry_model):
    """The batch label context manager persists all three labels."""
    repository = model.repository.get_repository('devtable', 'history')
    ref = RepositoryReference.for_repo_obj(repository)
    matched_tag = registry_model.find_matching_tag(ref, ['latest'])
    manifest = registry_model.get_manifest_for_tag(matched_tag)

    with registry_model.batch_create_manifest_labels(manifest) as add_label:
        add_label('foo', '1', 'api')
        add_label('bar', '2', 'api')
        add_label('baz', '3', 'api')

    # Ensure we can look them up.
    label_count = len(registry_model.list_manifest_labels(manifest))
    assert label_count == 3
def test_manifest_label_handlers(registry_model):
    """A quay.expires-after label sets an expiration on the manifest's tag."""
    repository = model.repository.get_repository("devtable", "simple")
    ref = RepositoryReference.for_repo_obj(repository)
    tag = registry_model.get_repo_tag(ref, "latest")
    manifest = registry_model.get_manifest_for_tag(tag)

    # The tag starts out with no expiration.
    assert tag.lifetime_end_ts is None

    # Attach an expires-after label; the label handler should expire the tag.
    registry_model.create_manifest_label(manifest, "quay.expires-after", "2h", "api")

    # The tag now expires two hours after its lifetime start.
    refreshed = registry_model.get_repo_tag(ref, "latest")
    assert refreshed.lifetime_end_ts == refreshed.lifetime_start_ts + 60 * 60 * 2
def test_lookup_manifests(repo_namespace, repo_name, registry_model):
    """Digest lookup (with legacy image) returns the tag's manifest and lineage."""
    repository = model.repository.get_repository(repo_namespace, repo_name)
    ref = RepositoryReference.for_repo_obj(repository)
    tag = registry_model.find_matching_tag(ref, ['latest'])
    tag_manifest = registry_model.get_manifest_for_tag(tag)

    by_digest = registry_model.lookup_manifest_by_digest(
        ref, tag_manifest.digest, include_legacy_image=True)
    assert by_digest._db_id == tag_manifest._db_id
    assert by_digest.digest == tag_manifest.digest

    # The legacy (v1) image and its ancestry must have been loaded.
    assert by_digest.legacy_image
    assert by_digest.legacy_image.parents

    converted = registry_model.get_schema1_parsed_manifest(
        by_digest, 'foo', 'bar', 'baz', storage)
    assert converted is not None
def _determine_cached_tag_by_tag(self):
    """
    Determines the cached tag by looking for one of the tags being built, and
    seeing if it exists in the repository. This is a fallback for when no
    comment information is available.
    """
    with UseThenDisconnect(app.config):
        candidate_tags = self.build_config.get("docker_tags", ["latest"])
        repo_ref = RepositoryReference.for_repo_obj(self.repo_build.repository)

        # Prefer a tag that is both being built and already present.
        match = registry_model.find_matching_tag(repo_ref, candidate_tags)
        if match is not None:
            return match.name

        # Otherwise fall back to the repository's newest tag, if any.
        newest = registry_model.get_most_recent_tag(repo_ref)
        return newest.name if newest is not None else None
def lookup_repository(
    self, namespace_name, repo_name, kind_filter=None, raise_on_error=False, manifest_ref=None
):
    """
    Looks up and returns a reference to the repository with the given namespace
    and name, or None if none (raising RepositoryDoesNotExist instead when
    raise_on_error is set).
    """
    repo = model.repository.get_repository(namespace_name, repo_name, kind_filter=kind_filter)
    if repo is None:
        if raise_on_error:
            raise model.RepositoryDoesNotExist()
        return None

    return RepositoryReference.for_repo_obj(
        repo,
        namespace_name,
        repo_name,
        repo.namespace_user.stripe_id is None,
        state=repo.state,
    )
def list_manifest_layers(self, manifest, storage, include_placements=False):
    """
    Return the parsed layers for the given manifest, or None if the backing
    row is missing or the manifest content fails validation.
    """
    try:
        manifest_row = database.TagManifest.get(id=manifest._db_id)
    except database.TagManifest.DoesNotExist:
        logger.exception('Could not find tag manifest for manifest `%s`', manifest._db_id)
        return None

    try:
        parsed_manifest = manifest.get_parsed_manifest()
    except ManifestException:
        logger.exception('Could not parse and validate manifest `%s`', manifest._db_id)
        return None

    repository_ref = RepositoryReference.for_id(manifest_row.tag.repository_id)
    return self.list_parsed_manifest_layers(
        repository_ref, parsed_manifest, storage, include_placements)
def populate_database(minimal=False):
    """
    Populate the test database with a fixed set of users, robots, organizations,
    repositories, teams, builds, OAuth apps, mirrors, service keys, messages and
    audit-log entries.

    Args:
        minimal: when True, stop after creating the storage locations,
            notification kinds and the "devtable" user.
    """
    logger.debug("Populating the DB with test data.")

    # Check if the data already exists. If so, we skip. This can happen between calls from the
    # "old style" tests and the new py.test's.
    try:
        User.get(username="******")
        logger.debug("DB already populated")
        return
    except User.DoesNotExist:
        pass

    # Note: databases set up with "real" schema (via Alembic) will not have these types
    # type, so we it here it necessary.
    try:
        ImageStorageLocation.get(name="local_eu")
        ImageStorageLocation.get(name="local_us")
    except ImageStorageLocation.DoesNotExist:
        ImageStorageLocation.create(name="local_eu")
        ImageStorageLocation.create(name="local_us")

    try:
        NotificationKind.get(name="test_notification")
    except NotificationKind.DoesNotExist:
        NotificationKind.create(name="test_notification")

    new_user_1 = model.user.create_user("devtable", "password", "*****@*****.**")
    new_user_1.verified = True
    new_user_1.stripe_id = TEST_STRIPE_ID
    new_user_1.save()

    if minimal:
        logger.debug("Skipping most db population because user requested mininal db")
        return

    UserRegion.create(user=new_user_1, location=ImageStorageLocation.get(name="local_us"))
    model.release.set_region_release("quay", "us", "v0.1.2")

    model.user.create_confirm_email_code(new_user_1, new_email="*****@*****.**")

    disabled_user = model.user.create_user("disabled", "password", "*****@*****.**")
    disabled_user.verified = True
    disabled_user.enabled = False
    disabled_user.save()

    dtrobot = model.user.create_robot("dtrobot", new_user_1)
    dtrobot2 = model.user.create_robot("dtrobot2", new_user_1)

    new_user_2 = model.user.create_user("public", "password", "*****@*****.**")
    new_user_2.verified = True
    new_user_2.save()

    new_user_3 = model.user.create_user("freshuser", "password", "*****@*****.**")
    new_user_3.verified = True
    new_user_3.save()

    another_robot = model.user.create_robot("anotherrobot", new_user_3)

    new_user_4 = model.user.create_user("randomuser", "password", "*****@*****.**")
    new_user_4.verified = True
    new_user_4.save()

    new_user_5 = model.user.create_user("unverified", "password", "*****@*****.**")
    new_user_5.save()

    reader = model.user.create_user("reader", "password", "*****@*****.**")
    reader.verified = True
    reader.save()

    creatoruser = model.user.create_user("creator", "password", "*****@*****.**")
    creatoruser.verified = True
    creatoruser.save()

    outside_org = model.user.create_user("outsideorg", "password", "*****@*****.**")
    outside_org.verified = True
    outside_org.save()

    # Notifications for the devtable user.
    model.notification.create_notification(
        "test_notification",
        new_user_1,
        metadata={"some": "value", "arr": [1, 2, 3], "obj": {"a": 1, "b": 2}},
    )

    from_date = datetime.utcnow()
    to_date = from_date + timedelta(hours=1)
    notification_metadata = {
        "from_date": formatdate(calendar.timegm(from_date.utctimetuple())),
        "to_date": formatdate(calendar.timegm(to_date.utctimetuple())),
        "reason": "database migration",
    }
    model.notification.create_notification(
        "maintenance", new_user_1, metadata=notification_metadata
    )

    # Repositories (the tuple argument describes the image/tag tree to generate).
    __generate_repository(
        new_user_4,
        "randomrepo",
        "Random repo repository.",
        False,
        [],
        (4, [], ["latest", "prod"]),
    )

    simple_repo = __generate_repository(
        new_user_1,
        "simple",
        "Simple repository.",
        False,
        [],
        (4, [], ["latest", "prod"]),
    )

    # Add some labels to the latest tag's manifest.
    repo_ref = RepositoryReference.for_repo_obj(simple_repo)
    tag = registry_model.get_repo_tag(repo_ref, "latest")
    manifest = registry_model.get_manifest_for_tag(tag)
    assert manifest

    first_label = registry_model.create_manifest_label(manifest, "foo", "bar", "manifest")
    registry_model.create_manifest_label(manifest, "foo", "baz", "api")
    registry_model.create_manifest_label(manifest, "anotherlabel", "1234", "internal")
    registry_model.create_manifest_label(
        manifest, "jsonlabel", '{"hey": "there"}', "internal", "application/json"
    )

    label_metadata = {
        "key": "foo",
        "value": "bar",
        "id": first_label._db_id,
        "manifest_digest": manifest.digest,
    }
    logs_model.log_action(
        "manifest_label_add",
        new_user_1.username,
        performer=new_user_1,
        timestamp=datetime.now(),
        metadata=label_metadata,
        repository=simple_repo,
    )

    model.blob.initiate_upload(
        new_user_1.username, simple_repo.name, str(uuid4()), "local_us", {}
    )
    model.notification.create_repo_notification(
        simple_repo, "repo_push", "quay_notification", {}, {}
    )

    __generate_repository(
        new_user_1,
        "sharedtags",
        "Shared tags repository",
        False,
        [(new_user_2, "read"), (dtrobot[0], "read")],
        (
            2,
            [
                (3, [], ["v2.0", "v2.1", "v2.2"]),
                (
                    1,
                    [(1, [(1, [], ["prod", "581a284"])], ["staging", "8423b58"]), (1, [], None)],
                    None,
                ),
            ],
            None,
        ),
    )

    __generate_repository(
        new_user_1,
        "history",
        "Historical repository.",
        False,
        [],
        (4, [(2, [], "#latest"), (3, [], "latest")], None),
    )

    __generate_repository(
        new_user_1,
        "complex",
        "Complex repository with many branches and tags.",
        False,
        [(new_user_2, "read"), (dtrobot[0], "read")],
        (
            2,
            [(3, [], "v2.0"), (1, [(1, [(2, [], ["prod"])], "staging"), (1, [], None)], None)],
            None,
        ),
    )

    __generate_repository(
        new_user_1,
        "gargantuan",
        None,
        False,
        [],
        (
            2,
            [
                (3, [], "v2.0"),
                (1, [(1, [(1, [], ["latest", "prod"])], "staging"), (1, [], None)], None),
                (20, [], "v3.0"),
                (5, [], "v4.0"),
                (1, [(1, [], "v5.0"), (1, [], "v6.0")], None),
            ],
            None,
        ),
    )

    trusted_repo = __generate_repository(
        new_user_1,
        "trusted",
        "Trusted repository.",
        False,
        [],
        (4, [], ["latest", "prod"]),
    )
    trusted_repo.trust_enabled = True
    trusted_repo.save()

    publicrepo = __generate_repository(
        new_user_2,
        "publicrepo",
        "Public repository pullable by the world.",
        True,
        [],
        (10, [], "latest"),
    )

    __generate_repository(
        outside_org, "coolrepo", "Some cool repo.", False, [], (5, [], "latest")
    )

    __generate_repository(
        new_user_1,
        "shared",
        "Shared repository, another user can write.",
        False,
        [(new_user_2, "write"), (reader, "read")],
        (5, [], "latest"),
    )

    __generate_repository(
        new_user_1,
        "text-full-repo",
        "This is a repository for testing text search",
        False,
        [(new_user_2, "write"), (reader, "read")],
        (5, [], "latest"),
    )

    building = __generate_repository(
        new_user_1,
        "building",
        "Empty repository which is building.",
        False,
        [(new_user_2, "write"), (reader, "read")],
        (0, [], None),
    )

    # Build trigger + three builds for the "building" repository.
    new_token = model.token.create_access_token(building, "write", "build-worker")

    trigger = model.build.create_build_trigger(
        building, "github", "123authtoken", new_user_1, pull_robot=dtrobot[0]
    )
    trigger.config = json.dumps(
        {
            "build_source": "jakedt/testconnect",
            "subdir": "",
            "dockerfile_path": "Dockerfile",
            "context": "/",
        }
    )
    trigger.save()

    repo = "ci.devtable.com:5000/%s/%s" % (building.namespace_user.username, building.name)
    job_config = {
        "repository": repo,
        "docker_tags": ["latest"],
        "build_subdir": "",
        "trigger_metadata": {
            "commit": "3482adc5822c498e8f7db2e361e8d57b3d77ddd9",
            "ref": "refs/heads/master",
            "default_branch": "master",
        },
    }

    model.repository.star_repository(new_user_1, simple_repo)

    record = model.repository.create_email_authorization_for_repo(
        new_user_1.username, "simple", "*****@*****.**"
    )
    record.confirmed = True
    record.save()

    model.repository.create_email_authorization_for_repo(
        new_user_1.username, "simple", "*****@*****.**"
    )

    build2 = model.build.create_repository_build(
        building,
        new_token,
        job_config,
        "68daeebd-a5b9-457f-80a0-4363b882f8ea",
        "build-name",
        trigger,
    )
    build2.uuid = "deadpork-dead-pork-dead-porkdeadpork"
    build2.save()

    build3 = model.build.create_repository_build(
        building,
        new_token,
        job_config,
        "f49d07f9-93da-474d-ad5f-c852107c3892",
        "build-name",
        trigger,
    )
    build3.uuid = "deadduck-dead-duck-dead-duckdeadduck"
    build3.save()

    build1 = model.build.create_repository_build(
        building, new_token, job_config, "701dcc3724fb4f2ea6c31400528343cd", "build-name", trigger
    )
    build1.uuid = "deadbeef-dead-beef-dead-beefdeadbeef"
    build1.save()

    # Organizations.
    org = model.organization.create_organization("buynlarge", "*****@*****.**", new_user_1)
    org.stripe_id = TEST_STRIPE_ID
    org.save()

    liborg = model.organization.create_organization(
        "library", "*****@*****.**", new_user_1
    )
    liborg.save()

    titiorg = model.organization.create_organization("titi", "*****@*****.**", new_user_1)
    titiorg.save()

    thirdorg = model.organization.create_organization(
        "sellnsmall", "*****@*****.**", new_user_1
    )
    thirdorg.save()

    model.user.create_robot("coolrobot", org)

    # OAuth applications and tokens.
    oauth_app_1 = model.oauth.create_application(
        org,
        "Some Test App",
        "http://localhost:8000",
        "http://localhost:8000/o2c.html",
        client_id="deadbeef",
    )

    model.oauth.create_application(
        org,
        "Some Other Test App",
        "http://quay.io",
        "http://localhost:8000/o2c.html",
        client_id="deadpork",
        description="This is another test application",
    )

    model.oauth.create_user_access_token(
        new_user_1, "deadbeef", "repo:admin", access_token="%s%s" % ("b" * 40, "c" * 40)
    )

    oauth_credential = Credential.from_string("dswfhasdf1")
    OAuthAuthorizationCode.create(
        application=oauth_app_1,
        code="Z932odswfhasdf1",
        scope="repo:admin",
        data='{"somejson": "goeshere"}',
        code_name="Z932odswfhasdf1Z932o",
        code_credential=oauth_credential,
    )

    model.user.create_robot("neworgrobot", org)

    ownerbot = model.user.create_robot("ownerbot", org)[0]
    creatorbot = model.user.create_robot("creatorbot", org)[0]

    owners = model.team.get_organization_team("buynlarge", "owners")
    owners.description = "Owners have unfetterd access across the entire org."
    owners.save()

    org_repo = __generate_repository(
        org,
        "orgrepo",
        "Repository owned by an org.",
        False,
        [(outside_org, "read")],
        (4, [], ["latest", "prod"]),
    )

    __generate_repository(
        org,
        "anotherorgrepo",
        "Another repository owned by an org.",
        False,
        [],
        (4, [], ["latest", "prod"]),
    )

    # Teams and memberships.
    creators = model.team.create_team("creators", org, "creator", "Creators of orgrepo.")
    reader_team = model.team.create_team("readers", org, "member", "Readers of orgrepo.")
    model.team.add_or_invite_to_team(new_user_1, reader_team, outside_org)
    model.permission.set_team_repo_permission(
        reader_team.name, org_repo.namespace_user.username, org_repo.name, "read"
    )

    model.team.add_user_to_team(new_user_2, reader_team)
    model.team.add_user_to_team(reader, reader_team)
    model.team.add_user_to_team(ownerbot, owners)
    model.team.add_user_to_team(creatorbot, creators)
    model.team.add_user_to_team(creatoruser, creators)

    sell_owners = model.team.get_organization_team("sellnsmall", "owners")
    sell_owners.description = "Owners have unfettered access across the entire org."
    sell_owners.save()

    model.team.add_user_to_team(new_user_4, sell_owners)

    sync_config = {"group_dn": "cn=Test-Group,ou=Users", "group_id": "somegroupid"}
    synced_team = model.team.create_team("synced", org, "member", "Some synced team.")
    model.team.set_team_syncing(synced_team, "ldap", sync_config)

    another_synced_team = model.team.create_team("synced", thirdorg, "member", "Some synced team.")
    model.team.set_team_syncing(
        another_synced_team, "ldap", {"group_dn": "cn=Test-Group,ou=Users"}
    )

    __generate_repository(
        new_user_1,
        "superwide",
        None,
        False,
        [],
        [
            (10, [], "latest2"),
            (2, [], "latest3"),
            (2, [(1, [], "latest11"), (2, [], "latest12")], "latest4"),
            (2, [], "latest5"),
            (2, [], "latest6"),
            (2, [], "latest7"),
            (2, [], "latest8"),
            (2, [], "latest9"),
            (2, [], "latest10"),
            (2, [], "latest13"),
            (2, [], "latest14"),
            (2, [], "latest15"),
            (2, [], "latest16"),
            (2, [], "latest17"),
            (2, [], "latest18"),
        ],
    )

    # A mirrored repository with its rule and configuration.
    mirror_repo = __generate_repository(
        new_user_1,
        "mirrored",
        "Mirrored repository.",
        False,
        [(dtrobot[0], "write"), (dtrobot2[0], "write")],
        (4, [], ["latest", "prod"]),
    )
    mirror_rule = model.repo_mirror.create_mirroring_rule(mirror_repo, ["latest", "3.3*"])
    mirror_args = (mirror_repo, mirror_rule, dtrobot[0], "quay.io/coreos/etcd", 60 * 60 * 24)
    mirror_kwargs = {
        "external_registry_username": "******",
        "external_registry_password": "******",
        "external_registry_config": {},
        "is_enabled": True,
        "sync_start_date": datetime.utcnow(),
    }
    mirror = model.repo_mirror.enable_mirroring_for_repository(*mirror_args, **mirror_kwargs)

    read_only_repo = __generate_repository(
        new_user_1,
        "readonly",
        "Read-Only Repo.",
        False,
        [],
        (4, [], ["latest", "prod"]),
    )
    read_only_repo.state = RepositoryState.READ_ONLY
    read_only_repo.save()

    # Prototype (default) permissions.
    model.permission.add_prototype_permission(
        org, "read", activating_user=new_user_1, delegate_user=new_user_2
    )
    model.permission.add_prototype_permission(
        org, "read", activating_user=new_user_1, delegate_team=reader_team
    )
    model.permission.add_prototype_permission(
        org, "write", activating_user=new_user_2, delegate_user=new_user_1
    )

    # Reference dates used by the service keys and log entries below.
    today = datetime.today()
    week_ago = today - timedelta(6)
    six_ago = today - timedelta(5)
    four_ago = today - timedelta(4)
    yesterday = datetime.combine(date.today(), datetime.min.time()) - timedelta(hours=6)

    __generate_service_key(
        "kid1", "somesamplekey", new_user_1, today, ServiceKeyApprovalType.SUPERUSER
    )
    __generate_service_key(
        "kid2",
        "someexpiringkey",
        new_user_1,
        week_ago,
        ServiceKeyApprovalType.SUPERUSER,
        today + timedelta(days=14),
    )

    __generate_service_key("kid3", "unapprovedkey", new_user_1, today, None)

    __generate_service_key(
        "kid4",
        "autorotatingkey",
        new_user_1,
        six_ago,
        ServiceKeyApprovalType.KEY_ROTATION,
        today + timedelta(days=1),
        rotation_duration=timedelta(hours=12).total_seconds(),
    )

    __generate_service_key(
        "kid5",
        "key for another service",
        new_user_1,
        today,
        ServiceKeyApprovalType.SUPERUSER,
        today + timedelta(days=14),
        service="different_sample_service",
    )

    __generate_service_key(
        "kid6",
        "someexpiredkey",
        new_user_1,
        week_ago,
        ServiceKeyApprovalType.SUPERUSER,
        today - timedelta(days=1),
    )

    __generate_service_key(
        "kid7",
        "somewayexpiredkey",
        new_user_1,
        week_ago,
        ServiceKeyApprovalType.SUPERUSER,
        today - timedelta(days=30),
    )

    # Add the test pull key as pre-approved for local and unittest registry testing.
    # Note: this must match the private key found in the local/test config.
    _TEST_JWK = {
        "e": "AQAB",
        "kty": "RSA",
        "n": "yqdQgnelhAPMSeyH0kr3UGePK9oFOmNfwD0Ymnh7YYXr21VHWwyM2eVW3cnLd9KXywDFtGSe9oFDbnOuMCdUowdkBcaHju-isbv5KEbNSoy_T2Rip-6L0cY63YzcMJzv1nEYztYXS8wz76pSK81BKBCLapqOCmcPeCvV9yaoFZYvZEsXCl5jjXN3iujSzSF5Z6PpNFlJWTErMT2Z4QfbDKX2Nw6vJN6JnGpTNHZvgvcyNX8vkSgVpQ8DFnFkBEx54PvRV5KpHAq6AsJxKONMo11idQS2PfCNpa2hvz9O6UZe-eIX8jPo5NW8TuGZJumbdPT_nxTDLfCqfiZboeI0Pw",
    }
    key = model.service_keys.create_service_key(
        "test_service_key", "test_service_key", "quay", _TEST_JWK, {}, None
    )
    model.service_keys.approve_service_key(
        key.kid,
        ServiceKeyApprovalType.SUPERUSER,
        notes="Test service key for local/test registry testing",
    )

    # Add an app specific token.
    token = model.appspecifictoken.create_token(new_user_1, "some app")
    token.token_name = "a" * 60
    token.token_secret = "b" * 60
    token.save()

    # Audit-log entries spread over the reference dates above.
    logs_model.log_action(
        "org_create_team",
        org.username,
        performer=new_user_1,
        timestamp=week_ago,
        metadata={"team": "readers"},
    )
    logs_model.log_action(
        "org_set_team_role",
        org.username,
        performer=new_user_1,
        timestamp=week_ago,
        metadata={"team": "readers", "role": "read"},
    )
    logs_model.log_action(
        "create_repo",
        org.username,
        performer=new_user_1,
        repository=org_repo,
        timestamp=week_ago,
        metadata={"namespace": org.username, "repo": "orgrepo"},
    )
    logs_model.log_action(
        "change_repo_permission",
        org.username,
        performer=new_user_2,
        repository=org_repo,
        timestamp=six_ago,
        metadata={"username": new_user_1.username, "repo": "orgrepo", "role": "admin"},
    )
    logs_model.log_action(
        "change_repo_permission",
        org.username,
        performer=new_user_1,
        repository=org_repo,
        timestamp=six_ago,
        metadata={"username": new_user_2.username, "repo": "orgrepo", "role": "read"},
    )
    logs_model.log_action(
        "add_repo_accesstoken",
        org.username,
        performer=new_user_1,
        repository=org_repo,
        timestamp=four_ago,
        metadata={"repo": "orgrepo", "token": "deploytoken"},
    )
    logs_model.log_action(
        "push_repo",
        org.username,
        performer=new_user_2,
        repository=org_repo,
        timestamp=today,
        metadata={"username": new_user_2.username, "repo": "orgrepo"},
    )
    logs_model.log_action(
        "pull_repo",
        org.username,
        performer=new_user_2,
        repository=org_repo,
        timestamp=today,
        metadata={"username": new_user_2.username, "repo": "orgrepo"},
    )
    logs_model.log_action(
        "pull_repo",
        org.username,
        repository=org_repo,
        timestamp=today,
        metadata={"token": "sometoken", "token_code": "somecode", "repo": "orgrepo"},
    )
    logs_model.log_action(
        "delete_tag",
        org.username,
        performer=new_user_2,
        repository=org_repo,
        timestamp=today,
        metadata={"username": new_user_2.username, "repo": "orgrepo", "tag": "sometag"},
    )
    logs_model.log_action(
        "pull_repo",
        org.username,
        repository=org_repo,
        timestamp=today,
        metadata={"token_code": "somecode", "repo": "orgrepo"},
    )
    logs_model.log_action(
        "pull_repo",
        new_user_2.username,
        repository=publicrepo,
        timestamp=yesterday,
        metadata={"token_code": "somecode", "repo": "publicrepo"},
    )
    logs_model.log_action(
        "build_dockerfile",
        new_user_1.username,
        repository=building,
        timestamp=today,
        metadata={
            "repo": "building",
            "namespace": new_user_1.username,
            "trigger_id": trigger.uuid,
            "config": json.loads(trigger.config),
            "service": trigger.service.name,
        },
    )

    # Global messages shown in the UI.
    model.message.create(
        [
            {
                "content": "We love you, Quay customers!",
                "severity": "info",
                "media_type": "text/plain",
            }
        ]
    )

    model.message.create(
        [
            {
                "content": "This is a **development** install of Quay",
                "severity": "warning",
                "media_type": "text/markdown",
            }
        ]
    )

    fake_queue = WorkQueue("fakequeue", tf)
    fake_queue.put(["canonical", "job", "name"], "{}")

    model.user.create_user_prompt(new_user_4, "confirm_username")

    # Seed action counts/scores for every repository created above.
    for to_count in Repository.select():
        model.repositoryactioncount.count_repository_actions(to_count, datetime.utcnow())
        model.repositoryactioncount.update_repository_score(to_count)
def get_repo_list(
    self,
    starred,
    user,
    repo_kind,
    namespace,
    username,
    public,
    page_token,
    last_modified,
    popularity,
):
    """
    Return a page of repositories visible to the caller.

    Args:
        starred: if truthy, list the repositories starred by `user` instead of
            running the paginated visibility query.
        user: the authenticated user object (used for star lookups).
        repo_kind: kind filter (e.g. "image"); also selects which tag model is
            used for last-modified lookups.
        namespace: optional namespace to restrict the visibility query to.
        username: the requesting username; falsy means anonymous (no star set).
        public: whether public repositories are included in the visibility query.
        page_token: opaque pagination token from a previous call (non-starred path only).
        last_modified: if truthy, populate each element's last-modified timestamp.
        popularity: if truthy, populate each element's action-count score.

    Returns:
        A two-tuple of (list of RepositoryBaseElement, next_page_token), where
        next_page_token is None when there is no further page (always None on
        the starred path, which is not paginated).
    """
    next_page_token = None

    # Lookup the requested repositories (either starred or non-starred.)
    if starred:
        # Return the full list of repos starred by the current user that are still visible to them.
        def can_view_repo(repo):
            # Starred lookups are expected to exclude deleted repos upstream.
            assert repo.state != RepositoryState.MARKED_FOR_DELETION
            can_view = ReadRepositoryPermission(repo.namespace_user.username, repo.name).can()
            return can_view or model.repository.is_repository_public(repo)

        unfiltered_repos = model.repository.get_user_starred_repositories(
            user, kind_filter=repo_kind
        )
        repos = [repo for repo in unfiltered_repos if can_view_repo(repo)]
    else:
        # Determine the starting offset for pagination. Note that we don't use the normal
        # model.modelutil.paginate method here, as that does not operate over UNION queries, which
        # get_visible_repositories will return if there is a logged-in user (for performance reasons).
        #
        # Also note the +1 on the limit, as paginate_query uses the extra result to determine whether
        # there is a next page.
        start_id = model.modelutil.pagination_start(page_token)
        repo_query = model.repository.get_visible_repositories(
            username=username,
            include_public=public,
            start_id=start_id,
            limit=REPOS_PER_PAGE + 1,
            kind_filter=repo_kind,
            namespace=namespace,
        )
        repos, next_page_token = model.modelutil.paginate_query(
            repo_query, limit=REPOS_PER_PAGE, sort_field_name="rid"
        )
        repos = list(repos)
        assert len(repos) <= REPOS_PER_PAGE

    # Collect the IDs of the repositories found for subsequent lookup of popularity
    # and/or last modified.
    last_modified_map = {}
    action_sum_map = {}
    if last_modified or popularity:
        repository_refs = [RepositoryReference.for_id(repo.rid) for repo in repos]
        repository_ids = [repo.rid for repo in repos]

        if last_modified:
            # Image repositories use the OCI registry model; other kinds (e.g. application)
            # fall back to the appr tag model.
            last_modified_map = (
                registry_model.get_most_recent_tag_lifetime_start(repository_refs)
                if repo_kind == "image"
                else apprtags_model.get_most_recent_tag_lifetime_start(
                    repository_ids, appr_model.models_ref
                )
            )

        if popularity:
            action_sum_map = model.log.get_repositories_action_sums(repository_ids)

    # Collect the IDs of the repositories that are starred for the user, so we can mark them
    # in the returned results.
    star_set = set()
    if username:
        starred_repos = model.repository.get_user_starred_repositories(user, repo_kind)
        star_set = {starred.id for starred in starred_repos}

    return (
        [
            RepositoryBaseElement(
                repo.namespace_user.username,
                repo.name,
                repo.rid in star_set,
                model.repository.is_repository_public(repo),
                repo_kind,
                repo.description,
                repo.namespace_user.organization,
                repo.namespace_user.removed_tag_expiration_s,
                last_modified_map.get(repo.rid),
                action_sum_map.get(repo.rid),
                last_modified,
                popularity,
                username,
                None,
                repo.state,
            )
            for repo in repos
        ],
        next_page_token,
    )
def find_repository_with_garbage(self, limit_to_gc_policy_s): repo = model.oci.tag.find_repository_with_garbage(limit_to_gc_policy_s) if repo is None: return None return RepositoryReference.for_repo_obj(repo)
def __create_manifest_and_tags(repo, structure, creator_username, tag_map, current_level=0, builder=None, last_leaf_id=None): num_layers, subtrees, tag_names = structure num_layers = num_layers or 1 tag_names = tag_names or [] tag_names = [tag_names] if not isinstance(tag_names, list) else tag_names repo_ref = RepositoryReference.for_repo_obj(repo) builder = (builder if builder else DockerSchema1ManifestBuilder( repo.namespace_user.username, repo.name, "")) # TODO: Change this to a mixture of Schema1 and Schema2 manifest once we no longer need to # read from storage for Schema2. # Populate layers. Note, we do this in reverse order using insert_layer, as it is easier to # add the leaf last (even though Schema1 has it listed first). parent_id = last_leaf_id leaf_id = None for layer_index in range(0, num_layers): content = "layer-%s-%s-%s" % (layer_index, current_level, get_epoch_timestamp_ms()) _, digest = _populate_blob(repo, content.encode("ascii")) current_id = "abcdef%s%s%s" % (layer_index, current_level, get_epoch_timestamp_ms()) if layer_index == num_layers - 1: leaf_id = current_id config = { "id": current_id, "Size": len(content), } if parent_id: config["parent"] = parent_id builder.insert_layer(digest, json.dumps(config)) parent_id = current_id for tag_name in tag_names: adjusted_tag_name = tag_name now = datetime.utcnow() if tag_name[0] == "#": adjusted_tag_name = tag_name[1:] now = now - timedelta(seconds=1) manifest = builder.clone(adjusted_tag_name).build() with freeze_time(now): created_tag, _ = registry_model.create_manifest_and_retarget_tag( repo_ref, manifest, adjusted_tag_name, store, raise_on_error=True) assert created_tag tag_map[adjusted_tag_name] = created_tag for subtree in subtrees: __create_manifest_and_tags( repo, subtree, creator_username, tag_map, current_level=current_level + 1, builder=builder, last_leaf_id=leaf_id, )
def delete_tag(repository, tag, perform_gc=True, expect_gc=True): repo_ref = RepositoryReference.for_repo_obj(repository) registry_model.delete_tag(repo_ref, tag) if perform_gc: assert gc_now(repository) == expect_gc
def test_images_shared_cas(default_tag_policy, initialized_db):
    """
    A repository, each two tags, pointing to the same image, which has image storage with the same
    *CAS path*, but *distinct records*.

    Deleting the first tag should delete the first image, and its storage, but not the file in
    storage, as it shares its CAS path.
    """
    with assert_gc_integrity(expect_storage_removed=True):
        repository = create_repository()

        # Create two image storage records with the same content checksum.
        # Both rows share the digest and therefore the same CAS path on disk.
        content = b"hello world"
        digest = "sha256:" + hashlib.sha256(content).hexdigest()
        preferred = storage.preferred_locations[0]
        storage.put_content({preferred}, storage.blob_path(digest), content)

        is1 = database.ImageStorage.create(content_checksum=digest)
        is2 = database.ImageStorage.create(content_checksum=digest)

        location = database.ImageStorageLocation.get(name=preferred)

        database.ImageStoragePlacement.create(location=location, storage=is1)
        database.ImageStoragePlacement.create(location=location, storage=is2)

        # Temp link so its available.
        model.blob.store_blob_record_and_temp_link_in_repo(
            repository, digest, location, len(content), 120
        )

        # Ensure the CAS path exists.
        assert storage.exists({preferred}, storage.blob_path(digest))

        repo_ref = RepositoryReference.for_repo_obj(repository)

        # Store a manifest pointing to that path as `first`.
        builder = DockerSchema1ManifestBuilder(
            repository.namespace_user.username, repository.name, "first"
        )
        builder.insert_layer(
            digest,
            json.dumps(
                {
                    "id": "i1",
                }
            ),
        )
        manifest = builder.build(docker_v2_signing_key)
        registry_model.create_manifest_and_retarget_tag(
            repo_ref, manifest, "first", storage, raise_on_error=True
        )

        tag_ref = registry_model.get_repo_tag(repo_ref, "first")
        manifest_ref = registry_model.get_manifest_for_tag(tag_ref)
        registry_model.populate_legacy_images_for_testing(manifest_ref, storage)

        # Store another as `second`.
        builder = DockerSchema1ManifestBuilder(
            repository.namespace_user.username, repository.name, "second"
        )
        builder.insert_layer(
            digest,
            json.dumps(
                {
                    "id": "i2",
                }
            ),
        )
        manifest = builder.build(docker_v2_signing_key)
        created, _ = registry_model.create_manifest_and_retarget_tag(
            repo_ref, manifest, "second", storage, raise_on_error=True
        )

        tag_ref = registry_model.get_repo_tag(repo_ref, "second")
        manifest_ref = registry_model.get_manifest_for_tag(tag_ref)
        registry_model.populate_legacy_images_for_testing(manifest_ref, storage)

        # Manually retarget the second manifest's blob to the second row.
        # Manifest creation may have deduplicated onto either ImageStorage row, so
        # whichever row the second manifest landed on, point it at the *other* one
        # to guarantee the two manifests reference distinct storage records.
        try:
            second_blob = ManifestBlob.get(manifest=created._db_id, blob=is1)
            second_blob.blob = is2
            second_blob.save()
        except ManifestBlob.DoesNotExist:
            second_blob = ManifestBlob.get(manifest=created._db_id, blob=is2)
            second_blob.blob = is1
            second_blob.save()

        # Delete the temp reference.
        _delete_temp_links(repository)

        # Ensure the legacy images exist.
        assert_not_deleted(repository, "i1", "i2")

        # Delete the first tag.
        delete_tag(repository, "first")
        assert_deleted(repository, "i1")
        assert_not_deleted(repository, "i2")

        # Ensure the CAS path still exists.
        # The file is shared with the surviving storage record, so GC must not remove it.
        assert storage.exists({preferred}, storage.blob_path(digest))
def _authorize_or_downscope_request(scope_param, has_valid_auth_context):
    # TODO: The complexity of this function is difficult to follow and maintain. Refactor/Cleanup.
    """
    Parse a docker/OCI token `scope` parameter, check the caller's permissions, and
    return a scopeResult containing only the actions actually granted (a downscoped
    set), or None for an empty scope.

    Args:
        scope_param: the raw scope string, e.g. "repository:ns/repo:push,pull".
        has_valid_auth_context: whether the request carries an authenticated user/token.

    Raises:
        Unauthorized: empty scope with no auth context.
        InvalidRequest: scope string that does not match the expected grammar.
        NameInvalid: malformed or nested repository name.
        NamespaceDisabled: the target namespace has been disabled.
        Unknown: the repository is marked for deletion.
        Unsupported: the repository is not an image repository.
    """
    if len(scope_param) == 0:
        if not has_valid_auth_context:
            # In this case, we are doing an auth flow, and it's not an anonymous pull.
            logger.debug("No user and no token sent for empty scope list")
            raise Unauthorized()

        return None

    match = _get_scope_regex().match(scope_param)
    if match is None:
        logger.debug("Match: %s", match)
        logger.debug("len: %s", len(scope_param))
        logger.warning("Unable to decode repository and actions: %s", scope_param)
        raise InvalidRequest("Unable to decode repository and actions: %s" % scope_param)

    logger.debug("Match: %s", match.groups())

    registry_and_repo = match.group(1)
    namespace_and_repo = match.group(2)
    requested_actions = match.group(3).split(",")

    lib_namespace = app.config["LIBRARY_NAMESPACE"]
    namespace, reponame = parse_namespace_repository(namespace_and_repo, lib_namespace)

    # Ensure that we are never creating an invalid repository.
    if not REPOSITORY_NAME_REGEX.match(reponame):
        logger.debug("Found invalid repository name in auth flow: %s", reponame)
        if len(namespace_and_repo.split("/")) > 1:
            msg = "Nested repositories are not supported. Found: %s" % namespace_and_repo
            raise NameInvalid(message=msg)

        raise NameInvalid(message="Invalid repository name: %s" % namespace_and_repo)

    # Ensure the namespace is enabled.
    if registry_model.is_existing_disabled_namespace(namespace):
        msg = "Namespace %s has been disabled. Please contact a system administrator." % namespace
        raise NamespaceDisabled(message=msg)

    final_actions = []

    repository_ref = registry_model.lookup_repository(namespace, reponame)
    repo_is_public = repository_ref is not None and repository_ref.is_public
    invalid_repo_message = ""
    if repository_ref is not None and repository_ref.kind != "image":
        invalid_repo_message = (
            "This repository is for managing %s " + "and not container images."
        ) % repository_ref.kind

    # Ensure the repository is not marked for deletion.
    if repository_ref is not None and repository_ref.state == RepositoryState.MARKED_FOR_DELETION:
        raise Unknown(message="Unknown repository")

    if "push" in requested_actions:
        # Check if there is a valid user or token, as otherwise the repository cannot be
        # accessed.
        if has_valid_auth_context:
            user = get_authenticated_user()

            # Lookup the repository. If it exists, make sure the entity has modify
            # permission. Otherwise, make sure the entity has create permission.
            if repository_ref:
                if ModifyRepositoryPermission(namespace, reponame).can():
                    if repository_ref is not None and repository_ref.kind != "image":
                        raise Unsupported(message=invalid_repo_message)

                    # Check for different repository states.
                    if repository_ref.state == RepositoryState.NORMAL:
                        # In NORMAL mode, if the user has permission, then they can push.
                        final_actions.append("push")
                    elif repository_ref.state == RepositoryState.MIRROR:
                        # In MIRROR mode, only the mirroring robot can push.
                        mirror = model.repo_mirror.get_mirror(repository_ref.id)
                        robot = mirror.internal_robot if mirror is not None else None
                        if robot is not None and user is not None and robot == user:
                            assert robot.robot
                            final_actions.append("push")
                        else:
                            logger.debug(
                                "Repository %s/%s push requested for non-mirror robot %s: %s",
                                namespace,
                                reponame,
                                robot,
                                user,
                            )
                    elif repository_ref.state == RepositoryState.READ_ONLY:
                        # No pushing allowed in read-only state.
                        pass
                    else:
                        logger.warning(
                            "Unknown state for repository %s: %s",
                            repository_ref,
                            repository_ref.state,
                        )
                else:
                    logger.debug("No permission to modify repository %s/%s", namespace, reponame)
            else:
                # TODO: Push-to-create functionality should be configurable
                if CreateRepositoryPermission(namespace).can() and user is not None:
                    logger.debug("Creating repository: %s/%s", namespace, reponame)
                    repository_ref = RepositoryReference.for_repo_obj(
                        model.repository.create_repository(namespace, reponame, user)
                    )
                    final_actions.append("push")
                else:
                    logger.debug("No permission to create repository %s/%s", namespace, reponame)

    if "pull" in requested_actions:
        # Grant pull if the user can read the repo or it is public.
        if ReadRepositoryPermission(namespace, reponame).can() or repo_is_public:
            if repository_ref is not None and repository_ref.kind != "image":
                raise Unsupported(message=invalid_repo_message)

            final_actions.append("pull")
        else:
            logger.debug("No permission to pull repository %s/%s", namespace, reponame)

    if "*" in requested_actions:
        # Grant * user is admin
        if AdministerRepositoryPermission(namespace, reponame).can():
            if repository_ref is not None and repository_ref.kind != "image":
                raise Unsupported(message=invalid_repo_message)

            if repository_ref and repository_ref.state in (
                RepositoryState.MIRROR,
                RepositoryState.READ_ONLY,
            ):
                # Mirrored/read-only repositories are never administrable through this flow.
                logger.debug("No permission to administer repository %s/%s", namespace, reponame)
            else:
                # NOTE(review): if repository_ref is None here this raises AttributeError;
                # presumably admin permission implies the repo exists — confirm.
                assert repository_ref.state == RepositoryState.NORMAL
                final_actions.append("*")
        else:
            logger.debug("No permission to administer repository %s/%s", namespace, reponame)

    # Final sanity checks.
    if "push" in final_actions:
        assert repository_ref.state != RepositoryState.READ_ONLY

    if "*" in final_actions:
        assert repository_ref.state == RepositoryState.NORMAL

    return scopeResult(
        actions=final_actions,
        namespace=namespace,
        repository=reponame,
        registry_and_repo=registry_and_repo,
        tuf_root=_get_tuf_root(repository_ref, namespace, reponame),
    )
def delete_tag(repository, tag, perform_gc=True, expect_gc=True): repo_ref = RepositoryReference.for_repo_obj(repository) registry_model.delete_tag(repo_ref, tag) if perform_gc: assert model.gc.garbage_collect_repo(repository) == expect_gc
def test_lookup_unknown_manifest(registry_model): repo = model.repository.get_repository('devtable', 'simple') repository_ref = RepositoryReference.for_repo_obj(repo) found = registry_model.lookup_manifest_by_digest(repository_ref, 'sha256:deadbeef') assert found is None