def init_app(self, app, chunk_cleanup_queue, instance_keys, config_provider, ip_resolver):
    # Instantiate a storage driver for each configured location.
    storages = {}
    for location, storage_params in app.config.get("DISTRIBUTED_STORAGE_CONFIG").items():
        storages[location] = get_storage_driver(
            location,
            chunk_cleanup_queue,
            config_provider,
            ip_resolver,
            storage_params,
        )

    # Fall back to all configured locations if no explicit preference is set.
    preference = app.config.get("DISTRIBUTED_STORAGE_PREFERENCE", None)
    if not preference:
        preference = storages.keys()

    default_locations = app.config.get("DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS") or []

    # Only enable the download proxy when the feature is on and instance keys exist.
    download_proxy = None
    if app.config.get("FEATURE_PROXY_STORAGE", False) and instance_keys is not None:
        download_proxy = DownloadProxy(app, instance_keys)

    d_storage = DistributedStorage(
        storages,
        preference,
        default_locations,
        download_proxy,
        app.config.get("REGISTRY_STATE") == "readonly",
    )

    # Register the extension with the app.
    app.extensions = getattr(app, "extensions", {})
    app.extensions["storage"] = d_storage
    return d_storage

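# Illustrative wiring sketch (not from the original source): how init_app above
# might be invoked from a Flask app factory. `Storage` is assumed to be the
# extension class that owns init_app; the config shape and the collaborator
# arguments (stubbed with None) are assumptions, not taken from the repo.
from flask import Flask


def create_app():
    app = Flask(__name__)
    # Assumed config shape: location -> (driver name, driver parameters).
    app.config["DISTRIBUTED_STORAGE_CONFIG"] = {
        "local_us": ("LocalStorage", {"storage_path": "/tmp/registry"}),
    }
    app.config["DISTRIBUTED_STORAGE_PREFERENCE"] = ["local_us"]

    storage = Storage()  # assumption: the class defining init_app above
    storage.init_app(
        app,
        chunk_cleanup_queue=None,  # stub: a real deployment supplies a work queue
        instance_keys=None,  # None disables the download proxy branch
        config_provider=None,
        ip_resolver=None,
    )
    return app
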
def test_basic_upload_blob(chunk_count, subchunk, registry_model):
    # chunk_count and subchunk are pytest parameters; the parametrize
    # decorators are elided in this excerpt.
    repository_ref = registry_model.lookup_repository("devtable", "complex")
    storage = DistributedStorage({"local_us": FakeStorage(None)}, ["local_us"])
    settings = BlobUploadSettings("2M", 3600)
    app_config = {"TESTING": True}

    data = b""
    with upload_blob(repository_ref, storage, settings) as manager:
        assert manager
        assert manager.blob_upload_id

        for index in range(0, chunk_count):
            chunk_data = os.urandom(100)
            data += chunk_data

            if subchunk:
                manager.upload_chunk(app_config, BytesIO(chunk_data))
                manager.upload_chunk(app_config, BytesIO(chunk_data), (index * 100) + 50)
            else:
                manager.upload_chunk(app_config, BytesIO(chunk_data))

        blob = manager.commit_to_blob(app_config)

    # Check the blob.
    assert blob.compressed_size == len(data)
    assert not blob.uploading
    assert blob.digest == "sha256:" + hashlib.sha256(data).hexdigest()

    # Ensure the blob exists in storage and has the expected data.
    assert storage.get_content(["local_us"], blob.storage_path) == data

def test_extra_blob_stream_handlers(registry_model):
    handler1_result = []
    handler2_result = []

    def handler1(bytes_data):
        handler1_result.append(bytes_data)

    def handler2(bytes_data):
        handler2_result.append(bytes_data)

    repository_ref = registry_model.lookup_repository("devtable", "complex")
    storage = DistributedStorage({"local_us": FakeStorage(None)}, ["local_us"])
    settings = BlobUploadSettings("1K", 3600)
    app_config = {"TESTING": True}

    with upload_blob(
        repository_ref, storage, settings, extra_blob_stream_handlers=[handler1, handler2]
    ) as manager:
        manager.upload_chunk(app_config, BytesIO(b"hello "))
        manager.upload_chunk(app_config, BytesIO(b"world"))

    assert b"".join(handler1_result) == b"hello world"
    assert b"".join(handler2_result) == b"hello world"

def test_too_large(registry_model):
    repository_ref = registry_model.lookup_repository("devtable", "complex")
    storage = DistributedStorage({"local_us": FakeStorage(None)}, ["local_us"])
    settings = BlobUploadSettings("1K", 3600)
    app_config = {"TESTING": True}

    with upload_blob(repository_ref, storage, settings) as manager:
        with pytest.raises(BlobTooLargeException):
            manager.upload_chunk(app_config, BytesIO(os.urandom(1024 * 1024 * 2)))

def test_uncompressed_size(registry_model):
    repository_ref = registry_model.lookup_repository("devtable", "complex")
    storage = DistributedStorage({"local_us": FakeStorage(None)}, ["local_us"])
    settings = BlobUploadSettings("1K", 3600)
    app_config = {"TESTING": True}

    with upload_blob(repository_ref, storage, settings) as manager:
        manager.upload_chunk(app_config, BytesIO(valid_tar_gz(b"hello world")))

        blob = manager.commit_to_blob(app_config)

    assert blob.compressed_size is not None
    assert blob.uncompressed_size is not None

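# Illustrative sketch (not part of the original source): the valid_tar_gz
# helper used in test_uncompressed_size is assumed to return the bytes of a
# gzipped tarball wrapping the given content, which is what lets the uploader
# record both a compressed and an uncompressed size for the blob.
import io
import tarfile


def valid_tar_gz(contents: bytes) -> bytes:
    out = io.BytesIO()
    with tarfile.open(fileobj=out, mode="w:gz") as tar:
        # Store the payload as a single file inside the archive.
        info = tarfile.TarInfo(name="contents")
        info.size = len(contents)
        tar.addfile(info, io.BytesIO(contents))
    return out.getvalue()
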
def test_build_manifest_missing_parent(fake_session, registry_model):
    storage = DistributedStorage({"local_us": FakeStorage(None)}, ["local_us"])
    repository_ref = registry_model.lookup_repository("devtable", "complex")
    builder = create_manifest_builder(repository_ref, storage, docker_v2_signing_key)

    assert (
        builder.start_layer(
            "somelayer",
            json.dumps({"id": "somelayer", "parent": "someparent"}),
            "local_us",
            None,
            60,
        )
        is None
    )

def test_cancel_upload(registry_model):
    repository_ref = registry_model.lookup_repository("devtable", "complex")
    storage = DistributedStorage({"local_us": FakeStorage(None)}, ["local_us"])
    settings = BlobUploadSettings("2M", 3600)
    app_config = {"TESTING": True}

    blob_upload_id = None
    with upload_blob(repository_ref, storage, settings) as manager:
        blob_upload_id = manager.blob_upload_id
        assert registry_model.lookup_blob_upload(repository_ref, blob_upload_id) is not None
        manager.upload_chunk(app_config, BytesIO(b"hello world"))

    # Since the blob was not committed, the upload should be deleted.
    assert blob_upload_id
    assert registry_model.lookup_blob_upload(repository_ref, blob_upload_id) is None

def storage():
    return DistributedStorage(
        {"local_us": FakeStorage("local"), "local_eu": FakeStorage("local")}, ["local_us"]
    )

def test_build_manifest(layers, fake_session, registry_model):
    # `layers` is a pytest parameter (the parametrize decorator is elided in
    # this excerpt); each entry is a (layer_id, parent_id, layer_bytes) tuple.
    repository_ref = registry_model.lookup_repository("devtable", "complex")
    storage = DistributedStorage({"local_us": FakeStorage(None)}, ["local_us"])
    settings = BlobUploadSettings("2M", 3600)
    app_config = {"TESTING": True}

    builder = create_manifest_builder(repository_ref, storage, docker_v2_signing_key)
    assert (
        lookup_manifest_builder(repository_ref, "anotherid", storage, docker_v2_signing_key)
        is None
    )
    assert (
        lookup_manifest_builder(repository_ref, builder.builder_id, storage, docker_v2_signing_key)
        is not None
    )

    blobs_by_layer = {}
    for layer_id, parent_id, layer_bytes in layers:
        # Start a new layer.
        assert builder.start_layer(
            layer_id, json.dumps({"id": layer_id, "parent": parent_id}), "local_us", None, 60
        )

        checksum = hashlib.sha1(layer_bytes).hexdigest()

        # Assign it a blob.
        with upload_blob(repository_ref, storage, settings) as uploader:
            uploader.upload_chunk(app_config, BytesIO(layer_bytes))
            blob = uploader.commit_to_blob(app_config)

        blobs_by_layer[layer_id] = blob
        builder.assign_layer_blob(builder.lookup_layer(layer_id), blob, [checksum])

        # Validate the checksum.
        assert builder.validate_layer_checksum(builder.lookup_layer(layer_id), checksum)

    # Commit the manifest to a tag.
    tag = builder.commit_tag_and_manifest("somenewtag", builder.lookup_layer(layers[-1][0]))
    assert tag
    assert tag in builder.committed_tags

    # Mark the builder as done.
    builder.done()

    # Verify the legacy image for the tag.
    found = registry_model.get_repo_tag(repository_ref, "somenewtag", include_legacy_image=True)
    assert found
    assert found.name == "somenewtag"
    assert found.legacy_image.docker_image_id == layers[-1][0]

    # Verify the blob and manifest.
    manifest = registry_model.get_manifest_for_tag(found)
    assert manifest

    parsed = manifest.get_parsed_manifest()
    assert len(list(parsed.layers)) == len(layers)

    for index, (layer_id, parent_id, layer_bytes) in enumerate(layers):
        assert list(parsed.blob_digests)[index] == blobs_by_layer[layer_id].digest
        assert list(parsed.layers)[index].v1_metadata.image_id == layer_id
        assert list(parsed.layers)[index].v1_metadata.parent_image_id == parent_id

    assert parsed.leaf_layer_v1_image_id == layers[-1][0]

def storage(app):
    return DistributedStorage({"local_us": FakeStorage(None)}, preferred_locations=["local_us"])