def get_image_layer(namespace, repository, image_id, headers):
    permission = ReadRepositoryPermission(namespace, repository)
    repository_ref = registry_model.lookup_repository(namespace, repository, kind_filter="image")

    logger.debug("Checking repo permissions")
    # Allow the pull if the user has read permission or the repository is public.
    if permission.can() or (repository_ref is not None and repository_ref.is_public):
        if repository_ref is None:
            abort(404)

        legacy_image = registry_model.get_legacy_image(repository_ref, image_id, include_blob=True)
        if legacy_image is None:
            abort(404, "Image %(image_id)s not found", issue="unknown-image", image_id=image_id)

        path = legacy_image.blob.storage_path
        image_pulled_bytes.labels("v1").inc(legacy_image.blob.compressed_size)

        try:
            logger.debug("Looking up the direct download URL for path: %s", path)
            direct_download_url = store.get_direct_download_url(
                legacy_image.blob.placements, path, get_request_ip()
            )
            if direct_download_url:
                logger.debug("Returning direct download URL")
                resp = redirect(direct_download_url)
                return resp

            # Close the database handle here for this process before we send the long download.
            database.close_db_filter(None)

            logger.debug("Streaming layer data")
            return Response(store.stream_read(legacy_image.blob.placements, path), headers=headers)
        except (IOError, AttributeError):
            logger.exception("Image layer data not found")
            abort(404, "Image %(image_id)s not found", issue="unknown-image", image_id=image_id)

    abort(403)
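
# The handler above short-circuits to a signed direct download URL when the
# storage backend can serve the blob itself, and only proxies the bytes
# through the registry process as a fallback. Below is a minimal sketch of
# that pattern, assuming a Flask environment and a hypothetical storage
# driver exposing get_direct_download_url/stream_read; the names here are
# illustrative, not part of the actual registry code.
from flask import Response, redirect


def serve_blob_sketch(driver, placements, path, headers):
    # Prefer a pre-signed URL so the (potentially huge) blob bypasses this
    # process entirely and is served by the storage backend.
    url = driver.get_direct_download_url(placements, path)
    if url is not None:
        return redirect(url)

    # Fall back to streaming the bytes through the app; the iterator returned
    # by stream_read is consumed lazily by the WSGI server.
    return Response(driver.stream_read(placements, path), headers=headers)
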
def download_blob(namespace_name, repo_name, digest):
    # Find the blob.
    blob = registry_model.get_cached_repo_blob(model_cache, namespace_name, repo_name, digest)
    if blob is None:
        raise BlobUnknown()

    # Build the response headers.
    headers = {"Docker-Content-Digest": digest}

    # If our storage supports range requests, let the client know.
    if storage.get_supports_resumable_downloads(blob.placements):
        headers["Accept-Ranges"] = "bytes"

    image_pulled_bytes.labels("v2").inc(blob.compressed_size)

    # Short-circuit by redirecting if the storage supports it.
    path = blob.storage_path
    logger.debug("Looking up the direct download URL for path: %s", path)
    direct_download_url = storage.get_direct_download_url(blob.placements, path, get_request_ip())
    if direct_download_url:
        logger.debug("Returning direct download URL")
        resp = redirect(direct_download_url)
        resp.headers.extend(headers)
        return resp

    # Close the database connection before we stream the download.
    logger.debug("Closing database connection before streaming layer data")

    headers.update(
        {
            "Content-Length": blob.compressed_size,
            "Content-Type": BLOB_CONTENT_TYPE,
        }
    )

    with database.CloseForLongOperation(app.config):
        # Stream the response to the client.
        return Response(
            storage.stream_read(blob.placements, path),
            headers=headers,
        )
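
# database.CloseForLongOperation above releases the DB connection for the
# duration of the streamed response, so a pooled connection is not pinned
# (or timed out server-side) while a slow client downloads. A rough sketch
# of the idea, assuming a peewee-style handle with is_closed()/close(); this
# is an illustration of the pattern, not the actual implementation.
class _CloseForLongOperationSketch(object):
    def __init__(self, db):
        self._db = db

    def __enter__(self):
        # Drop the connection up front; the long operation needs no DB access.
        if not self._db.is_closed():
            self._db.close()

    def __exit__(self, exc_type, exc_value, exc_tb):
        # Do not reconnect eagerly: the next query reopens the connection.
        return False
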
def _repo_verb(
    namespace, repository, tag_name, verb, formatter, sign=False, checker=None, **kwargs
):
    # Verify that the image exists and that we have access to it.
    logger.debug(
        "Verifying repo verb %s for repository %s/%s with user %s with mimetype %s",
        verb,
        namespace,
        repository,
        get_authenticated_user(),
        request.accept_mimetypes.best,
    )
    tag, manifest, schema1_manifest = _verify_repo_verb(
        storage, namespace, repository, tag_name, verb, checker
    )

    # Load the repository for later.
    repo = model.repository.get_repository(namespace, repository)
    if repo is None:
        abort(404)

    # Check for torrent, which is no longer supported.
    if request.accept_mimetypes.best == "application/x-bittorrent":
        abort(406)

    # Log the action.
    track_and_log("repo_verb", wrap_repository(repo), tag=tag.name, verb=verb, **kwargs)

    is_readonly = app.config.get("REGISTRY_STATE", "normal") == "readonly"

    # Lookup/create the derived image for the verb and repo image.
    if is_readonly:
        derived_image = registry_model.lookup_derived_image(
            manifest, verb, storage, varying_metadata={"tag": tag.name}, include_placements=True
        )
    else:
        derived_image = registry_model.lookup_or_create_derived_image(
            manifest,
            verb,
            storage.preferred_locations[0],
            storage,
            varying_metadata={"tag": tag.name},
            include_placements=True,
        )
        if derived_image is None:
            logger.error("Could not create or lookup a derived image for manifest %s", manifest)
            abort(400)

    if derived_image is not None and not derived_image.blob.uploading:
        logger.debug("Derived %s image %s exists in storage", verb, derived_image)
        is_head_request = request.method == "HEAD"

        if derived_image.blob.compressed_size:
            image_pulled_bytes.labels("verbs").inc(derived_image.blob.compressed_size)

        download_url = storage.get_direct_download_url(
            derived_image.blob.placements, derived_image.blob.storage_path, head=is_head_request
        )
        if download_url:
            logger.debug("Redirecting to download URL for derived %s image %s", verb, derived_image)
            return redirect(download_url)

        # Close the database handle here for this process before we send the long download.
        database.close_db_filter(None)

        logger.debug("Sending cached derived %s image %s", verb, derived_image)
        return send_file(
            storage.stream_read_file(
                derived_image.blob.placements, derived_image.blob.storage_path
            ),
            mimetype=LAYER_MIMETYPE,
        )

    logger.debug("Building and returning derived %s image", verb)
    hasher = SimpleHasher()

    # Close the database connection before any process forking occurs. This is important because
    # the Postgres driver does not react kindly to forking, so we need to make sure it is closed
    # so that each process will get its own unique connection.
    database.close_db_filter(None)

    def _cleanup():
        # Close any existing DB connection once the process has exited.
        database.close_db_filter(None)

    def _store_metadata_and_cleanup():
        if is_readonly:
            return

        with database.UseThenDisconnect(app.config):
            registry_model.set_derived_image_size(derived_image, hasher.hashed_bytes)

    # Create a queue process to generate the data. The queue files will read from the process
    # and send the results to the client and storage.
    unique_id = (
        derived_image.unique_id
        if derived_image is not None
        else hashlib.sha256(("%s:%s" % (verb, uuid.uuid4())).encode("utf-8")).hexdigest()
    )
    handlers = [hasher.update]
    reporter = VerbReporter(verb)
    args = (formatter, tag, schema1_manifest, unique_id, handlers, reporter)
    queue_process = QueueProcess(
        _open_stream,
        8 * 1024,
        10 * 1024 * 1024,  # 8K/10M chunk/max
        args,
        finished=_store_metadata_and_cleanup,
    )

    client_queue_file = QueueFile(
        queue_process.create_queue(), "client", timeout=QUEUE_FILE_TIMEOUT
    )

    if not is_readonly:
        storage_queue_file = QueueFile(
            queue_process.create_queue(), "storage", timeout=QUEUE_FILE_TIMEOUT
        )

    # If signing is required, add a QueueFile for signing the image as we stream it out.
    signing_queue_file = None
    if sign and signer.name:
        signing_queue_file = QueueFile(
            queue_process.create_queue(), "signing", timeout=QUEUE_FILE_TIMEOUT
        )

    # Start building.
    queue_process.run()

    # Start the storage saving.
    if not is_readonly:
        storage_args = (verb, derived_image, storage_queue_file, namespace, repository, tag_name)
        QueueProcess.run_process(_write_derived_image_to_storage, storage_args, finished=_cleanup)

    if sign and signer.name:
        signing_args = (verb, derived_image, signing_queue_file)
        QueueProcess.run_process(_sign_derived_image, signing_args, finished=_cleanup)

    # Close the database handle here for this process before we send the long download.
    database.close_db_filter(None)

    # Return the client's data.
    return send_file(client_queue_file, mimetype=LAYER_MIMETYPE)
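
# QueueProcess/QueueFile above implement a one-producer, many-consumer
# fan-out: a single child process builds the derived image once, while the
# client response, the storage writer, and (optionally) the signer each read
# an identical copy of the stream from their own queue. Below is a minimal
# sketch of that pattern using multiprocessing; it illustrates the idea and
# is not the actual QueueProcess implementation.
import multiprocessing


def _produce_chunks(queues):
    # Stand-in for the derived-image generator: every chunk is duplicated
    # onto each consumer's queue, followed by a None sentinel for EOF.
    for chunk in (b"chunk-1", b"chunk-2", b"chunk-3"):
        for queue in queues:
            queue.put(chunk)
    for queue in queues:
        queue.put(None)


def _drain(queue):
    # Read one consumer's copy of the stream until the EOF sentinel.
    chunks = []
    while True:
        chunk = queue.get()
        if chunk is None:
            return b"".join(chunks)
        chunks.append(chunk)


if __name__ == "__main__":
    client_queue = multiprocessing.Queue()
    storage_queue = multiprocessing.Queue()
    producer = multiprocessing.Process(
        target=_produce_chunks, args=([client_queue, storage_queue],)
    )
    producer.start()
    # Each consumer sees the full stream independently of the other.
    assert _drain(client_queue) == _drain(storage_queue) == b"chunk-1chunk-2chunk-3"
    producer.join()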