def start_blob_upload(namespace_name, repo_name):
    """Begin a v2 blob upload session for the given repository.

    Handles three flows: cross-repository blob mounting (via the `mount`
    query parameter), a monolithic single-request upload (via the `digest`
    query parameter), and the default chunked flow, where a 202 with an
    upload location is returned for follow-up chunk requests.
    """
    repository_ref = registry_model.lookup_repository(namespace_name, repo_name)
    if repository_ref is None:
        raise NameUnknown()

    # A `mount` parameter asks us to link an existing blob from another
    # repository instead of receiving new data.
    mount_blob_digest = request.args.get('mount', None)
    if mount_blob_digest is not None:
        mount_response = _try_to_mount_blob(repository_ref, mount_blob_digest)
        if mount_response is not None:
            return mount_response

    # Open a new upload session.
    uploader = create_blob_upload(repository_ref, storage, _upload_settings())
    if uploader is None:
        logger.debug('Could not create a blob upload for `%s/%s`', namespace_name, repo_name)
        raise InvalidRequest(
            message='Unable to start blob upload for unknown repository')

    repo_path = '%s/%s' % (namespace_name, repo_name)

    # Without a `digest` parameter, the client sends the blob data in
    # follow-up chunk requests; answer 202 with the location to use.
    digest = request.args.get('digest', None)
    if digest is None:
        chunk_location = get_app_url() + url_for('v2.upload_chunk',
                                                 repository=repo_path,
                                                 upload_uuid=uploader.blob_upload_id)
        return Response(
            status=202,
            headers={
                'Docker-Upload-UUID': uploader.blob_upload_id,
                'Range': _render_range(0),
                'Location': chunk_location,
            },
        )

    # A `digest` was supplied: the whole blob arrives in this request, so
    # consume it now and commit the result.
    with complete_when_uploaded(uploader):
        _upload_chunk(uploader, digest)

    blob_location = get_app_url() + url_for('v2.download_blob',
                                            repository=repo_path,
                                            digest=digest)
    return Response(
        status=201,
        headers={
            'Docker-Content-Digest': digest,
            'Location': blob_location,
        },
    )
def _download_blob(self, repo_ref: RepositoryReference, digest: str) -> None:
    """
    Download a blob from the upstream registry and perform a monolithic
    upload of it into Quay's own storage.

    Args:
        repo_ref: the local repository under which the blob is stored.
        digest: content digest of the blob to fetch from upstream.

    Side effects: enforces the repository quota before downloading, commits
    the blob to storage, and recalculates the repository size afterwards.

    Note: the original annotated the return type as ``int``, but the body
    has no return statement; corrected to ``None``.
    """
    self._enforce_repository_quota(repo_ref)

    # Prefer the proxy-cache configured expiration; fall back to the
    # application-wide temporary-tag push expiration.
    expiration = (self._config.expiration_s
                  if self._config.expiration_s
                  else app.config["PUSH_TEMP_TAG_EXPIRATION_SEC"])
    settings = BlobUploadSettings(
        maximum_blob_size=app.config["MAXIMUM_LAYER_SIZE"],
        committed_blob_expiration=expiration,
    )
    uploader = create_blob_upload(repo_ref, storage, settings)

    with self._proxy.get_blob(digest) as resp:
        start_offset = 0
        # -1 signals an unknown length when upstream omits content-length.
        length = int(resp.headers.get("content-length", -1))
        with complete_when_uploaded(uploader):
            uploader.upload_chunk(app.config, resp.raw, start_offset, length)
            uploader.commit_to_blob(app.config, digest)
    # NOTE(review): placement relative to the upstream-response context
    # manager inferred from the collapsed source — confirm it runs after
    # the response is closed.
    self._recalculate_repository_size(repo_ref)
def start_blob_upload(namespace_name, repo_name):
    """Begin a v2 blob upload session for the given repository.

    Supports cross-repository blob mounting (`mount` query parameter),
    a monolithic single-request upload (`digest` query parameter), and the
    default chunked flow that returns a 202 with the chunk-upload location.
    Rejects the upload up front when the namespace quota is exhausted.
    """
    repository_ref = registry_model.lookup_repository(namespace_name, repo_name)
    if repository_ref is None:
        raise NameUnknown("repository not found")

    # When quota management is enabled, refuse the upload outright if the
    # namespace has hit its hard ("Reject") limit, alerting the org admins.
    if app.config.get("FEATURE_QUOTA_MANAGEMENT", False):
        quota = namespacequota.verify_namespace_quota(repository_ref)
        if quota["severity_level"] == "Reject":
            namespacequota.notify_organization_admins(
                repository_ref, "quota_error", {"severity": "Reject"}
            )
            raise QuotaExceeded

    # A `mount` parameter asks us to link an existing blob from another
    # repository instead of receiving new data.
    mount_blob_digest = request.args.get("mount", None)
    if mount_blob_digest is not None:
        mount_response = _try_to_mount_blob(repository_ref, mount_blob_digest)
        if mount_response is not None:
            return mount_response

    # Open a new upload session.
    uploader = create_blob_upload(repository_ref, storage, _upload_settings())
    if uploader is None:
        logger.debug("Could not create a blob upload for `%s/%s`", namespace_name, repo_name)
        raise InvalidRequest(message="Unable to start blob upload for unknown repository")

    repo_path = "%s/%s" % (namespace_name, repo_name)

    # Without a `digest`, the client sends the blob data in follow-up chunk
    # requests; answer 202 with the location to use for those chunks.
    digest = request.args.get("digest", None)
    if digest is None:
        chunk_location = get_app_url() + url_for(
            "v2.upload_chunk",
            repository=repo_path,
            upload_uuid=uploader.blob_upload_id,
        )
        return Response(
            status=202,
            headers={
                "Docker-Upload-UUID": uploader.blob_upload_id,
                "Range": _render_range(0),
                "Location": chunk_location,
            },
        )

    # A `digest` was supplied: the whole blob arrives in this request, so
    # consume it now and commit the result.
    with complete_when_uploaded(uploader):
        _upload_chunk(uploader, digest)

    blob_location = get_app_url() + url_for(
        "v2.download_blob", repository=repo_path, digest=digest
    )
    return Response(
        status=201,
        headers={
            "Docker-Content-Digest": digest,
            "Location": blob_location,
        },
    )