def delete_manifest_by_digest(namespace_name, repo_name, manifest_ref):
    """
    Delete the manifest specified by the digest.

    Note: there is no equivalent method for deleting by tag name because it is forbidden by the
    spec.

    Raises NameUnknown if the repository does not exist and ManifestUnknown if the digest does
    not resolve to a manifest (or the manifest had no tags to delete).
    """
    # Deletion writes must go to the primary database, never a read replica.
    with db_disallow_replica_use():
        repository_ref = registry_model.lookup_repository(namespace_name, repo_name)
        if repository_ref is None:
            raise NameUnknown()

        manifest = registry_model.lookup_manifest_by_digest(repository_ref, manifest_ref)
        if manifest is None:
            raise ManifestUnknown()

        # Deleting a manifest is expressed as deleting every tag that points at it; if
        # nothing referenced the manifest, report it as unknown.
        tags = registry_model.delete_tags_for_manifest(manifest)
        if not tags:
            raise ManifestUnknown()

        # One audit-log entry per deleted tag.
        for tag in tags:
            track_and_log("delete_tag", repository_ref, tag=tag.name, digest=manifest_ref)

    # 202 Accepted is the status mandated by the registry API for manifest deletion.
    return Response(status=202)
def post(self, namespace_name, repository_name):
    """
    Create a RepoMirrorConfig for a given Repository.

    Returns 201 on success, 409 if a mirror configuration already exists, and raises
    NotFound if the repository does not exist.
    """
    # TODO: Tidy up this function
    # TODO: Specify only the data we want to pass on when creating the RepoMirrorConfig. Avoid
    # the possibility of data injection.
    repo = model.repository.get_repository(namespace_name, repository_name)
    if not repo:
        raise NotFound()

    # Only one mirror configuration may exist per repository.
    # Bug fix: error message previously read "already exits".
    if model.repo_mirror.get_mirror(repo):
        return {'detail': 'Mirror configuration already exists for repository %s/%s' % (
            namespace_name, repository_name)}, 409

    data = request.get_json()
    data['sync_start_date'] = self._string_to_dt(data['sync_start_date'])

    # The root rule is created separately and must not be forwarded as a raw kwarg.
    rule = model.repo_mirror.create_rule(repo, data['root_rule']['rule_value'])
    del data['root_rule']

    # Verify the robot is part of the Repository's namespace
    robot = self._setup_robot_for_mirroring(namespace_name, repository_name,
                                            data['robot_username'])
    del data['robot_username']

    mirror = model.repo_mirror.enable_mirroring_for_repository(
        repo, root_rule=rule, internal_robot=robot, **data)
    if mirror:
        track_and_log('repo_mirror_config_changed', wrap_repository(repo),
                      changed='external_reference', to=data['external_reference'])
        return '', 201
    else:
        # TODO: Determine appropriate Response
        return {'detail': 'RepoMirrorConfig already exists for this repository.'}, 409
def log_action(
    self,
    event_name,
    namespace_name,
    repo_name=None,
    analytics_name=None,
    analytics_sample=1,
    metadata=None,
):
    """
    Record an audit-log/analytics event, optionally scoped to an application repository.

    When repo_name is given, the repository is looked up and a lightweight AttrDict view
    of it is passed along so downstream logging does not need a live DB model.
    """
    if metadata is None:
        metadata = {}

    repo = None
    if repo_name is not None:
        found = data.model.repository.get_repository(
            namespace_name, repo_name, kind_filter="application"
        )
        # Snapshot just the fields the logging pipeline needs.
        repo = AttrDict(
            {
                "id": found.id,
                "name": found.name,
                "namespace_name": found.namespace_user.username,
                "is_free_namespace": found.namespace_user.stripe_id is None,
            }
        )

    track_and_log(
        event_name,
        repo,
        analytics_name=analytics_name,
        analytics_sample=analytics_sample,
        **metadata,
    )
def fetch_manifest_by_tagname(namespace_name, repo_name, manifest_ref, registry_model):
    """
    Return the manifest for the given tag (v2 pull-by-tag endpoint).

    Every exit path increments the `image_pulls` metric with the outcome's HTTP status.
    Raises NameUnknown, TagExpired, ManifestUnknown or ManifestInvalid on failure.
    """
    try:
        repository_ref = registry_model.lookup_repository(
            namespace_name, repo_name, raise_on_error=True, manifest_ref=manifest_ref
        )
    except RepositoryDoesNotExist as e:
        image_pulls.labels("v2", "tag", 404).inc()
        raise NameUnknown("repository not found")

    try:
        tag = registry_model.get_repo_tag(repository_ref, manifest_ref, raise_on_error=True)
    except TagDoesNotExist as e:
        # Distinguish "tag expired/deleted" (revivable via time machine) from "never existed".
        if registry_model.has_expired_tag(repository_ref, manifest_ref):
            logger.debug(
                "Found expired tag %s for repository %s/%s", manifest_ref, namespace_name, repo_name
            )
            msg = (
                "Tag %s was deleted or has expired. To pull, revive via time machine"
                % manifest_ref
            )
            image_pulls.labels("v2", "tag", 404).inc()
            raise TagExpired(msg)

        image_pulls.labels("v2", "tag", 404).inc()
        raise ManifestUnknown(str(e))

    manifest = registry_model.get_manifest_for_tag(tag)
    if manifest is None:
        # Something went wrong.
        image_pulls.labels("v2", "tag", 400).inc()
        raise ManifestInvalid()

    # Older clients may need the manifest converted to a schema they understand.
    try:
        manifest_bytes, manifest_digest, manifest_media_type = _rewrite_schema_if_necessary(
            namespace_name, repo_name, manifest_ref, manifest, registry_model
        )
    except (ManifestException, ManifestDoesNotExist) as e:
        image_pulls.labels("v2", "tag", 404).inc()
        raise ManifestUnknown(str(e))

    if manifest_bytes is None:
        image_pulls.labels("v2", "tag", 404).inc()
        raise ManifestUnknown()

    # Sampled analytics event (1% of pulls) plus the success metric.
    track_and_log(
        "pull_repo",
        repository_ref,
        analytics_name="pull_repo_100x",
        analytics_sample=0.01,
        tag=manifest_ref,
    )
    image_pulls.labels("v2", "tag", 200).inc()

    return Response(
        manifest_bytes.as_unicode(),
        status=200,
        headers={
            "Content-Type": manifest_media_type,
            "Docker-Content-Digest": manifest_digest,
        },
    )
def update_images(namespace_name, repo_name):
    """
    V1 push endpoint: finalize a push by committing the manifest builder and firing
    push notifications. Records the outcome in the `image_pushes` metric.
    """
    permission = ModifyRepositoryPermission(namespace_name, repo_name)
    if permission.can():
        logger.debug("Looking up repository")
        repository_ref = registry_model.lookup_repository(namespace_name, repo_name,
                                                          kind_filter="image")
        if repository_ref is None:
            # Make sure the repo actually exists.
            image_pushes.labels("v1", 404, "").inc()
            abort(404, message="Unknown repository", issue="unknown-repo")

        # The builder was stashed in the session by earlier push requests.
        builder = lookup_manifest_builder(repository_ref, session.get("manifest_builder"),
                                          storage, docker_v2_signing_key)
        if builder is None:
            image_pushes.labels("v1", 400, "").inc()
            abort(400)

        # Generate a job for each notification that has been added to this repo
        logger.debug("Adding notifications for repository")
        event_data = {
            "updated_tags": [tag.name for tag in builder.committed_tags],
        }
        builder.done()

        track_and_log("push_repo", repository_ref)
        spawn_notification(repository_ref, "repo_push", event_data)
        image_pushes.labels("v1", 204, "").inc()
        return make_response("Updated", 204)

    image_pushes.labels("v1", 403, "").inc()
    abort(403)
def fetch_manifest_by_digest(namespace_name, repo_name, manifest_ref, registry_model):
    """
    Return the manifest with the given digest (v2 pull-by-digest endpoint).

    Every exit path increments the `image_pulls` metric with the outcome's HTTP status.
    Raises NameUnknown or ManifestUnknown on failure.
    """
    try:
        repository_ref = registry_model.lookup_repository(
            namespace_name, repo_name, raise_on_error=True, manifest_ref=manifest_ref
        )
    except RepositoryDoesNotExist as e:
        image_pulls.labels("v2", "manifest", 404).inc()
        raise NameUnknown("repository not found")

    try:
        manifest = registry_model.lookup_manifest_by_digest(
            repository_ref, manifest_ref, raise_on_error=True
        )
    except ManifestDoesNotExist as e:
        image_pulls.labels("v2", "manifest", 404).inc()
        raise ManifestUnknown(str(e))

    track_and_log("pull_repo", repository_ref, manifest_digest=manifest_ref)
    image_pulls.labels("v2", "manifest", 200).inc()

    # The stored bytes are returned verbatim; no schema rewriting happens on this path.
    return Response(
        manifest.internal_manifest_bytes.as_unicode(),
        status=200,
        headers={
            "Content-Type": manifest.media_type,
            "Docker-Content-Digest": manifest.digest,
        },
    )
def delete_manifest_by_digest(namespace_name, repo_name, manifest_ref):
    """
    Delete the manifest specified by the digest.

    Note: there is no equivalent method for deleting by tag name because it is forbidden by the
    spec.

    Raises NameUnknown if the repository does not exist and ManifestUnknown if the digest does
    not resolve to a manifest (or the manifest had no tags to delete).
    """
    # Deletion writes must go to the primary database, never a read replica.
    with db_disallow_replica_use():
        repository_ref = registry_model.lookup_repository(namespace_name, repo_name)
        if repository_ref is None:
            raise NameUnknown("repository not found")

        manifest = registry_model.lookup_manifest_by_digest(repository_ref, manifest_ref)
        if manifest is None:
            raise ManifestUnknown()

        # Deleting a manifest is expressed as deleting every tag that points at it.
        tags = registry_model.delete_tags_for_manifest(manifest)
        if not tags:
            raise ManifestUnknown()

        for tag in tags:
            track_and_log("delete_tag", repository_ref, tag=tag.name, digest=manifest_ref)

        # Deleting tags frees storage, so refresh the cached repository size when
        # quota management is enabled.
        if app.config.get("FEATURE_QUOTA_MANAGEMENT", False):
            repository.force_cache_repo_size(repository_ref.id)

    # 202 Accepted is the status mandated by the registry API for manifest deletion.
    return Response(status=202)
def _write_manifest_and_log(namespace_name, repo_name, tag_name, manifest_impl): repository_ref, manifest, tag = _write_manifest(namespace_name, repo_name, tag_name, manifest_impl) # Queue all blob manifests for replication. if features.STORAGE_REPLICATION: blobs = registry_model.get_manifest_local_blobs(manifest) if blobs is None: logger.error('Could not lookup blobs for manifest `%s`', manifest.digest) else: with queue_replication_batch( namespace_name) as queue_storage_replication: for blob_digest in blobs: queue_storage_replication(blob_digest) track_and_log('push_repo', repository_ref, tag=tag_name) spawn_notification(repository_ref, 'repo_push', {'updated_tags': [tag_name]}) metric_queue.repository_push.Inc( labelvalues=[namespace_name, repo_name, 'v2', True]) return Response( 'OK', status=202, headers={ 'Docker-Content-Digest': manifest.digest, 'Location': url_for('v2.fetch_manifest_by_digest', repository='%s/%s' % (namespace_name, repo_name), manifest_ref=manifest.digest), }, )
def fetch_manifest_by_digest(namespace_name, repo_name, manifest_ref):
    """
    Return the manifest with the given digest (v2 pull-by-digest endpoint).

    Raises NameUnknown or ManifestUnknown on failure.
    """
    repository_ref = registry_model.lookup_repository(namespace_name, repo_name)
    if repository_ref is None:
        raise NameUnknown()

    manifest = registry_model.lookup_manifest_by_digest(repository_ref, manifest_ref)
    if manifest is None:
        raise ManifestUnknown()

    # '$digest' appears to be a placeholder tag name for the schema rewriter, since a
    # pull-by-digest has no tag reference — TODO confirm against _rewrite_schema_if_necessary.
    manifest_bytes, manifest_digest, manifest_media_type = _rewrite_schema_if_necessary(
        namespace_name, repo_name, '$digest', manifest)
    if manifest_digest is None:
        raise ManifestUnknown()

    track_and_log('pull_repo', repository_ref, manifest_digest=manifest_ref)
    metric_queue.repository_pull.Inc(labelvalues=[namespace_name, repo_name, 'v2', True])

    return Response(manifest_bytes.as_unicode(), status=200, headers={
        'Content-Type': manifest_media_type,
        'Docker-Content-Digest': manifest_digest,
    })
def fetch_manifest_by_digest(namespace_name, repo_name, manifest_ref):
    """
    Return the manifest with the given digest (v2 pull-by-digest endpoint).

    Every exit path increments the `image_pulls` metric with the outcome's HTTP status.
    Raises NameUnknown or ManifestUnknown on failure.
    """
    repository_ref = registry_model.lookup_repository(namespace_name, repo_name)
    if repository_ref is None:
        image_pulls.labels("v2_1", "manifest", 404).inc()
        raise NameUnknown()

    manifest = registry_model.lookup_manifest_by_digest(repository_ref, manifest_ref)
    if manifest is None:
        image_pulls.labels("v2_1", "manifest", 404).inc()
        raise ManifestUnknown()

    # "$digest" appears to be a placeholder tag name for the schema rewriter, since a
    # pull-by-digest has no tag reference — TODO confirm against _rewrite_schema_if_necessary.
    manifest_bytes, manifest_digest, manifest_media_type = _rewrite_schema_if_necessary(
        namespace_name, repo_name, "$digest", manifest)
    if manifest_digest is None:
        image_pulls.labels("v2_1", "manifest", 404).inc()
        raise ManifestUnknown()

    track_and_log("pull_repo", repository_ref, manifest_digest=manifest_ref)
    image_pulls.labels("v2_1", "manifest", 200).inc()

    return Response(
        manifest_bytes.as_unicode(),
        status=200,
        headers={
            "Content-Type": manifest_media_type,
            "Docker-Content-Digest": manifest_digest,
        },
    )
def get_repository_images(namespace_name, repo_name):
    """
    V1 API: list the images in a repository. Always returns an empty JSON list under
    the current data model; records the outcome in the `image_pulls` metric.
    """
    repository_ref = registry_model.lookup_repository(namespace_name, repo_name,
                                                      kind_filter="image")

    permission = ReadRepositoryPermission(namespace_name, repo_name)
    if permission.can() or (repository_ref and repository_ref.is_public):
        # We can't rely on permissions to tell us if a repo exists anymore
        if repository_ref is None:
            image_pulls.labels("v1", "tag", 404).inc()
            abort(404, message="Unknown repository", issue="unknown-repo")

        logger.debug("Building repository image response")
        resp = make_response(json.dumps([]), 200)
        resp.mimetype = "application/json"

        # Sampled analytics event (1% of pulls) plus the success metric.
        track_and_log("pull_repo", repository_ref, analytics_name="pull_repo_100x",
                      analytics_sample=0.01)
        image_pulls.labels("v1", "tag", 200).inc()
        return resp

    image_pulls.labels("v1", "tag", 403).inc()
    abort(403)
def update_images(namespace_name, repo_name):
    """
    V1 push endpoint: finalize a push by committing the manifest builder and firing
    push notifications.
    """
    permission = ModifyRepositoryPermission(namespace_name, repo_name)
    if permission.can():
        logger.debug('Looking up repository')
        repository_ref = registry_model.lookup_repository(namespace_name, repo_name,
                                                          kind_filter='image')
        if repository_ref is None:
            # Make sure the repo actually exists.
            abort(404, message='Unknown repository', issue='unknown-repo')

        # The builder was stashed in the session by earlier push requests.
        builder = lookup_manifest_builder(repository_ref, session.get('manifest_builder'),
                                          storage, docker_v2_signing_key)
        if builder is None:
            abort(400)

        # Generate a job for each notification that has been added to this repo
        logger.debug('Adding notifications for repository')
        event_data = {
            'updated_tags': [tag.name for tag in builder.committed_tags],
        }
        builder.done()

        track_and_log('push_repo', repository_ref)
        spawn_notification(repository_ref, 'repo_push', event_data)
        metric_queue.repository_push.Inc(labelvalues=[namespace_name, repo_name, 'v1', True])
        return make_response('Updated', 204)

    abort(403)
def _write_manifest_and_log(namespace_name, repo_name, tag_name, manifest_impl): repository_ref, manifest, tag = _write_manifest( namespace_name, repo_name, tag_name, manifest_impl ) # Queue all blob manifests for replication. if features.STORAGE_REPLICATION: blobs = registry_model.get_manifest_local_blobs(manifest) if blobs is None: logger.error("Could not lookup blobs for manifest `%s`", manifest.digest) else: with queue_replication_batch(namespace_name) as queue_storage_replication: for blob_digest in blobs: queue_storage_replication(blob_digest) track_and_log("push_repo", repository_ref, tag=tag_name) spawn_notification(repository_ref, "repo_push", {"updated_tags": [tag_name]}) metric_queue.repository_push.Inc(labelvalues=[namespace_name, repo_name, "v2", True]) return Response( "OK", status=202, headers={ "Docker-Content-Digest": manifest.digest, "Location": url_for( "v2.fetch_manifest_by_digest", repository="%s/%s" % (namespace_name, repo_name), manifest_ref=manifest.digest, ), }, )
def _torrent_repo_verb(repository, tag, manifest, verb, **kwargs):
    """
    Handles returning a torrent for the given verb on the given image and tag.

    Aborts with 406 when the BitTorrent feature is disabled or the derived image for
    the verb has not been built yet.
    """
    if not features.BITTORRENT:
        # Torrent feature is not enabled.
        abort(406)

    # Lookup an *existing* derived storage for the verb. If the verb's image storage doesn't exist,
    # we cannot create it here, so we 406.
    derived_image = registry_model.lookup_derived_image(
        manifest, verb, storage, varying_metadata={"tag": tag.name}, include_placements=True)
    if derived_image is None:
        abort(406)

    # Return the torrent. Public repos get a publicly trackable torrent.
    torrent = _torrent_for_blob(
        derived_image.blob, model.repository.is_repository_public(repository))

    # Log the action.
    track_and_log("repo_verb", wrap_repository(repository), tag=tag.name, verb=verb,
                  torrent=True, **kwargs)
    return torrent
def delete_tag(namespace_name, repo_name, tag):
    """
    V1 API: delete a single tag from a repository.

    Responds 403 without modify permission (or when the repository is missing),
    404 when the tag could not be deleted, and 200 on success.
    """
    repository_ref = registry_model.lookup_repository(namespace_name, repo_name,
                                                      kind_filter="image")

    # Guard: caller must hold modify permission on an existing repository.
    has_permission = ModifyRepositoryPermission(namespace_name, repo_name).can()
    if not has_permission or repository_ref is None:
        abort(403)

    if not registry_model.delete_tag(repository_ref, tag):
        abort(404)

    track_and_log("delete_tag", repository_ref, tag=tag)
    return make_response("Deleted", 200)
def post(self, namespace_name, repository_name):
    """
    Update the sync_status for a given Repository's mirroring configuration.

    Cancels an in-progress sync; responds 204 on success and raises NotFound when the
    repository, its mirror config, or a cancelable sync does not exist.
    """
    repo = model.repository.get_repository(namespace_name, repository_name)
    if not repo:
        raise NotFound()

    mirror = model.repo_mirror.get_mirror(repository=repo)
    if not mirror:
        raise NotFound()

    # `mirror` is guaranteed truthy by the guard above, so only the cancel result
    # matters (the original re-tested `mirror and ...` redundantly).
    if model.repo_mirror.update_sync_status_to_cancel(mirror):
        track_and_log('repo_mirror_config_changed', wrap_repository(repo),
                      changed="sync_status", to="SYNC_CANCEL")
        return '', 204

    raise NotFound()
def fetch_manifest_by_tagname(namespace_name, repo_name, manifest_ref):
    """
    Return the manifest for the given tag (v2 pull-by-tag endpoint).

    Raises NameUnknown, TagExpired, ManifestUnknown or ManifestInvalid on failure.
    """
    repository_ref = registry_model.lookup_repository(namespace_name, repo_name)
    if repository_ref is None:
        raise NameUnknown()

    tag = registry_model.get_repo_tag(repository_ref, manifest_ref)
    if tag is None:
        # Distinguish "tag expired/deleted" (revivable via time machine) from "never existed".
        if registry_model.has_expired_tag(repository_ref, manifest_ref):
            logger.debug(
                "Found expired tag %s for repository %s/%s", manifest_ref, namespace_name, repo_name
            )
            msg = (
                "Tag %s was deleted or has expired. To pull, revive via time machine"
                % manifest_ref
            )
            raise TagExpired(msg)

        raise ManifestUnknown()

    manifest = registry_model.get_manifest_for_tag(tag, backfill_if_necessary=True)
    if manifest is None:
        # Something went wrong.
        raise ManifestInvalid()

    # Older clients may need the manifest converted to a schema they understand.
    manifest_bytes, manifest_digest, manifest_media_type = _rewrite_schema_if_necessary(
        namespace_name, repo_name, manifest_ref, manifest
    )
    if manifest_bytes is None:
        raise ManifestUnknown()

    # Sampled analytics event (1% of pulls).
    track_and_log(
        "pull_repo",
        repository_ref,
        analytics_name="pull_repo_100x",
        analytics_sample=0.01,
        tag=manifest_ref,
    )
    metric_queue.repository_pull.Inc(labelvalues=[namespace_name, repo_name, "v2", True])

    return Response(
        manifest_bytes.as_unicode(),
        status=200,
        headers={"Content-Type": manifest_media_type, "Docker-Content-Digest": manifest_digest,},
    )
def put(self, namespace_name, repository_name):
    """
    Update an existing RepoMirrorRule.

    Responds 200 on success, 400 when the rule is missing or the update fails, and
    raises NotFound when the repository does not exist.
    """
    repo = model.repository.get_repository(namespace_name, repository_name)
    if not repo:
        raise NotFound()

    rule = model.repo_mirror.get_root_rule(repo)
    if not rule:
        return {'detail': 'The rule appears to be missing.'}, 400

    data = request.get_json()
    if model.repo_mirror.change_rule_value(rule, data['root_rule']['rule_value']):
        track_and_log('repo_mirror_config_changed', wrap_repository(repo),
                      changed="mirror_rule", to=data['root_rule']['rule_value'])
        # Bug fix: previously returned the bare int `200`, which is not a valid Flask
        # view return type and raises a TypeError at request time. Return an empty
        # body with an explicit status, matching the sibling endpoints.
        return '', 200
    else:
        return {'detail': 'Unable to update rule.'}, 400
def get_repository_images(namespace_name, repo_name):
    """
    V1 API: list the images in a repository. Always returns an empty JSON list under
    the current data model.
    """
    repository_ref = registry_model.lookup_repository(namespace_name, repo_name,
                                                      kind_filter='image')

    permission = ReadRepositoryPermission(namespace_name, repo_name)
    if permission.can() or (repository_ref and repository_ref.is_public):
        # We can't rely on permissions to tell us if a repo exists anymore
        if repository_ref is None:
            abort(404, message='Unknown repository', issue='unknown-repo')

        logger.debug('Building repository image response')
        resp = make_response(json.dumps([]), 200)
        resp.mimetype = 'application/json'

        # Sampled analytics event (1% of pulls).
        track_and_log('pull_repo', repository_ref, analytics_name='pull_repo_100x',
                      analytics_sample=0.01)
        metric_queue.repository_pull.Inc(labelvalues=[namespace_name, repo_name, 'v1', True])
        return resp

    abort(403)
def put(self, namespace_name, repository_name):
    """
    Allow users to modifying the repository's mirroring configuration.

    Applies each recognized key in the JSON body independently, logging a
    `repo_mirror_config_changed` event for every field that actually changed.
    Validation failures return 400; success returns 201 with an empty body.
    """
    values = request.get_json()

    repo = model.repository.get_repository(namespace_name, repository_name)
    if not repo:
        raise NotFound()

    mirror = model.repo_mirror.get_mirror(repo)
    if not mirror:
        raise NotFound()

    if "is_enabled" in values:
        if values["is_enabled"] == True:
            if model.repo_mirror.enable_mirror(repo):
                track_and_log(
                    "repo_mirror_config_changed",
                    wrap_repository(repo),
                    changed="is_enabled",
                    to=True,
                )
        if values["is_enabled"] == False:
            if model.repo_mirror.disable_mirror(repo):
                track_and_log(
                    "repo_mirror_config_changed",
                    wrap_repository(repo),
                    changed="is_enabled",
                    to=False,
                )

    if "external_reference" in values:
        if values["external_reference"] == "":
            return {"detail": "Empty string is an invalid repository location."}, 400

        if model.repo_mirror.change_remote(repo, values["external_reference"]):
            track_and_log(
                "repo_mirror_config_changed",
                wrap_repository(repo),
                changed="external_reference",
                to=values["external_reference"],
            )

    if "robot_username" in values:
        robot_username = values["robot_username"]
        # Verifies the robot belongs to this repository's namespace before assigning it.
        robot = self._setup_robot_for_mirroring(namespace_name, repository_name, robot_username)
        if model.repo_mirror.set_mirroring_robot(repo, robot):
            track_and_log(
                "repo_mirror_config_changed",
                wrap_repository(repo),
                changed="robot_username",
                to=robot_username,
            )

    if "sync_start_date" in values:
        try:
            sync_start_date = self._string_to_dt(values["sync_start_date"])
        except ValueError as e:
            return {"detail": "Incorrect DateTime format for sync_start_date."}, 400
        if model.repo_mirror.change_sync_start_date(repo, sync_start_date):
            track_and_log(
                "repo_mirror_config_changed",
                wrap_repository(repo),
                changed="sync_start_date",
                to=sync_start_date,
            )

    if "sync_interval" in values:
        if model.repo_mirror.change_sync_interval(repo, values["sync_interval"]):
            track_and_log(
                "repo_mirror_config_changed",
                wrap_repository(repo),
                changed="sync_interval",
                to=values["sync_interval"],
            )

    # Username and password supplied together are updated atomically via
    # change_credentials; the password value is never logged in the clear.
    if "external_registry_username" in values and "external_registry_password" in values:
        username = values["external_registry_username"]
        password = values["external_registry_password"]
        if username is None and password is not None:
            return {"detail": "Unable to delete username while setting a password."}, 400
        if model.repo_mirror.change_credentials(repo, username, password):
            track_and_log(
                "repo_mirror_config_changed",
                wrap_repository(repo),
                changed="external_registry_username",
                to=username,
            )
            if password is None:
                track_and_log(
                    "repo_mirror_config_changed",
                    wrap_repository(repo),
                    changed="external_registry_password",
                    to=None,
                )
            else:
                track_and_log(
                    "repo_mirror_config_changed",
                    wrap_repository(repo),
                    changed="external_registry_password",
                    to="********",
                )
    elif "external_registry_username" in values:
        username = values["external_registry_username"]
        if model.repo_mirror.change_username(repo, username):
            track_and_log(
                "repo_mirror_config_changed",
                wrap_repository(repo),
                changed="external_registry_username",
                to=username,
            )

    # Do not allow specifying a password without setting a username
    if "external_registry_password" in values and "external_registry_username" not in values:
        return (
            {"detail": "Unable to set a new password without also specifying a username."},
            400,
        )

    if "external_registry_config" in values:
        external_registry_config = values.get("external_registry_config", {})

        if "verify_tls" in external_registry_config:
            updates = {"verify_tls": external_registry_config["verify_tls"]}
            if model.repo_mirror.change_external_registry_config(repo, updates):
                track_and_log(
                    "repo_mirror_config_changed",
                    wrap_repository(repo),
                    changed="verify_tls",
                    to=external_registry_config["verify_tls"],
                )

        if "unsigned_images" in external_registry_config:
            updates = {"unsigned_images": external_registry_config["unsigned_images"]}
            if model.repo_mirror.change_external_registry_config(repo, updates):
                track_and_log(
                    "repo_mirror_config_changed",
                    wrap_repository(repo),
                    changed="unsigned_images",
                    to=external_registry_config["unsigned_images"],
                )

        if "proxy" in external_registry_config:
            proxy_values = external_registry_config.get("proxy", {})

            if "http_proxy" in proxy_values:
                updates = {"proxy": {"http_proxy": proxy_values["http_proxy"]}}
                if model.repo_mirror.change_external_registry_config(repo, updates):
                    track_and_log(
                        "repo_mirror_config_changed",
                        wrap_repository(repo),
                        changed="http_proxy",
                        to=proxy_values["http_proxy"],
                    )

            if "https_proxy" in proxy_values:
                updates = {"proxy": {"https_proxy": proxy_values["https_proxy"]}}
                if model.repo_mirror.change_external_registry_config(repo, updates):
                    track_and_log(
                        "repo_mirror_config_changed",
                        wrap_repository(repo),
                        changed="https_proxy",
                        to=proxy_values["https_proxy"],
                    )

            if "no_proxy" in proxy_values:
                updates = {"proxy": {"no_proxy": proxy_values["no_proxy"]}}
                if model.repo_mirror.change_external_registry_config(repo, updates):
                    track_and_log(
                        "repo_mirror_config_changed",
                        wrap_repository(repo),
                        changed="no_proxy",
                        to=proxy_values["no_proxy"],
                    )

    if "root_rule" in values:
        if values["root_rule"]["rule_kind"] != "tag_glob_csv":
            raise ValidationError('validation failed: rule_kind must be "tag_glob_csv"')

        if model.repo_mirror.change_rule(
            repo, RepoMirrorRuleType.TAG_GLOB_CSV, values["root_rule"]["rule_value"]
        ):
            track_and_log(
                "repo_mirror_config_changed",
                wrap_repository(repo),
                changed="mirror_rule",
                to=values["root_rule"]["rule_value"],
            )

    return "", 201
def _repo_verb(namespace, repository, tag_name, verb, formatter, sign=False, checker=None,
               **kwargs):
    """
    Serve a repository "verb" (e.g. a converted image format) for the given tag.

    Verifies access, then either redirects/streams an already-built derived image from
    storage, or builds it on the fly via a forked queue process, simultaneously
    streaming the result to the client and (unless read-only) saving it to storage.
    """
    # Verify that the image exists and that we have access to it.
    logger.debug(
        'Verifying repo verb %s for repository %s/%s with user %s with mimetype %s',
        verb, namespace, repository, get_authenticated_user(), request.accept_mimetypes.best)

    tag, manifest, schema1_manifest = _verify_repo_verb(
        storage, namespace, repository, tag_name, verb, checker)

    # Load the repository for later.
    repo = model.repository.get_repository(namespace, repository)
    if repo is None:
        abort(404)

    # Check for torrent. If found, we return a torrent for the repo verb image (if the derived
    # image already exists).
    if request.accept_mimetypes.best == 'application/x-bittorrent':
        metric_queue.repository_pull.Inc(
            labelvalues=[namespace, repository, verb + '+torrent', True])
        return _torrent_repo_verb(repo, tag, manifest, verb, **kwargs)

    # Log the action.
    track_and_log('repo_verb', wrap_repository(repo), tag=tag.name, verb=verb, **kwargs)
    metric_queue.repository_pull.Inc(labelvalues=[namespace, repository, verb, True])

    is_readonly = app.config.get('REGISTRY_STATE', 'normal') == 'readonly'

    # Lookup/create the derived image for the verb and repo image. In read-only mode we
    # may only look up (a missing derived image here is handled by the build path check
    # below); otherwise a failure to create is a hard error.
    if is_readonly:
        derived_image = registry_model.lookup_derived_image(
            manifest, verb, storage, varying_metadata={'tag': tag.name},
            include_placements=True)
    else:
        derived_image = registry_model.lookup_or_create_derived_image(
            manifest, verb, storage.preferred_locations[0], storage,
            varying_metadata={'tag': tag.name}, include_placements=True)
        if derived_image is None:
            logger.error('Could not create or lookup a derived image for manifest %s', manifest)
            abort(400)

    # Fast path: the derived image is already fully built in storage.
    if derived_image is not None and not derived_image.blob.uploading:
        logger.debug('Derived %s image %s exists in storage', verb, derived_image)
        is_head_request = request.method == 'HEAD'

        metric_queue.pull_byte_count.Inc(derived_image.blob.compressed_size, labelvalues=[verb])

        # Prefer redirecting the client straight to the storage engine.
        download_url = storage.get_direct_download_url(
            derived_image.blob.placements, derived_image.blob.storage_path,
            head=is_head_request)
        if download_url:
            logger.debug('Redirecting to download URL for derived %s image %s', verb,
                         derived_image)
            return redirect(download_url)

        # Close the database handle here for this process before we send the long download.
        database.close_db_filter(None)

        logger.debug('Sending cached derived %s image %s', verb, derived_image)
        return send_file(
            storage.stream_read_file(derived_image.blob.placements,
                                     derived_image.blob.storage_path),
            mimetype=LAYER_MIMETYPE)

    logger.debug('Building and returning derived %s image', verb)

    # Close the database connection before any process forking occurs. This is important because
    # the Postgres driver does not react kindly to forking, so we need to make sure it is closed
    # so that each process will get its own unique connection.
    database.close_db_filter(None)

    def _cleanup():
        # Close any existing DB connection once the process has exited.
        database.close_db_filter(None)

    # Hash the stream as it is produced so torrent piece hashes can be stored afterwards.
    hasher = PieceHasher(app.config['BITTORRENT_PIECE_SIZE'])

    def _store_metadata_and_cleanup():
        # Persist torrent piece hashes and the final image size once building completes
        # (skipped entirely in read-only mode).
        if is_readonly:
            return

        with database.UseThenDisconnect(app.config):
            registry_model.set_torrent_info(
                derived_image.blob, app.config['BITTORRENT_PIECE_SIZE'],
                hasher.final_piece_hashes())
            registry_model.set_derived_image_size(derived_image, hasher.hashed_bytes)

    # Create a queue process to generate the data. The queue files will read from the process
    # and send the results to the client and storage.
    unique_id = (derived_image.unique_id
                 if derived_image is not None
                 else hashlib.sha256('%s:%s' % (verb, uuid.uuid4())).hexdigest())
    handlers = [hasher.update]
    reporter = VerbReporter(verb)
    args = (formatter, tag, schema1_manifest, unique_id, handlers, reporter)
    queue_process = QueueProcess(
        _open_stream,
        8 * 1024,
        10 * 1024 * 1024,  # 8K/10M chunk/max
        args,
        finished=_store_metadata_and_cleanup)

    client_queue_file = QueueFile(queue_process.create_queue(), 'client')

    if not is_readonly:
        storage_queue_file = QueueFile(queue_process.create_queue(), 'storage')

        # If signing is required, add a QueueFile for signing the image as we stream it out.
        signing_queue_file = None
        if sign and signer.name:
            signing_queue_file = QueueFile(queue_process.create_queue(), 'signing')

    # Start building.
    queue_process.run()

    # Start the storage saving.
    if not is_readonly:
        storage_args = (verb, derived_image, storage_queue_file)
        QueueProcess.run_process(_write_derived_image_to_storage, storage_args,
                                 finished=_cleanup)

        if sign and signer.name:
            signing_args = (verb, derived_image, signing_queue_file)
            QueueProcess.run_process(_sign_derived_image, signing_args, finished=_cleanup)

    # Close the database handle here for this process before we send the long download.
    database.close_db_filter(None)

    # Return the client's data.
    return send_file(client_queue_file, mimetype=LAYER_MIMETYPE)
def put(self, namespace_name, repository_name):
    """
    Allow users to modifying the repository's mirroring configuration.

    Applies each recognized key in the JSON body independently, logging a
    `repo_mirror_config_changed` event for every field that actually changed.
    Validation failures return 400; success returns 201 with an empty body.
    """
    values = request.get_json()

    repo = model.repository.get_repository(namespace_name, repository_name)
    if not repo:
        raise NotFound()

    mirror = model.repo_mirror.get_mirror(repo)
    if not mirror:
        raise NotFound()

    if 'is_enabled' in values:
        if values['is_enabled'] == True:
            if model.repo_mirror.enable_mirror(repo):
                track_and_log('repo_mirror_config_changed', wrap_repository(repo),
                              changed='is_enabled', to=True)
        if values['is_enabled'] == False:
            if model.repo_mirror.disable_mirror(repo):
                track_and_log('repo_mirror_config_changed', wrap_repository(repo),
                              changed='is_enabled', to=False)

    if 'external_reference' in values:
        if values['external_reference'] == '':
            return {'detail': 'Empty string is an invalid repository location.'}, 400

        if model.repo_mirror.change_remote(repo, values['external_reference']):
            track_and_log('repo_mirror_config_changed', wrap_repository(repo),
                          changed='external_reference', to=values['external_reference'])

    if 'robot_username' in values:
        robot_username = values['robot_username']
        # Verifies the robot belongs to this repository's namespace before assigning it.
        robot = self._setup_robot_for_mirroring(namespace_name, repository_name, robot_username)
        if model.repo_mirror.set_mirroring_robot(repo, robot):
            track_and_log('repo_mirror_config_changed', wrap_repository(repo),
                          changed='robot_username', to=robot_username)

    if 'sync_start_date' in values:
        try:
            sync_start_date = self._string_to_dt(values['sync_start_date'])
        except ValueError as e:
            return {'detail': 'Incorrect DateTime format for sync_start_date.'}, 400
        if model.repo_mirror.change_sync_start_date(repo, sync_start_date):
            track_and_log('repo_mirror_config_changed', wrap_repository(repo),
                          changed='sync_start_date', to=sync_start_date)

    if 'sync_interval' in values:
        if model.repo_mirror.change_sync_interval(repo, values['sync_interval']):
            track_and_log('repo_mirror_config_changed', wrap_repository(repo),
                          changed='sync_interval', to=values['sync_interval'])

    # Username and password supplied together are updated atomically via
    # change_credentials; the password value is never logged in the clear.
    if 'external_registry_username' in values and 'external_registry_password' in values:
        username = values['external_registry_username']
        password = values['external_registry_password']
        if username is None and password is not None:
            return {'detail': 'Unable to delete username while setting a password.'}, 400
        if model.repo_mirror.change_credentials(repo, username, password):
            track_and_log('repo_mirror_config_changed', wrap_repository(repo),
                          changed='external_registry_username', to=username)
            if password is None:
                track_and_log('repo_mirror_config_changed', wrap_repository(repo),
                              changed='external_registry_password', to=None)
            else:
                track_and_log('repo_mirror_config_changed', wrap_repository(repo),
                              changed='external_registry_password', to="********")
    elif 'external_registry_username' in values:
        username = values['external_registry_username']
        if model.repo_mirror.change_username(repo, username):
            track_and_log('repo_mirror_config_changed', wrap_repository(repo),
                          changed='external_registry_username', to=username)

    # Do not allow specifying a password without setting a username
    if 'external_registry_password' in values and 'external_registry_username' not in values:
        return {
            'detail': 'Unable to set a new password without also specifying a username.'
        }, 400

    if 'external_registry_config' in values:
        external_registry_config = values.get('external_registry_config', {})

        if 'verify_tls' in external_registry_config:
            updates = {'verify_tls': external_registry_config['verify_tls']}
            if model.repo_mirror.change_external_registry_config(repo, updates):
                track_and_log('repo_mirror_config_changed', wrap_repository(repo),
                              changed='verify_tls',
                              to=external_registry_config['verify_tls'])

        if 'proxy' in external_registry_config:
            proxy_values = external_registry_config.get('proxy', {})

            if 'http_proxy' in proxy_values:
                updates = {'proxy': {'http_proxy': proxy_values['http_proxy']}}
                if model.repo_mirror.change_external_registry_config(repo, updates):
                    track_and_log('repo_mirror_config_changed', wrap_repository(repo),
                                  changed='http_proxy', to=proxy_values['http_proxy'])

            if 'https_proxy' in proxy_values:
                updates = {'proxy': {'https_proxy': proxy_values['https_proxy']}}
                if model.repo_mirror.change_external_registry_config(repo, updates):
                    track_and_log('repo_mirror_config_changed', wrap_repository(repo),
                                  changed='https_proxy', to=proxy_values['https_proxy'])

            if 'no_proxy' in proxy_values:
                updates = {'proxy': {'no_proxy': proxy_values['no_proxy']}}
                if model.repo_mirror.change_external_registry_config(repo, updates):
                    track_and_log('repo_mirror_config_changed', wrap_repository(repo),
                                  changed='no_proxy', to=proxy_values['no_proxy'])

    return '', 201
def _repo_verb(
    namespace, repository, tag_name, verb, formatter, sign=False, checker=None, **kwargs
):
    """
    Stream a derived ("verb"-converted) image for a tag back to the client.

    After verifying access, either serves an already-built derived image from
    storage (preferring a direct-download redirect when the storage engine
    supports one), or builds the derived image on the fly in a child process
    and streams the bytes simultaneously to the client, to storage (unless the
    registry is read-only), and to a signing worker (when requested).

    Args:
        namespace: namespace of the repository containing the tag.
        repository: name of the repository containing the tag.
        tag_name: name of the tag whose manifest is converted.
        verb: name of the conversion being applied; also used as the label in
            logs, metrics and derived-image lookups.
        formatter: formatter object handed (via ``args``) to ``_open_stream``;
            its exact contract is defined by ``_open_stream`` (not visible in
            this file chunk).
        sign: when True and a signer backend is configured, the generated
            stream is additionally fed to ``_sign_derived_image``.
        checker: optional additional check forwarded to ``_verify_repo_verb``.
        **kwargs: extra attributes forwarded verbatim to ``track_and_log``.

    Returns:
        A Flask response: a redirect to a direct download URL, or a
        ``send_file`` streaming response with mimetype ``LAYER_MIMETYPE``.

    Raises:
        Aborts the request with 404 (repository not found), 406 (bittorrent
        downloads no longer supported) or 400 (derived image could not be
        created or looked up in non-read-only mode).
    """
    # Verify that the image exists and that we have access to it.
    logger.debug(
        "Verifying repo verb %s for repository %s/%s with user %s with mimetype %s",
        verb,
        namespace,
        repository,
        get_authenticated_user(),
        request.accept_mimetypes.best,
    )
    tag, manifest, schema1_manifest = _verify_repo_verb(
        storage, namespace, repository, tag_name, verb, checker
    )

    # Load the repository for later.
    repo = model.repository.get_repository(namespace, repository)
    if repo is None:
        abort(404)

    # Check for torrent, which is no longer supported.
    if request.accept_mimetypes.best == "application/x-bittorrent":
        abort(406)

    # Log the action.
    track_and_log("repo_verb", wrap_repository(repo), tag=tag.name, verb=verb, **kwargs)

    is_readonly = app.config.get("REGISTRY_STATE", "normal") == "readonly"

    # Lookup/create the derived image for the verb and repo image. In read-only
    # mode we may only look up an existing derived image (a miss yields None and
    # we fall through to building it on the fly without persisting); otherwise a
    # failure to look up or create is a hard error.
    if is_readonly:
        derived_image = registry_model.lookup_derived_image(
            manifest, verb, storage, varying_metadata={"tag": tag.name}, include_placements=True
        )
    else:
        derived_image = registry_model.lookup_or_create_derived_image(
            manifest,
            verb,
            storage.preferred_locations[0],
            storage,
            varying_metadata={"tag": tag.name},
            include_placements=True,
        )
        if derived_image is None:
            logger.error("Could not create or lookup a derived image for manifest %s", manifest)
            abort(400)

    # Fast path: the derived image already exists and is fully uploaded, so we
    # can serve it straight from storage. (derived_image can be None here only
    # on the read-only lookup path above.)
    if derived_image is not None and not derived_image.blob.uploading:
        logger.debug("Derived %s image %s exists in storage", verb, derived_image)
        is_head_request = request.method == "HEAD"

        if derived_image.blob.compressed_size:
            image_pulled_bytes.labels("verbs").inc(derived_image.blob.compressed_size)

        # Prefer redirecting the client directly to the storage engine when it
        # can produce a signed/direct URL; HEAD-ness is forwarded so the engine
        # can tailor the URL.
        download_url = storage.get_direct_download_url(
            derived_image.blob.placements, derived_image.blob.storage_path, head=is_head_request
        )
        if download_url:
            logger.debug("Redirecting to download URL for derived %s image %s", verb, derived_image)
            return redirect(download_url)

        # Close the database handle here for this process before we send the long download.
        database.close_db_filter(None)

        logger.debug("Sending cached derived %s image %s", verb, derived_image)
        return send_file(
            storage.stream_read_file(
                derived_image.blob.placements, derived_image.blob.storage_path
            ),
            mimetype=LAYER_MIMETYPE,
        )

    # Slow path: build the derived image now and stream it as it is produced.
    logger.debug("Building and returning derived %s image", verb)
    hasher = SimpleHasher()

    # Close the database connection before any process forking occurs. This is important because
    # the Postgres driver does not react kindly to forking, so we need to make sure it is closed
    # so that each process will get its own unique connection.
    database.close_db_filter(None)

    def _cleanup():
        # Close any existing DB connection once the process has exited.
        database.close_db_filter(None)

    def _store_metadata_and_cleanup():
        # Record the final (hashed) byte count on the derived image once the
        # generator process finishes; skipped entirely in read-only mode.
        if is_readonly:
            return

        with database.UseThenDisconnect(app.config):
            registry_model.set_derived_image_size(derived_image, hasher.hashed_bytes)

    # Create a queue process to generate the data. The queue files will read from the process
    # and send the results to the client and storage.
    # derived_image is None only on the read-only lookup-miss path, in which
    # case a throwaway unique id is generated instead.
    unique_id = (
        derived_image.unique_id
        if derived_image is not None
        else hashlib.sha256(("%s:%s" % (verb, uuid.uuid4())).encode("utf-8")).hexdigest()
    )
    handlers = [hasher.update]
    reporter = VerbReporter(verb)
    args = (formatter, tag, schema1_manifest, unique_id, handlers, reporter)
    queue_process = QueueProcess(
        _open_stream,
        8 * 1024,
        10 * 1024 * 1024,  # 8K/10M chunk/max
        args,
        finished=_store_metadata_and_cleanup,
    )

    # One queue file per consumer of the generated stream: the HTTP client,
    # optionally storage, and optionally the signer.
    client_queue_file = QueueFile(
        queue_process.create_queue(), "client", timeout=QUEUE_FILE_TIMEOUT
    )

    if not is_readonly:
        storage_queue_file = QueueFile(
            queue_process.create_queue(), "storage", timeout=QUEUE_FILE_TIMEOUT
        )

    # If signing is required, add a QueueFile for signing the image as we stream it out.
    signing_queue_file = None
    if sign and signer.name:
        signing_queue_file = QueueFile(
            queue_process.create_queue(), "signing", timeout=QUEUE_FILE_TIMEOUT
        )

    # Start building.
    queue_process.run()

    # Start the storage saving.
    if not is_readonly:
        storage_args = (verb, derived_image, storage_queue_file, namespace, repository, tag_name)
        QueueProcess.run_process(_write_derived_image_to_storage, storage_args, finished=_cleanup)

    if sign and signer.name:
        signing_args = (verb, derived_image, signing_queue_file)
        QueueProcess.run_process(_sign_derived_image, signing_args, finished=_cleanup)

    # Close the database handle here for this process before we send the long download.
    database.close_db_filter(None)

    # Return the client's data.
    return send_file(client_queue_file, mimetype=LAYER_MIMETYPE)