import os

import pytest

# FakeQueue and QueueResult are test doubles assumed to be in scope (a sketch
# of both follows the last test); QueueFile is the unit under test.


def test_basic():
    queue = FakeQueue()
    queue.put(QueueResult("hello world", None))
    queue.put(QueueResult("! how goes there?", None))
    queue.put(QueueResult(None, None))

    queuefile = QueueFile(queue)
    assert queuefile.read() == "hello world! how goes there?"
def test_unhandled_exception():
    queue = FakeQueue()
    queue.put(QueueResult("hello world", None))
    queue.put(QueueResult(None, IOError("some exception")))
    queue.put(QueueResult("! how goes there?", None))
    queue.put(QueueResult(None, None))

    queuefile = QueueFile(queue)

    # With no exception handler registered, the producer's error propagates
    # out of read().
    with pytest.raises(IOError):
        queuefile.read(size=12)
def test_chunk_reading():
    queue = FakeQueue()
    queue.put(QueueResult("hello world", None))
    queue.put(QueueResult("! how goes there?", None))
    queue.put(QueueResult(None, None))

    queuefile = QueueFile(queue)
    data = ""

    while True:
        result = queuefile.read(size=2)
        if not result:
            break

        data += result

    assert data == "hello world! how goes there?"
def test_binary_data():
    queue = FakeQueue()

    # Generate some binary data.
    binary_data = os.urandom(1024)
    queue.put(QueueResult(binary_data, None))
    queue.put(QueueResult(None, None))

    queuefile = QueueFile(queue)

    # Accumulate into a bytes buffer; os.urandom returns bytes, so starting
    # from the str "" would raise a TypeError on concatenation.
    found_data = b""
    while True:
        current_data = queuefile.read(size=37)
        if len(current_data) == 0:
            break

        found_data = found_data + current_data

    assert found_data == binary_data
def test_handled_exception():
    queue = FakeQueue()
    queue.put(QueueResult("hello world", None))
    queue.put(QueueResult(None, IOError("some exception")))
    queue.put(QueueResult("! how goes there?", None))
    queue.put(QueueResult(None, None))

    ex_found = [None]

    def handler(ex):
        ex_found[0] = ex

    queuefile = QueueFile(queue)
    queuefile.add_exception_handler(handler)
    queuefile.read(size=12)

    assert ex_found[0] is not None
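# The tests above rely on two small test doubles that are not shown here. A
# minimal sketch of what they could look like, inferred only from how the
# tests use them (FakeQueue is a plain in-memory FIFO with put()/get();
# QueueResult pairs a data chunk with an optional exception). Names and
# signatures are assumptions, not the project's actual definitions:

from collections import namedtuple

QueueResult = namedtuple("QueueResult", ["data", "exception"])


class FakeQueue(object):
    def __init__(self):
        self.items = []

    def get(self, block=False, timeout=None):
        # block/timeout are accepted for API compatibility with
        # multiprocessing queues but ignored, since items are preloaded.
        return self.items.pop(0)

    def put(self, data):
        self.items.append(data)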
def _repo_verb(
    namespace, repository, tag_name, verb, formatter, sign=False, checker=None, **kwargs
):
    # Verify that the image exists and that we have access to it.
    logger.debug(
        "Verifying repo verb %s for repository %s/%s with user %s with mimetype %s",
        verb,
        namespace,
        repository,
        get_authenticated_user(),
        request.accept_mimetypes.best,
    )

    tag, manifest, schema1_manifest = _verify_repo_verb(
        storage, namespace, repository, tag_name, verb, checker
    )

    # Load the repository for later.
    repo = model.repository.get_repository(namespace, repository)
    if repo is None:
        abort(404)

    # Check for torrent, which is no longer supported.
    if request.accept_mimetypes.best == "application/x-bittorrent":
        abort(406)

    # Log the action.
    track_and_log("repo_verb", wrap_repository(repo), tag=tag.name, verb=verb, **kwargs)

    is_readonly = app.config.get("REGISTRY_STATE", "normal") == "readonly"

    # Lookup/create the derived image for the verb and repo image.
    if is_readonly:
        derived_image = registry_model.lookup_derived_image(
            manifest, verb, storage, varying_metadata={"tag": tag.name}, include_placements=True
        )
    else:
        derived_image = registry_model.lookup_or_create_derived_image(
            manifest,
            verb,
            storage.preferred_locations[0],
            storage,
            varying_metadata={"tag": tag.name},
            include_placements=True,
        )
        if derived_image is None:
            logger.error("Could not create or lookup a derived image for manifest %s", manifest)
            abort(400)

    if derived_image is not None and not derived_image.blob.uploading:
        logger.debug("Derived %s image %s exists in storage", verb, derived_image)
        is_head_request = request.method == "HEAD"

        if derived_image.blob.compressed_size:
            image_pulled_bytes.labels("verbs").inc(derived_image.blob.compressed_size)

        download_url = storage.get_direct_download_url(
            derived_image.blob.placements, derived_image.blob.storage_path, head=is_head_request
        )
        if download_url:
            logger.debug(
                "Redirecting to download URL for derived %s image %s", verb, derived_image
            )
            return redirect(download_url)

        # Close the database handle here for this process before we send the long download.
        database.close_db_filter(None)

        logger.debug("Sending cached derived %s image %s", verb, derived_image)
        return send_file(
            storage.stream_read_file(
                derived_image.blob.placements, derived_image.blob.storage_path
            ),
            mimetype=LAYER_MIMETYPE,
        )

    logger.debug("Building and returning derived %s image", verb)
    hasher = SimpleHasher()

    # Close the database connection before any process forking occurs. This is important because
    # the Postgres driver does not react kindly to forking, so we need to make sure it is closed
    # so that each process will get its own unique connection.
    database.close_db_filter(None)

    def _cleanup():
        # Close any existing DB connection once the process has exited.
        database.close_db_filter(None)

    def _store_metadata_and_cleanup():
        if is_readonly:
            return

        with database.UseThenDisconnect(app.config):
            registry_model.set_derived_image_size(derived_image, hasher.hashed_bytes)

    # Create a queue process to generate the data. The queue files will read from the process
    # and send the results to the client and storage.
    unique_id = (
        derived_image.unique_id
        if derived_image is not None
        else hashlib.sha256(("%s:%s" % (verb, uuid.uuid4())).encode("utf-8")).hexdigest()
    )
    handlers = [hasher.update]
    reporter = VerbReporter(verb)
    args = (formatter, tag, schema1_manifest, unique_id, handlers, reporter)
    queue_process = QueueProcess(
        _open_stream,
        8 * 1024,
        10 * 1024 * 1024,  # 8K/10M chunk/max
        args,
        finished=_store_metadata_and_cleanup,
    )

    client_queue_file = QueueFile(
        queue_process.create_queue(), "client", timeout=QUEUE_FILE_TIMEOUT
    )

    if not is_readonly:
        storage_queue_file = QueueFile(
            queue_process.create_queue(), "storage", timeout=QUEUE_FILE_TIMEOUT
        )

    # If signing is required, add a QueueFile for signing the image as we stream it out.
    signing_queue_file = None
    if sign and signer.name:
        signing_queue_file = QueueFile(
            queue_process.create_queue(), "signing", timeout=QUEUE_FILE_TIMEOUT
        )

    # Start building.
    queue_process.run()

    # Start the storage saving.
    if not is_readonly:
        storage_args = (verb, derived_image, storage_queue_file, namespace, repository, tag_name)
        QueueProcess.run_process(_write_derived_image_to_storage, storage_args, finished=_cleanup)

    if sign and signer.name:
        signing_args = (verb, derived_image, signing_queue_file)
        QueueProcess.run_process(_sign_derived_image, signing_args, finished=_cleanup)

    # Close the database handle here for this process before we send the long download.
    database.close_db_filter(None)

    # Return the client's data.
    return send_file(client_queue_file, mimetype=LAYER_MIMETYPE)
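# The QueueProcess/QueueFile pairing above fans one generated stream out to
# several independent consumers (client, storage, signing). A minimal,
# self-contained sketch of that pattern, assuming only that each chunk is put
# on every registered queue and that None marks end-of-stream; the names here
# (fan_out, QueueReader) are illustrative, not Quay's API:

import threading
from queue import Queue


def fan_out(chunks, queues):
    # Push every chunk to every consumer queue; None signals end-of-stream.
    for chunk in chunks:
        for q in queues:
            q.put(chunk)
    for q in queues:
        q.put(None)


class QueueReader(object):
    """File-like reader that drains a single consumer queue."""

    def __init__(self, queue):
        self._queue = queue
        self._done = False

    def read(self):
        parts = []
        while not self._done:
            chunk = self._queue.get()
            if chunk is None:
                self._done = True
            else:
                parts.append(chunk)
        return b"".join(parts)


client_q, storage_q = Queue(), Queue()
producer = threading.Thread(
    target=fan_out, args=([b"layer-", b"bytes"], [client_q, storage_q])
)
producer.start()

# Each consumer sees the full stream independently.
assert QueueReader(client_q).read() == b"layer-bytes"
assert QueueReader(storage_q).read() == b"layer-bytes"
producer.join()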
def _repo_verb(namespace, repository, tag_name, verb, formatter, sign=False, checker=None,
               **kwargs):
  # Verify that the image exists and that we have access to it.
  logger.debug('Verifying repo verb %s for repository %s/%s with user %s with mimetype %s', verb,
               namespace, repository, get_authenticated_user(), request.accept_mimetypes.best)

  tag, manifest, schema1_manifest = _verify_repo_verb(storage, namespace, repository, tag_name,
                                                      verb, checker)

  # Load the repository for later.
  repo = model.repository.get_repository(namespace, repository)
  if repo is None:
    abort(404)

  # Check for torrent. If found, we return a torrent for the repo verb image (if the derived
  # image already exists).
  if request.accept_mimetypes.best == 'application/x-bittorrent':
    metric_queue.repository_pull.Inc(labelvalues=[namespace, repository, verb + '+torrent', True])
    return _torrent_repo_verb(repo, tag, manifest, verb, **kwargs)

  # Log the action.
  track_and_log('repo_verb', wrap_repository(repo), tag=tag.name, verb=verb, **kwargs)
  metric_queue.repository_pull.Inc(labelvalues=[namespace, repository, verb, True])

  is_readonly = app.config.get('REGISTRY_STATE', 'normal') == 'readonly'

  # Lookup/create the derived image for the verb and repo image.
  if is_readonly:
    derived_image = registry_model.lookup_derived_image(
        manifest, verb, storage, varying_metadata={'tag': tag.name}, include_placements=True)
  else:
    derived_image = registry_model.lookup_or_create_derived_image(
        manifest, verb, storage.preferred_locations[0], storage,
        varying_metadata={'tag': tag.name}, include_placements=True)
    if derived_image is None:
      logger.error('Could not create or lookup a derived image for manifest %s', manifest)
      abort(400)

  if derived_image is not None and not derived_image.blob.uploading:
    logger.debug('Derived %s image %s exists in storage', verb, derived_image)
    is_head_request = request.method == 'HEAD'

    metric_queue.pull_byte_count.Inc(derived_image.blob.compressed_size, labelvalues=[verb])

    download_url = storage.get_direct_download_url(derived_image.blob.placements,
                                                   derived_image.blob.storage_path,
                                                   head=is_head_request)
    if download_url:
      logger.debug('Redirecting to download URL for derived %s image %s', verb, derived_image)
      return redirect(download_url)

    # Close the database handle here for this process before we send the long download.
    database.close_db_filter(None)

    logger.debug('Sending cached derived %s image %s', verb, derived_image)
    return send_file(storage.stream_read_file(derived_image.blob.placements,
                                              derived_image.blob.storage_path),
                     mimetype=LAYER_MIMETYPE)

  logger.debug('Building and returning derived %s image', verb)

  # Close the database connection before any process forking occurs. This is important because
  # the Postgres driver does not react kindly to forking, so we need to make sure it is closed
  # so that each process will get its own unique connection.
  database.close_db_filter(None)

  def _cleanup():
    # Close any existing DB connection once the process has exited.
    database.close_db_filter(None)

  hasher = PieceHasher(app.config['BITTORRENT_PIECE_SIZE'])

  def _store_metadata_and_cleanup():
    if is_readonly:
      return

    with database.UseThenDisconnect(app.config):
      registry_model.set_torrent_info(derived_image.blob, app.config['BITTORRENT_PIECE_SIZE'],
                                      hasher.final_piece_hashes())
      registry_model.set_derived_image_size(derived_image, hasher.hashed_bytes)

  # Create a queue process to generate the data. The queue files will read from the process
  # and send the results to the client and storage.
  unique_id = (derived_image.unique_id
               if derived_image is not None
               else hashlib.sha256('%s:%s' % (verb, uuid.uuid4())).hexdigest())
  handlers = [hasher.update]
  reporter = VerbReporter(verb)
  args = (formatter, tag, schema1_manifest, unique_id, handlers, reporter)
  queue_process = QueueProcess(
      _open_stream,
      8 * 1024,
      10 * 1024 * 1024,  # 8K/10M chunk/max
      args,
      finished=_store_metadata_and_cleanup)

  client_queue_file = QueueFile(queue_process.create_queue(), 'client')

  if not is_readonly:
    storage_queue_file = QueueFile(queue_process.create_queue(), 'storage')

  # If signing is required, add a QueueFile for signing the image as we stream it out.
  signing_queue_file = None
  if sign and signer.name:
    signing_queue_file = QueueFile(queue_process.create_queue(), 'signing')

  # Start building.
  queue_process.run()

  # Start the storage saving.
  if not is_readonly:
    storage_args = (verb, derived_image, storage_queue_file)
    QueueProcess.run_process(_write_derived_image_to_storage, storage_args, finished=_cleanup)

  if sign and signer.name:
    signing_args = (verb, derived_image, signing_queue_file)
    QueueProcess.run_process(_sign_derived_image, signing_args, finished=_cleanup)

  # Close the database handle here for this process before we send the long download.
  database.close_db_filter(None)

  # Return the client's data.
  return send_file(client_queue_file, mimetype=LAYER_MIMETYPE)
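# The older path above threads hasher.update into the stream so BitTorrent
# piece hashes are computed while the image is being built. A minimal sketch
# of fixed-size piece hashing under that assumption (SHA-1 digest per piece,
# digests concatenated, as the BitTorrent format expects); this is
# illustrative, not Quay's actual PieceHasher:

import hashlib


class SimplePieceHasher(object):
    def __init__(self, piece_size):
        self._piece_size = piece_size
        self._buffer = b""
        self._digests = []
        self.hashed_bytes = 0

    def update(self, data):
        # Buffer incoming chunks and emit one SHA-1 digest per full piece.
        self.hashed_bytes += len(data)
        self._buffer += data
        while len(self._buffer) >= self._piece_size:
            piece = self._buffer[:self._piece_size]
            self._buffer = self._buffer[self._piece_size:]
            self._digests.append(hashlib.sha1(piece).digest())

    def final_piece_hashes(self):
        # Hash any trailing partial piece, then return the concatenated digests.
        if self._buffer:
            self._digests.append(hashlib.sha1(self._buffer).digest())
            self._buffer = b""
        return b"".join(self._digests)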