def _store_metadata_and_cleanup():
    """Persist the final byte size of the derived image, unless the registry is read-only."""
    if is_readonly:
        return

    # Connect just long enough to record the size, then drop the connection.
    with database.UseThenDisconnect(app.config):
        registry_model.set_derived_image_size(derived_image, hasher.hashed_bytes)
def process_mirrors(skopeo, token=None):
    """
    Performs mirroring of repositories whose last sync time is greater than sync interval.

    If a token is provided, scanning will begin where the token indicates it previously
    completed.

    Args:
        skopeo: wrapper used to perform the actual image copy for each repository.
        token: optional pagination token from a previous invocation.

    Returns:
        The next pagination token to resume from, or None when the feature is
        disabled or an unexpected error aborted the run.
    """
    if not features.REPO_MIRROR:
        logger.debug("Repository mirror disabled; skipping RepoMirrorWorker process_mirrors")
        return None

    iterator, next_token = model.repositories_to_mirror(start_token=token)
    if iterator is None:
        logger.debug("Found no additional repositories to mirror")
        return next_token

    with database.UseThenDisconnect(app.config):
        for mirror, abt, num_remaining in iterator:
            try:
                perform_mirror(skopeo, mirror)
            except PreemptedException:
                # Another worker claimed this repository first; signal abort and move on.
                logger.info(
                    "Another repository mirror worker pre-empted us for repository: %s",
                    mirror.id,
                )
                abt.set()
            except Exception:  # TODO: define exceptions
                # Fixed: the exception was bound (`as e`) but never used;
                # logger.exception already records the active traceback.
                logger.exception("Repository Mirror service unavailable")
                return None

            # Gauge metric: how many repositories are still awaiting a sync.
            unmirrored_repositories.set(num_remaining)

    return next_token
def _write_derived_image_to_storage(verb, derived_image, queue_file):
    """
    Read from the generated stream and write it back to the storage engine.

    This method runs in a separate process.
    """

    def _on_stream_error(error):
        # Upstream generation failed: log and drop the partially-built derived image.
        logger.debug('Exception when building %s derived image %s: %s', verb, derived_image, error)
        with database.UseThenDisconnect(app.config):
            registry_model.delete_derived_image(derived_image)

    queue_file.add_exception_handler(_on_stream_error)

    # Re-Initialize the storage engine because some may not respond well to forking (e.g. S3)
    storage_engine = Storage(
        app, metric_queue, config_provider=config_provider, ip_resolver=ip_resolver
    )

    blob = derived_image.blob
    try:
        storage_engine.stream_write(blob.placements, blob.storage_path, queue_file)
    except IOError as error:
        # The write itself failed; clean up the derived-image record as well.
        logger.debug('Exception when writing %s derived image %s: %s', verb, derived_image, error)
        with database.UseThenDisconnect(app.config):
            registry_model.delete_derived_image(derived_image)

    queue_file.close()
def _store_metadata_and_cleanup():
    """Record torrent piece hashes and the final size for the derived image (no-op when read-only)."""
    if is_readonly:
        return

    # Open a short-lived database connection for both metadata writes.
    with database.UseThenDisconnect(app.config):
        registry_model.set_torrent_info(
            derived_image.blob,
            app.config["BITTORRENT_PIECE_SIZE"],
            hasher.final_piece_hashes(),
        )
        registry_model.set_derived_image_size(derived_image, hasher.hashed_bytes)
def handle_exception(ex):
    """Log a build failure for the derived image and delete its partial record."""
    logger.debug(
        "Exception when building %s derived image %s (%s/%s:%s): %s",
        verb,
        derived_image,
        namespace,
        repository,
        tag_name,
        ex,
    )

    # A short-lived connection is enough to remove the broken derived image.
    with database.UseThenDisconnect(app.config):
        registry_model.delete_derived_image(derived_image)
def _sign_derived_image(verb, derived_image, queue_file):
    """
    Read from the queue file and sign the contents which are generated.

    This method runs in a separate process.

    Args:
        verb: name of the verb being processed (used for logging).
        derived_image: the derived image whose generated contents are signed.
        queue_file: file-like object streaming the generated contents.
    """
    signature = None
    try:
        signature = signer.detached_sign(queue_file)
    except Exception:
        # Fixed: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed so process-control exceptions propagate.
        logger.exception("Exception when signing %s deriving image %s", verb, derived_image)
        return

    # Setup the database (since this is a new process) and then disconnect immediately
    # once the operation completes.
    if not queue_file.raised_exception:
        with database.UseThenDisconnect(app.config):
            registry_model.set_derived_image_signature(derived_image, signer.name, signature)
def _open_stream(formatter, tag, schema1_manifest, derived_image_id, handlers, reporter):
    """
    This method generates a stream of data which will be replicated and read from the queue
    files.

    This method runs in a separate process.
    """
    # For performance reasons, we load the full image list here, cache it, then disconnect from
    # the database.
    with database.UseThenDisconnect(app.config):
        layers = registry_model.list_parsed_manifest_layers(
            tag.repository, schema1_manifest, storage, include_placements=True
        )

    def _blob_stream_factory(store, blob):
        # Returns a zero-argument callable that opens a read stream for `blob`.
        def _open_blob_stream():
            blob_stream = store.stream_read_file(blob.placements, blob.storage_path)
            logger.debug("Returning blob %s: %s", blob.digest, blob.storage_path)
            return blob_stream

        return _open_blob_stream

    def _layer_stream_getters():
        # Re-Initialize the storage engine because some may not respond well to forking (e.g. S3)
        store = Storage(app, config_provider=config_provider, ip_resolver=ip_resolver)

        # Note: We reverse because we have to start at the leaf layer and move upward,
        # as per the spec for the formatters.
        for layer in reversed(layers):
            yield _blob_stream_factory(store, layer.blob)

    stream = formatter.build_stream(
        tag,
        schema1_manifest,
        derived_image_id,
        layers,
        _layer_stream_getters,
        reporter=reporter,
    )

    # Wrap the stream with each registered handler, innermost first.
    for handler_fn in handlers:
        stream = wrap_with_handler(stream, handler_fn)

    return stream.read
def handle_exception(ex):
    """Log the build failure and remove the partially-built derived image."""
    logger.debug('Exception when building %s derived image %s: %s', verb, derived_image, ex)

    # Briefly reconnect to purge the broken derived-image record.
    with database.UseThenDisconnect(app.config):
        registry_model.delete_derived_image(derived_image)