class Cleaner(object):
    """Runs a single cache-clean pass via the ImageCache."""

    def __init__(self, options):
        self.cache = ImageCache(options)
        self.options = options

    def run(self):
        self.cache.clean()
class Cleaner(object):
    """Delegates a cache-cleaning pass to the ImageCache."""

    def __init__(self, conf, **local_conf):
        self.cache = ImageCache(conf)
        self.conf = conf

    def run(self):
        self.cache.clean()
class Pruner(object):
    """Delegates a cache-pruning pass to the ImageCache."""

    def __init__(self, conf, **local_conf):
        self.cache = ImageCache(conf)
        self.conf = conf

    def run(self):
        self.cache.prune()
class Pruner(object):
    """Runs a single cache-prune pass via the ImageCache."""

    def __init__(self, options):
        self.cache = ImageCache(options)
        self.options = options

    def run(self):
        self.cache.prune()
class Reaper(object):
    """Reaps invalid and stalled entries from the image cache."""

    def __init__(self, options):
        self.options = options
        self.cache = ImageCache(options)

    def run(self):
        # Grace period (seconds) before an invalid entry is reaped;
        # defaults to one hour.
        grace_period = self.options.get(
            'image_cache_invalid_entry_grace_period', 3600)
        self.cache.reap_invalid(grace=int(grace_period))
        self.cache.reap_stalled()
class Reaper(object):
    """Removes invalid and stalled image cache entries."""

    def __init__(self, options):
        self.cache = ImageCache(options)
        self.options = options

    def run(self):
        # How long (in seconds) an invalid entry may linger before reaping.
        raw_grace = self.options.get(
            'image_cache_invalid_entry_grace_period', 3600)
        self.cache.reap_invalid(grace=int(raw_grace))
        self.cache.reap_stalled()
class Prefetcher(object):
    """Fetches queued images from their backend store into the cache."""

    def __init__(self, options):
        self.options = options
        glance.store.create_stores(options)
        self.cache = ImageCache(options)
        registry.configure_registry_client(options)

    def fetch_image_into_cache(self, image_id):
        """Cache a single image's data.

        Returns True on success, False when the image is missing or is
        not in 'active' status.
        """
        auth_tok = self.options.get('admin_token')
        ctx = context.RequestContext(is_admin=True, show_deleted=True,
                                     auth_tok=auth_tok)
        try:
            image_meta = registry.get_image_metadata(ctx, image_id)
            if image_meta['status'] != 'active':
                logger.warn(_("Image '%s' is not active. Not caching."),
                            image_id)
                return False
        except exception.NotFound:
            logger.warn(_("No metadata found for image '%s'"), image_id)
            return False

        image_data, image_size = get_from_backend(image_meta['location'])
        logger.debug(_("Caching image '%s'"), image_id)
        self.cache.cache_image_iter(image_id, image_data)
        return True

    def run(self):
        """Prefetch every queued image; True only if all succeed."""
        images = self.cache.get_queued_images()
        if not images:
            logger.debug(_("Nothing to prefetch."))
            return True

        num_images = len(images)
        logger.debug(_("Found %d images to prefetch"), num_images)

        # Fetch all queued images concurrently on green threads.
        pool = eventlet.GreenPool(num_images)
        results = pool.imap(self.fetch_image_into_cache, images)
        successes = sum(1 for r in results if r is True)
        if successes == num_images:
            logger.info(_("Successfully cached all %d images"), num_images)
            return True
        logger.error(_("Failed to successfully cache all "
                       "images in queue."))
        return False
class Prefetcher(object):
    """Pulls images off the cache queue and caches their data."""

    def __init__(self, options):
        self.options = options
        glance.store.create_stores(options)
        self.cache = ImageCache(options)
        registry.configure_registry_client(options)

    def fetch_image_into_cache(self, image_id):
        """Cache one image; False if missing or not 'active'."""
        auth_tok = self.options.get('admin_token')
        ctx = context.RequestContext(is_admin=True, show_deleted=True,
                                     auth_tok=auth_tok)
        try:
            image_meta = registry.get_image_metadata(ctx, image_id)
            if image_meta['status'] != 'active':
                logger.warn(_("Image '%s' is not active. Not caching."),
                            image_id)
                return False
        except exception.NotFound:
            logger.warn(_("No metadata found for image '%s'"), image_id)
            return False

        image_data, image_size = get_from_backend(image_meta['location'])
        logger.debug(_("Caching image '%s'"), image_id)
        self.cache.cache_image_iter(image_id, image_data)
        return True

    def run(self):
        """Prefetch everything on the cache queue; True only if all succeed."""
        images = self.cache.get_cache_queue()
        if not images:
            logger.debug(_("Nothing to prefetch."))
            return True

        num_images = len(images)
        logger.debug(_("Found %d images to prefetch"), num_images)

        # One green thread per queued image.
        pool = eventlet.GreenPool(num_images)
        results = pool.imap(self.fetch_image_into_cache, images)
        successes = sum(1 for r in results if r is True)
        if successes == num_images:
            logger.info(_("Successfully cached all %d images"), num_images)
            return True
        logger.error(_("Failed to successfully cache all "
                       "images in queue."))
        return False
class Queuer(object):
    """Adds images to the cache prefetch queue."""

    def __init__(self, options):
        self.options = options
        self.cache = ImageCache(options)
        registry.configure_registry_client(options)

    def queue_image(self, image_id):
        """Queue one image for caching.

        Returns True on success, False when the image is missing or is
        not in 'active' status.
        """
        auth_tok = self.options.get('admin_token')
        ctx = context.RequestContext(is_admin=True, show_deleted=True,
                                     auth_tok=auth_tok)
        try:
            image_meta = registry.get_image_metadata(ctx, image_id)
            if image_meta['status'] != 'active':
                logger.warn(_("Image '%s' is not active. Not queueing."),
                            image_id)
                return False
        except exception.NotFound:
            logger.warn(_("No metadata found for image '%s'"), image_id)
            return False

        logger.debug(_("Queueing image '%s'"), image_id)
        self.cache.queue_image(image_id)
        return True

    def run(self, images):
        """Queue each image in `images`; True only if all succeed."""
        num_images = len(images)
        if num_images == 0:
            logger.debug(_("No images to queue!"))
            return True

        logger.debug(_("Received %d images to queue"), num_images)

        # Queue all requested images concurrently on green threads.
        pool = eventlet.GreenPool(num_images)
        results = pool.imap(self.queue_image, images)
        successes = sum(1 for r in results if r is True)
        if successes == num_images:
            logger.info(_("Successfully queued all %d images"), num_images)
            return True
        logger.error(_("Failed to successfully queue all "
                       "images in queue."))
        return False
def __init__(self, conf, **local_conf):
    """Wire up the image cache from the supplied configuration."""
    self.cache = ImageCache(conf)
    self.conf = conf
def __init__(self, options):
    """Set up the backend stores and the image cache."""
    glance.store.create_stores(options)
    self.options = options
    self.cache = ImageCache(options)
class Prefetcher(object):
    """Pops one item off the prefetch queue per run and caches its data."""

    def __init__(self, options):
        self.options = options
        # Backend stores must exist before any image data can be fetched.
        glance.store.create_stores(options)
        self.cache = ImageCache(options)

    def fetch_image_into_cache(self, image_id):
        """Stream an image's bits from its backend store into the cache.

        :param image_id: identifier of the image to cache
        """
        ctx = context.RequestContext(is_admin=True, show_deleted=True)
        image_meta = registry.get_image_metadata(
            self.options, ctx, image_id)
        # Write chunks straight into the cache file as they arrive.
        with self.cache.open(image_meta, "wb") as cache_file:
            chunks = get_from_backend(image_meta['location'],
                                      expected_size=image_meta['size'],
                                      options=self.options)
            for chunk in chunks:
                cache_file.write(chunk)

    def run(self):
        """Perform one prefetch cycle.

        The cache-state checks below are order-dependent: bail out if a
        prefetch is already running, then pop the next queued item, then
        skip it if it is already cached or currently being written.
        """
        # Only one prefetch at a time.
        if self.cache.is_currently_prefetching_any_images():
            logger.debug(_("Currently prefetching, going back to sleep..."))
            return

        # pop_prefetch_item() raises IndexError when the queue is empty.
        try:
            image_id = self.cache.pop_prefetch_item()
        except IndexError:
            logger.debug(_("Nothing to prefetch, going back to sleep..."))
            return

        # Already cached: drop the stale queue entry.
        if self.cache.hit(image_id):
            logger.warn(_("Image %s is already in the cache, deleting "
                          "prefetch job and going back to sleep..."),
                        image_id)
            self.cache.delete_queued_prefetch_image(image_id)
            return

        # NOTE(sirp): if someone is already downloading an image that is in
        # the prefetch queue, then go ahead and delete that item and try to
        # prefetch another
        if self.cache.is_image_currently_being_written(image_id):
            logger.warn(_("Image %s is already being cached, deleting "
                          "prefetch job and going back to sleep..."),
                        image_id)
            self.cache.delete_queued_prefetch_image(image_id)
            return

        logger.debug(_("Prefetching '%s'"), image_id)
        # Mark the image as being prefetched, and always clear that marker
        # afterwards, even if the fetch fails.
        self.cache.do_prefetch(image_id)
        try:
            self.fetch_image_into_cache(image_id)
        finally:
            self.cache.delete_prefetching_image(image_id)
def __init__(self):
    # Construct the image cache with its default configuration.
    self.cache = ImageCache()
def __init__(self, options):
    """Wire up the image cache from the supplied options."""
    self.cache = ImageCache(options)
    self.options = options
class Pruner(object):
    """Prunes the image cache down to its configured maximum size.

    Entries are removed least-recently-used first, based on filesystem
    access times (falling back to modification times).
    """

    def __init__(self, options):
        self.options = options
        self.cache = ImageCache(options)

    @property
    def max_size(self):
        """Maximum cache size in bytes (default 1 GB)."""
        default = 1 * 1024 * 1024 * 1024  # 1 GB
        return config.get_option(
            self.options, 'image_cache_max_size_bytes',
            type='int', default=default)

    @property
    def percent_extra_to_free(self):
        """Fraction of max_size to free beyond the overage (default 5%)."""
        return config.get_option(
            self.options, 'image_cache_percent_extra_to_free',
            type='float', default=0.05)

    def run(self):
        self.prune_cache()

    def prune_cache(self):
        """Prune the cache using an LRU strategy"""
        # NOTE(sirp): 'Recency' is determined via the filesystem, first using
        # atime (access time) and falling back to mtime (modified time).
        #
        # It has become more common to disable access-time updates by setting
        # the `noatime` option for the filesystem. `noatime` is NOT compatible
        # with this method.
        #
        # If `noatime` needs to be supported, we will need to persist access
        # times elsewhere (either as a separate file, in the DB, or as
        # an xattr).
        def get_stats():
            # Collect (atime, mtime, size, path) for every cached file.
            stats = []
            for path in self.cache.get_all_regular_files(self.cache.path):
                file_info = os.stat(path)
                stats.append((file_info[stat.ST_ATIME],  # access time
                              file_info[stat.ST_MTIME],  # modification time
                              file_info[stat.ST_SIZE],   # size in bytes
                              path))                     # absolute path
            return stats

        def prune_lru(stats, to_free):
            # Sort older access and modified times to the back
            stats.sort(reverse=True)
            freed = 0
            # Also stop when `stats` is exhausted: without this guard,
            # stats.pop() raises IndexError if the cache empties before
            # enough bytes have been freed.
            while to_free > 0 and stats:
                atime, mtime, size, path = stats.pop()
                logger.debug("deleting '%(path)s' to free %(size)d B",
                             locals())
                os.unlink(path)
                to_free -= size
                freed += size
            return freed

        stats = get_stats()

        # Check for overage
        cur_size = sum(s[2] for s in stats)
        max_size = self.max_size
        logger.debug("cur_size=%(cur_size)d B max_size=%(max_size)d B",
                     locals())
        if cur_size <= max_size:
            logger.debug("cache has free space, skipping prune...")
            return

        overage = cur_size - max_size
        extra = max_size * self.percent_extra_to_free
        to_free = overage + extra
        logger.debug("overage=%(overage)d B extra=%(extra)d B"
                     " total=%(to_free)d B", locals())

        freed = prune_lru(stats, to_free)
        logger.debug("finished pruning, freed %(freed)d bytes", locals())
class Pruner(object):
    """Prunes the image cache down to its configured maximum size.

    Least-recently-used entries (by filesystem atime, falling back to
    mtime) are deleted first.
    """

    def __init__(self, options):
        self.options = options
        self.cache = ImageCache(options)

    @property
    def max_size(self):
        """Maximum cache size in bytes (default 1 GB)."""
        default = 1 * 1024 * 1024 * 1024  # 1 GB
        return config.get_option(self.options,
                                 'image_cache_max_size_bytes',
                                 type='int', default=default)

    @property
    def percent_extra_to_free(self):
        """Fraction of max_size to free beyond the overage (default 5%)."""
        return config.get_option(self.options,
                                 'image_cache_percent_extra_to_free',
                                 type='float', default=0.05)

    def run(self):
        self.prune_cache()

    def prune_cache(self):
        """Prune the cache using an LRU strategy"""
        # NOTE(sirp): 'Recency' is determined via the filesystem, first using
        # atime (access time) and falling back to mtime (modified time).
        #
        # It has become more common to disable access-time updates by setting
        # the `noatime` option for the filesystem. `noatime` is NOT compatible
        # with this method.
        #
        # If `noatime` needs to be supported, we will need to persist access
        # times elsewhere (either as a separate file, in the DB, or as
        # an xattr).
        def get_stats():
            # Collect (atime, mtime, size, path) for every cached file.
            stats = []
            for path in self.cache.get_all_regular_files(self.cache.path):
                file_info = os.stat(path)
                stats.append((
                    file_info[stat.ST_ATIME],  # access time
                    file_info[stat.ST_MTIME],  # modification time
                    file_info[stat.ST_SIZE],   # size in bytes
                    path))                     # absolute path
            return stats

        def prune_lru(stats, to_free):
            # Sort older access and modified times to the back
            stats.sort(reverse=True)
            freed = 0
            # Also stop when `stats` is exhausted: without this guard,
            # stats.pop() raises IndexError if the cache empties before
            # enough bytes have been freed.
            while to_free > 0 and stats:
                atime, mtime, size, path = stats.pop()
                logger.debug(_("deleting '%(path)s' to free %(size)d B"),
                             locals())
                os.unlink(path)
                to_free -= size
                freed += size
            return freed

        stats = get_stats()

        # Check for overage
        cur_size = sum(s[2] for s in stats)
        max_size = self.max_size
        logger.debug(_("cur_size=%(cur_size)d B max_size=%(max_size)d B"),
                     locals())
        if cur_size <= max_size:
            logger.debug(_("cache has free space, skipping prune..."))
            return

        overage = cur_size - max_size
        extra = max_size * self.percent_extra_to_free
        to_free = overage + extra
        logger.debug(_("overage=%(overage)d B extra=%(extra)d B"
                       " total=%(to_free)d B"), locals())

        freed = prune_lru(stats, to_free)
        logger.debug(_("finished pruning, freed %(freed)d bytes"), locals())
def __init__(self, options):
    """Set up stores, the image cache, and the registry client."""
    glance.store.create_stores(options)
    self.options = options
    self.cache = ImageCache(options)
    registry.configure_registry_client(options)
def __init__(self, conf, **local_conf):
    """Set up stores, the image cache, and the registry client/creds."""
    glance.store.create_stores(conf)
    self.cache = ImageCache(conf)
    registry.configure_registry_client(conf)
    registry.configure_registry_admin_creds(conf)
    self.conf = conf
def __init__(self, conf, **local_conf):
    """Set up the image cache and the registry client/creds."""
    self.cache = ImageCache(conf)
    registry.configure_registry_client(conf)
    registry.configure_registry_admin_creds(conf)
    self.conf = conf
def __init__(self, options):
    """Set up the image cache and the registry client."""
    self.cache = ImageCache(options)
    registry.configure_registry_client(options)
    self.options = options
def __init__(self, conf, **local_conf):
    """Set up the image cache and the registry client."""
    self.cache = ImageCache(conf)
    registry.configure_registry_client(conf)
    self.conf = conf