class ImageCache(object):
    """Provides an LRU cache for image data."""

    opts = [
        cfg.StrOpt('image_cache_driver', default='sqlite'),
        cfg.IntOpt('image_cache_max_size', default=10 * (1024 ** 3)),  # 10 GB
        cfg.IntOpt('image_cache_stall_time', default=86400),  # 24 hours
        cfg.StrOpt('image_cache_dir'),
    ]

    def __init__(self, conf):
        self.conf = conf
        self.conf.register_opts(self.opts)
        self.init_driver()

    def init_driver(self):
        """Create the driver for the cache."""
        driver_name = self.conf.image_cache_driver
        driver_module = (__name__ + '.drivers.' + driver_name + '.Driver')
        try:
            self.driver_class = utils.import_class(driver_module)
            logger.info(_("Image cache loaded driver '%s'.") % driver_name)
        except exception.ImportFailure, import_err:
            logger.warn(_("Image cache driver "
                          "'%(driver_name)s' failed to load. "
                          "Got error: '%(import_err)s'.") % locals())
            driver_module = __name__ + '.drivers.sqlite.Driver'
            logger.info(_("Defaulting to SQLite driver."))
            self.driver_class = utils.import_class(driver_module)
        self.configure_driver()
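
# A minimal usage sketch (not from the source). FakeConf stands in for the
# tank.common.cfg config object this module expects: anything exposing
# register_opts() plus attribute access for registered options will do. The
# .name/.default attributes on Opt instances are assumed from how the opts
# lists above are consumed.
class FakeConf(object):
    def __init__(self, **overrides):
        self._values = dict(overrides)

    def register_opts(self, opts):
        # record each option's default unless the caller overrode it
        for opt in opts:
            self._values.setdefault(opt.name, opt.default)

    def __getattr__(self, name):
        try:
            return self._values[name]
        except KeyError:
            raise AttributeError(name)

# cache = ImageCache(FakeConf(image_cache_dir='/var/lib/tank/image-cache'))
# Note that an unknown image_cache_driver value does not abort startup:
# init_driver() logs the ImportFailure and falls back to the SQLite driver.
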
class Store(tank.store.base.Store):
    """An implementation of the chase backend adapter."""

    EXAMPLE_URL = "chase://<USER>:<KEY>@<AUTH_ADDRESS>/<CONTAINER>/<FILE>"

    CHUNKSIZE = 65536

    opts = [
        cfg.BoolOpt('chase_enable_snet', default=False),
        cfg.StrOpt('chase_store_auth_address'),
        cfg.StrOpt('chase_store_user'),
        cfg.StrOpt('chase_store_key'),
        cfg.StrOpt('chase_store_container',
                   default=DEFAULT_CONTAINER),
        cfg.IntOpt('chase_store_large_object_size',
                   default=DEFAULT_LARGE_OBJECT_SIZE),
        cfg.IntOpt('chase_store_large_object_chunk_size',
                   default=DEFAULT_LARGE_OBJECT_CHUNK_SIZE),
        cfg.BoolOpt('chase_store_create_container_on_put', default=False),
    ]

    def configure(self):
        self.conf.register_opts(self.opts)
        self.snet = self.conf.chase_enable_snet

    def configure_add(self):
        """
        Configure the Store to use the stored configuration options.

        Any store that needs special configuration should implement this
        method. If the store was not able to successfully configure itself,
        it should raise `exception.BadStoreConfiguration`.
        """
        self.auth_address = self._option_get('chase_store_auth_address')
        self.user = self._option_get('chase_store_user')
        self.key = self._option_get('chase_store_key')
        self.container = self.conf.chase_store_container
        try:
            # The config file has chase_store_large_object_*size in MB, but
            # internally we store it in bytes, since the image_size parameter
            # passed to add() is also in bytes.
            self.large_object_size = \
                self.conf.chase_store_large_object_size * ONE_MB
            self.large_object_chunk_size = \
                self.conf.chase_store_large_object_chunk_size * ONE_MB
        except cfg.ConfigFileValueError, e:
            reason = _("Error in configuration file: %s") % e
            logger.error(reason)
            raise exception.BadStoreConfiguration(store_name="chase",
                                                  reason=reason)

        self.scheme = 'chase+https'
        if self.auth_address.startswith('http://'):
            self.scheme = 'chase+http'
            self.full_auth_address = self.auth_address
        elif self.auth_address.startswith('https://'):
            self.full_auth_address = self.auth_address
        else:
            # Default to https
            self.full_auth_address = 'https://' + self.auth_address
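
# A standalone illustration (not from the source) of the scheme/address
# derivation in configure_add() above: http:// must be explicit, anything
# else is treated as https.
def full_auth_url(auth_address):
    if auth_address.startswith('http://'):
        return 'chase+http', auth_address
    if auth_address.startswith('https://'):
        return 'chase+https', auth_address
    return 'chase+https', 'https://' + auth_address

# full_auth_url('auth.example.com')
#     -> ('chase+https', 'https://auth.example.com')
# full_auth_url('http://auth.example.com:8080/v1.0')
#     -> ('chase+http', 'http://auth.example.com:8080/v1.0')
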
class Store(tank.store.base.Store):
    """An implementation of the RBD backend adapter."""

    EXAMPLE_URL = "rbd://<IMAGE>"

    opts = [
        cfg.IntOpt('rbd_store_chunk_size', default=DEFAULT_CHUNKSIZE),
        cfg.StrOpt('rbd_store_pool', default=DEFAULT_POOL),
        cfg.StrOpt('rbd_store_user', default=DEFAULT_USER),
        cfg.StrOpt('rbd_store_ceph_conf', default=DEFAULT_CONFFILE),
    ]

    def configure_add(self):
        """
        Configure the Store to use the stored configuration options.

        Any store that needs special configuration should implement this
        method. If the store was not able to successfully configure itself,
        it should raise `exception.BadStoreConfiguration`.
        """
        self.conf.register_opts(self.opts)
        try:
            self.chunk_size = self.conf.rbd_store_chunk_size * 1024 * 1024

            # these must not be unicode since they will be passed to a
            # non-unicode-aware C library
            self.pool = str(self.conf.rbd_store_pool)
            self.user = str(self.conf.rbd_store_user)
            self.conf_file = str(self.conf.rbd_store_ceph_conf)
        except cfg.ConfigFileValueError, e:
            reason = _("Error in store configuration: %s") % e
            logger.error(reason)
            raise exception.BadStoreConfiguration(store_name='rbd',
                                                  reason=reason)
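
# Illustration (not from the source) of the two normalizations above: the
# chunk size is configured in MB but consumed in bytes, and option values
# arrive as unicode, which must be coerced to byte strings before being
# handed to the non-unicode-aware C library backing the RBD bindings.
chunk_size = 8 * 1024 * 1024    # rbd_store_chunk_size = 8 -> 8388608 bytes
pool = str(u'images')           # unicode option value -> plain 'images'
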
class Controller(object):

    opts = [
        cfg.IntOpt('limit_param_default', default=25),
        cfg.IntOpt('api_limit_max', default=1000),
    ]

    def __init__(self, conf):
        self.conf = conf
        self.conf.register_opts(self.opts)
        db_api.configure_db(conf)

    def _get_images(self, context, **params):
        """Get images, wrapping in exception if necessary."""
        try:
            return db_api.image_get_all(context, **params)
        except exception.NotFound, e:
            msg = _("Invalid marker. Image could not be found.")
            raise exc.HTTPBadRequest(explanation=msg)
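
# A hedged sketch of how the two options above are typically combined when
# parsing a request's `limit` query parameter; this helper is illustrative
# and not part of the excerpt.
def _get_limit(conf, requested=None):
    """Fall back to limit_param_default and clamp to api_limit_max."""
    limit = conf.limit_param_default if requested is None else requested
    return min(limit, conf.api_limit_max)

# _get_limit(conf)       -> 25    (default page size)
# _get_limit(conf, 5000) -> 1000  (clamped)
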
IMAGE_ATTRS = BASE_MODEL_ATTRS | set(['name', 'status', 'size',
                                      'disk_format', 'container_format',
                                      'min_disk', 'min_ram', 'is_public',
                                      'location', 'checksum', 'owner',
                                      'protected'])

CONTAINER_FORMATS = ['ami', 'ari', 'aki', 'bare', 'ovf']
DISK_FORMATS = ['ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2', 'vdi',
                'iso']
STATUSES = ['active', 'saving', 'queued', 'killed', 'pending_delete',
            'deleted']

db_opts = [
    cfg.IntOpt('sql_idle_timeout', default=3600),
    cfg.StrOpt('sql_connection', default='sqlite:///tank.sqlite'),
]


def configure_db(conf):
    """
    Establish the database, create an engine if needed, and
    register the models.

    :param conf: Mapping of configuration options
    """
    global _ENGINE, sa_logger, logger
    if not _ENGINE:
        conf.register_opts(db_opts)
        timeout = conf.sql_idle_timeout
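
# configure_db() is truncated above, stopping right before the engine is
# built. A minimal sketch of a plausible continuation, using SQLAlchemy's
# public API; this is an assumption based on the registered options, not
# the source's code.
import sqlalchemy

def _create_engine(conf, timeout):
    # pool_recycle refreshes connections idle longer than sql_idle_timeout,
    # which avoids stale-connection errors with MySQL backends
    return sqlalchemy.create_engine(conf.sql_connection,
                                    pool_recycle=timeout)
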
class RabbitStrategy(strategy.Strategy):
    """A notifier that puts a message on a queue when called."""

    opts = [
        cfg.StrOpt('rabbit_host', default='localhost'),
        cfg.IntOpt('rabbit_port', default=5672),
        cfg.BoolOpt('rabbit_use_ssl', default=False),
        cfg.StrOpt('rabbit_userid', default='guest'),
        cfg.StrOpt('rabbit_password', default='guest'),
        cfg.StrOpt('rabbit_virtual_host', default='/'),
        cfg.StrOpt('rabbit_notification_exchange', default='tank'),
        cfg.StrOpt('rabbit_notification_topic',
                   default='tank_notifications'),
    ]

    def __init__(self, conf):
        """Initialize the rabbit notification strategy."""
        self._conf = conf
        self._conf.register_opts(self.opts)

        self.topic = self._conf.rabbit_notification_topic
        self.connect()

    def connect(self):
        self.connection = kombu.connection.BrokerConnection(
            hostname=self._conf.rabbit_host,
            port=self._conf.rabbit_port,
            userid=self._conf.rabbit_userid,
            password=self._conf.rabbit_password,
            virtual_host=self._conf.rabbit_virtual_host,
            ssl=self._conf.rabbit_use_ssl)
        self.channel = self.connection.channel()
        self.exchange = kombu.entity.Exchange(
            channel=self.channel,
            type="topic",
            name=self._conf.rabbit_notification_exchange)
        self.exchange.declare()

    def _send_message(self, message, priority):
        routing_key = "%s.%s" % (self.topic, priority.lower())

        # NOTE(jerdfelt): Normally the consumer would create the queue, but
        # we do this to ensure that messages don't get dropped if the
        # consumer is started after we do
        queue = kombu.entity.Queue(channel=self.channel,
                                   exchange=self.exchange,
                                   durable=True,
                                   name=routing_key,
                                   routing_key=routing_key)
        queue.declare()

        msg = self.exchange.Message(json.dumps(message))
        self.exchange.publish(msg, routing_key=routing_key)

    def warn(self, msg):
        self._send_message(msg, "WARN")

    def info(self, msg):
        self._send_message(msg, "INFO")

    def error(self, msg):
        self._send_message(msg, "ERROR")
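
# A consumer-side sketch (not in the source) matching the producer topology
# above: same topic exchange, same durable per-routing-key queue. Plain
# kombu; hedged, since only the producer half appears in this excerpt.
import kombu

def consume_one(conf, priority='info'):
    connection = kombu.connection.BrokerConnection(
        hostname=conf.rabbit_host,
        port=conf.rabbit_port,
        userid=conf.rabbit_userid,
        password=conf.rabbit_password,
        virtual_host=conf.rabbit_virtual_host)
    channel = connection.channel()
    exchange = kombu.entity.Exchange(
        channel=channel, type="topic",
        name=conf.rabbit_notification_exchange)
    routing_key = "%s.%s" % (conf.rabbit_notification_topic, priority)
    queue = kombu.entity.Queue(channel=channel, exchange=exchange,
                               durable=True, name=routing_key,
                               routing_key=routing_key)
    queue.declare()
    message = queue.get(no_ack=True)    # None when the queue is empty
    return message and message.payload
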
    loc = location.get_location_from_uri(uri)
    return loc.store_name


scrubber_datadir_opt = cfg.StrOpt('scrubber_datadir',
                                  default='/var/lib/tank/scrubber')


def get_scrubber_datadir(conf):
    conf.register_opt(scrubber_datadir_opt)
    return conf.scrubber_datadir


delete_opts = [
    cfg.BoolOpt('delayed_delete', default=False),
    cfg.IntOpt('scrub_time', default=0),
]


def schedule_delete_from_backend(uri, conf, context, image_id, **kwargs):
    """
    Given a uri and a time, schedule the deletion of an image.
    """
    conf.register_opts(delete_opts)
    if not conf.delayed_delete:
        registry.update_image_metadata(context, image_id,
                                       {'status': 'deleted'})
        try:
            return delete_from_backend(uri, **kwargs)
        except (UnsupportedBackend, exception.NotFound):
            msg = _("Failed to delete image from store (%(uri)s).") % locals()
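
# schedule_delete_from_backend() is truncated above; the delayed branch is
# not shown. A hedged sketch of how scrub_time is presumably consumed: the
# eligible-for-scrubbing timestamp is "now plus scrub_time", later compared
# against queue-file mtimes by the scrubber. Illustrative only.
import time

def _scrub_eligible_at(conf):
    # scrub_time defaults to 0: images become scrubbable immediately
    return time.time() + conf.scrub_time
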
import logging

from tank.common import cfg
from tank.common import exception
from tank.registry import client

logger = logging.getLogger('tank.registry')

_CLIENT_HOST = None
_CLIENT_PORT = None
_CLIENT_KWARGS = {}
# AES key used to encrypt 'location' metadata
_METADATA_ENCRYPTION_KEY = None

registry_addr_opts = [
    cfg.StrOpt('registry_host', default='0.0.0.0'),
    cfg.IntOpt('registry_port', default=9191),
]
registry_client_opts = [
    cfg.StrOpt('registry_client_protocol', default='http'),
    cfg.StrOpt('registry_client_key_file'),
    cfg.StrOpt('registry_client_cert_file'),
    cfg.StrOpt('registry_client_ca_file'),
    cfg.StrOpt('metadata_encryption_key'),
]
admin_token_opt = cfg.StrOpt('admin_token')


def get_registry_addr(conf):
    conf.register_opts(registry_addr_opts)
    return (conf.registry_host, conf.registry_port)
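
# Usage sketch (not from the source): resolving the registry endpoint and
# building a client from it. RegistryClient's full constructor signature
# beyond (host, port) is an assumption here.
def _make_registry_client(conf):
    host, port = get_registry_addr(conf)    # ('0.0.0.0', 9191) by default
    return client.RegistryClient(host, port)
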
import logging

import eventlet
from eventlet.green import socket, ssl
import eventlet.wsgi
from paste import deploy
import routes
import routes.middleware
import webob.dec
import webob.exc

from tank.common import cfg
from tank.common import exception
from tank.common import utils

bind_opts = [
    cfg.StrOpt('bind_host', default='0.0.0.0'),
    cfg.IntOpt('bind_port'),
]

socket_opts = [
    cfg.IntOpt('backlog', default=4096),
    cfg.StrOpt('cert_file'),
    cfg.StrOpt('key_file'),
]


class WritableLogger(object):
    """A thin wrapper that responds to `write` and logs."""

    def __init__(self, logger, level=logging.DEBUG):
        self.logger = logger
        self.level = level

    def write(self, msg):
        # log the message, stripping the trailing newline eventlet.wsgi adds
        self.logger.log(self.level, msg.strip("\n"))
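
# A sketch (not from the source) of how bind_opts and socket_opts typically
# feed an eventlet listener; SSL wrapping applies only when both cert_file
# and key_file are configured. The helper name is illustrative.
def get_socket(conf):
    conf.register_opts(bind_opts + socket_opts)
    sock = eventlet.listen((conf.bind_host, conf.bind_port),
                           backlog=conf.backlog)
    if conf.cert_file and conf.key_file:
        sock = ssl.wrap_socket(sock,
                               certfile=conf.cert_file,
                               keyfile=conf.key_file)
    return sock
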
class Scrubber(object):
    CLEANUP_FILE = ".cleanup"

    opts = [
        cfg.BoolOpt('cleanup_scrubber', default=False),
        cfg.IntOpt('cleanup_scrubber_time', default=86400),
    ]

    def __init__(self, conf, **local_conf):
        self.conf = conf
        self.conf.register_opts(self.opts)

        self.datadir = store.get_scrubber_datadir(conf)
        self.cleanup = self.conf.cleanup_scrubber
        self.cleanup_time = self.conf.cleanup_scrubber_time

        host, port = registry.get_registry_addr(conf)

        logger.info(_("Initializing scrubber with conf: %s") %
                    {'datadir': self.datadir, 'cleanup': self.cleanup,
                     'cleanup_time': self.cleanup_time,
                     'registry_host': host, 'registry_port': port})

        self.registry = client.RegistryClient(host, port)

        utils.safe_mkdirs(self.datadir)

        store.create_stores(conf)

    def run(self, pool, event=None):
        now = time.time()

        if not os.path.exists(self.datadir):
            logger.info(_("%s does not exist") % self.datadir)
            return

        delete_work = []
        for root, dirs, files in os.walk(self.datadir):
            for id in files:
                if id == self.CLEANUP_FILE:
                    continue

                file_name = os.path.join(root, id)
                delete_time = os.stat(file_name).st_mtime

                if delete_time > now:
                    continue

                uri, delete_time = read_queue_file(file_name)

                if delete_time > now:
                    continue

                delete_work.append((id, uri, now))

        logger.info(_("Deleting %s images") % len(delete_work))
        pool.starmap(self._delete, delete_work)

        if self.cleanup:
            self._cleanup(pool)

    def _delete(self, id, uri, now):
        file_path = os.path.join(self.datadir, str(id))
        try:
            logger.debug(_("Deleting %(uri)s") % {'uri': uri})
            store.delete_from_backend(uri)
        except store.UnsupportedBackend:
            msg = _("Failed to delete image from store (%(uri)s).")
            logger.error(msg % {'uri': uri})
            write_queue_file(file_path, uri, now)

        self.registry.update_image(id, {'status': 'deleted'})
        utils.safe_remove(file_path)

    def _cleanup(self, pool):
        now = time.time()
        cleanup_file = os.path.join(self.datadir, self.CLEANUP_FILE)
        if not os.path.exists(cleanup_file):
            write_queue_file(cleanup_file, 'cleanup', now)
            return

        _uri, last_run_time = read_queue_file(cleanup_file)
        cleanup_time = last_run_time + self.cleanup_time
        if cleanup_time > now:
            return

        logger.info(_("Getting images deleted before %s") % self.cleanup_time)
        write_queue_file(cleanup_file, 'cleanup', now)

        filters = {'deleted': True, 'is_public': 'none',
                   'status': 'pending_delete'}
        pending_deletes = self.registry.get_images_detailed(filters=filters)

        delete_work = []
        for pending_delete in pending_deletes:
            deleted_at = pending_delete.get('deleted_at')
            if not deleted_at:
                continue

            time_fmt = "%Y-%m-%dT%H:%M:%S"
            delete_time = calendar.timegm(time.strptime(deleted_at,
                                                        time_fmt))

            if delete_time + self.cleanup_time > now:
                continue

            delete_work.append((pending_delete['id'],
                                pending_delete['location'],
                                now))

        logger.info(_("Deleting %s images") % len(delete_work))
        pool.starmap(self._delete, delete_work)
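
# Usage sketch (not from the source): driving one scrub pass. run() only
# needs a pool exposing starmap(); eventlet's GreenPool provides exactly
# that, and lets the per-image deletes proceed concurrently.
import eventlet

def scrub_once(conf):
    scrubber = Scrubber(conf)
    scrubber.run(eventlet.GreenPool(1000))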