class ImageCache(object):
    """Provides an LRU cache for image data."""

    opts = [
        cfg.StrOpt('image_cache_driver', default='sqlite'),
        cfg.IntOpt('image_cache_max_size', default=10 * (1024 ** 3)),  # 10 GB
        cfg.IntOpt('image_cache_stall_time', default=86400),  # 24 hours
        cfg.StrOpt('image_cache_dir'),
    ]

    def __init__(self, conf):
        self.conf = conf
        self.conf.register_opts(self.opts)
        self.init_driver()

    def init_driver(self):
        """
        Create the driver for the cache
        """
        driver_name = self.conf.image_cache_driver
        driver_module = (__name__ + '.drivers.' + driver_name + '.Driver')
        try:
            self.driver_class = utils.import_class(driver_module)
            logger.info(_("Image cache loaded driver '%s'.") % driver_name)
        except exception.ImportFailure, import_err:
            logger.warn(_("Image cache driver "
                          "'%(driver_name)s' failed to load. "
                          "Got error: '%(import_err)s'.") % locals())
            driver_module = __name__ + '.drivers.sqlite.Driver'
            logger.info(_("Defaulting to SQLite driver."))
            self.driver_class = utils.import_class(driver_module)
        self.configure_driver()
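utils.import_class is called above but not shown in this excerpt. A minimal sketch of how such a dotted-path class loader typically works (the body below is an assumption, not the project's own code):

# Hypothetical sketch of a dotted-path class loader like the
# utils.import_class used in ImageCache.init_driver() above.
import sys


def import_class(import_str):
    """Return a class given a string like 'pkg.module.ClassName'."""
    mod_str, _sep, class_str = import_str.rpartition('.')
    __import__(mod_str)
    try:
        return getattr(sys.modules[mod_str], class_str)
    except AttributeError:
        raise ImportError('Class %s cannot be found' % class_str)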
class Store(tank.store.base.Store):
    """An implementation of the RBD backend adapter."""

    EXAMPLE_URL = "rbd://<IMAGE>"

    opts = [
        cfg.IntOpt('rbd_store_chunk_size', default=DEFAULT_CHUNKSIZE),
        cfg.StrOpt('rbd_store_pool', default=DEFAULT_POOL),
        cfg.StrOpt('rbd_store_user', default=DEFAULT_USER),
        cfg.StrOpt('rbd_store_ceph_conf', default=DEFAULT_CONFFILE),
    ]

    def configure_add(self):
        """
        Configure the Store to use the stored configuration options

        Any store that needs special configuration should implement
        this method. If the store was not able to successfully configure
        itself, it should raise `exception.BadStoreConfiguration`
        """
        self.conf.register_opts(self.opts)
        try:
            self.chunk_size = self.conf.rbd_store_chunk_size * 1024 * 1024
            # these must not be unicode since they will be passed to a
            # non-unicode-aware C library
            self.pool = str(self.conf.rbd_store_pool)
            self.user = str(self.conf.rbd_store_user)
            self.conf_file = str(self.conf.rbd_store_ceph_conf)
        except cfg.ConfigFileValueError, e:
            reason = _("Error in store configuration: %s") % e
            logger.error(reason)
            raise exception.BadStoreConfiguration(store_name='rbd',
                                                  reason=reason)
class Store(tank.store.base.Store):
    """An implementation of the chase backend adapter."""

    EXAMPLE_URL = "chase://<USER>:<KEY>@<AUTH_ADDRESS>/<CONTAINER>/<FILE>"

    CHUNKSIZE = 65536

    opts = [
        cfg.BoolOpt('chase_enable_snet', default=False),
        cfg.StrOpt('chase_store_auth_address'),
        cfg.StrOpt('chase_store_user'),
        cfg.StrOpt('chase_store_key'),
        cfg.StrOpt('chase_store_container', default=DEFAULT_CONTAINER),
        cfg.IntOpt('chase_store_large_object_size',
                   default=DEFAULT_LARGE_OBJECT_SIZE),
        cfg.IntOpt('chase_store_large_object_chunk_size',
                   default=DEFAULT_LARGE_OBJECT_CHUNK_SIZE),
        cfg.BoolOpt('chase_store_create_container_on_put', default=False),
    ]

    def configure(self):
        self.conf.register_opts(self.opts)
        self.snet = self.conf.chase_enable_snet

    def configure_add(self):
        """
        Configure the Store to use the stored configuration options

        Any store that needs special configuration should implement
        this method. If the store was not able to successfully configure
        itself, it should raise `exception.BadStoreConfiguration`
        """
        self.auth_address = self._option_get('chase_store_auth_address')
        self.user = self._option_get('chase_store_user')
        self.key = self._option_get('chase_store_key')
        self.container = self.conf.chase_store_container
        try:
            # The config file has chase_store_large_object_*size in MB, but
            # internally we store it in bytes, since the image_size parameter
            # passed to add() is also in bytes.
            self.large_object_size = \
                self.conf.chase_store_large_object_size * ONE_MB
            self.large_object_chunk_size = \
                self.conf.chase_store_large_object_chunk_size * ONE_MB
        except cfg.ConfigFileValueError, e:
            reason = _("Error in store configuration: %s") % e
            logger.error(reason)
            raise exception.BadStoreConfiguration(store_name="chase",
                                                  reason=reason)

        self.scheme = 'chase+https'
        if self.auth_address.startswith('http://'):
            self.scheme = 'chase+http'
            self.full_auth_address = self.auth_address
        elif self.auth_address.startswith('https://'):
            self.full_auth_address = self.auth_address
        else:  # Default to https
            self.full_auth_address = 'https://' + self.auth_address
def test_walk_versions(self):
    """
    Walks all version scripts for each tested database, ensuring
    that there are no errors in the version scripts for each engine
    """
    for key, engine in self.engines.items():
        conf = utils.TestConfigOpts({
            'sql_connection': TestMigrations.TEST_DATABASES[key]})
        conf.register_opt(cfg.StrOpt('sql_connection'))
        self._walk_versions(conf)
def test_no_data_loss_2_to_3_to_2(self):
    """
    Here, we test that in the case when we moved a column "type" from the
    base images table to be records in the image_properties table, that we
    don't lose any data during the migration. Similarly, we test that on
    downgrade, we don't lose any data, as the records are moved from the
    image_properties table back into the base image table.
    """
    for key, engine in self.engines.items():
        conf = utils.TestConfigOpts({
            'sql_connection': TestMigrations.TEST_DATABASES[key]})
        conf.register_opt(cfg.StrOpt('sql_connection'))
        self._no_data_loss_2_to_3_to_2(engine, conf)
class Notifier(object):
    """Uses a notification strategy to send out messages about events."""

    opts = [
        cfg.StrOpt('notifier_strategy', default='default')
    ]

    def __init__(self, conf, strategy=None):
        conf.register_opts(self.opts)
        strategy = conf.notifier_strategy
        try:
            self.strategy = utils.import_class(_STRATEGIES[strategy])(conf)
        except (KeyError, ImportError):
            # Must be a tuple: `except KeyError, ImportError:` would only
            # catch KeyError and bind it to the name ImportError.
            raise exception.InvalidNotifierStrategy(strategy=strategy)
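_STRATEGIES is referenced above but not defined in this excerpt. A plausible shape for it (the module paths here are assumptions) maps each strategy name to a dotted path that utils.import_class can load:

# Assumed mapping of strategy names to class paths; only the rabbit
# entry corresponds to a class actually shown in this section.
_STRATEGIES = {
    "default": "tank.notifier.NoopStrategy",
    "logging": "tank.notifier.LoggingStrategy",
    "rabbit": "tank.notifier.notify_kombu.RabbitStrategy",
}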
class Driver(base.Driver):
    """
    Cache driver that uses a SQLite database to track the attributes
    of cached images.
    """

    opts = [
        cfg.StrOpt('image_cache_sqlite_db', default='cache.db'),
    ]

    def configure(self):
        """
        Configure the driver to use the stored configuration options

        Any store that needs special configuration should implement
        this method. If the store was not able to successfully configure
        itself, it should raise `exception.BadDriverConfiguration`
        """
        super(Driver, self).configure()
        self.conf.register_opts(self.opts)

        # Create the SQLite database that will hold our cache attributes
        self.initialize_db()

    def initialize_db(self):
        db = self.conf.image_cache_sqlite_db
        self.db_path = os.path.join(self.base_dir, db)
        try:
            conn = sqlite3.connect(self.db_path, check_same_thread=False,
                                   factory=SqliteConnection)
            conn.executescript("""
                CREATE TABLE IF NOT EXISTS cached_images (
                    image_id TEXT PRIMARY KEY,
                    last_accessed REAL DEFAULT 0.0,
                    last_modified REAL DEFAULT 0.0,
                    size INTEGER DEFAULT 0,
                    hits INTEGER DEFAULT 0,
                    checksum TEXT
                );
            """)
            conn.close()
        except sqlite3.DatabaseError, e:
            msg = _("Failed to initialize the image cache database. "
                    "Got error: %s") % e
            logger.error(msg)
            raise exception.BadDriverConfiguration(driver_name='sqlite',
                                                   reason=msg)
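Once initialize_db() has created the schema, cache bookkeeping is plain SQL against cached_images. A hedged usage sketch; the helper below is illustrative, not a method of the driver above:

import sqlite3
import time


def touch_image(db_path, image_id):
    # Record a cache hit: bump the counter and the access timestamp.
    conn = sqlite3.connect(db_path)
    with conn:  # commits on success, rolls back on error
        conn.execute("UPDATE cached_images "
                     "SET hits = hits + 1, last_accessed = ? "
                     "WHERE image_id = ?",
                     (time.time(), image_id))
    conn.close()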
def add_options(conf):
    """
    Adds any configuration options that the db layer might have.

    :param conf: A ConfigOpts object
    :retval None
    """
    conf.register_group(cfg.OptGroup('registrydb',
                                     title='Registry Database Options',
                                     help='The following configuration '
                                          'options are specific to the '
                                          'Tank image registry database.'))
    conf.register_cli_opt(cfg.StrOpt('sql_connection',
                                     metavar='CONNECTION',
                                     help='A valid SQLAlchemy connection '
                                          'string for the registry '
                                          'database. Default: %default'))
IMAGE_ATTRS = BASE_MODEL_ATTRS | set(['name', 'status', 'size',
                                      'disk_format', 'container_format',
                                      'min_disk', 'min_ram', 'is_public',
                                      'location', 'checksum', 'owner',
                                      'protected'])

CONTAINER_FORMATS = ['ami', 'ari', 'aki', 'bare', 'ovf']
DISK_FORMATS = ['ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2', 'vdi',
                'iso']
STATUSES = ['active', 'saving', 'queued', 'killed', 'pending_delete',
            'deleted']

db_opts = [
    cfg.IntOpt('sql_idle_timeout', default=3600),
    cfg.StrOpt('sql_connection', default='sqlite:///tank.sqlite'),
]


def configure_db(conf):
    """
    Establish the database, create an engine if needed, and
    register the models.

    :param conf: Mapping of configuration options
    """
    global _ENGINE, sa_logger, logger
    if not _ENGINE:
        conf.register_opts(db_opts)
        timeout = conf.sql_idle_timeout
        sql_connection = conf.sql_connection
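configure_db() is cut off above just after the two options are read. A hedged sketch of the conventional SQLAlchemy step that typically follows (an assumption, not the project's code; only sql_connection and timeout come from the excerpt):

# Assumed continuation sketch: build the engine from the options read
# in configure_db(), recycling idle connections after the timeout.
from sqlalchemy import create_engine


def make_engine(sql_connection, timeout):
    return create_engine(sql_connection, pool_recycle=timeout)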
class RabbitStrategy(strategy.Strategy):
    """A notifier that puts a message on a queue when called."""

    opts = [
        cfg.StrOpt('rabbit_host', default='localhost'),
        cfg.IntOpt('rabbit_port', default=5672),
        cfg.BoolOpt('rabbit_use_ssl', default=False),
        cfg.StrOpt('rabbit_userid', default='guest'),
        cfg.StrOpt('rabbit_password', default='guest'),
        cfg.StrOpt('rabbit_virtual_host', default='/'),
        cfg.StrOpt('rabbit_notification_exchange', default='tank'),
        cfg.StrOpt('rabbit_notification_topic',
                   default='tank_notifications')
    ]

    def __init__(self, conf):
        """Initialize the rabbit notification strategy."""
        self._conf = conf
        self._conf.register_opts(self.opts)

        self.topic = self._conf.rabbit_notification_topic
        self.connect()

    def connect(self):
        self.connection = kombu.connection.BrokerConnection(
            hostname=self._conf.rabbit_host,
            userid=self._conf.rabbit_userid,
            password=self._conf.rabbit_password,
            virtual_host=self._conf.rabbit_virtual_host,
            ssl=self._conf.rabbit_use_ssl)
        self.channel = self.connection.channel()
        self.exchange = kombu.entity.Exchange(
            channel=self.channel,
            type="topic",
            name=self._conf.rabbit_notification_exchange)
        self.exchange.declare()

    def _send_message(self, message, priority):
        routing_key = "%s.%s" % (self.topic, priority.lower())

        # NOTE(jerdfelt): Normally the consumer would create the queue, but
        # we do this to ensure that messages don't get dropped if the
        # consumer is started after we do
        queue = kombu.entity.Queue(channel=self.channel,
                                   exchange=self.exchange,
                                   durable=True,
                                   name=routing_key,
                                   routing_key=routing_key)
        queue.declare()

        msg = self.exchange.Message(json.dumps(message))
        self.exchange.publish(msg, routing_key=routing_key)

    def warn(self, msg):
        self._send_message(msg, "WARN")

    def info(self, msg):
        self._send_message(msg, "INFO")

    def error(self, msg):
        self._send_message(msg, "ERROR")
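A hedged usage sketch for RabbitStrategy; the conf object and the message shape are assumptions (anything json.dumps accepts will publish), and a reachable broker is required:

# `conf` must be a registered ConfigOpts object.
notifier = RabbitStrategy(conf)
notifier.info({'event_type': 'image.upload',
               'payload': {'id': 'some-image-id', 'size': 1024}})
# With the defaults above this publishes to the "tank" topic exchange
# using routing key "tank_notifications.info".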
    raise exception.StoreDeleteNotSupported


def get_store_from_location(uri):
    """
    Given a location (assumed to be a URL), attempt to determine
    the store from the location. We use here a simple guess that
    the scheme of the parsed URL is the store...

    :param uri: Location to check for the store
    """
    loc = location.get_location_from_uri(uri)
    return loc.store_name


scrubber_datadir_opt = cfg.StrOpt('scrubber_datadir',
                                  default='/var/lib/tank/scrubber')


def get_scrubber_datadir(conf):
    conf.register_opt(scrubber_datadir_opt)
    return conf.scrubber_datadir


delete_opts = [
    cfg.BoolOpt('delayed_delete', default=False),
    cfg.IntOpt('scrub_time', default=0)
]


def schedule_delete_from_backend(uri, conf, context, image_id, **kwargs):
    """
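location.get_location_from_uri() is not shown in this excerpt; the scheme-is-the-store guess it implements can be sketched with the stdlib (the helper name below is hypothetical):

import urlparse  # Python 2 stdlib, matching the code above


def guess_store_name(uri):
    # 'chase://user:key@auth/container/file' -> 'chase'
    return urlparse.urlparse(uri).scheme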
import logging

from tank.common import cfg
from tank.common import exception
from tank.registry import client

logger = logging.getLogger('tank.registry')

_CLIENT_HOST = None
_CLIENT_PORT = None
_CLIENT_KWARGS = {}
# AES key used to encrypt 'location' metadata
_METADATA_ENCRYPTION_KEY = None

registry_addr_opts = [
    cfg.StrOpt('registry_host', default='0.0.0.0'),
    cfg.IntOpt('registry_port', default=9191),
]
registry_client_opts = [
    cfg.StrOpt('registry_client_protocol', default='http'),
    cfg.StrOpt('registry_client_key_file'),
    cfg.StrOpt('registry_client_cert_file'),
    cfg.StrOpt('registry_client_ca_file'),
    cfg.StrOpt('metadata_encryption_key'),
]
admin_token_opt = cfg.StrOpt('admin_token')


def get_registry_addr(conf):
    conf.register_opts(registry_addr_opts)
    return (conf.registry_host, conf.registry_port)
class Store(tank.store.base.Store):

    datadir_opt = cfg.StrOpt('filesystem_store_datadir')

    def configure_add(self):
        """
        Configure the Store to use the stored configuration options

        Any store that needs special configuration should implement
        this method. If the store was not able to successfully configure
        itself, it should raise `exception.BadStoreConfiguration`
        """
        self.conf.register_opt(self.datadir_opt)

        self.datadir = self.conf.filesystem_store_datadir
        if self.datadir is None:
            reason = _("Could not find %s in configuration options.") % \
                'filesystem_store_datadir'
            logger.error(reason)
            raise exception.BadStoreConfiguration(store_name="filesystem",
                                                  reason=reason)

        if not os.path.exists(self.datadir):
            msg = _("Directory to write image files does not exist "
                    "(%s). Creating.") % self.datadir
            logger.info(msg)
            try:
                os.makedirs(self.datadir)
            except (IOError, OSError):
                # os.makedirs signals failure with OSError; a bare
                # `except IOError` would let it propagate.
                reason = _("Unable to create datadir: %s") % self.datadir
                logger.error(reason)
                raise exception.BadStoreConfiguration(store_name="filesystem",
                                                      reason=reason)

    def get(self, location):
        """
        Takes a `tank.store.location.Location` object that indicates
        where to find the image file, and returns a tuple of generator
        (for reading the image file) and image_size

        :param location `tank.store.location.Location` object, supplied
                        from tank.store.location.get_location_from_uri()
        :raises `tank.exception.NotFound` if image does not exist
        """
        loc = location.store_location
        filepath = loc.path
        if not os.path.exists(filepath):
            raise exception.NotFound(_("Image file %s not found") % filepath)
        else:
            msg = _("Found image at %s. Returning in ChunkedFile.") % filepath
            logger.debug(msg)
            return (ChunkedFile(filepath), None)

    def delete(self, location):
        """
        Takes a `tank.store.location.Location` object that indicates
        where to find the image file to delete

        :location `tank.store.location.Location` object, supplied
                  from tank.store.location.get_location_from_uri()

        :raises NotFound if image does not exist
        :raises NotAuthorized if cannot delete because of permissions
        """
        loc = location.store_location
        fn = loc.path
        if os.path.exists(fn):
            try:
                logger.debug(_("Deleting image at %(fn)s") % locals())
                os.unlink(fn)
            except OSError:
                raise exception.NotAuthorized(
                    _("You cannot delete file %s") % fn)
        else:
            raise exception.NotFound(_("Image file %s does not exist") % fn)

    def add(self, image_id, image_file, image_size):
        """
        Stores an image file with supplied identifier to the backend
        storage system and returns an `tank.store.ImageAddResult` object
        containing information about the stored image.

        :param image_id: The opaque image identifier
        :param image_file: The image data to write, as a file-like object
        :param image_size: The size of the image data to write, in bytes

        :retval `tank.store.ImageAddResult` object
        :raises `tank.common.exception.Duplicate` if the image already
                existed

        :note By default, the backend writes the image data to a file
              `/<DATADIR>/<ID>`, where <DATADIR> is the value of the
              filesystem_store_datadir configuration option and <ID>
              is the supplied image ID.
""" filepath = os.path.join(self.datadir, str(image_id)) if os.path.exists(filepath): raise exception.Duplicate( _("Image file %s already exists!") % filepath) checksum = hashlib.md5() bytes_written = 0 with open(filepath, 'wb') as f: while True: buf = image_file.read(ChunkedFile.CHUNKSIZE) if not buf: break bytes_written += len(buf) checksum.update(buf) f.write(buf) checksum_hex = checksum.hexdigest() logger.debug( _("Wrote %(bytes_written)d bytes to %(filepath)s with " "checksum %(checksum_hex)s") % locals()) return ('file://%s' % filepath, bytes_written, checksum_hex)
import logging

import eventlet
from eventlet.green import socket, ssl
import eventlet.wsgi
from paste import deploy
import routes
import routes.middleware
import webob.dec
import webob.exc

from tank.common import cfg
from tank.common import exception
from tank.common import utils

bind_opts = [
    cfg.StrOpt('bind_host', default='0.0.0.0'),
    cfg.IntOpt('bind_port'),
]

socket_opts = [
    cfg.IntOpt('backlog', default=4096),
    cfg.StrOpt('cert_file'),
    cfg.StrOpt('key_file'),
]


class WritableLogger(object):
    """A thin wrapper that responds to `write` and logs."""

    def __init__(self, logger, level=logging.DEBUG):
        self.logger = logger
        self.level = level

    def write(self, msg):
        # The `write` method promised by the docstring: log each line
        # handed to us by the WSGI server.
        self.logger.log(self.level, msg.strip("\n"))
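A hedged usage sketch: eventlet.wsgi.server() accepts a file-like `log` object, which is exactly what WritableLogger provides. The function and its arguments below are placeholders, not the module's own server code:

def run_server(application, sock, logger):
    # Route eventlet's per-request log lines through our logger.
    eventlet.wsgi.server(sock, application, log=WritableLogger(logger))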
class Store(tank.store.base.Store):
    """An implementation of the s3 adapter."""

    EXAMPLE_URL = "s3://<ACCESS_KEY>:<SECRET_KEY>@<S3_URL>/<BUCKET>/<OBJ>"

    opts = [
        cfg.StrOpt('s3_store_host'),
        cfg.StrOpt('s3_store_access_key'),
        cfg.StrOpt('s3_store_secret_key'),
        cfg.StrOpt('s3_store_bucket'),
        cfg.StrOpt('s3_store_object_buffer_dir'),
        cfg.BoolOpt('s3_store_create_bucket_on_put', default=False),
    ]

    def configure_add(self):
        """
        Configure the Store to use the stored configuration options

        Any store that needs special configuration should implement
        this method. If the store was not able to successfully configure
        itself, it should raise `exception.BadStoreConfiguration`
        """
        self.conf.register_opts(self.opts)
        self.s3_host = self._option_get('s3_store_host')
        access_key = self._option_get('s3_store_access_key')
        secret_key = self._option_get('s3_store_secret_key')
        # NOTE(jaypipes): Need to encode to UTF-8 here because of a
        # bug in the HMAC library that boto uses.
        # See: http://bugs.python.org/issue5285
        # See: http://trac.edgewall.org/ticket/8083
        self.access_key = access_key.encode('utf-8')
        self.secret_key = secret_key.encode('utf-8')
        self.bucket = self._option_get('s3_store_bucket')

        self.scheme = 's3'
        if self.s3_host.startswith('https://'):
            self.scheme = 's3+https'
            self.full_s3_host = self.s3_host
        elif self.s3_host.startswith('http://'):
            self.full_s3_host = self.s3_host
        else:  # Default to http
            self.full_s3_host = 'http://' + self.s3_host

        self.s3_store_object_buffer_dir = \
            self.conf.s3_store_object_buffer_dir

    def _option_get(self, param):
        result = getattr(self.conf, param)
        if not result:
            reason = _("Could not find %(param)s in configuration "
                       "options.") % locals()
            logger.error(reason)
            raise exception.BadStoreConfiguration(store_name="s3",
                                                  reason=reason)
        return result

    def get(self, location):
        """
        Takes a `tank.store.location.Location` object that indicates
        where to find the image file, and returns a tuple of generator
        (for reading the image file) and image_size

        :param location `tank.store.location.Location` object, supplied
                        from tank.store.location.get_location_from_uri()
        :raises `tank.exception.NotFound` if image does not exist
        """
        loc = location.store_location
        from boto.s3.connection import S3Connection

        s3_conn = S3Connection(loc.accesskey, loc.secretkey,
                               host=loc.s3serviceurl,
                               is_secure=(loc.scheme == 's3+https'))
        bucket_obj = get_bucket(s3_conn, loc.bucket)

        key = get_key(bucket_obj, loc.key)

        msg = _("Retrieved image object from S3 using (s3_host=%(s3_host)s, "
                "access_key=%(accesskey)s, bucket=%(bucket)s, "
                "key=%(obj_name)s)") % ({'s3_host': loc.s3serviceurl,
                                         'accesskey': loc.accesskey,
                                         'bucket': loc.bucket,
                                         'obj_name': loc.key})
        logger.debug(msg)

        #if expected_size and (key.size != expected_size):
        #    msg = "Expected %s bytes, got %s" % (expected_size, key.size)
        #    logger.error(msg)
        #    raise tank.store.BackendException(msg)

        key.BufferSize = self.CHUNKSIZE
        return (ChunkedFile(key), key.size)

    def add(self, image_id, image_file, image_size):
        """
        Stores an image file with supplied identifier to the backend
        storage system and returns an `tank.store.ImageAddResult` object
        containing information about the stored image.
        :param image_id: The opaque image identifier
        :param image_file: The image data to write, as a file-like object
        :param image_size: The size of the image data to write, in bytes

        :retval `tank.store.ImageAddResult` object
        :raises `tank.common.exception.Duplicate` if the image already
                existed

        S3 writes the image data using the scheme:
            s3://<ACCESS_KEY>:<SECRET_KEY>@<S3_URL>/<BUCKET>/<OBJ>
        where:
            <ACCESS_KEY> = ``s3_store_access_key``
            <SECRET_KEY> = ``s3_store_secret_key``
            <S3_URL> = ``s3_store_host``
            <BUCKET> = ``s3_store_bucket``
            <OBJ> = The id of the image being added
        """
        from boto.s3.connection import S3Connection

        loc = StoreLocation({'scheme': self.scheme,
                             'bucket': self.bucket,
                             'key': image_id,
                             's3serviceurl': self.full_s3_host,
                             'accesskey': self.access_key,
                             'secretkey': self.secret_key})

        s3_conn = S3Connection(loc.accesskey, loc.secretkey,
                               host=loc.s3serviceurl,
                               is_secure=(loc.scheme == 's3+https'))

        create_bucket_if_missing(self.bucket, s3_conn, self.conf)

        bucket_obj = get_bucket(s3_conn, self.bucket)
        obj_name = str(image_id)

        key = bucket_obj.get_key(obj_name)
        if key and key.exists():
            raise exception.Duplicate(_("S3 already has an image at "
                                        "location %s") % loc.get_uri())

        msg = _("Adding image object to S3 using (s3_host=%(s3_host)s, "
                "access_key=%(access_key)s, bucket=%(bucket)s, "
                "key=%(obj_name)s)") % ({'s3_host': self.s3_host,
                                         'access_key': self.access_key,
                                         'bucket': self.bucket,
                                         'obj_name': obj_name})
        logger.debug(msg)

        key = bucket_obj.new_key(obj_name)

        # We need to wrap image_file, which is a reference to the
        # webob.Request.body_file, with a seekable file-like object,
        # otherwise the call to set_contents_from_file() will die
        # with an error about Input object has no method 'seek'. We
        # might want to call webob.Request.make_body_seekable(), but
        # unfortunately, that method copies the entire image into
        # memory and results in LP Bug #818292 occurring. So, here
        # we write a temporary file in as memory-efficient a manner
        # as possible and then supply the temporary file to S3. We
        # also take this opportunity to calculate the image checksum
        # while writing the tempfile, so we don't need to call
        # key.compute_md5()
        msg = _("Writing request body file to temporary file "
                "for %s") % loc.get_uri()
        logger.debug(msg)

        tmpdir = self.s3_store_object_buffer_dir
        temp_file = tempfile.NamedTemporaryFile(dir=tmpdir)
        checksum = hashlib.md5()
        chunk = image_file.read(self.CHUNKSIZE)
        while chunk:
            checksum.update(chunk)
            temp_file.write(chunk)
            chunk = image_file.read(self.CHUNKSIZE)
        temp_file.flush()

        msg = _("Uploading temporary file to S3 for %s") % loc.get_uri()
        logger.debug(msg)

        # OK, now upload the data into the key
        key.set_contents_from_file(open(temp_file.name, 'r+b'),
                                   replace=False)
        size = key.size
        checksum_hex = checksum.hexdigest()

        logger.debug(_("Wrote %(size)d bytes to S3 key named %(obj_name)s "
                       "with checksum %(checksum_hex)s") % locals())

        return (loc.get_uri(), size, checksum_hex)

    def delete(self, location):
        """
        Takes a `tank.store.location.Location` object that indicates
        where to find the image file to delete

        :location `tank.store.location.Location` object, supplied
                  from tank.store.location.get_location_from_uri()

        :raises NotFound if image does not exist
        """
        loc = location.store_location
        from boto.s3.connection import S3Connection
        s3_conn = S3Connection(loc.accesskey, loc.secretkey,
                               host=loc.s3serviceurl,
                               is_secure=(loc.scheme == 's3+https'))
        bucket_obj = get_bucket(s3_conn, loc.bucket)

        # Close the key when we're through.
        key = get_key(bucket_obj, loc.key)

        msg = _("Deleting image object from S3 using (s3_host=%(s3_host)s, "
                "access_key=%(accesskey)s, bucket=%(bucket)s, "
                "key=%(obj_name)s)") % ({'s3_host': loc.s3serviceurl,
                                         'accesskey': loc.accesskey,
                                         'bucket': loc.bucket,
                                         'obj_name': loc.key})
        logger.debug(msg)

        return key.delete()
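get_bucket() and get_key() are called throughout the S3 store but not defined in this excerpt. Minimal sketches, assuming they exist only to turn boto lookups that come back empty into NotFound errors:

def get_bucket(conn, bucket_id):
    # Look the bucket up via the boto connection; treat a missing
    # bucket as NotFound rather than returning None.
    bucket = conn.get_bucket(bucket_id)
    if not bucket:
        raise exception.NotFound(_("Could not find bucket %s") % bucket_id)
    return bucket


def get_key(bucket, obj):
    # Same idea for individual keys within a bucket.
    key = bucket.get_key(obj)
    if not key or not key.exists():
        raise exception.NotFound(_("Could not find key %(obj)s in "
                                   "bucket %(bucket)s") % locals())
    return key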