class Store(glance.store.base.Store):
    """An implementation of the swift backend adapter."""

    EXAMPLE_URL = "swift://<USER>:<KEY>@<AUTH_ADDRESS>/<CONTAINER>/<FILE>"

    CHUNKSIZE = 65536

    opts = [
        cfg.BoolOpt('swift_enable_snet', default=False),
        cfg.StrOpt('swift_store_auth_address'),
        cfg.StrOpt('swift_store_user'),
        cfg.StrOpt('swift_store_key'),
        cfg.StrOpt('swift_store_container',
                   default=DEFAULT_CONTAINER),
        cfg.IntOpt('swift_store_large_object_size',
                   default=DEFAULT_LARGE_OBJECT_SIZE),
        cfg.IntOpt('swift_store_large_object_chunk_size',
                   default=DEFAULT_LARGE_OBJECT_CHUNK_SIZE),
        cfg.BoolOpt('swift_store_create_container_on_put', default=False),
    ]

    def configure(self):
        self.conf.register_opts(self.opts)
        self.snet = self.conf.swift_enable_snet

    def configure_add(self):
        """
        Configure the Store to use the stored configuration options

        Any store that needs special configuration should implement
        this method. If the store was not able to successfully configure
        itself, it should raise `exception.BadStoreConfiguration`
        """
        self.auth_address = self._option_get('swift_store_auth_address')
        self.user = self._option_get('swift_store_user')
        self.key = self._option_get('swift_store_key')
        self.container = self.conf.swift_store_container
        try:
            # The config file has swift_store_large_object_*size in MB, but
            # internally we store it in bytes, since the image_size parameter
            # passed to add() is also in bytes.
            self.large_object_size = \
                self.conf.swift_store_large_object_size * ONE_MB
            self.large_object_chunk_size = \
                self.conf.swift_store_large_object_chunk_size * ONE_MB
        except cfg.ConfigFileValueError, e:
            reason = _("Error in configuration conf: %s") % e
            logger.error(reason)
            raise exception.BadStoreConfiguration(store_name="swift",
                                                  reason=reason)

        self.scheme = 'swift+https'
        if self.auth_address.startswith('http://'):
            self.scheme = 'swift+http'
            self.full_auth_address = self.auth_address
        elif self.auth_address.startswith('https://'):
            self.full_auth_address = self.auth_address
        else:  # Defaults https
            self.full_auth_address = 'https://' + self.auth_address
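
# A minimal, self-contained sketch (not part of the store) of the scheme
# normalization in configure_add() above: the swift auth address defaults
# to https unless an explicit http:// prefix is given. The helper name and
# sample addresses are illustrative only.
def _swift_scheme_for(auth_address):
    if auth_address.startswith('http://'):
        return 'swift+http', auth_address
    if auth_address.startswith('https://'):
        return 'swift+https', auth_address
    return 'swift+https', 'https://' + auth_address

assert _swift_scheme_for('http://auth.example.com') == \
    ('swift+http', 'http://auth.example.com')
assert _swift_scheme_for('auth.example.com') == \
    ('swift+https', 'https://auth.example.com')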
class RabbitStrategy(strategy.Strategy):
    """A notifier that puts a message on a queue when called."""

    opts = [
        cfg.ListOpt('rabbit_addresses', default=['localhost:5672']),
        cfg.BoolOpt('rabbit_use_ssl', default=False),
        cfg.StrOpt('rabbit_userid', default='guest'),
        cfg.StrOpt('rabbit_password', default='guest'),
        cfg.StrOpt('rabbit_virtual_host', default='/'),
        cfg.StrOpt('rabbit_notification_exchange', default='glance'),
        cfg.StrOpt('rabbit_notification_topic',
                   default='glance_notifications'),
        cfg.StrOpt('rabbit_max_retries', default=0),
        cfg.StrOpt('rabbit_retry_backoff', default=2),
        cfg.StrOpt('rabbit_retry_max_backoff', default=30)
    ]

    def __init__(self, conf):
        """Initialize the rabbit notification strategy."""
        self._conf = conf
        self._conf.register_opts(self.opts)

        self.topic = self._conf.rabbit_notification_topic
        # NOTE(comstud): When reading the config file, these values end
        # up being strings, and we need them as ints.
        self.max_retries = int(self._conf.rabbit_max_retries)
        self.retry_backoff = int(self._conf.rabbit_retry_backoff)
        self.retry_max_backoff = int(self._conf.rabbit_retry_max_backoff)

        self.connection = None
        self.retry_attempts = 0
        try:
            self.reconnect()
        except KombuMaxRetriesReached:
            pass

    def _close(self):
        """Close connection to rabbit."""
        try:
            self.connection.close()
        except self.connection_errors:
            pass
        self.connection = None

    def _connect(self, host, port):
""" loc = location.get_location_from_uri(uri) return loc.store_name scrubber_datadir_opt = cfg.StrOpt('scrubber_datadir', default='/var/lib/glance/scrubber') def get_scrubber_datadir(conf): conf.register_opt(scrubber_datadir_opt) return conf.scrubber_datadir delete_opts = [ cfg.BoolOpt('delayed_delete', default=False), cfg.IntOpt('scrub_time', default=0) ] def schedule_delete_from_backend(uri, conf, context, image_id, **kwargs): """ Given a uri and a time, schedule the deletion of an image. """ conf.register_opts(delete_opts) if not conf.delayed_delete: registry.update_image_metadata(context, image_id, {'status': 'deleted'}) try: return delete_from_backend(uri, **kwargs) except (UnsupportedBackend, exception.NotFound):
class Store(glance.store.base.Store):
    """An implementation of the s3 adapter."""

    EXAMPLE_URL = "s3://<ACCESS_KEY>:<SECRET_KEY>@<S3_URL>/<BUCKET>/<OBJ>"

    opts = [
        cfg.StrOpt('s3_store_host'),
        cfg.StrOpt('s3_store_access_key'),
        cfg.StrOpt('s3_store_secret_key'),
        cfg.StrOpt('s3_store_bucket'),
        cfg.StrOpt('s3_store_object_buffer_dir'),
        cfg.BoolOpt('s3_store_create_bucket_on_put', default=False),
    ]

    def configure_add(self):
        """
        Configure the Store to use the stored configuration options

        Any store that needs special configuration should implement
        this method. If the store was not able to successfully configure
        itself, it should raise `exception.BadStoreConfiguration`
        """
        self.conf.register_opts(self.opts)
        self.s3_host = self._option_get('s3_store_host')
        access_key = self._option_get('s3_store_access_key')
        secret_key = self._option_get('s3_store_secret_key')
        # NOTE(jaypipes): Need to encode to UTF-8 here because of a
        # bug in the HMAC library that boto uses.
        # See: http://bugs.python.org/issue5285
        # See: http://trac.edgewall.org/ticket/8083
        self.access_key = access_key.encode('utf-8')
        self.secret_key = secret_key.encode('utf-8')
        self.bucket = self._option_get('s3_store_bucket')

        self.scheme = 's3'
        if self.s3_host.startswith('https://'):
            self.scheme = 's3+https'
            self.full_s3_host = self.s3_host
        elif self.s3_host.startswith('http://'):
            self.full_s3_host = self.s3_host
        else:  # Defaults http
            self.full_s3_host = 'http://' + self.s3_host

        self.s3_store_object_buffer_dir = \
            self.conf.s3_store_object_buffer_dir

    def _option_get(self, param):
        result = getattr(self.conf, param)
        if not result:
            reason = _("Could not find %(param)s in configuration "
                       "options.") % locals()
            logger.error(reason)
            raise exception.BadStoreConfiguration(store_name="s3",
                                                  reason=reason)
        return result

    def get(self, location):
        """
        Takes a `glance.store.location.Location` object that indicates
        where to find the image file, and returns a tuple of generator
        (for reading the image file) and image_size

        :param location: `glance.store.location.Location` object, supplied
                         from glance.store.location.get_location_from_uri()
        :raises `glance.exception.NotFound` if image does not exist
        """
        loc = location.store_location
        from boto.s3.connection import S3Connection

        s3_conn = S3Connection(loc.accesskey, loc.secretkey,
                               host=loc.s3serviceurl,
                               is_secure=(loc.scheme == 's3+https'))
        bucket_obj = get_bucket(s3_conn, loc.bucket)

        key = get_key(bucket_obj, loc.key)

        msg = _("Retrieved image object from S3 using (s3_host=%(s3_host)s, "
                "access_key=%(accesskey)s, bucket=%(bucket)s, "
                "key=%(obj_name)s)") % ({'s3_host': loc.s3serviceurl,
                                         'accesskey': loc.accesskey,
                                         'bucket': loc.bucket,
                                         'obj_name': loc.key})
        logger.debug(msg)

        #if expected_size and (key.size != expected_size):
        #    msg = "Expected %s bytes, got %s" % (expected_size, key.size)
        #    logger.error(msg)
        #    raise glance.store.BackendException(msg)

        key.BufferSize = self.CHUNKSIZE
        return (ChunkedFile(key), key.size)

    def add(self, image_id, image_file, image_size):
        """
        Stores an image file with supplied identifier to the backend
        storage system and returns a `glance.store.ImageAddResult` object
        containing information about the stored image.

        :param image_id: The opaque image identifier
        :param image_file: The image data to write, as a file-like object
        :param image_size: The size of the image data to write, in bytes

        :retval `glance.store.ImageAddResult` object
        :raises `glance.common.exception.Duplicate` if the image already
                existed

        S3 writes the image data using the scheme:
            s3://<ACCESS_KEY>:<SECRET_KEY>@<S3_URL>/<BUCKET>/<OBJ>
        where:
            <ACCESS_KEY> = ``s3_store_access_key``
            <SECRET_KEY> = ``s3_store_secret_key``
            <S3_URL> = ``s3_store_host``
            <BUCKET> = ``s3_store_bucket``
            <OBJ> = The id of the image being added
        """
        from boto.s3.connection import S3Connection

        loc = StoreLocation({'scheme': self.scheme,
                             'bucket': self.bucket,
                             'key': image_id,
                             's3serviceurl': self.full_s3_host,
                             'accesskey': self.access_key,
                             'secretkey': self.secret_key})

        s3_conn = S3Connection(loc.accesskey, loc.secretkey,
                               host=loc.s3serviceurl,
                               is_secure=(loc.scheme == 's3+https'))
        create_bucket_if_missing(self.bucket, s3_conn, self.conf)

        bucket_obj = get_bucket(s3_conn, self.bucket)
        obj_name = str(image_id)

        key = bucket_obj.get_key(obj_name)
        if key and key.exists():
            raise exception.Duplicate(_("S3 already has an image at "
                                        "location %s") % loc.get_uri())

        msg = _("Adding image object to S3 using (s3_host=%(s3_host)s, "
                "access_key=%(access_key)s, bucket=%(bucket)s, "
                "key=%(obj_name)s)") % ({'s3_host': self.s3_host,
                                         'access_key': self.access_key,
                                         'bucket': self.bucket,
                                         'obj_name': obj_name})
        logger.debug(msg)

        key = bucket_obj.new_key(obj_name)

        # We need to wrap image_file, which is a reference to the
        # webob.Request.body_file, with a seekable file-like object,
        # otherwise the call to set_contents_from_file() will die
        # with an error about Input object has no method 'seek'. We
        # might want to call webob.Request.make_body_seekable(), but
        # unfortunately, that method copies the entire image into
        # memory and results in LP Bug #818292 occurring. So, here
        # we write a temporary file in as memory-efficient a manner as
        # possible and then supply the temporary file to S3. We also
        # take this opportunity to calculate the image checksum while
        # writing the tempfile, so we don't need to call key.compute_md5()

        msg = _("Writing request body file to temporary file "
                "for %s") % loc.get_uri()
        logger.debug(msg)

        tmpdir = self.s3_store_object_buffer_dir
        temp_file = tempfile.NamedTemporaryFile(dir=tmpdir)
        checksum = hashlib.md5()
        chunk = image_file.read(self.CHUNKSIZE)
        while chunk:
            checksum.update(chunk)
            temp_file.write(chunk)
            chunk = image_file.read(self.CHUNKSIZE)
        temp_file.flush()

        msg = _("Uploading temporary file to S3 for %s") % loc.get_uri()
        logger.debug(msg)

        # OK, now upload the data into the key
        key.set_contents_from_file(open(temp_file.name, 'r+b'),
                                   replace=False)
        size = key.size
        checksum_hex = checksum.hexdigest()

        logger.debug(_("Wrote %(size)d bytes to S3 key named %(obj_name)s "
                       "with checksum %(checksum_hex)s") % locals())

        return (loc.get_uri(), size, checksum_hex)

    def delete(self, location):
        """
        Takes a `glance.store.location.Location` object that indicates
        where to find the image file to delete

        :param location: `glance.store.location.Location` object, supplied
                         from glance.store.location.get_location_from_uri()
        :raises NotFound if image does not exist
        """
        loc = location.store_location
        from boto.s3.connection import S3Connection

        s3_conn = S3Connection(loc.accesskey, loc.secretkey,
                               host=loc.s3serviceurl,
                               is_secure=(loc.scheme == 's3+https'))
        bucket_obj = get_bucket(s3_conn, loc.bucket)

        # Close the key when we're through.
        key = get_key(bucket_obj, loc.key)

        msg = _("Deleting image object from S3 using (s3_host=%(s3_host)s, "
                "access_key=%(accesskey)s, bucket=%(bucket)s, "
                "key=%(obj_name)s)") % ({'s3_host': loc.s3serviceurl,
                                         'accesskey': loc.accesskey,
                                         'bucket': loc.bucket,
                                         'obj_name': loc.key})
        logger.debug(msg)

        return key.delete()
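
# A standalone sketch of the spool-and-checksum pattern used in add() above:
# the non-seekable request body is copied to a temporary file in fixed-size
# chunks while an MD5 digest is accumulated, so the upload library gets a
# seekable file without buffering the whole image in memory. The helper name
# and chunk size here are illustrative, not taken from the store code.
import hashlib
import tempfile
from io import BytesIO

CHUNKSIZE = 65536

def spool_with_checksum(src, tmpdir=None):
    checksum = hashlib.md5()
    temp_file = tempfile.NamedTemporaryFile(dir=tmpdir)
    chunk = src.read(CHUNKSIZE)
    while chunk:
        checksum.update(chunk)
        temp_file.write(chunk)
        chunk = src.read(CHUNKSIZE)
    temp_file.flush()
    return temp_file, checksum.hexdigest()

tmp, digest = spool_with_checksum(BytesIO(b"image bytes"))
assert digest == hashlib.md5(b"image bytes").hexdigest()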
class RabbitStrategy(strategy.Strategy):
    """A notifier that puts a message on a queue when called."""

    opts = [
        cfg.StrOpt('rabbit_host', default='localhost'),
        cfg.IntOpt('rabbit_port', default=5672),
        cfg.BoolOpt('rabbit_use_ssl', default=False),
        cfg.StrOpt('rabbit_userid', default='guest'),
        cfg.StrOpt('rabbit_password', default='guest'),
        cfg.StrOpt('rabbit_virtual_host', default='/'),
        cfg.StrOpt('rabbit_notification_exchange', default='glance'),
        cfg.StrOpt('rabbit_notification_topic',
                   default='glance_notifications'),
        cfg.StrOpt('rabbit_max_retries', default=0),
        cfg.StrOpt('rabbit_retry_backoff', default=2),
        cfg.StrOpt('rabbit_retry_max_backoff', default=30)
    ]

    def __init__(self, conf):
        """Initialize the rabbit notification strategy."""
        self._conf = conf
        self._conf.register_opts(self.opts)

        self.topic = self._conf.rabbit_notification_topic
        # NOTE(comstud): When reading the config file, these values end
        # up being strings, and we need them as ints.
        self.max_retries = int(self._conf.rabbit_max_retries)
        self.retry_backoff = int(self._conf.rabbit_retry_backoff)
        self.retry_max_backoff = int(self._conf.rabbit_retry_max_backoff)

        self.connection = None
        self.retry_attempts = 0
        try:
            self.reconnect()
        except KombuMaxRetriesReached:
            pass

    def _close(self):
        """Close connection to rabbit."""
        try:
            self.connection.close()
        except self.connection_errors:
            pass
        self.connection = None

    def _connect(self):
        """Connect to rabbit.  Exceptions should be handled by the
        caller.
        """
        log_info = {'hostname': self._conf.rabbit_host,
                    'port': self._conf.rabbit_port}
        if self.connection:
            logger.info(_("Reconnecting to AMQP server on "
                          "%(hostname)s:%(port)d") % log_info)
            self._close()
        else:
            logger.info(_("Connecting to AMQP server on "
                          "%(hostname)s:%(port)d") % log_info)
        self.connection = kombu.connection.BrokerConnection(
            hostname=self._conf.rabbit_host,
            port=self._conf.rabbit_port,
            userid=self._conf.rabbit_userid,
            password=self._conf.rabbit_password,
            virtual_host=self._conf.rabbit_virtual_host,
            ssl=self._conf.rabbit_use_ssl)
        self.connection_errors = self.connection.connection_errors
        self.connection.connect()
        self.channel = self.connection.channel()

        self.exchange = kombu.entity.Exchange(
            channel=self.channel,
            type="topic",
            name=self._conf.rabbit_notification_exchange)

        # NOTE(jerdfelt): Normally the consumer would create the queues,
        # but we do this to ensure that messages don't get dropped if the
        # consumer is started after we do
        for priority in ["WARN", "INFO", "ERROR"]:
            routing_key = "%s.%s" % (self.topic, priority.lower())
            queue = kombu.entity.Queue(channel=self.channel,
                                       exchange=self.exchange,
                                       durable=True,
                                       name=routing_key,
                                       routing_key=routing_key)
            queue.declare()
        logger.info(_("Connected to AMQP server on "
                      "%(hostname)s:%(port)d") % log_info)

    def reconnect(self):
        """Handles reconnecting and re-establishing queues."""
        while True:
            self.retry_attempts += 1
            try:
                self._connect()
                return
            except self.connection_errors, e:
                pass
            except Exception, e:
                # NOTE(comstud): Unfortunately it's possible for amqplib
                # to return an error not covered by its transport
                # connection_errors in the case of a timeout waiting for
                # a protocol response.  (See paste link in LP888621 for
                # nova.)  So, we check all exceptions for 'timeout' in them
                # and try to reconnect in this case.
                if 'timeout' not in str(e):
                    raise

            log_info = {'err_str': str(e),
                        'max_retries': self.max_retries,
                        'hostname': self._conf.rabbit_host,
                        'port': self._conf.rabbit_port}

            if self.max_retries and self.retry_attempts >= self.max_retries:
                logger.exception(_('Unable to connect to AMQP server on '
                                   '%(hostname)s:%(port)d after '
                                   '%(max_retries)d tries: %(err_str)s') %
                                 log_info)
                if self.connection:
                    self._close()
                raise KombuMaxRetriesReached

            sleep_time = self.retry_backoff * self.retry_attempts
            if self.retry_max_backoff:
                sleep_time = min(sleep_time, self.retry_max_backoff)

            log_info['sleep_time'] = sleep_time
            logger.exception(_('AMQP server on %(hostname)s:%(port)d is '
                               'unreachable: %(err_str)s. Trying again in '
                               '%(sleep_time)d seconds.') % log_info)
            time.sleep(sleep_time)
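
# The retry delay above grows linearly with the attempt count and is capped
# by rabbit_retry_max_backoff (a cap of 0 disables capping). A standalone
# sketch of that schedule, using the option defaults (backoff=2, cap=30):
def sleep_time_for(attempt, retry_backoff=2, retry_max_backoff=30):
    sleep_time = retry_backoff * attempt
    if retry_max_backoff:
        sleep_time = min(sleep_time, retry_max_backoff)
    return sleep_time

# Attempts 1, 2, ... sleep 2, 4, 6, ... seconds until the cap is reached.
assert [sleep_time_for(n) for n in (1, 2, 15, 16)] == [2, 4, 30, 30]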
class Scrubber(object):
    CLEANUP_FILE = ".cleanup"

    opts = [
        cfg.BoolOpt('cleanup_scrubber', default=False),
        cfg.IntOpt('cleanup_scrubber_time', default=86400)
    ]

    def __init__(self, conf, **local_conf):
        self.conf = conf
        self.conf.register_opts(self.opts)

        self.datadir = store.get_scrubber_datadir(conf)
        self.cleanup = self.conf.cleanup_scrubber
        self.cleanup_time = self.conf.cleanup_scrubber_time

        host, port = registry.get_registry_addr(conf)

        logger.info(_("Initializing scrubber with conf: %s") %
                    {'datadir': self.datadir, 'cleanup': self.cleanup,
                     'cleanup_time': self.cleanup_time,
                     'registry_host': host, 'registry_port': port})

        self.registry = client.RegistryClient(host, port)

        utils.safe_mkdirs(self.datadir)

        store.create_stores(conf)

    def run(self, pool, event=None):
        now = time.time()

        if not os.path.exists(self.datadir):
            logger.info(_("%s does not exist") % self.datadir)
            return

        delete_work = []
        for root, dirs, files in os.walk(self.datadir):
            for id in files:
                if id == self.CLEANUP_FILE:
                    continue

                file_name = os.path.join(root, id)
                delete_time = os.stat(file_name).st_mtime

                if delete_time > now:
                    continue

                uri, delete_time = read_queue_file(file_name)

                if delete_time > now:
                    continue

                delete_work.append((id, uri, now))

        logger.info(_("Deleting %s images") % len(delete_work))
        pool.starmap(self._delete, delete_work)

        if self.cleanup:
            self._cleanup(pool)

    def _delete(self, id, uri, now):
        file_path = os.path.join(self.datadir, str(id))
        try:
            logger.debug(_("Deleting %(uri)s") % {'uri': uri})
            store.delete_from_backend(uri)
        except store.UnsupportedBackend:
            msg = _("Failed to delete image from store (%(uri)s).")
            logger.error(msg % {'uri': uri})
            write_queue_file(file_path, uri, now)

        self.registry.update_image(id, {'status': 'deleted'})
        utils.safe_remove(file_path)

    def _cleanup(self, pool):
        now = time.time()
        cleanup_file = os.path.join(self.datadir, self.CLEANUP_FILE)
        if not os.path.exists(cleanup_file):
            write_queue_file(cleanup_file, 'cleanup', now)
            return

        _uri, last_run_time = read_queue_file(cleanup_file)
        cleanup_time = last_run_time + self.cleanup_time
        if cleanup_time > now:
            return

        logger.info(_("Getting images deleted before %s") %
                    self.cleanup_time)
        write_queue_file(cleanup_file, 'cleanup', now)

        filters = {'deleted': True, 'is_public': 'none',
                   'status': 'pending_delete'}
        pending_deletes = self.registry.get_images_detailed(filters=filters)

        delete_work = []
        for pending_delete in pending_deletes:
            deleted_at = pending_delete.get('deleted_at')
            if not deleted_at:
                continue

            time_fmt = "%Y-%m-%dT%H:%M:%S"
            delete_time = calendar.timegm(time.strptime(deleted_at,
                                                        time_fmt))

            if delete_time + self.cleanup_time > now:
                continue

            delete_work.append((pending_delete['id'],
                                pending_delete['location'],
                                now))

        logger.info(_("Deleting %s images") % len(delete_work))
        pool.starmap(self._delete, delete_work)
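
# read_queue_file() and write_queue_file() are referenced above but not
# shown. A minimal sketch consistent with how run() and _cleanup() use them:
# each queue file stores a URI plus the scheduled delete time, and the
# file's mtime doubles as a cheap first-pass filter before the file is
# parsed. The two-line file layout is an assumption, not taken from the
# scrubber code.
import os

def write_queue_file(file_path, uri, delete_time):
    with open(file_path, 'w') as f:
        f.write('%s\n%d\n' % (uri, delete_time))
    os.utime(file_path, (delete_time, delete_time))

def read_queue_file(file_path):
    with open(file_path) as f:
        uri = f.readline().strip()
        delete_time = int(f.readline().strip())
    return uri, delete_time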
class RabbitStrategy(object):
    """A notifier that puts a message on a queue when called."""

    opts = [
        cfg.StrOpt('rabbit_host', default='localhost'),
        cfg.IntOpt('rabbit_port', default=5672),
        cfg.BoolOpt('rabbit_use_ssl', default=False),
        cfg.StrOpt('rabbit_userid', default='guest'),
        cfg.StrOpt('rabbit_password', default='guest'),
        cfg.StrOpt('rabbit_virtual_host', default='/'),
        cfg.StrOpt('rabbit_notification_exchange', default='glance'),
        cfg.StrOpt('rabbit_notification_topic',
                   default='glance_notifications')
    ]

    def __init__(self, conf):
        """Initialize the rabbit notification strategy."""
        self._conf = conf
        self._conf.register_opts(self.opts)

        self.topic = self._conf.rabbit_notification_topic
        self.connect()

    def connect(self):
        self.connection = kombu.connection.BrokerConnection(
            hostname=self._conf.rabbit_host,
            port=self._conf.rabbit_port,
            userid=self._conf.rabbit_userid,
            password=self._conf.rabbit_password,
            virtual_host=self._conf.rabbit_virtual_host,
            ssl=self._conf.rabbit_use_ssl)
        self.channel = self.connection.channel()
        self.exchange = kombu.entity.Exchange(
            channel=self.channel,
            type="topic",
            name=self._conf.rabbit_notification_exchange)
        self.exchange.declare()

    def _send_message(self, message, priority):
        routing_key = "%s.%s" % (self.topic, priority.lower())

        # NOTE(jerdfelt): Normally the consumer would create the queue, but
        # we do this to ensure that messages don't get dropped if the
        # consumer is started after we do
        queue = kombu.entity.Queue(channel=self.channel,
                                   exchange=self.exchange,
                                   durable=True,
                                   name=routing_key,
                                   routing_key=routing_key)
        queue.declare()

        msg = self.exchange.Message(json.dumps(message))
        self.exchange.publish(msg, routing_key=routing_key)

    def warn(self, msg):
        self._send_message(msg, "WARN")

    def info(self, msg):
        self._send_message(msg, "INFO")

    def error(self, msg):
        self._send_message(msg, "ERROR")
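
# A hypothetical usage sketch: conf is assumed to be an already-loaded
# glance config object, and the event payloads are made up. Each call
# publishes a JSON body to the "glance" topic exchange under the routing
# keys glance_notifications.info / .warn / .error respectively.
notifier = RabbitStrategy(conf)
notifier.info({'event_type': 'image.upload',
               'payload': {'id': 'some-image-id', 'status': 'active'}})
notifier.error({'event_type': 'image.delete',
                'payload': {'id': 'some-image-id'}})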
class ContextMiddleware(wsgi.Middleware):
    opts = [
        cfg.BoolOpt('owner_is_tenant', default=True),
    ]

    def __init__(self, app, conf, **local_conf):
        self.conf = conf
        self.conf.register_opts(self.opts)

        # Determine the context class to use
        self.ctxcls = RequestContext
        if 'context_class' in local_conf:
            self.ctxcls = utils.import_class(local_conf['context_class'])

        super(ContextMiddleware, self).__init__(app)

    def make_context(self, *args, **kwargs):
        """
        Create a context with the given arguments.
        """
        kwargs.setdefault('owner_is_tenant', self.conf.owner_is_tenant)
        return self.ctxcls(*args, **kwargs)

    def process_request(self, req):
        """
        Extract any authentication information in the request and
        construct an appropriate context from it.

        A few scenarios exist:

        1. If X-Auth-Token is passed in, then consult TENANT and ROLE
           headers to determine permissions.

        2. An X-Auth-Token was passed in, but the Identity-Status is not
           confirmed. For now, just raise a NotAuthorized exception.

        3. X-Auth-Token is omitted. If we were using Keystone, then the
           tokenauth middleware would have rejected the request, so we
           must be using NoAuth. In that case, assume that is_admin=True.
        """
        # TODO(sirp): should we be using the glance_tokenauth shim from
        # Keystone here? If we do, we need to make sure it handles the
        # NoAuth case
        auth_tok = req.headers.get('X-Auth-Token',
                                   req.headers.get('X-Storage-Token'))
        if auth_tok:
            if req.headers.get('X-Identity-Status') == 'Confirmed':
                # 1. Auth-token is passed, check other headers
                user = req.headers.get('X-User')
                tenant = req.headers.get('X-Tenant')
                roles = [r.strip()
                         for r in req.headers.get('X-Role', '').split(',')]
                is_admin = 'Admin' in roles
            else:
                # 2. Identity-Status not confirmed
                # FIXME(sirp): not sure what the correct behavior in this
                # case is; just raising NotAuthorized for now
                raise exception.NotAuthorized()
        else:
            # 3. Auth-token is omitted, assume NoAuth
            user = None
            tenant = None
            roles = []
            is_admin = True

        req.context = self.make_context(auth_tok=auth_tok,
                                        user=user,
                                        tenant=tenant,
                                        roles=roles,
                                        is_admin=is_admin)
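
# An illustrative request for scenario 1 above; all header values are made
# up. After process_request(req), req.context carries the parsed identity.
import webob

req = webob.Request.blank('/v1/images')
req.headers['X-Auth-Token'] = 'token-abc123'
req.headers['X-Identity-Status'] = 'Confirmed'
req.headers['X-User'] = 'jdoe'
req.headers['X-Tenant'] = 'acme'
req.headers['X-Role'] = 'Member, Admin'
# Parsed as: user='jdoe', tenant='acme', roles=['Member', 'Admin'],
# is_admin=True. With no X-Auth-Token at all (scenario 3), the context
# instead gets user=None, tenant=None, roles=[], is_admin=True.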
               default=0,
               help='Minimum seconds between reconnection attempts'),
    cfg.IntOpt('qpid_reconnect_interval_max',
               default=0,
               help='Maximum seconds between reconnection attempts'),
    cfg.IntOpt('qpid_reconnect_interval',
               default=0,
               help='Equivalent to setting max and min to the same value'),
    cfg.IntOpt('qpid_heartbeat',
               default=5,
               help='Seconds between connection keepalive heartbeats'),
    cfg.StrOpt('qpid_protocol',
               default='tcp',
               help="Transport to use, either 'tcp' or 'ssl'"),
    cfg.BoolOpt('qpid_tcp_nodelay',
                default=True,
                help='Disable Nagle algorithm'),
]


class QpidStrategy(strategy.Strategy):
    """A notifier that puts a message on a queue when called."""

    def __init__(self, conf):
        """Initialize the Qpid notification strategy."""
        self.conf = conf
        self.conf.register_opts(qpid_opts)

        self.broker = self.conf.qpid_hostname + ":" + self.conf.qpid_port
        self.connection = qpid.messaging.Connection(self.broker)
        self.connection.username = self.conf.qpid_username
        self.connection.password = self.conf.qpid_password
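
# The fragment above ends before the connection is opened. For orientation
# only, here is the generic qpid.messaging send flow; this is NOT the
# notifier's actual continuation, and the broker address, topic address
# string, and payload are assumptions:
import json
import qpid.messaging

conn = qpid.messaging.Connection("localhost:5672")
conn.open()
session = conn.session()
# "amq.topic/<subject>" addresses publish via the built-in topic exchange.
sender = session.sender("amq.topic/glance_notifications.info")
sender.send(qpid.messaging.Message(content=json.dumps({'event': 'test'})))
conn.close()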