class ContextMiddleware(wsgi.Middleware):
    opts = [
        cfg.BoolOpt('owner_is_tenant', default=True),
        cfg.StrOpt('admin_role', default='admin'),
    ]

    def __init__(self, app, conf, **local_conf):
        self.conf = conf
        self.conf.register_opts(self.opts)
        super(ContextMiddleware, self).__init__(app)

    def process_request(self, req):
        """Convert authentication information into a request context

        Generate a RequestContext object from the available authentication
        headers and store it on the 'context' attribute of the req object.

        :param req: wsgi request object that will be given the context object
        :raises webob.exc.HTTPUnauthorized: when the value of the
                X-Identity-Status header is not 'Confirmed'
        """
        if req.headers.get('X-Identity-Status') != 'Confirmed':
            raise webob.exc.HTTPUnauthorized()

        # NOTE(bcwaldon): X-Roles is a csv string, but we need to parse
        # it into a list to be useful
        roles_header = req.headers.get('X-Roles', '')
        roles = [r.strip().lower() for r in roles_header.split(',')]

        # NOTE(bcwaldon): This header is deprecated in favor of X-Auth-Token
        deprecated_token = req.headers.get('X-Storage-Token')

        kwargs = {
            'user': req.headers.get('X-User-Id'),
            'tenant': req.headers.get('X-Tenant-Id'),
            'roles': roles,
            'is_admin': self.conf.admin_role.strip().lower() in roles,
            'auth_tok': req.headers.get('X-Auth-Token', deprecated_token),
            'owner_is_tenant': self.conf.owner_is_tenant,
        }

        req.context = RequestContext(**kwargs)
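# A minimal sketch (not part of the middleware) of how the header handling in
# process_request() above behaves; the header values here are made up for
# illustration. X-Roles is split on commas, stripped, and lowercased, and
# is_admin is True only when the configured admin_role appears in that list.
_example_headers = {
    'X-Identity-Status': 'Confirmed',
    'X-Roles': 'Member, admin',
}
_example_roles = [r.strip().lower()
                  for r in _example_headers.get('X-Roles', '').split(',')]
assert _example_roles == ['member', 'admin']
assert 'admin' in _example_roles  # with the default admin_role of 'admin'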
import glance.store.location

try:
    import swiftclient
except ImportError:
    pass

LOG = logging.getLogger(__name__)

DEFAULT_CONTAINER = 'glance'
DEFAULT_LARGE_OBJECT_SIZE = 5 * 1024  # 5GB
DEFAULT_LARGE_OBJECT_CHUNK_SIZE = 200  # 200M
ONE_MB = 1000 * 1024

swift_opts = [
    cfg.BoolOpt('swift_enable_snet', default=False),
    cfg.StrOpt('swift_store_auth_address'),
    cfg.StrOpt('swift_store_user', secret=True),
    cfg.StrOpt('swift_store_key', secret=True),
    cfg.StrOpt('swift_store_auth_version', default='2'),
    cfg.StrOpt('swift_store_container', default=DEFAULT_CONTAINER),
    cfg.IntOpt('swift_store_large_object_size',
               default=DEFAULT_LARGE_OBJECT_SIZE),
    cfg.IntOpt('swift_store_large_object_chunk_size',
               default=DEFAULT_LARGE_OBJECT_CHUNK_SIZE),
    cfg.BoolOpt('swift_store_create_container_on_put', default=False),
    cfg.BoolOpt('swift_store_multi_tenant', default=False),
    cfg.ListOpt('swift_store_admin_tenants', default=[]),
]

CONF = cfg.CONF
            msg = 'Image %s owner %s -> %s' % (image_id, owner_name, owner_id)
            LOG.info(msg)
    return image_owner_map


def update_image_owners(image_owner_map, db, context):
    for (image_id, image_owner) in image_owner_map.items():
        db.image_update(context, image_id, {'owner': image_owner})
        LOG.info('Image %s successfully updated.' % image_id)


if __name__ == "__main__":
    config = cfg.CONF
    extra_cli_opts = [
        cfg.BoolOpt('dry-run',
                    help='Print output but do not make db changes.'),
        cfg.StrOpt('keystone-auth-uri',
                   help='Authentication endpoint'),
        cfg.StrOpt('keystone-admin-tenant-name',
                   help='Administrative user\'s tenant name'),
        cfg.StrOpt('keystone-admin-user',
                   help='Administrative user\'s id'),
        cfg.StrOpt('keystone-admin-password',
                   help='Administrative user\'s password'),
    ]
    config.register_cli_opts(extra_cli_opts)
    config(project='glance', prog='glance-registry')

    db_api.configure_db()
    context = glance.common.context.RequestContext(is_admin=True)

    auth_uri = config.keystone_auth_uri
import logging.handlers
import os
import sys

from paste import deploy

from glance.openstack.common import cfg
from glance.version import version_info as version

paste_deploy_opts = [
    cfg.StrOpt('flavor'),
    cfg.StrOpt('config_file'),
]
common_opts = [
    cfg.BoolOpt('allow_additional_image_properties', default=True,
                help=_('Whether to allow users to specify image properties '
                       'beyond what the image schema provides')),
    cfg.StrOpt('data_api', default='glance.db.sqlalchemy.api',
               help=_('Python module path of data access API')),
    cfg.IntOpt('limit_param_default', default=25,
               help=_('Default value for the number of items returned by a '
                      'request if not specified explicitly in the request')),
    cfg.IntOpt('api_limit_max', default=1000,
               help=_('Maximum permissible number of items that could be '
                      'returned by a request')),
    cfg.BoolOpt('show_image_direct_url', default=False,
                help=_(
from glance.openstack.common import cfg
import glance.openstack.common.log as logging
from glance.registry import client

LOG = logging.getLogger(__name__)

registry_addr_opts = [
    cfg.StrOpt('registry_host', default='0.0.0.0'),
    cfg.IntOpt('registry_port', default=9191),
]
registry_client_opts = [
    cfg.StrOpt('registry_client_protocol', default='http'),
    cfg.StrOpt('registry_client_key_file'),
    cfg.StrOpt('registry_client_cert_file'),
    cfg.StrOpt('registry_client_ca_file'),
    cfg.BoolOpt('registry_client_insecure', default=False),
    cfg.StrOpt('metadata_encryption_key', secret=True),
]
registry_client_ctx_opts = [
    cfg.StrOpt('admin_user', secret=True),
    cfg.StrOpt('admin_password', secret=True),
    cfg.StrOpt('admin_tenant_name', secret=True),
    cfg.StrOpt('auth_url'),
    cfg.StrOpt('auth_strategy', default='noauth'),
    cfg.StrOpt('auth_region'),
]

CONF = cfg.CONF
CONF.register_opts(registry_addr_opts)
CONF.register_opts(registry_client_opts)
CONF.register_opts(registry_client_ctx_opts)
class Store(glance.store.base.Store):
    """An implementation of the swift backend adapter."""

    EXAMPLE_URL = "swift://<USER>:<KEY>@<AUTH_ADDRESS>/<CONTAINER>/<FILE>"

    CHUNKSIZE = 65536

    opts = [
        cfg.BoolOpt('swift_enable_snet', default=False),
        cfg.StrOpt('swift_store_auth_address'),
        cfg.StrOpt('swift_store_user', secret=True),
        cfg.StrOpt('swift_store_key', secret=True),
        cfg.StrOpt('swift_store_auth_version', default='2'),
        cfg.StrOpt('swift_store_container', default=DEFAULT_CONTAINER),
        cfg.IntOpt('swift_store_large_object_size',
                   default=DEFAULT_LARGE_OBJECT_SIZE),
        cfg.IntOpt('swift_store_large_object_chunk_size',
                   default=DEFAULT_LARGE_OBJECT_CHUNK_SIZE),
        cfg.BoolOpt('swift_store_create_container_on_put', default=False),
    ]

    def get_schemes(self):
        return ('swift+https', 'swift', 'swift+http')

    def configure(self):
        self.conf.register_opts(self.opts)
        self.snet = self.conf.swift_enable_snet
        self.auth_version = self._option_get('swift_store_auth_version')

    def configure_add(self):
        """
        Configure the Store to use the stored configuration options
        Any store that needs special configuration should implement
        this method. If the store was not able to successfully configure
        itself, it should raise `exception.BadStoreConfiguration`
        """
        self.auth_address = self._option_get('swift_store_auth_address')
        self.user = self._option_get('swift_store_user')
        self.key = self._option_get('swift_store_key')
        self.container = self.conf.swift_store_container
        try:
            # The config file has swift_store_large_object_*size in MB, but
            # internally we store it in bytes, since the image_size parameter
            # passed to add() is also in bytes.
            _obj_size = self.conf.swift_store_large_object_size
            self.large_object_size = _obj_size * ONE_MB
            _obj_chunk_size = self.conf.swift_store_large_object_chunk_size
            self.large_object_chunk_size = _obj_chunk_size * ONE_MB
        except cfg.ConfigFileValueError, e:
            reason = _("Error in configuration conf: %s") % e
            logger.error(reason)
            raise exception.BadStoreConfiguration(store_name="swift",
                                                  reason=reason)

        self.scheme = 'swift+https'
        if self.auth_address.startswith('http://'):
            self.scheme = 'swift+http'
            self.full_auth_address = self.auth_address
        elif self.auth_address.startswith('https://'):
            self.full_auth_address = self.auth_address
        else:  # Defaults https
            self.full_auth_address = 'https://' + self.auth_address
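# A minimal sketch (not part of the store) of the unit handling in
# configure_add() above: the swift_store_large_object_*size options are given
# in MB in the config file and are converted to bytes using the module's
# ONE_MB constant defined earlier.
_example_large_object_size = DEFAULT_LARGE_OBJECT_SIZE * ONE_MB    # 5120 MB of bytes
_example_chunk_size = DEFAULT_LARGE_OBJECT_CHUNK_SIZE * ONE_MB     # 200 MB of bytes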
class RabbitStrategy(strategy.Strategy):
    """A notifier that puts a message on a queue when called."""

    opts = [
        cfg.StrOpt('rabbit_host', default='localhost'),
        cfg.IntOpt('rabbit_port', default=5672),
        cfg.BoolOpt('rabbit_use_ssl', default=False),
        cfg.StrOpt('rabbit_userid', default='guest'),
        cfg.StrOpt('rabbit_password', default='guest'),
        cfg.StrOpt('rabbit_virtual_host', default='/'),
        cfg.StrOpt('rabbit_notification_exchange', default='glance'),
        cfg.StrOpt('rabbit_notification_topic',
                   default='glance_notifications'),
        cfg.StrOpt('rabbit_max_retries', default=0),
        cfg.StrOpt('rabbit_retry_backoff', default=2),
        cfg.StrOpt('rabbit_retry_max_backoff', default=30)
    ]

    def __init__(self, conf):
        """Initialize the rabbit notification strategy."""
        self._conf = conf
        self._conf.register_opts(self.opts)

        self.topic = self._conf.rabbit_notification_topic
        self.max_retries = self._conf.rabbit_max_retries
        # NOTE(comstud): When reading the config file, these values end
        # up being strings, and we need them as ints.
        self.retry_backoff = int(self._conf.rabbit_retry_backoff)
        self.retry_max_backoff = int(self._conf.rabbit_retry_max_backoff)

        self.connection = None
        self.retry_attempts = 0
        try:
            self.reconnect()
        except KombuMaxRetriesReached:
            pass

    def _close(self):
        """Close connection to rabbit."""
        try:
            self.connection.close()
        except self.connection_errors:
            pass
        self.connection = None

    def _connect(self):
        """Connect to rabbit.  Exceptions should be handled by the caller."""
        log_info = {}
        log_info['hostname'] = self._conf.rabbit_host
        log_info['port'] = self._conf.rabbit_port
        if self.connection:
            logger.info(_("Reconnecting to AMQP server on "
                          "%(hostname)s:%(port)d") % log_info)
            self._close()
        else:
            logger.info(_("Connecting to AMQP server on "
                          "%(hostname)s:%(port)d") % log_info)
        self.connection = kombu.connection.BrokerConnection(
            hostname=self._conf.rabbit_host,
            port=self._conf.rabbit_port,
            userid=self._conf.rabbit_userid,
            password=self._conf.rabbit_password,
            virtual_host=self._conf.rabbit_virtual_host,
            ssl=self._conf.rabbit_use_ssl)
        self.connection_errors = self.connection.connection_errors
        self.connection.connect()
        self.channel = self.connection.channel()

        self.exchange = kombu.entity.Exchange(
            channel=self.channel,
            type="topic",
            name=self._conf.rabbit_notification_exchange)

        # NOTE(jerdfelt): Normally the consumer would create the queues,
        # but we do this to ensure that messages don't get dropped if the
        # consumer is started after we do
        for priority in ["WARN", "INFO", "ERROR"]:
            routing_key = "%s.%s" % (self.topic, priority.lower())
            queue = kombu.entity.Queue(channel=self.channel,
                                       exchange=self.exchange,
                                       durable=True,
                                       name=routing_key,
                                       routing_key=routing_key)
            queue.declare()
        logger.info(_("Connected to AMQP server on "
                      "%(hostname)s:%(port)d") % log_info)

    def reconnect(self):
        """Handles reconnecting and re-establishing queues."""
        while True:
            self.retry_attempts += 1
            try:
                self._connect()
                return
            except self.connection_errors, e:
                pass
            except Exception, e:
                # NOTE(comstud): Unfortunately it's possible for amqplib
                # to return an error not covered by its transport
                # connection_errors in the case of a timeout waiting for
                # a protocol response.  (See paste link in LP888621 for
                # nova.)  So, we check all exceptions for 'timeout' in them
                # and try to reconnect in this case.
                if 'timeout' not in str(e):
                    raise

            log_info = {}
            log_info['err_str'] = str(e)
            log_info['max_retries'] = self.max_retries
            log_info['hostname'] = self._conf.rabbit_host
            log_info['port'] = self._conf.rabbit_port

            if self.max_retries and self.retry_attempts >= self.max_retries:
                logger.exception(_('Unable to connect to AMQP server on '
                                   '%(hostname)s:%(port)d after '
                                   '%(max_retries)d tries: %(err_str)s') %
                                 log_info)
                if self.connection:
                    self._close()
                raise KombuMaxRetriesReached

            sleep_time = self.retry_backoff * self.retry_attempts
            if self.retry_max_backoff:
                sleep_time = min(sleep_time, self.retry_max_backoff)

            log_info['sleep_time'] = sleep_time
            logger.exception(_('AMQP server on %(hostname)s:%(port)d is'
                               ' unreachable: %(err_str)s. Trying again in '
                               '%(sleep_time)d seconds.') % log_info)
            time.sleep(sleep_time)
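# A minimal sketch (not part of the strategy) of the retry schedule that
# reconnect() above produces: the sleep grows linearly with the attempt count
# and is capped at rabbit_retry_max_backoff. With the defaults (backoff=2,
# max_backoff=30), successive attempts sleep 2, 4, 6, ... seconds up to a
# ceiling of 30 seconds.
def _example_backoff_schedule(attempts, retry_backoff=2, retry_max_backoff=30):
    return [min(retry_backoff * n, retry_max_backoff)
            for n in range(1, attempts + 1)]

assert _example_backoff_schedule(3) == [2, 4, 6]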
from glance import context
from glance.common import utils
from glance.openstack.common import cfg
import glance.openstack.common.log as logging
from glance import registry
from glance import store
import glance.store.filesystem
import glance.store.http
import glance.store.s3
import glance.store.swift

LOG = logging.getLogger(__name__)

scrubber_opts = [
    cfg.BoolOpt('cleanup_scrubber', default=False),
    cfg.IntOpt('cleanup_scrubber_time', default=86400)
]

CONF = cfg.CONF
CONF.register_opts(scrubber_opts)


class Daemon(object):
    def __init__(self, wakeup_time=300, threads=1000):
        LOG.info(_("Starting Daemon: wakeup_time=%(wakeup_time)s "
                   "threads=%(threads)s") % locals())
        self.wakeup_time = wakeup_time
        self.event = eventlet.event.Event()
        self.pool = eventlet.greenpool.GreenPool(threads)
               default=0,
               help='Minimum seconds between reconnection attempts'),
    cfg.IntOpt('qpid_reconnect_interval_max',
               default=0,
               help='Maximum seconds between reconnection attempts'),
    cfg.IntOpt('qpid_reconnect_interval',
               default=0,
               help='Equivalent to setting max and min to the same value'),
    cfg.IntOpt('qpid_heartbeat',
               default=60,
               help='Seconds between connection keepalive heartbeats'),
    cfg.StrOpt('qpid_protocol',
               default='tcp',
               help="Transport to use, either 'tcp' or 'ssl'"),
    cfg.BoolOpt('qpid_tcp_nodelay',
                default=True,
                help='Disable Nagle algorithm'),
]

CONF = cfg.CONF
CONF.register_opts(qpid_opts)


class QpidStrategy(strategy.Strategy):
    """A notifier that puts a message on a queue when called."""

    def __init__(self):
        """Initialize the Qpid notification strategy."""
        self.broker = CONF.qpid_hostname + ":" + CONF.qpid_port
        self.connection = qpid.messaging.Connection(self.broker)
        self.connection.username = CONF.qpid_username
class Store(glance.store.base.Store):
    """An implementation of the s3 adapter."""

    EXAMPLE_URL = "s3://<ACCESS_KEY>:<SECRET_KEY>@<S3_URL>/<BUCKET>/<OBJ>"

    opts = [
        cfg.StrOpt('s3_store_host'),
        cfg.StrOpt('s3_store_access_key', secret=True),
        cfg.StrOpt('s3_store_secret_key', secret=True),
        cfg.StrOpt('s3_store_bucket'),
        cfg.StrOpt('s3_store_object_buffer_dir'),
        cfg.BoolOpt('s3_store_create_bucket_on_put', default=False),
    ]

    def get_schemes(self):
        return ('s3', 's3+http', 's3+https')

    def configure_add(self):
        """
        Configure the Store to use the stored configuration options
        Any store that needs special configuration should implement
        this method. If the store was not able to successfully configure
        itself, it should raise `exception.BadStoreConfiguration`
        """
        self.conf.register_opts(self.opts)
        self.s3_host = self._option_get('s3_store_host')
        access_key = self._option_get('s3_store_access_key')
        secret_key = self._option_get('s3_store_secret_key')
        # NOTE(jaypipes): Need to encode to UTF-8 here because of a
        # bug in the HMAC library that boto uses.
        # See: http://bugs.python.org/issue5285
        # See: http://trac.edgewall.org/ticket/8083
        self.access_key = access_key.encode('utf-8')
        self.secret_key = secret_key.encode('utf-8')
        self.bucket = self._option_get('s3_store_bucket')

        self.scheme = 's3'
        if self.s3_host.startswith('https://'):
            self.scheme = 's3+https'
            self.full_s3_host = self.s3_host
        elif self.s3_host.startswith('http://'):
            self.full_s3_host = self.s3_host
        else:  # Defaults http
            self.full_s3_host = 'http://' + self.s3_host

        self.s3_store_object_buffer_dir = self.conf.s3_store_object_buffer_dir

    def _option_get(self, param):
        result = getattr(self.conf, param)
        if not result:
            reason = _("Could not find %(param)s in configuration "
                       "options.") % locals()
            logger.error(reason)
            raise exception.BadStoreConfiguration(store_name="s3",
                                                  reason=reason)
        return result

    def get(self, location):
        """
        Takes a `glance.store.location.Location` object that indicates
        where to find the image file, and returns a tuple of generator
        (for reading the image file) and image_size

        :param location `glance.store.location.Location` object, supplied
                        from glance.store.location.get_location_from_uri()
        :raises `glance.exception.NotFound` if image does not exist
        """
        key = self._retrieve_key(location)

        key.BufferSize = self.CHUNKSIZE

        class ChunkedIndexable(glance.store.Indexable):
            def another(self):
                return (self.wrapped.fp.read(ChunkedFile.CHUNKSIZE)
                        if self.wrapped.fp else None)

        return (ChunkedIndexable(ChunkedFile(key), key.size), key.size)

    def get_size(self, location):
        """
        Takes a `glance.store.location.Location` object that indicates
        where to find the image file, and returns the image_size (or 0
        if unavailable)

        :param location `glance.store.location.Location` object, supplied
                        from glance.store.location.get_location_from_uri()
        """
        try:
            key = self._retrieve_key(location)
            return key.size
        except Exception:
            return 0

    def _retrieve_key(self, location):
        loc = location.store_location
        from boto.s3.connection import S3Connection

        s3_conn = S3Connection(loc.accesskey, loc.secretkey,
                               host=loc.s3serviceurl,
                               is_secure=(loc.scheme == 's3+https'))
        bucket_obj = get_bucket(s3_conn, loc.bucket)

        key = get_key(bucket_obj, loc.key)

        msg = _("Retrieved image object from S3 using (s3_host=%(s3_host)s, "
                "access_key=%(accesskey)s, bucket=%(bucket)s, "
                "key=%(obj_name)s)") % ({'s3_host': loc.s3serviceurl,
                                         'accesskey': loc.accesskey,
                                         'bucket': loc.bucket,
                                         'obj_name': loc.key})
        logger.debug(msg)

        return key

    def add(self, image_id, image_file, image_size):
        """
        Stores an image file with supplied identifier to the backend
        storage system and returns an `glance.store.ImageAddResult` object
        containing information about the stored image.

        :param image_id: The opaque image identifier
        :param image_file: The image data to write, as a file-like object
        :param image_size: The size of the image data to write, in bytes

        :retval `glance.store.ImageAddResult` object
        :raises `glance.common.exception.Duplicate` if the image already
                existed

        S3 writes the image data using the scheme:
            s3://<ACCESS_KEY>:<SECRET_KEY>@<S3_URL>/<BUCKET>/<OBJ>
        where:
            <ACCESS_KEY> = ``s3_store_access_key``
            <SECRET_KEY> = ``s3_store_secret_key``
            <S3_URL> = ``s3_store_host``
            <BUCKET> = ``s3_store_bucket``
            <OBJ> = The id of the image being added
        """
        from boto.s3.connection import S3Connection

        loc = StoreLocation({'scheme': self.scheme,
                             'bucket': self.bucket,
                             'key': image_id,
                             's3serviceurl': self.full_s3_host,
                             'accesskey': self.access_key,
                             'secretkey': self.secret_key})

        s3_conn = S3Connection(loc.accesskey, loc.secretkey,
                               host=loc.s3serviceurl,
                               is_secure=(loc.scheme == 's3+https'))

        create_bucket_if_missing(self.bucket, s3_conn, self.conf)

        bucket_obj = get_bucket(s3_conn, self.bucket)
        obj_name = str(image_id)

        def _sanitize(uri):
            return re.sub('//.*:.*@',
                          '//s3_store_secret_key:s3_store_access_key@',
                          uri)

        key = bucket_obj.get_key(obj_name)
        if key and key.exists():
            raise exception.Duplicate(_("S3 already has an image at "
                                        "location %s") %
                                      _sanitize(loc.get_uri()))

        msg = _("Adding image object to S3 using (s3_host=%(s3_host)s, "
                "access_key=%(access_key)s, bucket=%(bucket)s, "
                "key=%(obj_name)s)") % ({'s3_host': self.s3_host,
                                         'access_key': self.access_key,
                                         'bucket': self.bucket,
                                         'obj_name': obj_name})
        logger.debug(msg)

        key = bucket_obj.new_key(obj_name)

        # We need to wrap image_file, which is a reference to the
        # webob.Request.body_file, with a seekable file-like object,
        # otherwise the call to set_contents_from_file() will die
        # with an error about Input object has no method 'seek'. We
        # might want to call webob.Request.make_body_seekable(), but
        # unfortunately, that method copies the entire image into
        # memory and results in LP Bug #818292 occurring. So, here
        # we write temporary file in as memory-efficient manner as
        # possible and then supply the temporary file to S3. We also
        # take this opportunity to calculate the image checksum while
        # writing the tempfile, so we don't need to call key.compute_md5()

        msg = _("Writing request body file to temporary file "
                "for %s") % _sanitize(loc.get_uri())
        logger.debug(msg)

        tmpdir = self.s3_store_object_buffer_dir
        temp_file = tempfile.NamedTemporaryFile(dir=tmpdir)
        checksum = hashlib.md5()
        for chunk in utils.chunkreadable(image_file, self.CHUNKSIZE):
            checksum.update(chunk)
            temp_file.write(chunk)
        temp_file.flush()

        msg = (_("Uploading temporary file to S3 for %s") %
               _sanitize(loc.get_uri()))
        logger.debug(msg)

        # OK, now upload the data into the key
        key.set_contents_from_file(open(temp_file.name, 'r+b'), replace=False)
        size = key.size
        checksum_hex = checksum.hexdigest()

        logger.debug(_("Wrote %(size)d bytes to S3 key named %(obj_name)s "
                       "with checksum %(checksum_hex)s") % locals())

        return (loc.get_uri(), size, checksum_hex)

    def delete(self, location):
        """
        Takes a `glance.store.location.Location` object that indicates
        where to find the image file to delete

        :location `glance.store.location.Location` object, supplied
                  from glance.store.location.get_location_from_uri()

        :raises NotFound if image does not exist
        """
        loc = location.store_location
        from boto.s3.connection import S3Connection

        s3_conn = S3Connection(loc.accesskey, loc.secretkey,
                               host=loc.s3serviceurl,
                               is_secure=(loc.scheme == 's3+https'))
        bucket_obj = get_bucket(s3_conn, loc.bucket)

        # Close the key when we're through.
        key = get_key(bucket_obj, loc.key)

        msg = _("Deleting image object from S3 using (s3_host=%(s3_host)s, "
                "access_key=%(accesskey)s, bucket=%(bucket)s, "
                "key=%(obj_name)s)") % ({'s3_host': loc.s3serviceurl,
                                         'accesskey': loc.accesskey,
                                         'bucket': loc.bucket,
                                         'obj_name': loc.key})
        logger.debug(msg)

        return key.delete()
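# A minimal sketch (not part of the store) of what the _sanitize() helper in
# add() above does to a location URI before it is logged or raised: the
# credential portion of the URI is replaced with placeholder text. The URI
# below is made up for illustration.
import re as _re
_example_uri = 's3://ACCESS:SECRET@s3.example.com/glance/some-image-id'
_masked = _re.sub('//.*:.*@',
                  '//s3_store_secret_key:s3_store_access_key@',
                  _example_uri)
assert _masked == ('s3://s3_store_secret_key:s3_store_access_key@'
                   's3.example.com/glance/some-image-id')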
""" loc = location.get_location_from_uri(uri) return loc.store_name scrubber_datadir_opt = cfg.StrOpt('scrubber_datadir', default='/var/lib/glance/scrubber') def get_scrubber_datadir(conf): conf.register_opt(scrubber_datadir_opt) return conf.scrubber_datadir delete_opts = [ cfg.BoolOpt('delayed_delete', default=False), cfg.IntOpt('scrub_time', default=0) ] def schedule_delete_from_backend(uri, conf, context, image_id, **kwargs): """ Given a uri and a time, schedule the deletion of an image. """ conf.register_opts(delete_opts) if not conf.delayed_delete: registry.update_image_metadata(context, image_id, {'status': 'deleted'}) try: return delete_from_backend(uri, **kwargs) except (UnsupportedBackend, exception.StoreDeleteNotSupported,
import json
import time

import kombu.connection
import kombu.entity

from glance.notifier import strategy
from glance.openstack.common import cfg
import glance.openstack.common.log as logging

LOG = logging.getLogger(__name__)

rabbit_opts = [
    cfg.StrOpt('rabbit_host', default='localhost'),
    cfg.IntOpt('rabbit_port', default=5672),
    cfg.BoolOpt('rabbit_use_ssl', default=False),
    cfg.StrOpt('rabbit_userid', default='guest'),
    cfg.StrOpt('rabbit_password', default='guest'),
    cfg.StrOpt('rabbit_virtual_host', default='/'),
    cfg.StrOpt('rabbit_notification_exchange', default='glance'),
    cfg.StrOpt('rabbit_notification_topic', default='glance_notifications'),
    cfg.StrOpt('rabbit_max_retries', default=0),
    cfg.StrOpt('rabbit_retry_backoff', default=2),
    cfg.StrOpt('rabbit_retry_max_backoff', default=30),
    cfg.BoolOpt('rabbit_durable_queues', default=False),
]

CONF = cfg.CONF
CONF.register_opts(rabbit_opts)
class Scrubber(object):
    CLEANUP_FILE = ".cleanup"

    opts = [
        cfg.BoolOpt('cleanup_scrubber', default=False),
        cfg.IntOpt('cleanup_scrubber_time', default=86400)
    ]

    def __init__(self, conf, **local_conf):
        self.conf = conf
        self.conf.register_opts(self.opts)

        self.datadir = store.get_scrubber_datadir(conf)
        self.cleanup = self.conf.cleanup_scrubber
        self.cleanup_time = self.conf.cleanup_scrubber_time

        host, port = registry.get_registry_addr(conf)

        logger.info(_("Initializing scrubber with conf: %s") %
                    {'datadir': self.datadir, 'cleanup': self.cleanup,
                     'cleanup_time': self.cleanup_time,
                     'registry_host': host, 'registry_port': port})

        self.registry = client.RegistryClient(host, port)

        utils.safe_mkdirs(self.datadir)

        store.create_stores(conf)

    def run(self, pool, event=None):
        now = time.time()

        if not os.path.exists(self.datadir):
            logger.info(_("%s does not exist") % self.datadir)
            return

        delete_work = []
        for root, dirs, files in os.walk(self.datadir):
            for id in files:
                if id == self.CLEANUP_FILE:
                    continue

                file_name = os.path.join(root, id)
                delete_time = os.stat(file_name).st_mtime

                if delete_time > now:
                    continue

                uri, delete_time = read_queue_file(file_name)

                if delete_time > now:
                    continue

                delete_work.append((id, uri, now))

        logger.info(_("Deleting %s images") % len(delete_work))
        pool.starmap(self._delete, delete_work)

        if self.cleanup:
            self._cleanup(pool)

    def _delete(self, id, uri, now):
        file_path = os.path.join(self.datadir, str(id))
        try:
            logger.debug(_("Deleting %(uri)s") % {'uri': uri})
            store.delete_from_backend(uri)
        except store.UnsupportedBackend:
            msg = _("Failed to delete image from store (%(uri)s).")
            logger.error(msg % {'uri': uri})
            write_queue_file(file_path, uri, now)

        self.registry.update_image(id, {'status': 'deleted'})
        utils.safe_remove(file_path)

    def _cleanup(self, pool):
        now = time.time()
        cleanup_file = os.path.join(self.datadir, self.CLEANUP_FILE)
        if not os.path.exists(cleanup_file):
            write_queue_file(cleanup_file, 'cleanup', now)
            return

        _uri, last_run_time = read_queue_file(cleanup_file)
        cleanup_time = last_run_time + self.cleanup_time
        if cleanup_time > now:
            return

        logger.info(_("Getting images deleted before %s") % self.cleanup_time)
        write_queue_file(cleanup_file, 'cleanup', now)

        filters = {'deleted': True, 'is_public': 'none',
                   'status': 'pending_delete'}
        pending_deletes = self.registry.get_images_detailed(filters=filters)

        delete_work = []
        for pending_delete in pending_deletes:
            deleted_at = pending_delete.get('deleted_at')
            if not deleted_at:
                continue

            time_fmt = "%Y-%m-%dT%H:%M:%S"
            # NOTE: Strip off microseconds which may occur after the last '.,'
            # Example: 2012-07-07T19:14:34.974216
            date_str = deleted_at.rsplit('.', 1)[0].rsplit(',', 1)[0]
            delete_time = calendar.timegm(time.strptime(date_str, time_fmt))

            if delete_time + self.cleanup_time > now:
                continue

            delete_work.append((pending_delete['id'],
                                pending_delete['location'],
                                now))

        logger.info(_("Deleting %s images") % len(delete_work))
        pool.starmap(self._delete, delete_work)
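# A minimal sketch (not part of the scrubber) of the deleted_at parsing done
# in _cleanup() above: microseconds after the last '.' (or ',') are stripped
# before the timestamp is converted to seconds since the epoch. The value
# below is made up for illustration.
_example_deleted_at = '2012-07-07T19:14:34.974216'
_example_date_str = _example_deleted_at.rsplit('.', 1)[0].rsplit(',', 1)[0]
assert _example_date_str == '2012-07-07T19:14:34'
_example_delete_time = calendar.timegm(
    time.strptime(_example_date_str, "%Y-%m-%dT%H:%M:%S"))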
import logging.handlers
import os
import sys

from glance.common import wsgi
from glance.openstack.common import cfg
from glance import version

paste_deploy_group = cfg.OptGroup('paste_deploy')
paste_deploy_opts = [
    cfg.StrOpt('flavor'),
    cfg.StrOpt('config_file'),
]
common_opts = [
    cfg.BoolOpt('allow_additional_image_properties', default=True,
                help='Whether to allow users to specify image properties '
                     'beyond what the image schema provides'),
]


class GlanceConfigOpts(cfg.CommonConfigOpts):

    def __init__(self, default_config_files=None, **kwargs):
        super(GlanceConfigOpts, self).__init__(
            project='glance',
            version='%%prog %s' % version.version_string(),
            default_config_files=default_config_files,
            **kwargs)
        self.register_opts(common_opts)
        self.default_paste_file = self.prog + '-paste.ini'
_MAKER = None
_MAX_RETRIES = None
_RETRY_INTERVAL = None
BASE = models.BASE
sa_logger = None
LOG = os_logging.getLogger(__name__)

STATUSES = ['active', 'saving', 'queued', 'killed',
            'pending_delete', 'deleted']

db_opts = [
    cfg.IntOpt('sql_idle_timeout', default=3600),
    cfg.IntOpt('sql_max_retries', default=10),
    cfg.IntOpt('sql_retry_interval', default=1),
    cfg.BoolOpt('db_auto_create', default=False),
]

CONF = cfg.CONF
CONF.register_opts(db_opts)


def ping_listener(dbapi_conn, connection_rec, connection_proxy):
    """
    Ensures that MySQL connections checked out of the pool are alive.

    Borrowed from:
    http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f
    """
                         'min_disk', 'min_ram', 'is_public',
                         'location', 'checksum', 'owner',
                         'protected'])

CONTAINER_FORMATS = ['ami', 'ari', 'aki', 'bare', 'ovf']
DISK_FORMATS = ['ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2', 'vdi',
                'iso']
STATUSES = ['active', 'saving', 'queued', 'killed', 'pending_delete',
            'deleted']

db_opts = [
    cfg.IntOpt('sql_idle_timeout', default=3600),
    cfg.StrOpt('sql_connection', default='sqlite:///glance.sqlite'),
    cfg.IntOpt('sql_max_retries', default=10),
    cfg.IntOpt('sql_retry_interval', default=1),
    cfg.BoolOpt('db_auto_create', default=True),
]


class MySQLPingListener(object):

    """
    Ensures that MySQL connections checked out of the pool are alive.

    Borrowed from:
    http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f
    """

    def checkout(self, dbapi_con, con_record, con_proxy):
        try:
               '%(message)s',
               help='format string to use for log messages without context'),
    cfg.StrOpt('logging_debug_format_suffix',
               default='from (pid=%(process)d) %(funcName)s '
                       '%(pathname)s:%(lineno)d',
               help='data to append to log format when level is DEBUG'),
    cfg.StrOpt('logging_exception_prefix',
               default='%(asctime)s TRACE %(name)s %(instance)s',
               help='prefix each line of exception output with this format'),
    cfg.ListOpt('default_log_levels',
                default=[
                    'amqplib=WARN',
                    'sqlalchemy=WARN',
                    'boto=WARN',
                    'suds=INFO',
                    'keystone=INFO',
                    'eventlet.wsgi.server=WARN'
                ],
                help='list of logger=LEVEL pairs'),
    cfg.BoolOpt('publish_errors',
                default=False,
                help='publish error events'),

    # NOTE(mikal): there are two options here because sometimes we are handed
    # a full instance (and could include more information), and other times
    # we are just handed a UUID for the instance.
    cfg.StrOpt('instance_format',
               default='[instance: %(uuid)s] ',
               help='If an instance is passed with the log message, format '
                    'it like this'),
    cfg.StrOpt('instance_uuid_format',
               default='[instance: %(uuid)s] ',
               help='If an instance UUID is passed with the log message, '
                    'format it like this'),
]

generic_log_opts = [
from glance.common import utils
from glance.openstack.common import cfg
import glance.openstack.common.log as logging
import glance.store
import glance.store.base
import glance.store.location

LOG = logging.getLogger(__name__)

s3_opts = [
    cfg.StrOpt('s3_store_host'),
    cfg.StrOpt('s3_store_access_key', secret=True),
    cfg.StrOpt('s3_store_secret_key', secret=True),
    cfg.StrOpt('s3_store_bucket'),
    cfg.StrOpt('s3_store_object_buffer_dir'),
    cfg.BoolOpt('s3_store_create_bucket_on_put', default=False),
    cfg.StrOpt('s3_store_bucket_url_format', default='subdomain'),
]

CONF = cfg.CONF
CONF.register_opts(s3_opts)


class StoreLocation(glance.store.location.StoreLocation):
    """
    Class describing an S3 URI. An S3 URI can look like any of
    the following:

        s3://accesskey:secretkey@s3.amazonaws.com/bucket/key-id
        s3+http://accesskey:secretkey@s3.amazonaws.com/bucket/key-id
        s3+https://accesskey:secretkey@s3.amazonaws.com/bucket/key-id
import json
import time

import kombu.connection
import kombu.entity

from glance.notifier import strategy
from glance.openstack.common import cfg
import glance.openstack.common.log as logging

LOG = logging.getLogger(__name__)

rabbit_opts = [
    cfg.StrOpt('rabbit_host', default='localhost'),
    cfg.IntOpt('rabbit_port', default=5672),
    cfg.BoolOpt('rabbit_use_ssl', default=False),
    cfg.StrOpt('rabbit_userid', default='guest'),
    cfg.StrOpt('rabbit_password', default='guest'),
    cfg.StrOpt('rabbit_virtual_host', default='/'),
    cfg.StrOpt('rabbit_notification_exchange', default='glance'),
    cfg.StrOpt('rabbit_notification_topic', default='glance_notifications'),
    cfg.StrOpt('rabbit_max_retries', default=0),
    cfg.StrOpt('rabbit_retry_backoff', default=2),
    cfg.StrOpt('rabbit_retry_max_backoff', default=30)
]

CONF = cfg.CONF
CONF.register_opts(rabbit_opts)
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import json

import webob.exc

from glance.common import wsgi
import glance.context
from glance.openstack.common import cfg
import glance.openstack.common.log as logging

context_opts = [
    cfg.BoolOpt('owner_is_tenant', default=True),
    cfg.StrOpt('admin_role', default='admin'),
    cfg.BoolOpt('allow_anonymous_access', default=False),
]

CONF = cfg.CONF
CONF.register_opts(context_opts)

LOG = logging.getLogger(__name__)


class BaseContextMiddleware(wsgi.Middleware):
    def process_response(self, resp):
        try:
            request_id = resp.request.context.request_id
        except AttributeError: