class ImageCache(object):
    """Provides an LRU cache for image data.

    Options are registered on the supplied ``conf``; the cache driver is
    pluggable via ``image_cache_driver`` and falls back to the SQLite
    driver if the configured driver fails to import.
    """

    opts = [
        cfg.StrOpt('image_cache_driver', default='sqlite'),
        cfg.IntOpt('image_cache_max_size', default=10 * (1024 ** 3)),  # 10 GB
        cfg.IntOpt('image_cache_stall_time', default=86400),  # 24 hours
        cfg.StrOpt('image_cache_dir'),
    ]

    def __init__(self, conf):
        """Register the cache options on *conf* and set up the driver.

        :param conf: configuration object supporting ``register_opts``
        """
        self.conf = conf
        self.conf.register_opts(self.opts)
        self.init_driver()

    def init_driver(self):
        """
        Create the driver for the cache

        Imports the driver class named by ``image_cache_driver``; on an
        import failure the SQLite driver is substituted so the cache
        remains usable, then the driver is configured.
        """
        driver_name = self.conf.image_cache_driver
        driver_module = (__name__ + '.drivers.' + driver_name + '.Driver')
        try:
            self.driver_class = utils.import_class(driver_module)
            logger.info(_("Image cache loaded driver '%s'.") % driver_name)
        except exception.ImportFailure as import_err:
            # BUG FIX: the message previously read "'%(import_err)s." with
            # an unterminated quote around the error text.
            logger.warn(
                _("Image cache driver "
                  "'%(driver_name)s' failed to load. "
                  "Got error: '%(import_err)s'.") % locals())
            driver_module = __name__ + '.drivers.sqlite.Driver'
            logger.info(_("Defaulting to SQLite driver."))
            self.driver_class = utils.import_class(driver_module)
        self.configure_driver()
class Controller(object):
    """Registry controller backed by the database API."""

    opts = [
        cfg.IntOpt('limit_param_default', default=25),
        cfg.IntOpt('api_limit_max', default=1000),
    ]

    def __init__(self, conf):
        """Register pagination options on *conf* and configure the DB."""
        self.conf = conf
        self.conf.register_opts(self.opts)
        db_api.configure_db(conf)

    def _get_images(self, context, **params):
        """
        Get images, wrapping in exception if necessary.
        """
        try:
            return db_api.image_get_all(context, **params)
        except exception.NotFound:
            # An unknown pagination marker surfaces from the DB layer as
            # NotFound; report it to the client as a 400.
            raise exc.HTTPBadRequest(
                explanation=_("Invalid marker. Image could not be found."))
class Store(glance.store.base.Store):
    """An implementation of the RBD backend adapter."""

    EXAMPLE_URL = "rbd://<IMAGE>"

    opts = [
        cfg.IntOpt('rbd_store_chunk_size', default=DEFAULT_CHUNKSIZE),
        cfg.StrOpt('rbd_store_pool', default=DEFAULT_POOL),
        cfg.StrOpt('rbd_store_user', default=DEFAULT_USER),
        cfg.StrOpt('rbd_store_ceph_conf', default=DEFAULT_CONFFILE),
    ]

    def get_schemes(self):
        """Return the URI schemes this store is responsible for."""
        return ('rbd',)

    def configure_add(self):
        """
        Configure the Store to use the stored configuration options
        Any store that needs special configuration should implement
        this method. If the store was not able to successfully
        configure itself, it should raise `exception.BadStoreConfiguration`
        """
        self.conf.register_opts(self.opts)
        try:
            # Chunk-size option is expressed in MiB; keep it in bytes here.
            self.chunk_size = self.conf.rbd_store_chunk_size * (1024 ** 2)

            # these must not be unicode since they will be passed to a
            # non-unicode-aware C library
            self.pool = str(self.conf.rbd_store_pool)
            self.user = str(self.conf.rbd_store_user)
            self.conf_file = str(self.conf.rbd_store_ceph_conf)
        except cfg.ConfigFileValueError as e:
            reason = _("Error in store configuration: %s") % e
            logger.error(reason)
            raise exception.BadStoreConfiguration(store_name='rbd',
                                                  reason=reason)
from glance.openstack.common import timeutils _ENGINE = None _MAKER = None _MAX_RETRIES = None _RETRY_INTERVAL = None BASE = models.BASE sa_logger = None LOG = os_logging.getLogger(__name__) STATUSES = [ 'active', 'saving', 'queued', 'killed', 'pending_delete', 'deleted' ] db_opts = [ cfg.IntOpt('sql_idle_timeout', default=3600), cfg.IntOpt('sql_max_retries', default=10), cfg.IntOpt('sql_retry_interval', default=1), cfg.BoolOpt('db_auto_create', default=False), ] CONF = cfg.CONF CONF.register_opts(db_opts) def ping_listener(dbapi_conn, connection_rec, connection_proxy): """ Ensures that MySQL connections checked out of the pool are alive. Borrowed from:
LOG = logging.getLogger(__name__) DEFAULT_CONTAINER = 'glance' DEFAULT_LARGE_OBJECT_SIZE = 5 * 1024 # 5GB DEFAULT_LARGE_OBJECT_CHUNK_SIZE = 200 # 200M ONE_MB = 1000 * 1024 swift_opts = [ cfg.BoolOpt('swift_enable_snet', default=False), cfg.StrOpt('swift_store_auth_address'), cfg.StrOpt('swift_store_user', secret=True), cfg.StrOpt('swift_store_key', secret=True), cfg.StrOpt('swift_store_auth_version', default='2'), cfg.StrOpt('swift_store_container', default=DEFAULT_CONTAINER), cfg.IntOpt('swift_store_large_object_size', default=DEFAULT_LARGE_OBJECT_SIZE), cfg.IntOpt('swift_store_large_object_chunk_size', default=DEFAULT_LARGE_OBJECT_CHUNK_SIZE), cfg.BoolOpt('swift_store_create_container_on_put', default=False), cfg.BoolOpt('swift_store_multi_tenant', default=False), cfg.ListOpt('swift_store_admin_tenants', default=[]), ] CONF = cfg.CONF CONF.register_opts(swift_opts) class StoreLocation(glance.store.location.StoreLocation): """ Class describing a Swift URI. A Swift URI can look like any of the following:
from glance.version import version_info as version paste_deploy_opts = [ cfg.StrOpt('flavor'), cfg.StrOpt('config_file'), ] common_opts = [ cfg.BoolOpt('allow_additional_image_properties', default=True, help=_('Whether to allow users to specify image properties ' 'beyond what the image schema provides')), cfg.StrOpt('data_api', default='glance.db.sqlalchemy.api', help=_('Python module path of data access API')), cfg.IntOpt('limit_param_default', default=25, help=_('Default value for the number of items returned by a ' 'request if not specified explicitly in the request')), cfg.IntOpt('api_limit_max', default=1000, help=_('Maximum permissible number of items that could be ' 'returned by a request')), cfg.BoolOpt('show_image_direct_url', default=False, help=_( 'Whether to include the backend image storage location ' 'in image properties. Revealing storage location can be a ' 'security risk, so use this setting with caution!')), cfg.IntOpt('image_size_cap', default=1099511627776, help=_("Maximum size of image a user can upload in bytes. " "Defaults to 1099511627776 bytes (1 TB).")),
""" Registry API """ import os from glance.common import exception from glance.openstack.common import cfg import glance.openstack.common.log as logging from glance.registry import client LOG = logging.getLogger(__name__) registry_addr_opts = [ cfg.StrOpt('registry_host', default='0.0.0.0'), cfg.IntOpt('registry_port', default=9191), ] registry_client_opts = [ cfg.StrOpt('registry_client_protocol', default='http'), cfg.StrOpt('registry_client_key_file'), cfg.StrOpt('registry_client_cert_file'), cfg.StrOpt('registry_client_ca_file'), cfg.BoolOpt('registry_client_insecure', default=False), cfg.StrOpt('metadata_encryption_key', secret=True), ] registry_client_ctx_opts = [ cfg.StrOpt('admin_user', secret=True), cfg.StrOpt('admin_password', secret=True), cfg.StrOpt('admin_tenant_name', secret=True), cfg.StrOpt('auth_url'), cfg.StrOpt('auth_strategy', default='noauth'),
class Store(glance.store.base.Store):
    """An implementation of the swift backend adapter."""

    EXAMPLE_URL = "swift://<USER>:<KEY>@<AUTH_ADDRESS>/<CONTAINER>/<FILE>"

    # Read/write buffer size in bytes used when streaming image data.
    CHUNKSIZE = 65536

    # Store options; registered on self.conf in configure().
    opts = [
        cfg.BoolOpt('swift_enable_snet', default=False),
        cfg.StrOpt('swift_store_auth_address'),
        cfg.StrOpt('swift_store_user', secret=True),
        cfg.StrOpt('swift_store_key', secret=True),
        cfg.StrOpt('swift_store_auth_version', default='2'),
        cfg.StrOpt('swift_store_container',
                   default=DEFAULT_CONTAINER),
        cfg.IntOpt('swift_store_large_object_size',
                   default=DEFAULT_LARGE_OBJECT_SIZE),
        cfg.IntOpt('swift_store_large_object_chunk_size',
                   default=DEFAULT_LARGE_OBJECT_CHUNK_SIZE),
        cfg.BoolOpt('swift_store_create_container_on_put', default=False),
    ]

    def get_schemes(self):
        # URI schemes this store is registered for; the swift+http(s)
        # variants pin the transport used to reach the auth endpoint.
        return ('swift+https', 'swift', 'swift+http')

    def configure(self):
        # Settings needed for any request, not only uploads.
        # NOTE(review): _option_get is not visible in this chunk —
        # presumably it raises BadStoreConfiguration on a missing value.
        self.conf.register_opts(self.opts)
        self.snet = self.conf.swift_enable_snet
        self.auth_version = self._option_get('swift_store_auth_version')

    def configure_add(self):
        """
        Configure the Store to use the stored configuration options
        Any store that needs special configuration should implement
        this method. If the store was not able to successfully
        configure itself, it should raise `exception.BadStoreConfiguration`
        """
        # Mandatory credentials; missing values are surfaced by
        # _option_get rather than silently defaulting.
        self.auth_address = self._option_get('swift_store_auth_address')
        self.user = self._option_get('swift_store_user')
        self.key = self._option_get('swift_store_key')
        self.container = self.conf.swift_store_container
        try:
            # The config file has swift_store_large_object_*size in MB, but
            # internally we store it in bytes, since the image_size parameter
            # passed to add() is also in bytes.
            _obj_size = self.conf.swift_store_large_object_size
            self.large_object_size = _obj_size * ONE_MB
            _obj_chunk_size = self.conf.swift_store_large_object_chunk_size
            self.large_object_chunk_size = _obj_chunk_size * ONE_MB
        except cfg.ConfigFileValueError, e:
            reason = _("Error in configuration conf: %s") % e
            logger.error(reason)
            raise exception.BadStoreConfiguration(store_name="swift",
                                                  reason=reason)

        # Derive the glance-side scheme and a fully-qualified auth URL
        # from the configured auth address; a bare host defaults to https.
        self.scheme = 'swift+https'
        if self.auth_address.startswith('http://'):
            self.scheme = 'swift+http'
            self.full_auth_address = self.auth_address
        elif self.auth_address.startswith('https://'):
            self.full_auth_address = self.auth_address
        else:  # Defaults https
            self.full_auth_address = 'https://' + self.auth_address
LRU Cache for Image Data
"""

import hashlib

from glance.common import exception
from glance.common import utils
from glance.openstack.common import cfg
import glance.openstack.common.log as logging
from glance.openstack.common import importutils

LOG = logging.getLogger(__name__)

# Cache tuning options, registered on the global CONF below.
image_cache_opts = [
    cfg.StrOpt('image_cache_driver', default='sqlite'),
    cfg.IntOpt('image_cache_max_size', default=10 * (1024**3)),  # 10 GB
    cfg.IntOpt('image_cache_stall_time', default=86400),  # 24 hours
    cfg.StrOpt('image_cache_dir'),
]

CONF = cfg.CONF
CONF.register_opts(image_cache_opts)

# Fallback cap on total cache size in bytes (10 GB); mirrors the
# image_cache_max_size default above.
DEFAULT_MAX_CACHE_SIZE = 10 * 1024 * 1024 * 1024  # 10 GB


class ImageCache(object):
    """Provides an LRU cache for image data."""

    def __init__(self):
        # Driver construction is delegated to init_driver, which is
        # defined later in the class (not visible in this chunk).
        self.init_driver()
class Scrubber(object):
    """Deletes images from backend stores once their queued delete time
    has passed, and optionally cleans up stale pending-delete records."""

    # Marker file in the data directory recording the last cleanup run.
    CLEANUP_FILE = ".cleanup"

    opts = [
        cfg.BoolOpt('cleanup_scrubber', default=False),
        cfg.IntOpt('cleanup_scrubber_time', default=86400)  # seconds
    ]

    def __init__(self, conf, **local_conf):
        """Register options, connect to the registry and init the stores.

        :param conf: configuration object supporting ``register_opts``
        """
        self.conf = conf
        self.conf.register_opts(self.opts)
        self.datadir = store.get_scrubber_datadir(conf)
        self.cleanup = self.conf.cleanup_scrubber
        self.cleanup_time = self.conf.cleanup_scrubber_time
        host, port = registry.get_registry_addr(conf)
        logger.info(_("Initializing scrubber with conf: %s") %
                    {'datadir': self.datadir,
                     'cleanup': self.cleanup,
                     'cleanup_time': self.cleanup_time,
                     'registry_host': host,
                     'registry_port': port})
        self.registry = client.RegistryClient(host, port)
        utils.safe_mkdirs(self.datadir)
        store.create_stores(conf)

    def run(self, pool, event=None):
        """Scan the data directory and delete every image whose queued
        delete time has passed, fanning the work out over *pool*."""
        now = time.time()
        if not os.path.exists(self.datadir):
            logger.info(_("%s does not exist") % self.datadir)
            return
        delete_work = []
        for root, dirs, files in os.walk(self.datadir):
            for id in files:
                if id == self.CLEANUP_FILE:
                    continue
                file_name = os.path.join(root, id)
                # Cheap pre-filter on the file's mtime; the delete time
                # recorded inside the queue file is checked afterwards
                # (presumably both are written by write_queue_file —
                # TODO confirm, write_queue_file is not in this chunk).
                delete_time = os.stat(file_name).st_mtime
                if delete_time > now:
                    continue
                uri, delete_time = read_queue_file(file_name)
                if delete_time > now:
                    continue
                delete_work.append((id, uri, now))
        logger.info(_("Deleting %s images") % len(delete_work))
        pool.starmap(self._delete, delete_work)
        if self.cleanup:
            self._cleanup(pool)

    def _delete(self, id, uri, now):
        """Delete one image from its backend store and mark it deleted
        in the registry; re-queue the entry if the backend is unsupported."""
        file_path = os.path.join(self.datadir, str(id))
        try:
            logger.debug(_("Deleting %(uri)s") % {'uri': uri})
            store.delete_from_backend(uri)
        except store.UnsupportedBackend:
            msg = _("Failed to delete image from store (%(uri)s).")
            logger.error(msg % {'uri': uri})
            # Put the entry back on the queue so a later run can retry.
            write_queue_file(file_path, uri, now)
        self.registry.update_image(id, {'status': 'deleted'})
        utils.safe_remove(file_path)

    def _cleanup(self, pool):
        """Delete images stuck in pending_delete for longer than
        cleanup_scrubber_time; runs at most once per cleanup interval,
        tracked via the CLEANUP_FILE marker."""
        now = time.time()
        cleanup_file = os.path.join(self.datadir, self.CLEANUP_FILE)
        if not os.path.exists(cleanup_file):
            # First run: just record the timestamp and wait a full interval.
            write_queue_file(cleanup_file, 'cleanup', now)
            return

        # NOTE(review): the underscore-prefixed uri is ignored; the marker
        # file only carries the last run time.
        _uri, last_run_time = read_queue_file(cleanup_file)
        cleanup_time = last_run_time + self.cleanup_time
        if cleanup_time > now:
            return

        logger.info(_("Getting images deleted before %s") % self.cleanup_time)
        write_queue_file(cleanup_file, 'cleanup', now)

        filters = {'deleted': True,
                   'is_public': 'none',
                   'status': 'pending_delete'}
        pending_deletes = self.registry.get_images_detailed(filters=filters)

        delete_work = []
        for pending_delete in pending_deletes:
            deleted_at = pending_delete.get('deleted_at')
            if not deleted_at:
                continue
            time_fmt = "%Y-%m-%dT%H:%M:%S"
            # NOTE: Strip off microseconds which may occur after the last '.,'
            # Example: 2012-07-07T19:14:34.974216
            date_str = deleted_at.rsplit('.', 1)[0].rsplit(',', 1)[0]
            delete_time = calendar.timegm(time.strptime(date_str, time_fmt))
            # Only scrub entries older than the configured grace period.
            if delete_time + self.cleanup_time > now:
                continue
            delete_work.append((pending_delete['id'],
                                pending_delete['location'], now))

        logger.info(_("Deleting %s images") % len(delete_work))
        pool.starmap(self._delete, delete_work)
from glance import context
from glance.common import utils
from glance.openstack.common import cfg
import glance.openstack.common.log as logging
from glance import registry
from glance import store
# NOTE(review): the individual store modules are presumably imported for
# their side effects (making the backends available) — confirm against
# glance.store; none of their names are referenced here.
import glance.store.filesystem
import glance.store.http
import glance.store.s3
import glance.store.swift

LOG = logging.getLogger(__name__)

# Scrubber options, registered on the global CONF below.
scrubber_opts = [
    cfg.BoolOpt('cleanup_scrubber', default=False),
    cfg.IntOpt('cleanup_scrubber_time', default=86400)
]

CONF = cfg.CONF
CONF.register_opts(scrubber_opts)


class Daemon(object):
    # Long-running scrubber daemon; only __init__ is visible in this chunk.

    def __init__(self, wakeup_time=300, threads=1000):
        """
        :param wakeup_time: interval between wakeups (presumably seconds
                            — confirm against the run loop, not in view)
        :param threads: size of the eventlet greenthread pool
        """
        LOG.info(_("Starting Daemon: wakeup_time=%(wakeup_time)s "
                   "threads=%(threads)s") % locals())
        self.wakeup_time = wakeup_time
        self.event = eventlet.event.Event()
        self.pool = eventlet.greenpool.GreenPool(threads)
import eventlet.greenio
from eventlet.green import socket, ssl
import eventlet.wsgi
from paste import deploy
import routes
import routes.middleware
import webob.dec
import webob.exc

from glance.common import exception
from glance.common import utils
from glance.openstack.common import cfg

# Options naming the address/port the WSGI server binds to.
bind_opts = [
    cfg.StrOpt('bind_host', default='0.0.0.0'),
    cfg.IntOpt('bind_port'),
]

# Listen-socket options, including optional TLS key material.
socket_opts = [
    cfg.IntOpt('backlog', default=4096),
    cfg.StrOpt('cert_file'),
    cfg.StrOpt('key_file'),
]

# Worker process count; how 0 is interpreted is decided by the server
# start-up code, which is outside this chunk.
workers_opt = cfg.IntOpt('workers', default=0)


class WritableLogger(object):
    """A thin wrapper that responds to `write` and logs."""

    def __init__(self, logger, level=logging.DEBUG):
        # NOTE(review): 'level' is not stored on this line; the rest of
        # the class lies outside this chunk.
        self.logger = logger
loc = location.get_location_from_uri(uri) return loc.store_name scrubber_datadir_opt = cfg.StrOpt('scrubber_datadir', default='/var/lib/glance/scrubber') def get_scrubber_datadir(conf): conf.register_opt(scrubber_datadir_opt) return conf.scrubber_datadir delete_opts = [ cfg.BoolOpt('delayed_delete', default=False), cfg.IntOpt('scrub_time', default=0) ] def schedule_delete_from_backend(uri, conf, context, image_id, **kwargs): """ Given a uri and a time, schedule the deletion of an image. """ conf.register_opts(delete_opts) if not conf.delayed_delete: registry.update_image_metadata(context, image_id, {'status': 'deleted'}) try: return delete_from_backend(uri, **kwargs) except (UnsupportedBackend, exception.StoreDeleteNotSupported, exception.NotFound):
from eventlet.green import socket, ssl
import eventlet.greenio
import eventlet.wsgi
import routes
import routes.middleware
import webob.dec
import webob.exc

from glance.common import exception
from glance.common import utils
from glance.openstack.common import cfg
import glance.openstack.common.log as os_logging

# Options naming the address/port the WSGI server binds to.
bind_opts = [
    cfg.StrOpt('bind_host', default='0.0.0.0'),
    cfg.IntOpt('bind_port'),
]

# Listen-socket options: backlog, keepalive tuning and TLS material.
# tcp_keepidle is presumably the idle time in seconds before TCP
# keepalive probes start — confirm where the socket is configured.
socket_opts = [
    cfg.IntOpt('backlog', default=4096),
    cfg.IntOpt('tcp_keepidle', default=600),
    cfg.StrOpt('ca_file'),
    cfg.StrOpt('cert_file'),
    cfg.StrOpt('key_file'),
]

# Worker process count (default: a single worker).
workers_opt = cfg.IntOpt('workers', default=1)

CONF = cfg.CONF
CONF.register_opts(bind_opts)
CONF.register_opts(socket_opts)
try: import rados import rbd except ImportError: pass DEFAULT_POOL = 'rbd' DEFAULT_CONFFILE = '' # librados will locate the default conf file DEFAULT_USER = None # let librados decide based on the Ceph conf file DEFAULT_CHUNKSIZE = 4 # in MiB DEFAULT_SNAPNAME = 'snap' LOG = logging.getLogger(__name__) rbd_opts = [ cfg.IntOpt('rbd_store_chunk_size', default=DEFAULT_CHUNKSIZE), cfg.StrOpt('rbd_store_pool', default=DEFAULT_POOL), cfg.StrOpt('rbd_store_user', default=DEFAULT_USER), cfg.StrOpt('rbd_store_ceph_conf', default=DEFAULT_CONFFILE), ] CONF = cfg.CONF CONF.register_opts(rbd_opts) class StoreLocation(glance.store.location.StoreLocation): """ Class describing a RBD URI. This is of the form: rbd://image
class RabbitStrategy(strategy.Strategy):
    """A notifier that puts a message on a queue when called."""

    # NOTE(review): rabbit_max_retries / rabbit_retry_backoff /
    # rabbit_retry_max_backoff hold integers but are declared as StrOpt,
    # so values set in a config file arrive as strings (see the int()
    # conversions in __init__, and the unconverted max_retries below).
    opts = [
        cfg.StrOpt('rabbit_host', default='localhost'),
        cfg.IntOpt('rabbit_port', default=5672),
        cfg.BoolOpt('rabbit_use_ssl', default=False),
        cfg.StrOpt('rabbit_userid', default='guest'),
        cfg.StrOpt('rabbit_password', default='guest'),
        cfg.StrOpt('rabbit_virtual_host', default='/'),
        cfg.StrOpt('rabbit_notification_exchange', default='glance'),
        cfg.StrOpt('rabbit_notification_topic',
                   default='glance_notifications'),
        cfg.StrOpt('rabbit_max_retries', default=0),
        cfg.StrOpt('rabbit_retry_backoff', default=2),
        cfg.StrOpt('rabbit_retry_max_backoff', default=30)
    ]

    def __init__(self, conf):
        """Initialize the rabbit notification strategy."""
        self._conf = conf
        self._conf.register_opts(self.opts)
        self.topic = self._conf.rabbit_notification_topic
        # NOTE(review): unlike the two values below, max_retries is not
        # int()-converted; if set via a config file it will be a string
        # here, which breaks the integer comparison in reconnect().
        self.max_retries = self._conf.rabbit_max_retries
        # NOTE(comstud): When reading the config file, these values end
        # up being strings, and we need them as ints.
        self.retry_backoff = int(self._conf.rabbit_retry_backoff)
        self.retry_max_backoff = int(self._conf.rabbit_retry_max_backoff)
        self.connection = None
        self.retry_attempts = 0
        try:
            # Connect eagerly; swallow exhaustion so the service can
            # still come up without an AMQP server.
            self.reconnect()
        except KombuMaxRetriesReached:
            pass

    def _close(self):
        """Close connection to rabbit."""
        try:
            self.connection.close()
        except self.connection_errors:
            pass
        self.connection = None

    def _connect(self):
        """Connect to rabbit. Exceptions should be handled by the caller.
        """
        log_info = {}
        log_info['hostname'] = self._conf.rabbit_host
        log_info['port'] = self._conf.rabbit_port
        if self.connection:
            logger.info(_("Reconnecting to AMQP server on "
                          "%(hostname)s:%(port)d") % log_info)
            self._close()
        else:
            logger.info(_("Connecting to AMQP server on "
                          "%(hostname)s:%(port)d") % log_info)
        self.connection = kombu.connection.BrokerConnection(
            hostname=self._conf.rabbit_host,
            port=self._conf.rabbit_port,
            userid=self._conf.rabbit_userid,
            password=self._conf.rabbit_password,
            virtual_host=self._conf.rabbit_virtual_host,
            ssl=self._conf.rabbit_use_ssl)
        self.connection_errors = self.connection.connection_errors
        self.connection.connect()
        self.channel = self.connection.channel()
        self.exchange = kombu.entity.Exchange(
            channel=self.channel,
            type="topic",
            name=self._conf.rabbit_notification_exchange)

        # NOTE(jerdfelt): Normally the consumer would create the queues,
        # but we do this to ensure that messages don't get dropped if the
        # consumer is started after we do
        for priority in ["WARN", "INFO", "ERROR"]:
            routing_key = "%s.%s" % (self.topic, priority.lower())
            queue = kombu.entity.Queue(channel=self.channel,
                                       exchange=self.exchange,
                                       durable=True,
                                       name=routing_key,
                                       routing_key=routing_key)
            queue.declare()
        logger.info(_("Connected to AMQP server on "
                      "%(hostname)s:%(port)d") % log_info)

    def reconnect(self):
        """Handles reconnecting and re-establishing queues."""
        while True:
            self.retry_attempts += 1
            try:
                self._connect()
                return
            except self.connection_errors, e:
                pass
            except Exception, e:
                # NOTE(comstud): Unfortunately it's possible for amqplib
                # to return an error not covered by its transport
                # connection_errors in the case of a timeout waiting for
                # a protocol response. (See paste link in LP888621 for
                # nova.) So, we check all exceptions for 'timeout' in them
                # and try to reconnect in this case.
                if 'timeout' not in str(e):
                    raise

            log_info = {}
            log_info['err_str'] = str(e)
            log_info['max_retries'] = self.max_retries
            log_info['hostname'] = self._conf.rabbit_host
            log_info['port'] = self._conf.rabbit_port

            # Give up after max_retries attempts (0 means retry forever).
            if self.max_retries and self.retry_attempts >= self.max_retries:
                logger.exception(
                    _('Unable to connect to AMQP server on '
                      '%(hostname)s:%(port)d after %(max_retries)d '
                      'tries: %(err_str)s') % log_info)
                if self.connection:
                    self._close()
                raise KombuMaxRetriesReached

            # Linear backoff, capped at retry_max_backoff if that is set.
            sleep_time = self.retry_backoff * self.retry_attempts
            if self.retry_max_backoff:
                sleep_time = min(sleep_time, self.retry_max_backoff)

            log_info['sleep_time'] = sleep_time
            logger.exception(
                _('AMQP server on %(hostname)s:%(port)d is'
                  ' unreachable: %(err_str)s. Trying again in '
                  '%(sleep_time)d seconds.') % log_info)
            time.sleep(sleep_time)
import json
import time

import kombu.connection
import kombu.entity

from glance.notifier import strategy
from glance.openstack.common import cfg
import glance.openstack.common.log as logging

LOG = logging.getLogger(__name__)

# Rabbit notifier options.
# BUG FIX: rabbit_max_retries, rabbit_retry_backoff and
# rabbit_retry_max_backoff are integer-valued but were declared as
# StrOpt, so values set in a config file arrived as strings and broke
# integer comparisons/arithmetic in the retry logic. Declaring them as
# IntOpt makes the config layer coerce them; the defaults are unchanged.
rabbit_opts = [
    cfg.StrOpt('rabbit_host', default='localhost'),
    cfg.IntOpt('rabbit_port', default=5672),
    cfg.BoolOpt('rabbit_use_ssl', default=False),
    cfg.StrOpt('rabbit_userid', default='guest'),
    cfg.StrOpt('rabbit_password', default='guest'),
    cfg.StrOpt('rabbit_virtual_host', default='/'),
    cfg.StrOpt('rabbit_notification_exchange', default='glance'),
    cfg.StrOpt('rabbit_notification_topic',
               default='glance_notifications'),
    cfg.IntOpt('rabbit_max_retries', default=0),
    cfg.IntOpt('rabbit_retry_backoff', default=2),
    cfg.IntOpt('rabbit_retry_max_backoff', default=30),
]

CONF = cfg.CONF
CONF.register_opts(rabbit_opts)
default='localhost', help='Qpid broker hostname'), cfg.StrOpt('qpid_port', default='5672', help='Qpid broker port'), cfg.StrOpt('qpid_username', default='', help='Username for qpid connection'), cfg.StrOpt('qpid_password', default='', help='Password for qpid connection'), cfg.StrOpt('qpid_sasl_mechanisms', default='', help='Space separated list of SASL mechanisms to use for auth'), cfg.IntOpt('qpid_reconnect_timeout', default=0, help='Reconnection timeout in seconds'), cfg.IntOpt('qpid_reconnect_limit', default=0, help='Max reconnections before giving up'), cfg.IntOpt('qpid_reconnect_interval_min', default=0, help='Minimum seconds between reconnection attempts'), cfg.IntOpt('qpid_reconnect_interval_max', default=0, help='Maximum seconds between reconnection attempts'), cfg.IntOpt('qpid_reconnect_interval', default=0, help='Equivalent to setting max and min to the same value'), cfg.IntOpt('qpid_heartbeat', default=60,