class ImageCache(object):
    """Provides an LRU cache for image data."""

    opts = [
        cfg.StrOpt('image_cache_driver', default='sqlite'),
        cfg.IntOpt('image_cache_max_size', default=10 * (1024 ** 3)),  # 10 GB
        cfg.IntOpt('image_cache_stall_time', default=86400),  # 24 hours
        cfg.StrOpt('image_cache_dir'),
    ]

    def __init__(self, conf):
        self.conf = conf
        self.conf.register_opts(self.opts)
        self.init_driver()

    def init_driver(self):
        """Create the driver for the cache"""
        driver_name = self.conf.image_cache_driver
        driver_module = (__name__ + '.drivers.' + driver_name + '.Driver')
        try:
            self.driver_class = utils.import_class(driver_module)
            logger.info(_("Image cache loaded driver '%s'.") % driver_name)
        except exception.ImportFailure, import_err:
            logger.warn(_("Image cache driver '%(driver_name)s' failed to "
                          "load. Got error: '%(import_err)s.") % locals())
            driver_module = __name__ + '.drivers.sqlite.Driver'
            logger.info(_("Defaulting to SQLite driver."))
            self.driver_class = utils.import_class(driver_module)
        self.configure_driver()
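A minimal usage sketch (assumed wiring, not taken verbatim from the tree): the cache expects a ConfigOpts-style object it can register its options on; the invocation style mirrors the config(project='glance', ...) call in the migration script later in these snippets.

# Hypothetical wiring for illustration only.
from glance.openstack.common import cfg

conf = cfg.CONF
conf(project='glance')    # parse config files / CLI arguments
cache = ImageCache(conf)  # registers its opts, then loads the configured driver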
def test_walk_versions(self):
    """
    Walks all version scripts for each tested database, ensuring
    that there are no errors in the version scripts for each engine
    """
    for key, engine in self.engines.items():
        conf = utils.TestConfigOpts({
            'sql_connection': TestMigrations.TEST_DATABASES[key]})
        conf.register_opt(cfg.StrOpt('sql_connection'))
        self._walk_versions(conf)
def test_version_control_existing_db(self):
    """
    Creates a DB without version control information, places it
    under version control and checks that it can be upgraded
    without errors.
    """
    for key, engine in self.engines.items():
        conf = utils.TestConfigOpts({
            'sql_connection': TestMigrations.TEST_DATABASES[key]})
        conf.register_opt(cfg.StrOpt('sql_connection'))
        self._create_unversioned_001_db(engine)
        self._walk_versions(conf, initial_version=1)
def test_no_data_loss_2_to_3_to_2(self):
    """
    Here, we test that in the case when we moved a column "type" from the
    base images table to be records in the image_properties table, that we
    don't lose any data during the migration. Similarly, we test that on
    downgrade, we don't lose any data, as the records are moved from the
    image_properties table back into the base image table.
    """
    for key, engine in self.engines.items():
        conf = utils.TestConfigOpts({
            'sql_connection': TestMigrations.TEST_DATABASES[key]})
        conf.register_opt(cfg.StrOpt('sql_connection'))
        self._no_data_loss_2_to_3_to_2(engine, conf)
class Store(glance.store.base.Store):
    """An implementation of the RBD backend adapter."""

    EXAMPLE_URL = "rbd://<IMAGE>"

    opts = [
        cfg.IntOpt('rbd_store_chunk_size', default=DEFAULT_CHUNKSIZE),
        cfg.StrOpt('rbd_store_pool', default=DEFAULT_POOL),
        cfg.StrOpt('rbd_store_user', default=DEFAULT_USER),
        cfg.StrOpt('rbd_store_ceph_conf', default=DEFAULT_CONFFILE),
    ]

    def get_schemes(self):
        return ('rbd', )

    def configure_add(self):
        """
        Configure the Store to use the stored configuration options

        Any store that needs special configuration should implement
        this method. If the store was not able to successfully configure
        itself, it should raise `exception.BadStoreConfiguration`
        """
        self.conf.register_opts(self.opts)
        try:
            self.chunk_size = self.conf.rbd_store_chunk_size * 1024 * 1024
            # these must not be unicode since they will be passed to a
            # non-unicode-aware C library
            self.pool = str(self.conf.rbd_store_pool)
            self.user = str(self.conf.rbd_store_user)
            self.conf_file = str(self.conf.rbd_store_ceph_conf)
        except cfg.ConfigFileValueError, e:
            reason = _("Error in store configuration: %s") % e
            logger.error(reason)
            raise exception.BadStoreConfiguration(store_name='rbd',
                                                  reason=reason)
class ContextMiddleware(wsgi.Middleware):
    opts = [
        cfg.BoolOpt('owner_is_tenant', default=True),
        cfg.StrOpt('admin_role', default='admin'),
    ]

    def __init__(self, app, conf, **local_conf):
        self.conf = conf
        self.conf.register_opts(self.opts)
        super(ContextMiddleware, self).__init__(app)

    def process_request(self, req):
        """Convert authentication information into a request context

        Generate a RequestContext object from the available
        authentication headers and store on the 'context' attribute
        of the req object.

        :param req: wsgi request object that will be given the context object
        :raises webob.exc.HTTPUnauthorized: when value of the
                X-Identity-Status header is not 'Confirmed'
        """
        if req.headers.get('X-Identity-Status') != 'Confirmed':
            raise webob.exc.HTTPUnauthorized()

        # NOTE(bcwaldon): X-Roles is a csv string, but we need to parse
        # it into a list to be useful
        roles_header = req.headers.get('X-Roles', '')
        roles = [r.strip().lower() for r in roles_header.split(',')]

        # NOTE(bcwaldon): This header is deprecated in favor of X-Auth-Token
        deprecated_token = req.headers.get('X-Storage-Token')

        kwargs = {
            'user': req.headers.get('X-User-Id'),
            'tenant': req.headers.get('X-Tenant-Id'),
            'roles': roles,
            'is_admin': self.conf.admin_role.strip().lower() in roles,
            'auth_tok': req.headers.get('X-Auth-Token', deprecated_token),
            'owner_is_tenant': self.conf.owner_is_tenant,
        }

        req.context = RequestContext(**kwargs)
def add_options(conf):
    """
    Adds any configuration options that the db layer might have.

    :param conf: A ConfigOpts object
    :retval None
    """
    conf.register_group(cfg.OptGroup('registrydb',
                                     title='Registry Database Options',
                                     help='The following configuration '
                                          'options are specific to the '
                                          'Glance image registry database.'))
    conf.register_cli_opt(cfg.StrOpt('sql_connection',
                                     metavar='CONNECTION',
                                     help='A valid SQLAlchemy connection '
                                          'string for the registry database. '
                                          'Default: %default'))
class Notifier(object):
    """Uses a notification strategy to send out messages about events."""

    opts = [
        cfg.StrOpt('notifier_strategy', default='default'),
    ]

    def __init__(self, conf, strategy=None):
        conf.register_opts(self.opts)
        strategy = conf.notifier_strategy
        try:
            self.strategy = utils.import_class(_STRATEGIES[strategy])(conf)
        except (KeyError, ImportError):
            raise exception.InvalidNotifierStrategy(strategy=strategy)

    @staticmethod
    def generate_message(event_type, priority, payload):
        return {
            "message_id": str(uuid.uuid4()),
            "publisher_id": socket.gethostname(),
            "event_type": event_type,
            "priority": priority,
            "payload": payload,
            "timestamp": str(datetime.datetime.utcnow()),
        }

    def warn(self, event_type, payload):
        msg = self.generate_message(event_type, "WARN", payload)
        self.strategy.warn(msg)

    def info(self, event_type, payload):
        msg = self.generate_message(event_type, "INFO", payload)
        self.strategy.info(msg)

    def error(self, event_type, payload):
        msg = self.generate_message(event_type, "ERROR", payload)
        self.strategy.error(msg)
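Illustrative usage sketch (assumptions: conf is an already-parsed ConfigOpts object and the 'default' key in _STRATEGIES maps to a no-op strategy):

# Hypothetical example, not part of the module above.
notifier = Notifier(conf)
notifier.info('image.upload', {'image_id': 'a-hypothetical-id',
                               'size_bytes': 1024})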
""" Registry API """ import os from glance.common import exception from glance.openstack.common import cfg import glance.openstack.common.log as logging from glance.registry import client LOG = logging.getLogger(__name__) registry_addr_opts = [ cfg.StrOpt('registry_host', default='0.0.0.0'), cfg.IntOpt('registry_port', default=9191), ] registry_client_opts = [ cfg.StrOpt('registry_client_protocol', default='http'), cfg.StrOpt('registry_client_key_file'), cfg.StrOpt('registry_client_cert_file'), cfg.StrOpt('registry_client_ca_file'), cfg.BoolOpt('registry_client_insecure', default=False), cfg.StrOpt('metadata_encryption_key', secret=True), ] registry_client_ctx_opts = [ cfg.StrOpt('admin_user', secret=True), cfg.StrOpt('admin_password', secret=True), cfg.StrOpt('admin_tenant_name', secret=True), cfg.StrOpt('auth_url'),
class Store(glance.store.base.Store):
    """An implementation of the swift backend adapter."""

    EXAMPLE_URL = "swift://<USER>:<KEY>@<AUTH_ADDRESS>/<CONTAINER>/<FILE>"

    CHUNKSIZE = 65536

    opts = [
        cfg.BoolOpt('swift_enable_snet', default=False),
        cfg.StrOpt('swift_store_auth_address'),
        cfg.StrOpt('swift_store_user', secret=True),
        cfg.StrOpt('swift_store_key', secret=True),
        cfg.StrOpt('swift_store_auth_version', default='2'),
        cfg.StrOpt('swift_store_container', default=DEFAULT_CONTAINER),
        cfg.IntOpt('swift_store_large_object_size',
                   default=DEFAULT_LARGE_OBJECT_SIZE),
        cfg.IntOpt('swift_store_large_object_chunk_size',
                   default=DEFAULT_LARGE_OBJECT_CHUNK_SIZE),
        cfg.BoolOpt('swift_store_create_container_on_put', default=False),
    ]

    def get_schemes(self):
        return ('swift+https', 'swift', 'swift+http')

    def configure(self):
        self.conf.register_opts(self.opts)
        self.snet = self.conf.swift_enable_snet
        self.auth_version = self._option_get('swift_store_auth_version')

    def configure_add(self):
        """
        Configure the Store to use the stored configuration options

        Any store that needs special configuration should implement
        this method. If the store was not able to successfully configure
        itself, it should raise `exception.BadStoreConfiguration`
        """
        self.auth_address = self._option_get('swift_store_auth_address')
        self.user = self._option_get('swift_store_user')
        self.key = self._option_get('swift_store_key')
        self.container = self.conf.swift_store_container
        try:
            # The config file has swift_store_large_object_*size in MB, but
            # internally we store it in bytes, since the image_size parameter
            # passed to add() is also in bytes.
            _obj_size = self.conf.swift_store_large_object_size
            self.large_object_size = _obj_size * ONE_MB
            _obj_chunk_size = self.conf.swift_store_large_object_chunk_size
            self.large_object_chunk_size = _obj_chunk_size * ONE_MB
        except cfg.ConfigFileValueError, e:
            reason = _("Error in configuration conf: %s") % e
            logger.error(reason)
            raise exception.BadStoreConfiguration(store_name="swift",
                                                  reason=reason)

        self.scheme = 'swift+https'
        if self.auth_address.startswith('http://'):
            self.scheme = 'swift+http'
            self.full_auth_address = self.auth_address
        elif self.auth_address.startswith('https://'):
            self.full_auth_address = self.auth_address
        else:
            # Defaults https
            self.full_auth_address = 'https://' + self.auth_address
import re
import tempfile
import urlparse

from glance.common import exception
from glance.common import utils
from glance.openstack.common import cfg
import glance.openstack.common.log as logging
import glance.store
import glance.store.base
import glance.store.location

LOG = logging.getLogger(__name__)

s3_opts = [
    cfg.StrOpt('s3_store_host'),
    cfg.StrOpt('s3_store_access_key', secret=True),
    cfg.StrOpt('s3_store_secret_key', secret=True),
    cfg.StrOpt('s3_store_bucket'),
    cfg.StrOpt('s3_store_object_buffer_dir'),
    cfg.BoolOpt('s3_store_create_bucket_on_put', default=False),
    cfg.StrOpt('s3_store_bucket_url_format', default='subdomain'),
]

CONF = cfg.CONF
CONF.register_opts(s3_opts)


class StoreLocation(glance.store.location.StoreLocation):
    """
    Class describing an S3 URI. An S3 URI can look like any of
""" LRU Cache for Image Data """ import hashlib from glance.common import exception from glance.common import utils from glance.openstack.common import cfg import glance.openstack.common.log as logging from glance.openstack.common import importutils LOG = logging.getLogger(__name__) image_cache_opts = [ cfg.StrOpt('image_cache_driver', default='sqlite'), cfg.IntOpt('image_cache_max_size', default=10 * (1024**3)), # 10 GB cfg.IntOpt('image_cache_stall_time', default=86400), # 24 hours cfg.StrOpt('image_cache_dir'), ] CONF = cfg.CONF CONF.register_opts(image_cache_opts) DEFAULT_MAX_CACHE_SIZE = 10 * 1024 * 1024 * 1024 # 10 GB class ImageCache(object): """Provides an LRU cache for image data.""" def __init__(self): self.init_driver()
import errno
import hashlib
import os
import urlparse

from glance.common import exception
from glance.common import utils
from glance.openstack.common import cfg
import glance.openstack.common.log as logging
import glance.store
import glance.store.base
import glance.store.location

LOG = logging.getLogger(__name__)

datadir_opt = cfg.StrOpt('filesystem_store_datadir')

CONF = cfg.CONF
CONF.register_opt(datadir_opt)


class StoreLocation(glance.store.location.StoreLocation):
    """Class describing a Filesystem URI"""

    def process_specs(self):
        self.scheme = self.specs.get('scheme', 'file')
        self.path = self.specs.get('path')

    def get_uri(self):
        return "file://%s" % self.path

    def parse_uri(self, uri):
try:
    import swiftclient
except ImportError:
    pass

LOG = logging.getLogger(__name__)

DEFAULT_CONTAINER = 'glance'
DEFAULT_LARGE_OBJECT_SIZE = 5 * 1024  # 5GB
DEFAULT_LARGE_OBJECT_CHUNK_SIZE = 200  # 200M
ONE_MB = 1000 * 1024

swift_opts = [
    cfg.BoolOpt('swift_enable_snet', default=False),
    cfg.StrOpt('swift_store_auth_address'),
    cfg.StrOpt('swift_store_user', secret=True),
    cfg.StrOpt('swift_store_key', secret=True),
    cfg.StrOpt('swift_store_auth_version', default='2'),
    cfg.StrOpt('swift_store_container', default=DEFAULT_CONTAINER),
    cfg.IntOpt('swift_store_large_object_size',
               default=DEFAULT_LARGE_OBJECT_SIZE),
    cfg.IntOpt('swift_store_large_object_chunk_size',
               default=DEFAULT_LARGE_OBJECT_CHUNK_SIZE),
    cfg.BoolOpt('swift_store_create_container_on_put', default=False),
    cfg.BoolOpt('swift_store_multi_tenant', default=False),
    cfg.ListOpt('swift_store_admin_tenants', default=[]),
]

CONF = cfg.CONF
CONF.register_opts(swift_opts)
class Enforcer(object):
    """Responsible for loading and enforcing rules"""

    policy_opts = (
        cfg.StrOpt('policy_file', default=None),
        cfg.StrOpt('policy_default_rule', default='default'),
    )

    def __init__(self, conf):
        for opt in self.policy_opts:
            conf.register_opt(opt)

        self.default_rule = conf.policy_default_rule
        self.policy_path = self._find_policy_file(conf)
        self.policy_file_mtime = None
        self.policy_file_contents = None

    def set_rules(self, rules):
        """Create a new Brain based on the provided dict of rules"""
        brain = policy.Brain(rules, self.default_rule)
        policy.set_brain(brain)

    def load_rules(self):
        """Set the rules found in the json file on disk"""
        rules = self._read_policy_file()
        self.set_rules(rules)

    @staticmethod
    def _find_policy_file(conf):
        """Locate the policy json data file"""
        if conf.policy_file:
            return conf.policy_file

        policy_file = conf.find_file('policy.json')
        if not policy_file:
            raise cfg.ConfigFilesNotFoundError(('policy.json', ))
        return policy_file

    def _read_policy_file(self):
        """Read contents of the policy file

        This re-caches policy data if the file has been changed.
        """
        mtime = os.path.getmtime(self.policy_path)
        if not self.policy_file_contents or mtime != self.policy_file_mtime:
            logger.debug(_("Loading policy from %s") % self.policy_path)
            with open(self.policy_path) as fap:
                raw_contents = fap.read()
                self.policy_file_contents = json.loads(raw_contents)
            self.policy_file_mtime = mtime
        return self.policy_file_contents

    def enforce(self, context, action, target):
        """Verifies that the action is valid on the target in this context.

        :param context: Glance request context
        :param action: String representing the action to be checked
        :param target: Dictionary representing the object of the action.
        :raises: `glance.common.exception.Forbidden`
        :returns: None
        """
        self.load_rules()

        match_list = ('rule:%s' % action, )
        credentials = {
            'roles': context.roles,
            'user': context.user,
            'tenant': context.tenant,
        }

        try:
            policy.enforce(match_list, target, credentials)
        except policy.NotAuthorized:
            raise exception.Forbidden(action=action)
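Illustrative usage sketch (the request-context object and the action name are assumptions made for the example, not taken from the module above):

# Hypothetical example, not part of the module above.
enforcer = Enforcer(conf)
try:
    enforcer.enforce(req_context, 'get_image', {'owner': 'some-tenant'})
except exception.Forbidden:
    pass  # policy denied the action for this context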
from glance.common import wsgi
from glance import notifier
from glance.openstack.common import cfg
import glance.openstack.common.log as logging
from glance import registry
from glance.store import (create_stores, get_from_backend,
                          get_size_from_backend,
                          schedule_delete_from_backend,
                          get_store_from_location, get_store_from_scheme)

LOG = logging.getLogger(__name__)
SUPPORTED_PARAMS = glance.api.v1.SUPPORTED_PARAMS
SUPPORTED_FILTERS = glance.api.v1.SUPPORTED_FILTERS

# Defined at module level due to _is_opt_registered
# identity check (not equality).
default_store_opt = cfg.StrOpt('default_store', default='file')

CONF = cfg.CONF
CONF.register_opt(default_store_opt)


class Controller(controller.BaseController):
    """
    WSGI controller for images resource in Glance v1 API

    The images resource API is a RESTful web service for image data. The API
    is as follows::

        GET /images -- Returns a set of brief metadata about images
        GET /images/detail -- Returns a set of detailed metadata about images
Routines for configuring Glance
"""

import logging
import logging.config
import logging.handlers
import os
import sys

from paste import deploy

from glance.openstack.common import cfg
from glance.version import version_info as version

paste_deploy_opts = [
    cfg.StrOpt('flavor'),
    cfg.StrOpt('config_file'),
]
common_opts = [
    cfg.BoolOpt('allow_additional_image_properties', default=True,
                help=_('Whether to allow users to specify image properties '
                       'beyond what the image schema provides')),
    cfg.StrOpt('data_api', default='glance.db.sqlalchemy.api',
               help=_('Python module path of data access API')),
    cfg.IntOpt('limit_param_default', default=25,
               help=_('Default value for the number of items returned by a '
                      'request if not specified explicitly in the request')),
    cfg.IntOpt('api_limit_max',
    raise exception.StoreDeleteNotSupported


def get_store_from_location(uri):
    """
    Given a location (assumed to be a URL), attempt to determine
    the store from the location.  We use here a simple guess that
    the scheme of the parsed URL is the store...

    :param uri: Location to check for the store
    """
    loc = location.get_location_from_uri(uri)
    return loc.store_name


scrubber_datadir_opt = cfg.StrOpt('scrubber_datadir',
                                  default='/var/lib/glance/scrubber')


def get_scrubber_datadir(conf):
    conf.register_opt(scrubber_datadir_opt)
    return conf.scrubber_datadir


delete_opts = [
    cfg.BoolOpt('delayed_delete', default=False),
    cfg.IntOpt('scrub_time', default=0),
]


def schedule_delete_from_backend(uri, conf, context, image_id, **kwargs):
    """
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from glance.openstack.common import cfg
from glance.openstack.common import importutils

sql_connection_opt = cfg.StrOpt('sql_connection',
                                default='sqlite:///glance.sqlite',
                                secret=True,
                                metavar='CONNECTION',
                                help='A valid SQLAlchemy connection '
                                     'string for the registry database. '
                                     'Default: %default')

CONF = cfg.CONF
CONF.register_opt(sql_connection_opt)


def add_cli_options():
    """
    Adds any configuration options that the db layer might have.

    :retval None
    """
    CONF.unregister_opt(sql_connection_opt)
class Store(glance.store.base.Store):
    """An implementation of the s3 adapter."""

    EXAMPLE_URL = "s3://<ACCESS_KEY>:<SECRET_KEY>@<S3_URL>/<BUCKET>/<OBJ>"

    opts = [
        cfg.StrOpt('s3_store_host'),
        cfg.StrOpt('s3_store_access_key', secret=True),
        cfg.StrOpt('s3_store_secret_key', secret=True),
        cfg.StrOpt('s3_store_bucket'),
        cfg.StrOpt('s3_store_object_buffer_dir'),
        cfg.BoolOpt('s3_store_create_bucket_on_put', default=False),
    ]

    def get_schemes(self):
        return ('s3', 's3+http', 's3+https')

    def configure_add(self):
        """
        Configure the Store to use the stored configuration options

        Any store that needs special configuration should implement
        this method. If the store was not able to successfully configure
        itself, it should raise `exception.BadStoreConfiguration`
        """
        self.conf.register_opts(self.opts)
        self.s3_host = self._option_get('s3_store_host')
        access_key = self._option_get('s3_store_access_key')
        secret_key = self._option_get('s3_store_secret_key')
        # NOTE(jaypipes): Need to encode to UTF-8 here because of a
        # bug in the HMAC library that boto uses.
        # See: http://bugs.python.org/issue5285
        # See: http://trac.edgewall.org/ticket/8083
        self.access_key = access_key.encode('utf-8')
        self.secret_key = secret_key.encode('utf-8')
        self.bucket = self._option_get('s3_store_bucket')

        self.scheme = 's3'
        if self.s3_host.startswith('https://'):
            self.scheme = 's3+https'
            self.full_s3_host = self.s3_host
        elif self.s3_host.startswith('http://'):
            self.full_s3_host = self.s3_host
        else:  # Defaults http
            self.full_s3_host = 'http://' + self.s3_host

        self.s3_store_object_buffer_dir = self.conf.s3_store_object_buffer_dir

    def _option_get(self, param):
        result = getattr(self.conf, param)
        if not result:
            reason = _("Could not find %(param)s in configuration "
                       "options.") % locals()
            logger.error(reason)
            raise exception.BadStoreConfiguration(store_name="s3",
                                                  reason=reason)
        return result

    def get(self, location):
        """
        Takes a `glance.store.location.Location` object that indicates
        where to find the image file, and returns a tuple of generator
        (for reading the image file) and image_size

        :param location `glance.store.location.Location` object, supplied
                        from glance.store.location.get_location_from_uri()
        :raises `glance.exception.NotFound` if image does not exist
        """
        key = self._retrieve_key(location)

        key.BufferSize = self.CHUNKSIZE

        class ChunkedIndexable(glance.store.Indexable):
            def another(self):
                return (self.wrapped.fp.read(ChunkedFile.CHUNKSIZE)
                        if self.wrapped.fp else None)

        return (ChunkedIndexable(ChunkedFile(key), key.size), key.size)

    def get_size(self, location):
        """
        Takes a `glance.store.location.Location` object that indicates
        where to find the image file, and returns the image_size (or 0
        if unavailable)

        :param location `glance.store.location.Location` object, supplied
                        from glance.store.location.get_location_from_uri()
        """
        try:
            key = self._retrieve_key(location)
            return key.size
        except Exception:
            return 0

    def _retrieve_key(self, location):
        loc = location.store_location
        from boto.s3.connection import S3Connection

        s3_conn = S3Connection(loc.accesskey, loc.secretkey,
                               host=loc.s3serviceurl,
                               is_secure=(loc.scheme == 's3+https'))
        bucket_obj = get_bucket(s3_conn, loc.bucket)

        key = get_key(bucket_obj, loc.key)

        msg = _("Retrieved image object from S3 using (s3_host=%(s3_host)s, "
                "access_key=%(accesskey)s, bucket=%(bucket)s, "
                "key=%(obj_name)s)") % ({'s3_host': loc.s3serviceurl,
                                         'accesskey': loc.accesskey,
                                         'bucket': loc.bucket,
                                         'obj_name': loc.key})
        logger.debug(msg)

        return key

    def add(self, image_id, image_file, image_size):
        """
        Stores an image file with supplied identifier to the backend
        storage system and returns an `glance.store.ImageAddResult` object
        containing information about the stored image.

        :param image_id: The opaque image identifier
        :param image_file: The image data to write, as a file-like object
        :param image_size: The size of the image data to write, in bytes

        :retval `glance.store.ImageAddResult` object
        :raises `glance.common.exception.Duplicate` if the image already
                existed

        S3 writes the image data using the scheme:
            s3://<ACCESS_KEY>:<SECRET_KEY>@<S3_URL>/<BUCKET>/<OBJ>
        where:
            <USER> = ``s3_store_user``
            <KEY> = ``s3_store_key``
            <S3_HOST> = ``s3_store_host``
            <BUCKET> = ``s3_store_bucket``
            <ID> = The id of the image being added
        """
        from boto.s3.connection import S3Connection

        loc = StoreLocation({'scheme': self.scheme,
                             'bucket': self.bucket,
                             'key': image_id,
                             's3serviceurl': self.full_s3_host,
                             'accesskey': self.access_key,
                             'secretkey': self.secret_key})

        s3_conn = S3Connection(loc.accesskey, loc.secretkey,
                               host=loc.s3serviceurl,
                               is_secure=(loc.scheme == 's3+https'))

        create_bucket_if_missing(self.bucket, s3_conn, self.conf)

        bucket_obj = get_bucket(s3_conn, self.bucket)
        obj_name = str(image_id)

        def _sanitize(uri):
            return re.sub('//.*:.*@',
                          '//s3_store_secret_key:s3_store_access_key@',
                          uri)

        key = bucket_obj.get_key(obj_name)
        if key and key.exists():
            raise exception.Duplicate(_("S3 already has an image at "
                                        "location %s") %
                                      _sanitize(loc.get_uri()))

        msg = _("Adding image object to S3 using (s3_host=%(s3_host)s, "
                "access_key=%(access_key)s, bucket=%(bucket)s, "
                "key=%(obj_name)s)") % ({'s3_host': self.s3_host,
                                         'access_key': self.access_key,
                                         'bucket': self.bucket,
                                         'obj_name': obj_name})
        logger.debug(msg)

        key = bucket_obj.new_key(obj_name)

        # We need to wrap image_file, which is a reference to the
        # webob.Request.body_file, with a seekable file-like object,
        # otherwise the call to set_contents_from_file() will die
        # with an error about Input object has no method 'seek'. We
        # might want to call webob.Request.make_body_seekable(), but
        # unfortunately, that method copies the entire image into
        # memory and results in LP Bug #818292 occurring. So, here
        # we write temporary file in as memory-efficient manner as
        # possible and then supply the temporary file to S3. We also
        # take this opportunity to calculate the image checksum while
        # writing the tempfile, so we don't need to call key.compute_md5()
        msg = _("Writing request body file to temporary file "
                "for %s") % _sanitize(loc.get_uri())
        logger.debug(msg)

        tmpdir = self.s3_store_object_buffer_dir
        temp_file = tempfile.NamedTemporaryFile(dir=tmpdir)
        checksum = hashlib.md5()
        for chunk in utils.chunkreadable(image_file, self.CHUNKSIZE):
            checksum.update(chunk)
            temp_file.write(chunk)
        temp_file.flush()

        msg = (_("Uploading temporary file to S3 for %s") %
               _sanitize(loc.get_uri()))
        logger.debug(msg)

        # OK, now upload the data into the key
        key.set_contents_from_file(open(temp_file.name, 'r+b'), replace=False)
        size = key.size
        checksum_hex = checksum.hexdigest()

        logger.debug(_("Wrote %(size)d bytes to S3 key named %(obj_name)s "
                       "with checksum %(checksum_hex)s") % locals())

        return (loc.get_uri(), size, checksum_hex)

    def delete(self, location):
        """
        Takes a `glance.store.location.Location` object that indicates
        where to find the image file to delete

        :location `glance.store.location.Location` object, supplied
                  from glance.store.location.get_location_from_uri()

        :raises NotFound if image does not exist
        """
        loc = location.store_location
        from boto.s3.connection import S3Connection
        s3_conn = S3Connection(loc.accesskey, loc.secretkey,
                               host=loc.s3serviceurl,
                               is_secure=(loc.scheme == 's3+https'))
        bucket_obj = get_bucket(s3_conn, loc.bucket)

        # Close the key when we're through.
        key = get_key(bucket_obj, loc.key)

        msg = _("Deleting image object from S3 using (s3_host=%(s3_host)s, "
                "access_key=%(accesskey)s, bucket=%(bucket)s, "
                "key=%(obj_name)s)") % ({'s3_host': loc.s3serviceurl,
                                         'accesskey': loc.accesskey,
                                         'bucket': loc.bucket,
                                         'obj_name': loc.key})
        logger.debug(msg)

        return key.delete()
    return image_owner_map


def update_image_owners(image_owner_map, db, context):
    for (image_id, image_owner) in image_owner_map.items():
        db.image_update(context, image_id, {'owner': image_owner})
        LOG.info('Image %s successfully updated.' % image_id)


if __name__ == "__main__":
    config = cfg.CONF
    extra_cli_opts = [
        cfg.BoolOpt('dry-run',
                    help='Print output but do not make db changes.'),
        cfg.StrOpt('keystone-auth-uri',
                   help='Authentication endpoint'),
        cfg.StrOpt('keystone-admin-tenant-name',
                   help='Administrative user\'s tenant name'),
        cfg.StrOpt('keystone-admin-user',
                   help='Administrative user\'s id'),
        cfg.StrOpt('keystone-admin-password',
                   help='Administrative user\'s password'),
    ]
    config.register_cli_opts(extra_cli_opts)
    config(project='glance', prog='glance-registry')

    db_api.configure_db()
    context = glance.common.context.RequestContext(is_admin=True)

    auth_uri = config.keystone_auth_uri
    admin_tenant_name = config.keystone_admin_tenant_name
import eventlet
import eventlet.greenio
from eventlet.green import socket, ssl
import eventlet.wsgi
from paste import deploy
import routes
import routes.middleware
import webob.dec
import webob.exc

from glance.common import exception
from glance.common import utils
from glance.openstack.common import cfg

bind_opts = [
    cfg.StrOpt('bind_host', default='0.0.0.0'),
    cfg.IntOpt('bind_port'),
]

socket_opts = [
    cfg.IntOpt('backlog', default=4096),
    cfg.StrOpt('cert_file'),
    cfg.StrOpt('key_file'),
]

workers_opt = cfg.IntOpt('workers', default=0)


class WritableLogger(object):
    """A thin wrapper that responds to `write` and logs."""

    def __init__(self, logger, level=logging.DEBUG):
from glance.openstack.common import context
from glance.openstack.common.gettextutils import _
from glance.openstack.common import importutils
from glance.openstack.common import jsonutils
from glance.openstack.common import log as logging
from glance.openstack.common import timeutils

LOG = logging.getLogger(__name__)

notifier_opts = [
    cfg.MultiStrOpt('notification_driver',
                    default=[],
                    deprecated_name='list_notifier_drivers',
                    help='Driver or drivers to handle sending notifications'),
    cfg.StrOpt('default_notification_level',
               default='INFO',
               help='Default notification level for outgoing notifications'),
    cfg.StrOpt('default_publisher_id',
               default='$host',
               help='Default publisher_id for outgoing notifications'),
]

CONF = cfg.CONF
CONF.register_opts(notifier_opts)

WARN = 'WARN'
INFO = 'INFO'
ERROR = 'ERROR'
CRITICAL = 'CRITICAL'
DEBUG = 'DEBUG'
# under the License.

import json

import qpid.messaging

from glance.notifier import strategy
from glance.openstack.common import cfg
import glance.openstack.common.log as logging

LOG = logging.getLogger(__name__)

qpid_opts = [
    cfg.StrOpt('qpid_notification_exchange',
               default='glance',
               help='Qpid exchange for notifications'),
    cfg.StrOpt('qpid_notification_topic',
               default='glance_notifications',
               help='Qpid topic for notifications'),
    cfg.StrOpt('qpid_hostname',
               default='localhost',
               help='Qpid broker hostname'),
    cfg.StrOpt('qpid_port',
               default='5672',
               help='Qpid broker port'),
    cfg.StrOpt('qpid_username',
               default='',
               help='Username for qpid connection'),
    cfg.StrOpt('qpid_password',
               default='',
import logging.handlers
import os
import stat
import sys
import traceback

from glance.openstack.common import cfg
from glance.openstack.common.gettextutils import _
from glance.openstack.common import jsonutils
from glance.openstack.common import local
from glance.openstack.common import notifier

log_opts = [
    cfg.StrOpt('logging_context_format_string',
               default='%(asctime)s %(levelname)s %(name)s [%(request_id)s '
                       '%(user_id)s %(project_id)s] %(instance)s'
                       '%(message)s',
               help='format string to use for log messages with context'),
    cfg.StrOpt('logging_default_format_string',
               default='%(asctime)s %(levelname)s %(name)s [-] %(instance)s'
                       '%(message)s',
               help='format string to use for log messages without context'),
    cfg.StrOpt('logging_debug_format_suffix',
               default='from (pid=%(process)d) %(funcName)s '
                       '%(pathname)s:%(lineno)d',
               help='data to append to log format when level is DEBUG'),
    cfg.StrOpt('logging_exception_prefix',
               default='%(asctime)s TRACE %(name)s %(instance)s',
               help='prefix each line of exception output with this format'),
    cfg.ListOpt('default_log_levels',
                default=[
# under the License.

"""Policy Engine For Glance"""

import json
import os.path

from glance.common import exception
from glance.openstack.common import cfg
import glance.openstack.common.log as logging
from glance.openstack.common import policy

LOG = logging.getLogger(__name__)

policy_opts = (
    cfg.StrOpt('policy_file', default='policy.json'),
    cfg.StrOpt('policy_default_rule', default='default'),
)

CONF = cfg.CONF
CONF.register_opts(policy_opts)

DEFAULT_RULES = {
    'default': [[]],
    'manage_image_cache': [['role:admin']],
}


class Enforcer(object):
    """Responsible for loading and enforcing rules"""
class RabbitStrategy(strategy.Strategy):
    """A notifier that puts a message on a queue when called."""

    opts = [
        cfg.StrOpt('rabbit_host', default='localhost'),
        cfg.IntOpt('rabbit_port', default=5672),
        cfg.BoolOpt('rabbit_use_ssl', default=False),
        cfg.StrOpt('rabbit_userid', default='guest'),
        cfg.StrOpt('rabbit_password', default='guest'),
        cfg.StrOpt('rabbit_virtual_host', default='/'),
        cfg.StrOpt('rabbit_notification_exchange', default='glance'),
        cfg.StrOpt('rabbit_notification_topic',
                   default='glance_notifications'),
        cfg.StrOpt('rabbit_max_retries', default=0),
        cfg.StrOpt('rabbit_retry_backoff', default=2),
        cfg.StrOpt('rabbit_retry_max_backoff', default=30),
    ]

    def __init__(self, conf):
        """Initialize the rabbit notification strategy."""
        self._conf = conf
        self._conf.register_opts(self.opts)

        self.topic = self._conf.rabbit_notification_topic
        self.max_retries = self._conf.rabbit_max_retries
        # NOTE(comstud): When reading the config file, these values end
        # up being strings, and we need them as ints.
        self.retry_backoff = int(self._conf.rabbit_retry_backoff)
        self.retry_max_backoff = int(self._conf.rabbit_retry_max_backoff)

        self.connection = None
        self.retry_attempts = 0
        try:
            self.reconnect()
        except KombuMaxRetriesReached:
            pass

    def _close(self):
        """Close connection to rabbit."""
        try:
            self.connection.close()
        except self.connection_errors:
            pass
        self.connection = None

    def _connect(self):
        """Connect to rabbit.  Exceptions should be handled by the
        caller.
        """
        log_info = {}
        log_info['hostname'] = self._conf.rabbit_host
        log_info['port'] = self._conf.rabbit_port
        if self.connection:
            logger.info(_("Reconnecting to AMQP server on "
                          "%(hostname)s:%(port)d") % log_info)
            self._close()
        else:
            logger.info(_("Connecting to AMQP server on "
                          "%(hostname)s:%(port)d") % log_info)
        self.connection = kombu.connection.BrokerConnection(
            hostname=self._conf.rabbit_host,
            port=self._conf.rabbit_port,
            userid=self._conf.rabbit_userid,
            password=self._conf.rabbit_password,
            virtual_host=self._conf.rabbit_virtual_host,
            ssl=self._conf.rabbit_use_ssl)
        self.connection_errors = self.connection.connection_errors
        self.connection.connect()
        self.channel = self.connection.channel()

        self.exchange = kombu.entity.Exchange(
            channel=self.channel,
            type="topic",
            name=self._conf.rabbit_notification_exchange)

        # NOTE(jerdfelt): Normally the consumer would create the queues,
        # but we do this to ensure that messages don't get dropped if the
        # consumer is started after we do
        for priority in ["WARN", "INFO", "ERROR"]:
            routing_key = "%s.%s" % (self.topic, priority.lower())
            queue = kombu.entity.Queue(channel=self.channel,
                                       exchange=self.exchange,
                                       durable=True,
                                       name=routing_key,
                                       routing_key=routing_key)
            queue.declare()
        logger.info(_("Connected to AMQP server on "
                      "%(hostname)s:%(port)d") % log_info)

    def reconnect(self):
        """Handles reconnecting and re-establishing queues."""
        while True:
            self.retry_attempts += 1
            try:
                self._connect()
                return
            except self.connection_errors, e:
                pass
            except Exception, e:
                # NOTE(comstud): Unfortunately it's possible for amqplib
                # to return an error not covered by its transport
                # connection_errors in the case of a timeout waiting for
                # a protocol response.  (See paste link in LP888621 for
                # nova.)  So, we check all exceptions for 'timeout' in them
                # and try to reconnect in this case.
                if 'timeout' not in str(e):
                    raise

            log_info = {}
            log_info['err_str'] = str(e)
            log_info['max_retries'] = self.max_retries
            log_info['hostname'] = self._conf.rabbit_host
            log_info['port'] = self._conf.rabbit_port

            if self.max_retries and self.retry_attempts >= self.max_retries:
                logger.exception(_('Unable to connect to AMQP server on '
                                   '%(hostname)s:%(port)d after '
                                   '%(max_retries)d tries: %(err_str)s') %
                                 log_info)
                if self.connection:
                    self._close()
                raise KombuMaxRetriesReached

            sleep_time = self.retry_backoff * self.retry_attempts
            if self.retry_max_backoff:
                sleep_time = min(sleep_time, self.retry_max_backoff)

            log_info['sleep_time'] = sleep_time
            logger.exception(_('AMQP server on %(hostname)s:%(port)d is'
                               ' unreachable: %(err_str)s. Trying again in '
                               '%(sleep_time)d seconds.') % log_info)
            time.sleep(sleep_time)
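A standalone sketch of the backoff schedule computed in reconnect(), using the option defaults above (2-second backoff, 30-second cap); this is illustrative arithmetic only, not part of the strategy class:

# Illustrative only: mirrors sleep_time = retry_backoff * retry_attempts,
# capped at retry_max_backoff.
retry_backoff = 2
retry_max_backoff = 30
for retry_attempts in range(1, 18):
    sleep_time = min(retry_backoff * retry_attempts, retry_max_backoff)
    # attempts 1..15 sleep 2, 4, ..., 30 seconds; later attempts stay at 30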
IMAGE_ATTRS = BASE_MODEL_ATTRS | set(['name', 'status', 'size',
                                      'disk_format', 'container_format',
                                      'min_disk', 'min_ram', 'is_public',
                                      'location', 'checksum', 'owner',
                                      'protected'])

CONTAINER_FORMATS = ['ami', 'ari', 'aki', 'bare', 'ovf']
DISK_FORMATS = ['ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2', 'vdi',
                'iso']
STATUSES = ['active', 'saving', 'queued', 'killed', 'pending_delete',
            'deleted']

db_opts = [
    cfg.IntOpt('sql_idle_timeout', default=3600),
    cfg.StrOpt('sql_connection', default='sqlite:///glance.sqlite'),
    cfg.IntOpt('sql_max_retries', default=10),
    cfg.IntOpt('sql_retry_interval', default=1),
    cfg.BoolOpt('db_auto_create', default=True),
]


class MySQLPingListener(object):
    """
    Ensures that MySQL connections checked out of the pool are alive.

    Borrowed from:
    http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f
    """
import json
import time

import kombu.connection
import kombu.entity

from glance.notifier import strategy
from glance.openstack.common import cfg
import glance.openstack.common.log as logging

LOG = logging.getLogger(__name__)

rabbit_opts = [
    cfg.StrOpt('rabbit_host', default='localhost'),
    cfg.IntOpt('rabbit_port', default=5672),
    cfg.BoolOpt('rabbit_use_ssl', default=False),
    cfg.StrOpt('rabbit_userid', default='guest'),
    cfg.StrOpt('rabbit_password', default='guest'),
    cfg.StrOpt('rabbit_virtual_host', default='/'),
    cfg.StrOpt('rabbit_notification_exchange', default='glance'),
    cfg.StrOpt('rabbit_notification_topic',
               default='glance_notifications'),
    cfg.StrOpt('rabbit_max_retries', default=0),
    cfg.StrOpt('rabbit_retry_backoff', default=2),
    cfg.StrOpt('rabbit_retry_max_backoff', default=30),
]

CONF = cfg.CONF
CONF.register_opts(rabbit_opts)
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import socket
import uuid

from glance.common import exception
from glance.openstack.common import cfg
from glance.openstack.common import importutils
import glance.openstack.common.log as logging
from glance.openstack.common import timeutils

notifier_opts = [
    cfg.StrOpt('notifier_strategy', default='default'),
]

CONF = cfg.CONF
CONF.register_opts(notifier_opts)

LOG = logging.getLogger(__name__)

_STRATEGY_ALIASES = {
    "logging": "glance.notifier.notify_log.LoggingStrategy",
    "rabbit": "glance.notifier.notify_kombu.RabbitStrategy",
    "qpid": "glance.notifier.notify_qpid.QpidStrategy",
    "noop": "glance.notifier.notify_noop.NoopStrategy",
    "default": "glance.notifier.notify_noop.NoopStrategy",
}
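An illustrative lookup against the alias map above; the fallback of treating an unknown name as a full import path is an assumption of this sketch, not necessarily what the notifier module does:

# Hypothetical helper for illustration only.
def _resolve_strategy(name):
    return _STRATEGY_ALIASES.get(name, name)

# _resolve_strategy('rabbit') -> 'glance.notifier.notify_kombu.RabbitStrategy'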