def store_add_to_backend(image_id, data, size, store, context=None):
    """A wrapper around a call to each store's ``add()`` method.

    This gives glance a common place to check the output.

    :param image_id: The image add to which data is added
    :param data: The data to be stored
    :param size: The length of the data in bytes
    :param store: The store to which the data is being added
    :param context: Request context (accepted for interface parity;
                    not used by this wrapper)
    :return: The url location of the file,
             the size amount of data,
             the checksum of the data,
             the storage system's metadata dictionary for the location
    :raises exceptions.BackendException: if the driver returns metadata
             that is not a dict, or metadata that fails validation
    """
    (location, size, checksum, metadata) = store.add(image_id, data, size)
    if metadata is not None:
        if not isinstance(metadata, dict):
            # NOTE: str(), not utils.exception_to_str() -- neither the
            # store nor the metadata is an exception, so the exception
            # helper was the wrong tool for rendering them.  Also fixed
            # the doubled space in "invalid  metadata".
            msg = (_("The storage driver %(driver)s returned invalid "
                     "metadata %(metadata)s. This must be a dictionary type")
                   % dict(driver=str(store), metadata=str(metadata)))
            LOG.error(msg)
            raise exceptions.BackendException(msg)
        try:
            check_location_metadata(metadata)
        except exceptions.BackendException as e:
            e_msg = (_("A bad metadata structure was returned from the "
                       "%(driver)s storage driver: %(metadata)s. %(e)s.")
                     % dict(driver=str(store),
                            metadata=str(metadata),
                            e=utils.exception_to_str(e)))
            LOG.error(e_msg)
            raise exceptions.BackendException(e_msg)
    return (location, size, checksum, metadata)
def test_exception_to_str(self):
    """exception_to_str returns the message, or a class-name fallback
    when str() on the exception itself blows up.
    """
    class FakeException(Exception):
        # Simulates an exception whose own __str__ is broken.
        def __str__(self):
            raise UnicodeError()

    # Plain case: the exception's message comes straight through.
    result = utils.exception_to_str(Exception('error message'))
    self.assertEqual(result, 'error message')

    # Broken-__str__ case: fall back to the class-name template.
    result = utils.exception_to_str(FakeException('\xa5 error message'))
    expected = ("Caught '%(exception)s' exception."
                % {'exception': 'FakeException'})
    self.assertEqual(result, expected)
def _delete_partial(filepath, iid): try: os.unlink(filepath) except Exception as e: msg = _('Unable to remove partial image ' 'data for image %(iid)s: %(e)s') LOG.error(msg % dict(iid=iid, e=utils.exception_to_str(e)))
def create_bucket_if_missing(conf, bucket, s3_conn):
    """
    Creates a missing bucket in S3 if the
    ``s3_store_create_bucket_on_put`` option is set.

    :param conf: Configuration
    :param bucket: Name of bucket to create
    :param s3_conn: Connection to S3
    :raises glance_store.BackendException: if the bucket is missing and
            auto-creation is disabled, or if creating the bucket fails
    """
    from boto.exception import S3ResponseError
    try:
        s3_conn.get_bucket(bucket)
    except S3ResponseError as e:
        if e.status == http_client.NOT_FOUND:
            if conf.glance_store.s3_store_create_bucket_on_put:
                host = conf.glance_store.s3_store_host
                location = get_s3_location(host)
                try:
                    s3_conn.create_bucket(bucket, location=location)
                except S3ResponseError as e:
                    msg = (_("Failed to add bucket to S3.\n"
                             "Got error from S3: %s.") %
                           utils.exception_to_str(e))
                    raise glance_store.BackendException(msg)
            else:
                msg = (_("The bucket %(bucket)s does not exist in "
                         "S3. Please set the "
                         "s3_store_create_bucket_on_put option "
                         "to add bucket to S3 automatically.")
                       % {'bucket': bucket})
                raise glance_store.BackendException(msg)
        else:
            # BUG FIX: errors other than 404 (e.g. 403 Forbidden) used
            # to be silently swallowed here, so the caller proceeded as
            # if the bucket existed. Re-raise them, matching the Swift
            # driver's _create_container_if_missing behaviour.
            raise
def _create_container_if_missing(self, container, connection):
    """
    Creates a missing container in Swift if the
    ``swift_store_create_container_on_put`` option is set.

    :param container: Name of container to create
    :param connection: Connection to swift service
    :raises glance_store.BackendException: if the container is missing
            and auto-creation is disabled, or container creation fails
    """
    try:
        connection.head_container(container)
    except swiftclient.ClientException as e:
        if e.http_status == httplib.NOT_FOUND:
            if self.conf.glance_store.swift_store_create_container_on_put:
                try:
                    msg = (_LI("Creating swift container %(container)s")
                           % {'container': container})
                    LOG.info(msg)
                    connection.put_container(container)
                except swiftclient.ClientException as e:
                    msg = (_("Failed to add container to Swift.\n"
                             "Got error from Swift: %s.")
                           % cutils.exception_to_str(e))
                    raise glance_store.BackendException(msg)
            else:
                # BUG FIX: the adjacent literals used to concatenate to
                # "...optionto add container..." -- missing space added.
                msg = (_("The container %(container)s does not exist in "
                         "Swift. Please set the "
                         "swift_store_create_container_on_put option "
                         "to add container to Swift automatically.")
                       % {'container': container})
                raise glance_store.BackendException(msg)
        else:
            # Any non-404 error (auth failure, timeout, ...) is a real
            # problem -- propagate it to the caller.
            raise
def test_add_no_container_no_create(self):
    """
    Tests that adding an image with a non-existing container
    raises an appropriate exception
    """
    conf = copy.deepcopy(SWIFT_CONF)
    conf.update({
        'swift_store_user': '******',
        'swift_store_create_container_on_put': False,
        'swift_store_container': 'noexist',
    })
    self.config(**conf)
    reload(swift)

    self.store = Store(self.conf)
    self.store.configure()

    image_data = six.StringIO("nevergonnamakeit")

    global SWIFT_PUT_OBJECT_CALLS
    SWIFT_PUT_OBJECT_CALLS = 0

    # assertRaises alone would not let us inspect the message, and we
    # specifically want to verify the container-missing wording.
    caught = False
    try:
        self.store.add(str(uuid.uuid4()), image_data, 0)
    except BackendException as e:
        caught = True
        self.assertIn("container noexist does not exist "
                      "in Swift", utils.exception_to_str(e))
    self.assertTrue(caught)
    self.assertEqual(SWIFT_PUT_OBJECT_CALLS, 0)
def test_exception_to_str_ignore(self):
    """Undecodable bytes in the message are dropped by the "ignore"
    error handler (meaningful on Python 2 only).
    """
    if six.PY3:
        # On Python 3, exception messages are unicode strings, they are not
        # decoded from an encoding and so it's not possible to test the
        # "ignore" error handler
        self.skipTest("test specific to Python 2")

    converted = utils.exception_to_str(Exception('\xa5 error message'))
    self.assertEqual(converted, ' error message')
def swift_retry_iter(resp_iter, length, store, location, context):
    """Wrap a Swift response iterator with mid-stream retry support.

    Yields chunks from ``resp_iter``; if the stream ends (or a Swift
    error occurs) before ``length`` bytes have been delivered, the
    object is re-requested from offset ``bytes_read`` onward, up to
    ``swift_store_retry_get_count`` additional times.

    :param resp_iter: iterable of data chunks from Swift
    :param length: expected total byte count (0/None means unknown)
    :param store: the Store instance (supplies conf and _get_object)
    :param location: object location, passed through to _get_object
    :param context: request context, passed through to _get_object
    """
    if not length and isinstance(resp_iter, six.BytesIO):
        if six.PY3:
            # On Python 3, io.BytesIO does not have a len attribute, instead
            # go the end using seek to get the size of the file
            pos = resp_iter.tell()
            resp_iter.seek(0, 2)
            length = resp_iter.tell()
            resp_iter.seek(pos)
        else:
            # On Python 2, StringIO has a len attribute
            length = resp_iter.len
    # Fall back to the iterator's len attribute (if any) when length is
    # still unset; a length of 0 effectively disables the short-read
    # detection below.
    length = length if length else (resp_iter.len
                                    if hasattr(resp_iter, 'len') else 0)
    retries = 0
    bytes_read = 0
    while retries <= store.conf.glance_store.swift_store_retry_get_count:
        try:
            for chunk in resp_iter:
                yield chunk
                bytes_read += len(chunk)
        except swiftclient.ClientException as e:
            # Swallow the error here; the short-read check below decides
            # whether to retry or give up.
            LOG.warn(_("Swift exception raised %s")
                     % cutils.exception_to_str(e))

        if bytes_read != length:
            if retries == store.conf.glance_store.swift_store_retry_get_count:
                # terminate silently and let higher level decide
                LOG.error(_("Stopping Swift retries after %d "
                            "attempts") % retries)
                break
            else:
                retries += 1
                glance_conf = store.conf.glance_store
                retry_count = glance_conf.swift_store_retry_get_count
                LOG.info(_("Retrying Swift connection "
                           "(%(retries)d/%(max_retries)d) with "
                           "range=%(start)d-%(end)d") %
                         {'retries': retries,
                          'max_retries': retry_count,
                          'start': bytes_read,
                          'end': length})
                # Resume the GET from the byte offset already delivered.
                (_resp_headers, resp_iter) = store._get_object(
                    location, None, bytes_read, context=context)
        else:
            break
def _validate_metadata(self, metadata_file):
    """Validate metadata against json schema.

    If metadata is valid then cache metadata and use it when
    creating new image.

    :param metadata_file: JSON metadata file path
    :raises: BadStoreConfiguration exception if metadata is not valid.
    """
    try:
        with open(metadata_file, 'rb') as fp:
            parsed = jsonutils.load(fp)

        # A bare dict means a single mountpoint; normalise it to a
        # one-element list so the schema check below is uniform.
        if isinstance(parsed, dict):
            parsed = [parsed]

        # Validate metadata against json schema
        jsonschema.validate(parsed, MULTI_FILESYSTEM_METADATA_SCHEMA)
        glance_store.check_location_metadata(parsed)
        self.FILESYSTEM_STORE_METADATA = parsed
    except (jsonschema.exceptions.ValidationError,
            exceptions.BackendException, ValueError) as err:
        reason = (_('The JSON in the metadata file %(file)s is '
                    'not valid and it can not be used: '
                    '%(vee)s.') % dict(file=metadata_file,
                                       vee=utils.exception_to_str(err)))
        LOG.error(reason)
        raise exceptions.BadStoreConfiguration(store_name="filesystem",
                                               reason=reason)
    except IOError as err:
        reason = (_('The path for the metadata file %(file)s could '
                    'not be accessed: '
                    '%(ioe)s.') % dict(file=metadata_file,
                                       ioe=utils.exception_to_str(err)))
        LOG.error(reason)
        raise exceptions.BadStoreConfiguration(store_name="filesystem",
                                               reason=reason)
def configure(self):
    """
    Configure the Store to use the stored configuration options
    Any store that needs special configuration should implement
    this method.
    """
    try:
        self.configure_add()
        # Re-enable a previously disabled add(), if an earlier failed
        # configure() stashed the real method away in _add.
        self.add = getattr(self, '_add', self.add)
    except exceptions.BadStoreConfiguration as err:
        # Stash the real add() and swap in the disabled stub so callers
        # get a clear error instead of a half-configured upload.
        self._add = self.add
        self.add = self.add_disabled
        LOG.warn(_(u"Failed to configure store correctly: %s "
                   "Disabling add method.") % utils.exception_to_str(err))
def load_paste_app(app_name, flavor=None, conf_file=None):
    """
    Builds and returns a WSGI app from a paste config file.

    We assume the last config file specified in the supplied ConfigOpts
    object is the paste config file, if conf_file is None.

    :param app_name: name of the application to load
    :param flavor: name of the variant of the application to load
    :param conf_file: path to the paste config file
    :raises RuntimeError when config file cannot be located or
            application cannot be loaded from config file
    """
    # append the deployment flavor to the application name,
    # in order to identify the appropriate paste pipeline
    app_name += _get_deployment_flavor(flavor)

    if not conf_file:
        conf_file = _get_deployment_config_file()

    logger = logging.getLogger(__name__)
    try:
        logger.debug(_("Loading %(app_name)s from %(conf_file)s"),
                     {'conf_file': conf_file, 'app_name': app_name})

        app = deploy.loadapp("config:%s" % conf_file, name=app_name)

        # Log the options used when starting if we're in debug mode...
        if CONF.debug:
            CONF.log_opt_values(logger, logging.DEBUG)

        return app
    except (LookupError, ImportError) as exc:
        msg = (_("Unable to load %(app_name)s from "
                 "configuration file %(conf_file)s."
                 "\nGot: %(e)r") % {'app_name': app_name,
                                    'conf_file': conf_file,
                                    'e': utils.exception_to_str(exc)})
        logger.error(msg)
        raise RuntimeError(msg)
def configure(self):
    """
    Configure the store to use the stored configuration options
    and initialize capabilities based on current configuration.

    Any store that needs special configuration should implement
    this method.
    """
    try:
        self.configure_add()
    except exceptions.BadStoreConfiguration as err:
        # A misconfigured store loses write access but remains usable
        # for reads; record why at warning level.
        self.unset_capabilities(capabilities.WRITE_ACCESS)
        LOG.warn(_(u"Failed to configure store correctly: %s "
                   "Disabling add method.") % utils.exception_to_str(err))
    self.update_capabilities()
def swift_retry_iter(resp_iter, length, store, location, context):
    """Wrap a Swift response iterator with mid-stream retry support.

    Yields chunks from ``resp_iter``; if the stream ends (or a Swift
    error occurs) before ``length`` bytes have been delivered, the
    object is re-requested from offset ``bytes_read`` onward, up to
    ``swift_store_retry_get_count`` additional times.

    :param resp_iter: iterable of data chunks from Swift
    :param length: expected total byte count (0/None means unknown)
    :param store: the Store instance (supplies conf and _get_object)
    :param location: object location, passed through to _get_object
    :param context: request context, passed through to _get_object
    """
    # Fall back to the iterator's len attribute (if any) when length is
    # unset; a length of 0 effectively disables the short-read check.
    length = length if length else (resp_iter.len
                                    if hasattr(resp_iter, 'len') else 0)
    retries = 0
    bytes_read = 0
    while retries <= store.conf.glance_store.swift_store_retry_get_count:
        try:
            for chunk in resp_iter:
                yield chunk
                bytes_read += len(chunk)
        except swiftclient.ClientException as e:
            # Swallow the error here; the short-read check below decides
            # whether to retry or give up.
            LOG.warn(_("Swift exception raised %s")
                     % cutils.exception_to_str(e))

        if bytes_read != length:
            if retries == store.conf.glance_store.swift_store_retry_get_count:
                # terminate silently and let higher level decide
                LOG.error(_("Stopping Swift retries after %d "
                            "attempts") % retries)
                break
            else:
                retries += 1
                glance_conf = store.conf.glance_store
                retry_count = glance_conf.swift_store_retry_get_count
                LOG.info(_("Retrying Swift connection "
                           "(%(retries)d/%(max_retries)d) with "
                           "range=%(start)d-%(end)d") %
                         {'retries': retries,
                          'max_retries': retry_count,
                          'start': bytes_read,
                          'end': length})
                # Resume the GET from the byte offset already delivered.
                (_resp_headers, resp_iter) = store._get_object(
                    location, None, bytes_read, context=context)
        else:
            break
def add(self, image_id, image_file, image_size,
        connection=None, context=None):
    """Upload image data to Swift.

    Small images (known size below ``large_object_size``) are sent with
    a single PUT; everything else is uploaded as numbered segments
    followed by an ``X-Object-Manifest`` manifest object.

    :param image_id: id used to build the object's location
    :param image_file: file-like object containing the image data
    :param image_size: byte length of the data, or 0 when unknown
    :param connection: optional pre-established Swift connection
    :param context: request context used to create the connection
    :return: tuple of (location URI, size, checksum, metadata dict)
    :raises exceptions.Duplicate: if Swift reports 409 CONFLICT
    :raises glance_store.BackendException: on any other Swift error
    """
    location = self.create_location(image_id, context=context)
    if not connection:
        connection = self.get_connection(location, context=context)
    self._create_container_if_missing(location.container, connection)
    LOG.debug("Adding image object '%(obj_name)s' "
              "to Swift" % dict(obj_name=location.obj))
    try:
        if image_size > 0 and image_size < self.large_object_size:
            # Image size is known, and is less than large_object_size.
            # Send to Swift with regular PUT.
            obj_etag = connection.put_object(location.container,
                                             location.obj, image_file,
                                             content_length=image_size)
        else:
            # Write the image into Swift in chunks.
            chunk_id = 1
            if image_size > 0:
                total_chunks = str(int(
                    math.ceil(float(image_size) /
                              float(self.large_object_chunk_size))))
            else:
                # image_size == 0 is when we don't know the size
                # of the image. This can occur with older clients
                # that don't inspect the payload size.
                LOG.debug("Cannot determine image size. Adding as a "
                          "segmented object to Swift.")
                total_chunks = '?'

            # Running MD5 over the full payload, fed chunk-by-chunk via
            # ChunkReader, so the final checksum covers the whole image.
            checksum = hashlib.md5()
            written_chunks = []
            combined_chunks_size = 0
            while True:
                chunk_size = self.large_object_chunk_size
                if image_size == 0:
                    # Unknown size: stream without a content-length and
                    # stop when a zero-byte read occurs (see below).
                    content_length = None
                else:
                    left = image_size - combined_chunks_size
                    if left == 0:
                        break
                    if chunk_size > left:
                        chunk_size = left
                    content_length = chunk_size

                chunk_name = "%s-%05d" % (location.obj, chunk_id)
                reader = ChunkReader(image_file, checksum, chunk_size)
                try:
                    chunk_etag = connection.put_object(
                        location.container, chunk_name, reader,
                        content_length=content_length)
                    written_chunks.append(chunk_name)
                except Exception:
                    # Delete orphaned segments from swift backend
                    with excutils.save_and_reraise_exception():
                        LOG.exception(_("Error during chunked upload to "
                                        "backend, deleting stale chunks"))
                        self._delete_stale_chunks(connection,
                                                  location.container,
                                                  written_chunks)

                bytes_read = reader.bytes_read
                msg = ("Wrote chunk %(chunk_name)s (%(chunk_id)d/"
                       "%(total_chunks)s) of length %(bytes_read)d "
                       "to Swift returning MD5 of content: "
                       "%(chunk_etag)s" %
                       {'chunk_name': chunk_name,
                        'chunk_id': chunk_id,
                        'total_chunks': total_chunks,
                        'bytes_read': bytes_read,
                        'chunk_etag': chunk_etag})
                LOG.debug(msg)

                if bytes_read == 0:
                    # Delete the last chunk, because it's of zero size.
                    # This will happen if size == 0.
                    LOG.debug("Deleting final zero-length chunk")
                    connection.delete_object(location.container,
                                             chunk_name)
                    break

                chunk_id += 1
                combined_chunks_size += bytes_read

            # In the case we have been given an unknown image size,
            # set the size to the total size of the combined chunks.
            if image_size == 0:
                image_size = combined_chunks_size

            # Now we write the object manifest and return the
            # manifest's etag...
            manifest = "%s/%s-" % (location.container, location.obj)
            headers = {'ETag': hashlib.md5("").hexdigest(),
                       'X-Object-Manifest': manifest}

            # The ETag returned for the manifest is actually the
            # MD5 hash of the concatenated checksums of the strings
            # of each chunk...so we ignore this result in favour of
            # the MD5 of the entire image file contents, so that
            # users can verify the image file contents accordingly
            connection.put_object(location.container, location.obj,
                                  None, headers=headers)
            obj_etag = checksum.hexdigest()

        # NOTE: We return the user and key here! Have to because
        # location is used by the API server to return the actual
        # image data. We *really* should consider NOT returning
        # the location attribute from GET /images/<ID> and
        # GET /images/details
        if sutils.is_multiple_swift_store_accounts_enabled(self.conf):
            include_creds = False
        else:
            include_creds = True
        return (location.get_uri(credentials_included=include_creds),
                image_size, obj_etag, {})
    except swiftclient.ClientException as e:
        if e.http_status == httplib.CONFLICT:
            msg = _("Swift already has an image at this location")
            raise exceptions.Duplicate(message=msg)

        msg = (_(u"Failed to add object to Swift.\n"
                 "Got error from Swift: %s.") % cutils.exception_to_str(e))
        LOG.error(msg)
        raise glance_store.BackendException(msg)