def parse_uri(self, uri):
    """Parse an rbd:// URI into fsid, pool, image and snapshot attributes.

    Accepts either ``rbd://<image>`` (one component) or
    ``rbd://<fsid>/<pool>/<image>/<snapshot>`` (four, percent-encoded).

    :param uri: the location string to parse
    :raises exceptions.BadStoreUri: on non-ascii input, a wrong number of
        components, or empty components
    """
    scheme = 'rbd://'
    self.validate_schemas(uri, valid_schemas=(scheme,))
    # librbd cannot handle unicode, so force the URI down to plain ascii
    try:
        plain_uri = str(uri)
    except UnicodeError:
        reason = _('URI contains non-ascii characters')
        LOG.info(_LI("Invalid URI: %s") % reason)
        raise exceptions.BadStoreUri(message=reason)
    components = plain_uri[len(scheme):].split('/')
    count = len(components)
    if count == 1:
        # short form: rbd://<image>
        self.fsid = None
        self.pool = None
        self.image = components[0]
        self.snapshot = None
    elif count == 4:
        # long form: rbd://<fsid>/<pool>/<image>/<snapshot>
        self.fsid, self.pool, self.image, self.snapshot = [
            urllib.parse.unquote(part) for part in components]
    else:
        reason = _('URI must have exactly 1 or 4 components')
        LOG.info(_LI("Invalid URI: %s") % reason)
        raise exceptions.BadStoreUri(message=reason)
    if '' in components:
        reason = _('URI cannot contain empty components')
        LOG.info(_LI("Invalid URI: %s") % reason)
        raise exceptions.BadStoreUri(message=reason)
def get_connection(self):
    """Return a swift client connection, refreshing the token if needed.

    Invariant: when ``self.allow_reauth`` is False the same connection is
    returned on every call (so it may eventually expire).  When it is
    True, the keystone auth ref is checked first and, if the token is
    about to expire within ``swift_store_expire_soon_interval``, a fresh
    token is requested and a new swift connection is built from it.
    """
    if not self.allow_reauth:
        # Re-authentication is disabled (e.g. trusts could not be set up
        # for multi-tenant, or auth_version is not 3); hand back whatever
        # connection we already hold.
        return self.connection
    session = self.client.session
    auth_ref = session.auth.get_auth_ref(session)
    # keystone decides whether the token is expired or close to expiring
    expire_window = (self.store.conf.glance_store.
                     swift_store_expire_soon_interval)
    if auth_ref.will_expire_soon(expire_window):
        LOG.info(_LI("Requesting new token for swift connection."))
        # the session/client were provided by the store for exactly this
        auth_token = session.get_auth_headers().get(self.AUTH_HEADER_NAME)
        LOG.info(_LI("Token has been successfully requested. "
                     "Refreshing swift connection."))
        # rebuild the swiftclient connection around the fresh token
        self.connection = self.store.get_store_connection(
            auth_token, self.storage_url)
    return self.connection
def parse_uri(self, uri):
    """Populate fsid/pool/image/snapshot from an rbd:// URI.

    Either ``rbd://<image>`` or the percent-encoded four-part form
    ``rbd://<fsid>/<pool>/<image>/<snapshot>`` is accepted.

    :param uri: the location string to parse
    :raises exceptions.BadStoreUri: for non-ascii, wrong component count,
        or empty components
    """
    def bad_uri(reason):
        # log-and-raise used by every invalid-URI branch below
        LOG.info(_LI("Invalid URI: %s") % reason)
        raise exceptions.BadStoreUri(message=reason)

    prefix = 'rbd://'
    self.validate_schemas(uri, valid_schemas=(prefix,))
    # normalise to plain ascii first: librbd does not handle unicode
    try:
        ascii_uri = str(uri)
    except UnicodeError:
        bad_uri(_('URI contains non-ascii characters'))
    parts = ascii_uri[len(prefix):].split('/')
    if len(parts) == 4:
        # rbd://fsid/pool/image/snapshot
        self.fsid, self.pool, self.image, self.snapshot = map(
            urllib.parse.unquote, parts)
    elif len(parts) == 1:
        # rbd://image
        self.fsid, self.pool, self.image, self.snapshot = (
            None, None, parts[0], None)
    else:
        bad_uri(_('URI must have exactly 1 or 4 components'))
    if not all(parts):
        bad_uri(_('URI cannot contain empty components'))
def get_connection(self):
    """Get swift client connection.

    Returns swift client connection. If allow_reauth is True and
    connection token is going to expire soon then the method returns
    updated connection.

    The method invariant is the following: if self.allow_reauth is
    False then the method returns the same connection for every call.
    So the connection may expire. If self.allow_reauth is True the
    returned swift connection is always valid and cannot expire at
    least for swift_store_expire_soon_interval.
    """
    if self.allow_reauth:
        # we are refreshing token only and if only connection manager
        # re-authentication is allowed. Token refreshing is setup by
        # connection manager users. Also we disable re-authentication
        # if there is not way to execute it (cannot initialize trusts for
        # multi-tenant or auth_version is not 3)
        auth_ref = self.client.session.auth.get_auth_ref(
            self.client.session)
        # if connection token is going to expire soon (keystone checks
        # whether the token is going to expire or has expired already)
        if auth_ref.will_expire_soon(
            self.store.conf.glance_store.swift_store_expire_soon_interval
        ):
            LOG.info(_LI("Requesting new token for swift connection."))
            # request new token with session and client provided by store
            auth_token = self.client.session.get_auth_headers().get(
                self.AUTH_HEADER_NAME)
            LOG.info(_LI("Token has been successfully requested. "
                         "Refreshing swift connection."))
            # initialize new swiftclient connection with fresh token;
            # self.connection is replaced so future calls reuse it
            self.connection = self.store.get_store_connection(
                auth_token, self.storage_url)
    return self.connection
def parse_uri(self, uri):
    """Split an http(s) URL into scheme/credentials/netloc/path attributes.

    Works around a quirk where, in Python 2.6.1+, credentials embedded in
    the URL are parsed into the path component rather than the netloc
    (see lp659445 and Python issue7904).

    :param uri: the http(s) location string to parse
    :raises exceptions.BadStoreUri: for malformed credentials, a missing
        address, or a non-numeric port
    """
    parsed = urllib.parse.urlparse(uri)
    self.validate_schemas(uri, valid_schemas=('https://', 'http://'))
    self.scheme = parsed.scheme
    netloc = parsed.netloc
    path = parsed.path
    creds = None
    try:
        if '@' in netloc:
            creds, netloc = netloc.split('@')
    except ValueError:
        # Python 2.6.1 compat: credentials ended up in the path component
        # instead (lp659445, Python issue7904)
        creds = None
        if '@' in path:
            creds, path = path.split('@')
    if not creds:
        self.user = None
    else:
        try:
            self.user, self.password = creds.split(':')
        except ValueError:
            reason = _("Credentials are not well-formatted.")
            LOG.info(reason)
            raise exceptions.BadStoreUri(message=reason)
    if not netloc:
        LOG.info(_LI("No address specified in HTTP URL"))
        raise exceptions.BadStoreUri(uri=uri)
    # An IPv6 netloc looks like [1223:0:0:..]:<port>; choose the right
    # delimiter so the port is peeled off correctly for IPv4 and IPv6.
    sep = "]:" if netloc.count(":") > 1 else ":"
    _host, _dlm, port = netloc.partition(sep)
    # a port, when present, must be purely numeric
    if port and not port.isdigit():
        raise exceptions.BadStoreUri(uri=uri)
    self.netloc = netloc
    self.path = path
def parse_uri(self, uri):
    """
    Parse URLs.

    This method fixes an issue where credentials specified
    in the URL are interpreted differently in Python 2.6.1+ than
    prior versions of Python.

    :param uri: the http(s) location string to parse
    :raises exceptions.BadStoreUri: if the scheme is not http(s), the
        credentials are malformed, the address is missing, or the port
        is non-numeric
    """
    pieces = urllib.parse.urlparse(uri)
    # Previously: ``assert pieces.scheme in ('https', 'http')``.  That is
    # stripped under ``python -O`` (silently accepting any scheme) and
    # raises AssertionError rather than the BadStoreUri callers expect;
    # validate the scheme the same way the sibling store does.
    self.validate_schemas(uri, valid_schemas=('https://', 'http://'))
    self.scheme = pieces.scheme
    netloc = pieces.netloc
    path = pieces.path
    try:
        if '@' in netloc:
            creds, netloc = netloc.split('@')
        else:
            creds = None
    except ValueError:
        # Python 2.6.1 compat
        # see lp659445 and Python issue7904
        if '@' in path:
            creds, path = path.split('@')
        else:
            creds = None
    if creds:
        try:
            self.user, self.password = creds.split(':')
        except ValueError:
            reason = _("Credentials are not well-formatted.")
            LOG.info(reason)
            raise exceptions.BadStoreUri(message=reason)
    else:
        self.user = None
    if netloc == '':
        LOG.info(_LI("No address specified in HTTP URL"))
        raise exceptions.BadStoreUri(uri=uri)
    else:
        # IPv6 address has the following format [1223:0:0:..]:<some_port>
        # we need to be sure that we are validating port in both IPv4,IPv6
        delimiter = "]:" if netloc.count(":") > 1 else ":"
        host, dlm, port = netloc.partition(delimiter)
        # if port is present in location then validate port format
        if port and not port.isdigit():
            raise exceptions.BadStoreUri(uri=uri)
    self.netloc = netloc
    self.path = path
def __exit__(self, exc_type, exc_val, exc_tb): if self._client and self.client.trust_id: # client has been initialized - need to cleanup resources LOG.info(_LI("Revoking trust %s"), self.client.trust_id) self.client.trusts.delete(self.client.trust_id)
def add(self, image_id, image_file, image_size, hashing_algo, context=None,
        verifier=None):
    """
    Stores an image file with supplied identifier to the backend
    storage system and returns a tuple containing information
    about the stored image.

    :param image_id: The opaque image identifier
    :param image_file: The image data to write, as a file-like object
    :param image_size: The size of the image data to write, in bytes
    :param hashing_algo: A hashlib algorithm identifier (string)
    :param context: The request context
    :param verifier: An object used to verify signatures for images
    :returns: tuple of: (1) URL in backing store, (2) bytes written,
              (3) checksum, (4) multihash value, and (5) a dictionary
              with storage system specific information
    :raises: `glance_store.exceptions.Duplicate` if the image already
             exists
    """
    self._check_context(context, require_tenant=True)
    client = self.get_cinderclient(context)
    # two digests are maintained in parallel while streaming: the
    # configurable multihash and the legacy md5 checksum
    os_hash_value = utils.get_hasher(hashing_algo, False)
    checksum = utils.get_hasher('md5', False)
    bytes_written = 0
    # round up to whole GB, since the volume is created in GB units
    size_gb = int(math.ceil(float(image_size) / units.Gi))
    if size_gb == 0:
        size_gb = 1
    name = "image-%s" % image_id
    owner = context.project_id
    metadata = {'glance_image_id': image_id,
                'image_size': str(image_size),
                'image_owner': owner}
    volume_type = self.store_conf.cinder_volume_type
    LOG.debug('Creating a new volume: image_size=%d size_gb=%d type=%s',
              image_size, size_gb, volume_type or 'None')
    if image_size == 0:
        LOG.info(_LI("Since image size is zero, we will be doing "
                     "resize-before-write for each GB which "
                     "will be considerably slower than normal."))
    volume = client.volumes.create(size_gb, name=name, metadata=metadata,
                                   volume_type=volume_type)
    volume = self._wait_volume_status(volume, 'creating', 'available')
    # the backend may have rounded the size up; trust the actual size
    size_gb = volume.size

    failed = True
    need_extend = True
    buf = None
    try:
        # The outer loop re-opens the volume after each extend.  `buf`
        # carries over the chunk that did not fit in the previous pass,
        # so it is written first after seeking back to bytes_written.
        while need_extend:
            with self._open_cinder_volume(client, volume, 'wb') as f:
                f.seek(bytes_written)
                if buf:
                    f.write(buf)
                    bytes_written += len(buf)
                while True:
                    buf = image_file.read(self.WRITE_CHUNKSIZE)
                    if not buf:
                        # input exhausted: no further extend needed
                        need_extend = False
                        break
                    os_hash_value.update(buf)
                    checksum.update(buf)
                    if verifier:
                        verifier.update(buf)
                    # when the size was unknown (0) and the next chunk
                    # would overflow the volume, break out to extend it
                    if (bytes_written + len(buf) > size_gb * units.Gi
                            and image_size == 0):
                        break
                    f.write(buf)
                    bytes_written += len(buf)

            if need_extend:
                size_gb += 1
                LOG.debug("Extending volume %(volume_id)s to %(size)s GB.",
                          {'volume_id': volume.id, 'size': size_gb})
                volume.extend(volume, size_gb)
                try:
                    volume = self._wait_volume_status(volume,
                                                      'extending',
                                                      'available')
                    size_gb = volume.size
                except exceptions.BackendException:
                    raise exceptions.StorageFull()

        failed = False
    except IOError as e:
        # Convert IOError reasons to Glance Store exceptions
        errors = {errno.EFBIG: exceptions.StorageFull(),
                  errno.ENOSPC: exceptions.StorageFull(),
                  errno.EACCES: exceptions.StorageWriteDenied()}
        raise errors.get(e.errno, e)
    finally:
        if failed:
            # best-effort cleanup of the partially-written volume
            LOG.error(_LE("Failed to write to volume %(volume_id)s."),
                      {'volume_id': volume.id})
            try:
                volume.delete()
            except Exception:
                LOG.exception(_LE('Failed to delete of volume '
                                  '%(volume_id)s.'),
                              {'volume_id': volume.id})

    if image_size == 0:
        # the real size is only known after streaming; record it now
        metadata.update({'image_size': str(bytes_written)})
        volume.update_all_metadata(metadata)
    volume.update_readonly_flag(volume, True)

    hash_hex = os_hash_value.hexdigest()
    checksum_hex = checksum.hexdigest()

    LOG.debug("Wrote %(bytes_written)d bytes to volume %(volume_id)s "
              "with checksum %(checksum_hex)s.",
              {'bytes_written': bytes_written,
               'volume_id': volume.id,
               'checksum_hex': checksum_hex})

    image_metadata = {}
    location_url = 'cinder://%s' % volume.id
    if self.backend_group:
        # multi-store deployment: tag the location with the store name
        image_metadata['store'] = u"%s" % self.backend_group
        location_url = 'cinder://%s/%s' % (self.backend_group,
                                           volume.id)

    return (location_url,
            bytes_written,
            checksum_hex,
            hash_hex,
            image_metadata)
def __exit__(self, exc_type, exc_val, exc_tb):
    """Context-manager exit: revoke any trust created for this client.

    :param exc_type: exception type raised in the ``with`` body, or None
    :param exc_val: the exception instance, or None
    :param exc_tb: the traceback, or None
    """
    # _client is presumably the lazy backing attribute for the ``client``
    # property — TODO confirm; if it was never created there is nothing
    # to revoke.  Returning None lets with-body exceptions propagate.
    if self._client and self.client.trust_id:
        # client has been initialized - need to cleanup resources
        LOG.info(_LI("Revoking trust %s"), self.client.trust_id)
        self.client.trusts.delete(self.client.trust_id)
def add(self, image_id, image_file, image_size, context=None,
        verifier=None):
    """
    Stores an image file with supplied identifier to the backend
    storage system and returns a tuple containing information
    about the stored image.

    :param image_id: The opaque image identifier
    :param image_file: The image data to write, as a file-like object
    :param image_size: The size of the image data to write, in bytes
    :param context: The request context
    :param verifier: An object used to verify signatures for images
    :retval tuple of URL in backing store, bytes written, checksum
            and a dictionary with storage system specific information
    :raises `glance_store.exceptions.Duplicate` if the image already
            existed
    """
    self._check_context(context, require_tenant=True)
    client = get_cinderclient(self.conf, context)
    checksum = hashlib.md5()
    bytes_written = 0
    # Ceiling division in pure integer arithmetic.  The previous
    # expression, int((image_size + units.Gi - 1) / units.Gi), performed
    # a float true-division and truncated the result, which can round
    # incorrectly once the numerator exceeds 2**53; `//` is exact for
    # any non-negative byte count and yields the same values otherwise.
    size_gb = (image_size + units.Gi - 1) // units.Gi
    if size_gb == 0:
        size_gb = 1
    name = "image-%s" % image_id
    owner = context.tenant
    metadata = {'glance_image_id': image_id,
                'image_size': str(image_size),
                'image_owner': owner}
    LOG.debug('Creating a new volume: image_size=%d size_gb=%d',
              image_size, size_gb)
    if image_size == 0:
        LOG.info(_LI("Since image size is zero, we will be doing "
                     "resize-before-write for each GB which "
                     "will be considerably slower than normal."))
    volume = client.volumes.create(size_gb, name=name, metadata=metadata)
    volume = self._wait_volume_status(volume, 'creating', 'available')

    failed = True
    need_extend = True
    buf = None
    try:
        # The outer loop re-opens the volume after each 1 GB extend.
        # `buf` carries over the chunk that did not fit in the previous
        # pass, so it is written first after seeking to bytes_written.
        while need_extend:
            with self._open_cinder_volume(client, volume, 'wb') as f:
                f.seek(bytes_written)
                if buf:
                    f.write(buf)
                    bytes_written += len(buf)
                while True:
                    buf = image_file.read(self.WRITE_CHUNKSIZE)
                    if not buf:
                        # input exhausted: no further extend needed
                        need_extend = False
                        break
                    checksum.update(buf)
                    if verifier:
                        verifier.update(buf)
                    # when the size was unknown (0) and the next chunk
                    # would overflow the volume, break out to extend it
                    if (bytes_written + len(buf) > size_gb * units.Gi
                            and image_size == 0):
                        break
                    f.write(buf)
                    bytes_written += len(buf)

            if need_extend:
                size_gb += 1
                LOG.debug("Extending volume %(volume_id)s to %(size)s GB.",
                          {'volume_id': volume.id, 'size': size_gb})
                volume.extend(volume, size_gb)
                try:
                    volume = self._wait_volume_status(volume,
                                                      'extending',
                                                      'available')
                except exceptions.BackendException:
                    raise exceptions.StorageFull()

        failed = False
    except IOError as e:
        # Convert IOError reasons to Glance Store exceptions
        errors = {errno.EFBIG: exceptions.StorageFull(),
                  errno.ENOSPC: exceptions.StorageFull(),
                  errno.EACCES: exceptions.StorageWriteDenied()}
        raise errors.get(e.errno, e)
    finally:
        if failed:
            # best-effort cleanup of the partially-written volume
            LOG.error(_LE("Failed to write to volume %(volume_id)s."),
                      {'volume_id': volume.id})
            try:
                volume.delete()
            except Exception:
                LOG.exception(_LE('Failed to delete of volume '
                                  '%(volume_id)s.'),
                              {'volume_id': volume.id})

    if image_size == 0:
        # the real size is only known after streaming; record it now
        metadata.update({'image_size': str(bytes_written)})
        volume.update_all_metadata(metadata)
    volume.update_readonly_flag(volume, True)

    checksum_hex = checksum.hexdigest()

    LOG.debug("Wrote %(bytes_written)d bytes to volume %(volume_id)s "
              "with checksum %(checksum_hex)s.",
              {'bytes_written': bytes_written,
               'volume_id': volume.id,
               'checksum_hex': checksum_hex})

    return ('cinder://%s' % volume.id, bytes_written, checksum_hex, {})
def add(self, image_id, image_file, image_size, context=None,
        verifier=None):
    """
    Stores an image file with supplied identifier to the backend
    storage system and returns a tuple containing information
    about the stored image.

    :param image_id: The opaque image identifier
    :param image_file: The image data to write, as a file-like object
    :param image_size: The size of the image data to write, in bytes
    :param context: The request context
    :param verifier: An object used to verify signatures for images
    :retval tuple of URL in backing store, bytes written, checksum
            and a dictionary with storage system specific information
    :raises `glance_store.exceptions.Duplicate` if the image already
            existed
    """
    self._check_context(context, require_tenant=True)
    client = get_cinderclient(self.conf, context)
    checksum = hashlib.md5()
    bytes_written = 0
    # round up to whole GB, since the volume is created in GB units
    size_gb = int(math.ceil(float(image_size) / units.Gi))
    if size_gb == 0:
        size_gb = 1
    name = "image-%s" % image_id
    owner = context.tenant
    metadata = {'glance_image_id': image_id,
                'image_size': str(image_size),
                'image_owner': owner}
    LOG.debug('Creating a new volume: image_size=%d size_gb=%d',
              image_size, size_gb)
    if image_size == 0:
        LOG.info(_LI("Since image size is zero, we will be doing "
                     "resize-before-write for each GB which "
                     "will be considerably slower than normal."))
    volume = client.volumes.create(size_gb, name=name, metadata=metadata)
    volume = self._wait_volume_status(volume, 'creating', 'available')

    failed = True
    need_extend = True
    buf = None
    try:
        # The outer loop re-opens the volume after each 1 GB extend.
        # `buf` carries over the chunk that did not fit in the previous
        # pass, so it is written first after seeking to bytes_written.
        while need_extend:
            with self._open_cinder_volume(client, volume, 'wb') as f:
                f.seek(bytes_written)
                if buf:
                    f.write(buf)
                    bytes_written += len(buf)
                while True:
                    buf = image_file.read(self.WRITE_CHUNKSIZE)
                    if not buf:
                        # input exhausted: no further extend needed
                        need_extend = False
                        break
                    checksum.update(buf)
                    if verifier:
                        verifier.update(buf)
                    # when the size was unknown (0) and the next chunk
                    # would overflow the volume, break out to extend it
                    if (bytes_written + len(buf) > size_gb * units.Gi
                            and image_size == 0):
                        break
                    f.write(buf)
                    bytes_written += len(buf)

            if need_extend:
                size_gb += 1
                LOG.debug("Extending volume %(volume_id)s to %(size)s GB.",
                          {'volume_id': volume.id, 'size': size_gb})
                volume.extend(volume, size_gb)
                try:
                    volume = self._wait_volume_status(volume,
                                                      'extending',
                                                      'available')
                except exceptions.BackendException:
                    raise exceptions.StorageFull()

        failed = False
    except IOError as e:
        # Convert IOError reasons to Glance Store exceptions
        errors = {errno.EFBIG: exceptions.StorageFull(),
                  errno.ENOSPC: exceptions.StorageFull(),
                  errno.EACCES: exceptions.StorageWriteDenied()}
        raise errors.get(e.errno, e)
    finally:
        if failed:
            # best-effort cleanup of the partially-written volume
            LOG.error(_LE("Failed to write to volume %(volume_id)s."),
                      {'volume_id': volume.id})
            try:
                volume.delete()
            except Exception:
                LOG.exception(_LE('Failed to delete of volume '
                                  '%(volume_id)s.'),
                              {'volume_id': volume.id})

    if image_size == 0:
        # the real size is only known after streaming; record it now
        metadata.update({'image_size': str(bytes_written)})
        volume.update_all_metadata(metadata)
    volume.update_readonly_flag(volume, True)

    checksum_hex = checksum.hexdigest()

    LOG.debug("Wrote %(bytes_written)d bytes to volume %(volume_id)s "
              "with checksum %(checksum_hex)s.",
              {'bytes_written': bytes_written,
               'volume_id': volume.id,
               'checksum_hex': checksum_hex})

    return ('cinder://%s' % volume.id, bytes_written, checksum_hex, {})