def put_container(self, container):
    """Create the container if needed.

    A HEAD request probes for the container; on a 404 the container is
    created, pinning the configured storage policy when one is set.  A
    storage policy cannot be enforced on a pre-existing container.
    """
    try:
        self.conn.head_container(container)
        # Container already exists; nothing to do.
        return
    except swift_exc.ClientException as e:
        if e.http_status != 404:
            LOG.warning("Failed to HEAD container to determine if it "
                        "exists and should be created.")
            raise exception.SwiftConnectionFailed(reason=e)
    except socket.error as err:
        raise exception.SwiftConnectionFailed(reason=err)

    # The HEAD returned 404: create the container, applying the
    # configured storage policy if one is set.
    storage_policy = CONF.backup_swift_create_storage_policy
    headers = {'X-Storage-Policy': storage_policy} if storage_policy else None
    try:
        self.conn.put_container(container, headers=headers)
    except socket.error as err:
        raise exception.SwiftConnectionFailed(reason=err)
def put_container(self, container):
    """Create the container if needed. No failure if it pre-exists."""
    # Swift's PUT container call is idempotent, so no existence check
    # is required; only transport-level errors are surfaced.
    try:
        self.conn.put_container(container)
    except socket.error as sock_err:
        raise exception.SwiftConnectionFailed(reason=sock_err)
def read(self):
    """Download and return the full body of this object from swift."""
    try:
        # get_object returns (response headers, body); only the body
        # is of interest here.
        _headers, contents = self.conn.get_object(self.container,
                                                  self.object_name)
    except socket.error as err:
        raise exception.SwiftConnectionFailed(reason=err)
    return contents
def close(self):
    """Upload the buffered data to swift and verify its integrity.

    Returns the MD5 hex digest of the uploaded data.

    Raises SwiftConnectionFailed on transport errors and InvalidBackup
    when swift's reported etag does not match the local MD5.
    """
    reader = six.BytesIO(self.data)
    try:
        etag = self.conn.put_object(self.container, self.object_name,
                                    reader,
                                    content_length=len(self.data))
    except socket.error as err:
        raise exception.SwiftConnectionFailed(reason=err)
    LOG.debug('swift MD5 for %(object_name)s: %(etag)s', {
        'object_name': self.object_name,
        'etag': etag,
    })
    md5 = hashlib.md5(self.data).hexdigest()
    LOG.debug('backup MD5 for %(object_name)s: %(md5)s', {
        'object_name': self.object_name,
        'md5': md5
    })
    if etag != md5:
        # BUG FIX: the original used a comma here, producing a
        # (message, dict) tuple instead of an interpolated string, so
        # InvalidBackup was raised with a tuple as its reason.
        err = _('error writing object to swift, MD5 of object in '
                'swift %(etag)s is not the same as MD5 of object sent '
                'to swift %(md5)s') % {'etag': etag, 'md5': md5}
        raise exception.InvalidBackup(reason=err)
    return md5
def delete(self, backup):
    """Delete the given backup from swift.

    Deletion is best effort: a failed object listing is tolerated, and
    per-object swift errors (other than connection failures, which
    abort) are logged and skipped so remaining objects still get
    removed.
    """
    container = backup['container']
    LOG.debug('delete started, backup: %s, container: %s, prefix: %s',
              backup['id'], container, backup['service_metadata'])
    if container is not None:
        swift_object_names = []
        try:
            swift_object_names = self._generate_object_names(backup)
        except Exception:
            # FIX: LOG.warn is a deprecated alias (removed from the
            # stdlib logging module in Python 3.13); use LOG.warning.
            LOG.warning(
                _('swift error while listing objects, continuing'
                  ' with delete'))
        for swift_object_name in swift_object_names:
            try:
                self.conn.delete_object(container, swift_object_name)
            except socket.error as err:
                # Connection problems are fatal; retrying other
                # objects would fail the same way.
                raise exception.SwiftConnectionFailed(reason=str(err))
            except Exception:
                LOG.warning(
                    _('swift error while deleting object %s, '
                      'continuing with delete') % swift_object_name)
            else:
                LOG.debug(
                    _('deleted swift object: %(swift_object_name)s'
                      ' in container: %(container)s') % locals())
            # Deleting a backup's objects from swift can take some time.
            # Yield so other threads can run
            eventlet.sleep(0)
    LOG.debug(_('delete %s finished') % backup['id'])
def restore(self, backup, volume_id, volume_file):
    """Restore the given volume backup from swift."""
    backup_id = backup['id']
    container = backup['container']
    object_prefix = backup['service_metadata']
    LOG.debug(_('starting restore of backup %(object_prefix)s from swift'
                ' container: %(container)s, to volume %(volume_id)s, '
                'backup: %(backup_id)s')
              % {'object_prefix': object_prefix,
                 'container': container,
                 'volume_id': volume_id,
                 'backup_id': backup_id})
    try:
        metadata = self._read_metadata(backup)
    except socket.error as err:
        raise exception.SwiftConnectionFailed(reason=str(err))
    metadata_version = metadata['version']
    LOG.debug(_('Restoring swift backup version %s'), metadata_version)
    # Look up the restore routine for this metadata version; an unknown
    # version yields None from the mapping and getattr raises TypeError.
    try:
        restore_func = getattr(
            self, self.SERVICE_VERSION_MAPPING.get(metadata_version))
    except TypeError:
        err = (_('No support to restore swift backup version %s')
               % metadata_version)
        raise exception.InvalidBackup(reason=err)
    restore_func(backup, volume_id, metadata, volume_file)
    LOG.debug(_('restore %(backup_id)s to %(volume_id)s finished.')
              % {'backup_id': backup_id, 'volume_id': volume_id})
def _prepare_backup(self, backup):
    """Prepare the backup process and return the backup metadata.

    Returns a (object_meta, container) pair, where object_meta tracks
    the running object id, the object list, and the name prefix used
    for subsequent chunk uploads.
    """
    backup_id = backup['id']
    volume_id = backup['volume_id']
    volume = self.db.volume_get(self.context, volume_id)

    if volume['size'] <= 0:
        err = _('volume size %d is invalid.') % volume['size']
        raise exception.InvalidVolume(reason=err)

    try:
        container = self._create_container(self.context, backup)
    except socket.error as err:
        raise exception.SwiftConnectionFailed(reason=str(err))

    # Persist the prefix so delete/restore can locate the objects later.
    object_prefix = self._generate_swift_object_name_prefix(backup)
    backup['service_metadata'] = object_prefix
    self.db.backup_update(self.context, backup_id,
                          {'service_metadata': object_prefix})

    volume_size_bytes = volume['size'] * units.GiB
    availability_zone = self.az
    LOG.debug(_('starting backup of volume: %(volume_id)s to swift,'
                ' volume size: %(volume_size_bytes)d, swift object names'
                ' prefix %(object_prefix)s, availability zone:'
                ' %(availability_zone)s')
              % {'volume_id': volume_id,
                 'volume_size_bytes': volume_size_bytes,
                 'object_prefix': object_prefix,
                 'availability_zone': availability_zone})
    return {'id': 1, 'list': [], 'prefix': object_prefix}, container
def get_container_entries(self, container, prefix):
    """Get container entry names"""
    try:
        # get_container returns (headers, listing); only the listing
        # of object dicts is needed.
        _headers, listing = self.conn.get_container(container,
                                                    prefix=prefix,
                                                    full_listing=True)
    except socket.error as err:
        raise exception.SwiftConnectionFailed(reason=err)
    return [entry['name'] for entry in listing]
def _backup_chunk(self, backup, container, data, data_offset, object_meta):
    """Backup data chunk based on the object metadata and offset.

    Optionally compresses the chunk, uploads it to swift, verifies the
    MD5 against swift's etag, and records the object in object_meta.
    """
    object_prefix = object_meta['prefix']
    object_id = object_meta['id']
    object_name = '%s-%05d' % (object_prefix, object_id)
    # Per-object record: offset/length refer to the uncompressed data.
    segment = {'offset': data_offset, 'length': len(data)}
    obj = {object_name: segment}
    LOG.debug(_('reading chunk of data from volume'))
    if self.compressor is None:
        LOG.debug(_('not compressing data'))
        segment['compression'] = 'none'
    else:
        algorithm = CONF.backup_compression_algorithm.lower()
        segment['compression'] = algorithm
        data_size_bytes = len(data)
        data = self.compressor.compress(data)
        comp_size_bytes = len(data)
        LOG.debug(_('compressed %(data_size_bytes)d bytes of data '
                    'to %(comp_size_bytes)d bytes using '
                    '%(algorithm)s')
                  % {'data_size_bytes': data_size_bytes,
                     'comp_size_bytes': comp_size_bytes,
                     'algorithm': algorithm})
    reader = StringIO.StringIO(data)
    LOG.debug(_('About to put_object'))
    try:
        etag = self.conn.put_object(container, object_name, reader,
                                    content_length=len(data))
    except socket.error as err:
        raise exception.SwiftConnectionFailed(reason=str(err))
    LOG.debug(_('swift MD5 for %(object_name)s: %(etag)s')
              % {'object_name': object_name, 'etag': etag, })
    md5 = hashlib.md5(data).hexdigest()
    segment['md5'] = md5
    LOG.debug(_('backup MD5 for %(object_name)s: %(md5)s')
              % {'object_name': object_name, 'md5': md5})
    if etag != md5:
        err = _('error writing object to swift, MD5 of object in '
                'swift %(etag)s is not the same as MD5 of object sent '
                'to swift %(md5)s') % {'etag': etag, 'md5': md5}
        raise exception.InvalidBackup(reason=err)
    object_meta['list'].append(obj)
    object_meta['id'] = object_id + 1
    LOG.debug(_('Calling eventlet.sleep(0)'))
    eventlet.sleep(0)
def _finalize_backup(self, backup, container, object_meta):
    """Finalize the backup by updating its metadata on Swift"""
    try:
        self._write_metadata(backup,
                             backup['volume_id'],
                             container,
                             object_meta['list'])
    except socket.error as err:
        raise exception.SwiftConnectionFailed(reason=str(err))
    # Record how many objects were written for this backup.
    self.db.backup_update(self.context, backup['id'],
                          {'object_count': object_meta['id']})
    LOG.debug(_('backup %s finished.') % backup['id'])
def _restore_v1(self, backup, volume_id, metadata, volume_file):
    """Restore a v1 swift volume backup from swift.

    Verifies that the object listing in swift matches the backup
    metadata, then streams each (optionally compressed) object into
    volume_file, fsync'ing after every write.
    """
    backup_id = backup['id']
    LOG.debug(_('v1 swift volume backup restore of %s started'), backup_id)
    container = backup['container']
    metadata_objects = metadata['objects']
    metadata_object_names = []
    for metadata_object in metadata_objects:
        metadata_object_names.extend(metadata_object.keys())
    LOG.debug(_('metadata_object_names = %s') % metadata_object_names)
    prune_list = [self._metadata_filename(backup)]
    swift_object_names = [
        swift_object_name for swift_object_name in
        self._generate_object_names(backup)
        if swift_object_name not in prune_list
    ]
    if sorted(swift_object_names) != sorted(metadata_object_names):
        err = _('restore_backup aborted, actual swift object list in '
                'swift does not match object list stored in metadata')
        raise exception.InvalidBackup(reason=err)
    for metadata_object in metadata_objects:
        # FIX: .keys()[0] breaks on Python 3 (dict views are not
        # indexable); next(iter(...)) works on both 2 and 3. Each
        # metadata entry holds exactly one object-name key.
        object_name = next(iter(metadata_object))
        LOG.debug(
            _('restoring object from swift. backup: %(backup_id)s, '
              'container: %(container)s, swift object name: '
              '%(object_name)s, volume: %(volume_id)s')
            % {'backup_id': backup_id,
               'container': container,
               'object_name': object_name,
               'volume_id': volume_id})
        try:
            (resp, body) = self.conn.get_object(container, object_name)
        except socket.error as err:
            raise exception.SwiftConnectionFailed(reason=str(err))
        compression_algorithm = metadata_object[object_name]['compression']
        decompressor = self._get_compressor(compression_algorithm)
        if decompressor is not None:
            LOG.debug(_('decompressing data using %s algorithm')
                      % compression_algorithm)
            decompressed = decompressor.decompress(body)
            volume_file.write(decompressed)
        else:
            volume_file.write(body)
        # force flush every write to avoid long blocking write on close
        volume_file.flush()
        os.fsync(volume_file.fileno())
        # Restoring a backup to a volume can take some time. Yield so
        # other threads can run, allowing for among other things the
        # service status to be updated
        eventlet.sleep(0)
    LOG.debug(_('v1 swift volume backup restore of %s finished'),
              backup_id)
def close(self):
    """Flush the buffered data to swift and verify the upload.

    Returns the MD5 hex digest of the data; raises InvalidBackup when
    swift's etag disagrees with the locally computed digest.
    """
    try:
        etag = self.conn.put_object(self.container, self.object_name,
                                    io.BytesIO(self.data),
                                    content_length=len(self.data))
    except socket.error as err:
        raise exception.SwiftConnectionFailed(reason=err)
    md5 = secretutils.md5(self.data, usedforsecurity=False).hexdigest()
    if etag == md5:
        return md5
    err = _('error writing object to swift, MD5 of object in '
            'swift %(etag)s is not the same as MD5 of object sent '
            'to swift %(md5)s') % {'etag': etag, 'md5': md5}
    raise exception.InvalidBackup(reason=err)
def restore(self, backup, volume_id, volume_file):
    """Restore the given volume backup from swift."""
    backup_id = backup['id']
    container = backup['container']
    object_prefix = backup['service_metadata']
    LOG.debug(
        _('starting restore of backup %(object_prefix)s from swift'
          ' container: %(container)s, to volume %(volume_id)s, '
          'backup: %(backup_id)s')
        % {'object_prefix': object_prefix,
           'container': container,
           'volume_id': volume_id,
           'backup_id': backup_id})
    try:
        metadata = self._read_metadata(backup)
    except socket.error as err:
        raise exception.SwiftConnectionFailed(reason=str(err))
    metadata_version = metadata['version']
    LOG.debug(_('Restoring swift backup version %s'), metadata_version)
    # An unknown version yields None from the mapping, making getattr
    # raise TypeError.
    try:
        restore_func = getattr(
            self, self.DRIVER_VERSION_MAPPING.get(metadata_version))
    except TypeError:
        err = (_('No support to restore swift backup version %s')
               % metadata_version)
        raise exception.InvalidBackup(reason=err)
    restore_func(backup, volume_id, metadata, volume_file)

    # Restore the saved volume metadata, if this backup carried any.
    volume_meta = metadata.get('volume_meta', None)
    try:
        if volume_meta:
            self.put_metadata(volume_id, volume_meta)
        else:
            LOG.debug("No volume metadata in this backup")
    except exception.BackupMetadataUnsupportedVersion:
        msg = _("Metadata restore failed due to incompatible version")
        LOG.error(msg)
        raise exception.BackupOperationError(msg)
    LOG.debug(_('restore %(backup_id)s to %(volume_id)s finished.')
              % {'backup_id': backup_id, 'volume_id': volume_id})
def delete_object(self, container, object_name):
    """Deletes a backup object from a Swift object store."""
    try:
        self.conn.delete_object(container, object_name)
    except socket.error as sock_err:
        # Surface transport-level failures as a driver-specific error.
        raise exception.SwiftConnectionFailed(reason=sock_err)
def backup(self, backup, volume_file):
    """Backup the given volume to swift using the given backup metadata."""
    backup_id = backup['id']
    volume_id = backup['volume_id']
    volume = self.db.volume_get(self.context, volume_id)
    if volume['size'] <= 0:
        err = _('volume size %d is invalid.') % volume['size']
        raise exception.InvalidVolume(reason=err)
    try:
        container = self._create_container(self.context, backup)
    except socket.error as err:
        raise exception.SwiftConnectionFailed(reason=str(err))
    # Persist the object-name prefix so delete/restore can locate
    # this backup's objects later.
    object_prefix = self._generate_swift_object_name_prefix(backup)
    backup['service_metadata'] = object_prefix
    self.db.backup_update(self.context, backup_id,
                          {'service_metadata': object_prefix})
    volume_size_bytes = volume['size'] * 1024 * 1024 * 1024
    availability_zone = self.az
    LOG.debug(_('starting backup of volume: %(volume_id)s to swift,'
                ' volume size: %(volume_size_bytes)d, swift object names'
                ' prefix %(object_prefix)s, availability zone:'
                ' %(availability_zone)s') % locals())
    object_id = 1
    object_list = []
    # Read the volume in fixed-size chunks; each chunk becomes one
    # swift object named '<prefix>-<id>'.
    while True:
        data_block_size_bytes = self.data_block_size_bytes
        object_name = '%s-%05d' % (object_prefix, object_id)
        obj = {}
        obj[object_name] = {}
        obj[object_name]['offset'] = volume_file.tell()
        data = volume_file.read(data_block_size_bytes)
        obj[object_name]['length'] = len(data)
        # NOTE(review): '' only matches a str read; presumably this
        # code predates binary-mode reads returning b'' -- confirm
        # against the file object the caller passes in.
        if data == '':
            break
        LOG.debug(_('reading chunk of data from volume'))
        if self.compressor is not None:
            algorithm = FLAGS.backup_compression_algorithm.lower()
            obj[object_name]['compression'] = algorithm
            data_size_bytes = len(data)
            data = self.compressor.compress(data)
            comp_size_bytes = len(data)
            LOG.debug(_('compressed %(data_size_bytes)d bytes of data'
                        ' to %(comp_size_bytes)d bytes using '
                        '%(algorithm)s') % locals())
        else:
            LOG.debug(_('not compressing data'))
            obj[object_name]['compression'] = 'none'
        reader = StringIO.StringIO(data)
        LOG.debug(_('About to put_object'))
        try:
            etag = self.conn.put_object(container, object_name, reader)
        except socket.error as err:
            raise exception.SwiftConnectionFailed(reason=str(err))
        LOG.debug(_('swift MD5 for %(object_name)s: %(etag)s') % locals())
        # Verify the upload: swift's etag is the MD5 of the stored
        # object and must match the digest of what was sent.
        md5 = hashlib.md5(data).hexdigest()
        obj[object_name]['md5'] = md5
        LOG.debug(_('backup MD5 for %(object_name)s: %(md5)s') % locals())
        if etag != md5:
            err = _('error writing object to swift, MD5 of object in '
                    'swift %(etag)s is not the same as MD5 of object sent '
                    'to swift %(md5)s') % locals()
            raise exception.InvalidBackup(reason=err)
        object_list.append(obj)
        object_id += 1
        LOG.debug(_('Calling eventlet.sleep(0)'))
        eventlet.sleep(0)
    try:
        self._write_metadata(backup, volume_id, container, object_list)
    except socket.error as err:
        raise exception.SwiftConnectionFailed(reason=str(err))
    # NOTE(review): object_id was incremented past the last appended
    # object, so this count is len(object_list) + 1 -- looks like it
    # accounts for the metadata object, but confirm.
    self.db.backup_update(self.context, backup_id,
                          {'object_count': object_id})
    LOG.debug(_('backup %s finished.') % backup_id)
def _restore_v1(self, backup, volume_id, metadata, volume_file):
    """Restore a v1 swift volume backup from swift.

    Validates the swift object listing against the backup metadata,
    then downloads each object (decompressing when required) and
    writes it to volume_file, flushing after every object.
    """
    backup_id = backup['id']
    LOG.debug('v1 swift volume backup restore of %s started', backup_id)
    container = backup['container']
    metadata_objects = metadata['objects']
    # FIX: sum((obj.keys() ...), []) breaks on Python 3 because dict
    # views cannot be concatenated with lists; a nested comprehension
    # flattens the names portably.
    metadata_object_names = [name for obj in metadata_objects
                             for name in obj]
    LOG.debug('metadata_object_names = %s' % metadata_object_names)
    prune_list = [self._metadata_filename(backup)]
    swift_object_names = [
        swift_object_name for swift_object_name in
        self._generate_object_names(backup)
        if swift_object_name not in prune_list
    ]
    if sorted(swift_object_names) != sorted(metadata_object_names):
        err = _('restore_backup aborted, actual swift object list in '
                'swift does not match object list stored in metadata')
        raise exception.InvalidBackup(reason=err)
    for metadata_object in metadata_objects:
        # FIX: .keys()[0] is not valid on Python 3; each metadata
        # entry holds exactly one object-name key.
        object_name = next(iter(metadata_object))
        LOG.debug(
            'restoring object from swift. backup: %(backup_id)s, '
            'container: %(container)s, swift object name: '
            '%(object_name)s, volume: %(volume_id)s'
            % {'backup_id': backup_id,
               'container': container,
               'object_name': object_name,
               'volume_id': volume_id})
        try:
            (_resp, body) = self.conn.get_object(container, object_name)
        except socket.error as err:
            raise exception.SwiftConnectionFailed(reason=err)
        compression_algorithm = metadata_object[object_name]['compression']
        decompressor = self._get_compressor(compression_algorithm)
        if decompressor is not None:
            LOG.debug('decompressing data using %s algorithm'
                      % compression_algorithm)
            decompressed = decompressor.decompress(body)
            volume_file.write(decompressed)
        else:
            volume_file.write(body)
        # force flush every write to avoid long blocking write on close
        volume_file.flush()
        # Be tolerant to IO implementations that do not support fileno()
        try:
            fileno = volume_file.fileno()
        except IOError:
            LOG.info(_LI("volume_file does not support "
                         "fileno() so skipping"
                         "fsync()"))
        else:
            os.fsync(fileno)
        # Restoring a backup to a volume can take some time. Yield so
        # other threads can run, allowing for among other things the
        # service status to be updated
        eventlet.sleep(0)
    LOG.debug('v1 swift volume backup restore of %s finished', backup_id)