def _do_backup(self, backup_path, vol_id, backup_mode):
    """Perform the actual backup operation.

    :param backup_path: volume path
    :param vol_id: volume id
    :param backup_mode: file mode of source volume; 'image' or 'file'
    :raises: InvalidBackup
    """
    # dsmc reports this attribute when exactly one object was backed up.
    expected_attrs = {'Total number of objects backed up': '1'}

    compression = 'yes' if CONF.backup_tsm_compression else 'no'

    cmd = ['dsmc', 'backup']
    if _image_mode(backup_mode):
        cmd.append('image')
    cmd += ['-quiet',
            '-compression=%s' % compression,
            '-password=%s' % self.tsm_password,
            backup_path]

    out, err = utils.execute(*cmd, run_as_root=True,
                             check_exit_code=False)

    if not _check_dsmc_output(out, expected_attrs, exact_match=False):
        err = (_('backup: %(vol_id)s failed to obtain backup '
                 'success notification from server.\n'
                 'stdout: %(out)s\n stderr: %(err)s')
               % {'vol_id': vol_id, 'out': out, 'err': err})
        LOG.error(err)
        raise exception.InvalidBackup(reason=err)
def restore(self, backup, volume_id, volume_file):
    """Restore the given volume backup from backup repository.

    Builds the chain of backups from the given (newest) backup back
    to its full backup via ``parent_id``, restores the full backup
    first and then layers each incremental backup on top in order.
    Volume metadata found in each layer is applied as that layer is
    restored, so the newest layer's metadata is applied last.

    :param backup: backup object to restore (newest in the chain)
    :param volume_id: id of the volume to restore into
    :param volume_file: file object open on the target volume
    :raises InvalidBackup: if the metadata version is not supported
    :raises BackupOperationError: if volume metadata restore fails
        due to an incompatible metadata version
    """
    backup_id = backup['id']
    container = backup['container']
    object_prefix = backup['service_metadata']
    LOG.debug('starting restore of backup %(object_prefix)s '
              'container: %(container)s, to volume %(volume_id)s, '
              'backup: %(backup_id)s.',
              {
                  'object_prefix': object_prefix,
                  'container': container,
                  'volume_id': volume_id,
                  'backup_id': backup_id,
              })
    metadata = self._read_metadata(backup)
    metadata_version = metadata['version']
    LOG.debug('Restoring backup version %s', metadata_version)
    try:
        # DRIVER_VERSION_MAPPING.get() returns None for an unknown
        # version; getattr(self, None) then raises TypeError.
        restore_func = getattr(
            self, self.DRIVER_VERSION_MAPPING.get(metadata_version))
    except TypeError:
        err = (_('No support to restore backup version %s')
               % metadata_version)
        raise exception.InvalidBackup(reason=err)

    # Build a list of backups based on parent_id. A full backup
    # will be the last one in the list.
    backup_list = []
    backup_list.append(backup)
    current_backup = backup
    while current_backup.parent_id:
        prev_backup = objects.Backup.get_by_id(self.context,
                                               current_backup.parent_id)
        backup_list.append(prev_backup)
        current_backup = prev_backup

    # Do a full restore first, then layer the incremental backups
    # on top of it in order.
    index = len(backup_list) - 1
    while index >= 0:
        backup1 = backup_list[index]
        index = index - 1
        metadata = self._read_metadata(backup1)
        restore_func(backup1, volume_id, metadata, volume_file)

        volume_meta = metadata.get('volume_meta', None)
        try:
            if volume_meta:
                self.put_metadata(volume_id, volume_meta)
            else:
                LOG.debug("No volume metadata in this backup.")
        except exception.BackupMetadataUnsupportedVersion:
            msg = _("Metadata restore failed due to incompatible "
                    "version.")
            LOG.error(msg)
            raise exception.BackupOperationError(msg)

    LOG.debug('restore %(backup_id)s to %(volume_id)s finished.',
              {'backup_id': backup_id, 'volume_id': volume_id})
def restore(self, backup, volume_id, volume_file):
    """Restore the given volume backup from swift.

    :param backup: backup record to restore
    :param volume_id: id of the volume to restore into
    :param volume_file: file object open on the target volume
    :raises SwiftConnectionFailed: on socket errors talking to swift
    :raises InvalidBackup: if the metadata version is not supported
    """
    backup_id = backup['id']
    container = backup['container']
    object_prefix = backup['service_metadata']
    # Fix: debug messages must not be translated with _() and should
    # use lazy % arguments (interpolation only happens when DEBUG is
    # enabled) -- matching the style of the other restore() in this
    # file.
    LOG.debug('starting restore of backup %(object_prefix)s from swift'
              ' container: %(container)s, to volume %(volume_id)s, '
              'backup: %(backup_id)s',
              {
                  'object_prefix': object_prefix,
                  'container': container,
                  'volume_id': volume_id,
                  'backup_id': backup_id,
              })
    try:
        metadata = self._read_metadata(backup)
    except socket.error as err:
        raise exception.SwiftConnectionFailed(reason=str(err))
    metadata_version = metadata['version']
    LOG.debug('Restoring swift backup version %s', metadata_version)
    try:
        # A None lookup result makes getattr raise TypeError.
        restore_func = getattr(
            self, self.DRIVER_VERSION_MAPPING.get(metadata_version))
    except TypeError:
        err = (_('No support to restore swift backup version %s')
               % metadata_version)
        raise exception.InvalidBackup(reason=err)
    restore_func(backup, volume_id, metadata, volume_file)
    LOG.debug('restore %(backup_id)s to %(volume_id)s finished.',
              {'backup_id': backup_id, 'volume_id': volume_id})
def export_record(self, context, backup_id):
    """Make the RPC call to export a volume backup.

    Call backup manager to execute backup export.

    :param context: running context
    :param backup_id: backup id to export
    :returns: dictionary -- a description of how to import the backup
    :returns: contains 'backup_url' and 'backup_service'
    :raises: InvalidBackup
    """
    check_policy(context, 'backup-export')
    backup = self.get(context, backup_id)

    # Export is only valid from the AVAILABLE state.
    status = backup['status']
    if status != fields.BackupStatus.AVAILABLE:
        raise exception.InvalidBackup(
            reason=_('Backup status must be available and not %s.')
            % status)

    LOG.debug("Calling RPCAPI with context: "
              "%(ctx)s, host: %(host)s, backup: %(id)s.",
              {'ctx': context,
               'host': backup['host'],
               'id': backup['id']})

    # Route the request to a live backup service host and persist it.
    backup.host = self._get_available_backup_service_host(
        backup.host, backup.availability_zone)
    backup.save()

    return self.backup_rpcapi.export_record(context, backup)
def _write_metadata(self, backup, volume_id, container, object_list,
                    volume_meta):
    """Write the backup's metadata object to swift and verify it.

    Serializes driver version, backup identity, the data-object list
    and volume metadata to JSON, uploads it as this backup's metadata
    object, then compares swift's returned etag against a locally
    computed MD5.

    :param backup: backup record (dict-style access)
    :param volume_id: id of the backed-up volume
    :param container: swift container to write into
    :param object_list: list of data-object descriptors
    :param volume_meta: serialized volume metadata (or None)
    :raises InvalidBackup: if swift's MD5 does not match ours
    """
    filename = self._metadata_filename(backup)
    LOG.debug('_write_metadata started, container name: %(container)s,'
              ' metadata filename: %(filename)s' % {
                  'container': container, 'filename': filename
              })
    metadata = {}
    metadata['version'] = self.DRIVER_VERSION
    metadata['backup_id'] = backup['id']
    metadata['volume_id'] = volume_id
    metadata['backup_name'] = backup['display_name']
    metadata['backup_description'] = backup['display_description']
    metadata['created_at'] = str(backup['created_at'])
    metadata['objects'] = object_list
    metadata['volume_meta'] = volume_meta
    # sort_keys makes the serialization deterministic so the MD5
    # comparison below is meaningful.
    metadata_json = json.dumps(metadata, sort_keys=True, indent=2)
    reader = six.StringIO(metadata_json)
    # NOTE(review): reader.len and hashlib.md5(str) are Python 2
    # semantics; six.StringIO has no '.len' on Python 3 -- confirm
    # this module is Python 2 only.
    etag = self.conn.put_object(container, filename, reader,
                                content_length=reader.len)
    md5 = hashlib.md5(metadata_json).hexdigest()
    if etag != md5:
        err = _('error writing metadata file to swift, MD5 of metadata'
                ' file in swift [%(etag)s] is not the same as MD5 of '
                'metadata file sent to swift [%(md5)s]') % {
            'etag': etag, 'md5': md5
        }
        raise exception.InvalidBackup(reason=err)
    LOG.debug('_write_metadata finished')
def close(self):
    """Upload the buffered data to GCS and verify the MD5 round-trip.

    :returns: base64-encoded MD5 digest of the uploaded data
    :raises InvalidBackup: if GCS's reported MD5 ('md5Hash') differs
        from the locally computed one
    """
    media = http.MediaIoBaseUpload(io.BytesIO(self.data),
                                   'application/octet-stream',
                                   chunksize=self.chunk_size,
                                   resumable=self.resumable)
    resp = self.conn.objects().insert(
        bucket=self.bucket,
        name=self.object_name,
        body={},
        media_body=media).execute(num_retries=self.num_retries)
    # GCS returns the object's MD5 as a base64 string in 'md5Hash'.
    etag = bytes(resp['md5Hash'], 'utf-8')
    # Fix: digest() already returns bytes; the previous
    # md5.encode('utf-8') would raise AttributeError on Python 3.
    # base64-encode the raw digest directly for comparison.
    md5 = secretutils.md5(self.data, usedforsecurity=False).digest()
    md5 = base64.b64encode(md5)
    if etag != md5:
        err = _('MD5 of object: %(object_name)s before: '
                '%(md5)s and after: %(etag)s is not same.') % {
            'object_name': self.object_name,
            'md5': md5,
            'etag': etag,
        }
        raise exception.InvalidBackup(reason=err)
    LOG.debug('MD5 before: %(md5)s and after: %(etag)s '
              'writing object: %(object_name)s in GCS.',
              {'etag': etag, 'md5': md5,
               'object_name': self.object_name, })
    return md5
def close(self):
    """Upload the buffered data to swift and verify the MD5.

    :returns: hex MD5 digest of the uploaded data
    :raises SwiftConnectionFailed: on socket errors talking to swift
    :raises InvalidBackup: if swift's etag differs from our MD5
    """
    reader = six.BytesIO(self.data)
    try:
        etag = self.conn.put_object(self.container, self.object_name,
                                    reader,
                                    content_length=len(self.data))
    except socket.error as err:
        raise exception.SwiftConnectionFailed(reason=err)
    LOG.debug('swift MD5 for %(object_name)s: %(etag)s',
              {'object_name': self.object_name, 'etag': etag, })
    md5 = hashlib.md5(self.data).hexdigest()
    LOG.debug('backup MD5 for %(object_name)s: %(md5)s',
              {'object_name': self.object_name, 'md5': md5})
    if etag != md5:
        # Fix: the message and its substitution dict were joined with
        # ',' (building a tuple) instead of '%' interpolation, so the
        # raised reason was a tuple, not a formatted string.
        err = _('error writing object to swift, MD5 of object in '
                'swift %(etag)s is not the same as MD5 of object sent '
                'to swift %(md5)s') % {'etag': etag, 'md5': md5}
        raise exception.InvalidBackup(reason=err)
    return md5
def _make_link(volume_path, backup_path, vol_id):
    """Create a hard link for the volume block device.

    The IBM TSM client performs an image backup on a block device.
    The name of the block device is the backup prefix plus the
    backup id.

    :param volume_path: real device path name for volume
    :param backup_path: path name TSM will use as volume to backup
    :param vol_id: id of volume to backup (for reporting)
    :raises: InvalidBackup
    """
    try:
        utils.execute('ln', volume_path, backup_path,
                      run_as_root=True,
                      check_exit_code=True)
    except processutils.ProcessExecutionError as exc:
        msg = (_('backup: %(vol_id)s failed to create device hardlink '
                 'from %(vpath)s to %(bpath)s.\n'
                 'stdout: %(out)s\n stderr: %(err)s')
               % {'vol_id': vol_id,
                  'vpath': volume_path,
                  'bpath': backup_path,
                  'out': exc.stdout,
                  'err': exc.stderr})
        LOG.error(msg)
        raise exception.InvalidBackup(reason=msg)
def _get_backup_metadata(backup, operation):
    """Return metadata persisted with backup object.

    :param backup: backup record
    :param operation: operation label used only in error reporting
    :returns: tuple (backup_path, backup_mode)
    :raises: InvalidBackup
    """
    try:
        svc_dict = json.loads(backup.service_metadata)
        backup_path = svc_dict.get('backup_path')
        backup_mode = svc_dict.get('backup_mode')
    except TypeError:
        # Backwards compatibility: older records have no JSON
        # service_metadata, so derive the legacy device path from the
        # configured prefix and assume image mode.
        backup_path = utils.make_dev_path(
            '%s-%s' % (CONF.backup_tsm_volume_prefix, backup['id']))
        backup_mode = 'image'

    if backup_mode not in VALID_BACKUP_MODES:
        err = (_('%(op)s: backup %(bck_id)s, volume %(vol_id)s failed. '
                 'Backup object has unexpected mode. Image or file '
                 'backups supported, actual mode is %(vol_mode)s.')
               % {'op': operation,
                  'bck_id': backup['id'],
                  'vol_id': backup['volume_id'],
                  'vol_mode': backup_mode})
        LOG.error(err)
        raise exception.InvalidBackup(reason=err)
    return backup_path, backup_mode
def delete_backup(self, context, backup_id):
    """Delete volume backup from configured backup service.

    Validates that the backup is in 'deleting' state and that the
    configured driver matches the one that created the backup, then
    asks the driver to delete it and destroys the DB record.

    :param context: running context
    :param backup_id: id of the backup to delete
    :raises InvalidBackup: on unexpected status or driver mismatch
    """
    LOG.info(_('Delete backup started, backup: %s.'), backup_id)
    backup = self.db.backup_get(context, backup_id)
    # Claim the backup for this host before doing any work.
    self.db.backup_update(context, backup_id, {'host': self.host})

    expected_status = 'deleting'
    actual_status = backup['status']
    if actual_status != expected_status:
        err = _('Delete_backup aborted, expected backup status '
                '%(expected_status)s but got %(actual_status)s.') % {
            'expected_status': expected_status,
            'actual_status': actual_status,
        }
        self.db.backup_update(context, backup_id, {
            'status': 'error', 'fail_reason': err
        })
        raise exception.InvalidBackup(reason=err)

    backup_service = self._map_service_to_driver(backup['service'])
    if backup_service is not None:
        configured_service = self.driver_name
        if backup_service != configured_service:
            err = _('Delete backup aborted, the backup service currently'
                    ' configured [%(configured_service)s] is not the'
                    ' backup service that was used to create this'
                    ' backup [%(backup_service)s].') % {
                'configured_service': configured_service,
                'backup_service': backup_service,
            }
            self.db.backup_update(context, backup_id,
                                  {'status': 'error'})
            raise exception.InvalidBackup(reason=err)
        try:
            backup_service = self.service.get_backup_driver(context)
            backup_service.delete(backup)
        except Exception as err:
            with excutils.save_and_reraise_exception():
                # unicode(): this module targets Python 2.
                self.db.backup_update(context, backup_id, {
                    'status': 'error', 'fail_reason': unicode(err)
                })

    # Elevate so the destroy is not blocked by project scoping.
    context = context.elevated()
    self.db.backup_destroy(context, backup_id)
    LOG.info(_('Delete backup finished, backup %s deleted.'),
             backup_id)
def _get_volume_realpath(volume_file, volume_id):
    """Get the real path for the volume block device.

    If the volume is not a block device or a regular file issue an
    InvalidBackup exception.

    :param volume_file: file object representing the volume
    :param volume_id: Volume id for backup or as restore target
    :raises: InvalidBackup
    :returns str -- real path of volume device
    :returns str -- backup mode to be used
    """
    try:
        volume_path = os.path.realpath(volume_file.name)
        mode_bits = os.stat(volume_path).st_mode
        if stat.S_ISBLK(mode_bits):
            return volume_path, 'image'
        if stat.S_ISREG(mode_bits):
            return volume_path, 'file'
        # Neither block device nor regular file: reject.
        err = (_('backup: %(vol_id)s failed. '
                 '%(path)s is unexpected file type. Block or regular '
                 'files supported, actual file mode is %(vol_mode)s.')
               % {'vol_id': volume_id,
                  'path': volume_path,
                  'vol_mode': mode_bits})
    except AttributeError:
        # volume_file has no usable .name attribute.
        err = (_('backup: %(vol_id)s failed. Cannot obtain real path '
                 'to volume at %(path)s.')
               % {'vol_id': volume_id, 'path': volume_file})
    except OSError:
        err = (_('backup: %(vol_id)s failed. '
                 '%(path)s is not a file.')
               % {'vol_id': volume_id, 'path': volume_path})
    LOG.error(err)
    raise exception.InvalidBackup(reason=err)
def export_record(self, context, backup):
    """Export all volume backup metadata details to allow clean import.

    Export backup metadata so it could be re-imported into the
    database without any prerequisite in the backup database.

    :param context: running context
    :param backup: backup object to export
    :returns: backup_record - a description of how to import the backup
    :returns: contains 'backup_url' - how to import the backup, and
    :returns: 'backup_service' describing the needed driver.
    :raises InvalidBackup:
    """
    LOG.info('Export record started, backup: %s.', backup.id)

    expected_status = fields.BackupStatus.AVAILABLE
    if backup.status != expected_status:
        raise exception.InvalidBackup(
            reason=_('Export backup aborted, expected backup status '
                     '%(expected_status)s but got %(actual_status)s.')
            % {'expected_status': expected_status,
               'actual_status': backup.status})

    backup_record = {'backup_service': backup.service}
    if not self._is_our_backup(backup):
        raise exception.InvalidBackup(
            reason=_('Export record aborted, the backup service '
                     'currently '
                     'configured [%(configured_service)s] is not the '
                     'backup service that was used to create this '
                     'backup [%(backup_service)s].')
            % {'configured_service': self.driver_name,
               'backup_service': backup.service})

    # Ask the driver for an opaque description of the backup and wrap
    # it into a portable URL.
    try:
        driver_info = self.service(context).export_record(backup)
        backup_record['backup_url'] = backup.encode_record(
            driver_info=driver_info)
    except Exception as err:
        raise exception.InvalidBackup(reason=six.text_type(err))

    LOG.info('Export record finished, backup %s exported.', backup.id)
    return backup_record
def delete_object(self, container, object_name):
    """Delete object from container.

    :param container: container holding the object
    :param object_name: name of the object to delete
    :raises InvalidBackup: if the backend reports the delete failed
    """
    if self.onest.delete_object(container, object_name):
        LOG.debug('Delete object:%s successful.', object_name)
    else:
        # Fix: the message and its argument were joined with ','
        # (building a tuple) instead of '%' interpolation, so the
        # logged/raised reason was a tuple, not a string.
        err = _('Delete object:%s failed!') % object_name
        LOG.error(err)
        raise exception.InvalidBackup(reason=err)
def delete(self, context, backup_id):
    """Make the RPC call to delete a volume backup.

    :param context: running context
    :param backup_id: id of the backup to delete
    :raises InvalidBackup: if the backup status is not deletable or
        incremental backups depend on it
    """
    check_policy(context, 'delete')
    backup = self.get(context, backup_id)
    if backup['status'] not in ['available', 'error']:
        msg = _('Backup status must be available or error')
        raise exception.InvalidBackup(reason=msg)

    # Don't allow backup to be deleted if there are incremental
    # backups dependent on it.
    deltas = self.get_all(context, {'parent_id': backup['id']})
    # Fix: 'deltas and len(deltas)' was redundant -- an empty list is
    # already falsy, so plain truthiness is equivalent and idiomatic.
    if deltas:
        msg = _('Incremental backups exist for this backup.')
        raise exception.InvalidBackup(reason=msg)

    self.db.backup_update(context, backup_id, {'status': 'deleting'})
    self.backup_rpcapi.delete_backup(context,
                                     backup['host'],
                                     backup['id'])
def export_record(self, context, backup_id):
    """Export all volume backup metadata details to allow clean import.

    Export backup metadata so it could be re-imported into the
    database without any prerequisite in the backup database.

    :param context: running context
    :param backup_id: backup id to export
    :returns: backup_record - a description of how to import the backup
    :returns: contains 'backup_url' - how to import the backup, and
    :returns: 'backup_service' describing the needed driver.
    :raises: InvalidBackup
    """
    LOG.info(_('Export record started, backup: %s.'), backup_id)
    backup = self.db.backup_get(context, backup_id)
    expected_status = 'available'
    actual_status = backup['status']
    if actual_status != expected_status:
        err = (_('Export backup aborted, expected backup status '
                 '%(expected_status)s but got %(actual_status)s.') %
               {'expected_status': expected_status,
                'actual_status': actual_status})
        raise exception.InvalidBackup(reason=err)

    backup_record = {}
    # Call driver to create backup description string
    try:
        cinderClient = self._get_cascaded_cinder_client(context)
        # Map the local backup id to its id on the cascaded (child)
        # cinder; falls back to '' when not present in the cache.
        cascaded_backup_id = \
            self.volumes_mapping_cache['backups'].get(backup_id, '')
        LOG.info(_("cascade ino: export cascade backup :%s"),
                 cascaded_backup_id)
        bodyResponse = cinderClient.backups.export_record(
            cascaded_backup_id)
        backup_record['backup_url'] = bodyResponse['backup_url']
        backup_record['backup_service'] = bodyResponse['backup_service']
    except Exception as err:
        # unicode(): this module targets Python 2.
        msg = unicode(err)
        raise exception.InvalidBackup(reason=msg)

    LOG.info(_('Export record finished, backup %s exported.'),
             cascaded_backup_id)
    return backup_record
def read(self):
    """Fetch this object's data from the backend.

    :returns: the object data
    :raises InvalidBackup: if no data could be read
    """
    data = self.onest.get_object_data(self.container, self.object_name)
    # NOTE(review): a falsy (empty) payload is treated as a read
    # failure here -- confirm the backend never returns a
    # legitimately empty object.
    if not data:
        err = _('Read object failed!')
        LOG.error(err)
        raise exception.InvalidBackup(reason=err)
    LOG.debug('Read object successful, name:%s.', self.object_name)
    return data
def delete(self, context, backup_id):
    """Make the RPC call to delete a volume backup.

    :param context: running context
    :param backup_id: id of the backup to delete
    :raises InvalidBackup: if the backup is not available or in error
    """
    check_policy(context, 'delete')
    backup = self.get(context, backup_id)
    if backup['status'] not in ['available', 'error']:
        raise exception.InvalidBackup(
            reason=_('Backup status must be available or error'))
    # Mark the record first, then hand the actual delete to the
    # backup service over RPC.
    self.db.backup_update(context, backup_id, {'status': 'deleting'})
    self.backup_rpcapi.delete_backup(context,
                                     backup['host'],
                                     backup['id'])
def _get_volume_realpath(self, volume_file, volume_id):
    """Get the real path for the volume block device.

    If the volume is not a block device then issue an InvalidBackup
    exception.

    :param volume_file: file object representing the volume
    :param volume_id: Volume id for backup or as restore target
    :raises: InvalidBackup
    :returns str -- real path of volume device
    """
    try:
        # Get real path
        volume_path = os.path.realpath(volume_file.name)
        # Verify that path is a block device
        volume_mode = os.stat(volume_path).st_mode
        if not stat.S_ISBLK(volume_mode):
            err = (_('backup: %(vol_id)s Failed. '
                     '%(path)s is not a block device.') % {
                'vol_id': volume_id,
                'path': volume_path
            })
            LOG.error(err)
            raise exception.InvalidBackup(reason=err)
    # Fix: dropped the unused 'as e' bindings on both handlers.
    except AttributeError:
        err = (_('backup: %(vol_id)s Failed. Cannot obtain real path '
                 'to device %(path)s.') % {
            'vol_id': volume_id,
            'path': volume_file
        })
        LOG.error(err)
        raise exception.InvalidBackup(reason=err)
    except OSError:
        err = (_('backup: %(vol_id)s Failed. '
                 '%(path)s is not a file.') % {
            'vol_id': volume_id,
            'path': volume_path
        })
        LOG.error(err)
        raise exception.InvalidBackup(reason=err)
    return volume_path
def create_backup(self, context, backup_id):
    """Create volume backups using configured backup service.

    Validates that the source volume is 'backing-up' and the backup
    record is 'creating', drives the backup through the configured
    driver, then marks the volume available and the backup available.

    :param context: running context
    :param backup_id: id of the backup to create
    :raises InvalidVolume: if the volume is not in 'backing-up' status
    :raises InvalidBackup: if the backup is not in 'creating' status
    """
    backup = self.db.backup_get(context, backup_id)
    volume_id = backup['volume_id']
    volume = self.db.volume_get(context, volume_id)
    LOG.info(_('create_backup started, backup: %(backup_id)s for '
               'volume: %(volume_id)s') %
             {'backup_id': backup_id, 'volume_id': volume_id})
    # Claim the backup for this host/driver before doing any work.
    self.db.backup_update(context, backup_id,
                          {'host': self.host,
                           'service': self.driver_name})

    expected_status = 'backing-up'
    actual_status = volume['status']
    if actual_status != expected_status:
        err = _('create_backup aborted, expected volume status '
                '%(expected_status)s but got %(actual_status)s') % {
            'expected_status': expected_status,
            'actual_status': actual_status,
        }
        self.db.backup_update(context, backup_id,
                              {'status': 'error', 'fail_reason': err})
        raise exception.InvalidVolume(reason=err)

    expected_status = 'creating'
    actual_status = backup['status']
    if actual_status != expected_status:
        err = _('create_backup aborted, expected backup status '
                '%(expected_status)s but got %(actual_status)s') % {
            'expected_status': expected_status,
            'actual_status': actual_status,
        }
        # Roll the volume back to 'available' before failing.
        self.db.volume_update(context, volume_id,
                              {'status': 'available'})
        self.db.backup_update(context, backup_id,
                              {'status': 'error', 'fail_reason': err})
        raise exception.InvalidBackup(reason=err)

    try:
        backup_service = self.service.get_backup_driver(context)
        self.driver.backup_volume(context, backup, backup_service)
    except Exception as err:
        with excutils.save_and_reraise_exception():
            # unicode(): this module targets Python 2.
            self.db.volume_update(context, volume_id,
                                  {'status': 'available'})
            self.db.backup_update(context, backup_id,
                                  {'status': 'error',
                                   'fail_reason': unicode(err)})

    self.db.volume_update(context, volume_id, {'status': 'available'})
    self.db.backup_update(context, backup_id,
                          {'status': 'available',
                           'size': volume['size'],
                           'availability_zone': self.az})
    LOG.info(_('create_backup finished. backup: %s'), backup_id)
def _backup_chunk(self, backup, container, data, data_offset,
                  object_meta):
    """Backup data chunk based on the object metadata and offset.

    Optionally compresses the chunk, uploads it to swift as the next
    numbered object, verifies the upload via MD5, and records the
    object descriptor.

    :param backup: backup record being written
    :param container: swift container to write into
    :param data: raw chunk bytes read from the volume
    :param data_offset: offset of this chunk within the volume
    :param object_meta: dict with 'prefix', 'list' and 'id' keys;
        mutated in place ('list' gets the new descriptor appended,
        'id' is incremented)
    :raises SwiftConnectionFailed: on socket errors talking to swift
    :raises InvalidBackup: if swift's etag does not match our MD5
    """
    object_prefix = object_meta['prefix']
    object_list = object_meta['list']
    object_id = object_meta['id']
    object_name = '%s-%05d' % (object_prefix, object_id)
    obj = {}
    obj[object_name] = {}
    obj[object_name]['offset'] = data_offset
    obj[object_name]['length'] = len(data)
    LOG.debug(_('reading chunk of data from volume'))
    if self.compressor is not None:
        algorithm = CONF.backup_compression_algorithm.lower()
        obj[object_name]['compression'] = algorithm
        data_size_bytes = len(data)
        data = self.compressor.compress(data)
        comp_size_bytes = len(data)
        LOG.debug(_('compressed %(data_size_bytes)d bytes of data '
                    'to %(comp_size_bytes)d bytes using '
                    '%(algorithm)s') % {
            'data_size_bytes': data_size_bytes,
            'comp_size_bytes': comp_size_bytes,
            'algorithm': algorithm,
        })
    else:
        LOG.debug(_('not compressing data'))
        obj[object_name]['compression'] = 'none'

    # Python 2 StringIO; the chunk is uploaded from memory.
    reader = StringIO.StringIO(data)
    LOG.debug(_('About to put_object'))
    try:
        etag = self.conn.put_object(container, object_name, reader,
                                    content_length=len(data))
    except socket.error as err:
        raise exception.SwiftConnectionFailed(reason=str(err))
    LOG.debug(_('swift MD5 for %(object_name)s: %(etag)s') %
              {'object_name': object_name, 'etag': etag, })
    # MD5 is computed over the (possibly compressed) payload actually
    # sent, so it is directly comparable to swift's etag.
    md5 = hashlib.md5(data).hexdigest()
    obj[object_name]['md5'] = md5
    LOG.debug(_('backup MD5 for %(object_name)s: %(md5)s') %
              {'object_name': object_name, 'md5': md5})
    if etag != md5:
        err = _('error writing object to swift, MD5 of object in '
                'swift %(etag)s is not the same as MD5 of object sent '
                'to swift %(md5)s') % {'etag': etag, 'md5': md5}
        raise exception.InvalidBackup(reason=err)
    object_list.append(obj)
    object_id += 1
    object_meta['list'] = object_list
    object_meta['id'] = object_id
    LOG.debug(_('Calling eventlet.sleep(0)'))
    # Yield to other green threads so heartbeats etc. can run during
    # long backups.
    eventlet.sleep(0)
def close(self):
    """Write buffered data to the backend via a temporary file.

    :raises InvalidBackup: if the temp file already exists or the
        backend write fails
    """
    path = os.path.join(r'/tmp/', self.object_name)
    if not os.path.exists(path):
        with open(path, 'w') as file_object:
            file_object.write(self.data)
    else:
        # Fix: the message and its argument were joined with ','
        # (building a tuple) instead of '%' interpolation.
        err = _('The tmp file %s exist, create failed!') % path
        LOG.error(err)
        raise exception.InvalidBackup(reason=err)
    file_obj = file(path)
    try:
        obj = onest_common.OnestObject(file_obj, {})
        if self.onest.put_object(self.container, self.object_name,
                                 obj.data):
            LOG.debug('Success to write object:%s.', self.object_name)
        else:
            err = _('oNestObjectWriter write object error!')
            LOG.error(err)
            raise exception.InvalidBackup(reason=err)
    finally:
        # Fix: close the handle (previously leaked) before removing
        # the temp file.
        file_obj.close()
    if os.path.exists(path):
        os.remove(path)
def _restore_v1(self, backup, volume_id, metadata, volume_file):
    """Restore a v1 swift volume backup from swift.

    Verifies that the object list recorded in the backup metadata
    matches what is actually present in swift, then downloads each
    object in order, decompressing when needed, and writes it to the
    target volume file.

    :param backup: backup record being restored
    :param volume_id: id of the volume being restored into
    :param metadata: parsed backup metadata (contains 'objects')
    :param volume_file: file object open on the target volume
    :raises InvalidBackup: if swift's object list does not match the
        metadata's object list
    :raises SwiftConnectionFailed: on socket errors talking to swift
    """
    backup_id = backup['id']
    LOG.debug(_('v1 swift volume backup restore of %s started'),
              backup_id)
    container = backup['container']
    metadata_objects = metadata['objects']
    metadata_object_names = []
    for metadata_object in metadata_objects:
        metadata_object_names.extend(metadata_object.keys())
    LOG.debug(_('metadata_object_names = %s') % metadata_object_names)
    # The metadata object itself lives in the same container; exclude
    # it when comparing against the recorded data-object list.
    prune_list = [self._metadata_filename(backup)]
    swift_object_names = [
        swift_object_name
        for swift_object_name in self._generate_object_names(backup)
        if swift_object_name not in prune_list
    ]
    if sorted(swift_object_names) != sorted(metadata_object_names):
        err = _('restore_backup aborted, actual swift object list in '
                'swift does not match object list stored in metadata')
        raise exception.InvalidBackup(reason=err)

    for metadata_object in metadata_objects:
        # Python 2: dict.keys() returns a list, so [0] works; each
        # descriptor dict holds exactly one object name.
        object_name = metadata_object.keys()[0]
        LOG.debug(
            _('restoring object from swift. backup: %(backup_id)s, '
              'container: %(container)s, swift object name: '
              '%(object_name)s, volume: %(volume_id)s') % locals())
        try:
            (resp, body) = self.conn.get_object(container, object_name)
        except socket.error as err:
            raise exception.SwiftConnectionFailed(reason=str(err))
        compression_algorithm = \
            metadata_object[object_name]['compression']
        decompressor = self._get_compressor(compression_algorithm)
        if decompressor is not None:
            LOG.debug(_('decompressing data using %s algorithm') %
                      compression_algorithm)
            decompressed = decompressor.decompress(body)
            volume_file.write(decompressed)
        else:
            volume_file.write(body)

        # force flush every write to avoid long blocking write on close
        volume_file.flush()
        os.fsync(volume_file.fileno())
        # Restoring a backup to a volume can take some time. Yield so
        # other threads can run, allowing for among other things the
        # service status to be updated
        eventlet.sleep(0)
    LOG.debug(_('v1 swift volume backup restore of %s finished'),
              backup_id)
def delete_backup(self, backup):
    """Delete the given backup from TSM server.

    :param backup: backup information for volume
    :raises: InvalidBackup
    """
    # dsmc reports this attribute when exactly one object was deleted.
    delete_attrs = {'Total number of objects deleted': '1'}
    # NOTE(review): the operation label passed here is 'restore',
    # which looks like a copy/paste slip -- it only affects the
    # wording of _get_backup_metadata's error message; confirm.
    delete_path, backup_mode = _get_backup_metadata(backup, 'restore')

    LOG.debug('Delete started for backup: %(backup)s, mode: %(mode)s.',
              {'backup': backup.id, 'mode': backup_mode})

    try:
        out, err = utils.execute('dsmc', 'delete', 'backup',
                                 '-quiet', '-noprompt',
                                 '-objtype=%s' % backup_mode,
                                 '-password=%s' % self.tsm_password,
                                 delete_path,
                                 run_as_root=True,
                                 check_exit_code=False)
    except processutils.ProcessExecutionError as exc:
        msg = (_('delete: %(vol_id)s failed to run dsmc with '
                 'stdout: %(out)s\n stderr: %(err)s')
               % {'vol_id': backup.volume_id,
                  'out': exc.stdout,
                  'err': exc.stderr})
        LOG.error(msg)
        raise exception.InvalidBackup(reason=msg)

    if not _check_dsmc_output(out, delete_attrs):
        # log error if tsm cannot delete the backup object
        # but do not raise exception so that cinder backup
        # object can be removed.
        LOG.error('delete: %(vol_id)s failed with '
                  'stdout: %(out)s\n stderr: %(err)s',
                  {'vol_id': backup.volume_id,
                   'out': out,
                   'err': err})

    LOG.debug('Delete %s finished.', backup['id'])
def close(self):
    """Write buffered data to the backend via a temporary file.

    :raises InvalidBackup: if the temp file already exists or the
        backend write fails
    """
    path = os.path.join(r'/tmp/', self.object_name)
    if not os.path.exists(path):
        with open(path, 'w') as file_object:
            file_object.write(self.data)
    else:
        # Fix: the translated message and its substitution dict were
        # joined with ',' (building a tuple) instead of '%'
        # interpolation; same fix applied to the write-failure
        # message below.
        err = (_('oNestObjectWriter, '
                 'the tmp file(%(path)s) exist, create failed! '
                 'Container: %(container)s, object: %(obj_name)s.')
               % {'container': self.container,
                  'obj_name': self.object_name,
                  'path': path})
        LOG.error(err)
        raise exception.InvalidBackup(reason=err)
    file_obj = file(path)
    try:
        obj = onest_common.OnestObject(file_obj, {})
        if self.onest.put_object(self.container, self.object_name,
                                 obj.data):
            LOG.debug(
                'oNestObjectWriter, write success. '
                'Container: %(container)s, object: %(obj_name)s.',
                {'container': self.container,
                 'obj_name': self.object_name})
        else:
            err = (_('oNestObjectWriter, write failed! '
                     'Container: %(container)s, object: %(obj_name)s.')
                   % {'container': self.container,
                      'obj_name': self.object_name})
            LOG.error(err)
            raise exception.InvalidBackup(reason=err)
    finally:
        # Fix: close the handle (previously leaked) before removing
        # the temp file.
        file_obj.close()
    if os.path.exists(path):
        os.remove(path)
def restore(self, backup, volume_id, volume_file):
    """Restore the given volume backup from TSM server.

    :param backup: backup information for volume
    :param volume_id: volume id
    :param volume_file: file object representing the volume
    :raises: InvalidBackup
    """
    # Path that was originally backed up on the TSM server.
    backup_path, backup_mode = _get_backup_metadata(backup, 'restore')

    LOG.debug('Starting restore of backup from TSM '
              'to volume %(volume_id)s, '
              'backup: %(backup_id)s, '
              'mode: %(mode)s.',
              {'volume_id': volume_id,
               'backup_id': backup.id,
               'mode': backup_mode})

    # The restore target may differ from the original volume, so
    # resolve its real device path and link it under a unique name
    # for dsmc to restore into.
    volume_path, _ignored_mode = _get_volume_realpath(volume_file,
                                                      volume_id)
    restore_path = _create_unique_device_link(backup.id,
                                              volume_path,
                                              volume_id,
                                              backup_mode)
    try:
        self._do_restore(backup_path, restore_path, volume_id,
                         backup_mode)
    except processutils.ProcessExecutionError as exc:
        msg = (_('restore: %(vol_id)s failed to run dsmc '
                 'on %(bpath)s.\n'
                 'stdout: %(out)s\n stderr: %(err)s')
               % {'vol_id': volume_id,
                  'bpath': restore_path,
                  'out': exc.stdout,
                  'err': exc.stderr})
        LOG.error(msg)
        raise exception.InvalidBackup(reason=msg)
    finally:
        # Always remove the temporary hard link, even on failure.
        _cleanup_device_hardlink(restore_path, volume_path, volume_id)

    LOG.debug('Restore %(backup_id)s to %(volume_id)s finished.',
              {'backup_id': backup.id, 'volume_id': volume_id})
def close(self):
    """Flush the buffered data to swift and verify the upload MD5.

    :returns: hex MD5 digest of the uploaded data
    :raises SwiftConnectionFailed: on socket errors talking to swift
    :raises InvalidBackup: if swift's etag differs from our MD5
    """
    payload = self.data
    try:
        etag = self.conn.put_object(self.container, self.object_name,
                                    io.BytesIO(payload),
                                    content_length=len(payload))
    except socket.error as err:
        raise exception.SwiftConnectionFailed(reason=err)
    checksum = secretutils.md5(payload,
                               usedforsecurity=False).hexdigest()
    if etag != checksum:
        raise exception.InvalidBackup(
            reason=_('error writing object to swift, MD5 of object in '
                     'swift %(etag)s is not the same as MD5 of object '
                     'sent '
                     'to swift %(md5)s') % {'etag': etag,
                                            'md5': checksum})
    return checksum
def delete_backup(self, context, backup_id):
    """Delete volume backup from configured backup service.

    Validates the 'deleting' status, deletes the backup on the
    cascaded (child) cinder, adjusts quota usage, and destroys the
    local DB record.

    :param context: running context
    :param backup_id: id of the backup to delete
    :raises InvalidBackup: on unexpected backup status
    """
    LOG.info(_('cascade info:delete backup started, backup: %s.'),
             backup_id)
    backup = self.db.backup_get(context, backup_id)
    expected_status = 'deleting'
    actual_status = backup['status']
    if actual_status != expected_status:
        err = _('Delete_backup aborted, expected backup status '
                '%(expected_status)s but got %(actual_status)s.') \
            % {'expected_status': expected_status,
               'actual_status': actual_status}
        self.db.backup_update(context, backup_id,
                              {'status': 'error', 'fail_reason': err})
        raise exception.InvalidBackup(reason=err)
    try:
        self._delete_backup_cascaded(context, backup_id)
    except Exception as err:
        with excutils.save_and_reraise_exception():
            # unicode(): this module targets Python 2.
            self.db.backup_update(context, backup_id,
                                  {'status': 'error',
                                   'fail_reason': unicode(err)})
    # Get reservations
    try:
        # Negative deltas release the quota consumed by this backup.
        reserve_opts = {
            'backups': -1,
            'backup_gigabytes': -backup['size'],
        }
        reservations = QUOTAS.reserve(context,
                                      project_id=backup['project_id'],
                                      **reserve_opts)
    except Exception:
        # Best effort: a quota failure must not block the delete.
        reservations = None
        LOG.exception(_("Failed to update usages deleting backup"))
    context = context.elevated()
    self.db.backup_destroy(context, backup_id)
    # Commit the reservations
    if reservations:
        QUOTAS.commit(context, reservations,
                      project_id=backup['project_id'])
    LOG.info(_('Delete backup finished, backup %s deleted.'),
             backup_id)
def delete_object(self, container, object_name):
    """Delete object from container.

    :param container: container holding the object
    :param object_name: name of the object to delete
    :raises InvalidBackup: if the backend reports the delete failed
    """
    if self.onest.delete_object(container, object_name):
        LOG.debug(
            'Delete object success. '
            'Container: %(container)s, object: %(object_name)s.',
            {'container': container, 'object_name': object_name})
    else:
        # Fix: the message and its substitution dict were joined with
        # ',' (building a tuple) instead of '%' interpolation.
        err = (_('Delete object failed! '
                 'Container: %(container)s, object: %(object_name)s.')
               % {'container': container,
                  'object_name': object_name})
        LOG.error(err)
        raise exception.InvalidBackup(reason=err)
def get_container_entries(self, container, prefix):
    """Get container entry names.

    :param container: container (bucket) to list
    :param prefix: name prefix to filter by
    :returns: list of object names under the prefix
    :raises InvalidBackup: if the listing request fails
    """
    response = self.onest.list_objects_of_bucket(container,
                                                 {'prefix': prefix})
    if not response:
        err = _('Get container object names failed!')
        LOG.error(err)
        raise exception.InvalidBackup(reason=err)
    # Keep only entries whose URI maps to a usable object name.
    object_names = [
        name
        for name in (self._get_object_name(entry.object_uri)
                     for entry in response.entries)
        if name
    ]
    LOG.debug('Get container object names successful, names:%s.',
              object_names)
    return object_names
def reset_status(self, context, backup, status):
    """Reset volume backup status.

    :param context: running context
    :param backup: The backup object for reset status operation
    :param status: The status to be set
    :raises InvalidBackup:
    :raises AttributeError:
    """
    LOG.info(
        'Reset backup status started, backup_id: '
        '%(backup_id)s, status: %(status)s.',
        {'backup_id': backup.id, 'status': status})

    LOG.info('Backup service: %s.', backup.service)
    if not self._is_our_backup(backup):
        err = _('Reset backup status aborted, the backup service'
                ' currently configured [%(configured_service)s] '
                'is not the backup service that was used to create'
                ' this backup [%(backup_service)s].') % \
            {'configured_service': self.driver_name,
             'backup_service': backup.service}
        raise exception.InvalidBackup(reason=err)

    if backup.service is not None:
        backup.status = status
        backup.save()
        # Needs to clean temporary volumes and snapshots.
        try:
            self._cleanup_temp_volumes_snapshots_for_one_backup(
                context, backup)
        except Exception:
            # Cleanup is best effort; a failure here must not mask
            # the status reset itself.
            LOG.exception(
                "Problem cleaning temp volumes and "
                "snapshots for backup %(bkup)s.",
                {'bkup': backup.id})

        # send notification to ceilometer
        notifier_info = {'id': backup.id, 'update': {'status': status}}
        notifier = rpc.get_notifier('backupStatusUpdate')
        notifier.info(context, "backups.reset_status.end",
                      notifier_info)
    volume_utils.notify_about_backup_usage(context, backup,
                                           'reset_status.end')