def restore_level(self, restore_resource, read_pipe, backup, except_queue):
    # Restore a Glance image from the backup stream read from read_pipe.
    # Returns the created image; any failure is logged, forwarded to
    # except_queue (for the coordinating process) and re-raised.
    try:
        metadata = backup.metadata()
        # Refuse to proceed when the backup is encrypted but no passphrase
        # file is configured: the payload could not be decrypted.
        if (not self.encrypt_pass_file and
                metadata.get("encryption", False)):
            raise Exception("Cannot restore encrypted backup without key")
        engine_metadata = backup.engine_metadata()
        image_info = metadata.get('image', {})
        container_format = image_info.get('container_format', 'bare')
        disk_format = image_info.get('disk_format', 'raw')
        # Image size in bytes, recorded in engine metadata at backup time.
        length = int(engine_metadata.get('length'))
        stream = self.stream_image(read_pipe)
        # presumably ReSizeStream re-chunks the pipe stream to `length`
        # bytes — verify against utils.ReSizeStream.
        data = utils.ReSizeStream(stream, length, 1)
        image = self.client.create_image(
            "Restore: {0}".format(
                image_info.get('name', image_info.get('id', None))),
            container_format, disk_format, data=data)
        if self.encrypt_pass_file:
            # Encrypted backups are wrapped in a tar stream; restore it
            # through the tar engine into a scratch directory.
            # NOTE(review): tmpdir is never removed here — confirm whether
            # cleanup happens elsewhere.
            try:
                tmpdir = tempfile.mkdtemp()
            except Exception:
                LOG.error("Unable to create a tmp directory")
                raise
            tar_engine = tar.TarEngine(self.compression_algo,
                                       self.dereference_symlink,
                                       self.exclude, self.storage,
                                       self.max_segment_size,
                                       self.encrypt_pass_file, self.dry_run)
            tar_engine.restore_level(tmpdir, read_pipe, backup, except_queue)
        # Block until Glance reports the uploaded image as active.
        utils.wait_for(
            GlanceEngine.image_active,
            1,
            CONF.timeout,
            message="Waiting for image to finish uploading {0} and become"
                    " active".format(image.id),
            kwargs={
                "glance_client": self.glance,
                "image_id": image.id
            })
        return image
    except Exception as e:
        LOG.exception(e)
        # Hand the failure to the parent process before re-raising here.
        except_queue.put(e)
        raise
def backup_nova(self, instance_id):
    """Back up a Nova instance by snapshotting it to a Glance image.

    Waits for the instance to become task-free, snapshots it, waits for
    the snapshot image to become active, streams the image to the
    configured storage backend and finally deletes the temporary image.

    :param instance_id: Id of the instance for backup
    :return: None
    """
    client_manager = self.client_manager
    nova = client_manager.get_nova()
    instance = nova.servers.get(instance_id)
    glance = client_manager.get_glance()

    def instance_finish_task():
        # The snapshot can only start once the instance has no task running.
        instance = nova.servers.get(instance_id)
        return not instance.__dict__['OS-EXT-STS:task_state']

    utils.wait_for(
        instance_finish_task, 1, CONF.timeout,
        message="Waiting for instance {0} to finish {1} to start the "
                "snapshot process".format(
                    instance_id,
                    instance.__dict__['OS-EXT-STS:task_state']
                )
    )
    # Refresh by id (the original passed the stale server object itself).
    instance = nova.servers.get(instance_id)
    image_id = nova.servers.create_image(instance,
                                         "snapshot_of_%s" % instance_id)
    image = glance.images.get(image_id)

    def image_active():
        # Poll Glance until the snapshot upload has completed.
        image = glance.images.get(image_id)
        return image.status == 'active'

    utils.wait_for(image_active, 1, CONF.timeout,
                   message="Waiting for instance {0} snapshot {1} to "
                           "become active".format(instance_id, image_id))
    # Best-effort refresh; on failure keep the last known image object.
    try:
        image = glance.images.get(image_id)
    except Exception as e:
        LOG.error(e)
    stream = client_manager.download_image(image)
    package = "{0}/{1}".format(instance_id, utils.DateTime.now().timestamp)
    LOG.info("Saving image to {0}".format(self.storage.type))
    # NOTE(review): assumes the downloaded stream supports len() — confirm
    # against client_manager.download_image.
    headers = {"x-object-meta-name": instance.name,
               "x-object-meta-flavor-id": str(instance.flavor.get('id')),
               'x-object-meta-length': str(len(stream))}
    self.storage.add_stream(stream, package, headers)
    LOG.info("Deleting temporary image {0}".format(image))
    glance.images.delete(image.id)
def backup_data(self, backup_resource, manifest_path):
    """Snapshot a Nova instance and yield the image data chunk by chunk.

    :param backup_resource: id of the instance to back up
    :param manifest_path: path the tenant metadata headers are stored under
    :raises Exception: if the server or the snapshot image cannot be found
    """
    server = self.nova.servers.get(backup_resource)
    if not server:
        raise Exception("Server not found {0}".format(backup_resource))

    def instance_finish_task():
        # The snapshot can only start once the instance has no task running.
        server = self.nova.servers.get(backup_resource)
        return not server.__dict__['OS-EXT-STS:task_state']

    utils.wait_for(
        instance_finish_task, 1, CONF.timeout,
        message="Waiting for instance {0} to finish {1} to start the "
                "snapshot process".format(
                    backup_resource,
                    server.__dict__['OS-EXT-STS:task_state']))
    image_id = self.nova.servers.create_image(
        server, "snapshot_of_{0}".format(backup_resource))
    image = self.glance.images.get(image_id)
    if not image:
        raise Exception(
            "Image {0} is not created or can't be found.".format(image_id))
    # wait a bit for the snapshot to be taken and completely uploaded
    # to glance.
    # Fix: "instnace" typo corrected in the wait message below.
    utils.wait_for(NovaEngine.image_active, 1, 100,
                   message="Waiting for instance {0} snapshot to become "
                           "active".format(backup_resource),
                   kwargs={
                       "glance_client": self.glance,
                       "image_id": image_id
                   })
    image = self.glance.images.get(image_id)
    stream = self.client.download_image(image)
    LOG.info("Uploading image to swift")
    # NOTE(review): assumes the image stream supports len() — confirm.
    headers = {"server_name": server.name,
               "flavour_id": str(server.flavor.get('id')),
               'length': str(len(stream))}
    self.set_tenant_meta(manifest_path, headers)
    for chunk in stream:
        yield chunk
    LOG.info("Deleting temporary image {0}".format(image.id))
    self.glance.images.delete(image.id)
    # Record server details for the backup metadata.
    self.server_info = server.to_dict()
    self.server_info['length'] = len(stream)
def restore_level(self, restore_resource, read_pipe, backup, except_queue):
    """Restore a Nova instance from a backup image stream.

    Creates a Glance image from the stream, waits for it to become
    active, and boots a new server from it, re-attaching the networks
    recorded in the backup metadata.

    :return: the newly created server
    """
    try:
        metadata = backup.metadata()
        engine_metadata = backup.engine_metadata()
        server_info = metadata.get('server', {})
        length = int(engine_metadata.get('length'))
        available_networks = server_info.get('addresses')
        nova_networks = self.nova.networks.findall()
        # Fix: dict.iteritems() does not exist on Python 3; iterating the
        # dict itself yields the network names (its keys).
        net_names = [network for network in available_networks]
        match_networks = [{"net-id": network.id}
                          for network in nova_networks
                          if network.to_dict().get('label') in net_names]
        stream = self.stream_image(read_pipe)
        data = utils.ReSizeStream(stream, length, 1)
        image = self.client.create_image(
            "Restore: {0}".format(
                server_info.get('name', server_info.get('id', None))),
            'bare', 'raw', data=data)
        # Block until Glance reports the uploaded image as active.
        utils.wait_for(
            NovaEngine.image_active, 1, CONF.timeout,
            message="Waiting for image to finish uploading {0} and become"
                    " active".format(image.id),
            kwargs={"glance_client": self.glance, "image_id": image.id})
        server = self.nova.servers.create(
            name=server_info.get('name'),
            flavor=server_info['flavor']['id'],
            image=image.id,
            nics=match_networks)
        return server
    except Exception as e:
        LOG.exception(e)
        # Hand the failure to the parent process before re-raising here.
        except_queue.put(e)
        raise
def test_wait_for_negative(self):
    """wait_for must raise TimeoutException when the condition never holds."""
    condition = mock.MagicMock(side_effect=[False, False, False])
    # Fix: pass the callable and its arguments separately. The original
    # called utils.wait_for(...) inline, so the exception was raised
    # before assertRaises could capture it.
    self.assertRaises(exception_utils.TimeoutException,
                      utils.wait_for, condition, 0.1, 0.2)
def test_wait_for_positive(self):
    """wait_for must return once the condition becomes true."""
    condition = mock.MagicMock(side_effect=[False, False, True, True])
    utils.wait_for(condition, 0.1, 1)
    # Fix: `.called` is a boolean flag; `.call_count` is the number of
    # invocations, which is what the assertion intends to check.
    self.assertEqual(3, condition.call_count)
def backup_data(self, backup_path, manifest_path):
    # Back up a Cinder volume with os-brick: snapshot the volume, clone
    # the snapshot into a fresh volume, attach and mount that volume
    # locally, then stream its filesystem through the tar engine.
    # This method is a generator yielding backup data chunks.
    LOG.info("Starting os-brick engine backup stream")
    volume = self.cinder.volumes.get(backup_path)
    self.volume_info = volume.to_dict()
    # force=True so an attached/in-use volume can still be snapshotted.
    snapshot = self.cinder.volume_snapshots.create(backup_path, force=True)
    LOG.info("[*] Creating volume snapshot")
    utils.wait_for(
        OsbrickEngine.is_active, 1, 100,
        message="Waiting for volume {0} snapshot to become "
                "active".format(backup_path),
        kwargs={"client_manager": self.cinder.volume_snapshots,
                "id": snapshot.id}
    )
    LOG.info("[*] Converting snapshot to volume")
    backup_volume = self.cinder.volumes.create(snapshot.size,
                                               snapshot_id=snapshot.id)
    utils.wait_for(
        OsbrickEngine.is_active, 1, 100,
        message="Waiting for backup volume {0} to become "
                "active".format(backup_volume.id),
        kwargs={"client_manager": self.cinder.volumes,
                "id": backup_volume.id}
    )
    try:
        tmpdir = tempfile.mkdtemp()
    except Exception:
        LOG.error("Unable to create a tmp directory")
        raise
    LOG.info("[*] Trying to attach the volume to localhost")
    brickclient = brick_client.Client(volumes_client=self.cinder)
    attach_info = brickclient.attach(backup_volume.id,
                                     socket.gethostname(),
                                     tmpdir)
    # Mount the attached block device unless something is already mounted
    # on the temporary directory.
    # NOTE(review): filesystem type is hard-coded to ext4 — confirm that
    # backed-up volumes are always ext4.
    if not os.path.ismount(tmpdir):
        subprocess.check_output(['sudo', 'mount', '-t', 'ext4',
                                 attach_info.get('path'), tmpdir])
    cwd = os.getcwd()
    os.chdir(tmpdir)
    tar_engine = tar.TarEngine(self.compression_algo,
                               self.dereference_symlink,
                               self.exclude, self.storage,
                               self.max_segment_size,
                               self.encrypt_pass_file, self.dry_run)
    # Stream the mounted filesystem ('.') through the tar engine.
    for data_chunk in tar_engine.backup_data('.', manifest_path):
        yield data_chunk
    # NOTE(review): the cwd restore / umount / detach below are skipped
    # if the tar stream raises — confirm whether that is acceptable.
    os.chdir(cwd)
    LOG.info("[*] Detaching volume")
    subprocess.check_output(['sudo', 'umount', tmpdir])
    shutil.rmtree(tmpdir)
    brickclient.detach(backup_volume.id)
    utils.wait_for(
        OsbrickEngine.is_active, 1, 100,
        message="Waiting for backup volume {0} to become "
                "active".format(backup_volume.id),
        kwargs={"client_manager": self.cinder.volumes,
                "id": backup_volume.id}
    )
    LOG.info("[*] Removing backup volume and snapshot")
    self.cinder.volumes.delete(backup_volume.id)
    self.cinder.volume_snapshots.delete(snapshot, force=True)
    LOG.info('Backup process completed')
def restore_level(self, restore_path, read_pipe, backup, except_queue):
    # Restore a Cinder volume with os-brick: reuse (or create) the target
    # volume, attach and mount it locally, then let the tar engine write
    # the backup stream into the mounted filesystem.
    try:
        LOG.info("Restoring volume {} using os-brick engine".format(
            restore_path))
        new_volume = False
        metadata = backup.metadata()
        # Refuse to proceed when the backup is encrypted but no
        # passphrase file is configured.
        if (not self.encrypt_pass_file and
                metadata.get("encryption", False)):
            raise Exception("Cannot restore encrypted backup without key")
        volume_info = metadata.get("volume_info")
        try:
            backup_volume = self.cinder.volumes.get(restore_path)
        except Exception:
            # Target volume does not exist yet: create one sized from the
            # backup metadata and wait for it to become usable.
            new_volume = True
            LOG.info("[*] Volume doesn't exists, creating a new one")
            backup_volume = self.cinder.volumes.create(volume_info['size'])
            utils.wait_for(
                OsbrickEngine.is_active, 1, 100,
                message="Waiting for backup volume {0} to become "
                        "active".format(backup_volume.id),
                kwargs={"client_manager": self.cinder.volumes,
                        "id": backup_volume.id}
            )
        if backup_volume.attachments:
            # The target volume is attached somewhere: restore onto a
            # detached copy created from a snapshot instead.
            LOG.info('Volume is used, creating a copy from snapshot')
            snapshot = self.cinder.volume_snapshots.create(
                backup_volume.id, force=True)
            utils.wait_for(
                OsbrickEngine.is_active, 1, 100,
                message="Waiting for volume {0} snapshot to become "
                        "active".format(backup_volume.id),
                kwargs={"client_manager": self.cinder.volume_snapshots,
                        "id": snapshot.id}
            )
            LOG.info("[*] Converting snapshot to volume")
            backup_volume = self.cinder.volumes.create(
                snapshot.size, snapshot_id=snapshot.id)
            utils.wait_for(
                OsbrickEngine.is_active, 1, 100,
                message="Waiting for backup volume {0} to become "
                        "active".format(backup_volume.id),
                kwargs={"client_manager": self.cinder.volumes,
                        "id": backup_volume.id}
            )
        backup_volume = self.cinder.volumes.get(backup_volume.id)
        if backup_volume.status != 'available':
            raise RuntimeError('Unable to use volume for restore data')
        try:
            tmpdir = tempfile.mkdtemp()
        except Exception:
            LOG.error("Unable to create a tmp directory")
            raise
        LOG.info("[*] Trying to attach the volume to localhost")
        brickclient = brick_client.Client(volumes_client=self.cinder)
        attach_info = brickclient.attach(backup_volume.id,
                                         socket.gethostname(),
                                         tmpdir)
        if not os.path.ismount(tmpdir):
            # A brand-new volume has no filesystem yet: create one first.
            # NOTE(review): filesystem type is hard-coded to ext4.
            if new_volume:
                subprocess.check_output(['sudo', 'mkfs.ext4',
                                         attach_info.get('path')])
            subprocess.check_output(['sudo', 'mount', '-t', 'ext4',
                                     attach_info.get('path'), tmpdir])
        tar_engine = tar.TarEngine(self.compression_algo,
                                   self.dereference_symlink,
                                   self.exclude, self.storage,
                                   self.max_segment_size,
                                   self.encrypt_pass_file, self.dry_run)
        # Unpack the backup stream into the mounted filesystem.
        tar_engine.restore_level(tmpdir, read_pipe, backup, except_queue)
        subprocess.check_output(['sudo', 'umount', tmpdir])
        shutil.rmtree(tmpdir)
        LOG.info("[*] Detaching volume")
        brickclient.detach(backup_volume.id)
        utils.wait_for(
            OsbrickEngine.is_active, 1, 100,
            message="Waiting for backup volume {0} to become "
                    "active".format(backup_volume.id),
            kwargs={"client_manager": self.cinder.volumes,
                    "id": backup_volume.id}
        )
        LOG.info('Restore process completed')
    except Exception as e:
        LOG.exception(e)
        # Hand the failure to the parent process before re-raising here.
        except_queue.put(e)
        raise
def restore_level(self, restore_resource, read_pipe, backup, except_queue):
    """Restore a Nova instance from a backup image stream.

    Creates a Glance image from the stream, matches the networks
    recorded in the backup metadata against the current Neutron
    networks, and boots a new server from the restored image.

    :return: the newly created server
    """
    try:
        metadata = backup.metadata()
        # An encrypted backup cannot be restored without the key file.
        if (not self.encrypt_pass_file and
                metadata.get("encryption", False)):
            raise Exception("Cannot restore encrypted backup without key")
        engine_metadata = backup.engine_metadata()
        server_info = metadata.get('server', {})
        length = int(engine_metadata.get('length'))
        available_networks = server_info.get('addresses')
        nova_networks = self.neutron.list_networks()['networks']
        # Fix: dict.iteritems() was removed in Python 3; iterating the
        # dict directly yields its keys (the network names).
        net_names = [network for network in available_networks]
        match_networks = [{"net-id": network.get('id')}
                          for network in nova_networks
                          if network.get('name') in net_names]
        stream = self.stream_image(read_pipe)
        data = utils.ReSizeStream(stream, length, 1)
        image = self.client.create_image(
            "Restore: {0}".format(
                server_info.get('name', server_info.get('id', None))),
            'bare', 'raw', data=data)
        if self.encrypt_pass_file:
            # Encrypted backups are wrapped in a tar stream; restore it
            # through the tar engine into a scratch directory.
            try:
                tmpdir = tempfile.mkdtemp()
            except Exception:
                LOG.error("Unable to create a tmp directory")
                raise
            tar_engine = tar.TarEngine(self.compression_algo,
                                       self.dereference_symlink,
                                       self.exclude, self.storage,
                                       self.max_segment_size,
                                       self.encrypt_pass_file, self.dry_run)
            tar_engine.restore_level(tmpdir, read_pipe, backup,
                                     except_queue)
        # Block until Glance reports the uploaded image as active.
        utils.wait_for(
            NovaEngine.image_active, 1, CONF.timeout,
            message="Waiting for image to finish uploading {0} and become"
                    " active".format(image.id),
            kwargs={"glance_client": self.glance, "image_id": image.id})
        server = self.nova.servers.create(
            name=server_info.get('name'),
            flavor=server_info['flavor']['id'],
            image=image.id,
            nics=match_networks)
        return server
    except Exception as e:
        LOG.exception(e)
        # Hand the failure to the parent process before re-raising here.
        except_queue.put(e)
        raise
def backup_data(self, backup_resource, manifest_path):
    """Snapshot a Nova instance and yield its backup data chunk by chunk.

    Handles both image-booted instances (snapshot image is downloaded
    directly) and volume/snapshot-booted ones (the boot volume is copied
    via a temporary snapshot and re-exported as a Glance image).

    :param backup_resource: id of the instance to back up
    :param manifest_path: path the tenant metadata headers are stored under
    :raises Exception: if the server or the snapshot image cannot be found
    """
    server = self.nova.servers.get(backup_resource)
    if not server:
        raise Exception("Server not found {0}".format(backup_resource))

    def instance_finish_task():
        # The snapshot can only start once the instance has no task running.
        server = self.nova.servers.get(backup_resource)
        return not server.__dict__['OS-EXT-STS:task_state']

    utils.wait_for(
        instance_finish_task, 1, CONF.timeout,
        message="Waiting for instance {0} to finish {1} to start the "
                "snapshot process".format(
                    backup_resource,
                    server.__dict__['OS-EXT-STS:task_state']))
    image_id = self.nova.servers.create_image(
        server, "snapshot_of_{0}".format(backup_resource))
    image = self.glance.images.get(image_id)
    if not image:
        raise Exception(
            "Image {0} is not created or can't be found.".format(image_id))
    # wait a bit for the snapshot to be taken and completely uploaded
    # to glance.
    # Fix: "instnace" typo corrected in the wait message below.
    utils.wait_for(NovaEngine.image_active, 1, 100,
                   message="Waiting for instance {0} snapshot to become "
                           "active".format(backup_resource),
                   kwargs={
                       "glance_client": self.glance,
                       "image_id": image_id
                   })
    image = self.glance.images.get(image_id)
    image_temporary_snapshot_id = None
    copied_volume = None
    image_info = getattr(server, "image", None)
    if image_info is not None and isinstance(image_info, dict):
        LOG.info('Image type instance backup')
        boot_device_type = "image"
        stream = self.client.download_image(image)
    else:
        # Volume-booted instance: the snapshot image carries a block
        # device mapping pointing at a temporary volume snapshot.
        LOG.info('Volume or snapshot type instance backup')
        boot_device_type = "volume"
        image_block_mapping_info = image.get("block_device_mapping")
        image_block_mapping = json.loads(image_block_mapping_info) \
            if image_block_mapping_info else None
        image_temporary_snapshot_id = \
            image_block_mapping[0]['snapshot_id'] \
            if image_block_mapping else None
        copied_volume = self.client.do_copy_volume(
            self.cinder.volume_snapshots.get(image_temporary_snapshot_id))
        LOG.debug("Deleting temporary glance image "
                  "generated by snapshot")
        self.glance.images.delete(image.id)
        LOG.debug("Creation temporary glance image")
        image = self.client.make_glance_image(copied_volume.id,
                                              copied_volume)
        LOG.debug("Download temporary glance image {0}".format(image.id))
        stream = self.client.download_image(image)
    LOG.info("Uploading image to storage path")
    # NOTE(review): assumes the image stream supports len() — confirm.
    headers = {"server_name": server.name,
               "flavour_id": str(server.flavor.get('id')),
               'length': str(len(stream)),
               "boot_device_type": boot_device_type}
    self.set_tenant_meta(manifest_path, headers)
    for chunk in stream:
        yield chunk
    if self.encrypt_pass_file:
        # Also stream the tar-wrapped (encrypted) representation.
        tar_engine = tar.TarEngine(self.compression_algo,
                                   self.dereference_symlink,
                                   self.exclude, self.storage,
                                   self.max_segment_size,
                                   self.encrypt_pass_file, self.dry_run)
        for data_chunk in tar_engine.backup_data('.', manifest_path):
            yield data_chunk
    # Clean up all temporary resources created for the backup.
    if image_temporary_snapshot_id is not None:
        LOG.info("Deleting temporary snapshot {0}".format(
            image_temporary_snapshot_id))
        self.cinder.volume_snapshots.delete(image_temporary_snapshot_id)
    if copied_volume is not None:
        LOG.info("Deleting temporary copied volume {0}".format(
            copied_volume.id))
        self.cinder.volumes.delete(copied_volume)
    LOG.info("Deleting temporary image {0}".format(image.id))
    self.glance.images.delete(image.id)
def backup_data(self, backup_resource, manifest_path):
    """Snapshot a Nova instance and yield its backup data chunk by chunk.

    Handles both image-booted instances (snapshot image is downloaded
    directly) and volume/snapshot-booted ones (the boot volume is copied
    via a temporary snapshot and re-exported as a Glance image).

    :param backup_resource: id of the instance to back up
    :param manifest_path: path the tenant metadata headers are stored under
    :raises Exception: if the server or the snapshot image cannot be found
    """
    server = self.nova.servers.get(backup_resource)
    if not server:
        raise Exception("Server not found {0}".format(backup_resource))

    def instance_finish_task():
        # The snapshot can only start once the instance has no task running.
        server = self.nova.servers.get(backup_resource)
        return not server.__dict__['OS-EXT-STS:task_state']

    utils.wait_for(
        instance_finish_task, 1, CONF.timeout,
        message="Waiting for instance {0} to finish {1} to start the "
                "snapshot process".format(
                    backup_resource,
                    server.__dict__['OS-EXT-STS:task_state']
                )
    )
    image_id = self.nova.servers.create_image(
        server, "snapshot_of_{0}".format(backup_resource)
    )
    image = self.glance.images.get(image_id)
    if not image:
        raise Exception(
            "Image {0} is not created or can't be found.".format(image_id)
        )
    # wait a bit for the snapshot to be taken and completely uploaded
    # to glance.
    # Fix: "instnace" typo corrected in the wait message below.
    utils.wait_for(
        NovaEngine.image_active, 1, 100,
        message="Waiting for instance {0} snapshot to become "
                "active".format(backup_resource),
        kwargs={"glance_client": self.glance, "image_id": image_id}
    )
    image = self.glance.images.get(image_id)
    image_temporary_snapshot_id = None
    copied_volume = None
    image_info = getattr(server, "image", None)
    if image_info is not None and isinstance(image_info, dict):
        LOG.info('Image type instance backup')
        boot_device_type = "image"
        stream = self.client.download_image(image)
    else:
        # Volume-booted instance: the snapshot image carries a block
        # device mapping pointing at a temporary volume snapshot.
        LOG.info('Volume or snapshot type instance backup')
        boot_device_type = "volume"
        image_block_mapping_info = image.get("block_device_mapping")
        image_block_mapping = json.loads(image_block_mapping_info) \
            if image_block_mapping_info else None
        image_temporary_snapshot_id = \
            image_block_mapping[0]['snapshot_id'] \
            if image_block_mapping else None
        copied_volume = self.client.do_copy_volume(
            self.cinder.volume_snapshots.get(
                image_temporary_snapshot_id))
        LOG.debug("Deleting temporary glance image "
                  "generated by snapshot")
        self.glance.images.delete(image.id)
        LOG.debug("Creation temporary glance image")
        image = self.client.make_glance_image(
            copied_volume.id, copied_volume)
        LOG.debug("Download temporary glance image {0}".format(image.id))
        stream = self.client.download_image(image)
    LOG.info("Uploading image to storage path")
    # NOTE(review): assumes the image stream supports len() — confirm.
    headers = {"server_name": server.name,
               "flavour_id": str(server.flavor.get('id')),
               'length': str(len(stream)),
               "boot_device_type": boot_device_type}
    self.set_tenant_meta(manifest_path, headers)
    for chunk in stream:
        yield chunk
    if self.encrypt_pass_file:
        # Also stream the tar-wrapped (encrypted) representation.
        tar_engine = tar.TarEngine(self.compression_algo,
                                   self.dereference_symlink,
                                   self.exclude, self.storage,
                                   self.max_segment_size,
                                   self.encrypt_pass_file, self.dry_run)
        for data_chunk in tar_engine.backup_data('.', manifest_path):
            yield data_chunk
    # Clean up all temporary resources created for the backup.
    if image_temporary_snapshot_id is not None:
        LOG.info("Deleting temporary snapshot {0}"
                 .format(image_temporary_snapshot_id))
        self.cinder.volume_snapshots.delete(image_temporary_snapshot_id)
    if copied_volume is not None:
        LOG.info("Deleting temporary copied volume {0}"
                 .format(copied_volume.id))
        self.cinder.volumes.delete(copied_volume)
    LOG.info("Deleting temporary image {0}".format(image.id))
    self.glance.images.delete(image.id)
def restore_level(self, restore_resource, read_pipe, backup, except_queue):
    """Restore a Nova instance from a backup image stream.

    Creates a Glance image from the stream, matches the networks
    recorded in the backup metadata against the current Neutron
    networks, and boots a new server from the restored image.

    :return: the newly created server
    """
    try:
        metadata = backup.metadata()
        # An encrypted backup cannot be restored without the key file.
        if (not self.encrypt_pass_file and
                metadata.get("encryption", False)):
            raise Exception("Cannot restore encrypted backup without key")
        engine_metadata = backup.engine_metadata()
        server_info = metadata.get('server', {})
        length = int(engine_metadata.get('length'))
        available_networks = server_info.get('addresses')
        nova_networks = self.neutron.list_networks()['networks']
        # Fix: dict.iteritems() was removed in Python 3; iterating the
        # dict directly yields its keys (the network names).
        net_names = [network for network in available_networks]
        match_networks = [{"net-id": network.get('id')}
                          for network in nova_networks
                          if network.get('name') in net_names]
        stream = self.stream_image(read_pipe)
        data = utils.ReSizeStream(stream, length, 1)
        image = self.client.create_image(
            "Restore: {0}".format(
                server_info.get('name', server_info.get('id', None))
            ),
            'bare',
            'raw',
            data=data
        )
        if self.encrypt_pass_file:
            # Encrypted backups are wrapped in a tar stream; restore it
            # through the tar engine into a scratch directory.
            try:
                tmpdir = tempfile.mkdtemp()
            except Exception:
                LOG.error("Unable to create a tmp directory")
                raise
            tar_engine = tar.TarEngine(self.compression_algo,
                                       self.dereference_symlink,
                                       self.exclude, self.storage,
                                       self.max_segment_size,
                                       self.encrypt_pass_file, self.dry_run)
            tar_engine.restore_level(tmpdir, read_pipe, backup,
                                     except_queue)
        # Block until Glance reports the uploaded image as active.
        utils.wait_for(
            NovaEngine.image_active, 1, CONF.timeout,
            message="Waiting for image to finish uploading {0} and become"
                    " active".format(image.id),
            kwargs={"glance_client": self.glance, "image_id": image.id}
        )
        server = self.nova.servers.create(
            name=server_info.get('name'),
            flavor=server_info['flavor']['id'],
            image=image.id,
            nics=match_networks
        )
        return server
    except Exception as e:
        LOG.exception(e)
        # Hand the failure to the parent process before re-raising here.
        except_queue.put(e)
        raise
def backup_data(self, backup_path, manifest_path):
    """Stream a backup of a Cinder volume through the os-brick engine.

    The source volume is snapshotted, the snapshot is cloned into a
    fresh volume, that clone is attached and mounted locally, and its
    filesystem contents are streamed through the tar engine. Yields the
    backup data chunks; the clone and snapshot are removed afterwards.
    """
    LOG.info("Starting os-brick engine backup stream")
    source_volume = self.cinder.volumes.get(backup_path)
    self.volume_info = source_volume.to_dict()
    snap = self.cinder.volume_snapshots.create(backup_path, force=True)
    LOG.info("[*] Creating volume snapshot")
    utils.wait_for(
        OsbrickEngine.is_active, 1, 100,
        message="Waiting for volume {0} snapshot to become "
                "active".format(backup_path),
        kwargs={"client_manager": self.cinder.volume_snapshots,
                "id": snap.id})
    LOG.info("[*] Converting snapshot to volume")
    clone = self.cinder.volumes.create(snap.size, snapshot_id=snap.id)
    clone_wait_kwargs = {"client_manager": self.cinder.volumes,
                         "id": clone.id}
    utils.wait_for(
        OsbrickEngine.is_active, 1, 100,
        message="Waiting for backup volume {0} to become "
                "active".format(clone.id),
        kwargs=clone_wait_kwargs)
    try:
        mount_point = tempfile.mkdtemp()
    except Exception:
        LOG.error("Unable to create a tmp directory")
        raise
    LOG.info("[*] Trying to attach the volume to localhost")
    brick = brick_client.Client(volumes_client=self.cinder)
    attachment = brick.attach(clone.id, socket.gethostname(), mount_point)
    # Mount the attached device unless something already occupies the
    # temporary directory.
    if not os.path.ismount(mount_point):
        subprocess.check_output(['sudo', 'mount', '-t', 'ext4',
                                 attachment.get('path'), mount_point])
    previous_cwd = os.getcwd()
    os.chdir(mount_point)
    engine = tar.TarEngine(self.compression_algo,
                           self.dereference_symlink,
                           self.exclude, self.storage,
                           self.max_segment_size,
                           self.encrypt_pass_file, self.dry_run)
    # Stream the mounted filesystem ('.') through the tar engine.
    for chunk in engine.backup_data('.', manifest_path):
        yield chunk
    os.chdir(previous_cwd)
    LOG.info("[*] Detaching volume")
    subprocess.check_output(['sudo', 'umount', mount_point])
    shutil.rmtree(mount_point)
    brick.detach(clone.id)
    utils.wait_for(
        OsbrickEngine.is_active, 1, 100,
        message="Waiting for backup volume {0} to become "
                "active".format(clone.id),
        kwargs=clone_wait_kwargs)
    LOG.info("[*] Removing backup volume and snapshot")
    self.cinder.volumes.delete(clone.id)
    self.cinder.volume_snapshots.delete(snap, force=True)
    LOG.info('Backup process completed')
def restore_level(self, restore_path, read_pipe, backup, except_queue):
    """Restore a Cinder volume from backup using the os-brick engine.

    Reuses the target volume if it exists (restoring onto a detached
    copy when it is attached), otherwise creates a new one, then mounts
    it locally and unpacks the backup stream into its filesystem.

    :raises Exception: forwarded via ``except_queue`` and re-raised
    """
    try:
        LOG.info("Restoring volume {} using os-brick engine".format(
            restore_path))
        new_volume = False
        metadata = backup.metadata()
        # Consistency fix: like the sibling os-brick restore_level, refuse
        # to restore an encrypted backup when no passphrase file is
        # configured — the payload could not be decrypted.
        if (not self.encrypt_pass_file and
                metadata.get("encryption", False)):
            raise Exception("Cannot restore encrypted backup without key")
        volume_info = metadata.get("volume_info")
        try:
            backup_volume = self.cinder.volumes.get(restore_path)
        except Exception:
            # Target volume does not exist yet: create one sized from
            # the backup metadata and wait for it to become usable.
            new_volume = True
            LOG.info("[*] Volume doesn't exists, creating a new one")
            backup_volume = self.cinder.volumes.create(volume_info['size'])
            utils.wait_for(
                OsbrickEngine.is_active, 1, 100,
                message="Waiting for backup volume {0} to become "
                        "active".format(backup_volume.id),
                kwargs={
                    "client_manager": self.cinder.volumes,
                    "id": backup_volume.id
                })
        if backup_volume.attachments:
            # The target volume is attached somewhere: restore onto a
            # detached copy created from a snapshot instead.
            LOG.info('Volume is used, creating a copy from snapshot')
            snapshot = self.cinder.volume_snapshots.create(
                backup_volume.id, force=True)
            utils.wait_for(
                OsbrickEngine.is_active, 1, 100,
                message="Waiting for volume {0} snapshot to become "
                        "active".format(backup_volume.id),
                kwargs={
                    "client_manager": self.cinder.volume_snapshots,
                    "id": snapshot.id
                })
            LOG.info("[*] Converting snapshot to volume")
            backup_volume = self.cinder.volumes.create(
                snapshot.size, snapshot_id=snapshot.id)
            utils.wait_for(
                OsbrickEngine.is_active, 1, 100,
                message="Waiting for backup volume {0} to become "
                        "active".format(backup_volume.id),
                kwargs={
                    "client_manager": self.cinder.volumes,
                    "id": backup_volume.id
                })
        backup_volume = self.cinder.volumes.get(backup_volume.id)
        if backup_volume.status != 'available':
            raise RuntimeError('Unable to use volume for restore data')
        try:
            tmpdir = tempfile.mkdtemp()
        except Exception:
            LOG.error("Unable to create a tmp directory")
            raise
        LOG.info("[*] Trying to attach the volume to localhost")
        brickclient = brick_client.Client(volumes_client=self.cinder)
        attach_info = brickclient.attach(backup_volume.id,
                                         socket.gethostname(),
                                         tmpdir)
        if not os.path.ismount(tmpdir):
            # A brand-new volume has no filesystem yet: create one first.
            if new_volume:
                subprocess.check_output(
                    ['sudo', 'mkfs.ext4', attach_info.get('path')])
            subprocess.check_output([
                'sudo', 'mount', '-t', 'ext4', attach_info.get('path'),
                tmpdir
            ])
        tar_engine = tar.TarEngine(self.compression_algo,
                                   self.dereference_symlink,
                                   self.exclude, self.storage,
                                   self.max_segment_size,
                                   self.encrypt_pass_file, self.dry_run)
        # Unpack the backup stream into the mounted filesystem.
        tar_engine.restore_level(tmpdir, read_pipe, backup, except_queue)
        subprocess.check_output(['sudo', 'umount', tmpdir])
        shutil.rmtree(tmpdir)
        LOG.info("[*] Detaching volume")
        brickclient.detach(backup_volume.id)
        utils.wait_for(
            OsbrickEngine.is_active, 1, 100,
            message="Waiting for backup volume {0} to become "
                    "active".format(backup_volume.id),
            kwargs={
                "client_manager": self.cinder.volumes,
                "id": backup_volume.id
            })
        LOG.info('Restore process completed')
    except Exception as e:
        LOG.exception(e)
        # Hand the failure to the parent process before re-raising here.
        except_queue.put(e)
        raise