def backup_data(self, backup_resource, manifest_path):
    """Stream a glance image backup.

    Downloads the image identified by *backup_resource* from glance and
    yields its raw data chunks. Image attributes are written to the
    tenant metadata file at *manifest_path* before streaming so a later
    restore can recreate the image. If an encryption key file is
    configured, the metadata file itself is additionally backed up
    (encrypted) through the tar engine.

    :param backup_resource: glance image id to back up
    :param manifest_path: path of the tenant metadata (manifest) file
    :raises Exception: if the image cannot be found in glance
    """
    # Fixed: removed leftover commented-out pdb debugger invocation.
    image = self.glance.images.get(backup_resource)
    if not image:
        raise Exception(
            "Image {0} can't be found.".format(backup_resource))
    LOG.info('Image backup')
    stream = self.client.download_image(image)
    LOG.info("Uploading image to storage path")
    # Metadata needed by restore_level to recreate the image.
    headers = {"image_name": image.name,
               "image_id": image.get('id'),
               "disk_format": image.get('disk_format'),
               "container_format": image.get('container_format'),
               "visibility": image.get('visibility'),
               # NOTE(review): assumes the downloaded stream supports
               # len() — confirm against the client implementation.
               'length': str(len(stream)),
               "protected": image.protected}
    self.set_tenant_meta(manifest_path, headers)
    for chunk in stream:
        yield chunk
    if self.encrypt_pass_file:
        # Back up the manifest through the tar engine so it is stored
        # encrypted alongside the image data.
        tar_engine = tar.TarEngine(self.compression_algo,
                                   self.dereference_symlink,
                                   self.exclude, self.storage,
                                   self.max_segment_size,
                                   self.encrypt_pass_file,
                                   self.dry_run)
        for data_chunk in tar_engine.backup_data('.', manifest_path):
            yield data_chunk
def restore_level(self, restore_resource, read_pipe, backup, except_queue):
    """Restore a glance image from the backup stream on *read_pipe*.

    Reads the backup metadata, recreates the image in glance from the
    streamed data and waits until the new image becomes active. Any
    exception is logged, forwarded to *except_queue* and re-raised.

    :param restore_resource: restore target (unused by this engine)
    :param read_pipe: pipe delivering the backed-up image data
    :param backup: backup object providing metadata()/engine_metadata()
    :param except_queue: queue used to report exceptions to the caller
    :returns: the newly created glance image
    """
    try:
        metadata = backup.metadata()
        # Refuse to restore an encrypted backup when no key file is set.
        if (not self.encrypt_pass_file and
                metadata.get("encryption", False)):
            raise Exception("Cannot restore encrypted backup without key")
        engine_metadata = backup.engine_metadata()
        image_info = metadata.get('image', {})
        container_format = image_info.get('container_format', 'bare')
        disk_format = image_info.get('disk_format', 'raw')
        # NOTE(review): if 'length' is absent this is int(None) and
        # raises TypeError — confirm the manifest always carries it.
        length = int(engine_metadata.get('length'))
        stream = self.stream_image(read_pipe)
        data = utils.ReSizeStream(stream, length, 1)
        image = self.client.create_image(
            "Restore: {0}".format(
                image_info.get('name', image_info.get('id', None))),
            container_format, disk_format, data=data)
        if self.encrypt_pass_file:
            try:
                tmpdir = tempfile.mkdtemp()
            except Exception:
                LOG.error("Unable to create a tmp directory")
                raise
            # NOTE(review): tmpdir is never removed after the tar
            # restore below — looks like a temp-directory leak; verify.
            tar_engine = tar.TarEngine(self.compression_algo,
                                       self.dereference_symlink,
                                       self.exclude, self.storage,
                                       self.max_segment_size,
                                       self.encrypt_pass_file,
                                       self.dry_run)
            tar_engine.restore_level(tmpdir, read_pipe, backup,
                                     except_queue)
        # Block until glance reports the uploaded image as active.
        utils.wait_for(
            GlanceEngine.image_active, 1, CONF.timeout,
            message="Waiting for image to finish uploading {0} and become"
                    " active".format(image.id),
            kwargs={"glance_client": self.glance,
                    "image_id": image.id})
        return image
    except Exception as e:
        LOG.exception(e)
        except_queue.put(e)
        raise
def __init__(self):
    """Fake backup-options fixture used by the freezer tests.

    Populates every option attribute the code under test reads with a
    deterministic test value, and wires mocked OpenStack clients
    (swift, glance, cinder, nova) into the client manager.

    Fixed: removed dead duplicate assignments — ``max_level``,
    ``encrypt_pass_file`` and ``always_level`` were each assigned twice
    (only the later value survived), and ``storage``/``engine`` were
    set to MagicMock objects that were immediately overwritten below.
    The surviving (final) values are unchanged.
    """
    self.dereference_symlink = None
    self.mysql_conf = '/tmp/freezer-test-conf-file'
    self.backup_media = 'fs'
    self.lvm_auto_snap = '/dev/null'
    self.lvm_volgroup = 'testgroup'
    self.lvm_srcvol = 'testvol'
    self.lvm_dirmount = '/tmp/testdir'
    self.lvm_snapsize = '1G'
    self.lvm_snapname = 'testsnapname'
    self.lvcreate_path = 'true'
    self.lvremove_path = 'true'
    self.mode = 'mysql'
    self.bash_path = 'true'
    self.file_path = 'true'
    self.mount_path = 'true'
    self.umount_path = 'true'
    self.backup_name = 'test-backup-name'
    self.hostname = 'test-hostname'
    self.curr_backup_level = 0
    self.path_to_backup = '/tmp'
    self.tar_path = 'true'
    self.no_incremental = 'true'
    self.exclude = 'true'
    self.openssl_path = 'true'
    self.hostname_backup_name = "hostname_backup_name"
    self.remove_older_than = '0'
    self.max_segment_size = '0'
    self.time_stamp = 123456789
    self.container = 'test-container'
    self.max_level = '20'
    self.encrypt_pass_file = '/dev/random'
    self.always_level = '20'
    self.overwrite = False
    self.remove_from_date = '2014-12-03T23:23:23'
    self.restart_always_level = 100000
    self.restore_abs_path = '/tmp'
    self.restore_from_date = '2014-12-03T23:23:23'
    self.restore_from_host = 'test-hostname'
    self.action = 'info'
    self.shadow = ''
    self.windows_volume = ''
    self.insecure = True
    self.os_auth_ver = 2
    self.dry_run = False
    self.upload_limit = -1
    self.download_limit = -1
    self.sql_server_instance = 'Sql Server'
    self.cinder_vol_id = ''
    self.cinder_vol_name = ''
    self.cindernative_vol_id = ''
    self.cindernative_backup_id = ''
    self.nova_inst_id = ''
    self.nova_inst_name = ''
    self.lvm_snapperm = 'ro'
    self.compression = 'gzip'
    # Real client manager built from the environment, with the
    # individual OpenStack clients replaced by fakes/mocks.
    opts = osclients.OpenstackOpts.create_from_env().get_opts_dicts()
    self.client_manager = osclients.OSClientManager(**opts)
    self.client_manager.get_swift = mock.Mock(
        return_value=FakeSwiftClient().client.Connection())
    self.client_manager.create_swift = self.client_manager.get_swift
    self.storage = swift.SwiftStorage(self.client_manager,
                                      self.container,
                                      self.max_segment_size)
    self.engine = tar_engine.TarEngine(self.compression,
                                       self.dereference_symlink,
                                       self.exclude, self.storage,
                                       1000, False)
    self.client_manager.get_glance = mock.Mock(
        return_value=FakeGlanceClient())
    self.client_manager.get_cinder = mock.Mock(
        return_value=FakeCinderClient())
    nova_client = mock.MagicMock()
    self.client_manager.get_nova = mock.Mock(return_value=nova_client)
    self.command = ''
def restore_level(self, restore_resource, read_pipe, backup, except_queue):
    """Restore a nova server from the backup stream on *read_pipe*.

    Recreates the boot image in glance from the streamed data, matches
    the server's original networks against the current neutron
    networks, waits for the image to become active and finally boots a
    new server from it. Any exception is logged, forwarded to
    *except_queue* and re-raised.

    :param restore_resource: restore target (unused by this engine)
    :param read_pipe: pipe delivering the backed-up image data
    :param backup: backup object providing metadata()/engine_metadata()
    :param except_queue: queue used to report exceptions to the caller
    :returns: the newly created nova server
    """
    try:
        metadata = backup.metadata()
        # Refuse to restore an encrypted backup when no key file is set.
        if (not self.encrypt_pass_file and
                metadata.get("encryption", False)):
            raise Exception("Cannot restore encrypted backup without key")
        engine_metadata = backup.engine_metadata()
        server_info = metadata.get('server', {})
        length = int(engine_metadata.get('length'))
        available_networks = server_info.get('addresses')
        nova_networks = self.neutron.list_networks()['networks']
        # Fixed: dict.iteritems() is Python-2-only and raises
        # AttributeError on Python 3 — use items() instead.
        net_names = [
            network for network, _ in available_networks.items()
        ]
        # Map the original network names to the ids of the networks
        # that currently exist in neutron.
        match_networks = [{"net-id": network.get('id')}
                          for network in nova_networks
                          if network.get('name') in net_names]
        stream = self.stream_image(read_pipe)
        data = utils.ReSizeStream(stream, length, 1)
        image = self.client.create_image(
            "Restore: {0}".format(
                server_info.get('name', server_info.get('id', None))),
            'bare', 'raw', data=data)
        if self.encrypt_pass_file:
            try:
                tmpdir = tempfile.mkdtemp()
            except Exception:
                LOG.error("Unable to create a tmp directory")
                raise
            tar_engine = tar.TarEngine(self.compression_algo,
                                       self.dereference_symlink,
                                       self.exclude, self.storage,
                                       self.max_segment_size,
                                       self.encrypt_pass_file,
                                       self.dry_run)
            tar_engine.restore_level(tmpdir, read_pipe, backup,
                                     except_queue)
        # Block until glance reports the uploaded image as active.
        utils.wait_for(
            NovaEngine.image_active, 1, CONF.timeout,
            message="Waiting for image to finish uploading {0} and become"
                    " active".format(image.id),
            kwargs={"glance_client": self.glance,
                    "image_id": image.id})
        server = self.nova.servers.create(
            name=server_info.get('name'),
            flavor=server_info['flavor']['id'],
            image=image.id,
            nics=match_networks)
        return server
    except Exception as e:
        LOG.exception(e)
        except_queue.put(e)
        raise
def backup_data(self, backup_resource, manifest_path):
    """Stream a nova instance backup.

    Snapshots the instance identified by *backup_resource* into a
    glance image and yields the image data chunks. Image-booted
    instances are downloaded directly; volume/snapshot-booted
    instances go through a temporary volume copy that is turned into
    a temporary glance image first. Temporary snapshots, volumes and
    images are cleaned up after streaming completes.

    :param backup_resource: nova server id to back up
    :param manifest_path: path of the tenant metadata (manifest) file
    :raises Exception: if the server or its snapshot image is missing
    """
    server = self.nova.servers.get(backup_resource)
    if not server:
        raise Exception("Server not found {0}".format(backup_resource))

    def instance_finish_task():
        # True once nova reports no in-progress task for the instance.
        server = self.nova.servers.get(backup_resource)
        return not server.__dict__['OS-EXT-STS:task_state']

    utils.wait_for(
        instance_finish_task, 1, CONF.timeout,
        message="Waiting for instance {0} to finish {1} to start the "
                "snapshot process".format(
                    backup_resource,
                    server.__dict__['OS-EXT-STS:task_state']))
    image_id = self.nova.servers.create_image(
        server, "snapshot_of_{0}".format(backup_resource))
    image = self.glance.images.get(image_id)
    if not image:
        raise Exception(
            "Image {0} is not created or can't be found.".format(image_id))
    # wait a bit for the snapshot to be taken and completely uploaded
    # to glance.
    # Fixed: corrected "instnace" typo in the wait message.
    utils.wait_for(NovaEngine.image_active, 1, 100,
                   message="Waiting for instance {0} snapshot to become "
                           "active".format(backup_resource),
                   kwargs={"glance_client": self.glance,
                           "image_id": image_id})
    image = self.glance.images.get(image_id)
    image_temporary_snapshot_id = None
    copied_volume = None
    image_info = getattr(server, "image", None)
    if image_info is not None and isinstance(image_info, dict):
        # Instance was booted from an image: download the snapshot
        # image directly.
        LOG.info('Image type instance backup')
        boot_device_type = "image"
        stream = self.client.download_image(image)
    else:
        # Instance was booted from a volume/snapshot: copy the
        # temporary snapshot into a volume and re-image it via glance.
        LOG.info('Volume or snapshot type instance backup')
        boot_device_type = "volume"
        image_block_mapping_info = image.get("block_device_mapping")
        image_block_mapping = json.loads(image_block_mapping_info) \
            if image_block_mapping_info else None
        image_temporary_snapshot_id = \
            image_block_mapping[0]['snapshot_id'] \
            if image_block_mapping else None
        copied_volume = self.client.do_copy_volume(
            self.cinder.volume_snapshots.get(
                image_temporary_snapshot_id))
        LOG.debug("Deleting temporary glance image "
                  "generated by snapshot")
        self.glance.images.delete(image.id)
        LOG.debug("Creation temporary glance image")
        image = self.client.make_glance_image(copied_volume.id,
                                              copied_volume)
        LOG.debug("Download temporary glance image {0}".format(image.id))
        stream = self.client.download_image(image)
    LOG.info("Uploading image to storage path")
    # Metadata needed by restore_level to recreate the server.
    headers = {"server_name": server.name,
               "flavour_id": str(server.flavor.get('id')),
               'length': str(len(stream)),
               "boot_device_type": boot_device_type}
    self.set_tenant_meta(manifest_path, headers)
    for chunk in stream:
        yield chunk
    if self.encrypt_pass_file:
        # Back up the manifest through the tar engine so it is stored
        # encrypted alongside the image data.
        tar_engine = tar.TarEngine(self.compression_algo,
                                   self.dereference_symlink,
                                   self.exclude, self.storage,
                                   self.max_segment_size,
                                   self.encrypt_pass_file,
                                   self.dry_run)
        for data_chunk in tar_engine.backup_data('.', manifest_path):
            yield data_chunk
    # Clean up the temporary resources created above.
    if image_temporary_snapshot_id is not None:
        LOG.info("Deleting temporary snapshot {0}".format(
            image_temporary_snapshot_id))
        self.cinder.volume_snapshots.delete(image_temporary_snapshot_id)
    if copied_volume is not None:
        LOG.info("Deleting temporary copied volume {0}".format(
            copied_volume.id))
        self.cinder.volumes.delete(copied_volume)
    LOG.info("Deleting temporary image {0}".format(image.id))
    self.glance.images.delete(image.id)
def backup_data(self, backup_path, manifest_path):
    """Stream a cinder volume backup through os-brick.

    Snapshots the volume identified by *backup_path*, converts the
    snapshot to a temporary volume, attaches that volume locally,
    mounts it, and yields the tar-engine backup stream of its
    contents. The temporary volume and snapshot are removed at the
    end.

    :param backup_path: cinder volume id to back up
    :param manifest_path: path of the backup manifest file
    """
    LOG.info("Starting os-brick engine backup stream")
    volume = self.cinder.volumes.get(backup_path)
    self.volume_info = volume.to_dict()
    snapshot = self.cinder.volume_snapshots.create(backup_path,
                                                   force=True)
    LOG.info("[*] Creating volume snapshot")
    utils.wait_for(OsbrickEngine.is_active, 1, 100,
                   message="Waiting for volume {0} snapshot to become "
                           "active".format(backup_path),
                   kwargs={"client_manager": self.cinder.volume_snapshots,
                           "id": snapshot.id})
    LOG.info("[*] Converting snapshot to volume")
    backup_volume = self.cinder.volumes.create(snapshot.size,
                                               snapshot_id=snapshot.id)
    utils.wait_for(OsbrickEngine.is_active, 1, 100,
                   message="Waiting for backup volume {0} to become "
                           "active".format(backup_volume.id),
                   kwargs={"client_manager": self.cinder.volumes,
                           "id": backup_volume.id})
    try:
        tmpdir = tempfile.mkdtemp()
    except Exception:
        LOG.error("Unable to create a tmp directory")
        raise
    LOG.info("[*] Trying to attach the volume to localhost")
    brickclient = brick_client.Client(volumes_client=self.cinder)
    attach_info = brickclient.attach(backup_volume.id,
                                     socket.gethostname(),
                                     tmpdir)
    if not os.path.ismount(tmpdir):
        subprocess.check_output([
            'sudo', 'mount', '-t', 'ext4', attach_info.get('path'),
            tmpdir
        ])
    cwd = os.getcwd()
    os.chdir(tmpdir)
    # Fixed: restore the working directory even if the tar backup
    # raises — previously an exception left the process chdir'd into
    # the mount point, which would then block the umount below.
    try:
        tar_engine = tar.TarEngine(self.compression_algo,
                                   self.dereference_symlink,
                                   self.exclude, self.storage,
                                   self.max_segment_size,
                                   self.encrypt_pass_file,
                                   self.dry_run)
        for data_chunk in tar_engine.backup_data('.', manifest_path):
            yield data_chunk
    finally:
        os.chdir(cwd)
    LOG.info("[*] Detaching volume")
    subprocess.check_output(['sudo', 'umount', tmpdir])
    shutil.rmtree(tmpdir)
    brickclient.detach(backup_volume.id)
    utils.wait_for(OsbrickEngine.is_active, 1, 100,
                   message="Waiting for backup volume {0} to become "
                           "active".format(backup_volume.id),
                   kwargs={"client_manager": self.cinder.volumes,
                           "id": backup_volume.id})
    LOG.info("[*] Removing backup volume and snapshot")
    self.cinder.volumes.delete(backup_volume.id)
    self.cinder.volume_snapshots.delete(snapshot, force=True)
    LOG.info('Backup process completed')
def restore_level(self, restore_path, read_pipe, backup, except_queue):
    """Restore a cinder volume's contents via os-brick.

    Looks up (or creates) the target volume, detours through a
    snapshot copy if the volume is attached, attaches the usable
    volume locally, mounts it and unpacks the tar backup stream into
    it. Any exception is logged, forwarded to *except_queue* and
    re-raised.

    :param restore_path: cinder volume id to restore into
    :param read_pipe: pipe delivering the backed-up data
    :param backup: backup object providing metadata()
    :param except_queue: queue used to report exceptions to the caller
    """
    try:
        LOG.info("Restoring volume {} using os-brick engine".format(
            restore_path))
        new_volume = False
        metadata = backup.metadata()
        volume_info = metadata.get("volume_info")
        try:
            backup_volume = self.cinder.volumes.get(restore_path)
        except Exception:
            # Target volume does not exist: create one sized from the
            # backup metadata.
            # NOTE(review): if metadata has no "volume_info" this is
            # None['size'] and raises TypeError — confirm the manifest
            # always carries it.
            new_volume = True
            LOG.info("[*] Volume doesn't exists, creating a new one")
            backup_volume = self.cinder.volumes.create(
                volume_info['size'])
            utils.wait_for(
                OsbrickEngine.is_active, 1, 100,
                message="Waiting for backup volume {0} to become "
                        "active".format(backup_volume.id),
                kwargs={"client_manager": self.cinder.volumes,
                        "id": backup_volume.id})
        if backup_volume.attachments:
            # Volume is in use: restore into a detached copy made from
            # a fresh snapshot instead of the attached volume itself.
            LOG.info('Volume is used, creating a copy from snapshot')
            snapshot = self.cinder.volume_snapshots.create(
                backup_volume.id, force=True)
            utils.wait_for(
                OsbrickEngine.is_active, 1, 100,
                message="Waiting for volume {0} snapshot to become "
                        "active".format(backup_volume.id),
                kwargs={"client_manager": self.cinder.volume_snapshots,
                        "id": snapshot.id})
            LOG.info("[*] Converting snapshot to volume")
            backup_volume = self.cinder.volumes.create(
                snapshot.size, snapshot_id=snapshot.id)
            utils.wait_for(
                OsbrickEngine.is_active, 1, 100,
                message="Waiting for backup volume {0} to become "
                        "active".format(backup_volume.id),
                kwargs={"client_manager": self.cinder.volumes,
                        "id": backup_volume.id})
        backup_volume = self.cinder.volumes.get(backup_volume.id)
        if backup_volume.status != 'available':
            raise RuntimeError('Unable to use volume for restore data')
        try:
            tmpdir = tempfile.mkdtemp()
        except Exception:
            LOG.error("Unable to create a tmp directory")
            raise
        # NOTE(review): tmpdir is removed only on the happy path below
        # — an exception past this point leaks it; verify intent.
        LOG.info("[*] Trying to attach the volume to localhost")
        brickclient = brick_client.Client(volumes_client=self.cinder)
        attach_info = brickclient.attach(backup_volume.id,
                                         socket.gethostname(),
                                         tmpdir)
        if not os.path.ismount(tmpdir):
            # A brand new volume has no filesystem yet.
            if new_volume:
                subprocess.check_output(
                    ['sudo', 'mkfs.ext4', attach_info.get('path')])
            subprocess.check_output([
                'sudo', 'mount', '-t', 'ext4', attach_info.get('path'),
                tmpdir
            ])
        tar_engine = tar.TarEngine(self.compression_algo,
                                   self.dereference_symlink,
                                   self.exclude, self.storage,
                                   self.max_segment_size,
                                   self.encrypt_pass_file,
                                   self.dry_run)
        tar_engine.restore_level(tmpdir, read_pipe, backup, except_queue)
        subprocess.check_output(['sudo', 'umount', tmpdir])
        shutil.rmtree(tmpdir)
        LOG.info("[*] Detaching volume")
        brickclient.detach(backup_volume.id)
        utils.wait_for(OsbrickEngine.is_active, 1, 100,
                       message="Waiting for backup volume {0} to become "
                               "active".format(backup_volume.id),
                       kwargs={"client_manager": self.cinder.volumes,
                               "id": backup_volume.id})
        LOG.info('Restore process completed')
    except Exception as e:
        LOG.exception(e)
        except_queue.put(e)
        raise