def prepare_test(self):
    """Run base preparation, start the nbd server and create client images."""
    super(BlockdevIncBackupPullModePoweroffVMTest, self).prepare_test()
    self._nbd_export = InternalNBDExportImage(
        self.main_vm, self.params, self._full_bk_images[0])
    self._nbd_export.start_nbd_server()
    # local images into which the clients will copy the exported data
    for client_img in self._client_image_objs:
        client_img.create(self.params)
def prepare_test(self):
    """Run base preparation, start the nbd server and create the client image."""
    super(BlockdevIncBackupPullModeRebootVMTest, self).prepare_test()
    export = InternalNBDExportImage(self.main_vm, self.params,
                                    self._full_bk_images[0])
    self._nbd_export = export
    export.start_nbd_server()
    self._client_image_obj.create(self.params)
    # pattern later matched against the debug log to detect a qemu abort
    self._error_msg = '{pid} Aborted|(core dumped)'.format(
        pid=self.main_vm.get_pid())
def init_nbd_exports(self):
    """Build one pair of nbd export objects (full + incremental) per source image."""
    for idx, _ in enumerate(self.src_img_tags):
        full_export = InternalNBDExportImage(self.main_vm, self.params,
                                             self.full_backup_tags[idx])
        self.full_backup_nbd_objs.append(full_export)
        inc_export = InternalNBDExportImage(self.main_vm, self.params,
                                            self.inc_backup_tags[idx])
        self.inc_backup_nbd_objs.append(inc_export)
def prepare_test(self):
    """Prepare VM, bitmap and disks, then start the internal nbd server."""
    setup_steps = (
        self.preprocess_data_disks,
        self.prepare_main_vm,
        self.add_bitmap,
        self.prepare_data_disks,
        self.add_target_data_disks,
    )
    for step in setup_steps:
        step()
    self._nbd_export = InternalNBDExportImage(self.main_vm, self.params,
                                              self._full_bk_images[0])
    self._nbd_export.start_nbd_server()
def _init_nbd_exports(tag):
    # resolve the (full, incremental) backup image tags for this source image
    bk_tags = self.params.object_params(tag).objects("backup_images")
    full_tag = bk_tags[0]
    inc_tag = bk_tags[1]
    self.full_backup_nbd_objs.append(
        InternalNBDExportImage(self.main_vm, self.params, full_tag))
    # the incremental export also exposes the bitmap recorded for this image
    self.params['nbd_export_bitmap_%s' % inc_tag] = "full_bitmap_%s" % tag
    self.inc_backup_nbd_objs.append(
        InternalNBDExportImage(self.main_vm, self.params, inc_tag))
class BlockdevIncBackupPullModePoweroffVMTest(BlockdevLiveBackupBaseTest):
    """Poweroff VM during pulling image from 4 clients"""

    def __init__(self, test, params, env):
        """
        Prepare the fleecing image, the client (target) images and one
        copy process per client image; nothing is started here.
        """
        super(BlockdevIncBackupPullModePoweroffVMTest, self).__init__(
            test, params, env)
        # set to True only when qemu stops answering qmp (see
        # _check_qemu_responsive); destroy_vms then kills qemu directly
        self._is_qemu_hang = False
        self._job = None
        self._nbd_export = None
        localhost = socket.gethostname()
        self.params['nbd_server'] = localhost if localhost else 'localhost'

        # the fleecing image to be exported
        self.params['image_name_image1'] = self.params['image_name']
        self.params['image_format_image1'] = self.params['image_format']
        self._fleecing_image_obj = self.source_disk_define_by_params(
            self.params, self._full_bk_images[0])
        self.trash.append(self._fleecing_image_obj)

        # local target images, where data is copied from nbd image
        self._targets = []
        self._clients = []
        self._client_image_objs = []
        nbd_image = self.params['nbd_image_%s' % self._full_bk_images[0]]
        for tag in self.params.objects('client_images'):
            self._client_image_objs.append(
                self.source_disk_define_by_params(self.params, tag))
            # one separate process per client pulls data via copyif
            p = Process(target=copyif, args=(self.params, nbd_image, tag))
            self._clients.append(p)
            # each target waits (with a timeout) on its own copy process
            self._targets.append(partial(self._join_process, p=p))
        self.trash.extend(self._client_image_objs)

    def add_target_data_disks(self):
        """Create the fleecing image and hotplug its backend nodes."""
        self._fleecing_image_obj.create(self.params)

        tag = self._fleecing_image_obj.tag
        devices = self.main_vm.devices.images_define_by_params(
            tag, self.params.object_params(tag), 'disk')
        devices.pop()  # ignore the front end device
        for dev in devices:
            if isinstance(dev, QBlockdevFormatNode):
                # chain the fleecing image on top of the source node
                dev.params["backing"] = self._source_nodes[0]
            ret = self.main_vm.devices.simple_hotplug(dev,
                                                      self.main_vm.monitor)
            if not ret[1]:
                self.test.fail("Failed to hotplug '%s'" % dev)

    def generate_data_file(self, tag, filename=None):
        """
        No need to create files, just start vm from the target,
        also note that, currently, creating a file may cause
        qemu core dumped due to a product bug 1879437
        """
        pass

    def remove_files_from_system_image(self, tmo=60):
        """No need to remove files for no file is created"""
        pass

    def prepare_test(self):
        """Run base preparation, start the nbd server, create client images."""
        super(BlockdevIncBackupPullModePoweroffVMTest, self).prepare_test()
        self._nbd_export = InternalNBDExportImage(
            self.main_vm, self.params, self._full_bk_images[0])
        self._nbd_export.start_nbd_server()
        for obj in self._client_image_objs:
            obj.create(self.params)

    def _wait_till_all_qemu_io_active(self):
        """Block until one qemu-io process per client image is detected."""
        def _wait_till_qemu_io_active(tag):
            # poll every 0.1s up to cmd_timeout (seconds) * 10 iterations
            for i in range(self.params.get_numeric('cmd_timeout', 20) * 10):
                if process.system(self.params['grep_qemu_io_cmd'] % tag,
                                  ignore_status=True, shell=True) == 0:
                    break
                time.sleep(0.1)
            else:
                self.test.error('Failed to detect the active qemu-io process')

        list(map(_wait_till_qemu_io_active,
                 [o.tag for o in self._client_image_objs]))

    def _poweroff_vm_during_data_copy(self):
        """Power off the guest from inside once data pulling is in progress."""
        self._wait_till_all_qemu_io_active()
        s = self.main_vm.wait_for_login()
        # best-effort: the connection may drop while the guest powers off
        s.cmd(cmd='poweroff', ignore_all_errors=True)

    def destroy_vms(self):
        """Tear down the VM, killing qemu directly if it hung on qmp."""
        if self._is_qemu_hang:
            # kill qemu instead of send shell/qmp command,
            # which will wait for minutes
            self.main_vm.monitors = []
            self.main_vm.destroy(gracefully=False)
        elif self.main_vm.is_alive():
            self.main_vm.destroy()

    def _check_qemu_responsive(self):
        """
        Probe qemu with query-status: a protocol error means qemu hangs,
        a socket error means qmp is unreachable; a *successful* reply means
        the copy merely timed out, which is reported as a test error.
        """
        try:
            self.main_vm.monitor.cmd(cmd="query-status", timeout=10)
        except MonitorProtocolError:
            self._is_qemu_hang = True
            self.test.fail('qemu hangs')
        except MonitorSocketError:
            self.test.fail('Failed to send qmp cmd to qemu')
        else:
            self.test.error('Too slow I/O to finish pulling data '
                            'set process_timeout to a larger value in cfg')

    def _join_process(self, p):
        """Wait for a client copy process, bounded by process_timeout."""
        p.join(timeout=self.params.get_numeric('process_timeout', 1800))

    def pull_data_and_poweroff_vm_in_parallel(self):
        """pull data and poweroff vm in parallel"""
        results = []
        list(map(lambda p: p.start(), self._clients))
        try:
            self._poweroff_vm_during_data_copy()
            # wait on all client processes in parallel
            parallel(self._targets)
            # never do join again when p.exitcode is None,
            # in case qemu-io hangs, process never returns
            results = list(
                map(lambda p: p.exitcode is not None and p.exitcode == 0,
                    self._clients))
        finally:
            list(map(lambda p: p.terminate(), self._clients))
            list(map(lambda p: p.join(), self._clients))

        if not all(results):
            # timeout(still running) or process quit unexpectedly
            self._check_qemu_responsive()

    def cancel_job(self):
        """Cancel the running backup job via qmp job-cancel."""
        self.main_vm.monitor.cmd('job-cancel', {'id': self._job})

    def export_full_bk_fleecing_img(self):
        """Expose the fleecing node over the internal nbd server."""
        self._nbd_export.add_nbd_image(self._full_bk_nodes[0])

    def do_full_backup(self):
        """Start the full backup and record the id of the created job."""
        super(BlockdevIncBackupPullModePoweroffVMTest, self).do_full_backup()
        self._job = [job['id'] for job in query_jobs(self.main_vm)][0]

    def do_test(self):
        self.do_full_backup()
        self.export_full_bk_fleecing_img()
        self.pull_data_and_poweroff_vm_in_parallel()
        self.cancel_job()
def _init_nbd_export(self, tag):
    """Select the export helper: internal block-export-add or qemu-nbd."""
    if self._block_export_uid:
        self._nbd_export = InternalNBDExportImage(self.main_vm,
                                                  self.params, tag)
    else:
        self._nbd_export = QemuNBDExportImage(self.params, tag)
class BlockdevIncbkXptAllocDepth(BlockdevLiveBackupBaseTest):
    """Allocation depth export test"""

    def __init__(self, test, params, env):
        """Record the backup-chain tags and define the nbd image object."""
        super(BlockdevIncbkXptAllocDepth, self).__init__(test, params, env)
        self._base_image, self._snapshot_image = self.params.objects(
            'image_backup_chain')
        localhost = socket.gethostname()
        self.params['nbd_server'] = localhost if localhost else 'localhost'
        self._nbd_image_obj = self.source_disk_define_by_params(
            self.params, self.params['nbd_image_tag'])
        # presence of this param selects internal export over qemu-nbd
        self._block_export_uid = self.params.get('block_export_uid')
        self._nbd_export = None
        self._is_exported = False

    def _init_nbd_export(self, tag):
        """Pick the export helper: internal block export or qemu-nbd."""
        self._nbd_export = InternalNBDExportImage(
            self.main_vm, self.params,
            tag) if self._block_export_uid else QemuNBDExportImage(
                self.params, tag)

    def _start_nbd_export(self, tag):
        """Start exporting the image identified by tag."""
        if self._block_export_uid is not None:
            # export local image with block-export-add
            self._nbd_export.start_nbd_server()
            self._nbd_export.add_nbd_image('drive_%s' % tag)
        else:
            # export local image with qemu-nbd
            # we should stop vm and rebase sn onto base
            if self.main_vm.is_alive():
                self.main_vm.destroy()
            self._rebase_sn_onto_base()
            self._nbd_export.export_image()
        self._is_exported = True

    def _rebase_sn_onto_base(self):
        """Rebase the snapshot image onto its base image."""
        disk = self.source_disk_define_by_params(self.params,
                                                 self._snapshot_image)
        disk.rebase(params=self.params)

    def post_test(self):
        """Stop the export before the base class cleanup."""
        self.stop_export()
        super(BlockdevIncbkXptAllocDepth, self).post_test()

    def stop_export(self):
        """stop nbd export"""
        if self._is_exported:
            self._nbd_export.stop_export()
            self._is_exported = False

    def export_image(self, tag):
        """export image from nbd server"""
        self._init_nbd_export(tag)
        self._start_nbd_export(tag)

    def check_allocation_depth_from_export(self, zero, data):
        """
        check allocation depth from output of qemu-img map
            local(base image): zero: false, data: false
            backing(snapshot): zero: true, data: true
        """
        opts = filename_to_file_opts(self._nbd_image_obj.image_filename)
        # ask the nbd server for the qemu:allocation-depth metadata context
        opts[self.params['dirty_bitmap_opt']] = 'qemu:allocation-depth'
        map_cmd = '{qemu_img} map --output=json {args}'.format(
            qemu_img=get_qemu_img_binary(self.params),
            args="'json:%s'" % json.dumps(opts))
        result = process.run(map_cmd, ignore_status=False, shell=True)
        # pass when at least one extent matches the expected zero/data pair
        for item in json.loads(result.stdout.decode().strip()):
            if item['zero'] is zero and item['data'] is data:
                break
        else:
            self.test.fail('Failed to get "zero": %s, "data": %s'
                           % (zero, data))

    def do_test(self):
        self.do_full_backup()
        self.export_image(self._base_image)
        self.check_allocation_depth_from_export(zero=False, data=False)
        self.stop_export()
        self.export_image(self._snapshot_image)
        self.check_allocation_depth_from_export(zero=True, data=True)
def run(test, params, env):
    """
    1) Clone system image with qemu-img
    2) Export the image with qemu internal NBD server
    3) ncat ip -p port or ncat -U /socket/path
    4) Boot from the exported nbd image
    5) Log into VM

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def _create_image():
        # clone the first system image into the local image file with dd
        result = qemu_storage.QemuImg(
            params, None, params['images'].split()[0]).dd(
                output=storage.get_image_filename(
                    params.object_params(params["local_image_tag"]),
                    data_dir.get_data_dir()
                ),
                bs=1024*1024
            )
        if result.exit_status != 0:
            test.fail('Failed to clone the system image, error: %s'
                      % result.stderr.decode())

    def _start_vm_without_image():
        # boot a VM with no images attached; the local image is hotplugged
        # later by the nbd export object
        params['images'] = ''
        vm = None
        try:
            vm = img_utils.boot_vm_with_images(test, params, env)
            vm.verify_alive()
        finally:
            # let VT remove it
            params['images'] = ' %s' % params['local_image_tag']
        return vm

    def _make_ncat_cmd():
        # unix-socket export needs no host substitution; tcp export does
        ncat = ''
        if params.get('nbd_unix_socket_%s' % params['nbd_image_tag']):
            ncat = params['ncat_cmd']
        else:
            localhost = socket.gethostname()
            params['nbd_server'] = localhost if localhost else 'localhost'
            ncat = params['ncat_cmd'].format(localhost=params['nbd_server'])
        return ncat

    _create_image()
    vm = _start_vm_without_image()
    nbd_export = InternalNBDExportImage(vm, params, params['local_image_tag'])
    nbd_export.hotplug_tls()
    nbd_export.hotplug_image()
    nbd_export.export_image()
    params['nbd_export_name'] = nbd_export.get_export_name()

    # probe the export with ncat and expect the configured error message
    ncat_cmd = _make_ncat_cmd()
    result = process.run(ncat_cmd, ignore_status=True, shell=True)
    if params['errmsg_check'] not in result.stderr.decode().strip():
        test.fail('Failed to read message(%s) from output(%s)'
                  % (params['errmsg_check'], result.stderr.decode()))

    vm2 = None
    try:
        # Start another VM from the nbd exported image
        vm2 = img_utils.boot_vm_with_images(test, params, env,
                                            (params["nbd_image_tag"],),
                                            'vm2')
        session = vm2.wait_for_login(
            timeout=params.get_numeric("login_timeout", 480))
        session.close()
    finally:
        if vm2:
            vm2.destroy()
class BlockdevIncBackupPullModeRebootVMTest(BlockdevLiveBackupBaseTest):
    """Reboot VM during pulling image from client"""

    def __init__(self, test, params, env):
        """Prepare the fleecing image and the single client (target) image."""
        super(BlockdevIncBackupPullModeRebootVMTest, self).__init__(
            test, params, env)
        self._job = None
        self._nbd_export = None
        localhost = socket.gethostname()
        self.params['nbd_server'] = localhost if localhost else 'localhost'

        # the fleecing image to be exported
        self.params['image_name_image1'] = self.params['image_name']
        self.params['image_format_image1'] = self.params['image_format']
        self._fleecing_image_obj = self.source_disk_define_by_params(
            self.params, self._full_bk_images[0])
        self.trash.append(self._fleecing_image_obj)

        # local target image, where data is copied from nbd image
        self._client_image_obj = self.source_disk_define_by_params(
            self.params,
            self.params['client_image_%s' % self._full_bk_images[0]])
        self.trash.append(self._client_image_obj)
        self._target_images = [self._client_image_obj.tag]

    def add_target_data_disks(self):
        """Create the fleecing image and hotplug its backend nodes."""
        self._fleecing_image_obj.create(self.params)

        tag = self._fleecing_image_obj.tag
        devices = self.main_vm.devices.images_define_by_params(
            tag, self.params.object_params(tag), 'disk')
        devices.pop()  # ignore the front end device
        for dev in devices:
            if isinstance(dev, QBlockdevFormatNode):
                # chain the fleecing image on top of the source node
                dev.params["backing"] = self._source_nodes[0]
            ret = self.main_vm.devices.simple_hotplug(dev,
                                                      self.main_vm.monitor)
            if not ret[1]:
                self.test.fail("Failed to hotplug '%s'" % dev)

    def generate_data_file(self, tag, filename=None):
        """
        No need to create files, just start vm from the target,
        also note that, currently, creating a file may cause
        qemu core dumped due to a product bug 1879437
        """
        pass

    def remove_files_from_system_image(self, tmo=60):
        """No need to remove files for no file is created"""
        pass

    def prepare_test(self):
        """Run base preparation, start the nbd server, create client image."""
        super(BlockdevIncBackupPullModeRebootVMTest, self).prepare_test()
        self._nbd_export = InternalNBDExportImage(
            self.main_vm, self.params, self._full_bk_images[0])
        self._nbd_export.start_nbd_server()
        self._client_image_obj.create(self.params)
        # pattern later matched against the debug log to detect a qemu abort
        self._error_msg = '{pid} Aborted|(core dumped)'.format(
            pid=self.main_vm.get_pid())

    def export_full_bk_fleecing_img(self):
        """Expose the fleecing node over the internal nbd server."""
        self._nbd_export.add_nbd_image(self._full_bk_nodes[0])

    def do_full_backup(self):
        """Start the full backup and record the id of the created job."""
        super(BlockdevIncBackupPullModeRebootVMTest, self).do_full_backup()
        self._job = [job['id'] for job in query_jobs(self.main_vm)][0]

    def _copy_full_data_from_export(self):
        """Pull all data from the nbd export into the client image."""
        nbd_image = self.params['nbd_image_%s' % self._full_bk_images[0]]
        copyif(self.params, nbd_image, self._client_image_obj.tag)

    def _wait_till_qemu_io_active(self):
        """Block until an active qemu-io process is detected."""
        # poll every 0.1s up to cmd_timeout (seconds) * 10 iterations
        for i in range(self.params.get_numeric('cmd_timeout', 20) * 10):
            if process.system('ps -C qemu-io',
                              ignore_status=True, shell=True) == 0:
                break
            time.sleep(0.1)
        else:
            self.test.error('Cannot detect the active qemu-io process')

    def _reboot_vm_during_data_copy(self):
        """Reset the VM once data pulling is in progress."""
        self._wait_till_qemu_io_active()
        self.main_vm.reboot(method="system_reset")

    def _is_qemu_aborted(self):
        """Check the test debug log for the qemu abort/core-dump pattern."""
        log_file = os.path.join(self.test.resultsdir,
                                self.params.get('debug_log_file',
                                                'debug.log'))
        with open(log_file, 'r') as f:
            out = f.read().strip()
        return re.search(self._error_msg, out, re.M) is not None

    def pull_data_and_reboot_vm_in_parallel(self):
        """run data copy and vm reboot in parallel"""
        targets = [
            self._reboot_vm_during_data_copy,
            self._copy_full_data_from_export
        ]
        try:
            utils_misc.parallel(targets)
        except Exception as e:
            # a failure here may be caused by a qemu crash: translate it
            # into a clear test failure, otherwise propagate the error
            if self._is_qemu_aborted():
                self.test.fail('qemu aborted(core dumped)')
            else:
                raise

    def cancel_job(self):
        """Cancel the running backup job via qmp job-cancel."""
        self.main_vm.monitor.cmd('job-cancel', {'id': self._job})

    def check_clone_vm_login(self):
        """Verify the cloned VM (booted from the copy) accepts a login."""
        session = self.clone_vm.wait_for_login(
            timeout=self.params.get_numeric('login_timeout', 300))
        session.close()

    def do_test(self):
        self.do_full_backup()
        self.export_full_bk_fleecing_img()
        self.pull_data_and_reboot_vm_in_parallel()
        self.cancel_job()
        self.prepare_clone_vm()
        self.check_clone_vm_login()
class BlockdevIncbkExposeActiveBitmap(BlockdevLiveBackupBaseTest):
    """Expose an active bitmap"""

    def __init__(self, test, params, env):
        """Prepare the fleecing image and nbd export related params."""
        super(BlockdevIncbkExposeActiveBitmap, self).__init__(
            test, params, env)
        if self.params.get_boolean('enable_nbd'):
            # propagate nbd access settings to the data image params
            self.params['nbd_server_data1'] = self.params.get('nbd_server')
            self.params['nbd_client_tls_creds_data1'] = self.params.get(
                'nbd_client_tls_creds')
        self._nbd_export = None
        localhost = socket.gethostname()
        self.params[
            'nbd_server_full'] = localhost if localhost else 'localhost'
        self.params['nbd_export_bitmaps_full'] = self._bitmaps[0]
        self._fleecing_image_obj = self.source_disk_define_by_params(
            self.params, self._full_bk_images[0])
        self.trash.append(self._fleecing_image_obj)

    def add_bitmap(self):
        """Add a non-persistent (and thus active) bitmap to the source node."""
        kargs = {
            'bitmap_name': self._bitmaps[0],
            'target_device': self._source_nodes[0],
            'persistent': 'off'
        }
        block_dirty_bitmap_add(self.main_vm, kargs)

    def add_target_data_disks(self):
        """Create the fleecing image and hotplug its backend nodes."""
        self._fleecing_image_obj.create(self.params)

        tag = self._fleecing_image_obj.tag
        devices = self.main_vm.devices.images_define_by_params(
            tag, self.params.object_params(tag), 'disk')
        devices.pop()  # ignore the front end device
        for dev in devices:
            if isinstance(dev, QBlockdevFormatNode):
                # chain the fleecing image on top of the source node
                dev.params["backing"] = self._source_nodes[0]
            ret = self.main_vm.devices.simple_hotplug(dev,
                                                      self.main_vm.monitor)
            if not ret[1]:
                self.test.fail("Failed to hotplug '%s'" % dev)

    def prepare_test(self):
        """Prepare VM, bitmap and disks, then start the internal nbd server."""
        self.preprocess_data_disks()
        self.prepare_main_vm()
        self.add_bitmap()
        self.prepare_data_disks()
        self.add_target_data_disks()
        self._nbd_export = InternalNBDExportImage(
            self.main_vm, self.params, self._full_bk_images[0])
        self._nbd_export.start_nbd_server()

    def expose_active_bitmap(self):
        """
        Negative check: exporting an active bitmap must fail with the
        configured error message; completing successfully is a failure.
        """
        try:
            self._nbd_export.add_nbd_image(self._full_bk_nodes[0])
        except QMPCmdError as e:
            error_msg = self.params['error_msg'] % self._bitmaps[0]
            if error_msg not in str(e):
                self.test.fail('Unexpected error: %s' % str(e))
        else:
            self.test.fail('active bitmap export completed unexpectedly')

    def do_full_backup(self):
        """Run the batch full backup without bitmaps."""
        blockdev_batch_backup(self.main_vm, self._source_nodes,
                              self._full_bk_nodes, None,
                              **self._full_backup_options)

    def do_test(self):
        self.do_full_backup()
        self.expose_active_bitmap()
class BlockdevIncBackupPullModePoweroffVMTest(BlockdevLiveBackupBaseTest):
    """Poweroff VM during pulling image from 4 clients"""

    def __init__(self, test, params, env):
        """
        Prepare the fleecing image, the client (target) images and one
        copy process per client image; nothing is started here.
        """
        super(BlockdevIncBackupPullModePoweroffVMTest, self).__init__(
            test, params, env)
        # set to True only when qemu stops answering qmp (see
        # _check_qemu_responsive); destroy_vms then kills qemu directly
        self._is_qemu_hang = False
        self._job = None
        self._nbd_export = None
        localhost = socket.gethostname()
        self.params['nbd_server'] = localhost if localhost else 'localhost'

        # the fleecing image to be exported
        self.params['image_name_image1'] = self.params['image_name']
        self.params['image_format_image1'] = self.params['image_format']
        self._fleecing_image_obj = self.source_disk_define_by_params(
            self.params, self._full_bk_images[0])
        self.trash.append(self._fleecing_image_obj)

        # local target images, where data is copied from nbd image
        self._clients = []
        self._client_image_objs = []
        nbd_image = self.params['nbd_image_%s' % self._full_bk_images[0]]
        for tag in self.params.objects('client_images'):
            self._client_image_objs.append(
                self.source_disk_define_by_params(self.params, tag))
            # one separate process per client pulls data via copyif
            p = Process(target=copyif, args=(self.params, nbd_image, tag))
            self._clients.append(p)
        self.trash.extend(self._client_image_objs)

    def add_target_data_disks(self):
        """Create the fleecing image and hotplug its backend nodes."""
        self._fleecing_image_obj.create(self.params)

        tag = self._fleecing_image_obj.tag
        devices = self.main_vm.devices.images_define_by_params(
            tag, self.params.object_params(tag), 'disk')
        devices.pop()  # ignore the front end device
        for dev in devices:
            if isinstance(dev, QBlockdevFormatNode):
                # chain the fleecing image on top of the source node
                dev.params["backing"] = self._source_nodes[0]
            ret = self.main_vm.devices.simple_hotplug(dev,
                                                      self.main_vm.monitor)
            if not ret[1]:
                self.test.fail("Failed to hotplug '%s'" % dev)

    def generate_data_file(self, tag, filename=None):
        """
        No need to create files, just start vm from the target,
        also note that, currently, creating a file may cause
        qemu core dumped due to a product bug 1879437
        """
        # reset the record so no file verification is attempted later
        self.disks_info = {}

    def prepare_test(self):
        """Run base preparation, start the nbd server, create client images."""
        super(BlockdevIncBackupPullModePoweroffVMTest, self).prepare_test()
        self._nbd_export = InternalNBDExportImage(
            self.main_vm, self.params, self._full_bk_images[0])
        self._nbd_export.start_nbd_server()
        for obj in self._client_image_objs:
            obj.create(self.params)

    def _wait_till_all_qemu_io_active(self):
        """Block until one qemu-io process per client image is detected."""
        def _wait_till_qemu_io_active(tag):
            # poll every 0.1s up to cmd_timeout (seconds) * 10 iterations
            for i in range(self.params.get_numeric('cmd_timeout', 20) * 10):
                if process.system(self.params['grep_qemu_io_cmd'] % tag,
                                  ignore_status=True, shell=True) == 0:
                    break
                time.sleep(0.1)
            else:
                self.test.error('Failed to detect the active qemu-io process')

        list(map(_wait_till_qemu_io_active,
                 [o.tag for o in self._client_image_objs]))

    def _poweroff_vm_during_data_copy(self, session):
        """Power off the guest and expect qemu to quit within the timeout."""
        self._wait_till_all_qemu_io_active()
        # best-effort: the connection may drop while the guest powers off
        session.cmd(cmd='poweroff', ignore_all_errors=True)
        tmo = self.params.get_numeric('vm_down_timeout', 300)
        if not wait_for(self.main_vm.is_dead, timeout=tmo):
            # qemu should quit after vm poweroff, or we have to do some checks
            self._check_qemu_responsive()
        else:
            logging.info('qemu quit after vm poweroff')

    def destroy_vms(self):
        """Tear down the VM, killing qemu directly if it hung on qmp."""
        if self._is_qemu_hang:
            # kill qemu instead of send shell/qmp command,
            # which will wait for minutes
            self.main_vm.monitors = []
            self.main_vm.destroy(gracefully=False)
        elif self.main_vm.is_alive():
            self.main_vm.destroy()

    def _check_qemu_responsive(self):
        """
        Probe qemu with query-status: any failure means qemu hangs; a
        successful reply means qemu stayed alive after poweroff, which
        is reported as a test error.
        """
        try:
            self.main_vm.monitor.cmd(cmd="query-status", timeout=10)
        except Exception as e:
            self._is_qemu_hang = True
            self.test.fail('qemu hangs: %s' % str(e))
        else:
            self.test.error('qemu keeps alive unexpectedly after vm poweroff')

    def pull_data_and_poweroff_vm_in_parallel(self):
        """pull data and poweroff vm in parallel"""
        # setup connection here for it costs some time to log into vm
        session = self.main_vm.wait_for_login()

        list(map(lambda p: p.start(), self._clients))
        try:
            self._poweroff_vm_during_data_copy(session)
        finally:
            list(map(lambda p: p.terminate(), self._clients))
            list(map(lambda p: p.join(), self._clients))

    def export_full_bk_fleecing_img(self):
        """Expose the fleecing node over the internal nbd server."""
        self._nbd_export.add_nbd_image(self._full_bk_nodes[0])

    def do_full_backup(self):
        """Start the full backup and record the id of the created job."""
        super(BlockdevIncBackupPullModePoweroffVMTest, self).do_full_backup()
        self._job = [job['id'] for job in query_jobs(self.main_vm)][0]

    def do_test(self):
        self.do_full_backup()
        self.export_full_bk_fleecing_img()
        self.pull_data_and_poweroff_vm_in_parallel()
def _init_nbd_export(self):
    """Select the export helper: internal block-export-add or qemu-nbd."""
    full_bk_image = self._full_bk_images[0]
    if self._block_export_uid:
        self._nbd_export = InternalNBDExportImage(self.main_vm, self.params,
                                                  full_bk_image)
    else:
        self._nbd_export = QemuNBDExportImage(self.params, full_bk_image)
class BlockdevIncbkXptMutBitmaps(BlockdevLiveBackupBaseTest):
    """Multiple bitmaps export test"""

    def __init__(self, test, params, env):
        """Record the bitmap list and define the nbd image object."""
        super(BlockdevIncbkXptMutBitmaps, self).__init__(test, params, env)
        self._bitmaps = params.objects('bitmap_list')
        # one enabled bitmap and one disabled bitmap (zipped below)
        self._bitmap_states = [True, False]
        localhost = socket.gethostname()
        self.params['nbd_server'] = localhost if localhost else 'localhost'
        self._nbd_image_obj = self.source_disk_define_by_params(
            self.params, self.params['nbd_image_tag'])
        # presence of this param selects internal export over qemu-nbd
        self._block_export_uid = self.params.get('block_export_uid')
        self._nbd_export = None
        self._is_exported = False

    def _init_nbd_export(self):
        """Pick the export helper: internal block export or qemu-nbd."""
        self._nbd_export = InternalNBDExportImage(
            self.main_vm, self.params, self._full_bk_images[0]
        ) if self._block_export_uid else QemuNBDExportImage(
            self.params, self._full_bk_images[0])

    def check_nbd_export_info(self):
        """Verify the queried block export matches id/type/node-name."""
        if self._block_export_uid is not None:
            info = self._nbd_export.query_nbd_export()
            if info is None:
                self.test.fail('Failed to get the nbd block export')
            if (not info or info['shutting-down']
                    or info['id'] != self._block_export_uid
                    or info['type'] != 'nbd'
                    or info['node-name'] != self._full_bk_nodes[0]):
                self.test.fail(
                    'Failed to get the correct export information: %s'
                    % info)

    def do_nbd_export(self):
        """Export the image: via qmp while running, or qemu-nbd offline."""
        if self._block_export_uid is not None:
            self._nbd_export.start_nbd_server()
            self._nbd_export.add_nbd_image(self._full_bk_nodes[0])
        else:
            # qemu-nbd exports the image file, so qemu must quit first
            self.main_vm.destroy()
            self._nbd_export.export_image()
        self._is_exported = True

    def prepare_test(self):
        """Prepare VM and disks, then pick the export helper."""
        self.prepare_main_vm()
        self.add_target_data_disks()
        self._init_nbd_export()

    def post_test(self):
        """Stop the export before the base class cleanup."""
        if self._is_exported:
            self._nbd_export.stop_export()
        super(BlockdevIncbkXptMutBitmaps, self).post_test()

    def add_persistent_bitmaps(self):
        """Add two bitmaps, one is enabled while the other is disabled"""
        bitmaps = [{
            'node': self._full_bk_nodes[0],
            'name': b,
            'persistent': True,
            'disabled': s
        } for b, s in zip(self._bitmaps, self._bitmap_states)]
        job_list = [{
            'type': 'block-dirty-bitmap-add',
            'data': data
        } for data in bitmaps]
        # add both bitmaps in a single qmp transaction
        self.main_vm.monitor.transaction(job_list)

    def check_bitmaps_from_export(self):
        """Map each exported bitmap with qemu-img and verify the output."""
        qemu_img = get_qemu_img_binary(self.params)

        opts = filename_to_file_opts(self._nbd_image_obj.image_filename)
        for bm in self._bitmaps:
            # select the qemu:dirty-bitmap metadata context for this bitmap
            opts[self.params['dirty_bitmap_opt']] = \
                'qemu:dirty-bitmap:%s' % bm
            args = "'json:%s'" % json.dumps(opts)
            map_cmd = '{qemu_img} map --output=human {args}'.format(
                qemu_img=qemu_img, args=args)
            result = process.run(map_cmd, ignore_status=False, shell=True)
            if self._nbd_image_obj.image_filename not in result.stdout_text:
                self.test.fail('Failed to get bitmap info.')

    def do_test(self):
        self.add_persistent_bitmaps()
        self.do_nbd_export()
        self.check_nbd_export_info()
        self.check_bitmaps_from_export()