def prepare_nfs_backingfile(vm, params):
    """
    Create an image whose backing file lives on an NFS mount.

    :param vm: The guest
    :param params: the parameters used
    """
    mnt_path_name = params.get("nfs_mount_dir", "nfs-mount")
    exp_opt = params.get("export_options", "rw,no_root_squash,fsid=0")
    exp_dir = params.get("export_dir", "nfs-export")
    backingfile_img = params.get("source_dist_img", "nfs-img")
    disk_format = params.get("disk_format", "qcow2")
    img_name = params.get("img_name", "test.img")
    precreation = "yes" == params.get("precreation", "yes")
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
    disk_xml = vmxml.devices.by_device_tag('disk')[0]
    src_disk_format = disk_xml.xmltreefile.find('driver').get('type')
    first_disk = vm.get_first_disk_devices()
    blk_source = first_disk['source']
    disk_img = os.path.join(os.path.dirname(blk_source), img_name)

    res = libvirt.setup_or_cleanup_nfs(True, mnt_path_name, is_mount=True,
                                       export_options=exp_opt,
                                       export_dir=exp_dir)
    mnt_path = res["mount_dir"]
    params["selinux_status_bak"] = res["selinux_status_bak"]

    if vm.is_alive():
        vm.destroy(gracefully=False)

    disk_cmd = ("qemu-img convert -f %s -O %s %s %s/%s" %
                (src_disk_format, disk_format, blk_source,
                 mnt_path, backingfile_img))
    process.run(disk_cmd, ignore_status=False, verbose=True)
    local_image_list.append("%s/%s" % (mnt_path, backingfile_img))

    logging.debug("Create a local image backed by the file on NFS.")
    disk_cmd = ("qemu-img create -f %s -b %s/%s %s" %
                (disk_format, mnt_path, backingfile_img, disk_img))
    process.run(disk_cmd, ignore_status=False, verbose=True)
    local_image_list.append(disk_img)

    if precreation:
        logging.debug("Create an image backed by the file on NFS "
                      "on the remote host.")
        remote_session = remote.remote_login("ssh", server_ip, "22",
                                             server_user, server_pwd,
                                             r'[$#%]')
        utils_misc.make_dirs(os.path.dirname(blk_source), remote_session)
        status, stdout = utils_misc.cmd_status_output(
            disk_cmd, session=remote_session)
        logging.debug("status: {}, stdout: {}".format(status, stdout))
        remote_image_list.append("%s/%s" % (mnt_path, backingfile_img))
        remote_image_list.append(disk_img)
        remote_session.close()

    params.update({'disk_source_name': disk_img,
                   'disk_type': 'file',
                   'disk_source_protocol': 'file'})
    libvirt.set_vm_disk(vm, params)
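
# The helper above relies on qemu-img's backing-file mechanism. A standalone
# sketch of the same two-step pattern (hypothetical helper name and paths,
# not part of the test suite): publish a base image on the NFS mount, then
# create a local overlay that records it as its backing file.
def _sketch_nfs_backed_overlay(base_img, nfs_mnt, overlay_img, fmt="qcow2"):
    import os
    import subprocess
    backing = os.path.join(nfs_mnt, os.path.basename(base_img))
    # Step 1: copy/convert the base image onto the NFS mount
    subprocess.check_call(["qemu-img", "convert", "-O", fmt,
                           base_img, backing])
    # Step 2: create a local overlay backed by the NFS copy; guest writes
    # land in the overlay, reads fall through to the backing file
    # (newer qemu may warn that the backing format is left unspecified)
    subprocess.check_call(["qemu-img", "create", "-f", fmt,
                           "-b", backing, overlay_img])
    return overlay_img
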
def cleanup_nfs_backend_guest(vmxml_backup):
    if virt_use_nfs:
        result = process.run("setsebool virt_use_nfs 0",
                             shell=True, verbose=True)
        if result.exit_status:
            logging.error("Failed to set virt_use_nfs off")

    # recover the guest xml
    for xml_backup in vmxml_backup:
        xml_backup.sync(options="--managed-save")

    nfs_server = libvirt.setup_or_cleanup_nfs(False)
def setup_nfs_backend_guest(vmxml_backup):
    # nfs_server setup
    nfs_server = libvirt.setup_or_cleanup_nfs(True)
    nfs_export_dir = nfs_server['export_dir']
    logging.debug("nfs dir is %s", nfs_export_dir)

    # change the original xml and image path
    for dom in vms:
        vmxml = vm_xml.VMXML.new_from_dumpxml(dom.name)
        vmxml_backup.append(vmxml.copy())
        disk_xml = vmxml.get_devices(device_type="disk")[0]
        orig_image_path_name = disk_xml.source.attrs['file']
        libvirt.update_vm_disk_source(dom.name, nfs_export_dir)
        shutil.copy(orig_image_path_name, nfs_export_dir)

    # set the selinux boolean
    if virt_use_nfs:
        result = process.run("setsebool virt_use_nfs 1",
                             shell=True, verbose=True)
        if result.exit_status:
            logging.error("Failed to set virt_use_nfs on")
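
# Both helpers above rely on the framework's setup_or_cleanup_nfs() toggle.
# A minimal sketch of the pairing (illustrative only; directory names are
# hypothetical): the True call exports, optionally mounts, and returns the
# share details, while the False call tears everything down and can restore
# the saved SELinux state.
def _sketch_nfs_lifecycle():
    res = libvirt.setup_or_cleanup_nfs(True, "nfs-mount", is_mount=True,
                                       export_dir="nfs-export")
    try:
        logging.debug("NFS share exported and mounted at %s",
                      res["mount_dir"])
    finally:
        libvirt.setup_or_cleanup_nfs(
            False, restore_selinux=res["selinux_status_bak"])
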
def run(test, params, env):
    """
    Test DAC setting in both domain xml and qemu.conf.

    (1) Init variables for test.
    (2) Set VM xml and qemu.conf with proper DAC label, also set monitor
        socket parent dir with proper ownership and mode.
    (3) Start VM and check the context.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("host_selinux", "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("vm_sec_type", "dynamic")
    vm_sec_model = params.get("vm_sec_model", "dac")
    vm_sec_label = params.get("vm_sec_label", None)
    vm_sec_relabel = params.get("vm_sec_relabel", "yes")
    sec_dict = {'type': sec_type, 'model': vm_sec_model,
                'relabel': vm_sec_relabel}
    if vm_sec_label:
        sec_dict['label'] = vm_sec_label
    set_qemu_conf = "yes" == params.get("set_qemu_conf", "no")
    # Get per-img seclabel variables
    disk_type = params.get("disk_type")
    disk_target = params.get('disk_target')
    disk_src_protocol = params.get("disk_source_protocol")
    vol_name = params.get("vol_name")
    tmp_dir = data_dir.get_tmp_dir()
    pool_name = params.get("pool_name", "gluster-pool")
    brick_path = os.path.join(tmp_dir, pool_name)
    invalid_label = 'yes' == params.get("invalid_label", "no")
    relabel = params.get("per_img_sec_relabel")
    sec_label = params.get("per_img_sec_label")
    per_sec_model = params.get("per_sec_model", 'dac')
    per_img_dict = {'sec_model': per_sec_model, 'relabel': relabel,
                    'sec_label': sec_label}
    params.update(per_img_dict)
    # Get qemu.conf config variables
    qemu_user = params.get("qemu_user", 'qemu')
    qemu_group = params.get("qemu_group", 'qemu')
    dynamic_ownership = "yes" == params.get("dynamic_ownership", "yes")

    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    if backup_sestatus == "disabled":
        test.cancel("SELinux is in Disabled mode; it must be in "
                    "Enforcing mode to run this test")
    utils_selinux.set_status(host_sestatus)

    qemu_sock_mod = False
    qemu_sock_path = '/var/lib/libvirt/qemu/'
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        if set_qemu_conf:
            # Set qemu.conf for user and group
            if qemu_user:
                qemu_conf.user = qemu_user
            if qemu_group:
                qemu_conf.group = qemu_group
            if dynamic_ownership:
                qemu_conf.dynamic_ownership = 1
            else:
                qemu_conf.dynamic_ownership = 0
            logging.debug("the qemu.conf content is: %s" % qemu_conf)
            libvirtd.restart()
            st = os.stat(qemu_sock_path)
            if not bool(st.st_mode & stat.S_IWGRP):
                # chmod g+w
                os.chmod(qemu_sock_path, st.st_mode | stat.S_IWGRP)
                qemu_sock_mod = True

        # Set the context of the VM.
        logging.debug("sec_dict is %s" % sec_dict)
        vmxml.set_seclabel([sec_dict])
        vmxml.sync()

        # Get per-image seclabel in id string
        if sec_label:
            per_img_usr, per_img_grp = sec_label.split(':')
            sec_label_id = format_user_group_str(per_img_usr, per_img_grp)

        # Start VM to check the qemu process and image.
        try:
            # Set per-img sec context and start vm
            utlv.set_vm_disk(vm, params)
            # Start VM successfully.
            if status_error:
                if invalid_label:
                    # invalid label should fail, more info in bug 1165485
                    logging.debug("The guest failed to start as expected, "
                                  "details see bug: bugzilla.redhat.com/"
                                  "show_bug.cgi?id=1165485")
                else:
                    test.fail("Test succeeded in negative case.")

            # Get vm process label when VM is running.
            vm_pid = vm.get_pid()
            pid_stat = os.stat("/proc/%d" % vm_pid)
            vm_process_uid = pid_stat.st_uid
            vm_process_gid = pid_stat.st_gid
            vm_context = "%s:%s" % (vm_process_uid, vm_process_gid)
            logging.debug("vm process label is: %s", vm_context)

            # Get vm image label when VM is running
            if disk_type != "network":
                disks = vm.get_blk_devices()
                if libvirt_version.version_compare(3, 1, 0) and disk_type == "block":
                    output = to_text(process.system_output(
                        "nsenter -t %d -m -- ls -l %s" %
                        (vm_pid, disks[disk_target]['source'])))
                    owner, group = output.strip().split()[2:4]
                    disk_context = format_user_group_str(owner, group)
                else:
                    stat_re = os.stat(disks[disk_target]['source'])
                    disk_context = "%s:%s" % (stat_re.st_uid, stat_re.st_gid)
                logging.debug("The disk dac label after vm start is: %s",
                              disk_context)
                if sec_label and relabel == 'yes':
                    if disk_context != sec_label_id:
                        test.fail("The disk label is not equal to "
                                  "'%s'." % sec_label_id)
        except virt_vm.VMStartError as e:
            # Starting VM failed.
            if not status_error:
                test.fail("Test failed in positive case. error: %s" % e)
    finally:
        # clean up
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()

        if qemu_sock_mod:
            st = os.stat(qemu_sock_path)
            os.chmod(qemu_sock_path, st.st_mode ^ stat.S_IWGRP)
        if set_qemu_conf:
            qemu_conf.restore()
            libvirtd.restart()
        utils_selinux.set_status(backup_sestatus)
        if disk_src_protocol == 'iscsi':
            utlv.setup_or_cleanup_iscsi(is_setup=False)
        elif disk_src_protocol == 'gluster':
            utlv.setup_or_cleanup_gluster(False, brick_path=brick_path,
                                          **params)
            libvirtd.restart()
        elif disk_src_protocol == 'netfs':
            utlv.setup_or_cleanup_nfs(is_setup=False,
                                      restore_selinux=backup_sestatus)
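
# The DAC checks above compare against numeric "uid:gid" strings produced by
# format_user_group_str(). A sketch of what such a helper plausibly does
# (assumption: the real helper is defined elsewhere in this module; this
# stand-in is illustrative only):
def _sketch_format_user_group_str(user, group):
    import grp
    import pwd
    uid = int(user) if user.isdigit() else pwd.getpwnam(user).pw_uid
    gid = int(group) if group.isdigit() else grp.getgrnam(group).gr_gid
    return "%s:%s" % (uid, gid)

# e.g. _sketch_format_user_group_str("qemu", "qemu") -> "107:107" on a host
# where the qemu user and group both map to id 107.
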
        if (re.search(log_pattern, cmd_result.stdout) or
                chk_libvirtd_log(libvirtd_log_path, log_pattern, "debug")):
            logging.debug("Found the expected timed-out block copy message")
        else:
            raise error.TestFail("Expect fail, but run successfully.")
    finally:
        # Restore libvirtd conf and restart libvirtd
        libvirtd_conf.restore()
        libvirtd_utl.restart()
        if libvirtd_log_path and os.path.exists(libvirtd_log_path):
            os.unlink(libvirtd_log_path)

        if vm.is_alive():
            vm.destroy(gracefully=False)
        utils_misc.wait_for(
            lambda: virsh.domstate(vm_name,
                                   ignore_status=True).exit_status, 2)
        original_xml.sync("--snapshots-metadata")

        if replace_vm_disk and disk_source_protocol == "netfs":
            restore_selinux = params.get('selinux_status_bak')
            utl.setup_or_cleanup_nfs(is_setup=False,
                                     restore_selinux=restore_selinux)

        if os.path.exists(dest_path):
            os.remove(dest_path)
        if os.path.exists(snap_path):
            os.remove(snap_path)
        if os.path.exists(save_path):
            os.remove(save_path)
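
# The success check above greps both the command output and the libvirtd
# debug log. A minimal sketch of the log side (illustrative; the actual
# chk_libvirtd_log() helper is defined elsewhere in the original module):
def _sketch_scan_libvirtd_log(log_path, pattern):
    import re
    with open(log_path) as log_f:
        return any(re.search(pattern, line) for line in log_f)
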
def run(test, params, env):
    """
    Test command: virsh blockcommit <domain> <path>

    1) Prepare test environment.
    2) Commit changes from a snapshot down to its backing image.
    3) Recover test environment.
    4) Check result.
    """

    def make_disk_snapshot(postfix_n):
        # Add all disks into the command line.
        disks = vm.get_disk_devices()

        # Make three external snapshots for disks only
        for count in range(1, 4):
            options = "%s_%s %s%s-desc " % (postfix_n, count,
                                            postfix_n, count)
            options += "--disk-only --atomic --no-metadata"
            if needs_agent:
                options += " --quiesce"

            for disk in disks:
                disk_detail = disks[disk]
                basename = os.path.basename(disk_detail['source'])

                # Remove the original suffix if any, appending
                # ".postfix_n[0-9]"
                diskname = basename.split(".")[0]
                snap_name = "%s.%s%s" % (diskname, postfix_n, count)
                disk_external = os.path.join(tmp_dir, snap_name)

                snapshot_external_disks.append(disk_external)
                options += " %s,snapshot=external,file=%s" % (disk,
                                                              disk_external)

            cmd_result = virsh.snapshot_create_as(vm_name, options,
                                                  ignore_status=True,
                                                  debug=True)
            status = cmd_result.exit_status
            if status != 0:
                raise error.TestFail("Failed to make snapshots for disks!")

            # Create a file flag in VM after each snapshot
            flag_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"),
                                                    dir="/tmp")
            file_path = flag_file.name
            flag_file.close()

            status, output = session.cmd_status_output("touch %s" % file_path)
            if status:
                raise error.TestFail("Touch file in vm failed. %s" % output)
            snapshot_flag_files.append(file_path)

    # MAIN TEST CODE ###
    # Process cartesian parameters
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    vm_state = params.get("vm_state", "running")
    needs_agent = "yes" == params.get("needs_agent", "yes")
    replace_vm_disk = "yes" == params.get("replace_vm_disk", "no")
    top_inactive = ("yes" == params.get("top_inactive"))
    with_timeout = ("yes" == params.get("with_timeout_option", "no"))
    status_error = ("yes" == params.get("status_error", "no"))
    base_option = params.get("base_option", "none")
    middle_base = "yes" == params.get("middle_base", "no")
    pivot_opt = "yes" == params.get("pivot_opt", "no")
    snap_in_mirror = "yes" == params.get("snap_in_mirror", "no")
    snap_in_mirror_err = "yes" == params.get("snap_in_mirror_err", "no")
    with_active_commit = "yes" == params.get("with_active_commit", "no")
    multiple_chain = "yes" == params.get("multiple_chain", "no")
    virsh_dargs = {'debug': True}

    # Process domain disk device parameters
    disk_type = params.get("disk_type")
    disk_src_protocol = params.get("disk_source_protocol")
    restart_tgtd = params.get("restart_tgtd", 'no')
    vol_name = params.get("vol_name")
    tmp_dir = data_dir.get_tmp_dir()
    pool_name = params.get("pool_name", "gluster-pool")
    brick_path = os.path.join(tmp_dir, pool_name)

    if not top_inactive:
        if not libvirt_version.version_compare(1, 2, 4):
            raise error.TestNAError("live active block commit is not supported"
                                    " in current libvirt version.")

    # A backup of original vm
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Abort the test if there are snapshots already
    existing_snaps = virsh.snapshot_list(vm_name)
    if len(existing_snaps) != 0:
        raise error.TestFail("There are snapshots created for %s already" %
                             vm_name)

    snapshot_external_disks = []
    cmd_session = None
    try:
        if disk_src_protocol == 'iscsi' and disk_type == 'network':
            if not libvirt_version.version_compare(1, 0, 4):
                raise error.TestNAError("'iscsi' disk doesn't support in"
                                        " current libvirt version.")
        # Set vm xml and guest agent
        if replace_vm_disk:
            libvirt.set_vm_disk(vm, params, tmp_dir)
        if needs_agent:
            vm.prepare_guest_agent()

        # The first disk is supposed to include OS
        # We will perform blockcommit operation for it.
        first_disk = vm.get_first_disk_devices()
        blk_source = first_disk['source']
        blk_target = first_disk['target']
        snapshot_flag_files = []

        # get a vm session before snapshot
        session = vm.wait_for_login()
        # do snapshot
        postfix_n = 'snap'
        make_disk_snapshot(postfix_n)

        basename = os.path.basename(blk_source)
        diskname = basename.split(".")[0]
        snap_src_lst = [blk_source]
        if multiple_chain:
            snap_name = "%s.%s1" % (diskname, postfix_n)
            snap_top = os.path.join(tmp_dir, snap_name)
            top_index = snapshot_external_disks.index(snap_top) + 1
            omit_list = snapshot_external_disks[top_index:]
            vm.destroy(gracefully=False)
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            disk_xml = vmxml.get_devices(device_type="disk")[0]
            vmxml.del_device(disk_xml)
            disk_dict = {'attrs': {'file': snap_top}}
            disk_xml.source = disk_xml.new_disk_source(**disk_dict)
            vmxml.add_device(disk_xml)
            vmxml.sync()
            vm.start()
            session = vm.wait_for_login()
            postfix_n = 'new_snap'
            make_disk_snapshot(postfix_n)
            snap_src_lst = [blk_source]
            snap_src_lst += snapshot_external_disks
            logging.debug("omit list is %s", omit_list)
            for i in omit_list:
                snap_src_lst.remove(i)
        else:
            # snapshot src file list
            snap_src_lst += snapshot_external_disks
        backing_chain = ''
        for i in reversed(range(4)):
            if i == 0:
                backing_chain += "%s" % snap_src_lst[i]
            else:
                backing_chain += "%s -> " % snap_src_lst[i]

        logging.debug("The backing chain is: %s" % backing_chain)

        # check snapshot disk xml backingStore is expected
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disks = vmxml.devices.by_device_tag('disk')
        disk_xml = None
        for disk in disks:
            if disk.target['dev'] != blk_target:
                continue
            else:
                disk_xml = disk.xmltreefile
                logging.debug("the target disk xml after snapshot is %s",
                              disk_xml)
                break

        if not disk_xml:
            raise error.TestFail("Can't find disk xml with target %s" %
                                 blk_target)
        elif libvirt_version.version_compare(1, 2, 4):
            # backingStore element introduced in 1.2.4
            chain_lst = snap_src_lst[::-1]
            ret = check_chain_xml(disk_xml, chain_lst)
            if not ret:
                raise error.TestFail("Domain image backing chain check failed")

        # set blockcommit_options
        top_image = None
        blockcommit_options = "--wait --verbose"
        if with_timeout:
            blockcommit_options += " --timeout 2"

        if base_option == "shallow":
            blockcommit_options += " --shallow"
        elif base_option == "base":
            if middle_base:
                snap_name = "%s.%s1" % (diskname, postfix_n)
                blk_source = os.path.join(tmp_dir, snap_name)
            blockcommit_options += " --base %s" % blk_source

        if top_inactive:
            snap_name = "%s.%s2" % (diskname, postfix_n)
            top_image = os.path.join(tmp_dir, snap_name)
            blockcommit_options += " --top %s" % top_image
        else:
            blockcommit_options += " --active"
            if pivot_opt:
                blockcommit_options += " --pivot"

        if vm_state == "shut off":
            vm.destroy(gracefully=True)

        if with_active_commit:
            # inactive commit following active commit will fail with bug 1135339
            cmd = "virsh blockcommit %s %s --active --pivot" % (vm_name,
                                                                blk_target)
            cmd_session = aexpect.ShellSession(cmd)

        # Run test case
        result = virsh.blockcommit(vm_name, blk_target,
                                   blockcommit_options, **virsh_dargs)

        # Check status_error
        libvirt.check_exit_status(result, status_error)
        if result.exit_status and status_error:
            return

        while True:
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)

            disks = vmxml.devices.by_device_tag('disk')
            for disk in disks:
                if disk.target['dev'] != blk_target:
                    continue
                else:
                    disk_xml = disk.xmltreefile
                    break

            if not top_inactive:
                disk_mirror = disk_xml.find('mirror')
                if '--pivot' not in blockcommit_options:
                    if disk_mirror is not None:
                        job_type = disk_mirror.get('job')
                        job_ready = disk_mirror.get('ready')
                        src_element = disk_mirror.find('source')
                        disk_src_file = None
                        for elem in ('file', 'name', 'dev'):
                            elem_val = src_element.get(elem)
                            if elem_val:
                                disk_src_file = elem_val
                                break
                        err_msg = "blockcommit base source "
                        err_msg += "%s not expected" % disk_src_file
                        if '--shallow' in blockcommit_options:
                            if not multiple_chain:
                                if disk_src_file != snap_src_lst[2]:
                                    raise error.TestFail(err_msg)
                            else:
                                if disk_src_file != snap_src_lst[3]:
                                    raise error.TestFail(err_msg)
                        else:
                            if disk_src_file != blk_source:
                                raise error.TestFail(err_msg)
                        if libvirt_version.version_compare(1, 2, 7):
                            # The job attribute mentions which API started the
                            # operation since 1.2.7.
                            if job_type != 'active-commit':
                                raise error.TestFail("blockcommit job type '%s'"
                                                     " not expected" % job_type)
                            if job_ready != 'yes':
                                # The attribute ready, if present, tracks
                                # progress of the job: yes if the disk is known
                                # to be ready to pivot, or, since 1.2.7, abort
                                # or pivot if the job is in the process of
                                # completing.
                                continue
                            else:
                                logging.debug("after active block commit job "
                                              "ready for pivot, the target disk"
                                              " xml is %s", disk_xml)
                                break
                        else:
                            break
                    else:
                        break
                else:
                    if disk_mirror is None:
                        logging.debug(disk_xml)
                        if "--shallow" in blockcommit_options:
                            chain_lst = snap_src_lst[::-1]
                            chain_lst.pop(0)
                            ret = check_chain_xml(disk_xml, chain_lst)
                            if not ret:
                                raise error.TestFail("Domain image backing "
                                                     "chain check failed")
                        elif "--base" in blockcommit_options:
                            chain_lst = snap_src_lst[::-1]
                            base_index = chain_lst.index(blk_source)
                            chain_lst = chain_lst[base_index:]
                            ret = check_chain_xml(disk_xml, chain_lst)
                            if not ret:
                                raise error.TestFail("Domain image backing "
                                                     "chain check failed")
                        break
                    else:
                        # wait for pivot after commit is synced
                        continue
            else:
                logging.debug("after inactive commit the disk xml is: %s"
                              % disk_xml)
                if libvirt_version.version_compare(1, 2, 4):
                    if "--shallow" in blockcommit_options:
                        chain_lst = snap_src_lst[::-1]
                        chain_lst.remove(top_image)
                        ret = check_chain_xml(disk_xml, chain_lst)
                        if not ret:
                            raise error.TestFail("Domain image backing chain "
                                                 "check failed")
                    elif "--base" in blockcommit_options:
                        chain_lst = snap_src_lst[::-1]
                        top_index = chain_lst.index(top_image)
                        base_index = chain_lst.index(blk_source)
                        val_tmp = []
                        for i in range(top_index, base_index):
                            val_tmp.append(chain_lst[i])
                        for i in val_tmp:
                            chain_lst.remove(i)
                        ret = check_chain_xml(disk_xml, chain_lst)
                        if not ret:
                            raise error.TestFail("Domain image backing chain "
                                                 "check failed")
                    break
                else:
                    break

        # Check flag files
        if not vm_state == "shut off" and not multiple_chain:
            for flag in snapshot_flag_files:
                status, output = session.cmd_status_output("cat %s" % flag)
                if status:
                    raise error.TestFail("blockcommit failed: %s" % output)

        if not pivot_opt and snap_in_mirror:
            # do snapshot during mirror phase
            snap_path = "%s/%s.snap" % (tmp_dir, vm_name)
            snap_opt = "--disk-only --atomic --no-metadata "
            snap_opt += "vda,snapshot=external,file=%s" % snap_path
            snapshot_external_disks.append(snap_path)
            cmd_result = virsh.snapshot_create_as(vm_name, snap_opt,
                                                  ignore_status=True,
                                                  debug=True)
            libvirt.check_exit_status(cmd_result, snap_in_mirror_err)
    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Recover xml of vm.
        vmxml_backup.sync("--snapshots-metadata")

        if cmd_session:
            cmd_session.close()

        for disk in snapshot_external_disks:
            if os.path.exists(disk):
                os.remove(disk)

        if disk_src_protocol == 'iscsi':
            libvirt.setup_or_cleanup_iscsi(is_setup=False,
                                           restart_tgtd=restart_tgtd)
        elif disk_src_protocol == 'gluster':
            libvirt.setup_or_cleanup_gluster(False, vol_name, brick_path)
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
        elif disk_src_protocol == 'netfs':
            restore_selinux = params.get('selinux_status_bak')
            libvirt.setup_or_cleanup_nfs(is_setup=False,
                                         restore_selinux=restore_selinux)
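
# Several places above call check_chain_xml() to compare the disk's
# <backingStore> chain against an expected list. A sketch of the underlying
# idea (illustrative, not the framework's implementation): walk the nested
# backingStore elements top-to-base and collect each <source> path.
def _sketch_backing_chain_from_xml(disk_xml):
    chain = []
    src = disk_xml.find('source')
    if src is not None:
        chain.append(src.get('file') or src.get('dev') or src.get('name'))
    bs = disk_xml.find('backingStore')
    while bs is not None:
        src = bs.find('source')
        if src is None:
            break
        chain.append(src.get('file') or src.get('dev') or src.get('name'))
        bs = bs.find('backingStore')
    return chain
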
            option = None
            original_xml.sync(option)
        else:
            original_xml.define()
    except Exception as e:
        logging.error(e)

    for disk in snapshot_external_disks:
        if os.path.exists(disk):
            os.remove(disk)

    # Clean up the libvirt pool, which may have been created by 'set_vm_disk'
    if disk_type == 'volume':
        virsh.pool_destroy(pool_name, ignore_status=True, debug=True)

    # Clean up NFS
    try:
        if replace_vm_disk and disk_source_protocol == "netfs":
            utl.setup_or_cleanup_nfs(is_setup=False)
    except Exception as e:
        logging.error(e)

    # Clean up iSCSI
    try:
        for iscsi_n in list(set(emulated_iscsi)):
            utl.setup_or_cleanup_iscsi(is_setup=False,
                                       emulated_image=iscsi_n)
    except Exception as e:
        logging.error(e)

    if os.path.exists(dest_path):
        os.remove(dest_path)
    if os.path.exists(snap_path):
        os.remove(snap_path)
    if os.path.exists(save_path):
        os.remove(save_path)
def run(test, params, env):
    """
    Test virsh migrate-setmaxdowntime command.

    1) Prepare migration environment
    2) Start migration and set migrate-maxdowntime
    3) Cleanup environment (migrated vm on destination)
    4) Check result
    """
    dest_uri = params.get("virsh_migrate_dest_uri",
                          "qemu+ssh://MIGRATE_EXAMPLE/system")
    src_uri = params.get("virsh_migrate_src_uri",
                         "qemu+ssh://MIGRATE_EXAMPLE/system")
    if dest_uri.count('///') or dest_uri.count('MIGRATE_EXAMPLE'):
        raise error.TestNAError("Set your destination uri first.")
    if src_uri.count('MIGRATE_EXAMPLE'):
        raise error.TestNAError("Set your source uri first.")
    if src_uri == dest_uri:
        raise error.TestNAError("You should not set dest uri same as local.")
    vm_ref = params.get("setmmdt_vm_ref", "domname")
    pre_vm_state = params.get("pre_vm_state", "running")
    status_error = "yes" == params.get("status_error", "no")
    do_migrate = "yes" == params.get("do_migrate", "yes")
    migrate_maxdowntime = params.get("migrate_maxdowntime", 1.000)
    if (migrate_maxdowntime == ""):
        downtime = ""
    else:
        downtime = int(float(migrate_maxdowntime)) * 1000
    extra = params.get("setmmdt_extra")
    # A delay between threads
    delay_time = int(params.get("delay_time", 1))
    # timeout of threads
    thread_timeout = 180

    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    domuuid = vm.get_uuid()

    grep_str_local = params.get("grep_str_from_local_libvirt_log", "")

    # For safety reasons, we'd better back up the original guest xml
    orig_config_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    if not orig_config_xml:
        raise error.TestError("Backing up xmlfile failed.")

    # Params to configure libvirtd.conf
    log_file = "/var/log/libvirt/libvirtd.log"
    log_level = "1"
    log_filters = '"1:json 1:libvirt 1:qemu 1:monitor 3:remote 4:event"'
    libvirtd_conf_dict = {
        "log_level": log_level,
        "log_filters": log_filters,
        "log_outputs": '"%s:file:%s"' % (log_level, log_file)
    }

    # Update libvirtd config with new parameters
    libvirtd = utils_libvirtd.Libvirtd()
    libvirtd_conf = config_libvirt(libvirtd_conf_dict)
    libvirtd.restart()

    # Params to update the disk using shared storage
    params["disk_type"] = "file"
    params["disk_source_protocol"] = "netfs"
    params["mnt_path_name"] = params.get("nfs_mount_dir")

    # Params to set up the SSH connection
    params["server_ip"] = params.get("migrate_dest_host")
    params["server_pwd"] = params.get("migrate_dest_pwd")
    params["client_ip"] = params.get("migrate_source_host")
    params["client_pwd"] = params.get("migrate_source_pwd")
    params["nfs_client_ip"] = params.get("migrate_dest_host")
    params["nfs_server_ip"] = params.get("migrate_source_host")

    # Params to enable the SELinux boolean on the remote host
    params["remote_boolean_varible"] = "virt_use_nfs"
    params["remote_boolean_value"] = "on"
    params["set_sebool_remote"] = "yes"

    remote_host = params.get("migrate_dest_host")
    username = params.get("migrate_dest_user", "root")
    password = params.get("migrate_dest_pwd")
    # Config ssh autologin for remote host
    ssh_key.setup_ssh_key(remote_host, username, password, port=22)

    setmmdt_dargs = {'debug': True, 'ignore_status': True, 'uri': src_uri}
    migrate_dargs = {'debug': True, 'ignore_status': True}

    seLinuxBool = None
    nfs_client = None
    local_selinux_bak = ""

    try:
        # Update the disk using shared storage
        libvirt.set_vm_disk(vm, params)

        # Back up the SELinux status on the local host for recovery
        local_selinux_bak = params.get("selinux_status_bak", "")

        # Configure the NFS client on the remote host
        nfs_client = nfs.NFSClient(params)
        nfs_client.setup()

        logging.info("Enable virt NFS SELinux boolean on target host")
        seLinuxBool = utils_misc.SELinuxBoolean(params)
        seLinuxBool.setup()

        if not vm.is_alive():
            vm.start()
        vm.wait_for_login()
        domid = vm.get_id()

        # Confirm how to reference a VM.
        if vm_ref == "domname":
            vm_ref = vm_name
        elif vm_ref == "domid":
            vm_ref = domid
        elif vm_ref == "domuuid":
            vm_ref = domuuid

        # Prepare vm state
        if pre_vm_state == "paused":
            vm.pause()
        elif pre_vm_state == "shutoff":
            vm.destroy(gracefully=False)
            # Ensure VM in 'shut off' status
            utils_misc.wait_for(lambda: vm.state() == "shut off", 30)

        # Setting the max migration downtime must happen during migration,
        # so use threads for synchronization
        threads = []
        if do_migrate:
            threads.append(threading.Thread(target=thread_func_live_migration,
                                            args=(vm, dest_uri,
                                                  migrate_dargs)))

        threads.append(threading.Thread(target=thread_func_setmmdt,
                                        args=(vm_ref, downtime, extra,
                                              setmmdt_dargs)))

        for thread in threads:
            thread.start()
            # Migration must be executing before setting maxdowntime
            time.sleep(delay_time)
        # Wait until the threads are over
        for thread in threads:
            thread.join(thread_timeout)

        if (status_error is False or do_migrate is False):
            logging.debug("To match the expected pattern '%s' ...",
                          grep_str_local)
            cmd = "grep -E '%s' %s" % (grep_str_local, log_file)
            cmdResult = process.run(cmd, shell=True, verbose=False)
            logging.debug(cmdResult)
    finally:
        # Clean up.
        if do_migrate:
            logging.debug("Cleanup VM on remote host...")
            cleanup_dest(vm, src_uri, dest_uri)

        if orig_config_xml:
            logging.debug("Recover VM XML...")
            orig_config_xml.sync()

        if seLinuxBool:
            logging.info("Recover NFS SELinux boolean on remote host...")
            seLinuxBool.cleanup(True)

        if nfs_client:
            logging.info("Cleanup NFS client environment...")
            nfs_client.cleanup()

        logging.info("Remove the NFS image...")
        source_file = params.get("source_file")
        libvirt.delete_local_disk("file", path=source_file)

        logging.info("Cleanup NFS server environment...")
        exp_dir = params.get("export_dir")
        mount_dir = params.get("mnt_path_name")
        libvirt.setup_or_cleanup_nfs(False, export_dir=exp_dir,
                                     mount_dir=mount_dir,
                                     restore_selinux=local_selinux_bak)
        # Recover libvirtd service configuration on local
        if libvirtd_conf:
            logging.debug("Recover local libvirtd configuration...")
            libvirtd_conf.restore()
            libvirtd.restart()
            os.remove(log_file)

    # Check results.
    if status_error:
        if ret_setmmdt:
            if not do_migrate and libvirt_version.version_compare(1, 2, 9):
                # https://bugzilla.redhat.com/show_bug.cgi?id=1146618
                # Commit fe808d9 fixed it: setting the migration max
                # downtime at any time is allowed since libvirt-1.2.9
                logging.info("Libvirt version is newer than 1.2.9; "
                             "setting maxdowntime while the VM isn't "
                             "migrating is allowed")
            else:
                raise error.TestFail("virsh migrate-setmaxdowntime succeeded "
                                     "but was not expected to.")
    else:
        if do_migrate and not ret_migration:
            raise error.TestFail("Migration failed.")

        if not ret_setmmdt:
            raise error.TestFail("virsh migrate-setmaxdowntime failed.")
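
# The test above must issue migrate-setmaxdowntime while the migration is in
# flight, hence the two threads. A condensed sketch of that pattern
# (hypothetical helper; the real test routes results through module-level
# ret_migration/ret_setmmdt flags set by the thread functions):
def _sketch_setmmdt_during_migration(vm, dest_uri, downtime_ms):
    import threading
    import time
    mig = threading.Thread(target=vm.migrate, args=(dest_uri,))
    mig.start()
    # Give the migration a moment to actually start
    time.sleep(1)
    virsh.migrate_setmaxdowntime(vm.name, downtime_ms, debug=True)
    mig.join()
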
def run(test, params, env):
    """
    Test command: virsh blockcommit <domain> <path>

    1) Prepare test environment.
    2) Commit changes from a snapshot down to its backing image.
    3) Recover test environment.
    4) Check result.
    """

    def make_disk_snapshot(postfix_n, snapshot_take,
                           is_check_snapshot_tree=False,
                           is_create_image_file_in_vm=False):
        """
        Make external snapshots for disks only.

        :param postfix_n: postfix option
        :param snapshot_take: number of snapshots to take
        :param is_check_snapshot_tree: whether to check the snapshot tree
        :param is_create_image_file_in_vm: create image file in VM.
        """
        # Add all disks into the command line.
        disks = vm.get_disk_devices()

        # Make external snapshots for disks only
        for count in range(1, snapshot_take):
            options = "%s_%s %s%s-desc " % (postfix_n, count,
                                            postfix_n, count)
            options += "--disk-only --atomic --no-metadata"
            if needs_agent:
                options += " --quiesce"

            for disk in disks:
                disk_detail = disks[disk]
                basename = os.path.basename(disk_detail['source'])

                # Remove the original suffix if any, appending
                # ".postfix_n[0-9]"
                diskname = basename.split(".")[0]
                snap_name = "%s.%s%s" % (diskname, postfix_n, count)
                disk_external = os.path.join(tmp_dir, snap_name)

                snapshot_external_disks.append(disk_external)
                options += " %s,snapshot=external,file=%s" % (disk,
                                                              disk_external)
                clean_snap_file(disk_external)
            if is_check_snapshot_tree:
                options = options.replace("--no-metadata", "")
            cmd_result = virsh.snapshot_create_as(vm_name, options,
                                                  ignore_status=True,
                                                  debug=True)
            status = cmd_result.exit_status
            if status != 0:
                test.fail("Failed to make snapshots for disks!")

            if is_create_image_file_in_vm:
                create_file_cmd = ("dd if=/dev/urandom of=/mnt/snapshot_%s.img "
                                   "bs=1M count=2" % count)
                session.cmd_status_output(create_file_cmd)
                created_image_files_in_vm.append("snapshot_%s.img" % count)

            # Create a file flag in VM after each snapshot
            flag_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"),
                                                    dir="/tmp")
            file_path = flag_file.name
            flag_file.close()

            status, output = session.cmd_status_output("touch %s" % file_path)
            if status:
                test.fail("Touch file in vm failed. %s" % output)
            snapshot_flag_files.append(file_path)

        def check_snapshot_tree():
            """
            Check whether the predefined snapshot names equal the snapshot
            names reported by virsh snapshot-list --tree
            """
            predefined_snapshot_name_list = []
            for count in range(1, snapshot_take):
                predefined_snapshot_name_list.append("%s_%s" %
                                                     (postfix_n, count))
            snapshot_list_cmd = "virsh snapshot-list %s --tree" % vm_name
            result_output = process.run(snapshot_list_cmd,
                                        ignore_status=True,
                                        shell=True).stdout_text
            virsh_snapshot_name_list = []
            for line in result_output.rsplit("\n"):
                strip_line = line.strip()
                if strip_line and "|" not in strip_line:
                    virsh_snapshot_name_list.append(strip_line)
            # Compare the two lists in order and value; all entries must match.
            compare_list = [out_p for out_p, out_v in
                            zip(predefined_snapshot_name_list,
                                virsh_snapshot_name_list)
                            if out_p not in out_v]
            if compare_list:
                test.fail("snapshot tree not correctly returned.")

        # If check_snapshot_tree is True, check snapshot tree output.
        if is_check_snapshot_tree:
            check_snapshot_tree()

    def check_vm_disk_file(vm):
        """
        Check current vm disk source.

        :param vm: The vm to be checked
        """
        image_name1 = params.get("image_name", "image")
        image_format = params.get("image_format", "qcow2")
        image_dir = os.path.join(data_dir.get_data_dir(), image_name1)
        original_image_path = image_dir + "." + image_format
        logging.debug("Source file should be : %s", original_image_path)

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm.name)
        disk = vmxml.get_devices('disk')[0]
        logging.debug("Current disk info is : %s", disk)
        if disk.source.attrs['file'] != original_image_path:
            test.error("Please check current vm disk source")

    def clean_snap_file(snap_path):
        """
        Clean an already existing duplicate snap file.

        :param snap_path: snap file path
        """
        if os.path.exists(snap_path):
            os.remove(snap_path)
            logging.debug("Cleaned snap file before creating: %s" % snap_path)

    def get_first_disk_source():
        """
        Get disk source of first device.

        :return: source of the first disk device.
        """
        first_device = vm.get_first_disk_devices()
        first_disk_src = first_device['source']
        return first_disk_src

    def make_relative_path_backing_files(pre_set_root_dir=None):
        """
        Create a backing chain of files using relative paths.

        :param pre_set_root_dir: preset root dir
        :return: absolute path of top active file
        """
        first_disk_source = get_first_disk_source()
        basename = os.path.basename(first_disk_source)
        if pre_set_root_dir is None:
            root_dir = os.path.dirname(first_disk_source)
        else:
            root_dir = pre_set_root_dir
        cmd = "mkdir -p %s" % os.path.join(root_dir, '{b..d}')
        ret = process.run(cmd, shell=True)
        libvirt.check_exit_status(ret)

        # Make three external relative path backing files.
        backing_file_dict = collections.OrderedDict()
        backing_file_dict["b"] = "../%s" % basename
        backing_file_dict["c"] = "../b/b.img"
        backing_file_dict["d"] = "../c/c.img"
        if pre_set_root_dir:
            backing_file_dict["b"] = "%s" % first_disk_source
            backing_file_dict["c"] = "%s/b/b.img" % root_dir
            backing_file_dict["d"] = "%s/c/c.img" % root_dir
        disk_format = params.get("disk_format", "qcow2")
        for key, value in list(backing_file_dict.items()):
            backing_file_path = os.path.join(root_dir, key)
            cmd = ("cd %s && qemu-img create -f %s "
                   "-o backing_file=%s,backing_fmt=%s %s.img"
                   % (backing_file_path, "qcow2", value, disk_format, key))
            ret = process.run(cmd, shell=True)
            disk_format = "qcow2"
            libvirt.check_exit_status(ret)
        return os.path.join(backing_file_path, "d.img")

    def check_chain_backing_files(disk_src_file, expect_backing_file=False):
        """
        Check the backing chain files of relative paths after blockcommit.

        :param disk_src_file: first disk src file.
        :param expect_backing_file: whether backing files are expected.
""" first_disk_source = get_first_disk_source() # Validate source image need refer to original one after active blockcommit if not expect_backing_file and disk_src_file not in first_disk_source: test.fail("The disk image path:%s doesn't include the origin image: %s" % (first_disk_source, disk_src_file)) # Validate source image doesn't have backing files after active blockcommit cmd = "qemu-img info %s --backing-chain" % first_disk_source if qemu_img_locking_feature_support: cmd = "qemu-img info -U %s --backing-chain" % first_disk_source ret = process.run(cmd, shell=True).stdout_text.strip() if expect_backing_file: if 'backing file' not in ret: test.fail("The disk image doesn't have backing files") else: logging.debug("The actual qemu-img output:%s\n", ret) else: if 'backing file' in ret: test.fail("The disk image still have backing files") else: logging.debug("The actual qemu-img output:%s\n", ret) def create_reuse_external_snapshots(pre_set_root_dir=None): """ Create reuse external snapshots :param pre_set_root_dir: preset root directory :return: absolute path of base file """ if pre_set_root_dir is None: first_disk_source = get_first_disk_source() basename = os.path.basename(first_disk_source) root_dir = os.path.dirname(first_disk_source) else: root_dir = pre_set_root_dir meta_options = " --reuse-external --disk-only --no-metadata" # Make three external relative path backing files. backing_file_dict = collections.OrderedDict() backing_file_dict["b"] = "b.img" backing_file_dict["c"] = "c.img" backing_file_dict["d"] = "d.img" for key, value in list(backing_file_dict.items()): backing_file_path = os.path.join(root_dir, key) external_snap_shot = "%s/%s" % (backing_file_path, value) snapshot_external_disks.append(external_snap_shot) options = "%s --diskspec %s,file=%s" % (meta_options, disk_target, external_snap_shot) cmd_result = virsh.snapshot_create_as(vm_name, options, ignore_status=False, debug=True) libvirt.check_exit_status(cmd_result) logging.debug('reuse external snapshots:%s' % snapshot_external_disks) return root_dir def check_file_in_vm(): """ Check whether certain image files exists in VM internal. """ for img_file in created_image_files_in_vm: status, output = session.cmd_status_output("ls -l /mnt/%s" % img_file) logging.debug(output) if status: test.fail("blockcommit from top to base failed when ls image file in VM: %s" % output) def do_blockcommit_pivot_repeatedly(): """ Validate bugzilla:https://bugzilla.redhat.com/show_bug.cgi?id=1857735 """ # Make external snapshot,pivot and delete snapshot file repeatedly. 
        tmp_snapshot_name = "external_snapshot_" + "repeated.qcow2"
        block_target = 'vda'
        for count in range(0, 5):
            options = "%s " % tmp_snapshot_name
            options += "--disk-only --atomic"
            disk_external = os.path.join(tmp_dir, tmp_snapshot_name)
            options += " --diskspec %s,snapshot=external,file=%s" % (
                block_target, disk_external)
            virsh.snapshot_create_as(vm_name, options,
                                     ignore_status=False, debug=True)
            virsh.blockcommit(vm_name, block_target,
                              " --active --pivot ", ignore_status=False,
                              debug=True)
            virsh.snapshot_delete(vm_name, tmp_snapshot_name, " --metadata")
            libvirt.delete_local_disk('file', disk_external)

    # MAIN TEST CODE ###
    # Process cartesian parameters
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    snapshot_take = int(params.get("snapshot_take", '0'))
    vm_state = params.get("vm_state", "running")
    needs_agent = "yes" == params.get("needs_agent", "yes")
    replace_vm_disk = "yes" == params.get("replace_vm_disk", "no")
    top_inactive = ("yes" == params.get("top_inactive"))
    with_timeout = ("yes" == params.get("with_timeout_option", "no"))
    cmd_timeout = params.get("cmd_timeout", "1")
    status_error = ("yes" == params.get("status_error", "no"))
    base_option = params.get("base_option", "none")
    middle_base = "yes" == params.get("middle_base", "no")
    pivot_opt = "yes" == params.get("pivot_opt", "no")
    snap_in_mirror = "yes" == params.get("snap_in_mirror", "no")
    snap_in_mirror_err = "yes" == params.get("snap_in_mirror_err", "no")
    with_active_commit = "yes" == params.get("with_active_commit", "no")
    multiple_chain = "yes" == params.get("multiple_chain", "no")
    virsh_dargs = {'debug': True}
    check_snapshot_tree = "yes" == params.get("check_snapshot_tree", "no")
    bandwidth = params.get("blockcommit_bandwidth", "")
    bandwidth_byte = "yes" == params.get("bandwidth_byte", "no")
    disk_target = params.get("disk_target", "vda")
    disk_format = params.get("disk_format", "qcow2")
    reuse_external_snapshot = "yes" == params.get("reuse_external_snapshot",
                                                  "no")
    restart_vm_before_commit = "yes" == params.get("restart_vm_before_commit",
                                                   "no")
    check_image_file_in_vm = "yes" == params.get("check_image_file_in_vm",
                                                 "no")
    pre_set_root_dir = None
    blk_source_folder = None
    convert_qcow2_image_to_raw = "yes" == params.get(
        "convert_qcow2_image_to_raw", "no")
    repeatedly_do_blockcommit_pivot = "yes" == params.get(
        "repeatedly_do_blockcommit_pivot", "no")
    from_top_without_active_option = "yes" == params.get(
        "from_top_without_active_option", "no")
    top_to_middle_keep_overlay = "yes" == params.get(
        "top_to_middle_keep_overlay", "no")
    block_disk_type_based_on_file_backing_file = "yes" == params.get(
        "block_disk_type_based_on_file_backing_file", "no")
    block_disk_type_based_on_gluster_backing_file = "yes" == params.get(
        "block_disk_type_based_on_gluster_backing_file", "no")

    # Check whether qemu-img needs the -U suboption, since the locking
    # feature was added after qemu-2.10
    qemu_img_locking_feature_support = \
        libvirt_storage.check_qemu_image_lock_support()
    backing_file_relative_path = "yes" == params.get(
        "backing_file_relative_path", "no")

    # Process domain disk device parameters
    disk_type = params.get("disk_type")
    disk_src_protocol = params.get("disk_source_protocol")
    restart_tgtd = params.get("restart_tgtd", 'no')
    vol_name = params.get("vol_name")
    tmp_dir = data_dir.get_data_dir()
    pool_name = params.get("pool_name", "gluster-pool")
    brick_path = os.path.join(tmp_dir, pool_name)

    if not top_inactive:
        if not libvirt_version.version_compare(1, 2, 4):
            test.cancel("live active block commit is not supported"
                        " in current libvirt version.")

    # A backup of original vm
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Abort the test if there are snapshots already
    existing_snaps = virsh.snapshot_list(vm_name)
    if len(existing_snaps) != 0:
        test.fail("There are snapshots created for %s already" % vm_name)
    check_vm_disk_file(vm)

    snapshot_external_disks = []
    cmd_session = None
    # Prepare a blank ceph config path to confirm whether it needs deleting
    # at the end of the test
    ceph_cfg = ''
    try:
        if disk_src_protocol == 'iscsi' and disk_type == 'block' \
                and reuse_external_snapshot:
            first_disk = vm.get_first_disk_devices()
            pre_set_root_dir = os.path.dirname(first_disk['source'])

        if disk_src_protocol == 'iscsi' and disk_type == 'network':
            if not libvirt_version.version_compare(1, 0, 4):
                test.cancel("'iscsi' disk doesn't support in"
                            " current libvirt version.")

        # Set vm xml and guest agent
        if replace_vm_disk:
            if disk_src_protocol == "rbd" and disk_type == "network":
                src_host = params.get("disk_source_host", "EXAMPLE_HOSTS")
                mon_host = params.get("mon_host", "EXAMPLE_MON_HOST")
                # Create config file if it doesn't exist
                ceph_cfg = ceph.create_config_file(mon_host)
                if src_host.count("EXAMPLE") or mon_host.count("EXAMPLE"):
                    test.cancel("Please provide rbd host first.")

                params.update(
                    {"disk_source_name": os.path.join(
                        pool_name, 'rbd_' +
                        utils_misc.generate_random_string(4) + '.img')})
                if utils_package.package_install(["ceph-common"]):
                    ceph.rbd_image_rm(
                        mon_host, *params.get("disk_source_name").split('/'))
                else:
                    test.error('Failed to install ceph-common to clean image.')

            if backing_file_relative_path:
                if vm.is_alive():
                    vm.destroy(gracefully=False)
                first_src_file = get_first_disk_source()
                blk_source_image = os.path.basename(first_src_file)
                blk_source_folder = os.path.dirname(first_src_file)
                replace_disk_image = make_relative_path_backing_files()
                params.update({'disk_source_name': replace_disk_image,
                               'disk_type': 'file',
                               'disk_src_protocol': 'file'})
                vm.start()
            if convert_qcow2_image_to_raw:
                if vm.is_alive():
                    vm.destroy(gracefully=False)
                first_src_file = get_first_disk_source()
                blk_source_image = os.path.basename(first_src_file)
                blk_source_folder = os.path.dirname(first_src_file)
                blk_source_image_after_converted = "%s/converted_%s" % (
                    blk_source_folder, blk_source_image)
                # Convert the image from qcow2 to raw
                convert_disk_cmd = ("qemu-img convert"
                                    " -O %s %s %s" %
                                    (disk_format, first_src_file,
                                     blk_source_image_after_converted))
                process.run(convert_disk_cmd, ignore_status=False,
                            shell=True)
                params.update(
                    {'disk_source_name': blk_source_image_after_converted,
                     'disk_type': 'file',
                     'disk_src_protocol': 'file'})
            libvirt.set_vm_disk(vm, params, tmp_dir)

        if needs_agent:
            vm.prepare_guest_agent()

        if repeatedly_do_blockcommit_pivot:
            do_blockcommit_pivot_repeatedly()

        # Create block type disk on file backing file
        if block_disk_type_based_on_file_backing_file or \
                block_disk_type_based_on_gluster_backing_file:
            if not vm.is_alive():
                vm.start()
            first_src_file = get_first_disk_source()
            libvirt.setup_or_cleanup_iscsi(is_setup=False)
            iscsi_target = libvirt.setup_or_cleanup_iscsi(is_setup=True)
            block_type_backstore = iscsi_target
            if block_disk_type_based_on_file_backing_file:
                first_src_file = get_first_disk_source()
            if block_disk_type_based_on_gluster_backing_file:
                first_src_file = "gluster://%s/%s/gluster.qcow2" % (
                    params.get("gluster_server_ip"), params.get("vol_name"))
            backing_file_create_cmd = (
                "qemu-img create -f %s -o backing_file=%s,backing_fmt=%s %s"
                % ("qcow2", first_src_file, "qcow2", block_type_backstore))
            process.run(backing_file_create_cmd, ignore_status=False,
                        shell=True)
            meta_options = " --reuse-external --disk-only --no-metadata"
            options = "%s --diskspec %s,file=%s,stype=block" % (
                meta_options, 'vda', block_type_backstore)
            virsh.snapshot_create_as(vm_name, options,
                                     ignore_status=False, debug=True)

        # The first disk is supposed to include OS
        # We will perform blockcommit operation for it.
        first_disk = vm.get_first_disk_devices()
        blk_source = first_disk['source']
        blk_target = first_disk['target']
        snapshot_flag_files = []
        created_image_files_in_vm = []

        # get a vm session before snapshot
        session = vm.wait_for_login()
        # do snapshot
        postfix_n = 'snap'
        if reuse_external_snapshot:
            make_relative_path_backing_files(pre_set_root_dir)
            blk_source_folder = create_reuse_external_snapshots(
                pre_set_root_dir)
        else:
            make_disk_snapshot(postfix_n, snapshot_take, check_snapshot_tree,
                               check_image_file_in_vm)

        basename = os.path.basename(blk_source)
        diskname = basename.split(".")[0]
        snap_src_lst = [blk_source]
        if multiple_chain:
            snap_name = "%s.%s1" % (diskname, postfix_n)
            snap_top = os.path.join(tmp_dir, snap_name)
            top_index = snapshot_external_disks.index(snap_top) + 1
            omit_list = snapshot_external_disks[top_index:]
            vm.destroy(gracefully=False)
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            disk_xml = ''
            disk_xmls = vmxml.get_devices(device_type="disk")
            for disk in disk_xmls:
                if disk.get('device_tag') == 'disk':
                    disk_xml = disk
                    break

            vmxml.del_device(disk_xml)
            disk_dict = {'attrs': {'file': snap_top}}
            disk_xml.source = disk_xml.new_disk_source(**disk_dict)
            if libvirt_version.version_compare(6, 0, 0):
                bs_source = {'file': blk_source}
                bs_dict = {"type": params.get("disk_type", "file"),
                           "format": {'type': params.get("disk_format",
                                                         "qcow2")}}
                new_bs = disk_xml.new_backingstore(**bs_dict)
                new_bs["source"] = disk_xml.backingstore.new_source(
                    **bs_source)
                disk_xml.backingstore = new_bs
            vmxml.add_device(disk_xml)
            vmxml.sync()
            vm.start()
            session = vm.wait_for_login()
            postfix_n = 'new_snap'
            make_disk_snapshot(postfix_n, snapshot_take)
            snap_src_lst = [blk_source]
            snap_src_lst += snapshot_external_disks
            logging.debug("omit list is %s", omit_list)
            for i in omit_list:
                snap_src_lst.remove(i)
        else:
            # snapshot src file list
            snap_src_lst += snapshot_external_disks
        backing_chain = ''
        for i in reversed(list(range(snapshot_take))):
            if i == 0:
                backing_chain += "%s" % snap_src_lst[i]
            else:
                backing_chain += "%s -> " % snap_src_lst[i]

        logging.debug("The backing chain is: %s" % backing_chain)

        # check snapshot disk xml backingStore is expected
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disks = vmxml.devices.by_device_tag('disk')
        disk_xml = None
        for disk in disks:
            if disk.target['dev'] != blk_target:
                continue
            else:
                if disk.device != 'disk':
                    continue
                disk_xml = disk.xmltreefile
                logging.debug("the target disk xml after snapshot is %s",
                              disk_xml)
                break

        if not disk_xml:
            test.fail("Can't find disk xml with target %s" % blk_target)
        elif libvirt_version.version_compare(1, 2, 4):
            # backingStore element introduced in 1.2.4
            chain_lst = snap_src_lst[::-1]
            ret = check_chain_xml(disk_xml, chain_lst)
            if not ret:
                test.fail("Domain image backing chain check failed")

        # set blockcommit_options
        top_image = None
        blockcommit_options = "--wait --verbose"

        if with_timeout:
            blockcommit_options += " --timeout %s" % cmd_timeout

        if base_option == "shallow":
            blockcommit_options += " --shallow"
        elif base_option == "base":
            if middle_base:
                snap_name = "%s.%s1" % (diskname, postfix_n)
                blk_source = os.path.join(tmp_dir, snap_name)
            blockcommit_options += " --base %s" % blk_source
        if len(bandwidth):
            blockcommit_options += " --bandwidth %s" % bandwidth
%s" % bandwidth if bandwidth_byte: blockcommit_options += " --bytes" if top_inactive: snap_name = "%s.%s2" % (diskname, postfix_n) top_image = os.path.join(tmp_dir, snap_name) if reuse_external_snapshot: index = len(snapshot_external_disks) - 2 top_image = snapshot_external_disks[index] blockcommit_options += " --top %s" % top_image else: blockcommit_options += " --active" if pivot_opt: blockcommit_options += " --pivot" if from_top_without_active_option: blockcommit_options = blockcommit_options.replace("--active", "") if top_to_middle_keep_overlay: blockcommit_options = blockcommit_options.replace("--active", "") blockcommit_options = blockcommit_options.replace("--pivot", "") blockcommit_options += " --keep-overlay" if restart_vm_before_commit: top = 2 base = len(snapshot_external_disks) blockcommit_options = ("--top %s[%d] --base %s[%d] --verbose --wait --keep-relative" % (disk_target, top, disk_target, base)) vm.destroy(gracefully=True) vm.start() if vm_state == "shut off": vm.destroy(gracefully=True) if with_active_commit: # inactive commit follow active commit will fail with bug 1135339 cmd = "virsh blockcommit %s %s --active --pivot" % (vm_name, blk_target) cmd_session = aexpect.ShellSession(cmd) if backing_file_relative_path: blockcommit_options = " --active --verbose --shallow --pivot --keep-relative" block_commit_index = snapshot_take expect_backing_file = False # Do block commit using --active for count in range(1, snapshot_take): res = virsh.blockcommit(vm_name, blk_target, blockcommit_options, **virsh_dargs) libvirt.check_exit_status(res, status_error) if top_inactive: vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disk_xml = '' disk_xmls = vmxml.get_devices(device_type="disk") for disk in disk_xmls: if disk.get('device_tag') == 'disk': disk_xml = disk break top_index = 1 try: top_index = disk_xml.backingstore.index except AttributeError: pass else: top_index = int(top_index) block_commit_index = snapshot_take - 1 expect_backing_file = True for count in range(1, block_commit_index): # Do block commit with --wait if top_inactive if top_inactive: blockcommit_options = (" --wait --verbose --top vda[%d] " "--base vda[%d] --keep-relative" % (top_index, top_index+1)) if not libvirt_version.version_compare(6, 0, 0): top_index = 1 else: top_index += 1 res = virsh.blockcommit(vm_name, blk_target, blockcommit_options, **virsh_dargs) libvirt.check_exit_status(res, status_error) check_chain_backing_files(blk_source_image, expect_backing_file) return if reuse_external_snapshot and not top_inactive: block_commit_index = len(snapshot_external_disks) - 1 for index in range(block_commit_index): # Do block commit with --shallow --wait external_blockcommit_options = (" --shallow --wait --verbose --top %s " % (snapshot_external_disks[index])) res = virsh.blockcommit(vm_name, blk_target, external_blockcommit_options, **virsh_dargs) libvirt.check_exit_status(res, status_error) # Do blockcommit with top active result = virsh.blockcommit(vm_name, blk_target, blockcommit_options, **virsh_dargs) # Check status_error libvirt.check_exit_status(result, status_error) return # Start one thread to check the bandwidth in output if bandwidth and bandwidth_byte: bandwidth += 'B' pool = ThreadPool(processes=1) pool.apply_async(check_bandwidth_thread, (libvirt.check_blockjob, vm_name, blk_target, bandwidth, test)) # Run test case # Active commit does not support on rbd based disk with bug 1200726 result = virsh.blockcommit(vm_name, blk_target, blockcommit_options, **virsh_dargs) # Check status_error 
        libvirt.check_exit_status(result, status_error)

        # Skip the chain file check as per the test case description
        if restart_vm_before_commit:
            return

        if check_image_file_in_vm:
            check_file_in_vm()

        if result.exit_status and status_error:
            return

        while True:
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)

            disks = vmxml.devices.by_device_tag('disk')
            for disk in disks:
                if disk.target['dev'] != blk_target:
                    continue
                else:
                    disk_xml = disk.xmltreefile
                    break

            if not top_inactive:
                disk_mirror = disk_xml.find('mirror')
                if '--pivot' not in blockcommit_options:
                    if disk_mirror is not None:
                        job_type = disk_mirror.get('job')
                        job_ready = disk_mirror.get('ready')
                        src_element = disk_mirror.find('source')
                        disk_src_file = None
                        for elem in ('file', 'name', 'dev'):
                            elem_val = src_element.get(elem)
                            if elem_val:
                                disk_src_file = elem_val
                                break
                        err_msg = "blockcommit base source "
                        err_msg += "%s not expected" % disk_src_file
                        if '--shallow' in blockcommit_options:
                            if not multiple_chain:
                                if disk_src_file != snap_src_lst[2]:
                                    test.fail(err_msg)
                            else:
                                if disk_src_file != snap_src_lst[3]:
                                    test.fail(err_msg)
                        else:
                            if disk_src_file != blk_source:
                                test.fail(err_msg)
                        if libvirt_version.version_compare(1, 2, 7):
                            # The job attribute mentions which API started the
                            # operation since 1.2.7.
                            if job_type != 'active-commit':
                                test.fail("blockcommit job type '%s'"
                                          " not expected" % job_type)
                            if job_ready != 'yes':
                                # The attribute ready, if present, tracks
                                # progress of the job: yes if the disk is known
                                # to be ready to pivot, or, since 1.2.7, abort
                                # or pivot if the job is in the process of
                                # completing.
                                continue
                            else:
                                logging.debug("after active block commit job "
                                              "ready for pivot, the target disk"
                                              " xml is %s", disk_xml)
                                break
                        else:
                            break
                    else:
                        break
                else:
                    if disk_mirror is None:
                        logging.debug(disk_xml)
                        if "--shallow" in blockcommit_options:
                            chain_lst = snap_src_lst[::-1]
                            chain_lst.pop(0)
                            ret = check_chain_xml(disk_xml, chain_lst)
                            if not ret:
                                test.fail("Domain image backing "
                                          "chain check failed")
                            cmd_result = virsh.blockjob(vm_name, blk_target,
                                                        '',
                                                        ignore_status=True,
                                                        debug=True)
                            libvirt.check_exit_status(cmd_result)
                        elif "--base" in blockcommit_options:
                            chain_lst = snap_src_lst[::-1]
                            base_index = chain_lst.index(blk_source)
                            chain_lst = chain_lst[base_index:]
                            ret = check_chain_xml(disk_xml, chain_lst)
                            if not ret:
                                test.fail("Domain image backing "
                                          "chain check failed")
                        break
                    else:
                        # wait for pivot after commit is synced
                        continue
            else:
                logging.debug("after inactive commit the disk xml is: %s"
                              % disk_xml)
                if libvirt_version.version_compare(1, 2, 4):
                    if "--shallow" in blockcommit_options:
                        chain_lst = snap_src_lst[::-1]
                        chain_lst.remove(top_image)
                        ret = check_chain_xml(disk_xml, chain_lst)
                        if not ret:
                            test.fail("Domain image backing chain "
                                      "check failed")
                    elif "--base" in blockcommit_options:
                        chain_lst = snap_src_lst[::-1]
                        top_index = chain_lst.index(top_image)
                        base_index = chain_lst.index(blk_source)
                        val_tmp = []
                        for i in range(top_index, base_index):
                            val_tmp.append(chain_lst[i])
                        for i in val_tmp:
                            chain_lst.remove(i)
                        ret = check_chain_xml(disk_xml, chain_lst)
                        if not ret:
                            test.fail("Domain image backing chain "
                                      "check failed")
                    break
                else:
                    break

        # Check flag files
        if not vm_state == "shut off" and not multiple_chain:
            for flag in snapshot_flag_files:
                status, output = session.cmd_status_output("cat %s" % flag)
                if status:
                    test.fail("blockcommit failed: %s" % output)

        if not pivot_opt and snap_in_mirror:
            # do snapshot during mirror phase
            snap_path = "%s/%s.snap" % (tmp_dir, vm_name)
            snap_opt = "--disk-only --atomic --no-metadata "
            snap_opt += "vda,snapshot=external,file=%s" % snap_path
"vda,snapshot=external,file=%s" % snap_path snapshot_external_disks.append(snap_path) cmd_result = virsh.snapshot_create_as(vm_name, snap_opt, ignore_statues=True, debug=True) libvirt.check_exit_status(cmd_result, snap_in_mirror_err) finally: if vm.is_alive(): vm.destroy(gracefully=False) # Clean ceph image if used in test if 'mon_host' in locals(): if utils_package.package_install(["ceph-common"]): disk_source_name = params.get("disk_source_name") cmd = ("rbd -m {0} info {1} && rbd -m {0} rm " "{1}".format(mon_host, disk_source_name)) cmd_result = process.run(cmd, ignore_status=True, shell=True) logging.debug("result of rbd removal: %s", cmd_result) else: logging.debug('Failed to install ceph-common to clean ceph.') # Recover xml of vm. vmxml_backup.sync("--snapshots-metadata") # Remove ceph configure file if created if ceph_cfg: os.remove(ceph_cfg) if cmd_session: cmd_session.close() for disk in snapshot_external_disks: if os.path.exists(disk): os.remove(disk) if backing_file_relative_path or reuse_external_snapshot: libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup) if blk_source_folder: process.run("cd %s && rm -rf b c d" % blk_source_folder, shell=True) if disk_src_protocol == 'iscsi' or 'iscsi_target' in locals(): libvirt.setup_or_cleanup_iscsi(is_setup=False, restart_tgtd=restart_tgtd) elif disk_src_protocol == 'gluster': gluster.setup_or_cleanup_gluster(False, brick_path=brick_path, **params) libvirtd = utils_libvirtd.Libvirtd() libvirtd.restart() elif disk_src_protocol == 'netfs': restore_selinux = params.get('selinux_status_bak') libvirt.setup_or_cleanup_nfs(is_setup=False, restore_selinux=restore_selinux) # Recover images xattr if having some dirty_images = get_images_with_xattr(vm) if dirty_images: clean_images_with_xattr(dirty_images) test.fail("VM's image(s) having xattr left")
def run(test, params, env):
    """
    Test virsh domblkerror with 2 types of error:

    1. unspecified error
    2. no space
    """

    if not virsh.has_help_command('domblkerror'):
        test.cancel("This version of libvirt does not support domblkerror "
                    "test")

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    error_type = params.get("domblkerror_error_type")
    timeout = params.get("domblkerror_timeout", 240)
    mnt_dir = params.get("domblkerror_mnt_dir", "/home/test")
    export_file = params.get("nfs_export_file", "/etc/exports")
    img_name = params.get("domblkerror_img_name", "libvirt-disk")
    img_size = params.get("domblkerror_img_size")
    target_dev = params.get("domblkerror_target_dev", "vdb")
    pool_name = params.get("domblkerror_pool_name", "fs_pool")
    vol_name = params.get("domblkerror_vol_name", "vol1")
    ubuntu = distro.detect().name == 'Ubuntu'
    nfs_service_package = params.get("nfs_service_package",
                                     "nfs-kernel-server")
    nfs_service = None

    vm = env.get_vm(vm_name)
    # backup /etc/exports
    shutil.copyfile(export_file, "%s.bak" % export_file)
    selinux_bak = ""
    # backup xml
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        # Generate tmp dir
        tmp_dir = data_dir.get_tmp_dir()
        img_dir = os.path.join(tmp_dir, 'images')
        if not os.path.exists(img_dir):
            os.mkdir(img_dir)
        # Generate attached disk
        process.run("qemu-img create %s %s" %
                    (os.path.join(img_dir, img_name), img_size),
                    shell=True, verbose=True)

        # Get unspecified error
        if error_type == "unspecified error":
            # In this situation, the guest attaches a disk on nfs; stopping
            # the nfs service pauses the guest and yields an unspecified
            # error
            nfs_dir = os.path.join(tmp_dir, 'mnt')
            if not os.path.exists(nfs_dir):
                os.mkdir(nfs_dir)
            mount_opt = "rw,no_root_squash,async"
            res = libvirt.setup_or_cleanup_nfs(is_setup=True,
                                               mount_dir=nfs_dir,
                                               is_mount=False,
                                               export_options=mount_opt,
                                               export_dir=img_dir)
            if not ubuntu:
                selinux_bak = res["selinux_status_bak"]
                nfs_service_package = "nfs"
            process.run("mount -o nolock,soft,timeo=1,retrans=1,retry=0 "
                        "127.0.0.1:%s %s" % (img_dir, nfs_dir),
                        shell=True, verbose=True)
            img_path = os.path.join(nfs_dir, img_name)
            nfs_service = Factory.create_service(nfs_service_package)

        elif error_type == "no space":
            # Steps to generate a no-space block error:
            # 1. Prepare an iscsi disk and build an fs pool with it
            # 2. Create a vol with larger capacity and 0 allocation
            # 3. Attach this disk to the guest
            _pool_vol = None
            pool_target = os.path.join(tmp_dir, pool_name)
            _pool_vol = libvirt.PoolVolumeTest(test, params)
            _pool_vol.pre_pool(pool_name, "fs", pool_target, img_name,
                               image_size=img_size)
            _pool_vol.pre_vol(vol_name, "raw", "100M", "0", pool_name)
            img_path = os.path.join(pool_target, vol_name)

        # Generate disk xml
        # Guest will attach a disk with cache=none and error_policy=stop
        img_disk = Disk(type_name="file")
        img_disk.device = "disk"
        img_disk.source = img_disk.new_disk_source(
            **{'attrs': {'file': img_path}})
        img_disk.driver = {'name': "qemu",
                           'type': "raw",
                           'cache': "none",
                           'error_policy': "stop"}
        img_disk.target = {'dev': target_dev,
                           'bus': "virtio"}
        logging.debug("disk xml is %s", img_disk.xml)

        # Start guest and get session
        if not vm.is_alive():
            vm.start()
        session = vm.wait_for_login()
        # Get disk list before operation
        get_disks_cmd = "fdisk -l|grep '^Disk /dev'|cut -d: -f1|cut -d' ' -f2"
        bef_list = str(session.cmd_output(get_disks_cmd)).strip().split("\n")
        logging.debug("disk_list_debug = %s", bef_list)

        # Attach disk to guest
        ret = virsh.attach_device(domain_opt=vm_name, file_opt=img_disk.xml)
        if ret.exit_status != 0:
            test.fail("Fail to attach device %s" % ret.stderr)
        time.sleep(2)
        logging.debug("domain xml is %s", virsh.dumpxml(vm_name))
        # get disk list after attach
        aft_list = str(session.cmd_output(get_disks_cmd)).strip().split("\n")
        logging.debug("disk list after attaching - %s", aft_list)
        # Find the new disk after attach
        new_disk = "".join(list(set(bef_list) ^ set(aft_list)))
        logging.debug("new disk is %s", new_disk)

        def create_large_image():
            """
            Create a large image in the guest
            """
            # create partition and file system
            session.cmd("parted -s %s mklabel msdos" % new_disk)
            session.cmd("parted -s %s mkpart primary ext3 '0%%' '100%%'" %
                        new_disk)
            # mount disk and write a file to it
            session.cmd("mkfs.ext3 %s1" % new_disk)
            session.cmd("mkdir -p %s && mount %s1 %s" %
                        (mnt_dir, new_disk, mnt_dir))
            # The following step may pause the guest before it returns
            try:
                session.cmd("dd if=/dev/zero of=%s/big_file bs=1024 "
                            "count=51200 && sync" % mnt_dir)
            except Exception as err:
                logging.debug("Expected failure: %s", err)
            session.close()

        create_large_image()
        if error_type == "unspecified error":
            # umount nfs to trigger the error after creating the large image
            if nfs_service is not None:
                nfs_service.stop()
                logging.debug("nfs status is %s", nfs_service.status())

        # wait and check the guest status with timeout
        def _check_state():
            """
            Check domain state
            """
            return (vm.state() == "paused")

        if not utils_misc.wait_for(_check_state, timeout):
            # If not paused, perform one more IO operation on the mnt disk
            session.cmd("echo 'one more write to big file' > %s/big_file" %
                        mnt_dir)
            if not utils_misc.wait_for(_check_state, 60):
                test.fail("Guest is not paused, it is %s now" % vm.state())
        else:
            logging.info("Now domain state changed to paused status")
        output = virsh.domblkerror(vm_name)
        if output.exit_status == 0:
            expect_result = "%s: %s" % (img_disk.target['dev'], error_type)
            if output.stdout.strip() == expect_result:
                logging.info("Get expected result: %s", expect_result)
            else:
                test.fail("Failed to get expected result, got %s" %
                          output.stdout.strip())
        else:
            test.fail("Fail to get domblkerror info: %s" % output.stderr)
    finally:
        logging.info("Do clean steps")
        if error_type == "unspecified error":
            if nfs_service is not None:
                nfs_service.start()
            vm.destroy()
            if os.path.isfile("%s.bak" % export_file):
                shutil.move("%s.bak" % export_file, export_file)
            res = libvirt.setup_or_cleanup_nfs(is_setup=False,
                                               mount_dir=nfs_dir,
                                               export_dir=img_dir,
                                               restore_selinux=selinux_bak)
        elif error_type == "no space":
            vm.destroy()
            if _pool_vol:
                _pool_vol.cleanup_pool(pool_name, "fs", pool_target, img_name)
        vmxml_backup.sync()
        data_dir.clean_tmp_files()
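
# Illustrative sketch: the expected-result comparison above relies on
# `virsh domblkerror <domain>` printing one "<target>: <error type>" pair per
# erroring disk, e.g. "vdb: no space".  A minimal stand-alone parser (plain
# subprocess, virsh assumed in $PATH; this helper is not part of the test
# framework):
def _parse_domblkerror(domain):
    import subprocess
    out = subprocess.run(["virsh", "domblkerror", domain],
                         capture_output=True, text=True,
                         check=True).stdout
    errors = {}
    for line in out.strip().splitlines():
        if ":" in line:
            dev, err = line.split(":", 1)
            errors[dev.strip()] = err.strip()
    return errors   # e.g. {"vdb": "no space"}
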
def run(test, params, env): """ Test command: virsh blockcommit <domain> <path> 1) Prepare test environment. 2) Commit changes from a snapshot down to its backing image. 3) Recover test environment. 4) Check result. """ def make_disk_snapshot(): # Add all disks into commandline. disks = vm.get_disk_devices() # Make three external snapshots for disks only for count in range(1, 4): options = "snapshot%s snap%s-desc " \ "--disk-only --atomic --no-metadata" % (count, count) if needs_agent: options += " --quiesce" for disk in disks: disk_detail = disks[disk] basename = os.path.basename(disk_detail['source']) # Remove the original suffix if any, appending ".snap[0-9]" diskname = basename.split(".")[0] disk_external = os.path.join(tmp_dir, "%s.snap%s" % (diskname, count)) snapshot_external_disks.append(disk_external) options += " %s,snapshot=external,file=%s" % (disk, disk_external) cmd_result = virsh.snapshot_create_as(vm_name, options, ignore_status=True, debug=True) status = cmd_result.exit_status if status != 0: raise error.TestFail("Failed to make snapshots for disks!") # Create a file flag in VM after each snapshot flag_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"), dir="/tmp") file_path = flag_file.name flag_file.close() status, output = session.cmd_status_output("touch %s" % file_path) if status: raise error.TestFail("Touch file in vm failed. %s" % output) snapshot_flag_files.append(file_path) # MAIN TEST CODE ### # Process cartesian parameters vm_name = params.get("main_vm") vm = env.get_vm(vm_name) vm_state = params.get("vm_state", "running") needs_agent = "yes" == params.get("needs_agent", "yes") replace_vm_disk = "yes" == params.get("replace_vm_disk", "no") top_inactive = ("yes" == params.get("top_inactive")) with_timeout = ("yes" == params.get("with_timeout_option", "no")) status_error = ("yes" == params.get("status_error", "no")) base_option = params.get("base_option", "none") middle_base = "yes" == params.get("middle_base", "no") pivot_opt = "yes" == params.get("pivot_opt", "no") snap_in_mirror = "yes" == params.get("snap_in_mirror", "no") snap_in_mirror_err = "yes" == params.get("snap_in_mirror_err", "no") virsh_dargs = {'debug': True} # Process domain disk device parameters disk_type = params.get("disk_type") disk_src_protocol = params.get("disk_source_protocol") vol_name = params.get("vol_name") tmp_dir = data_dir.get_tmp_dir() pool_name = params.get("pool_name", "gluster-pool") brick_path = os.path.join(tmp_dir, pool_name) if not top_inactive: if not libvirt_version.version_compare(1, 2, 4): raise error.TestNAError("live active block commit is not supported" + " in current libvirt version.") # A backup of original vm vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) # Abort the test if there are snapshots already exsiting_snaps = virsh.snapshot_list(vm_name) if len(exsiting_snaps) != 0: raise error.TestFail("There are snapshots created for %s already" % vm_name) snapshot_external_disks = [] try: if disk_src_protocol == 'iscsi' and disk_type == 'network': if not libvirt_version.version_compare(1, 0, 4): raise error.TestNAError("'iscsi' disk doesn't support in" + " current libvirt version.") # Set vm xml and guest agent if replace_vm_disk: libvirt.set_vm_disk(vm, params, tmp_dir) if needs_agent: libvirt.set_guest_agent(vm) # The first disk is supposed to include OS # We will perform blockcommit operation for it. 
first_disk = vm.get_first_disk_devices() blk_source = first_disk['source'] blk_target = first_disk['target'] snapshot_flag_files = [] # get a vm session before snapshot session = vm.wait_for_login() # do snapshot make_disk_snapshot() # snapshot src file list snap_src_lst = [blk_source] snap_src_lst += snapshot_external_disks backing_chain = '' for i in reversed(range(4)): if i == 0: backing_chain += "%s" % snap_src_lst[i] else: backing_chain += "%s -> " % snap_src_lst[i] logging.debug("The backing chain is: %s" % backing_chain) # check snapshot disk xml backingStore is expected vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disks = vmxml.devices.by_device_tag('disk') disk_xml = None for disk in disks: if disk.target['dev'] != blk_target: continue else: disk_xml = disk.xmltreefile logging.debug("the target disk xml after snapshot is %s", disk_xml) break if not disk_xml: raise error.TestFail("Can't find disk xml with target %s" % blk_target) elif libvirt_version.version_compare(1, 2, 4): # backingStore element introuduced in 1.2.4 chain_lst = snap_src_lst[::-1] ret = check_chain_xml(disk_xml, chain_lst) if not ret: raise error.TestFail("Domain image backing chain check failed") # set blockcommit_options top_image = None blockcommit_options = "--wait --verbose" basename = os.path.basename(blk_source) diskname = basename.split(".")[0] if with_timeout: blockcommit_options += " --timeout 1" if base_option == "shallow": blockcommit_options += " --shallow" elif base_option == "base": if middle_base: blk_source = os.path.join(tmp_dir, "%s.snap1" % diskname) blockcommit_options += " --base %s" % blk_source if top_inactive: top_image = os.path.join(tmp_dir, "%s.snap2" % diskname) blockcommit_options += " --top %s" % top_image else: blockcommit_options += " --active" if pivot_opt: blockcommit_options += " --pivot" if vm_state == "shut off": vm.shutdown() # Run test case result = virsh.blockcommit(vm_name, blk_target, blockcommit_options, **virsh_dargs) # Check status_error libvirt.check_exit_status(result, status_error) if result.exit_status and status_error: return while True: vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disks = vmxml.devices.by_device_tag('disk') for disk in disks: if disk.target['dev'] != blk_target: continue else: disk_xml = disk.xmltreefile break if not top_inactive: disk_mirror = disk_xml.find('mirror') if '--pivot' not in blockcommit_options: if disk_mirror is not None: job_type = disk_mirror.get('job') job_ready = disk_mirror.get('ready') src_element = disk_mirror.find('source') disk_src_file = None for elem in ('file', 'name', 'dev'): elem_val = src_element.get(elem) if elem_val: disk_src_file = elem_val break err_msg = "blockcommit base source " err_msg += "%s not expected" % disk_src_file if '--shallow' in blockcommit_options: if disk_src_file != snap_src_lst[2]: raise error.TestFail(err_msg) else: if disk_src_file != blk_source: raise error.TestFail(err_msg) if libvirt_version.version_compare(1, 2, 7): # The job attribute mentions which API started the # operation since 1.2.7. if job_type != 'active-commit': raise error.TestFail("blockcommit job type '%s'" " not expected" % job_type) if job_ready != 'yes': # The attribute ready, if present, tracks # progress of the job: yes if the disk is known # to be ready to pivot, or, since 1.2.7, abort # or pivot if the job is in the process of # completing. 
                                continue
                            else:
                                logging.debug("after active block commit job "
                                              "ready for pivot, the target disk"
                                              " xml is %s", disk_xml)
                                break
                    else:
                        break
                else:
                    if disk_mirror is None:
                        logging.debug(disk_xml)
                        if "--shallow" in blockcommit_options:
                            chain_lst = snap_src_lst[::-1]
                            chain_lst.pop(0)
                            ret = check_chain_xml(disk_xml, chain_lst)
                            if not ret:
                                raise error.TestFail("Domain image backing "
                                                     "chain check failed")
                        elif "--base" in blockcommit_options:
                            chain_lst = snap_src_lst[::-1]
                            base_index = chain_lst.index(blk_source)
                            chain_lst = chain_lst[base_index:]
                            ret = check_chain_xml(disk_xml, chain_lst)
                            if not ret:
                                raise error.TestFail("Domain image backing "
                                                     "chain check failed")
                        break
                    else:
                        # wait for pivot after the commit is synced
                        continue
            else:
                logging.debug("after inactive commit the disk xml is: %s"
                              % disk_xml)
                if libvirt_version.version_compare(1, 2, 4):
                    if "--shallow" in blockcommit_options:
                        chain_lst = snap_src_lst[::-1]
                        chain_lst.remove(top_image)
                        ret = check_chain_xml(disk_xml, chain_lst)
                        if not ret:
                            raise error.TestFail("Domain image backing chain "
                                                 "check failed")
                    elif "--base" in blockcommit_options:
                        chain_lst = snap_src_lst[::-1]
                        top_index = chain_lst.index(top_image)
                        base_index = chain_lst.index(blk_source)
                        val_tmp = []
                        for i in range(top_index, base_index):
                            val_tmp.append(chain_lst[i])
                        for i in val_tmp:
                            chain_lst.remove(i)
                        ret = check_chain_xml(disk_xml, chain_lst)
                        if not ret:
                            raise error.TestFail("Domain image backing chain "
                                                 "check failed")
                    break
                else:
                    break

        # Check flag files
        if not vm_state == "shut off":
            for flag in snapshot_flag_files:
                status, output = session.cmd_status_output("cat %s" % flag)
                if status:
                    raise error.TestFail("blockcommit failed: %s" % output)

        if not pivot_opt and snap_in_mirror:
            # do snapshot during the mirror phase
            snap_path = "%s/%s.snap" % (tmp_dir, vm_name)
            snap_opt = "--disk-only --atomic --no-metadata "
            snap_opt += "vda,snapshot=external,file=%s" % snap_path
            snapshot_external_disks.append(snap_path)
            cmd_result = virsh.snapshot_create_as(vm_name, snap_opt,
                                                  ignore_status=True,
                                                  debug=True)
            libvirt.check_exit_status(cmd_result, snap_in_mirror_err)
    finally:
        if vm.is_alive():
            vm.destroy()
        # Recover xml of vm.
        vmxml_backup.sync("--snapshots-metadata")
        for disk in snapshot_external_disks:
            if os.path.exists(disk):
                os.remove(disk)
        if disk_src_protocol == 'iscsi':
            libvirt.setup_or_cleanup_iscsi(is_setup=False)
        elif disk_src_protocol == 'gluster':
            libvirt.setup_or_cleanup_gluster(False, vol_name, brick_path)
        elif disk_src_protocol == 'netfs':
            restore_selinux = params.get('selinux_status_bak')
            libvirt.setup_or_cleanup_nfs(is_setup=False,
                                         restore_selinux=restore_selinux)
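
# Illustrative sketch: check_chain_xml() (a shared test utility) compares the
# disk's <backingStore> chain against an expected image list.  The same
# information can be read straight from the domain XML; a minimal stand-alone
# version for file-backed chains might be (virsh assumed in $PATH; the helper
# name is illustrative):
def _read_backing_chain(domain, target_dev):
    import subprocess
    import xml.etree.ElementTree as ET
    xml_str = subprocess.run(["virsh", "dumpxml", domain],
                             capture_output=True, text=True,
                             check=True).stdout
    for disk in ET.fromstring(xml_str).findall("./devices/disk"):
        tgt = disk.find("target")
        if tgt is None or tgt.get("dev") != target_dev:
            continue
        chain = [disk.find("source").get("file")]
        store = disk.find("backingStore")
        while store is not None and store.find("source") is not None:
            chain.append(store.find("source").get("file"))
            store = store.find("backingStore")
        return chain    # top (active) image first, base image last
    return []
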
def run(test, params, env): """ Test command: virsh blockpull <domain> <path> 1) Prepare test environment. 2) Populate a disk from its backing image. 3) Recover test environment. 4) Check result. """ def make_disk_snapshot(): # Make four external snapshots for disks only for count in range(1, 5): snap_xml = snapshot_xml.SnapshotXML() snapshot_name = "snapshot_test%s" % count snap_xml.snap_name = snapshot_name snap_xml.description = "Snapshot Test %s" % count # Add all disks into xml file. vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) disks = vmxml.devices.by_device_tag('disk') new_disks = [] for src_disk_xml in disks: disk_xml = snap_xml.SnapDiskXML() disk_xml.xmltreefile = src_disk_xml.xmltreefile # Skip cdrom if disk_xml.device == "cdrom": continue del disk_xml.device del disk_xml.address disk_xml.snapshot = "external" disk_xml.disk_name = disk_xml.target['dev'] # Only qcow2 works as external snapshot file format, update it # here driver_attr = disk_xml.driver driver_attr.update({'type': 'qcow2'}) disk_xml.driver = driver_attr new_attrs = disk_xml.source.attrs if disk_xml.source.attrs.has_key('file'): file_name = disk_xml.source.attrs['file'] new_file = "%s.snap%s" % (file_name.split('.')[0], count) snapshot_external_disks.append(new_file) new_attrs.update({'file': new_file}) hosts = None elif (disk_xml.source.attrs.has_key('name') and disk_src_protocol == 'gluster'): src_name = disk_xml.source.attrs['name'] new_name = "%s.snap%s" % (src_name.split('.')[0], count) new_attrs.update({'name': new_name}) snapshot_external_disks.append(new_name) hosts = disk_xml.source.hosts elif (disk_xml.source.attrs.has_key('dev') or disk_xml.source.attrs.has_key('name')): if (disk_xml.type_name == 'block' or disk_src_protocol in ['iscsi', 'rbd']): # Use local file as external snapshot target for block # and iscsi network type. # As block device will be treat as raw format by # default, it's not fit for external disk snapshot # target. A work around solution is use qemu-img again # with the target. # And external active snapshots are not supported on # 'network' disks using 'iscsi' protocol disk_xml.type_name = 'file' if new_attrs.has_key('dev'): del new_attrs['dev'] elif new_attrs.has_key('name'): del new_attrs['name'] del new_attrs['protocol'] new_file = "%s/blk_src_file.snap%s" % (tmp_dir, count) snapshot_external_disks.append(new_file) new_attrs.update({'file': new_file}) hosts = None new_src_dict = {"attrs": new_attrs} if hosts: new_src_dict.update({"hosts": hosts}) disk_xml.source = disk_xml.new_disk_source(**new_src_dict) new_disks.append(disk_xml) snap_xml.set_disks(new_disks) snapshot_xml_path = snap_xml.xml logging.debug("The snapshot xml is: %s" % snap_xml.xmltreefile) options = "--disk-only --xmlfile %s " % snapshot_xml_path snapshot_result = virsh.snapshot_create( vm_name, options, debug=True) if snapshot_result.exit_status != 0: raise error.TestFail(snapshot_result.stderr) # Create a file flag in VM after each snapshot flag_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"), dir="/tmp") file_path = flag_file.name flag_file.close() status, output = session.cmd_status_output("touch %s" % file_path) if status: raise error.TestFail("Touch file in vm failed. 
%s" % output) snapshot_flag_files.append(file_path) # MAIN TEST CODE ### # Process cartesian parameters vm_name = params.get("main_vm") vm = env.get_vm(vm_name) needs_agent = "yes" == params.get("needs_agent", "yes") replace_vm_disk = "yes" == params.get("replace_vm_disk", "no") snap_in_mirror = "yes" == params.get("snap_in_mirror", 'no') snap_in_mirror_err = "yes" == params.get("snap_in_mirror_err", 'no') bandwidth = params.get("bandwidth", None) with_timeout = ("yes" == params.get("with_timeout_option", "no")) status_error = ("yes" == params.get("status_error", "no")) base_option = params.get("base_option", None) keep_relative = "yes" == params.get("keep_relative", 'no') virsh_dargs = {'debug': True} # Process domain disk device parameters disk_type = params.get("disk_type") disk_target = params.get("disk_target", 'vda') disk_src_protocol = params.get("disk_source_protocol") restart_tgtd = params.get("restart_tgtd", "no") vol_name = params.get("vol_name") tmp_dir = data_dir.get_tmp_dir() pool_name = params.get("pool_name", "gluster-pool") brick_path = os.path.join(tmp_dir, pool_name) # A backup of original vm vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) logging.debug("original xml is %s", vmxml_backup) # Abort the test if there are snapshots already exsiting_snaps = virsh.snapshot_list(vm_name) if len(exsiting_snaps) != 0: raise error.TestFail("There are snapshots created for %s already" % vm_name) snapshot_external_disks = [] try: if disk_src_protocol == 'iscsi' and disk_type == 'network': if not libvirt_version.version_compare(1, 0, 4): raise error.TestNAError("'iscsi' disk doesn't support in" " current libvirt version.") if disk_src_protocol == 'gluster': if not libvirt_version.version_compare(1, 2, 7): raise error.TestNAError("Snapshot on glusterfs not" " support in current " "version. Check more info " " with https://bugzilla.re" "dhat.com/show_bug.cgi?id=" "1017289") # Set vm xml and guest agent if replace_vm_disk: if disk_src_protocol == "rbd" and disk_type == "network": src_host = params.get("disk_source_host", "EXAMPLE_HOSTS") mon_host = params.get("mon_host", "EXAMPLE_MON_HOST") if src_host.count("EXAMPLE") or mon_host.count("EXAMPLE"): raise error.TestNAError("Please provide ceph host first.") libvirt.set_vm_disk(vm, params, tmp_dir) if needs_agent: vm.prepare_guest_agent() # The first disk is supposed to include OS # We will perform blockpull operation for it. 
        first_disk = vm.get_first_disk_devices()
        blk_source = first_disk['source']
        blk_target = first_disk['target']
        snapshot_flag_files = []

        # get a vm session before snapshot
        session = vm.wait_for_login()
        # do snapshot
        make_disk_snapshot()

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug("The domain xml after snapshot is %s" % vmxml)

        # snapshot src file list
        snap_src_lst = [blk_source]
        snap_src_lst += snapshot_external_disks

        if snap_in_mirror:
            blockpull_options = "--bandwidth 1"
        else:
            blockpull_options = "--wait --verbose"

        if with_timeout:
            blockpull_options += " --timeout 1"

        if bandwidth:
            blockpull_options += " --bandwidth %s" % bandwidth

        if base_option == "async":
            blockpull_options += " --async"

        base_image = None
        base_index = None
        if (libvirt_version.version_compare(1, 2, 4) or
                disk_src_protocol == 'gluster'):
            # For new enough libvirt the base can be addressed by chain index
            if base_option == "shallow":
                base_index = 1
                base_image = "%s[%s]" % (disk_target, base_index)
            elif base_option == "base":
                base_index = 2
                base_image = "%s[%s]" % (disk_target, base_index)
            elif base_option == "top":
                base_index = 0
                base_image = "%s[%s]" % (disk_target, base_index)
        else:
            if base_option == "shallow":
                base_image = snap_src_lst[3]
            elif base_option == "base":
                base_image = snap_src_lst[2]
            elif base_option == "top":
                base_image = snap_src_lst[4]

        if base_option and base_image:
            blockpull_options += " --base %s" % base_image

        if keep_relative:
            blockpull_options += " --keep-relative"

        # Run test case
        result = virsh.blockpull(vm_name, blk_target,
                                 blockpull_options, **virsh_dargs)
        status = result.exit_status

        # If the pull job aborted on timeout, the exit status differs
        # between RHEL6 (0) and RHEL7 (1)
        if with_timeout and 'Pull aborted' in result.stdout:
            if libvirt_version.version_compare(1, 1, 1):
                status_error = True
            else:
                status_error = False

        # Check status_error
        libvirt.check_exit_status(result, status_error)

        if not status and not with_timeout:
            if snap_in_mirror:
                snap_mirror_path = "%s/snap_mirror" % tmp_dir
                snap_options = "--diskspec vda,snapshot=external,"
                snap_options += "file=%s --disk-only" % snap_mirror_path
                snapshot_external_disks.append(snap_mirror_path)
                ret = virsh.snapshot_create_as(vm_name, snap_options,
                                               ignore_status=True,
                                               debug=True)
                libvirt.check_exit_status(ret, snap_in_mirror_err)
                return

            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            disks = vmxml.devices.by_device_tag('disk')
            for disk in disks:
                if disk.target['dev'] != blk_target:
                    continue
                else:
                    disk_xml = disk.xmltreefile
                    break

            logging.debug("after pull the disk xml is: %s" % disk_xml)
            if libvirt_version.version_compare(1, 2, 4):
                err_msg = "Domain image backing chain check failed"
                if not base_option or "async" in base_option:
                    chain_lst = snap_src_lst[-1:]
                    ret = check_chain_xml(disk_xml, chain_lst)
                    if not ret:
                        raise error.TestFail(err_msg)
                elif "base" in base_option or "shallow" in base_option:
                    chain_lst = snap_src_lst[::-1]
                    if not base_index and base_image:
                        base_index = chain_lst.index(base_image)
                    val_tmp = []
                    for i in range(1, base_index):
                        val_tmp.append(chain_lst[i])
                    for i in val_tmp:
                        chain_lst.remove(i)
                    ret = check_chain_xml(disk_xml, chain_lst)
                    if not ret:
                        raise error.TestFail(err_msg)

        # If the base image is the top layer of the snapshot chain,
        # virsh blockpull should fail, so return directly
        if base_option == "top":
            return

        # Check flag files
        for flag in snapshot_flag_files:
            status, output = session.cmd_status_output("cat %s" % flag)
            if status:
                raise error.TestFail("blockpull failed: %s" % output)
    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Recover xml of vm.
vmxml_backup.sync("--snapshots-metadata") if not disk_src_protocol or disk_src_protocol != 'gluster': for disk in snapshot_external_disks: if os.path.exists(disk): os.remove(disk) libvirtd = utils_libvirtd.Libvirtd() if disk_src_protocol == 'iscsi': libvirt.setup_or_cleanup_iscsi(is_setup=False, restart_tgtd=restart_tgtd) elif disk_src_protocol == 'gluster': libvirt.setup_or_cleanup_gluster(False, vol_name, brick_path) libvirtd.restart() elif disk_src_protocol == 'netfs': restore_selinux = params.get('selinux_status_bak') libvirt.setup_or_cleanup_nfs(is_setup=False, restore_selinux=restore_selinux)
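
# Illustrative sketch: the "%s[%s]" base notation built in the test above
# addresses a backing-chain element by index, where index 0 is the top
# (active) image and higher indexes walk toward the base.  A tiny helper
# mirroring the mapping this test uses (the helper itself is illustrative,
# not a framework API):
def _blockpull_base_arg(disk_target, base_option):
    # shallow: pull from the immediate backing file (index 1)
    # base:    pull from one layer deeper       (index 2 in this test)
    # top:     the active layer itself (index 0) - expected to fail
    index_map = {"shallow": 1, "base": 2, "top": 0}
    if base_option in index_map:
        return "%s[%d]" % (disk_target, index_map[base_option])
    return None

# e.g. _blockpull_base_arg("vda", "shallow") -> "vda[1]", which yields
# "virsh blockpull <domain> vda --base vda[1] --wait --verbose"
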
def run(test, params, env):
    """
    Test command: virsh blockcopy.

    This command can copy a disk backing image chain to dest.
    1. Positive testing
        1.1 Copy a disk to a new image file.
        1.2 Reuse existing destination copy.
        1.3 Valid blockcopy timeout and bandwidth test.
    2. Negative testing
        2.1 Copy a disk to a non-exist directory.
        2.2 Copy a disk with invalid options.
        2.3 Do block copy for a persistent domain.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    target = params.get("target_disk", "")
    replace_vm_disk = "yes" == params.get("replace_vm_disk", "no")
    copy_to_nfs = "yes" == params.get("copy_to_nfs", "no")
    mnt_path_name = params.get("mnt_path_name")
    # check the source disk
    if not target:
        raise error.TestFail("Require target disk to copy")
    if vm_xml.VMXML.check_disk_exist(vm_name, target):
        logging.debug("Find %s in domain %s.", target, vm_name)
    else:
        raise error.TestFail("Can't find %s in domain %s." %
                             (target, vm_name))
    options = params.get("blockcopy_options", "")
    bandwidth = params.get("blockcopy_bandwidth", "")
    default_timeout = params.get("default_timeout", "300")
    reuse_external = "yes" == params.get("reuse_external", "no")
    persistent_vm = params.get("persistent_vm", "no")
    status_error = "yes" == params.get("status_error", "no")
    active_error = "yes" == params.get("active_error", "no")
    # Note: the cartesian parameter is historically spelled "active_sanp"
    active_snap = "yes" == params.get("active_sanp", "no")
    active_save = "yes" == params.get("active_save", "no")
    check_state_lock = "yes" == params.get("check_state_lock", "no")
    timeout = int(params.get("timeout", 1200))
    rerun_flag = 0

    original_xml = vm.backup_xml()
    new_xml = original_xml
    tmp_dir = data_dir.get_tmp_dir()

    # Prepare dest path params
    dest_path = params.get("dest_path", "")
    dest_format = params.get("dest_format", "")
    # Ugh... this piece of chicanery brought to you by the QemuImg which
    # will "add" the 'dest_format' extension during the check_format code.
    # So if we create the file with the extension and then remove it when
    # doing the check_format later, then we avoid erroneous failures.
    dest_extension = ""
    if dest_format != "":
        dest_extension = ".%s" % dest_format

    if not dest_path:
        tmp_file = time.strftime("%Y-%m-%d-%H.%M.%S.img")
        tmp_file += dest_extension
        if copy_to_nfs:
            tmp_dir = "%s/%s" % (tmp_dir, mnt_path_name)
        dest_path = os.path.join(tmp_dir, tmp_file)

    # Prepare for --reuse-external option
    if reuse_external:
        options += "--reuse-external"
        # Set rerun_flag=1 to do blockcopy twice, and the first time created
        # file can be reused in the second time if no dest_path given
        # This will make sure the image size equals the original disk size
        if dest_path == "/path/non-exist":
            if os.path.exists(dest_path) and not os.path.isdir(dest_path):
                os.remove(dest_path)
        else:
            rerun_flag = 1

    # Prepare other options
    if dest_format == "raw":
        options += "--raw"
    if len(bandwidth):
        options += "--bandwidth %s" % bandwidth

    # Prepare acl options
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")
        if not copy_to_nfs:
            raise error.TestNAError("Bug will not fix:"
                                    " https://bugzilla.redhat.com/show_bug.cgi?id=924151")

    extra_dict = {'uri': uri, 'unprivileged_user': unprivileged_user,
                  'debug': True, 'ignore_status': True, 'timeout': timeout}

    libvirtd_utl = utils_libvirtd.Libvirtd()
    libvirtd_conf = utils_config.LibvirtdConfig()
    libvirtd_conf["log_filters"] = '"3:json 1:libvirt 1:qemu"'
    libvirtd_log_path = os.path.join(test.tmpdir, "libvirtd.log")
    libvirtd_conf["log_outputs"] = '"1:file:%s"' % libvirtd_log_path
    logging.debug("the libvirtd config file content is:\n %s" %
                  libvirtd_conf)
    libvirtd_utl.restart()

    bug_log = "Hit on bug: https://bugzilla.redhat.com/show_bug.cgi?id=1134294"

    def check_format(dest_path, dest_extension, expect):
        """
        Check the image format

        :param dest_path: Path of the copy to create
        :param expect: Expect image format
        """
        # And now because the QemuImg will add the extension for us
        # we have to remove it here.  Note: str.strip() strips *characters*,
        # not a suffix, so cut the extension off explicitly instead.
        if dest_extension and dest_path.endswith(dest_extension):
            path_noext = dest_path[:-len(dest_extension)]
        else:
            path_noext = dest_path
        params['image_name'] = path_noext
        params['image_format'] = expect
        image = qemu_storage.QemuImg(params, "/", path_noext)
        if image.get_format() == expect:
            logging.debug("%s format is %s.", dest_path, expect)
        else:
            raise error.TestFail("%s format is not %s." %
                                 (dest_path, expect))

    def blockcopy_chk():
        """
        Raise TestFail when blockcopy hangs with a state change lock
        """
        err_pattern = "Timed out during operation: cannot acquire"
        err_pattern += " state change lock"
        ret = chk_libvirtd_log(libvirtd_log_path, err_pattern, "error")
        if ret:
            raise error.TestFail("blockcopy command hang, %s" % bug_log)

    snap_path = ''
    save_path = ''
    try:
        # Domain disk replacement with desired type
        if replace_vm_disk:
            utl.set_vm_disk(vm, params, tmp_dir)
            new_xml = vm.backup_xml()

        # Prepare transient/persistent vm
        if persistent_vm == "no" and vm.is_persistent():
            vm.undefine()
        elif persistent_vm == "yes" and not vm.is_persistent():
            vm.define(new_xml)

        try:
            # Run blockcopy command
            if rerun_flag == 1:
                options1 = "--wait --raw --finish --verbose"
                cmd_result = virsh.blockcopy(vm_name, target, dest_path,
                                             options1, **extra_dict)
                status = cmd_result.exit_status
                if status != 0:
                    raise error.TestFail("Run blockcopy command fail.")
                elif not os.path.exists(dest_path):
                    raise error.TestFail("Cannot find the created copy.")

            cmd_result = virsh.blockcopy(vm_name, target, dest_path,
                                         options, **extra_dict)
            status = cmd_result.exit_status

            if not libvirtd_utl.is_running():
                raise error.TestFail("Libvirtd service is dead.")

            if not status_error:
                if status == 0:
                    check_xml(vm_name, target, dest_path, options)
                    if options.count("--bandwidth"):
                        utl.check_blockjob(vm_name, target,
                                           "bandwidth", bandwidth)
                    if check_state_lock:
                        # Run blockjob pivot in a subprocess as it will hang
                        # for a while, then run blockjob info again to check
                        # the job state
                        command = "virsh blockjob %s %s --pivot" % (vm_name,
                                                                    target)
                        session = aexpect.ShellSession(command)
                        ret = virsh.blockjob(vm_name, target, "--info")
                        err_info = "cannot acquire state change lock"
                        if err_info in ret.stderr:
                            logging.error(bug_log)
                        utl.check_exit_status(ret, status_error)
                        session.close()
                    val = options.count("--pivot") + options.count("--finish")
                    if val == 0:
                        finish_job(vm_name, target, default_timeout)
                    if options.count("--raw"):
                        check_format(dest_path, dest_extension, dest_format)
                    if active_snap:
                        snap_path = "%s/%s.snap" % (tmp_dir, vm_name)
                        snap_opt = "--disk-only --atomic --no-metadata "
                        snap_opt += "vda,snapshot=external,file=%s" % snap_path
                        ret = virsh.snapshot_create_as(vm_name, snap_opt,
                                                       ignore_status=True,
                                                       debug=True)
                        utl.check_exit_status(ret, active_error)
                    if active_save:
                        save_path = "%s/%s.save" % (tmp_dir, vm_name)
                        ret = virsh.save(vm_name, save_path,
                                         ignore_status=True,
                                         debug=True)
                        utl.check_exit_status(ret, active_error)
                else:
                    err_msg = "internal error: unable to execute QEMU command"
                    err_msg += " 'block-job-complete'"
                    if err_msg in cmd_result.stderr:
                        logging.error(bug_log)
                    raise error.TestFail(cmd_result.stderr)
            else:
                if status:
                    logging.debug("Expect error: %s", cmd_result.stderr)
                else:
                    # Commit id '4c297728' changed how virsh exits when
                    # unexpectedly failing due to timeout from a fail (1)
                    # to a success(0), so we need to look for a different
                    # marker to indicate the copy aborted
                    if options.count("--timeout") and \
                       options.count("--wait") and \
                       re.search("Copy aborted", cmd_result.stdout):
                        logging.debug("Found a successfully timed out "
                                      "block copy")
                    else:
                        raise error.TestFail("Expect fail, but run "
                                             "successfully.")
        except (JobTimeout, Exception) as excpt:
            blockcopy_chk()
            if not status_error:
                raise error.TestFail("Run command failed: %s" % excpt)
    finally:
        # Restore libvirtd conf and restart libvirtd
        libvirtd_conf.restore()
        libvirtd_utl.restart()
        if libvirtd_log_path and os.path.exists(libvirtd_log_path):
            os.unlink(libvirtd_log_path)
        if vm.is_alive():
vm.destroy(gracefully=False) virsh.define(original_xml) if copy_to_nfs: restore_selinux = params.get('selinux_status_bak') utl.setup_or_cleanup_nfs(is_setup=False, restore_selinux=restore_selinux) if os.path.exists(original_xml): os.remove(original_xml) if os.path.exists(new_xml): os.remove(new_xml) if os.path.exists(dest_path): os.remove(dest_path) if os.path.exists(snap_path): os.remove(snap_path) if os.path.exists(save_path): os.remove(save_path)
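
# Illustrative sketch: check_format() above verifies the copy through the
# QemuImg wrapper; the same check can be done with plain qemu-img (binary
# assumed in $PATH).  "file format:" is part of qemu-img's human-readable
# info output:
def _image_format(path):
    import subprocess
    out = subprocess.run(["qemu-img", "info", path],
                         capture_output=True, text=True,
                         check=True).stdout
    for line in out.splitlines():
        if line.startswith("file format:"):
            return line.split(":", 1)[1].strip()
    return None

# e.g. after "virsh blockcopy <dom> vda /tmp/copy.raw --raw --wait",
# _image_format("/tmp/copy.raw") is expected to return "raw"
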
def run(test, params, env):
    """
    Test command: virsh restore.

    Restore a domain from a saved state in a file
    1. Prepare test environment.
    2. When the libvirtd == "off", stop the libvirtd service.
    3. Run virsh restore command with assigned option.
    4. Recover test environment.
    5. Confirm the test result.
    """

    def check_file_own(file_path, exp_uid, exp_gid):
        """
        Check the uid and gid of file_path

        :param file_path: The file path
        :param exp_uid: The expected uid
        :param exp_gid: The expected gid
        :raise: test.fail if the uid and gid of file are not expected
        """
        fstat_res = os.stat(file_path)
        if fstat_res.st_uid != exp_uid or fstat_res.st_gid != exp_gid:
            test.fail("The uid.gid {}.{} is not expected, it should be "
                      "{}.{}.".format(fstat_res.st_uid, fstat_res.st_gid,
                                      exp_uid, exp_gid))

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    os_type = params.get("os_type")
    status_error = ("yes" == params.get("status_error"))
    libvirtd = params.get("libvirtd", "on")
    # Default to an empty string so the "--xml" membership test below
    # cannot fail on None
    extra_param = params.get("restore_extra_param", "")
    pre_status = params.get("restore_pre_status")
    vm_ref = params.get("restore_vm_ref")
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    time_before_save = int(params.get('time_before_save', 0))
    setup_nfs = "yes" == params.get("setup_nfs", "no")
    setup_iscsi = "yes" == params.get("setup_iscsi", "no")
    check_log = params.get("check_log")
    check_str_not_in_log = params.get("check_str_not_in_log")
    qemu_conf_dict = eval(params.get("qemu_conf_dict", "{}"))

    vm_ref_uid = None
    vm_ref_gid = None
    qemu_conf = None

    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")
    try:
        if "--xml" in extra_param:
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name,
                                                  options="--migratable")
            backup_xml = vmxml.copy()
            # Grant more privilege on the file so that an unprivileged
            # user can access it.
os.chmod(vmxml.xml, stat.S_IRWXU | stat.S_IRGRP | stat.S_IROTH) if not setup_nfs: extra_param = "--xml %s" % vmxml.xml dict_os_attrs = {} if "hd" in vmxml.os.boots: dict_os_attrs.update({"boots": ["cdrom"]}) vmxml.set_os_attrs(**dict_os_attrs) else: test.cancel("Please add 'hd' in boots for --xml testing") logging.info("vmxml os is %s after update" % vmxml.os.xmltreefile) else: params["mnt_path_name"] = params.get("nfs_mount_dir") vm_ref_uid = params["change_file_uid"] = pwd.getpwnam( "qemu").pw_uid vm_ref_gid = params["change_file_gid"] = grp.getgrnam( "qemu").gr_gid libvirt.set_vm_disk(vm, params) session = vm.wait_for_login() # Clear log file if check_log: cmd = "> %s" % check_log process.run(cmd, shell=True, verbose=True) if qemu_conf_dict: logging.debug("Update qemu configuration file.") qemu_conf = libvirt.customize_libvirt_config( qemu_conf_dict, "qemu") process.run("cat /etc/libvirt/qemu.conf", shell=True, verbose=True) # run test if vm_ref == "" or vm_ref == "xyz": status = virsh.restore(vm_ref, extra_param, debug=True, unprivileged_user=unprivileged_user, uri=uri, ignore_status=True).exit_status else: if os_type == "linux": cmd = "cat /proc/cpuinfo" try: status, output = session.cmd_status_output(cmd, timeout=10) finally: session.close() if not re.search("processor", output): test.fail("Unable to read /proc/cpuinfo") tmp_file = os.path.join(data_dir.get_tmp_dir(), "save.file") if setup_iscsi: tmp_file = libvirt.setup_or_cleanup_iscsi(is_setup=True, is_login=True, image_size='1G') time.sleep(time_before_save) ret = virsh.save(vm_name, tmp_file, debug=True) libvirt.check_exit_status(ret) if vm_ref == "saved_file" or setup_iscsi: vm_ref = tmp_file elif vm_ref == "empty_new_file": tmp_file = os.path.join(data_dir.get_tmp_dir(), "new.file") with open(tmp_file, 'w') as tmp: pass vm_ref = tmp_file # Change the ownership of the saved file if vm_ref_uid and vm_ref_gid: os.chown(vm_ref, vm_ref_uid, vm_ref_gid) tmpdir = data_dir.get_tmp_dir() dump_xml = os.path.join(tmpdir, "test.xml") virsh.save_image_dumpxml(vm_ref, "> %s" % dump_xml) extra_param = "--xml %s" % dump_xml check_file_own(vm_ref, vm_ref_uid, vm_ref_gid) if vm.is_alive(): vm.destroy() if pre_status == "start": virsh.start(vm_name) if libvirtd == "off": utils_libvirtd.libvirtd_stop() status = virsh.restore(vm_ref, extra_param, debug=True, unprivileged_user=unprivileged_user, uri=uri, ignore_status=True).exit_status if not status_error: list_output = virsh.dom_list().stdout.strip() session.close() # recover libvirtd service start if libvirtd == "off": utils_libvirtd.libvirtd_start() if status_error: if not status: if libvirtd == "off" and libvirt_version.version_compare( 5, 6, 0): logging.info( "From libvirt version 5.6.0 libvirtd is restarted " "and command should succeed") else: test.fail("Run successfully with wrong command!") else: if status: test.fail("Run failed with right command") if not re.search(vm_name, list_output): test.fail("Run failed with right command") if extra_param.count("paused"): if not vm.is_paused(): test.fail("Guest state should be" " paused after restore" " due to the option --paused") if (extra_param.count("running") or extra_param.count("xml") or not extra_param): if vm.is_dead() or vm.is_paused(): test.fail("Guest state should be" " running after restore") if extra_param.count("xml"): if not setup_nfs: aft_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) boots_list = aft_vmxml.os.boots if "hd" in boots_list or "cdrom" not in boots_list: test.fail("Update xml with restore failed") else: if vm_ref_uid 
and vm_ref_gid: check_file_own(vm_ref, vm_ref_uid, vm_ref_gid) vm.destroy() check_file_own(vm_ref, vm_ref_uid, vm_ref_gid) if check_str_not_in_log and check_log: libvirt.check_logfile(check_str_not_in_log, check_log, False) finally: if vm.is_paused(): virsh.resume(vm_name) if "--xml" in extra_param: backup_xml.sync() if setup_nfs: libvirt.setup_or_cleanup_nfs(is_setup=False, mount_dir=params.get("mnt_path_name"), export_dir=params.get("export_dir"), rm_export_dir=False) if setup_iscsi: libvirt.setup_or_cleanup_iscsi(False)
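
# Illustrative sketch: the positive path of this test reduced to its
# essentials, using the same avocado-vt virsh wrappers as above (the save
# path is a placeholder and error handling is deliberately minimal):
def _save_restore_round_trip(vm_name, save_file="/tmp/save.file"):
    ret = virsh.save(vm_name, save_file, debug=True)
    if ret.exit_status:
        raise RuntimeError("virsh save failed: %s" % ret.stderr)
    # virsh restore accepts extra options such as "--paused" or
    # "--xml <file>" through the second argument, as the test exercises
    ret = virsh.restore(save_file, "", debug=True)
    if ret.exit_status:
        raise RuntimeError("virsh restore failed: %s" % ret.stderr)
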
def run(test, params, env): """ Test command: virsh blockcommit <domain> <path> 1) Prepare test environment. 2) Commit changes from a snapshot down to its backing image. 3) Recover test environment. 4) Check result. """ def make_disk_snapshot(postfix_n, snapshot_take): """ Make external snapshots for disks only. :param postfix_n: postfix option :param snapshot_take: snapshots taken. """ # Add all disks into command line. disks = vm.get_disk_devices() # Make three external snapshots for disks only for count in range(1, snapshot_take): options = "%s_%s %s%s-desc " % (postfix_n, count, postfix_n, count) options += "--disk-only --atomic --no-metadata" if needs_agent: options += " --quiesce" for disk in disks: disk_detail = disks[disk] basename = os.path.basename(disk_detail['source']) # Remove the original suffix if any, appending # ".postfix_n[0-9]" diskname = basename.split(".")[0] snap_name = "%s.%s%s" % (diskname, postfix_n, count) disk_external = os.path.join(tmp_dir, snap_name) snapshot_external_disks.append(disk_external) options += " %s,snapshot=external,file=%s" % (disk, disk_external) cmd_result = virsh.snapshot_create_as(vm_name, options, ignore_status=True, debug=True) status = cmd_result.exit_status if status != 0: test.fail("Failed to make snapshots for disks!") # Create a file flag in VM after each snapshot flag_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"), dir="/tmp") file_path = flag_file.name flag_file.close() status, output = session.cmd_status_output("touch %s" % file_path) if status: test.fail("Touch file in vm failed. %s" % output) snapshot_flag_files.append(file_path) def get_first_disk_source(): """ Get disk source of first device :return: first disk of first device. """ first_device = vm.get_first_disk_devices() first_disk_src = first_device['source'] return first_disk_src def make_relative_path_backing_files(): """ Create backing chain files of relative path. :return: absolute path of top active file """ first_disk_source = get_first_disk_source() basename = os.path.basename(first_disk_source) root_dir = os.path.dirname(first_disk_source) cmd = "mkdir -p %s" % os.path.join(root_dir, '{b..d}') ret = process.run(cmd, shell=True) libvirt.check_exit_status(ret) # Make three external relative path backing files. backing_file_dict = collections.OrderedDict() backing_file_dict["b"] = "../%s" % basename backing_file_dict["c"] = "../b/b.img" backing_file_dict["d"] = "../c/c.img" for key, value in list(backing_file_dict.items()): backing_file_path = os.path.join(root_dir, key) cmd = ("cd %s && qemu-img create -f qcow2 -o backing_file=%s,backing_fmt=qcow2 %s.img" % (backing_file_path, value, key)) ret = process.run(cmd, shell=True) libvirt.check_exit_status(ret) return os.path.join(backing_file_path, "d.img") def check_chain_backing_files(disk_src_file, expect_backing_file=False): """ Check backing chain files of relative path after blockcommit. :param disk_src_file: first disk src file. :param expect_backing_file: whether it expect to have backing files. 
""" first_disk_source = get_first_disk_source() # Validate source image need refer to original one after active blockcommit if not expect_backing_file and disk_src_file not in first_disk_source: test.fail("The disk image path:%s doesn't include the origin image: %s" % (first_disk_source, disk_src_file)) # Validate source image doesn't have backing files after active blockcommit cmd = "qemu-img info %s --backing-chain" % first_disk_source if qemu_img_locking_feature_support: cmd = "qemu-img info -U %s --backing-chain" % first_disk_source ret = process.run(cmd, shell=True).stdout_text.strip() if expect_backing_file: if 'backing file' not in ret: test.fail("The disk image doesn't have backing files") else: logging.debug("The actual qemu-img output:%s\n", ret) else: if 'backing file' in ret: test.fail("The disk image still have backing files") else: logging.debug("The actual qemu-img output:%s\n", ret) # MAIN TEST CODE ### # Process cartesian parameters vm_name = params.get("main_vm") vm = env.get_vm(vm_name) snapshot_take = int(params.get("snapshot_take", '0')) vm_state = params.get("vm_state", "running") needs_agent = "yes" == params.get("needs_agent", "yes") replace_vm_disk = "yes" == params.get("replace_vm_disk", "no") top_inactive = ("yes" == params.get("top_inactive")) with_timeout = ("yes" == params.get("with_timeout_option", "no")) status_error = ("yes" == params.get("status_error", "no")) base_option = params.get("base_option", "none") middle_base = "yes" == params.get("middle_base", "no") pivot_opt = "yes" == params.get("pivot_opt", "no") snap_in_mirror = "yes" == params.get("snap_in_mirror", "no") snap_in_mirror_err = "yes" == params.get("snap_in_mirror_err", "no") with_active_commit = "yes" == params.get("with_active_commit", "no") multiple_chain = "yes" == params.get("multiple_chain", "no") virsh_dargs = {'debug': True} # Check whether qemu-img need add -U suboption since locking feature was added afterwards qemu-2.10 qemu_img_locking_feature_support = libvirt_storage.check_qemu_image_lock_support() backing_file_relative_path = "yes" == params.get("backing_file_relative_path", "no") # Process domain disk device parameters disk_type = params.get("disk_type") disk_src_protocol = params.get("disk_source_protocol") restart_tgtd = params.get("restart_tgtd", 'no') vol_name = params.get("vol_name") tmp_dir = data_dir.get_tmp_dir() pool_name = params.get("pool_name", "gluster-pool") brick_path = os.path.join(tmp_dir, pool_name) if not top_inactive: if not libvirt_version.version_compare(1, 2, 4): test.cancel("live active block commit is not supported" " in current libvirt version.") # A backup of original vm vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) # Abort the test if there are snapshots already exsiting_snaps = virsh.snapshot_list(vm_name) if len(exsiting_snaps) != 0: test.fail("There are snapshots created for %s already" % vm_name) snapshot_external_disks = [] cmd_session = None try: if disk_src_protocol == 'iscsi' and disk_type == 'network': if not libvirt_version.version_compare(1, 0, 4): test.cancel("'iscsi' disk doesn't support in" " current libvirt version.") # Set vm xml and guest agent if replace_vm_disk: if disk_src_protocol == "rbd" and disk_type == "network": src_host = params.get("disk_source_host", "EXAMPLE_HOSTS") mon_host = params.get("mon_host", "EXAMPLE_MON_HOST") if src_host.count("EXAMPLE") or mon_host.count("EXAMPLE"): test.cancel("Please provide rbd host first.") if backing_file_relative_path: if vm.is_alive(): vm.destroy(gracefully=False) 
first_src_file = get_first_disk_source() blk_source_image = os.path.basename(first_src_file) blk_source_folder = os.path.dirname(first_src_file) replace_disk_image = make_relative_path_backing_files() params.update({'disk_source_name': replace_disk_image, 'disk_type': 'file', 'disk_src_protocol': 'file'}) vm.start() libvirt.set_vm_disk(vm, params, tmp_dir) if needs_agent: vm.prepare_guest_agent() # The first disk is supposed to include OS # We will perform blockcommit operation for it. first_disk = vm.get_first_disk_devices() blk_source = first_disk['source'] blk_target = first_disk['target'] snapshot_flag_files = [] # get a vm session before snapshot session = vm.wait_for_login() # do snapshot postfix_n = 'snap' make_disk_snapshot(postfix_n, snapshot_take) basename = os.path.basename(blk_source) diskname = basename.split(".")[0] snap_src_lst = [blk_source] if multiple_chain: snap_name = "%s.%s1" % (diskname, postfix_n) snap_top = os.path.join(tmp_dir, snap_name) top_index = snapshot_external_disks.index(snap_top) + 1 omit_list = snapshot_external_disks[top_index:] vm.destroy(gracefully=False) vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disk_xml = '' disk_xmls = vmxml.get_devices(device_type="disk") for disk in disk_xmls: if disk.get('device_tag') == 'disk': disk_xml = disk break vmxml.del_device(disk_xml) disk_dict = {'attrs': {'file': snap_top}} disk_xml.source = disk_xml.new_disk_source(**disk_dict) vmxml.add_device(disk_xml) vmxml.sync() vm.start() session = vm.wait_for_login() postfix_n = 'new_snap' make_disk_snapshot(postfix_n, snapshot_take) snap_src_lst = [blk_source] snap_src_lst += snapshot_external_disks logging.debug("omit list is %s", omit_list) for i in omit_list: snap_src_lst.remove(i) else: # snapshot src file list snap_src_lst += snapshot_external_disks backing_chain = '' for i in reversed(list(range(snapshot_take))): if i == 0: backing_chain += "%s" % snap_src_lst[i] else: backing_chain += "%s -> " % snap_src_lst[i] logging.debug("The backing chain is: %s" % backing_chain) # check snapshot disk xml backingStore is expected vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disks = vmxml.devices.by_device_tag('disk') disk_xml = None for disk in disks: if disk.target['dev'] != blk_target: continue else: if disk.device != 'disk': continue disk_xml = disk.xmltreefile logging.debug("the target disk xml after snapshot is %s", disk_xml) break if not disk_xml: test.fail("Can't find disk xml with target %s" % blk_target) elif libvirt_version.version_compare(1, 2, 4): # backingStore element introuduced in 1.2.4 chain_lst = snap_src_lst[::-1] ret = check_chain_xml(disk_xml, chain_lst) if not ret: test.fail("Domain image backing chain check failed") # set blockcommit_options top_image = None blockcommit_options = "--wait --verbose" if with_timeout: blockcommit_options += " --timeout 1" if base_option == "shallow": blockcommit_options += " --shallow" elif base_option == "base": if middle_base: snap_name = "%s.%s1" % (diskname, postfix_n) blk_source = os.path.join(tmp_dir, snap_name) blockcommit_options += " --base %s" % blk_source if top_inactive: snap_name = "%s.%s2" % (diskname, postfix_n) top_image = os.path.join(tmp_dir, snap_name) blockcommit_options += " --top %s" % top_image else: blockcommit_options += " --active" if pivot_opt: blockcommit_options += " --pivot" if vm_state == "shut off": vm.destroy(gracefully=True) if with_active_commit: # inactive commit follow active commit will fail with bug 1135339 cmd = "virsh blockcommit %s %s --active --pivot" % (vm_name, blk_target) 
cmd_session = aexpect.ShellSession(cmd) if backing_file_relative_path: blockcommit_options = " --active --verbose --shallow --pivot --keep-relative" block_commit_index = snapshot_take expect_backing_file = False # Do block commit using --active for count in range(1, snapshot_take): res = virsh.blockcommit(vm_name, blk_target, blockcommit_options, **virsh_dargs) libvirt.check_exit_status(res, status_error) if top_inactive: blockcommit_options = " --wait --verbose --top vda[1] --base vda[2] --keep-relative" block_commit_index = snapshot_take - 1 expect_backing_file = True # Do block commit with --wait if top_inactive for count in range(1, block_commit_index): res = virsh.blockcommit(vm_name, blk_target, blockcommit_options, **virsh_dargs) libvirt.check_exit_status(res, status_error) check_chain_backing_files(blk_source_image, expect_backing_file) return # Run test case # Active commit does not support on rbd based disk with bug 1200726 result = virsh.blockcommit(vm_name, blk_target, blockcommit_options, **virsh_dargs) # Check status_error libvirt.check_exit_status(result, status_error) if result.exit_status and status_error: return while True: vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disks = vmxml.devices.by_device_tag('disk') for disk in disks: if disk.target['dev'] != blk_target: continue else: disk_xml = disk.xmltreefile break if not top_inactive: disk_mirror = disk_xml.find('mirror') if '--pivot' not in blockcommit_options: if disk_mirror is not None: job_type = disk_mirror.get('job') job_ready = disk_mirror.get('ready') src_element = disk_mirror.find('source') disk_src_file = None for elem in ('file', 'name', 'dev'): elem_val = src_element.get(elem) if elem_val: disk_src_file = elem_val break err_msg = "blockcommit base source " err_msg += "%s not expected" % disk_src_file if '--shallow' in blockcommit_options: if not multiple_chain: if disk_src_file != snap_src_lst[2]: test.fail(err_msg) else: if disk_src_file != snap_src_lst[3]: test.fail(err_msg) else: if disk_src_file != blk_source: test.fail(err_msg) if libvirt_version.version_compare(1, 2, 7): # The job attribute mentions which API started the # operation since 1.2.7. if job_type != 'active-commit': test.fail("blockcommit job type '%s'" " not expected" % job_type) if job_ready != 'yes': # The attribute ready, if present, tracks # progress of the job: yes if the disk is known # to be ready to pivot, or, since 1.2.7, abort # or pivot if the job is in the process of # completing. 
                                continue
                            else:
                                logging.debug("after active block commit job "
                                              "ready for pivot, the target disk"
                                              " xml is %s", disk_xml)
                                break
                        else:
                            break
                    else:
                        break
                else:
                    if disk_mirror is None:
                        logging.debug(disk_xml)
                        if "--shallow" in blockcommit_options:
                            chain_lst = snap_src_lst[::-1]
                            chain_lst.pop(0)
                            ret = check_chain_xml(disk_xml, chain_lst)
                            if not ret:
                                test.fail("Domain image backing "
                                          "chain check failed")
                            cmd_result = virsh.blockjob(vm_name, blk_target,
                                                        '',
                                                        ignore_status=True,
                                                        debug=True)
                            libvirt.check_exit_status(cmd_result)
                        elif "--base" in blockcommit_options:
                            chain_lst = snap_src_lst[::-1]
                            base_index = chain_lst.index(blk_source)
                            chain_lst = chain_lst[base_index:]
                            ret = check_chain_xml(disk_xml, chain_lst)
                            if not ret:
                                test.fail("Domain image backing "
                                          "chain check failed")
                        break
                    else:
                        # wait for pivot after the commit is synced
                        continue
            else:
                logging.debug("after inactive commit the disk xml is: %s"
                              % disk_xml)
                if libvirt_version.version_compare(1, 2, 4):
                    if "--shallow" in blockcommit_options:
                        chain_lst = snap_src_lst[::-1]
                        chain_lst.remove(top_image)
                        ret = check_chain_xml(disk_xml, chain_lst)
                        if not ret:
                            test.fail("Domain image backing chain "
                                      "check failed")
                    elif "--base" in blockcommit_options:
                        chain_lst = snap_src_lst[::-1]
                        top_index = chain_lst.index(top_image)
                        base_index = chain_lst.index(blk_source)
                        val_tmp = []
                        for i in range(top_index, base_index):
                            val_tmp.append(chain_lst[i])
                        for i in val_tmp:
                            chain_lst.remove(i)
                        ret = check_chain_xml(disk_xml, chain_lst)
                        if not ret:
                            test.fail("Domain image backing chain "
                                      "check failed")
                    break
                else:
                    break

        # Check flag files
        if not vm_state == "shut off" and not multiple_chain:
            for flag in snapshot_flag_files:
                status, output = session.cmd_status_output("cat %s" % flag)
                if status:
                    test.fail("blockcommit failed: %s" % output)

        if not pivot_opt and snap_in_mirror:
            # do snapshot during the mirror phase
            snap_path = "%s/%s.snap" % (tmp_dir, vm_name)
            snap_opt = "--disk-only --atomic --no-metadata "
            snap_opt += "vda,snapshot=external,file=%s" % snap_path
            snapshot_external_disks.append(snap_path)
            cmd_result = virsh.snapshot_create_as(vm_name, snap_opt,
                                                  ignore_status=True,
                                                  debug=True)
            libvirt.check_exit_status(cmd_result, snap_in_mirror_err)
    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Recover xml of vm.
        vmxml_backup.sync("--snapshots-metadata")
        if cmd_session:
            cmd_session.close()
        for disk in snapshot_external_disks:
            if os.path.exists(disk):
                os.remove(disk)
        if backing_file_relative_path:
            libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)
            process.run("cd %s && rm -rf b c d" % blk_source_folder,
                        shell=True)
        if disk_src_protocol == 'iscsi':
            libvirt.setup_or_cleanup_iscsi(is_setup=False,
                                           restart_tgtd=restart_tgtd)
        elif disk_src_protocol == 'gluster':
            libvirt.setup_or_cleanup_gluster(False, vol_name, brick_path)
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
        elif disk_src_protocol == 'netfs':
            restore_selinux = params.get('selinux_status_bak')
            libvirt.setup_or_cleanup_nfs(is_setup=False,
                                         restore_selinux=restore_selinux)
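
# Illustrative sketch: the relative backing chain that
# make_relative_path_backing_files() builds above is equivalent to running
# the following from the directory of the original image ("base.img" is a
# placeholder for the first disk's actual file name):
#
#   (cd b && qemu-img create -f qcow2 -o backing_file=../base.img,backing_fmt=qcow2 b.img)
#   (cd c && qemu-img create -f qcow2 -o backing_file=../b/b.img,backing_fmt=qcow2 c.img)
#   (cd d && qemu-img create -f qcow2 -o backing_file=../c/c.img,backing_fmt=qcow2 d.img)
#
# Because every backing_file reference is relative, blockcommit is run with
# --keep-relative so the shortened chain keeps the ../-style paths, which is
# what check_chain_backing_files() then verifies via qemu-img.
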
def run(test, params, env):
    """
    Test the virsh pool commands with acl, initiate a pool then do
    following operations.

    (1) Undefine a given type pool
    (2) Define the pool from xml
    (3) Build given type pool
    (4) Start pool
    (5) Destroy pool
    (6) Refresh pool after start it
    (7) Run vol-list with the pool
    (8) Delete pool

    For negative cases, redo the failed step so that the case can continue.
    Run cleanup at last to restore the env.
    """

    # Initialize the variables
    pool_name = params.get("pool_name", "temp_pool_1")
    pool_type = params.get("pool_type", "dir")
    pool_target = params.get("pool_target", "")
    # The file for dumped pool xml
    pool_xml = os.path.join(test.tmpdir, "pool.xml.tmp")
    if os.path.dirname(pool_target) == "":
        pool_target = os.path.join(test.tmpdir, pool_target)
    vol_name = params.get("vol_name", "temp_vol_1")
    # Use pool name as VG name
    vg_name = pool_name
    vol_path = os.path.join(pool_target, vol_name)
    define_acl = "yes" == params.get("define_acl", "no")
    undefine_acl = "yes" == params.get("undefine_acl", "no")
    start_acl = "yes" == params.get("start_acl", "no")
    destroy_acl = "yes" == params.get("destroy_acl", "no")
    build_acl = "yes" == params.get("build_acl", "no")
    delete_acl = "yes" == params.get("delete_acl", "no")
    refresh_acl = "yes" == params.get("refresh_acl", "no")
    vol_list_acl = "yes" == params.get("vol_list_acl", "no")
    list_dumpxml_acl = "yes" == params.get("list_dumpxml_acl", "no")
    src_pool_error = "yes" == params.get("src_pool_error", "no")
    define_error = "yes" == params.get("define_error", "no")
    undefine_error = "yes" == params.get("undefine_error", "no")
    start_error = "yes" == params.get("start_error", "no")
    destroy_error = "yes" == params.get("destroy_error", "no")
    build_error = "yes" == params.get("build_error", "no")
    delete_error = "yes" == params.get("delete_error", "no")
    refresh_error = "yes" == params.get("refresh_error", "no")
    vol_list_error = "yes" == params.get("vol_list_error", "no")
    # Clean up flags:
    # cleanup_env[0] for nfs, cleanup_env[1] for iscsi, cleanup_env[2] for lvm
    # cleanup_env[3] for selinux backup status, cleanup_env[4] for gluster
    cleanup_env = [False, False, False, "", False]
    # libvirt acl related params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    acl_dargs = {'uri': uri, 'unprivileged_user': unprivileged_user,
                 'debug': True}

    def check_pool_list(pool_name, option="--all", expect_error=False):
        """
        Check pool by running pool-list command with given option.

        :param pool_name: Name of the pool
        :param option: option for pool-list command
        :param expect_error: Boolean value, expect command success or fail
        """
        found = False
        # Get the list stored in a variable
        if list_dumpxml_acl:
            result = virsh.pool_list(option, **acl_dargs)
        else:
            result = virsh.pool_list(option, ignore_status=True)
        utlv.check_exit_status(result, False)
        output = re.findall(r"(\S+)\ +(\S+)\ +(\S+)[\ +\n]",
                            str(result.stdout))
        for item in output:
            if pool_name in item[0]:
                found = True
                break
        if found:
            logging.debug("Find pool '%s' in pool list.", pool_name)
        else:
            logging.debug("Not find pool %s in pool list.", pool_name)
        if expect_error and found:
            raise error.TestFail("Unexpect pool '%s' exist." % pool_name)
        if not expect_error and not found:
            raise error.TestFail("Expect pool '%s' doesn't exist." %
                                 pool_name)
    # Run Testcase
    try:
        _pool = libvirt_storage.StoragePool()
        # Init a pool for test
        result = utlv.define_pool(pool_name, pool_type, pool_target,
                                  cleanup_env)
        utlv.check_exit_status(result, src_pool_error)
        option = "--inactive --type %s" % pool_type
        check_pool_list(pool_name, option)
        if list_dumpxml_acl:
            xml = virsh.pool_dumpxml(pool_name, to_file=pool_xml, **acl_dargs)
        else:
            xml = virsh.pool_dumpxml(pool_name, to_file=pool_xml)
        logging.debug("Pool '%s' XML:\n%s", pool_name, xml)

        # Step (1)
        # Undefine pool
        if undefine_acl:
            result = virsh.pool_undefine(pool_name, **acl_dargs)
        else:
            result = virsh.pool_undefine(pool_name, ignore_status=True)
        utlv.check_exit_status(result, undefine_error)
        if undefine_error:
            check_pool_list(pool_name, "--all", False)
            # Redo under negative case to keep case continue
            result = virsh.pool_undefine(pool_name, ignore_status=True)
            utlv.check_exit_status(result)
            check_pool_list(pool_name, "--all", True)
        else:
            check_pool_list(pool_name, "--all", True)

        # Step (2)
        # Define pool from XML file
        if define_acl:
            result = virsh.pool_define(pool_xml, **acl_dargs)
        else:
            result = virsh.pool_define(pool_xml)
        utlv.check_exit_status(result, define_error)
        if define_error:
            # Redo under negative case to keep case continue
            result = virsh.pool_define(pool_xml)
            utlv.check_exit_status(result)

        # Step (3)
        # Build pool, this step may fail for 'disk' and 'logical' type pools
        if pool_type not in ["disk", "logical"]:
            option = ""
            # Options --overwrite and --no-overwrite can only be used to
            # build a filesystem pool, but it will fail for now
            # if pool_type == "fs":
            #     option = '--overwrite'
            if build_acl:
                result = virsh.pool_build(pool_name, option, **acl_dargs)
            else:
                result = virsh.pool_build(pool_name, option,
                                          ignore_status=True)
            utlv.check_exit_status(result, build_error)
            if build_error:
                # Redo under negative case to keep case continue
                result = virsh.pool_build(pool_name, option,
                                          ignore_status=True)
                utlv.check_exit_status(result)

        # For an iSCSI pool, we need to discover targets before starting
        # the pool
        if pool_type == 'iscsi':
            cmd = 'iscsiadm -m discovery -t sendtargets -p 127.0.0.1'
            process.run(cmd, shell=True)

        # Step (4)
        # Pool start
        if start_acl:
            result = virsh.pool_start(pool_name, **acl_dargs)
        else:
            result = virsh.pool_start(pool_name, ignore_status=True)
        utlv.check_exit_status(result, start_error)
        if start_error:
            # Redo under negative case to keep case continue
            result = virsh.pool_start(pool_name, ignore_status=True)
            utlv.check_exit_status(result)

        option = "--persistent --type %s" % pool_type
        check_pool_list(pool_name, option)

        # Step (5)
        # Pool destroy
        if destroy_acl:
            result = virsh.pool_destroy(pool_name, **acl_dargs)
        else:
            result = virsh.pool_destroy(pool_name)
        if result:
            if destroy_error:
                raise error.TestFail("Expected failure, but the command "
                                     "ran successfully.")
        else:
            if not destroy_error:
                raise error.TestFail("Pool %s destroy failed unexpectedly."
                                     % pool_name)
            else:
                # Redo under negative case to keep case continue
                if virsh.pool_destroy(pool_name):
                    logging.debug("Pool %s destroyed.", pool_name)
                else:
                    raise error.TestFail("Destroy pool %s failed."
                                         % pool_name)
        # Step (6)
        # Pool refresh for 'dir' type pool
        # Pool start
        result = virsh.pool_start(pool_name, ignore_status=True)
        utlv.check_exit_status(result)
        if pool_type == "dir":
            os.mknod(vol_path)
            if refresh_acl:
                result = virsh.pool_refresh(pool_name, **acl_dargs)
            else:
                result = virsh.pool_refresh(pool_name)
            utlv.check_exit_status(result, refresh_error)

        # Step (7)
        # Pool vol-list
        if vol_list_acl:
            result = virsh.vol_list(pool_name, **acl_dargs)
        else:
            result = virsh.vol_list(pool_name)
        utlv.check_exit_status(result, vol_list_error)

        # Step (8)
        # Pool delete for 'dir' type pool
        if virsh.pool_destroy(pool_name):
            logging.debug("Pool %s destroyed.", pool_name)
        else:
            raise error.TestFail("Destroy pool %s failed." % pool_name)
        if pool_type == "dir":
            if os.path.exists(vol_path):
                os.remove(vol_path)
            if delete_acl:
                result = virsh.pool_delete(pool_name, **acl_dargs)
            else:
                result = virsh.pool_delete(pool_name, ignore_status=True)
            utlv.check_exit_status(result, delete_error)
            option = "--inactive --type %s" % pool_type
            check_pool_list(pool_name, option)
            if not delete_error:
                if os.path.exists(pool_target):
                    raise error.TestFail("The target path '%s' still exists."
                                         % pool_target)

        result = virsh.pool_undefine(pool_name, ignore_status=True)
        utlv.check_exit_status(result)
        check_pool_list(pool_name, "--all", True)
    finally:
        # Clean up
        if os.path.exists(pool_xml):
            os.remove(pool_xml)
        if not _pool.delete_pool(pool_name):
            logging.error("Can't delete pool: %s", pool_name)
        if cleanup_env[2]:
            cmd = "pvs |grep %s |awk '{print $1}'" % vg_name
            pv_name = process.system_output(cmd, shell=True)
            lv_utils.vg_remove(vg_name)
            process.run("pvremove %s" % pv_name, shell=True)
        if cleanup_env[1]:
            utlv.setup_or_cleanup_iscsi(False)
        if cleanup_env[0]:
            utlv.setup_or_cleanup_nfs(
                False, restore_selinux=cleanup_env[3])
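
# A self-contained illustration of the pool-list parsing performed by
# check_pool_list() above. The sample output is hypothetical; the exact
# `virsh pool-list --all` layout can vary slightly between versions.
import re

SAMPLE_POOL_LIST = (" Name          State    Autostart\n"
                    "-------------------------------------\n"
                    " default       active   yes\n"
                    " temp_pool_1   inactive no\n")


def pool_in_list(pool_name, output):
    """Return True if pool_name shows up in `virsh pool-list` output."""
    rows = re.findall(r"(\S+)\ +(\S+)\ +(\S+)[\ +\n]", output)
    return any(pool_name in row[0] for row in rows)


assert pool_in_list("temp_pool_1", SAMPLE_POOL_LIST)
assert not pool_in_list("missing_pool", SAMPLE_POOL_LIST)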
# keep .ssh/authorized_keys for NFS cleanup later seLinuxBool.cleanup(True) if nfs_client: logging.info("Cleanup NFS client environment...") nfs_client.cleanup() logging.info("Remove the NFS image...") source_file = params.get("source_file") libvirt.delete_local_disk("file", path=source_file) logging.info("Cleanup NFS server environment...") exp_dir = params.get("export_dir") mount_dir = params.get("mnt_path_name") libvirt.setup_or_cleanup_nfs(False, export_dir=exp_dir, mount_dir=mount_dir, restore_selinux=local_selinux_bak) if exception: raise error.TestError("Error occurred. \n%s: %s" % (detail.__class__, detail)) # Check test result. if status_error == 'yes': if ret_migrate: raise error.TestFail("Migration finished with unexpected status.") else: if not ret_migrate: raise error.TestFail("Migration finished with unexpected status.") if not check_dest_state: raise error.TestFail("Wrong VM state on destination.") if not check_dest_persistent:
def run(test, params, env):
    """
    Test command: domfsinfo [--domain]

    The command gets information of domain's mounted filesystems.
    """
    start_vm = ("yes" == params.get("start_vm", "yes"))
    start_ga = ("yes" == params.get("start_ga", "yes"))
    prepare_channel = ("yes" == params.get("prepare_channel", "yes"))
    status_error = ("yes" == params.get("status_error", "no"))
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    mount_dir = params.get("mount_dir", None)
    quiet_mode = ("yes" == params.get("quiet_mode", False))
    readonly_mode = ("yes" == params.get("readonly_mode", False))
    nfs_mount = ("yes" == params.get("nfs_mount", False))
    domfsfreeze = ("yes" == params.get("domfsfreeze", False))

    # Hotplug and Unplug options
    hotplug_unplug = ("yes" == params.get("hotplug_unplug", False))
    disk_name = params.get("disk_name", "test")
    disk_path = os.path.join(data_dir.get_tmp_dir(), disk_name)
    disk_target = params.get("disk_target", "vdb")
    fs_type = params.get("fs_type", "ext3")
    new_part = ""

    fail_pat = []
    check_point_msg = params.get("check_point_msg", "")
    if check_point_msg:
        for msg in check_point_msg.split(";"):
            fail_pat.append(msg)

    def hotplug_domain_disk(domain, target, source=None, hotplug=True):
        """
        Hot-plug/Hot-unplug disk for domain

        :param domain: Guest name
        :param source: Source of disk device, can leave None if hotplug=False
        :param target: Target of disk device
        :param hotplug: True means hotplug, False means hot-unplug
        :return: Virsh command object
        """
        if hotplug:
            result = virsh.attach_disk(domain, source, target, "--live",
                                       ignore_status=False, debug=True)
        else:
            session = vm.wait_for_login()
            try:
                session.cmd("umount %s" % mount_dir)
                session.close()
            except Exception:
                test.error("Failed to unmount the disk before unplugging it")
            result = virsh.detach_disk(domain, target, "--live",
                                       ignore_status=False, debug=True)
        # It needs more time for the attachment to take effect
        time.sleep(5)

    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    cleanup_nfs = False
    try:
        reset_kwargs = {
            "start_vm": start_vm,
            "start_ga": start_ga,
            "prepare_channel": prepare_channel
        }
        reset_domain(vm, **reset_kwargs)

        if domfsfreeze:
            result = virsh.domfsfreeze(vm_name, debug=True)
            if result.exit_status:
                test.fail("Failed to execute virsh.domfsfreeze:\n%s"
                          % result.stderr)
        if nfs_mount:
            nfs_device = libvirt.setup_or_cleanup_nfs(True,
                                                      mount_dir=mount_dir,
                                                      is_mount=True)
            if nfs_device:
                cleanup_nfs = True
        if hotplug_unplug:
            session = vm.wait_for_login()
            new_device = libvirt.create_local_disk("file", path=disk_path,
                                                   size="1")
            parts_list_before_attach = utils_disk.get_parts_list(session)
            hotplug_domain_disk(vm_name, disk_target, new_device)
            parts_list_after_attach = utils_disk.get_parts_list(session)
            new_part = list(
                set(parts_list_after_attach).difference(
                    set(parts_list_before_attach)))[0]
            logging.debug("The new partition is %s", new_part)
            libvirt.mkfs("/dev/%s" % new_part, fs_type, session=session)
            session.cmd_status(
                "mkdir -p {0} ; mount /dev/{1} {0}; ls {0}".format(
                    mount_dir, new_part))
            session.close()

        # Run test case
        command_dargs = {
            "readonly": readonly_mode,
            "quiet": quiet_mode,
            "debug": True
        }
        result = virsh.domfsinfo(vm_name, **command_dargs)
        if not result.exit_status:
            if fail_pat:
                test.fail("Expected fail with %s, but run succeeded:\n%s"
                          % (fail_pat, result))
        else:
            if not fail_pat:
                test.fail("Expected success, but run failed:\n%s" % result)
            else:
                # Fail if none of the expected patterns matches result.stderr
                if not any(p in result.stderr for p in fail_pat):
                    test.fail("Expected fail with one of %s, but failed "
                              "with:\n%s" % (fail_pat, result))

        # Check virsh.domfsinfo output
        cmd_output = result.stdout.strip()
        if quiet_mode:
            head_pat = r"Mountpoint\s+Name\s+Type\s+Target"
            check_output(cmd_output, head_pat, test, expected=False)
        elif nfs_mount:
            check_output(cmd_output, mount_dir, test, expected=False)
        elif hotplug_unplug:
            blk_target = re.findall(r'[a-z]+', new_part)[0]
            disk_pat = r"%s\s+%s\s+%s\s+%s" % (mount_dir, new_part,
                                               fs_type, blk_target)
            check_output(cmd_output, disk_pat, test, expected=True)
            # Unplug domain disk
            hotplug_domain_disk(vm_name, target=new_part, hotplug=False)
            result = virsh.domfsinfo(vm_name, **command_dargs)
            if result.exit_status:
                test.fail("Failed to run virsh.domfsinfo after disk "
                          "unplug:\n%s" % result.stderr)
            check_output(result.stdout.strip(), disk_pat, test,
                         expected=False)
        else:
            # Verify virsh.domfsinfo consistency
            if not status_error:
                session = vm.wait_for_login(timeout=120)
                domfsinfo = vm.domfsinfo()
                expected_result = get_mount_fs(session)
                if domfsinfo and expected_result:
                    check_domfsinfo(domfsinfo, expected_result, test)
                else:
                    logging.debug("Virsh.domfsinfo output:\n%s", domfsinfo)
                    logging.debug("Expected_result is:\n%s", expected_result)
                    test.error("Command output is inconsistent with the "
                               "expected result")
                session.close()
    finally:
        if cleanup_nfs:
            libvirt.setup_or_cleanup_nfs(False, mount_dir=mount_dir)
        if vm.is_alive():
            vm.destroy()
        if hotplug_unplug:
            if disk_path:
                cmd = "rm -rf %s" % disk_path
                process.run(cmd)
        vmxml_backup.sync()
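
# get_mount_fs() and check_domfsinfo() are defined elsewhere in this module.
# Below is a minimal sketch of the consistency check they imply: compare the
# mountpoint/fstype pairs reported by `virsh domfsinfo` with what the guest
# itself reports. The dict shapes are assumptions, not the real helpers' API.
def check_domfsinfo_sketch(domfsinfo, guest_mounts):
    """Return True if every guest mount shows up in domfsinfo with the
    same filesystem type.

    :param domfsinfo: {mountpoint: fstype} parsed from virsh domfsinfo
    :param guest_mounts: {mountpoint: fstype} parsed from guest /proc/mounts
    """
    for mountpoint, fstype in guest_mounts.items():
        if domfsinfo.get(mountpoint) != fstype:
            return False
    return True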
def run(test, params, env): """ Test command: virsh blockpull <domain> <path> 1) Prepare test environment. 2) Populate a disk from its backing image. 3) Recover test environment. 4) Check result. """ def make_disk_snapshot(): # Make four external snapshots for disks only for count in range(1, 5): snap_xml = snapshot_xml.SnapshotXML() snapshot_name = "snapshot_test%s" % count snap_xml.snap_name = snapshot_name snap_xml.description = "Snapshot Test %s" % count # Add all disks into xml file. vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) disks = vmxml.devices.by_device_tag('disk') new_disks = [] for src_disk_xml in disks: disk_xml = snap_xml.SnapDiskXML() disk_xml.xmltreefile = src_disk_xml.xmltreefile del disk_xml.device del disk_xml.address disk_xml.snapshot = "external" disk_xml.disk_name = disk_xml.target['dev'] # Only qcow2 works as external snapshot file format, update it # here driver_attr = disk_xml.driver driver_attr.update({'type': 'qcow2'}) disk_xml.driver = driver_attr new_attrs = disk_xml.source.attrs if disk_xml.source.attrs.has_key('file'): file_name = disk_xml.source.attrs['file'] new_file = "%s.snap%s" % (file_name.split('.')[0], count) snapshot_external_disks.append(new_file) new_attrs.update({'file': new_file}) hosts = None elif (disk_xml.source.attrs.has_key('name') and disk_src_protocol == 'gluster'): src_name = disk_xml.source.attrs['name'] new_name = "%s.snap%s" % (src_name.split('.')[0], count) new_attrs.update({'name': new_name}) snapshot_external_disks.append(new_name) hosts = disk_xml.source.hosts elif (disk_xml.source.attrs.has_key('dev') or disk_xml.source.attrs.has_key('name')): if (disk_xml.type_name == 'block' or disk_src_protocol == 'iscsi'): # Use local file as external snapshot target for block # and iscsi network type. # As block device will be treat as raw format by # default, it's not fit for external disk snapshot # target. A work around solution is use qemu-img again # with the target. # And external active snapshots are not supported on # 'network' disks using 'iscsi' protocol disk_xml.type_name = 'file' if new_attrs.has_key('dev'): del new_attrs['dev'] elif new_attrs.has_key('name'): del new_attrs['name'] del new_attrs['protocol'] new_file = "%s/blk_src_file.snap%s" % (tmp_dir, count) snapshot_external_disks.append(new_file) new_attrs.update({'file': new_file}) hosts = None new_src_dict = {"attrs": new_attrs} if hosts: new_src_dict.update({"hosts": hosts}) disk_xml.source = disk_xml.new_disk_source(**new_src_dict) new_disks.append(disk_xml) snap_xml.set_disks(new_disks) snapshot_xml_path = snap_xml.xml logging.debug("The snapshot xml is: %s" % snap_xml.xmltreefile) options = "--disk-only --xmlfile %s " % snapshot_xml_path snapshot_result = virsh.snapshot_create(vm_name, options, debug=True) if snapshot_result.exit_status != 0: raise error.TestFail(snapshot_result.stderr) # Create a file flag in VM after each snapshot flag_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"), dir="/tmp") file_path = flag_file.name flag_file.close() status, output = session.cmd_status_output("touch %s" % file_path) if status: raise error.TestFail("Touch file in vm failed. 
%s" % output) snapshot_flag_files.append(file_path) # MAIN TEST CODE ### # Process cartesian parameters vm_name = params.get("main_vm") vm = env.get_vm(vm_name) needs_agent = "yes" == params.get("needs_agent", "yes") replace_vm_disk = "yes" == params.get("replace_vm_disk", "no") snap_in_mirror = "yes" == params.get("snap_in_mirror", 'no') snap_in_mirror_err = "yes" == params.get("snap_in_mirror_err", 'no') bandwidth = params.get("bandwidth", None) with_timeout = ("yes" == params.get("with_timeout_option", "no")) status_error = ("yes" == params.get("status_error", "no")) base_option = params.get("base_option", None) keep_relative = "yes" == params.get("keep_relative", 'no') virsh_dargs = {'debug': True} # Process domain disk device parameters disk_type = params.get("disk_type") disk_target = params.get("disk_target", 'vda') disk_src_protocol = params.get("disk_source_protocol") restart_tgtd = params.get("restart_tgtd", "no") vol_name = params.get("vol_name") tmp_dir = data_dir.get_tmp_dir() pool_name = params.get("pool_name", "gluster-pool") brick_path = os.path.join(tmp_dir, pool_name) # A backup of original vm vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) logging.debug("original xml is %s", vmxml_backup) # Abort the test if there are snapshots already exsiting_snaps = virsh.snapshot_list(vm_name) if len(exsiting_snaps) != 0: raise error.TestFail("There are snapshots created for %s already" % vm_name) snapshot_external_disks = [] try: if disk_src_protocol == 'iscsi' and disk_type == 'network': if not libvirt_version.version_compare(1, 0, 4): raise error.TestNAError("'iscsi' disk doesn't support in" " current libvirt version.") if disk_src_protocol == 'gluster': if not libvirt_version.version_compare(1, 2, 7): raise error.TestNAError("Snapshot on glusterfs not" " support in current " "version. Check more info " " with https://bugzilla.re" "dhat.com/show_bug.cgi?id=" "1017289") # Set vm xml and guest agent if replace_vm_disk: libvirt.set_vm_disk(vm, params, tmp_dir) if needs_agent: vm.prepare_guest_agent() # The first disk is supposed to include OS # We will perform blockpull operation for it. 
        first_disk = vm.get_first_disk_devices()
        blk_source = first_disk['source']
        blk_target = first_disk['target']
        snapshot_flag_files = []

        # get a vm session before snapshot
        session = vm.wait_for_login()
        # do snapshot
        make_disk_snapshot()

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug("The domain xml after snapshot is %s" % vmxml)

        # snapshot src file list
        snap_src_lst = [blk_source]
        snap_src_lst += snapshot_external_disks

        if snap_in_mirror:
            blockpull_options = "--bandwidth 1"
        else:
            blockpull_options = "--wait --verbose"

        if with_timeout:
            blockpull_options += " --timeout 1"

        if bandwidth:
            blockpull_options += " --bandwidth %s" % bandwidth

        if base_option == "async":
            blockpull_options += " --async"

        base_image = None
        base_index = None
        if (libvirt_version.version_compare(1, 2, 4) or
                disk_src_protocol == 'gluster'):
            if base_option == "shallow":
                base_index = 1
                base_image = "%s[%s]" % (disk_target, base_index)
            elif base_option == "base":
                base_index = 2
                base_image = "%s[%s]" % (disk_target, base_index)
            elif base_option == "top":
                base_index = 0
                base_image = "%s[%s]" % (disk_target, base_index)
        else:
            if base_option == "shallow":
                base_image = snap_src_lst[3]
            elif base_option == "base":
                base_image = snap_src_lst[2]
            elif base_option == "top":
                base_image = snap_src_lst[4]

        if base_option and base_image:
            blockpull_options += " --base %s" % base_image

        if keep_relative:
            blockpull_options += " --keep-relative"

        # Run test case
        result = virsh.blockpull(vm_name, blk_target,
                                 blockpull_options, **virsh_dargs)
        status = result.exit_status

        # Check status_error
        libvirt.check_exit_status(result, status_error)

        if not status and not with_timeout:
            if snap_in_mirror:
                snap_mirror_path = "%s/snap_mirror" % tmp_dir
                snap_options = "--diskspec vda,snapshot=external,"
                snap_options += "file=%s --disk-only" % snap_mirror_path
                snapshot_external_disks.append(snap_mirror_path)
                ret = virsh.snapshot_create_as(vm_name, snap_options,
                                               ignore_status=True,
                                               debug=True)
                libvirt.check_exit_status(ret, snap_in_mirror_err)
                return

            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            disks = vmxml.devices.by_device_tag('disk')
            for disk in disks:
                if disk.target['dev'] != blk_target:
                    continue
                else:
                    disk_xml = disk.xmltreefile
                    break

            logging.debug("after pull the disk xml is: %s"
                          % disk_xml)
            if libvirt_version.version_compare(1, 2, 4):
                err_msg = "Domain image backing chain check failed"
                if not base_option or "async" in base_option:
                    chain_lst = snap_src_lst[-1:]
                    ret = check_chain_xml(disk_xml, chain_lst)
                    if not ret:
                        raise error.TestFail(err_msg)
                elif "base" in base_option or "shallow" in base_option:
                    chain_lst = snap_src_lst[::-1]
                    if not base_index and base_image:
                        base_index = chain_lst.index(base_image)
                    val_tmp = []
                    for i in range(1, base_index):
                        val_tmp.append(chain_lst[i])
                    for i in val_tmp:
                        chain_lst.remove(i)
                    ret = check_chain_xml(disk_xml, chain_lst)
                    if not ret:
                        raise error.TestFail(err_msg)

        # If base image is the top layer of snapshot chain,
        # virsh blockpull should fail, return directly
        if base_option == "top":
            return

        # Check flag files
        for flag in snapshot_flag_files:
            status, output = session.cmd_status_output("cat %s" % flag)
            if status:
                raise error.TestFail("blockpull failed: %s" % output)

    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Recover xml of vm.
vmxml_backup.sync("--snapshots-metadata") if not disk_src_protocol or disk_src_protocol != 'gluster': for disk in snapshot_external_disks: if os.path.exists(disk): os.remove(disk) libvirtd = utils_libvirtd.Libvirtd() if disk_src_protocol == 'iscsi': libvirt.setup_or_cleanup_iscsi(is_setup=False, restart_tgtd=restart_tgtd) elif disk_src_protocol == 'gluster': libvirt.setup_or_cleanup_gluster(False, vol_name, brick_path) libvirtd.restart() elif disk_src_protocol == 'netfs': restore_selinux = params.get('selinux_status_bak') libvirt.setup_or_cleanup_nfs(is_setup=False, restore_selinux=restore_selinux)
def run(test, params, env): """ Test command: virsh blockpull <domain> <path> 1) Prepare test environment. 2) Populate a disk from its backing image. 3) Recover test environment. 4) Check result. """ def make_disk_snapshot(snapshot_take): """ Make external snapshots for disks only. :param snapshot_take: snapshots taken. """ for count in range(1, snapshot_take + 1): snap_xml = snapshot_xml.SnapshotXML() snapshot_name = "snapshot_test%s" % count snap_xml.snap_name = snapshot_name snap_xml.description = "Snapshot Test %s" % count # Add all disks into xml file. vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) disks = vmxml.devices.by_device_tag('disk') new_disks = [] for src_disk_xml in disks: disk_xml = snap_xml.SnapDiskXML() disk_xml.xmltreefile = src_disk_xml.xmltreefile # Skip cdrom if disk_xml.device == "cdrom": continue del disk_xml.device del disk_xml.address disk_xml.snapshot = "external" disk_xml.disk_name = disk_xml.target['dev'] # Only qcow2 works as external snapshot file format, update it # here driver_attr = disk_xml.driver driver_attr.update({'type': 'qcow2'}) disk_xml.driver = driver_attr new_attrs = disk_xml.source.attrs if 'file' in disk_xml.source.attrs: file_name = disk_xml.source.attrs['file'] new_file = "%s.snap%s" % (file_name.split('.')[0], count) snapshot_external_disks.append(new_file) new_attrs.update({'file': new_file}) hosts = None elif ('name' in disk_xml.source.attrs and disk_src_protocol == 'gluster'): src_name = disk_xml.source.attrs['name'] new_name = "%s.snap%s" % (src_name.split('.')[0], count) new_attrs.update({'name': new_name}) snapshot_external_disks.append(new_name) hosts = disk_xml.source.hosts elif ('dev' in disk_xml.source.attrs or 'name' in disk_xml.source.attrs): if (disk_xml.type_name == 'block' or disk_src_protocol in ['iscsi', 'rbd']): # Use local file as external snapshot target for block # and iscsi network type. # As block device will be treat as raw format by # default, it's not fit for external disk snapshot # target. A work around solution is use qemu-img again # with the target. # And external active snapshots are not supported on # 'network' disks using 'iscsi' protocol disk_xml.type_name = 'file' if 'dev' in new_attrs: del new_attrs['dev'] elif 'name' in new_attrs: del new_attrs['name'] del new_attrs['protocol'] new_file = "%s/blk_src_file.snap%s" % (tmp_dir, count) snapshot_external_disks.append(new_file) new_attrs.update({'file': new_file}) hosts = None new_src_dict = {"attrs": new_attrs} if hosts: new_src_dict.update({"hosts": hosts}) disk_xml.source = disk_xml.new_disk_source(**new_src_dict) new_disks.append(disk_xml) snap_xml.set_disks(new_disks) snapshot_xml_path = snap_xml.xml logging.debug("The snapshot xml is: %s" % snap_xml.xmltreefile) options = "--disk-only --xmlfile %s " % snapshot_xml_path snapshot_result = virsh.snapshot_create( vm_name, options, debug=True) if snapshot_result.exit_status != 0: test.fail(snapshot_result.stderr) # Create a file flag in VM after each snapshot flag_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"), dir="/tmp") file_path = flag_file.name flag_file.close() status, output = session.cmd_status_output("touch %s" % file_path) if status: test.fail("Touch file in vm failed. %s" % output) snapshot_flag_files.append(file_path) def get_first_disk_source(): """ Get disk source of first device :return: first disk of first device. 
""" first_device = vm.get_first_disk_devices() firt_disk_src = first_device['source'] return firt_disk_src def make_relative_path_backing_files(): """ Create backing chain files of relative path. :return: absolute path of top active file """ first_disk_source = get_first_disk_source() basename = os.path.basename(first_disk_source) root_dir = os.path.dirname(first_disk_source) cmd = "mkdir -p %s" % os.path.join(root_dir, '{b..d}') ret = process.run(cmd, shell=True) libvirt.check_exit_status(ret) # Make three external relative path backing files. backing_file_dict = collections.OrderedDict() backing_file_dict["b"] = "../%s" % basename backing_file_dict["c"] = "../b/b.img" backing_file_dict["d"] = "../c/c.img" for key, value in list(backing_file_dict.items()): backing_file_path = os.path.join(root_dir, key) cmd = ("cd %s && qemu-img create -f qcow2 -o backing_file=%s,backing_fmt=qcow2 %s.img" % (backing_file_path, value, key)) ret = process.run(cmd, shell=True) libvirt.check_exit_status(ret) return os.path.join(backing_file_path, "d.img") def check_chain_backing_files(disk_src_file, expect_backing_file=False): """ Check backing chain files of relative path after blockcommit. :param disk_src_file: first disk src file. :param expect_backing_file: whether it expect to have backing files. """ first_disk_source = get_first_disk_source() # Validate source image need refer to original one after active blockcommit if not expect_backing_file and disk_src_file not in first_disk_source: test.fail("The disk image path:%s doesn't include the origin image: %s" % (first_disk_source, disk_src_file)) # Validate source image doesn't have backing files after active blockcommit cmd = "qemu-img info %s --backing-chain" % first_disk_source if qemu_img_locking_feature_support: cmd = "qemu-img info -U %s --backing-chain" % first_disk_source ret = process.run(cmd, shell=True).stdout_text.strip() if expect_backing_file: if 'backing file' not in ret: test.fail("The disk image doesn't have backing files") else: logging.debug("The actual qemu-img output:%s\n", ret) else: if 'backing file' in ret: test.fail("The disk image still have backing files") else: logging.debug("The actual qemu-img output:%s\n", ret) # MAIN TEST CODE ### # Process cartesian parameters vm_name = params.get("main_vm") vm = env.get_vm(vm_name) snapshot_take = int(params.get("snapshot_take", '0')) needs_agent = "yes" == params.get("needs_agent", "yes") replace_vm_disk = "yes" == params.get("replace_vm_disk", "no") snap_in_mirror = "yes" == params.get("snap_in_mirror", 'no') snap_in_mirror_err = "yes" == params.get("snap_in_mirror_err", 'no') bandwidth = params.get("bandwidth", None) with_timeout = ("yes" == params.get("with_timeout_option", "no")) status_error = ("yes" == params.get("status_error", "no")) base_option = params.get("base_option", None) keep_relative = "yes" == params.get("keep_relative", 'no') virsh_dargs = {'debug': True} # Check whether qemu-img need add -U suboption since locking feature was added afterwards qemu-2.10 qemu_img_locking_feature_support = libvirt_storage.check_qemu_image_lock_support() backing_file_relative_path = "yes" == params.get("backing_file_relative_path", "no") # Process domain disk device parameters disk_type = params.get("disk_type") disk_target = params.get("disk_target", 'vda') disk_src_protocol = params.get("disk_source_protocol") restart_tgtd = params.get("restart_tgtd", "no") vol_name = params.get("vol_name") tmp_dir = data_dir.get_tmp_dir() pool_name = params.get("pool_name", "gluster-pool") brick_path = 
    # A backup of original vm
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    logging.debug("original xml is %s", vmxml_backup)

    # Abort the test if there are snapshots already
    existing_snaps = virsh.snapshot_list(vm_name)
    if len(existing_snaps) != 0:
        test.fail("There are snapshots created for %s already" % vm_name)

    snapshot_external_disks = []
    # Prepare blank params to confirm whether to delete the configuration
    # at the end of the test
    ceph_cfg = ""
    try:
        if disk_src_protocol == 'iscsi' and disk_type == 'network':
            if not libvirt_version.version_compare(1, 0, 4):
                test.cancel("'iscsi' disk is not supported in"
                            " current libvirt version.")
        if disk_src_protocol == 'gluster':
            if not libvirt_version.version_compare(1, 2, 7):
                test.cancel("Snapshot on glusterfs is not supported in "
                            "current version. Check more info with "
                            "https://bugzilla.redhat.com/"
                            "show_bug.cgi?id=1017289")

        # Set vm xml and guest agent
        if replace_vm_disk:
            if disk_src_protocol == "rbd" and disk_type == "network":
                src_host = params.get("disk_source_host", "EXAMPLE_HOSTS")
                mon_host = params.get("mon_host", "EXAMPLE_MON_HOST")
                # Create config file if it doesn't exist
                ceph_cfg = ceph.create_config_file(mon_host)
                if src_host.count("EXAMPLE") or mon_host.count("EXAMPLE"):
                    test.cancel("Please provide ceph host first.")
            if backing_file_relative_path:
                if vm.is_alive():
                    vm.destroy(gracefully=False)
                first_src_file = get_first_disk_source()
                blk_source_image = os.path.basename(first_src_file)
                blk_source_folder = os.path.dirname(first_src_file)
                replace_disk_image = make_relative_path_backing_files()
                params.update({'disk_source_name': replace_disk_image,
                               'disk_type': 'file',
                               'disk_src_protocol': 'file'})
                vm.start()
            libvirt.set_vm_disk(vm, params, tmp_dir)

        if needs_agent:
            vm.prepare_guest_agent()

        # The first disk is supposed to include OS
        # We will perform blockpull operation for it.
        first_disk = vm.get_first_disk_devices()
        blk_source = first_disk['source']
        blk_target = first_disk['target']
        snapshot_flag_files = []

        # get a vm session before snapshot
        session = vm.wait_for_login()
        # do snapshot
        make_disk_snapshot(snapshot_take)

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug("The domain xml after snapshot is %s" % vmxml)

        # snapshot src file list
        snap_src_lst = [blk_source]
        snap_src_lst += snapshot_external_disks

        if snap_in_mirror:
            blockpull_options = "--bandwidth 1"
        else:
            blockpull_options = "--wait --verbose"

        if with_timeout:
            blockpull_options += " --timeout 1"

        if bandwidth:
            blockpull_options += " --bandwidth %s" % bandwidth

        if base_option == "async":
            blockpull_options += " --async"

        base_image = None
        base_index = None
        if (libvirt_version.version_compare(1, 2, 4) or
                disk_src_protocol == 'gluster'):
            # For libvirt 1.2.4 and newer, or a gluster source protocol,
            # the base image varies with the base option:
            # shallow, base or top respectively
            if base_option == "shallow":
                base_index = 1
                base_image = "%s[%s]" % (disk_target, base_index)
            elif base_option == "base":
                base_index = 2
                base_image = "%s[%s]" % (disk_target, base_index)
            elif base_option == "top":
                base_index = 0
                base_image = "%s[%s]" % (disk_target, base_index)
        else:
            if base_option == "shallow":
                base_image = snap_src_lst[3]
            elif base_option == "base":
                base_image = snap_src_lst[2]
            elif base_option == "top":
                base_image = snap_src_lst[4]

        if base_option and base_image:
            blockpull_options += " --base %s" % base_image

        if keep_relative:
            blockpull_options += " --keep-relative"

        if backing_file_relative_path:
            # Use block commit to shorten previous snapshots.
            blockcommit_options = (" --active --verbose --shallow --pivot "
                                   "--keep-relative")
            for count in range(1, snapshot_take + 1):
                res = virsh.blockcommit(vm_name, blk_target,
                                        blockcommit_options, **virsh_dargs)
                libvirt.check_exit_status(res, status_error)

            # Use block pull with the --keep-relative flag, and reset
            # base_index to 2.
            base_index = 2
            for count in range(1, snapshot_take):
                # If there are three or more block pull operations,
                # reset base_index to 1.
                if count >= 3:
                    base_index = 1
                base_image = "%s[%s]" % (disk_target, base_index)
                blockpull_options = (" --wait --verbose --base %s "
                                     "--keep-relative" % base_image)
                res = virsh.blockpull(vm_name, blk_target,
                                      blockpull_options, **virsh_dargs)
                libvirt.check_exit_status(res, status_error)
            # Check final backing chain files.
            check_chain_backing_files(blk_source_image, True)
            return

        # Run test case
        result = virsh.blockpull(vm_name, blk_target,
                                 blockpull_options, **virsh_dargs)
        status = result.exit_status

        # If the pull job aborted on timeout, the exit status differs
        # between RHEL6 (0) and RHEL7 (1)
        if with_timeout and 'Pull aborted' in result.stdout.strip():
            if libvirt_version.version_compare(1, 1, 1):
                status_error = True
            else:
                status_error = False

        # Check status_error
        libvirt.check_exit_status(result, status_error)

        if not status and not with_timeout:
            if snap_in_mirror:
                snap_mirror_path = "%s/snap_mirror" % tmp_dir
                snap_options = "--diskspec vda,snapshot=external,"
                snap_options += "file=%s --disk-only" % snap_mirror_path
                snapshot_external_disks.append(snap_mirror_path)
                ret = virsh.snapshot_create_as(vm_name, snap_options,
                                               ignore_status=True,
                                               debug=True)
                libvirt.check_exit_status(ret, snap_in_mirror_err)
                return

            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            disks = vmxml.devices.by_device_tag('disk')
            for disk in disks:
                if disk.target['dev'] != blk_target:
                    continue
                else:
                    disk_xml = disk.xmltreefile
                    break

            logging.debug("after pull the disk xml is: %s"
                          % disk_xml)
            if libvirt_version.version_compare(1, 2, 4):
                err_msg = "Domain image backing chain check failed"
                if not base_option or "async" in base_option:
                    chain_lst = snap_src_lst[-1:]
                    ret = check_chain_xml(disk_xml, chain_lst)
                    if not ret:
                        test.fail(err_msg)
                elif "base" in base_option or "shallow" in base_option:
                    chain_lst = snap_src_lst[::-1]
                    if not base_index and base_image:
                        base_index = chain_lst.index(base_image)
                    val_tmp = []
                    for i in range(1, base_index):
                        val_tmp.append(chain_lst[i])
                    for i in val_tmp:
                        chain_lst.remove(i)
                    ret = check_chain_xml(disk_xml, chain_lst)
                    if not ret:
                        test.fail(err_msg)

        # If base image is the top layer of snapshot chain,
        # virsh blockpull should fail, return directly
        if base_option == "top":
            return

        # Check flag files
        for flag in snapshot_flag_files:
            status, output = session.cmd_status_output("cat %s" % flag)
            if status:
                test.fail("blockpull failed: %s" % output)

    finally:
        # Remove ceph configure file if created
        if ceph_cfg:
            os.remove(ceph_cfg)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Recover xml of vm.
        vmxml_backup.sync("--snapshots-metadata")

        if not disk_src_protocol or disk_src_protocol != 'gluster':
            for disk in snapshot_external_disks:
                if os.path.exists(disk):
                    os.remove(disk)

        if backing_file_relative_path:
            libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)
            process.run("cd %s && rm -rf b c d" % blk_source_folder,
                        shell=True)

        libvirtd = utils_libvirtd.Libvirtd()
        if disk_src_protocol == 'iscsi':
            libvirt.setup_or_cleanup_iscsi(is_setup=False,
                                           restart_tgtd=restart_tgtd)
        elif disk_src_protocol == 'gluster':
            libvirt.setup_or_cleanup_gluster(False, vol_name, brick_path)
            libvirtd.restart()
        elif disk_src_protocol == 'netfs':
            restore_selinux = params.get('selinux_status_bak')
            libvirt.setup_or_cleanup_nfs(is_setup=False,
                                         restore_selinux=restore_selinux)
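
# The relative-path chain created by make_relative_path_backing_files() above
# is laid out like this (<root> is the directory of the original image):
#
#   <root>/<base image>
#   <root>/b/b.img  backed by ../<base image>
#   <root>/c/c.img  backed by ../b/b.img
#   <root>/d/d.img  backed by ../c/c.img   <- new active layer
#
# One way to confirm the relative backing references survive blockpull with
# --keep-relative is to read them back with qemu-img; a sketch (the -U flag
# assumes a qemu with image locking, i.e. 2.10 or newer):
import subprocess


def backing_file_of(image):
    """Return the raw 'backing file' value from qemu-img info, or None."""
    out = subprocess.check_output(["qemu-img", "info", "-U", image],
                                  universal_newlines=True)
    for line in out.splitlines():
        if line.startswith("backing file:"):
            return line.split(":", 1)[1].strip()
    return None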
logging.error(e) for disk in snapshot_external_disks: if os.path.exists(disk): os.remove(disk) # Clean up libvirt pool, which may be created by 'set_vm_disk' if disk_type == 'volume': virsh.pool_destroy(pool_name, ignore_status=True, debug=True) # Restore libvirtd conf and restart libvirtd libvirtd_conf.restore() libvirtd_utl.restart() if libvirtd_log_path and os.path.exists(libvirtd_log_path): os.unlink(libvirtd_log_path) # Clean up NFS try: if nfs_cleanup: utl.setup_or_cleanup_nfs(is_setup=False) except Exception, e: logging.error(e) # Clean up iSCSI try: for iscsi_n in list(set(emulated_iscsi)): utl.setup_or_cleanup_iscsi(is_setup=False, emulated_image=iscsi_n) # iscsid will be restarted, so give it a break before next loop time.sleep(5) except Exception, e: logging.error(e) if os.path.exists(dest_path): os.remove(dest_path) if os.path.exists(snap_path): os.remove(snap_path)
utils_misc.wait_for( lambda: virsh.domstate(vm_name, ignore_status=True).exit_status, 2) if active_snap or with_shallow: option = "--snapshots-metadata" else: option = None original_xml.sync(option) for disk in snapshot_external_disks: if os.path.exists(disk): os.remove(disk) if replace_vm_disk: if disk_source_protocol == "netfs": restore_selinux = params.get('selinux_status_bak') utl.setup_or_cleanup_nfs(is_setup=False, restore_selinux=restore_selinux) elif disk_source_protocol == "iscsi": if disk_type == 'volume': virsh.pool_destroy(pool_name, ignore_status=True, debug=True) if with_blockdev: utl.setup_or_cleanup_iscsi(is_setup=False, emulated_image=blkdev_n) if with_shallow: utl.setup_or_cleanup_iscsi(is_setup=False, emulated_image=back_n) utl.setup_or_cleanup_iscsi(is_setup=False, emulated_image=emu_image, restart_tgtd='yes') if os.path.exists(dest_path) and not with_blockdev:
def run(test, params, env):
    """
    Test DAC setting in both domain xml and qemu.conf.

    (1) Init variables for test.
    (2) Set VM xml and qemu.conf with proper DAC label, also set
        monitor socket parent dir with proper ownership and mode.
    (3) Start VM and check the context.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("host_selinux", "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("vm_sec_type", "dynamic")
    vm_sec_model = params.get("vm_sec_model", "dac")
    vm_sec_label = params.get("vm_sec_label", None)
    vm_sec_relabel = params.get("vm_sec_relabel", "yes")
    sec_dict = {'type': sec_type, 'model': vm_sec_model,
                'relabel': vm_sec_relabel}
    if vm_sec_label:
        sec_dict['label'] = vm_sec_label
    set_qemu_conf = "yes" == params.get("set_qemu_conf", "no")
    # Get per-img seclabel variables
    disk_type = params.get("disk_type")
    disk_target = params.get('disk_target')
    disk_src_protocol = params.get("disk_source_protocol")
    vol_name = params.get("vol_name")
    tmp_dir = data_dir.get_tmp_dir()
    pool_name = params.get("pool_name", "gluster-pool")
    brick_path = os.path.join(tmp_dir, pool_name)
    invalid_label = 'yes' == params.get("invalid_label", "no")
    relabel = params.get("per_img_sec_relabel")
    sec_label = params.get("per_img_sec_label")
    per_sec_model = params.get("per_sec_model", 'dac')
    per_img_dict = {'sec_model': per_sec_model, 'relabel': relabel,
                    'sec_label': sec_label}
    params.update(per_img_dict)
    # Get qemu.conf config variables
    qemu_user = params.get("qemu_user", 'qemu')
    qemu_group = params.get("qemu_group", 'qemu')
    dynamic_ownership = "yes" == params.get("dynamic_ownership", "yes")

    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    utils_selinux.set_status(host_sestatus)

    qemu_sock_mod = False
    qemu_sock_path = '/var/lib/libvirt/qemu/'
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        if set_qemu_conf:
            # Set qemu.conf for user and group
            if qemu_user:
                qemu_conf.user = qemu_user
            if qemu_group:
                qemu_conf.group = qemu_group
            if dynamic_ownership:
                qemu_conf.dynamic_ownership = 1
            else:
                qemu_conf.dynamic_ownership = 0
            logging.debug("the qemu.conf content is: %s" % qemu_conf)
            libvirtd.restart()
            st = os.stat(qemu_sock_path)
            if not bool(st.st_mode & stat.S_IWGRP):
                # chmod g+w
                os.chmod(qemu_sock_path, st.st_mode | stat.S_IWGRP)
                qemu_sock_mod = True

        # Set the context of the VM.
        logging.debug("sec_dict is %s" % sec_dict)
        vmxml.set_seclabel([sec_dict])
        vmxml.sync()

        # Get per-image seclabel in id string
        if sec_label:
            per_img_usr, per_img_grp = sec_label.split(':')
            sec_label_id = format_user_group_str(per_img_usr, per_img_grp)

        # Start VM to check the qemu process and image.
        try:
            # Set per-img sec context and start vm
            utlv.set_vm_disk(vm, params)
            # Start VM successfully.
            if status_error:
                if invalid_label:
                    # invalid label should fail, more info in bug 1165485
                    raise error.TestNAError("The label or model is not "
                                            "valid, check more info in bug: "
                                            "https://bugzilla.redhat.com/"
                                            "show_bug.cgi?id=1165485")
                else:
                    raise error.TestFail("Test succeeded in negative case.")

            # Get vm process label when VM is running.
vm_pid = vm.get_pid() pid_stat = os.stat("/proc/%d" % vm_pid) vm_process_uid = pid_stat.st_uid vm_process_gid = pid_stat.st_gid vm_context = "%s:%s" % (vm_process_uid, vm_process_gid) logging.debug("vm process label is: %s", vm_context) # Get vm image label when VM is running if disk_type != "network": disks = vm.get_blk_devices() f = os.open(disks[disk_target]['source'], 0) stat_re = os.fstat(f) disk_context = "%s:%s" % (stat_re.st_uid, stat_re.st_gid) os.close(f) logging.debug("The disk dac label after vm start is: %s", disk_context) if sec_label and relabel == 'yes': if disk_context != sec_label_id: raise error.TestFail("The disk label is not equal to " "'%s'." % sec_label_id) except virt_vm.VMStartError, e: # Starting VM failed. if not status_error: raise error.TestFail("Test failed in positive case." "error: %s" % e) finally: # clean up if vm.is_alive(): vm.destroy(gracefully=False) backup_xml.sync() if qemu_sock_mod: st = os.stat(qemu_sock_path) os.chmod(qemu_sock_path, st.st_mode ^ stat.S_IWGRP) if set_qemu_conf: qemu_conf.restore() libvirtd.restart() utils_selinux.set_status(backup_sestatus) if disk_src_protocol == 'iscsi': utlv.setup_or_cleanup_iscsi(is_setup=False) elif disk_src_protocol == 'gluster': utlv.setup_or_cleanup_gluster(False, vol_name, brick_path) libvirtd.restart() elif disk_src_protocol == 'netfs': utlv.setup_or_cleanup_nfs(is_setup=False, restore_selinux=backup_sestatus)
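
# format_user_group_str() is referenced above but defined elsewhere in this
# module. A plausible minimal sketch of what it does: resolve a user/group
# pair (names or numeric ids) into the "uid:gid" string that is compared
# against os.stat() results.
import grp
import pwd


def format_user_group_str_sketch(user, group):
    """Return 'uid:gid' for a user and group given as names or ids."""
    uid = user if user.isdigit() else str(pwd.getpwnam(user).pw_uid)
    gid = group if group.isdigit() else str(grp.getgrnam(group).gr_gid)
    return "%s:%s" % (uid, gid)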
""" return (vm.state() == "paused") if not utils_misc.wait_for(_check_state, timeout): test.fail("Guest does not paused, it is %s now" % vm.state()) else: logging.info("Now domain state changed to paused status") output = virsh.domblkerror(vm_name) if output.exit_status == 0: expect_result = "%s: %s" % (img_disk.target['dev'], error_type) if output.stdout.strip() == expect_result: logging.info("Get expect result: %s", expect_result) else: test.fail("Failed to get expect result, get %s" % output.stdout.strip()) else: test.fail("Fail to get domblkerror info:%s" % output.stderr) finally: logging.info("Do clean steps") if error_type == "unspecified error": nfs_service.start() vm.destroy() if os.path.isfile("%s.bak" % export_file): shutil.move("%s.bak" % export_file, export_file) res = libvirt.setup_or_cleanup_nfs(is_setup=False, restore_selinux=selinux_bak) elif error_type == "no space": vm.destroy() _pool_vol.cleanup_pool(pool_name, "fs", pool_target, img_name) vmxml_backup.sync()
def run(test, params, env): """ Test command: virsh blockcommit <domain> <path> 1) Prepare test environment. 2) Commit changes from a snapshot down to its backing image. 3) Recover test environment. 4) Check result. """ def make_disk_snapshot(postfix_n, snapshot_take, is_check_snapshot_tree=False): """ Make external snapshots for disks only. :param postfix_n: postfix option :param snapshot_take: snapshots taken. """ # Add all disks into command line. disks = vm.get_disk_devices() # Make three external snapshots for disks only for count in range(1, snapshot_take): options = "%s_%s %s%s-desc " % (postfix_n, count, postfix_n, count) options += "--disk-only --atomic --no-metadata" if needs_agent: options += " --quiesce" for disk in disks: disk_detail = disks[disk] basename = os.path.basename(disk_detail['source']) # Remove the original suffix if any, appending # ".postfix_n[0-9]" diskname = basename.split(".")[0] snap_name = "%s.%s%s" % (diskname, postfix_n, count) disk_external = os.path.join(tmp_dir, snap_name) snapshot_external_disks.append(disk_external) options += " %s,snapshot=external,file=%s" % (disk, disk_external) if is_check_snapshot_tree: options = options.replace("--no-metadata", "") cmd_result = virsh.snapshot_create_as(vm_name, options, ignore_status=True, debug=True) status = cmd_result.exit_status if status != 0: test.fail("Failed to make snapshots for disks!") # Create a file flag in VM after each snapshot flag_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"), dir="/tmp") file_path = flag_file.name flag_file.close() status, output = session.cmd_status_output("touch %s" % file_path) if status: test.fail("Touch file in vm failed. %s" % output) snapshot_flag_files.append(file_path) def check_snapshot_tree(): """ Check whether predefined snapshot names are equals to snapshot names by virsh snapshot-list --tree """ predefined_snapshot_name_list = [] for count in range(1, snapshot_take): predefined_snapshot_name_list.append("%s_%s" % (postfix_n, count)) snapshot_list_cmd = "virsh snapshot-list %s --tree" % vm_name result_output = process.run(snapshot_list_cmd, ignore_status=True, shell=True).stdout_text virsh_snapshot_name_list = [] for line in result_output.rsplit("\n"): strip_line = line.strip() if strip_line and "|" not in strip_line: virsh_snapshot_name_list.append(strip_line) # Compare two lists in their order and values, all need to be same. compare_list = [out_p for out_p, out_v in zip(predefined_snapshot_name_list, virsh_snapshot_name_list) if out_p not in out_v] if compare_list: test.fail("snapshot tree not correctly returned.") # If check_snapshot_tree is True, check snapshot tree output. if is_check_snapshot_tree: check_snapshot_tree() def get_first_disk_source(): """ Get disk source of first device :return: first disk of first device. """ first_device = vm.get_first_disk_devices() first_disk_src = first_device['source'] return first_disk_src def make_relative_path_backing_files(): """ Create backing chain files of relative path. :return: absolute path of top active file """ first_disk_source = get_first_disk_source() basename = os.path.basename(first_disk_source) root_dir = os.path.dirname(first_disk_source) cmd = "mkdir -p %s" % os.path.join(root_dir, '{b..d}') ret = process.run(cmd, shell=True) libvirt.check_exit_status(ret) # Make three external relative path backing files. 
backing_file_dict = collections.OrderedDict() backing_file_dict["b"] = "../%s" % basename backing_file_dict["c"] = "../b/b.img" backing_file_dict["d"] = "../c/c.img" for key, value in list(backing_file_dict.items()): backing_file_path = os.path.join(root_dir, key) cmd = ("cd %s && qemu-img create -f qcow2 -o backing_file=%s,backing_fmt=qcow2 %s.img" % (backing_file_path, value, key)) ret = process.run(cmd, shell=True) libvirt.check_exit_status(ret) return os.path.join(backing_file_path, "d.img") def check_chain_backing_files(disk_src_file, expect_backing_file=False): """ Check backing chain files of relative path after blockcommit. :param disk_src_file: first disk src file. :param expect_backing_file: whether it expect to have backing files. """ first_disk_source = get_first_disk_source() # Validate source image need refer to original one after active blockcommit if not expect_backing_file and disk_src_file not in first_disk_source: test.fail("The disk image path:%s doesn't include the origin image: %s" % (first_disk_source, disk_src_file)) # Validate source image doesn't have backing files after active blockcommit cmd = "qemu-img info %s --backing-chain" % first_disk_source if qemu_img_locking_feature_support: cmd = "qemu-img info -U %s --backing-chain" % first_disk_source ret = process.run(cmd, shell=True).stdout_text.strip() if expect_backing_file: if 'backing file' not in ret: test.fail("The disk image doesn't have backing files") else: logging.debug("The actual qemu-img output:%s\n", ret) else: if 'backing file' in ret: test.fail("The disk image still have backing files") else: logging.debug("The actual qemu-img output:%s\n", ret) # MAIN TEST CODE ### # Process cartesian parameters vm_name = params.get("main_vm") vm = env.get_vm(vm_name) snapshot_take = int(params.get("snapshot_take", '0')) vm_state = params.get("vm_state", "running") needs_agent = "yes" == params.get("needs_agent", "yes") replace_vm_disk = "yes" == params.get("replace_vm_disk", "no") top_inactive = ("yes" == params.get("top_inactive")) with_timeout = ("yes" == params.get("with_timeout_option", "no")) status_error = ("yes" == params.get("status_error", "no")) base_option = params.get("base_option", "none") middle_base = "yes" == params.get("middle_base", "no") pivot_opt = "yes" == params.get("pivot_opt", "no") snap_in_mirror = "yes" == params.get("snap_in_mirror", "no") snap_in_mirror_err = "yes" == params.get("snap_in_mirror_err", "no") with_active_commit = "yes" == params.get("with_active_commit", "no") multiple_chain = "yes" == params.get("multiple_chain", "no") virsh_dargs = {'debug': True} check_snapshot_tree = "yes" == params.get("check_snapshot_tree", "no") # Check whether qemu-img need add -U suboption since locking feature was added afterwards qemu-2.10 qemu_img_locking_feature_support = libvirt_storage.check_qemu_image_lock_support() backing_file_relative_path = "yes" == params.get("backing_file_relative_path", "no") # Process domain disk device parameters disk_type = params.get("disk_type") disk_src_protocol = params.get("disk_source_protocol") restart_tgtd = params.get("restart_tgtd", 'no') vol_name = params.get("vol_name") tmp_dir = data_dir.get_tmp_dir() pool_name = params.get("pool_name", "gluster-pool") brick_path = os.path.join(tmp_dir, pool_name) if not top_inactive: if not libvirt_version.version_compare(1, 2, 4): test.cancel("live active block commit is not supported" " in current libvirt version.") # A backup of original vm vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) # Abort the 
test if there are snapshots already exsiting_snaps = virsh.snapshot_list(vm_name) if len(exsiting_snaps) != 0: test.fail("There are snapshots created for %s already" % vm_name) snapshot_external_disks = [] cmd_session = None # Prepare a blank params to confirm if delete the configure at the end of the test ceph_cfg = '' try: if disk_src_protocol == 'iscsi' and disk_type == 'network': if not libvirt_version.version_compare(1, 0, 4): test.cancel("'iscsi' disk doesn't support in" " current libvirt version.") # Set vm xml and guest agent if replace_vm_disk: if disk_src_protocol == "rbd" and disk_type == "network": src_host = params.get("disk_source_host", "EXAMPLE_HOSTS") mon_host = params.get("mon_host", "EXAMPLE_MON_HOST") # Create config file if it doesn't exist ceph_cfg = ceph.create_config_file(mon_host) if src_host.count("EXAMPLE") or mon_host.count("EXAMPLE"): test.cancel("Please provide rbd host first.") if backing_file_relative_path: if vm.is_alive(): vm.destroy(gracefully=False) first_src_file = get_first_disk_source() blk_source_image = os.path.basename(first_src_file) blk_source_folder = os.path.dirname(first_src_file) replace_disk_image = make_relative_path_backing_files() params.update({'disk_source_name': replace_disk_image, 'disk_type': 'file', 'disk_src_protocol': 'file'}) vm.start() libvirt.set_vm_disk(vm, params, tmp_dir) if needs_agent: vm.prepare_guest_agent() # The first disk is supposed to include OS # We will perform blockcommit operation for it. first_disk = vm.get_first_disk_devices() blk_source = first_disk['source'] blk_target = first_disk['target'] snapshot_flag_files = [] # get a vm session before snapshot session = vm.wait_for_login() # do snapshot postfix_n = 'snap' make_disk_snapshot(postfix_n, snapshot_take, check_snapshot_tree) basename = os.path.basename(blk_source) diskname = basename.split(".")[0] snap_src_lst = [blk_source] if multiple_chain: snap_name = "%s.%s1" % (diskname, postfix_n) snap_top = os.path.join(tmp_dir, snap_name) top_index = snapshot_external_disks.index(snap_top) + 1 omit_list = snapshot_external_disks[top_index:] vm.destroy(gracefully=False) vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disk_xml = '' disk_xmls = vmxml.get_devices(device_type="disk") for disk in disk_xmls: if disk.get('device_tag') == 'disk': disk_xml = disk break vmxml.del_device(disk_xml) disk_dict = {'attrs': {'file': snap_top}} disk_xml.source = disk_xml.new_disk_source(**disk_dict) vmxml.add_device(disk_xml) vmxml.sync() vm.start() session = vm.wait_for_login() postfix_n = 'new_snap' make_disk_snapshot(postfix_n, snapshot_take) snap_src_lst = [blk_source] snap_src_lst += snapshot_external_disks logging.debug("omit list is %s", omit_list) for i in omit_list: snap_src_lst.remove(i) else: # snapshot src file list snap_src_lst += snapshot_external_disks backing_chain = '' for i in reversed(list(range(snapshot_take))): if i == 0: backing_chain += "%s" % snap_src_lst[i] else: backing_chain += "%s -> " % snap_src_lst[i] logging.debug("The backing chain is: %s" % backing_chain) # check snapshot disk xml backingStore is expected vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disks = vmxml.devices.by_device_tag('disk') disk_xml = None for disk in disks: if disk.target['dev'] != blk_target: continue else: if disk.device != 'disk': continue disk_xml = disk.xmltreefile logging.debug("the target disk xml after snapshot is %s", disk_xml) break if not disk_xml: test.fail("Can't find disk xml with target %s" % blk_target) elif libvirt_version.version_compare(1, 2, 4): # 
backingStore element introduced in 1.2.4
            chain_lst = snap_src_lst[::-1]
            ret = check_chain_xml(disk_xml, chain_lst)
            if not ret:
                test.fail("Domain image backing chain check failed")

        # set blockcommit_options
        top_image = None
        blockcommit_options = "--wait --verbose"

        if with_timeout:
            blockcommit_options += " --timeout 1"

        if base_option == "shallow":
            blockcommit_options += " --shallow"
        elif base_option == "base":
            if middle_base:
                snap_name = "%s.%s1" % (diskname, postfix_n)
                blk_source = os.path.join(tmp_dir, snap_name)
            blockcommit_options += " --base %s" % blk_source

        if top_inactive:
            snap_name = "%s.%s2" % (diskname, postfix_n)
            top_image = os.path.join(tmp_dir, snap_name)
            blockcommit_options += " --top %s" % top_image
        else:
            blockcommit_options += " --active"
            if pivot_opt:
                blockcommit_options += " --pivot"

        if vm_state == "shut off":
            vm.destroy(gracefully=True)

        if with_active_commit:
            # an inactive commit that follows an active commit will fail,
            # see bug 1135339
            cmd = "virsh blockcommit %s %s --active --pivot" % (vm_name,
                                                                blk_target)
            cmd_session = aexpect.ShellSession(cmd)

        if backing_file_relative_path:
            blockcommit_options = (" --active --verbose --shallow --pivot "
                                   "--keep-relative")
            block_commit_index = snapshot_take
            expect_backing_file = False
            # Do block commit using --active
            for count in range(1, snapshot_take):
                res = virsh.blockcommit(vm_name, blk_target,
                                        blockcommit_options, **virsh_dargs)
                libvirt.check_exit_status(res, status_error)
            if top_inactive:
                blockcommit_options = (" --wait --verbose --top vda[1] "
                                       "--base vda[2] --keep-relative")
                block_commit_index = snapshot_take - 1
                expect_backing_file = True
            # Do block commit with --wait if top_inactive
            for count in range(1, block_commit_index):
                res = virsh.blockcommit(vm_name, blk_target,
                                        blockcommit_options, **virsh_dargs)
                libvirt.check_exit_status(res, status_error)
            check_chain_backing_files(blk_source_image, expect_backing_file)
            return

        # Run test case
        # Active commit is not supported on rbd-based disks, see bug 1200726
        result = virsh.blockcommit(vm_name, blk_target,
                                   blockcommit_options, **virsh_dargs)

        # Check status_error
        libvirt.check_exit_status(result, status_error)
        if result.exit_status and status_error:
            return

        while True:
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)

            disks = vmxml.devices.by_device_tag('disk')
            for disk in disks:
                if disk.target['dev'] != blk_target:
                    continue
                else:
                    disk_xml = disk.xmltreefile
                    break

            if not top_inactive:
                disk_mirror = disk_xml.find('mirror')
                if '--pivot' not in blockcommit_options:
                    if disk_mirror is not None:
                        job_type = disk_mirror.get('job')
                        job_ready = disk_mirror.get('ready')
                        src_element = disk_mirror.find('source')
                        disk_src_file = None
                        for elem in ('file', 'name', 'dev'):
                            elem_val = src_element.get(elem)
                            if elem_val:
                                disk_src_file = elem_val
                                break
                        err_msg = "blockcommit base source "
                        err_msg += "%s not expected" % disk_src_file
                        if '--shallow' in blockcommit_options:
                            if not multiple_chain:
                                if disk_src_file != snap_src_lst[2]:
                                    test.fail(err_msg)
                            else:
                                if disk_src_file != snap_src_lst[3]:
                                    test.fail(err_msg)
                        else:
                            if disk_src_file != blk_source:
                                test.fail(err_msg)
                        if libvirt_version.version_compare(1, 2, 7):
                            # The job attribute mentions which API started
                            # the operation since 1.2.7.
                            if job_type != 'active-commit':
                                test.fail("blockcommit job type '%s'"
                                          " not expected" % job_type)
                            if job_ready != 'yes':
                                # The attribute ready, if present, tracks
                                # progress of the job: yes if the disk is
                                # known to be ready to pivot, or, since
                                # 1.2.7, abort or pivot if the job is in
                                # the process of completing.
                                continue
                            else:
                                logging.debug("after active block commit job "
                                              "ready for pivot, the target disk"
                                              " xml is %s", disk_xml)
                                break
                        else:
                            break
                    else:
                        break
                else:
                    if disk_mirror is None:
                        logging.debug(disk_xml)
                        if "--shallow" in blockcommit_options:
                            chain_lst = snap_src_lst[::-1]
                            chain_lst.pop(0)
                            ret = check_chain_xml(disk_xml, chain_lst)
                            if not ret:
                                test.fail("Domain image backing "
                                          "chain check failed")
                            cmd_result = virsh.blockjob(vm_name, blk_target, '',
                                                        ignore_status=True,
                                                        debug=True)
                            libvirt.check_exit_status(cmd_result)
                        elif "--base" in blockcommit_options:
                            chain_lst = snap_src_lst[::-1]
                            base_index = chain_lst.index(blk_source)
                            chain_lst = chain_lst[base_index:]
                            ret = check_chain_xml(disk_xml, chain_lst)
                            if not ret:
                                test.fail("Domain image backing "
                                          "chain check failed")
                        break
                    else:
                        # wait for the pivot to complete after the commit
                        continue
            else:
                logging.debug("after inactive commit the disk xml is: %s"
                              % disk_xml)
                if libvirt_version.version_compare(1, 2, 4):
                    if "--shallow" in blockcommit_options:
                        chain_lst = snap_src_lst[::-1]
                        chain_lst.remove(top_image)
                        ret = check_chain_xml(disk_xml, chain_lst)
                        if not ret:
                            test.fail("Domain image backing chain "
                                      "check failed")
                    elif "--base" in blockcommit_options:
                        chain_lst = snap_src_lst[::-1]
                        top_index = chain_lst.index(top_image)
                        base_index = chain_lst.index(blk_source)
                        val_tmp = []
                        for i in range(top_index, base_index):
                            val_tmp.append(chain_lst[i])
                        for i in val_tmp:
                            chain_lst.remove(i)
                        ret = check_chain_xml(disk_xml, chain_lst)
                        if not ret:
                            test.fail("Domain image backing chain "
                                      "check failed")
                    break
                else:
                    break

        # Check flag files
        if not vm_state == "shut off" and not multiple_chain:
            for flag in snapshot_flag_files:
                status, output = session.cmd_status_output("cat %s" % flag)
                if status:
                    test.fail("blockcommit failed: %s" % output)

        if not pivot_opt and snap_in_mirror:
            # do snapshot during mirror phase
            snap_path = "%s/%s.snap" % (tmp_dir, vm_name)
            snap_opt = "--disk-only --atomic --no-metadata "
            snap_opt += "vda,snapshot=external,file=%s" % snap_path
            snapshot_external_disks.append(snap_path)
            cmd_result = virsh.snapshot_create_as(vm_name, snap_opt,
                                                  ignore_status=True,
                                                  debug=True)
            libvirt.check_exit_status(cmd_result, snap_in_mirror_err)

    finally:
        # Remove ceph configure file if created
        if ceph_cfg:
            os.remove(ceph_cfg)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Recover xml of vm.
        vmxml_backup.sync("--snapshots-metadata")

        # Clean ceph image if used in test
        if 'mon_host' in locals():
            if utils_package.package_install(["ceph-common"]):
                disk_source_name = params.get("disk_source_name")
                cmd = ("rbd -m {0} info {1} && rbd -m {0} rm "
                       "{1}".format(mon_host, disk_source_name))
                cmd_result = process.run(cmd, ignore_status=True, shell=True)
                logging.debug("result of rbd removal: %s", cmd_result)
            else:
                logging.debug('Failed to install ceph-common to clean ceph.')

        if cmd_session:
            cmd_session.close()
        for disk in snapshot_external_disks:
            if os.path.exists(disk):
                os.remove(disk)

        if backing_file_relative_path:
            libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)
            process.run("cd %s && rm -rf b c d" % blk_source_folder,
                        shell=True)

        if disk_src_protocol == 'iscsi':
            libvirt.setup_or_cleanup_iscsi(is_setup=False,
                                           restart_tgtd=restart_tgtd)
        elif disk_src_protocol == 'gluster':
            libvirt.setup_or_cleanup_gluster(False, vol_name, brick_path)
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
        elif disk_src_protocol == 'netfs':
            restore_selinux = params.get('selinux_status_bak')
            libvirt.setup_or_cleanup_nfs(is_setup=False,
                                         restore_selinux=restore_selinux)
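
# The while-loop above boils down to polling the disk's <mirror> element
# until the active-commit job reports it is ready to pivot. A condensed
# sketch of that check (assumes an ElementTree Element for the <disk> node;
# not the test's actual control flow):
def mirror_ready_to_pivot(disk_elem):
    """True when the active commit mirror reports ready='yes'."""
    mirror = disk_elem.find('mirror')
    return mirror is not None and mirror.get('ready') == 'yes'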
# keep .ssh/authorized_keys for NFS cleanup later seLinuxBool.cleanup(True) if nfs_client: logging.info("Cleanup NFS client environment...") nfs_client.cleanup() logging.info("Remove the NFS image...") source_file = params.get("source_file") libvirt.delete_local_disk("file", path=source_file) logging.info("Cleanup NFS server environment...") exp_dir = params.get("export_dir") mount_dir = params.get("mnt_path_name") libvirt.setup_or_cleanup_nfs(False, export_dir=exp_dir, mount_dir=mount_dir, restore_selinux=local_selinux_bak) if exception: raise error.TestError( "Error occurred. \n%s: %s" % (detail.__class__, detail)) # Check test result. if status_error == 'yes': if ret_migrate: raise error.TestFail("Migration finished with unexpected status.") else: if not ret_migrate: raise error.TestFail("Migration finished with unexpected status.") if not check_dest_state: raise error.TestFail("Wrong VM state on destination.") if not check_dest_persistent:
def run(test, params, env): """ Test command: virsh blockcopy. This command can copy a disk backing image chain to dest. 1. Positive testing 1.1 Copy a disk to a new image file. 1.2 Reuse existing destination copy. 1.3 Valid blockcopy timeout and bandwidth test. 2. Negative testing 2.1 Copy a disk to a non-existent directory. 2.2 Copy a disk with invalid options. 2.3 Do block copy for a persistent domain. """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) target = params.get("target_disk", "") replace_vm_disk = "yes" == params.get("replace_vm_disk", "no") disk_source_protocol = params.get("disk_source_protocol") disk_type = params.get("disk_type") pool_name = params.get("pool_name") image_size = params.get("image_size") emu_image = params.get("emulated_image") copy_to_nfs = "yes" == params.get("copy_to_nfs", "no") mnt_path_name = params.get("mnt_path_name") options = params.get("blockcopy_options", "") bandwidth = params.get("blockcopy_bandwidth", "") bandwidth_byte = "yes" == params.get("bandwidth_byte", "no") reuse_external = "yes" == params.get("reuse_external", "no") persistent_vm = params.get("persistent_vm", "no") status_error = "yes" == params.get("status_error", "no") active_error = "yes" == params.get("active_error", "no") active_snap = "yes" == params.get("active_snap", "no") active_save = "yes" == params.get("active_save", "no") check_state_lock = "yes" == params.get("check_state_lock", "no") check_finish_job = "yes" == params.get("check_finish_job", "yes") with_shallow = "yes" == params.get("with_shallow", "no") with_blockdev = "yes" == params.get("with_blockdev", "no") setup_libvirt_polkit = "yes" == params.get('setup_libvirt_polkit') bug_url = params.get("bug_url", "") timeout = int(params.get("timeout", 1200)) relative_path = params.get("relative_path") rerun_flag = 0 blkdev_n = None back_n = 'blockdev-backing-iscsi' snapshot_external_disks = [] snapshots_take = int(params.get("snapshots_take", '0')) external_disk_only_snapshot = "yes" == params.get( "external_disk_only_snapshot", "no") enable_iscsi_auth = "yes" == params.get("enable_iscsi_auth", "no") # Skip/Fail early if with_blockdev and not libvirt_version.version_compare(1, 2, 13): raise exceptions.TestSkipError("--blockdev option not supported in " "current version") if not target: raise exceptions.TestSkipError("Require target disk to copy") if setup_libvirt_polkit and not libvirt_version.version_compare(1, 1, 1): raise exceptions.TestSkipError("API acl test not supported in current" " libvirt version") if copy_to_nfs and not libvirt_version.version_compare(1, 1, 1): raise exceptions.TestSkipError("Bug will not fix: %s" % bug_url) if bandwidth_byte and not libvirt_version.version_compare(1, 3, 3): raise exceptions.TestSkipError("--bytes option not supported in " "current version") if relative_path == "yes" and not libvirt_version.version_compare(3, 0, 0): test.cancel( "Forbidding relative path or file name only is added since libvirt-3.0.0" ) if "--transient-job" in options and not libvirt_version.version_compare( 4, 5, 0): test.cancel( "--transient-job option is not supported until libvirt 4.5.0") # Check the source disk if vm_xml.VMXML.check_disk_exist(vm_name, target): logging.debug("Found %s in domain %s", target, vm_name) else: raise exceptions.TestFail("Can't find %s in domain %s" % (target, vm_name)) original_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) tmp_dir = data_dir.get_tmp_dir() # Prepare dest path params dest_path = params.get("dest_path", "") dest_format = params.get("dest_format",
"") # Ugh... this piece of chicanery brought to you by the QemuImg which # will "add" the 'dest_format' extension during the check_format code. # So if we create the file with the extension and then remove it when # doing the check_format later, then we avoid erroneous failures. dest_extension = "" if dest_format != "": dest_extension = ".%s" % dest_format # Prepare for --reuse-external option if reuse_external: options += "--reuse-external --wait" # Set rerun_flag=1 to do blockcopy twice, and the first time created # file can be reused in the second time if no dest_path given # This will make sure the image size equal to original disk size if dest_path == "/path/non-exist": if os.path.exists(dest_path) and not os.path.isdir(dest_path): os.remove(dest_path) else: rerun_flag = 1 # Prepare other options if dest_format == "raw": options += " --raw" if with_blockdev: options += " --blockdev" if len(bandwidth): options += " --bandwidth %s" % bandwidth if bandwidth_byte: options += " --bytes" if with_shallow: options += " --shallow" # Prepare acl options uri = params.get("virsh_uri") unprivileged_user = params.get('unprivileged_user') if unprivileged_user: if unprivileged_user.count('EXAMPLE'): unprivileged_user = '******' extra_dict = { 'uri': uri, 'unprivileged_user': unprivileged_user, 'debug': True, 'ignore_status': True, 'timeout': timeout } libvirtd_utl = utils_libvirtd.Libvirtd() libvirtd_log_path = os.path.join(data_dir.get_tmp_dir(), "libvirt_daemons.log") libvirtd_conf_dict = { "log_filter": '"3:json 1:libvirt 1:qemu"', "log_outputs": '"1:file:%s"' % libvirtd_log_path } logging.debug("the libvirtd conf file content is :\n %s" % libvirtd_conf_dict) libvirtd_conf = utl.customize_libvirt_config(libvirtd_conf_dict) def check_format(dest_path, dest_extension, expect): """ Check the image format :param dest_path: Path of the copy to create :param expect: Expect image format """ # And now because the QemuImg will add the extension for us # we have to remove it here. path_noext = dest_path.strip(dest_extension) params['image_name'] = path_noext params['image_format'] = expect image = qemu_storage.QemuImg(params, "/", path_noext) if image.get_format() == expect: logging.debug("%s format is %s", dest_path, expect) else: raise exceptions.TestFail("%s format is not %s" % (dest_path, expect)) def _blockjob_and_libvirtd_chk(cmd_result): """ Raise TestFail when blockcopy fail with block-job-complete error or blockcopy hang with state change lock. This is a specific bug verify, so ignore status_error here. """ failure_msg = "" err_msg = "internal error: unable to execute QEMU command" err_msg += " 'block-job-complete'" if err_msg in cmd_result.stderr: failure_msg += "Virsh cmd error happened: %s\n" % err_msg err_pattern = "Timed out during operation: cannot acquire" err_pattern += " state change lock" ret = chk_libvirtd_log(libvirtd_log_path, err_pattern, "error") if ret: failure_msg += "Libvirtd log error happened: %s\n" % err_pattern if failure_msg: if not libvirt_version.version_compare(1, 3, 2): bug_url_ = "https://bugzilla.redhat.com/show_bug.cgi?id=1197592" failure_msg += "Hit on bug: %s " % bug_url_ test.fail(failure_msg) def _make_snapshot(snapshot_numbers_take): """ Make external disk snapshot :param snapshot_numbers_take: snapshot numbers. """ for count in range(0, snapshot_numbers_take): snap_xml = snapshot_xml.SnapshotXML() snapshot_name = "blockcopy_snap" snap_xml.snap_name = snapshot_name + "_%s" % count snap_xml.description = "blockcopy snapshot" # Add all disks into xml file. 
vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) disks = vmxml.devices.by_device_tag('disk') # Remove non-storage disks such as 'cdrom'; don't remove items from # the list while iterating over it disks = [disk for disk in disks if disk.device == 'disk'] new_disks = [] src_disk_xml = disks[0] disk_xml = snap_xml.SnapDiskXML() disk_xml.xmltreefile = src_disk_xml.xmltreefile del disk_xml.device del disk_xml.address disk_xml.snapshot = "external" disk_xml.disk_name = disk_xml.target['dev'] # Only qcow2 works as external snapshot file format, update it # here driver_attr = disk_xml.driver driver_attr.update({'type': 'qcow2'}) disk_xml.driver = driver_attr new_attrs = disk_xml.source.attrs if 'file' in disk_xml.source.attrs: new_file = os.path.join(tmp_dir, "blockcopy_shallow_%s.snap" % count) snapshot_external_disks.append(new_file) new_attrs.update({'file': new_file}) hosts = None elif ('dev' in disk_xml.source.attrs or 'name' in disk_xml.source.attrs or 'pool' in disk_xml.source.attrs): if (disk_xml.type_name == 'block' or disk_source_protocol == 'iscsi'): disk_xml.type_name = 'block' if 'name' in new_attrs: del new_attrs['name'] del new_attrs['protocol'] elif 'pool' in new_attrs: del new_attrs['pool'] del new_attrs['volume'] del new_attrs['mode'] back_path = utl.setup_or_cleanup_iscsi( is_setup=True, is_login=True, image_size="1G", emulated_image=back_n) emulated_iscsi.append(back_n) cmd = "qemu-img create -f qcow2 %s 1G" % back_path process.run(cmd, shell=True) new_attrs.update({'dev': back_path}) hosts = None new_src_dict = {"attrs": new_attrs} if hosts: new_src_dict.update({"hosts": hosts}) disk_xml.source = disk_xml.new_disk_source(**new_src_dict) new_disks.append(disk_xml) snap_xml.set_disks(new_disks) snapshot_xml_path = snap_xml.xml logging.debug("The snapshot xml is: %s" % snap_xml.xmltreefile) options = "--disk-only --xmlfile %s " % snapshot_xml_path snapshot_result = virsh.snapshot_create(vm_name, options, debug=True) if snapshot_result.exit_status != 0: raise exceptions.TestFail(snapshot_result.stderr) snap_path = '' save_path = '' emulated_iscsi = [] nfs_cleanup = False try: # Prepare dest_path tmp_file = time.strftime("%Y-%m-%d-%H.%M.%S.img") tmp_file += dest_extension if not dest_path: if enable_iscsi_auth: utils_secret.clean_up_secrets() setup_auth_enabled_iscsi_disk(vm, params) dest_path = os.path.join(tmp_dir, tmp_file) elif with_blockdev: blkdev_n = 'blockdev-iscsi' dest_path = utl.setup_or_cleanup_iscsi(is_setup=True, is_login=True, image_size=image_size, emulated_image=blkdev_n) emulated_iscsi.append(blkdev_n) # Make sure the new disk shows up utils_misc.wait_for(lambda: os.path.exists(dest_path), 5) else: if copy_to_nfs: tmp_dir = "%s/%s" % (tmp_dir, mnt_path_name) dest_path = os.path.join(tmp_dir, tmp_file) # Domain disk replacement with desired type if replace_vm_disk: # Calling 'set_vm_disk' is a bad idea as it leaves lots of cleanup jobs # after the test, such as pool, volume, nfs, iscsi and so on # TODO: remove this function in the future if disk_source_protocol == 'iscsi': emulated_iscsi.append(emu_image) if disk_source_protocol == 'netfs': nfs_cleanup = True utl.set_vm_disk(vm, params, tmp_dir, test) new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) if with_shallow or external_disk_only_snapshot or enable_iscsi_auth: _make_snapshot(snapshots_take) # Prepare transient/persistent vm if persistent_vm == "no" and vm.is_persistent(): vm.undefine() elif persistent_vm == "yes" and not vm.is_persistent(): new_xml.define() # Run blockcopy command to create destination file if rerun_flag == 1: options1 = "--wait %s
--finish --verbose" % dest_format if with_blockdev: options1 += " --blockdev" if with_shallow: options1 += " --shallow" cmd_result = virsh.blockcopy(vm_name, target, dest_path, options1, **extra_dict) status = cmd_result.exit_status if status != 0: raise exceptions.TestFail("Run blockcopy command fail: %s" % cmd_result.stdout.strip() + cmd_result.stderr) elif not os.path.exists(dest_path): raise exceptions.TestFail("Cannot find the created copy") if "--transient-job" in options: pool = ThreadPool(processes=1) async_result = pool.apply_async( blockcopy_thread, (vm_name, target, dest_path, options)) kill_blockcopy_process() utl.check_blockjob(vm_name, target) return # Run the real testing command cmd_result = virsh.blockcopy(vm_name, target, dest_path, options, **extra_dict) # check BZ#1197592 _blockjob_and_libvirtd_chk(cmd_result) status = cmd_result.exit_status if not libvirtd_utl.is_running(): raise exceptions.TestFail("Libvirtd service is dead") if not status_error: if status == 0: ret = utils_misc.wait_for( lambda: check_xml(vm_name, target, dest_path, options), 5) if not ret: raise exceptions.TestFail("Domain xml not expected after" " blockcopy") if options.count("--bandwidth"): if options.count('--bytes'): bandwidth += 'B' else: bandwidth += 'M' if not (bandwidth in ['0B', '0M']) and not utl.check_blockjob( vm_name, target, "bandwidth", bandwidth): raise exceptions.TestFail("Check bandwidth failed") val = options.count("--pivot") + options.count("--finish") # Don't wait for job finish when using --byte option val += options.count('--bytes') if val == 0 and check_finish_job: try: finish_job(vm_name, target, timeout) except JobTimeout as excpt: raise exceptions.TestFail("Run command failed: %s" % excpt) if options.count("--raw") and not with_blockdev: check_format(dest_path, dest_extension, dest_format) if active_snap: snap_path = "%s/%s.snap" % (tmp_dir, vm_name) snap_opt = "--disk-only --atomic --no-metadata " snap_opt += "vda,snapshot=external,file=%s" % snap_path ret = virsh.snapshot_create_as(vm_name, snap_opt, ignore_status=True, debug=True) utl.check_exit_status(ret, active_error) if active_save: save_path = "%s/%s.save" % (tmp_dir, vm_name) ret = virsh.save(vm_name, save_path, ignore_status=True, debug=True) utl.check_exit_status(ret, active_error) if check_state_lock: # Run blockjob pivot in subprocess as it will hang # for a while, run blockjob info again to check # job state command = "virsh blockjob %s %s --pivot" % (vm_name, target) session = aexpect.ShellSession(command) ret = virsh.blockjob(vm_name, target, "--info") err_info = "cannot acquire state change lock" if err_info in ret.stderr: raise exceptions.TestFail("Hit on bug: %s" % bug_url) utl.check_exit_status(ret, status_error) session.close() else: raise exceptions.TestFail(cmd_result.stdout.strip() + cmd_result.stderr) else: if status: logging.debug("Expect error: %s", cmd_result.stderr) else: # Commit id '4c297728' changed how virsh exits when # unexpectedly failing due to timeout from a fail (1) # to a success(0), so we need to look for a different # marker to indicate the copy aborted. As "stdout: Now # in mirroring phase" could be in stdout which fail the # check, so also do check in libvirtd log to confirm. 
if options.count("--timeout") and options.count("--wait"): log_pattern = "Copy aborted" if (re.search(log_pattern, cmd_result.stdout.strip()) or chk_libvirtd_log(libvirtd_log_path, log_pattern, "debug")): logging.debug("Found success a timed out block copy") else: raise exceptions.TestFail("Expect fail, but run " "successfully: %s" % bug_url) finally: # Recover VM may fail unexpectedly, we need using try/except to # proceed the following cleanup steps try: # Abort exist blockjob to avoid any possible lock error virsh.blockjob(vm_name, target, '--abort', ignore_status=True) vm.destroy(gracefully=False) # It may take a long time to shutdown the VM which has # blockjob running utils_misc.wait_for( lambda: virsh.domstate(vm_name, ignore_status=True). exit_status, 180) if virsh.domain_exists(vm_name): if active_snap or with_shallow: option = "--snapshots-metadata" else: option = None original_xml.sync(option) else: original_xml.define() except Exception as e: logging.error(e) for disk in snapshot_external_disks: if os.path.exists(disk): os.remove(disk) # Clean up libvirt pool, which may be created by 'set_vm_disk' if disk_type == 'volume': virsh.pool_destroy(pool_name, ignore_status=True, debug=True) # Restore libvirtd conf and restart libvirtd libvirtd_conf.restore() libvirtd_utl.restart() if libvirtd_log_path and os.path.exists(libvirtd_log_path): os.unlink(libvirtd_log_path) # Clean up NFS try: if nfs_cleanup: utl.setup_or_cleanup_nfs(is_setup=False) except Exception as e: logging.error(e) # Clean up iSCSI try: for iscsi_n in list(set(emulated_iscsi)): utl.setup_or_cleanup_iscsi(is_setup=False, emulated_image=iscsi_n) # iscsid will be restarted, so give it a break before next loop time.sleep(5) except Exception as e: logging.error(e) if os.path.exists(dest_path): os.remove(dest_path) if os.path.exists(snap_path): os.remove(snap_path) if os.path.exists(save_path): os.remove(save_path) # Restart virtlogd service to release VM log file lock try: path.find_command('virtlogd') process.run('systemctl reset-failed virtlogd') process.run('systemctl restart virtlogd ') except path.CmdNotFoundError: pass
def run(test, params, env): """ Test virsh migrate-setmaxdowntime command. 1) Prepare migration environment 2) Start migration and set migrate-maxdowntime 3) Cleanup environment(migrated vm on destination) 4) Check result """ dest_uri = params.get( "virsh_migrate_dest_uri", "qemu+ssh://MIGRATE_EXAMPLE/system") src_uri = params.get( "virsh_migrate_src_uri", "qemu+ssh://MIGRATE_EXAMPLE/system") if dest_uri.count('///') or dest_uri.count('MIGRATE_EXAMPLE'): raise error.TestNAError("Set your destination uri first.") if src_uri.count('MIGRATE_EXAMPLE'): raise error.TestNAError("Set your source uri first.") if src_uri == dest_uri: raise error.TestNAError("You should not set dest uri same as local.") vm_ref = params.get("setmmdt_vm_ref", "domname") pre_vm_state = params.get("pre_vm_state", "running") status_error = "yes" == params.get("status_error", "no") do_migrate = "yes" == params.get("do_migrate", "yes") migrate_maxdowntime = params.get("migrate_maxdowntime", 1.000) if (migrate_maxdowntime == ""): downtime = "" else: downtime = int(float(migrate_maxdowntime)) * 1000 extra = params.get("setmmdt_extra") # A delay between threads delay_time = int(params.get("delay_time", 1)) # timeout of threads thread_timeout = 180 vm_name = params.get("migrate_main_vm") vm = env.get_vm(vm_name) domuuid = vm.get_uuid() grep_str_local = params.get("grep_str_from_local_libvirt_log", "") # For safety reasons, we'd better back up original guest xml orig_config_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) if not orig_config_xml: raise error.TestError("Backing up xmlfile failed.") # Params to configure libvirtd.conf log_file = "/var/log/libvirt/libvirtd.log" log_level = "1" log_filters = '"1:json 1:libvirt 1:qemu 1:monitor 3:remote 4:event"' libvirtd_conf_dict = {"log_level": log_level, "log_filters": log_filters, "log_outputs": '"%s:file:%s"' % (log_level, log_file)} # Update libvirtd config with new parameters libvirtd = utils_libvirtd.Libvirtd() libvirtd_conf = config_libvirt(libvirtd_conf_dict) libvirtd.restart() # Params to update disk using shared storage params["disk_type"] = "file" params["disk_source_protocol"] = "netfs" params["mnt_path_name"] = params.get("nfs_mount_dir") # Params to setup SSH connection params["server_ip"] = params.get("migrate_dest_host") params["server_pwd"] = params.get("migrate_dest_pwd") params["client_ip"] = params.get("migrate_source_host") params["client_pwd"] = params.get("migrate_source_pwd") params["nfs_client_ip"] = params.get("migrate_dest_host") params["nfs_server_ip"] = params.get("migrate_source_host") # Params to enable SELinux boolean on remote host params["remote_boolean_varible"] = "virt_use_nfs" params["remote_boolean_value"] = "on" params["set_sebool_remote"] = "yes" remote_host = params.get("migrate_dest_host") username = params.get("migrate_dest_user", "root") password = params.get("migrate_dest_pwd") # Config ssh autologin for remote host ssh_key.setup_ssh_key(remote_host, username, password, port=22) setmmdt_dargs = {'debug': True, 'ignore_status': True, 'uri': src_uri} migrate_dargs = {'debug': True, 'ignore_status': True} seLinuxBool = None nfs_client = None local_selinux_bak = "" try: # Update the disk using shared storage libvirt.set_vm_disk(vm, params) # Backup the SELinux status on local host for recovering local_selinux_bak = params.get("selinux_status_bak", "") # Configure NFS client on remote host nfs_client = nfs.NFSClient(params) nfs_client.setup() logging.info("Enable virt NFS SELinux boolean on target host") seLinuxBool = 
utils_misc.SELinuxBoolean(params) seLinuxBool.setup() if not vm.is_alive(): vm.start() vm.wait_for_login() domid = vm.get_id() # Confirm how to reference a VM. if vm_ref == "domname": vm_ref = vm_name elif vm_ref == "domid": vm_ref = domid elif vm_ref == "domuuid": vm_ref = domuuid # Prepare vm state if pre_vm_state == "paused": vm.pause() elif pre_vm_state == "shutoff": vm.destroy(gracefully=False) # Ensure VM in 'shut off' status utils_misc.wait_for(lambda: vm.state() == "shut off", 30) # Set max migration downtime must be during migration # Using threads for synchronization threads = [] if do_migrate: threads.append(threading.Thread(target=thread_func_live_migration, args=(vm, dest_uri, migrate_dargs))) threads.append(threading.Thread(target=thread_func_setmmdt, args=(vm_ref, downtime, extra, setmmdt_dargs))) for thread in threads: thread.start() # Migration must be executing before setting maxdowntime time.sleep(delay_time) # Wait until thread is over for thread in threads: thread.join(thread_timeout) if (status_error is False or do_migrate is False): logging.debug("To match the expected pattern '%s' ...", grep_str_local) cmd = "grep -E '%s' %s" % (grep_str_local, log_file) cmdResult = process.run(cmd, shell=True, verbose=False) logging.debug(cmdResult) finally: # Clean up. if do_migrate: logging.debug("Cleanup VM on remote host...") cleanup_dest(vm, src_uri, dest_uri) if orig_config_xml: logging.debug("Recover VM XML...") orig_config_xml.sync() if seLinuxBool: logging.info("Recover NFS SELinux boolean on remote host...") seLinuxBool.cleanup(True) if nfs_client: logging.info("Cleanup NFS client environment...") nfs_client.cleanup() logging.info("Remove the NFS image...") source_file = params.get("source_file") libvirt.delete_local_disk("file", path=source_file) logging.info("Cleanup NFS server environment...") exp_dir = params.get("export_dir") mount_dir = params.get("mnt_path_name") libvirt.setup_or_cleanup_nfs(False, export_dir=exp_dir, mount_dir=mount_dir, restore_selinux=local_selinux_bak) # Recover libvirtd service configuration on local if libvirtd_conf: logging.debug("Recover local libvirtd configuration...") libvirtd_conf.restore() libvirtd.restart() os.remove(log_file) # Check results. if status_error: if ret_setmmdt: if not do_migrate and libvirt_version.version_compare(1, 2, 9): # https://bugzilla.redhat.com/show_bug.cgi?id=1146618 # Commit fe808d9 fix it and allow setting migration # max downtime any time since libvirt-1.2.9 logging.info("Libvirt version is newer than 1.2.9," "Allow set maxdowntime while VM isn't migrating") else: raise error.TestFail("virsh migrate-setmaxdowntime succeed " "but not expected.") else: if do_migrate and not ret_migration: raise error.TestFail("Migration failed.") if not ret_setmmdt: raise error.TestFail("virsh migrate-setmaxdowntime failed.")
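# thread_func_live_migration and thread_func_setmmdt are module-level helpers
# (defined outside this excerpt) that report their results through the
# ret_migration and ret_setmmdt globals read in the result checks above. A
# rough sketch of that pattern; the exact virsh wrapper signatures are
# assumptions, not the suite's actual implementation:
ret_migration = False
ret_setmmdt = False

def thread_func_live_migration(vm, dest_uri, dargs):
    """Run live migration in a worker thread and record whether it passed."""
    global ret_migration
    res = virsh.migrate(vm.name, dest_uri, option="--live", **dargs)
    ret_migration = (res.exit_status == 0)

def thread_func_setmmdt(domain, downtime, extra, dargs):
    """Set the max migration downtime while the migration is running."""
    global ret_setmmdt
    res = virsh.migrate_setmaxdowntime(domain, downtime, extra=extra, **dargs)
    ret_setmmdt = (res.exit_status == 0)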
def run(test, params, env): """ Test command: virsh blockpull <domain> <path> 1) Prepare test environment. 2) Populate a disk from its backing image. 3) Recover test environment. 4) Check result. """ def make_disk_snapshot(snapshot_take): """ Make external snapshots for disks only. :param snapshot_take: snapshots taken. """ for count in range(1, snapshot_take + 1): snap_xml = snapshot_xml.SnapshotXML() snapshot_name = "snapshot_test%s" % count snap_xml.snap_name = snapshot_name snap_xml.description = "Snapshot Test %s" % count # Add all disks into xml file. vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) disks = vmxml.devices.by_device_tag('disk') new_disks = [] for src_disk_xml in disks: disk_xml = snap_xml.SnapDiskXML() disk_xml.xmltreefile = src_disk_xml.xmltreefile # Skip cdrom if disk_xml.device == "cdrom": continue del disk_xml.device del disk_xml.address disk_xml.snapshot = "external" disk_xml.disk_name = disk_xml.target['dev'] # Only qcow2 works as external snapshot file format, update it # here driver_attr = disk_xml.driver driver_attr.update({'type': 'qcow2'}) disk_xml.driver = driver_attr new_attrs = disk_xml.source.attrs if 'file' in disk_xml.source.attrs: file_name = disk_xml.source.attrs['file'] new_file = "%s.snap%s" % (file_name.split('.')[0], count) snapshot_external_disks.append(new_file) new_attrs.update({'file': new_file}) hosts = None elif ('name' in disk_xml.source.attrs and disk_src_protocol == 'gluster'): src_name = disk_xml.source.attrs['name'] new_name = "%s.snap%s" % (src_name.split('.')[0], count) new_attrs.update({'name': new_name}) snapshot_external_disks.append(new_name) hosts = disk_xml.source.hosts elif ('dev' in disk_xml.source.attrs or 'name' in disk_xml.source.attrs): if (disk_xml.type_name == 'block' or disk_src_protocol in ['iscsi', 'rbd']): # Use local file as external snapshot target for block # and iscsi network type. # As block device will be treat as raw format by # default, it's not fit for external disk snapshot # target. A work around solution is use qemu-img again # with the target. # And external active snapshots are not supported on # 'network' disks using 'iscsi' protocol disk_xml.type_name = 'file' if 'dev' in new_attrs: del new_attrs['dev'] elif 'name' in new_attrs: del new_attrs['name'] del new_attrs['protocol'] new_file = "%s/blk_src_file.snap%s" % (tmp_dir, count) snapshot_external_disks.append(new_file) new_attrs.update({'file': new_file}) hosts = None new_src_dict = {"attrs": new_attrs} if hosts: new_src_dict.update({"hosts": hosts}) disk_xml.source = disk_xml.new_disk_source(**new_src_dict) new_disks.append(disk_xml) snap_xml.set_disks(new_disks) snapshot_xml_path = snap_xml.xml logging.debug("The snapshot xml is: %s" % snap_xml.xmltreefile) options = "--disk-only --xmlfile %s " % snapshot_xml_path snapshot_result = virsh.snapshot_create( vm_name, options, debug=True) if snapshot_result.exit_status != 0: test.fail(snapshot_result.stderr) # Create a file flag in VM after each snapshot flag_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"), dir="/tmp") file_path = flag_file.name flag_file.close() status, output = session.cmd_status_output("touch %s" % file_path) if status: test.fail("Touch file in vm failed. %s" % output) snapshot_flag_files.append(file_path) def get_first_disk_source(): """ Get disk source of first device :return: first disk of first device. 
""" first_device = vm.get_first_disk_devices() firt_disk_src = first_device['source'] return firt_disk_src def make_relative_path_backing_files(): """ Create backing chain files of relative path. :return: absolute path of top active file """ first_disk_source = get_first_disk_source() basename = os.path.basename(first_disk_source) root_dir = os.path.dirname(first_disk_source) cmd = "mkdir -p %s" % os.path.join(root_dir, '{b..d}') ret = process.run(cmd, shell=True) libvirt.check_exit_status(ret) # Make three external relative path backing files. backing_file_dict = collections.OrderedDict() backing_file_dict["b"] = "../%s" % basename backing_file_dict["c"] = "../b/b.img" backing_file_dict["d"] = "../c/c.img" for key, value in list(backing_file_dict.items()): backing_file_path = os.path.join(root_dir, key) cmd = ("cd %s && qemu-img create -f qcow2 -o backing_file=%s,backing_fmt=qcow2 %s.img" % (backing_file_path, value, key)) ret = process.run(cmd, shell=True) libvirt.check_exit_status(ret) return os.path.join(backing_file_path, "d.img") def check_chain_backing_files(disk_src_file, expect_backing_file=False): """ Check backing chain files of relative path after blockcommit. :param disk_src_file: first disk src file. :param expect_backing_file: whether it expect to have backing files. """ first_disk_source = get_first_disk_source() # Validate source image need refer to original one after active blockcommit if not expect_backing_file and disk_src_file not in first_disk_source: test.fail("The disk image path:%s doesn't include the origin image: %s" % (first_disk_source, disk_src_file)) # Validate source image doesn't have backing files after active blockcommit cmd = "qemu-img info %s --backing-chain" % first_disk_source if qemu_img_locking_feature_support: cmd = "qemu-img info -U %s --backing-chain" % first_disk_source ret = process.run(cmd, shell=True).stdout_text.strip() if expect_backing_file: if 'backing file' not in ret: test.fail("The disk image doesn't have backing files") else: logging.debug("The actual qemu-img output:%s\n", ret) else: if 'backing file' in ret: test.fail("The disk image still have backing files") else: logging.debug("The actual qemu-img output:%s\n", ret) # MAIN TEST CODE ### # Process cartesian parameters vm_name = params.get("main_vm") vm = env.get_vm(vm_name) snapshot_take = int(params.get("snapshot_take", '0')) needs_agent = "yes" == params.get("needs_agent", "yes") replace_vm_disk = "yes" == params.get("replace_vm_disk", "no") snap_in_mirror = "yes" == params.get("snap_in_mirror", 'no') snap_in_mirror_err = "yes" == params.get("snap_in_mirror_err", 'no') bandwidth = params.get("bandwidth", None) with_timeout = ("yes" == params.get("with_timeout_option", "no")) status_error = ("yes" == params.get("status_error", "no")) base_option = params.get("base_option", None) keep_relative = "yes" == params.get("keep_relative", 'no') virsh_dargs = {'debug': True} # Check whether qemu-img need add -U suboption since locking feature was added afterwards qemu-2.10 qemu_img_locking_feature_support = libvirt_storage.check_qemu_image_lock_support() backing_file_relative_path = "yes" == params.get("backing_file_relative_path", "no") # Process domain disk device parameters disk_type = params.get("disk_type") disk_target = params.get("disk_target", 'vda') disk_src_protocol = params.get("disk_source_protocol") restart_tgtd = params.get("restart_tgtd", "no") vol_name = params.get("vol_name") tmp_dir = data_dir.get_tmp_dir() pool_name = params.get("pool_name", "gluster-pool") brick_path = 
os.path.join(tmp_dir, pool_name) # A backup of the original vm vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) logging.debug("original xml is %s", vmxml_backup) # Abort the test if there are snapshots already existing_snaps = virsh.snapshot_list(vm_name) if len(existing_snaps) != 0: test.fail("There are snapshots created for %s already" % vm_name) snapshot_external_disks = [] # Prepare an empty ceph config path so cleanup knows whether a config file was created during the test ceph_cfg = "" try: if disk_src_protocol == 'iscsi' and disk_type == 'network': if not libvirt_version.version_compare(1, 0, 4): test.cancel("'iscsi' disk is not supported in" " the current libvirt version.") if disk_src_protocol == 'gluster': if not libvirt_version.version_compare(1, 2, 7): test.cancel("Snapshot on glusterfs is not" " supported in the current " "version. See https://bugzilla.re" "dhat.com/show_bug.cgi?id=" "1017289 for more info.") # Set vm xml and guest agent if replace_vm_disk: if disk_src_protocol == "rbd" and disk_type == "network": src_host = params.get("disk_source_host", "EXAMPLE_HOSTS") mon_host = params.get("mon_host", "EXAMPLE_MON_HOST") # Create config file if it doesn't exist ceph_cfg = ceph.create_config_file(mon_host) if src_host.count("EXAMPLE") or mon_host.count("EXAMPLE"): test.cancel("Please provide ceph host first.") if backing_file_relative_path: if vm.is_alive(): vm.destroy(gracefully=False) first_src_file = get_first_disk_source() blk_source_image = os.path.basename(first_src_file) blk_source_folder = os.path.dirname(first_src_file) replace_disk_image = make_relative_path_backing_files() params.update({'disk_source_name': replace_disk_image, 'disk_type': 'file', 'disk_src_protocol': 'file'}) vm.start() libvirt.set_vm_disk(vm, params, tmp_dir) if needs_agent: vm.prepare_guest_agent() # The first disk is supposed to include the OS # We will perform the blockpull operation on it.
first_disk = vm.get_first_disk_devices() blk_source = first_disk['source'] blk_target = first_disk['target'] snapshot_flag_files = [] # get a vm session before snapshot session = vm.wait_for_login() # do snapshot make_disk_snapshot(snapshot_take) vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) logging.debug("The domain xml after snapshot is %s" % vmxml) # snapshot src file list snap_src_lst = [blk_source] snap_src_lst += snapshot_external_disks if snap_in_mirror: blockpull_options = "--bandwidth 1" else: blockpull_options = "--wait --verbose" if with_timeout: blockpull_options += " --timeout 1" if bandwidth: blockpull_options += " --bandwidth %s" % bandwidth if base_option == "async": blockpull_options += " --async" base_image = None base_index = None if (libvirt_version.version_compare(1, 2, 4) or disk_src_protocol == 'gluster'): # For libvirt 1.2.4 and newer, or when the source protocol is gluster, # the base image is given by chain index, which depends on the base # option: shallow, base or top respectively if base_option == "shallow": base_index = 1 base_image = "%s[%s]" % (disk_target, base_index) elif base_option == "base": base_index = 2 base_image = "%s[%s]" % (disk_target, base_index) elif base_option == "top": base_index = 0 base_image = "%s[%s]" % (disk_target, base_index) else: if base_option == "shallow": base_image = snap_src_lst[3] elif base_option == "base": base_image = snap_src_lst[2] elif base_option == "top": base_image = snap_src_lst[4] if base_option and base_image: blockpull_options += " --base %s" % base_image if keep_relative: blockpull_options += " --keep-relative" if backing_file_relative_path: # Use block commit to shorten previous snapshots. blockcommit_options = " --active --verbose --shallow --pivot --keep-relative" for count in range(1, snapshot_take + 1): res = virsh.blockcommit(vm_name, blk_target, blockcommit_options, **virsh_dargs) libvirt.check_exit_status(res, status_error) # Use block pull with the --keep-relative flag, and reset base_index to 2. base_index = 2 for count in range(1, snapshot_take): # From the third block pull operation on, base_index needs resetting to 1. if count >= 3: base_index = 1 base_image = "%s[%s]" % (disk_target, base_index) blockpull_options = " --wait --verbose --base %s --keep-relative" % base_image res = virsh.blockpull(vm_name, blk_target, blockpull_options, **virsh_dargs) libvirt.check_exit_status(res, status_error) # Check final backing chain files.
check_chain_backing_files(blk_source_image, True) return # Run test case result = virsh.blockpull(vm_name, blk_target, blockpull_options, **virsh_dargs) status = result.exit_status # If the pull job aborted on timeout, the exit status differs # between RHEL6 (0) and RHEL7 (1) if with_timeout and 'Pull aborted' in result.stdout.strip(): if libvirt_version.version_compare(1, 1, 1): status_error = True else: status_error = False # Check status_error libvirt.check_exit_status(result, status_error) if not status and not with_timeout: if snap_in_mirror: snap_mirror_path = "%s/snap_mirror" % tmp_dir snap_options = "--diskspec vda,snapshot=external," snap_options += "file=%s --disk-only" % snap_mirror_path snapshot_external_disks.append(snap_mirror_path) ret = virsh.snapshot_create_as(vm_name, snap_options, ignore_status=True, debug=True) libvirt.check_exit_status(ret, snap_in_mirror_err) return vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disks = vmxml.devices.by_device_tag('disk') for disk in disks: if disk.target['dev'] != blk_target: continue else: disk_xml = disk.xmltreefile break logging.debug("after pull the disk xml is: %s" % disk_xml) if libvirt_version.version_compare(1, 2, 4): err_msg = "Domain image backing chain check failed" if not base_option or "async" in base_option: chain_lst = snap_src_lst[-1:] ret = check_chain_xml(disk_xml, chain_lst) if not ret: test.fail(err_msg) elif "base" in base_option or "shallow" in base_option: chain_lst = snap_src_lst[::-1] if not base_index and base_image: base_index = chain_lst.index(base_image) val_tmp = [] for i in range(1, base_index): val_tmp.append(chain_lst[i]) for i in val_tmp: chain_lst.remove(i) ret = check_chain_xml(disk_xml, chain_lst) if not ret: test.fail(err_msg) # If base image is the top layer of snapshot chain, # virsh blockpull should fail, return directly if base_option == "top": return # Check flag files for flag in snapshot_flag_files: status, output = session.cmd_status_output("cat %s" % flag) if status: test.fail("blockpull failed: %s" % output) finally: # Remove ceph config file if created if ceph_cfg: os.remove(ceph_cfg) if vm.is_alive(): vm.destroy(gracefully=False) # Recover xml of vm. vmxml_backup.sync("--snapshots-metadata") # Clean ceph image if used in test if 'mon_host' in locals(): if utils_package.package_install(["ceph-common"]): disk_source_name = params.get("disk_source_name") cmd = ("rbd -m {0} info {1} && rbd -m {0} rm " "{1}".format(mon_host, disk_source_name)) cmd_result = process.run(cmd, ignore_status=True, shell=True) logging.debug("result of rbd removal: %s", cmd_result) else: logging.debug('Failed to install ceph-common to clean ceph.') if disk_src_protocol != 'gluster': for disk in snapshot_external_disks: if os.path.exists(disk): os.remove(disk) if backing_file_relative_path: libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup) process.run("cd %s && rm -rf b c d" % blk_source_folder, shell=True) libvirtd = utils_libvirtd.Libvirtd() if disk_src_protocol == 'iscsi': libvirt.setup_or_cleanup_iscsi(is_setup=False, restart_tgtd=restart_tgtd) elif disk_src_protocol == 'gluster': logging.info("clean gluster env") libvirt.setup_or_cleanup_gluster(False, brick_path=brick_path, **params) libvirtd.restart() elif disk_src_protocol == 'netfs': restore_selinux = params.get('selinux_status_bak') libvirt.setup_or_cleanup_nfs(is_setup=False, restore_selinux=restore_selinux)
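# The "vda[N]" strings used for --base above are libvirt's indexed-image
# notation: in these tests index 0 is the active image and higher indexes
# walk down the backing chain, matching the index attributes libvirt reports
# in the live domain XML. A tiny illustrative helper (the name is
# hypothetical, not a suite API):
def indexed_image_spec(disk_target, chain_index):
    """Build an indexed image spec such as 'vda[2]' for --base/--top."""
    return "%s[%d]" % (disk_target, chain_index)

# e.g. a blockpull using the second backing image as base:
#   virsh.blockpull(vm_name, 'vda',
#                   '--wait --base %s' % indexed_image_spec('vda', 2))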
def run(test, params, env): """ Test the virsh pool commands with acl, initiate a pool then do the following operations. (1) Undefine a given type pool (2) Define the pool from xml (3) Build given type pool (4) Start pool (5) Destroy pool (6) Refresh pool after starting it (7) Run vol-list with the pool (8) Delete pool For negative cases, redo the failed step so the case can continue. Run cleanup at last to restore the env. """ # Initialize the variables pool_name = params.get("pool_name", "temp_pool_1") pool_type = params.get("pool_type", "dir") pool_target = params.get("pool_target", "") # The file for dumped pool xml pool_xml = os.path.join(test.tmpdir, "pool.xml.tmp") if os.path.dirname(pool_target) == "": pool_target = os.path.join(test.tmpdir, pool_target) vol_name = params.get("vol_name", "temp_vol_1") # Use pool name as VG name vg_name = pool_name vol_path = os.path.join(pool_target, vol_name) define_acl = "yes" == params.get("define_acl", "no") undefine_acl = "yes" == params.get("undefine_acl", "no") start_acl = "yes" == params.get("start_acl", "no") destroy_acl = "yes" == params.get("destroy_acl", "no") build_acl = "yes" == params.get("build_acl", "no") delete_acl = "yes" == params.get("delete_acl", "no") refresh_acl = "yes" == params.get("refresh_acl", "no") vol_list_acl = "yes" == params.get("vol_list_acl", "no") list_dumpxml_acl = "yes" == params.get("list_dumpxml_acl", "no") src_pool_error = "yes" == params.get("src_pool_error", "no") define_error = "yes" == params.get("define_error", "no") undefine_error = "yes" == params.get("undefine_error", "no") start_error = "yes" == params.get("start_error", "no") destroy_error = "yes" == params.get("destroy_error", "no") build_error = "yes" == params.get("build_error", "no") delete_error = "yes" == params.get("delete_error", "no") refresh_error = "yes" == params.get("refresh_error", "no") vol_list_error = "yes" == params.get("vol_list_error", "no") # Clean up flags: # cleanup_env[0] for nfs, cleanup_env[1] for iscsi, cleanup_env[2] for lvm # cleanup_env[3] for selinux backup status, cleanup_env[4] for gluster cleanup_env = [False, False, False, "", False] # libvirt acl related params uri = params.get("virsh_uri") unprivileged_user = params.get('unprivileged_user') if unprivileged_user: if unprivileged_user.count('EXAMPLE'): unprivileged_user = '******' if not libvirt_version.version_compare(1, 1, 1): if params.get('setup_libvirt_polkit') == 'yes': raise error.TestNAError("API acl test not supported in current" " libvirt version.") acl_dargs = { 'uri': uri, 'unprivileged_user': unprivileged_user, 'debug': True } def check_pool_list(pool_name, option="--all", expect_error=False): """ Check pool by running pool-list command with given option. :param pool_name: Name of the pool :param option: option for pool-list command :param expect_error: Boolean value, expect command success or fail """ found = False # Get the list stored in a variable if list_dumpxml_acl: result = virsh.pool_list(option, **acl_dargs) else: result = virsh.pool_list(option, ignore_status=True) utlv.check_exit_status(result, False) output = re.findall(r"(\S+)\ +(\S+)\ +(\S+)[\ +\n]", str(result.stdout)) for item in output: if pool_name in item[0]: found = True break if found: logging.debug("Found pool '%s' in pool list.", pool_name) else: logging.debug("Pool %s not found in pool list.", pool_name) if expect_error and found: raise error.TestFail("Unexpected pool '%s' exists." % pool_name) if not expect_error and not found: raise error.TestFail("Expected pool '%s' doesn't exist."
% pool_name) # Run Testcase kwargs = {'source_format': params.get('pool_source_format', 'ext4')} try: _pool = libvirt_storage.StoragePool() # Init a pool for test result = utlv.define_pool(pool_name, pool_type, pool_target, cleanup_env, **kwargs) utlv.check_exit_status(result, src_pool_error) option = "--inactive --type %s" % pool_type check_pool_list(pool_name, option) if list_dumpxml_acl: xml = virsh.pool_dumpxml(pool_name, to_file=pool_xml, **acl_dargs) else: xml = virsh.pool_dumpxml(pool_name, to_file=pool_xml) logging.debug("Pool '%s' XML:\n%s", pool_name, xml) # Step (1) # Undefine pool if undefine_acl: result = virsh.pool_undefine(pool_name, **acl_dargs) else: result = virsh.pool_undefine(pool_name, ignore_status=True) utlv.check_exit_status(result, undefine_error) if undefine_error: check_pool_list(pool_name, "--all", False) # Redo under negative case to keep the case running result = virsh.pool_undefine(pool_name, ignore_status=True) utlv.check_exit_status(result) check_pool_list(pool_name, "--all", True) else: check_pool_list(pool_name, "--all", True) # Step (2) # Define pool from XML file if define_acl: result = virsh.pool_define(pool_xml, **acl_dargs) else: result = virsh.pool_define(pool_xml) utlv.check_exit_status(result, define_error) if define_error: # Redo under negative case to keep the case running result = virsh.pool_define(pool_xml) utlv.check_exit_status(result) # Step (3) # '--overwrite/--no-overwrite' just for fs/disk/logical type pool # disk/fs pool: as the prepare step already made a label and created a filesystem # on the disk, using '--overwrite' is necessary # logical_pool: build pool will fail if VG already exists, BZ#1373711 if pool_type != "logical": option = '' if pool_type in ['disk', 'fs']: option = '--overwrite' result = virsh.pool_build(pool_name, option, ignore_status=True) utlv.check_exit_status(result) if build_acl: result = virsh.pool_build(pool_name, option, **acl_dargs) else: result = virsh.pool_build(pool_name, option, ignore_status=True) utlv.check_exit_status(result, build_error) if build_error: # Redo under negative case to keep the case running result = virsh.pool_build(pool_name, option, ignore_status=True) utlv.check_exit_status(result) # For iSCSI pool, we need to discover targets before starting the pool if pool_type == 'iscsi': cmd = 'iscsiadm -m discovery -t sendtargets -p 127.0.0.1' process.run(cmd, shell=True) # Step (4) # Pool start if start_acl: result = virsh.pool_start(pool_name, **acl_dargs) else: result = virsh.pool_start(pool_name, ignore_status=True) utlv.check_exit_status(result, start_error) if start_error: # Redo under negative case to keep the case running result = virsh.pool_start(pool_name, ignore_status=True) utlv.check_exit_status(result) option = "--persistent --type %s" % pool_type check_pool_list(pool_name, option) # Step (5) # Pool destroy if destroy_acl: result = virsh.pool_destroy(pool_name, **acl_dargs) else: result = virsh.pool_destroy(pool_name) if result: if destroy_error: raise error.TestFail("Expected failure, but it ran successfully.") else: if not destroy_error: raise error.TestFail("Pool %s destroy failed, not expected." % pool_name) else: # Redo under negative case to keep the case running if virsh.pool_destroy(pool_name): logging.debug("Pool %s destroyed.", pool_name) else: raise error.TestFail("Destroy pool %s failed."
% pool_name) # Step (6) # Pool refresh for 'dir' type pool # Pool start result = virsh.pool_start(pool_name, ignore_status=True) utlv.check_exit_status(result) if pool_type == "dir": os.mknod(vol_path) if refresh_acl: result = virsh.pool_refresh(pool_name, **acl_dargs) else: result = virsh.pool_refresh(pool_name) utlv.check_exit_status(result, refresh_error) # Step (7) # Pool vol-list if vol_list_acl: result = virsh.vol_list(pool_name, **acl_dargs) else: result = virsh.vol_list(pool_name) utlv.check_exit_status(result, vol_list_error) # Step (8) # Pool delete for 'dir' type pool if virsh.pool_destroy(pool_name): logging.debug("Pool %s destroyed.", pool_name) else: raise error.TestFail("Destroy pool %s failed." % pool_name) if pool_type == "dir": if os.path.exists(vol_path): os.remove(vol_path) if delete_acl: result = virsh.pool_delete(pool_name, **acl_dargs) else: result = virsh.pool_delete(pool_name, ignore_status=True) utlv.check_exit_status(result, delete_error) option = "--inactive --type %s" % pool_type check_pool_list(pool_name, option) if not delete_error: if os.path.exists(pool_target): raise error.TestFail("The target path '%s' still exists." % pool_target) result = virsh.pool_undefine(pool_name, ignore_status=True) utlv.check_exit_status(result) check_pool_list(pool_name, "--all", True) finally: # Clean up if os.path.exists(pool_xml): os.remove(pool_xml) if not _pool.delete_pool(pool_name): logging.error("Can't delete pool: %s", pool_name) if cleanup_env[2]: cmd = "pvs |grep %s |awk '{print $1}'" % vg_name pv_name = process.system_output(cmd, shell=True) lv_utils.vg_remove(vg_name) process.run("pvremove %s" % pv_name, shell=True) if cleanup_env[1]: utlv.setup_or_cleanup_iscsi(False) if cleanup_env[0]: utlv.setup_or_cleanup_nfs(False, restore_selinux=cleanup_env[3])
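# check_pool_list above scrapes the 'virsh pool-list' table with a regex. A
# stand-alone sketch of the same idea (the function name is illustrative):
import re

def pool_in_list(pool_list_output, pool_name):
    """Return True if pool_name appears in 'virsh pool-list' table output."""
    for fields in re.findall(r"(\S+)\s+(\S+)\s+(\S+)", pool_list_output):
        # fields is (name, state, autostart); the header row is harmless
        # here because a pool cannot be named 'Name'
        if fields[0] == pool_name:
            return True
    return False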
def run(test, params, env): """ Test virsh domblkerror with 2 types of error 1. unspecified error 2. no space """ if not virsh.has_help_command('domblkerror'): test.cancel("This version of libvirt does not support domblkerror " "test") vm_name = params.get("main_vm", "avocado-vt-vm1") error_type = params.get("domblkerror_error_type") timeout = int(params.get("domblkerror_timeout", 240)) mnt_dir = params.get("domblkerror_mnt_dir", "/home/test") export_file = params.get("nfs_export_file", "/etc/exports") img_name = params.get("domblkerror_img_name", "libvirt-disk") img_size = params.get("domblkerror_img_size") target_dev = params.get("domblkerror_target_dev", "vdb") pool_name = params.get("domblkerror_pool_name", "fs_pool") vol_name = params.get("domblkerror_vol_name", "vol1") ubuntu = distro.detect().name == 'Ubuntu' rhel = distro.detect().name == 'rhel' nfs_service_package = params.get("nfs_service_package", "nfs-kernel-server") nfs_service = None selinux_bool = None session = None selinux_bak = "" vm = env.get_vm(vm_name) if error_type == "unspecified error": selinux_local = params.get("setup_selinux_local", "yes") == "yes" if not ubuntu and not rhel: nfs_service_package = "nfs" elif rhel: nfs_service_package = "nfs-server" if not rhel and not utils_package.package_install(nfs_service_package): test.cancel("NFS package not available on the host to test") # backup /etc/exports shutil.copyfile(export_file, "%s.bak" % export_file) # backup xml vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) try: # Generate tmp dir tmp_dir = data_dir.get_tmp_dir() img_dir = os.path.join(tmp_dir, 'images') if not os.path.exists(img_dir): os.mkdir(img_dir) # Generate attached disk process.run("qemu-img create %s %s" % (os.path.join(img_dir, img_name), img_size), shell=True, verbose=True) # Get unspecified error if error_type == "unspecified error": # In this situation, the guest attaches a disk on NFS; stopping the NFS # service will cause the guest to pause with an unspecified error nfs_dir = os.path.join(tmp_dir, 'mnt') if not os.path.exists(nfs_dir): os.mkdir(nfs_dir) mount_opt = "rw,no_root_squash,async" res = libvirt.setup_or_cleanup_nfs(is_setup=True, mount_dir=nfs_dir, is_mount=False, export_options=mount_opt, export_dir=img_dir) if not ubuntu: selinux_bak = res["selinux_status_bak"] process.run("mount -o nolock,soft,timeo=1,retrans=1,retry=0 " "127.0.0.1:%s %s" % (img_dir, nfs_dir), shell=True, verbose=True) img_path = os.path.join(nfs_dir, img_name) nfs_service = Factory.create_service(nfs_service_package) if not ubuntu and selinux_local: params['set_sebool_local'] = "yes" params['local_boolean_varible'] = "virt_use_nfs" params['local_boolean_value'] = "on" selinux_bool = utils_misc.SELinuxBoolean(params) selinux_bool.setup() elif error_type == "no space": # Steps to generate no space block error: # 1. Prepare an iscsi disk and build fs pool with it # 2. Create vol with larger capacity and 0 allocation # 3. Attach this disk to the guest # 4.
In the guest, create a large image in the vol, which may cause # the guest to pause _pool_vol = None pool_target = os.path.join(tmp_dir, pool_name) _pool_vol = libvirt.PoolVolumeTest(test, params) _pool_vol.pre_pool(pool_name, "fs", pool_target, img_name, image_size=img_size) _pool_vol.pre_vol(vol_name, "raw", "100M", "0", pool_name) img_path = os.path.join(pool_target, vol_name) # Generate disk xml # Guest will attach a disk with cache=none and error_policy=stop img_disk = Disk(type_name="file") img_disk.device = "disk" img_disk.source = img_disk.new_disk_source( **{'attrs': {'file': img_path}}) img_disk.driver = {'name': "qemu", 'type': "raw", 'cache': "none", 'error_policy': "stop"} img_disk.target = {'dev': target_dev, 'bus': "virtio"} logging.debug("disk xml is %s", img_disk.xml) # Start guest and get session if not vm.is_alive(): vm.start() session = vm.wait_for_login() # Get disk list before operation get_disks_cmd = "fdisk -l|grep '^Disk /dev'|cut -d: -f1|cut -d' ' -f2" bef_list = str(session.cmd_output(get_disks_cmd)).strip().split("\n") logging.debug("disk_list_debug = %s", bef_list) # Attach disk to guest ret = virsh.attach_device(vm_name, img_disk.xml) if ret.exit_status != 0: test.fail("Failed to attach device: %s" % ret.stderr) time.sleep(2) logging.debug("domain xml is %s", virsh.dumpxml(vm_name)) # get disk list after attach aft_list = str(session.cmd_output(get_disks_cmd)).strip().split("\n") logging.debug("disk list after attaching - %s", aft_list) # Find new disk after attach new_disk = "".join(list(set(bef_list) ^ set(aft_list))) logging.debug("new disk is %s", new_disk) def create_large_image(): """ Create large image in guest """ # install dependent packages pkg_list = ["parted", "e2fsprogs"] for pkg in pkg_list: if not utils_package.package_install(pkg, session): test.error("Failed to install dependent package %s" % pkg) # create partition and file system session.cmd("parted -s %s mklabel msdos" % new_disk) session.cmd("parted -s %s mkpart primary ext3 '0%%' '100%%'" % new_disk) # mount disk and write file in it session.cmd("mkfs.ext3 %s1" % new_disk) session.cmd("mkdir -p %s && mount %s1 %s" % (mnt_dir, new_disk, mnt_dir)) # The following step may cause the guest to pause before it returns try: session.cmd("dd if=/dev/zero of=%s/big_file bs=1024 " "count=51200 && sync" % mnt_dir) except Exception as err: logging.debug("Expected Fail %s", err) session.close() create_large_image() if error_type == "unspecified error": # umount nfs to trigger the error after creating the large image if nfs_service is not None: nfs_service.stop() logging.debug("nfs status is %s", nfs_service.status()) # wait and check the guest status with timeout def _check_state(): """ Check domain state """ return (vm.state() == "paused") if not utils_misc.wait_for(_check_state, timeout): # If not paused, perform one more IO operation to the mnt disk session = vm.wait_for_login() session.cmd("echo 'one more write to big file' > %s/big_file" % mnt_dir) if not utils_misc.wait_for(_check_state, 60): test.fail("Guest did not pause, it is %s now" % vm.state()) else: logging.info("Now domain state has changed to paused") output = virsh.domblkerror(vm_name) if output.exit_status == 0: expect_result = "%s: %s" % (img_disk.target['dev'], error_type) if output.stdout.strip() == expect_result: logging.info("Got expected result: %s", expect_result) else: test.fail("Failed to get expected result, got %s" % output.stdout.strip()) else: test.fail("Failed to get domblkerror info: %s" % output.stderr) finally: logging.info("Do clean steps") if
session: session.close() if error_type == "unspecified error": if nfs_service is not None: nfs_service.start() vm.destroy() if os.path.isfile("%s.bak" % export_file): shutil.move("%s.bak" % export_file, export_file) res = libvirt.setup_or_cleanup_nfs(is_setup=False, mount_dir=nfs_dir, export_dir=img_dir, restore_selinux=selinux_bak) if selinux_bool: selinux_bool.cleanup(keep_authorized_keys=True) elif error_type == "no space": vm.destroy() if _pool_vol: _pool_vol.cleanup_pool(pool_name, "fs", pool_target, img_name) vmxml_backup.sync() data_dir.clean_tmp_files()
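# virsh domblkerror prints one '<target>: <error>' line per disk in an error
# state, which is what the expect_result comparison above relies on. A small
# parser sketch (the helper name is illustrative, not a suite API):
def parse_domblkerror(output):
    """Map disk targets to error strings, e.g. {'vdb': 'no space'}."""
    errors = {}
    for line in output.strip().splitlines():
        if ':' in line:
            target, error = line.split(':', 1)
            errors[target.strip()] = error.strip()
    return errors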
def run(test, params, env): """ Test command: virsh blockcopy. This command can copy a disk backing image chain to dest. 1. Positive testing 1.1 Copy a disk to a new image file. 1.2 Reuse existing destination copy. 1.3 Valid blockcopy timeout and bandwidth test. 2. Negative testing 2.1 Copy a disk to a non-exist directory. 2.2 Copy a disk with invalid options. 2.3 Do block copy for a persistent domain. """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) target = params.get("target_disk", "") replace_vm_disk = "yes" == params.get("replace_vm_disk", "no") disk_source_protocol = params.get("disk_source_protocol") disk_type = params.get("disk_type") pool_name = params.get("pool_name") image_size = params.get("image_size") emu_image = params.get("emulated_image") copy_to_nfs = "yes" == params.get("copy_to_nfs", "no") mnt_path_name = params.get("mnt_path_name") options = params.get("blockcopy_options", "") bandwidth = params.get("blockcopy_bandwidth", "") bandwidth_byte = "yes" == params.get("bandwidth_byte", "no") reuse_external = "yes" == params.get("reuse_external", "no") persistent_vm = params.get("persistent_vm", "no") status_error = "yes" == params.get("status_error", "no") active_error = "yes" == params.get("active_error", "no") active_snap = "yes" == params.get("active_snap", "no") active_save = "yes" == params.get("active_save", "no") check_state_lock = "yes" == params.get("check_state_lock", "no") with_shallow = "yes" == params.get("with_shallow", "no") with_blockdev = "yes" == params.get("with_blockdev", "no") setup_libvirt_polkit = "yes" == params.get('setup_libvirt_polkit') bug_url = params.get("bug_url", "") timeout = int(params.get("timeout", 1200)) relative_path = params.get("relative_path") rerun_flag = 0 blkdev_n = None back_n = 'blockdev-backing-iscsi' snapshot_external_disks = [] # Skip/Fail early if with_blockdev and not libvirt_version.version_compare(1, 2, 13): raise exceptions.TestSkipError("--blockdev option not supported in " "current version") if not target: raise exceptions.TestSkipError("Require target disk to copy") if setup_libvirt_polkit and not libvirt_version.version_compare(1, 1, 1): raise exceptions.TestSkipError("API acl test not supported in current" " libvirt version") if copy_to_nfs and not libvirt_version.version_compare(1, 1, 1): raise exceptions.TestSkipError("Bug will not fix: %s" % bug_url) if bandwidth_byte and not libvirt_version.version_compare(1, 3, 3): raise exceptions.TestSkipError("--bytes option not supported in " "current version") if relative_path == "yes" and not libvirt_version.version_compare(3, 0, 0): test.cancel("Forbid using relative path or file name only is added since libvirt-3.0.0") # Check the source disk if vm_xml.VMXML.check_disk_exist(vm_name, target): logging.debug("Find %s in domain %s", target, vm_name) else: raise exceptions.TestFail("Can't find %s in domain %s" % (target, vm_name)) original_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) tmp_dir = data_dir.get_tmp_dir() # Prepare dest path params dest_path = params.get("dest_path", "") dest_format = params.get("dest_format", "") # Ugh... this piece of chicanery brought to you by the QemuImg which # will "add" the 'dest_format' extension during the check_format code. # So if we create the file with the extension and then remove it when # doing the check_format later, then we avoid erroneous failures. 
    dest_extension = ""
    if dest_format != "":
        dest_extension = ".%s" % dest_format

    # Prepare for --reuse-external option
    if reuse_external:
        options += "--reuse-external --wait"

    # Set rerun_flag=1 to run blockcopy twice: the file created by the
    # first run is reused by the second run when no dest_path is given.
    # This makes sure the image size equals the original disk size.
    if dest_path == "/path/non-exist":
        if os.path.exists(dest_path) and not os.path.isdir(dest_path):
            os.remove(dest_path)
    else:
        rerun_flag = 1

    # Prepare other options
    if dest_format == "raw":
        options += " --raw"
    if with_blockdev:
        options += " --blockdev"
    if len(bandwidth):
        options += " --bandwidth %s" % bandwidth
    if bandwidth_byte:
        options += " --bytes"
    if with_shallow:
        options += " --shallow"

    # Prepare acl options
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    extra_dict = {'uri': uri, 'unprivileged_user': unprivileged_user,
                  'debug': True, 'ignore_status': True, 'timeout': timeout}

    libvirtd_utl = utils_libvirtd.Libvirtd()
    libvirtd_conf = utils_config.LibvirtdConfig()
    libvirtd_conf["log_filters"] = '"3:json 1:libvirt 1:qemu"'
    libvirtd_log_path = os.path.join(data_dir.get_tmp_dir(), "libvirtd.log")
    libvirtd_conf["log_outputs"] = '"1:file:%s"' % libvirtd_log_path
    logging.debug("the libvirtd config file content is:\n %s" % libvirtd_conf)
    libvirtd_utl.restart()

    def check_format(dest_path, dest_extension, expect):
        """
        Check the image format

        :param dest_path: Path of the created copy
        :param dest_extension: Extension ("." + dest_format) to strip
        :param expect: Expected image format
        """
        # QemuImg will add the extension back for us, so remove it here.
        # Use explicit suffix slicing: str.strip() treats its argument as
        # a character set and can eat too many characters.
        if dest_extension and dest_path.endswith(dest_extension):
            path_noext = dest_path[:-len(dest_extension)]
        else:
            path_noext = dest_path
        params['image_name'] = path_noext
        params['image_format'] = expect
        image = qemu_storage.QemuImg(params, "/", path_noext)
        if image.get_format() == expect:
            logging.debug("%s format is %s", dest_path, expect)
        else:
            raise exceptions.TestFail("%s format is not %s" %
                                      (dest_path, expect))

    def _blockjob_and_libvirtd_chk(cmd_result):
        """
        Raise TestFail when blockcopy fails with a block-job-complete
        error or hangs on the state change lock. This verifies a specific
        bug, so status_error is ignored here.
        """
        bug_url_ = "https://bugzilla.redhat.com/show_bug.cgi?id=1197592"
        err_msg = "internal error: unable to execute QEMU command"
        err_msg += " 'block-job-complete'"
        if err_msg in cmd_result.stderr:
            raise exceptions.TestFail("Hit on bug: %s" % bug_url_)

        err_pattern = "Timed out during operation: cannot acquire"
        err_pattern += " state change lock"
        ret = chk_libvirtd_log(libvirtd_log_path, err_pattern, "error")
        if ret:
            raise exceptions.TestFail("Hit on bug: %s" % bug_url_)

    def _make_snapshot():
        """
        Make an external disk snapshot
        """
        snap_xml = snapshot_xml.SnapshotXML()
        snapshot_name = "blockcopy_snap"
        snap_xml.snap_name = snapshot_name
        snap_xml.description = "blockcopy snapshot"
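        # The XML handed to 'virsh snapshot-create --disk-only' below ends
        # up roughly like this (disk name and file are illustrative):
        #   <domainsnapshot>
        #     <name>blockcopy_snap</name>
        #     <disks>
        #       <disk name='vda' snapshot='external'>
        #         <driver type='qcow2'/>
        #         <source file='.../blockcopy_shallow.snap'/>
        #       </disk>
        #     </disks>
        #   </domainsnapshot>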
        # Add all disks into the xml file.
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        disks = vmxml.devices.by_device_tag('disk')
        # Remove non-storage disks such as 'cdrom'
        for disk in disks:
            if disk.device != 'disk':
                disks.remove(disk)
        new_disks = []

        src_disk_xml = disks[0]
        disk_xml = snap_xml.SnapDiskXML()
        disk_xml.xmltreefile = src_disk_xml.xmltreefile
        del disk_xml.device
        del disk_xml.address
        disk_xml.snapshot = "external"
        disk_xml.disk_name = disk_xml.target['dev']

        # Only qcow2 works as the external snapshot file format; update
        # it here
        driver_attr = disk_xml.driver
        driver_attr.update({'type': 'qcow2'})
        disk_xml.driver = driver_attr

        # Initialize hosts so the check below is safe even when the
        # source has none of the handled attribute types
        hosts = None
        new_attrs = disk_xml.source.attrs
        if 'file' in disk_xml.source.attrs:
            new_file = os.path.join(tmp_dir, "blockcopy_shallow.snap")
            snapshot_external_disks.append(new_file)
            new_attrs.update({'file': new_file})
            hosts = None
        elif ('dev' in disk_xml.source.attrs or
              'name' in disk_xml.source.attrs or
              'pool' in disk_xml.source.attrs):
            if (disk_xml.type_name == 'block' or
                    disk_source_protocol == 'iscsi'):
                disk_xml.type_name = 'block'
                if 'name' in new_attrs:
                    del new_attrs['name']
                    del new_attrs['protocol']
                elif 'pool' in new_attrs:
                    del new_attrs['pool']
                    del new_attrs['volume']
                    del new_attrs['mode']
                back_path = utl.setup_or_cleanup_iscsi(is_setup=True,
                                                       is_login=True,
                                                       image_size="1G",
                                                       emulated_image=back_n)
                emulated_iscsi.append(back_n)
                cmd = "qemu-img create -f qcow2 %s 1G" % back_path
                process.run(cmd, shell=True)
                new_attrs.update({'dev': back_path})
                hosts = None

        new_src_dict = {"attrs": new_attrs}
        if hosts:
            new_src_dict.update({"hosts": hosts})
        disk_xml.source = disk_xml.new_disk_source(**new_src_dict)
        new_disks.append(disk_xml)

        snap_xml.set_disks(new_disks)
        snapshot_xml_path = snap_xml.xml
        logging.debug("The snapshot xml is: %s" % snap_xml.xmltreefile)

        options = "--disk-only --xmlfile %s " % snapshot_xml_path
        snapshot_result = virsh.snapshot_create(vm_name, options,
                                                debug=True)
        if snapshot_result.exit_status != 0:
            raise exceptions.TestFail(snapshot_result.stderr)

    snap_path = ''
    save_path = ''
    emulated_iscsi = []
    nfs_cleanup = False
    try:
        # Prepare dest_path
        tmp_file = time.strftime("%Y-%m-%d-%H.%M.%S.img")
        tmp_file += dest_extension
        if not dest_path:
            if with_blockdev:
                blkdev_n = 'blockdev-iscsi'
                dest_path = utl.setup_or_cleanup_iscsi(
                    is_setup=True, is_login=True, image_size=image_size,
                    emulated_image=blkdev_n)
                emulated_iscsi.append(blkdev_n)
                # Make sure the new disk shows up
                utils_misc.wait_for(lambda: os.path.exists(dest_path), 5)
            else:
                if copy_to_nfs:
                    tmp_dir = "%s/%s" % (tmp_dir, mnt_path_name)
                dest_path = os.path.join(tmp_dir, tmp_file)

        # Replace the domain disk with the desired type
        if replace_vm_disk:
            # Calling 'set_vm_disk' is a bad idea as it leaves lots of
            # cleanup jobs after the test, such as pool, volume, nfs and
            # iscsi resources
            # TODO: remove this function in the future
            if disk_source_protocol == 'iscsi':
                emulated_iscsi.append(emu_image)
            if disk_source_protocol == 'netfs':
                nfs_cleanup = True
            utl.set_vm_disk(vm, params, tmp_dir, test)
            new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

        if with_shallow:
            _make_snapshot()
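        # With --shallow, blockcopy copies only the top image of the
        # chain and keeps the backing file shared with the original, so
        # an external snapshot is created first to give the disk a
        # backing chain to share.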
        # Prepare a transient/persistent vm
        if persistent_vm == "no" and vm.is_persistent():
            vm.undefine("--nvram")
        elif persistent_vm == "yes" and not vm.is_persistent():
            new_xml.define()

        # Run the blockcopy command once beforehand to create the
        # destination file the real run can reuse
        if rerun_flag == 1:
            options1 = "--wait %s --finish --verbose" % dest_format
            if with_blockdev:
                options1 += " --blockdev"
            if with_shallow:
                options1 += " --shallow"
            cmd_result = virsh.blockcopy(vm_name, target,
                                         dest_path, options1,
                                         **extra_dict)
            status = cmd_result.exit_status
            if status != 0:
                raise exceptions.TestFail("Run blockcopy command failed: %s" %
                                          (cmd_result.stdout.strip() +
                                           cmd_result.stderr))
            elif not os.path.exists(dest_path):
                raise exceptions.TestFail("Cannot find the created copy")

        # Run the real testing command
        cmd_result = virsh.blockcopy(vm_name, target, dest_path,
                                     options, **extra_dict)

        # Check for BZ#1197592
        _blockjob_and_libvirtd_chk(cmd_result)
        status = cmd_result.exit_status

        if not libvirtd_utl.is_running():
            raise exceptions.TestFail("Libvirtd service is dead")

        if not status_error:
            if status == 0:
                ret = utils_misc.wait_for(
                    lambda: check_xml(vm_name, target, dest_path, options), 5)
                if not ret:
                    raise exceptions.TestFail("Domain xml not expected after"
                                              " blockcopy")
                if options.count("--bandwidth"):
                    if options.count('--bytes'):
                        bandwidth += 'B'
                    else:
                        bandwidth += 'M'
                    if not utl.check_blockjob(vm_name, target,
                                              "bandwidth", bandwidth):
                        raise exceptions.TestFail("Check bandwidth failed")
                val = options.count("--pivot") + options.count("--finish")
                # Don't wait for the job to finish when using the --bytes
                # option
                val += options.count('--bytes')
                if val == 0:
                    try:
                        finish_job(vm_name, target, timeout)
                    except JobTimeout as excpt:
                        raise exceptions.TestFail("Run command failed: %s" %
                                                  excpt)
                if options.count("--raw") and not with_blockdev:
                    check_format(dest_path, dest_extension, dest_format)
                if active_snap:
                    snap_path = "%s/%s.snap" % (tmp_dir, vm_name)
                    snap_opt = "--disk-only --atomic --no-metadata "
                    snap_opt += "vda,snapshot=external,file=%s" % snap_path
                    ret = virsh.snapshot_create_as(vm_name, snap_opt,
                                                   ignore_status=True,
                                                   debug=True)
                    utl.check_exit_status(ret, active_error)
                if active_save:
                    save_path = "%s/%s.save" % (tmp_dir, vm_name)
                    ret = virsh.save(vm_name, save_path,
                                     ignore_status=True,
                                     debug=True)
                    utl.check_exit_status(ret, active_error)
                if check_state_lock:
                    # Run blockjob --pivot in a subprocess as it will hang
                    # for a while, then run blockjob --info again to check
                    # the job state
                    command = "virsh blockjob %s %s --pivot" % (vm_name,
                                                                target)
                    session = aexpect.ShellSession(command)
                    ret = virsh.blockjob(vm_name, target, "--info")
                    err_info = "cannot acquire state change lock"
                    if err_info in ret.stderr:
                        raise exceptions.TestFail("Hit on bug: %s" % bug_url)
                    utl.check_exit_status(ret, status_error)
                    session.close()
            else:
                raise exceptions.TestFail(cmd_result.stdout.strip() +
                                          cmd_result.stderr)
        else:
            if status:
                logging.debug("Expect error: %s", cmd_result.stderr)
            else:
                # Commit id '4c297728' changed how virsh exits when it
                # unexpectedly fails due to a timeout, from failure (1)
                # to success (0), so we need to look for a different
                # marker that the copy aborted. Since "Now in mirroring
                # phase" may appear in stdout and spoil the check, the
                # libvirtd log is also checked to confirm.
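                # With "--timeout N --wait", a timed-out job is cancelled
                # and virsh is expected to report "Copy aborted" -- on
                # stdout, or at least as a debug entry in the libvirtd
                # log; that marker is what the check below relies on.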
                if options.count("--timeout") and options.count("--wait"):
                    log_pattern = "Copy aborted"
                    if (re.search(log_pattern, cmd_result.stdout.strip()) or
                            chk_libvirtd_log(libvirtd_log_path,
                                             log_pattern, "debug")):
                        logging.debug("Block copy aborted on timeout as "
                                      "expected")
                    else:
                        raise exceptions.TestFail("Expected failure, but the "
                                                  "copy ran successfully: %s"
                                                  % bug_url)
    finally:
        # Recovering the VM may fail unexpectedly, so use try/except to
        # make sure the following cleanup steps still run
        try:
            # Abort any existing blockjob to avoid possible lock errors
            virsh.blockjob(vm_name, target, '--abort', ignore_status=True)
            vm.destroy(gracefully=False)
            # It may take a long time to shut down a VM that has a
            # blockjob running
            utils_misc.wait_for(
                lambda: virsh.domstate(vm_name,
                                       ignore_status=True).exit_status, 180)
            if virsh.domain_exists(vm_name):
                if active_snap or with_shallow:
                    option = "--snapshots-metadata"
                else:
                    option = None
                original_xml.sync(option)
            else:
                original_xml.define()
        except Exception as e:
            logging.error(e)
        for disk in snapshot_external_disks:
            if os.path.exists(disk):
                os.remove(disk)
        # Clean up the libvirt pool, which may have been created by
        # 'set_vm_disk'
        if disk_type == 'volume':
            virsh.pool_destroy(pool_name, ignore_status=True, debug=True)
        # Restore libvirtd conf and restart libvirtd
        libvirtd_conf.restore()
        libvirtd_utl.restart()
        if libvirtd_log_path and os.path.exists(libvirtd_log_path):
            os.unlink(libvirtd_log_path)
        # Clean up NFS
        try:
            if nfs_cleanup:
                utl.setup_or_cleanup_nfs(is_setup=False)
        except Exception as e:
            logging.error(e)
        # Clean up iSCSI
        try:
            for iscsi_n in list(set(emulated_iscsi)):
                utl.setup_or_cleanup_iscsi(is_setup=False,
                                           emulated_image=iscsi_n)
                # iscsid will be restarted, so give it a break before the
                # next loop
                time.sleep(5)
        except Exception as e:
            logging.error(e)
        if os.path.exists(dest_path):
            os.remove(dest_path)
        if os.path.exists(snap_path):
            os.remove(snap_path)
        if os.path.exists(save_path):
            os.remove(save_path)
        # Restart the virtlogd service to release the VM log file lock
        try:
            path.find_command('virtlogd')
            process.run('systemctl reset-failed virtlogd')
            process.run('systemctl restart virtlogd')
        except path.CmdNotFoundError:
            pass
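
# chk_libvirtd_log(), check_xml(), finish_job() and JobTimeout used above
# are helpers defined elsewhere in this module. As a rough sketch of the
# assumed behavior (illustrative only, not the actual implementation),
# chk_libvirtd_log() greps the libvirtd log for a pattern at a log level:
#
#   def chk_libvirtd_log(path, pattern, level):
#       with open(path) as logfile:
#           return any(level in line and re.search(pattern, line)
#                      for line in logfile)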