def attach_disk_test():
    """
    Attach-disk testcase.
    1.Attach a disk to guest.
    2.Perform domblkinfo operation.
    3.Detach the disk.

    :return: Command status and output.
    """
    try:
        source_file = open(test_disk_source, 'wb')
        source_file.seek((512 * 1024 * 1024) - 1)
        source_file.write(str(0))
        source_file.close()
        virsh.attach_disk(vm_name, test_disk_source, front_dev, debug=True)
        vm_ref = vm_name
        result_source = virsh.domblkinfo(vm_ref, test_disk_source,
                                         ignore_status=True, debug=True)
        status_source = result_source.exit_status
        output_source = result_source.stdout.strip()
        if driver == "qemu":
            result_target = virsh.domblkinfo(vm_ref, front_dev,
                                             ignore_status=True, debug=True)
            status_target = result_target.exit_status
            output_target = result_target.stdout.strip()
        else:
            status_target = 0
            output_target = "Xen doesn't support domblkinfo target!"
        virsh.detach_disk(vm_name, front_dev, debug=True)
        return status_target, output_target, status_source, output_source
    except (error.CmdError, IOError):
        return 1, "", 1, ""
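# Hedged usage sketch (not part of the original test): how a caller might
# consume the four values attach_disk_test() returns. `logging` and the
# module-level names used by the helper (vm_name, test_disk_source,
# front_dev, driver) are assumed to be set up by the surrounding run()
# function; checking for "Capacity" is only an illustrative sanity check.
status_target, output_target, status_source, output_source = attach_disk_test()
if status_source != 0 or status_target != 0:
    logging.error("domblkinfo returned a non-zero status")
elif "Capacity" not in output_source:
    logging.error("Unexpected domblkinfo output: %s" % output_source)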
def add_cdrom_device(vm_name, init_cdrom):
    """
    Add cdrom device for test vm
    """
    if vm.is_alive():
        virsh.destroy(vm_name)
    virsh.attach_disk(vm_name, init_cdrom, "hdc",
                      "--type cdrom --sourcetype file --config",
                      debug=True)
def modify_source(vm_name, target, dst_image):
    """
    Modify domain's configuration to change its disk source
    """
    try:
        virsh.detach_disk(vm_name, target, extra="--config",
                          ignore_status=False)
        virsh.attach_disk(vm_name, dst_image, target, extra="--config",
                          ignore_status=False)
    except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as detail:
        raise error.TestFail("Modify guest source failed: %s" % detail)
def attach_removable_media(type, source, dev):
    bus = {'cdrom': 'ide', 'floppy': 'fdc'}
    args = {'driver': 'qemu', 'subdriver': 'raw', 'sourcetype': 'file',
            'type': type, 'targetbus': bus[type]}
    if type == 'cdrom':
        args.update({'mode': 'readonly'})
    config = ''
    # Join all options together to get command line
    for key in args.keys():
        config += ' --%s %s' % (key, args[key])
    config += ' --current'
    virsh.attach_disk(vm_name, source, dev, extra=config)
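# Illustrative, standalone sketch of the extra string the helper above builds
# for a cdrom device; the values here are examples, not taken from the test,
# and dict ordering may differ on older Python versions.
bus = {'cdrom': 'ide', 'floppy': 'fdc'}
args = {'driver': 'qemu', 'subdriver': 'raw', 'sourcetype': 'file',
        'type': 'cdrom', 'targetbus': bus['cdrom'], 'mode': 'readonly'}
extra = ''.join(' --%s %s' % (k, v) for k, v in args.items()) + ' --current'
print(extra)
# e.g. " --driver qemu --subdriver raw --sourcetype file --type cdrom
#        --targetbus ide --mode readonly --current"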
def add_cdrom_device(vm_name, init_cdrom):
    """
    Add cdrom device for test vm

    :param vm_name: guest name
    :param init_cdrom: source file
    """
    if vm.is_alive():
        virsh.destroy(vm_name)
    virsh.attach_disk(vm_name, init_cdrom, disk_device,
                      "--type cdrom --sourcetype file --config",
                      debug=True)
def add_device(vm_name, init_source="''"):
    """
    Add device for test vm

    :param vm_name: guest name
    :param init_source: source file
    """
    if vm.is_alive():
        virsh.destroy(vm_name)
    virsh.attach_disk(vm_name, init_source, target_device,
                      "--type %s --sourcetype file --config" % device_type,
                      debug=True)
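# Hedged usage sketch (illustrative only): the helper above is typically
# called while the guest is shut off so the --config change lands in the
# persistent XML. The "/tmp/test.iso" path and the follow-up start/login are
# assumptions, not taken from the original test; `vm`, `target_device` and
# `device_type` are module-level names from the surrounding test.
add_device(vm_name, init_source="/tmp/test.iso")
vm.start()
vm.wait_for_login().close()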
def modify_source(vm_name, target, dst_image):
    """
    Modify domain's configuration to change its disk source
    """
    try:
        virsh.detach_disk(vm_name, target, extra="--config",
                          ignore_status=False)
        dst_image_format = utils_test.get_image_info(dst_image)['format']
        options = "--config --subdriver %s" % dst_image_format
        virsh.attach_disk(vm_name, dst_image, target, extra=options,
                          ignore_status=False)
    except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as detail:
        raise error.TestFail("Modify guest source failed: %s" % detail)
def create_disk(vm_name, disk_iso, disk_type, target_dev, mode=""):
    """
    :param vm_name : vm_name
    :param disk_iso : disk's source backing file
    :param disk_type: disk's device type: cdrom or floppy
    :param target_dev: disk's target device name
    :param mode: readonly or shareable
    """
    libvirt.create_local_disk("iso", disk_iso)
    options = "--type %s --sourcetype=file --config" % disk_type
    if mode:
        options += " --mode %s" % mode
    try:
        virsh.attach_disk(vm_name, disk_iso, target_dev, options)
    except Exception:
        os.remove(disk_iso)
        raise exceptions.TestFail("Failed to attach")
def attach_removable_media(type, source, dev):
    bus = {'cdrom': 'ide', 'floppy': 'fdc', 'disk': 'virtio'}
    args = {'driver': 'qemu', 'subdriver': 'raw', 'sourcetype': 'file',
            'type': type, 'targetbus': bus[type]}
    if type == 'cdrom':
        args.update({'mode': 'readonly'})
    config = ''
    # Join all options together to get command line
    for key in list(args.keys()):
        config += ' --%s %s' % (key, args[key])
    config += ' --current'
    virsh.attach_disk(vm_name, source, dev, extra=config)
def create_disk(vm_name, disk_iso, disk_type, target_dev, mode=""):
    """
    :param vm_name : vm_name
    :param disk_iso : disk's source backing file
    :param disk_type: disk's device type: cdrom or floppy
    :param target_dev: disk's target device name
    :param mode: readonly or shareable
    """
    libvirt.create_local_disk("iso", disk_iso)
    options = "--type %s --sourcetype=file --config" % disk_type
    if mode:
        options += " --mode %s" % mode
    try:
        virsh.attach_disk(vm_name, disk_iso, target_dev, options)
    except Exception:
        os.remove(disk_iso)
        raise exceptions.TestFail("Failed to attach")
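# Hedged usage sketch: create a small ISO and attach it read-only as a cdrom,
# then detach it again for cleanup. The path and the "hdc" target are example
# values; the helper itself raises TestFail if the attach fails. `os`,
# `data_dir` and `virsh` are assumed to be imported by the surrounding test.
iso_path = os.path.join(data_dir.get_tmp_dir(), "attach_test.iso")
create_disk(vm_name, iso_path, "cdrom", "hdc", mode="readonly")
virsh.detach_disk(vm_name, "hdc", extra="--config", debug=True)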
def attach_disk_test(test_disk_source, front_dev):
    """
    Attach-disk testcase.
    1.Attach a disk to guest.
    2.Perform domblkinfo operation.
    3.Detach the disk.

    :param test_disk_source: disk source file path.
    :param front_dev: front end device name.
    :return: Command status and output.
    """
    try:
        disk_source = test_disk_source
        front_device = front_dev
        with open(disk_source, 'wb') as source_file:
            source_file.seek((512 * 1024 * 1024) - 1)
            source_file.write(str(0).encode())
        virsh.attach_disk(vm_name, disk_source, front_device, debug=True)
        vm_ref = vm_name
        if "--all" in extra:
            disk_source = ""
            vm_ref = "%s %s" % (vm_name, extra)
        result_source = virsh.domblkinfo(vm_ref, disk_source,
                                         ignore_status=True, debug=True)
        status_source = result_source.exit_status
        output_source = result_source.stdout.strip()
        if driver == "qemu":
            if "--all" in extra:
                front_device = ""
            result_target = virsh.domblkinfo(vm_ref, front_device,
                                             ignore_status=True, debug=True)
            status_target = result_target.exit_status
            output_target = result_target.stdout.strip()
        else:
            status_target = 0
            output_target = "Xen doesn't support domblkinfo target!"
        front_device = front_dev
        virsh.detach_disk(vm_name, front_device, debug=True)
        return status_target, output_target, status_source, output_source
    except (process.CmdError, IOError):
        return 1, "", 1, ""
def create_cdrom(vm_name, orig_iso, target_dev):
    """
    :param vm_name : vm_name
    :param orig_iso : disk's source backing file.
    """
    try:
        _file = open(orig_iso, 'wb')
        _file.seek((1024 * 1024) - 1)
        _file.write(str(0))
        _file.close()
    except IOError:
        raise error.TestFail("Create orig_iso failed!")
    try:
        virsh.attach_disk(vm_name, orig_iso, target_dev,
                          "--type cdrom --sourcetype=file --config")
    except Exception:
        os.remove(orig_iso)
        raise error.TestFail("Failed to attach")
def add_device(vm_name, init_source="''"):
    """
    Add device for test vm

    :param vm_name: guest name
    :param init_source: source file
    """
    if vm.is_alive():
        virsh.destroy(vm_name)
    device_target_bus = params.get("device_target_bus", "ide")
    virsh.attach_disk(vm_name, init_source, target_device,
                      "--type %s --sourcetype file --targetbus %s --config"
                      % (device_type, device_target_bus),
                      debug=True)
def modify_source(vm_name, target, dst_image):
    """
    Modify domain's configuration to change its disk source
    """
    try:
        virsh.detach_disk(vm_name, target, extra="--config",
                          ignore_status=False)
        dst_image_format = utils_test.get_image_info(dst_image)['format']
        options = "--config --subdriver %s" % dst_image_format
        virsh.attach_disk(vm_name, dst_image, target, extra=options,
                          ignore_status=False)
    except (remote.LoginError, virt_vm.VMError, aexpect.ShellError,
            error.CmdError) as detail:
        raise error.TestFail("Modify guest source failed: %s" % detail)
def create_disk(test, vm_name, orig_iso, disk_type, target_dev, mode=""):
    """
    :param vm_name : vm_name
    :param orig_iso : disk's source backing file
    :param disk_type: disk's device type: cdrom or floppy
    :param target_dev: disk's target device name
    :param mode: readonly or shareable
    """
    try:
        with open(orig_iso, 'wb') as _file:
            _file.seek((1024 * 1024) - 1)
            _file.write(str(0).encode())
    except IOError:
        test.fail("Create orig_iso failed!")
    options = "--type %s --sourcetype=file --config" % disk_type
    if mode:
        options += " --mode %s" % mode
    try:
        virsh.attach_disk(vm_name, orig_iso, target_dev, options)
    except Exception:
        os.remove(orig_iso)
        test.fail("Failed to attach")
def create_disk(params, test, vm_name, orig_iso, disk_type, target_dev,
                disk_format="", mode=""):
    """
    Prepare image for attach and attach it to domain

    :param params: parameters from cfg file
    :param vm_name : vm_name
    :param orig_iso : disk's source backing file
    :param disk_type: disk's device type: cdrom or floppy
    :param target_dev: disk's target device name
    :param disk_format: disk's target format
    :param mode: readonly or shareable
    """
    slice_test = "yes" == params.get("disk_slice", "yes")
    try:
        # Create a sliced cdrom image for the slice test
        if slice_test:
            libvirt.create_local_disk("file", orig_iso, size="10",
                                      disk_format=disk_format,
                                      extra="-o preallocation=full")
            params["input_source_file"] = orig_iso
            params["disk_slice"] = {"slice": "yes"}
            params["target_dev"] = "sdc"
            disk_xml = libvirt.create_disk_xml(params)
        else:
            with open(orig_iso, 'wb') as _file:
                _file.seek((1024 * 1024) - 1)
                _file.write(str(0).encode())
    except IOError:
        test.fail("Create orig_iso failed!")
    options = "--type %s --sourcetype=file --config" % disk_type
    try:
        if mode:
            options += " --mode %s" % mode
        if slice_test:
            # Use attach_device to add a cdrom file with slice to the guest
            virsh.attach_device(vm_name, disk_xml, flagstr="--config")
        else:
            virsh.attach_disk(vm_name, orig_iso, target_dev, options)
    except Exception:
        os.remove(orig_iso)
        test.fail("Failed to attach")
def create_disk(test, vm_name, orig_iso, disk_type, target_dev, mode=""):
    """
    :param vm_name : vm_name
    :param orig_iso : disk's source backing file
    :param disk_type: disk's device type: cdrom or floppy
    :param target_dev: disk's target device name
    :param mode: readonly or shareable
    """
    try:
        with open(orig_iso, 'wb') as _file:
            _file.seek((1024 * 1024) - 1)
            _file.write(str(0).encode())
    except IOError:
        test.fail("Create orig_iso failed!")
    options = "--type %s --sourcetype=file --config" % disk_type
    if mode:
        options += " --mode %s" % mode
    try:
        virsh.attach_disk(vm_name, orig_iso, target_dev, options)
    except Exception:
        os.remove(orig_iso)
        test.fail("Failed to attach")
def add_disk(vm_name, init_source, target_device, extra_param, format=''):
    """
    Add disk/cdrom for test vm

    :param vm_name: guest name
    :param init_source: source file
    :param target_device: target of disk device
    :param extra_param: additional arguments to command
    :param format: init_source format(qcow2 or raw)
    """
    if not os.path.exists(new_disk):
        if format == "qcow2":
            process.run('qemu-img create -f qcow2 %s %s -o preallocation=full'
                        % (new_disk, '1G'), shell=True, verbose=True)
        elif format == "raw":
            process.run('qemu-img create -f raw %s %s' % (new_disk, '1G'),
                        shell=True, verbose=True)
        else:
            open(new_disk, 'a').close()
    if virsh.is_alive(vm_name) and 'cdrom' in extra_param:
        virsh.destroy(vm_name)
    virsh.attach_disk(vm_name, init_source, target_device, extra_param,
                      **virsh_dargs)
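# Hedged usage sketch: attach the preallocated qcow2 file created by the
# helper above to the guest as vdb. The "vdb" target and the extra flags are
# illustrative values; `new_disk` and `virsh_dargs` are module-level names
# from the surrounding test, not defined here.
add_disk(vm_name, new_disk, "vdb",
         "--driver qemu --subdriver qcow2 --config", format="qcow2")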
def setup_raw_disk_blockresize():
    """
    Prepare raw disk and create snapshots.
    """
    if not vm.is_alive():
        vm.start()
    vm.wait_for_login().close()
    # Create raw type image
    image_path = test_obj.tmp_dir + '/blockresize_test'
    libvirt.create_local_disk("file", path=image_path, size='500K',
                              disk_format="raw")
    test_obj.new_image_path = image_path
    # attach new disk
    virsh.attach_disk(vm.name, source=image_path, target=device,
                      extra=extra, debug=True)
    test_obj.new_dev = device
    # create snap chain
    test_obj.prepare_snapshot()
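# Hedged follow-up sketch: the resize operation this setup prepares for.
# virsh blockresize takes the target device and a new size; the "1M" value
# here is only an example, not the size used by the original case, and
# `libvirt.check_exit_status` is the same helper used elsewhere in this file.
ret = virsh.blockresize(vm.name, test_obj.new_dev, "1M", debug=True)
libvirt.check_exit_status(ret)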
def attach_disk_iothread_two(vm_name, device_source, device_target):
    """
    Attach a disk with an iothread value of 2.
    """
    disk_attach_success = params.get("virt_disk_attach_success")
    attach_option = params.get("disk_attach_option_io_2")
    ret = virsh.attach_disk(vm_name, device_source, device_target,
                            attach_option, debug=True)
    disk_attach_error = False
    libvirt.check_exit_status(ret, disk_attach_error)
    libvirt.check_result(ret, expected_match=disk_attach_success)
def setup_blockcopy_extended_l2():
    """
    Prepare running domain with extended_l2=on type image.
    """
    # Prepare the image
    image_path = test_obj.tmp_dir + '/new_image'
    libvirt.create_local_disk("file", path=image_path, size='500M',
                              disk_format=disk_format, extra=disk_extras)
    check_obj.check_image_info(image_path, check_item='extended l2',
                               expected_value='true')
    test_obj.new_image_path = image_path
    # Get the old partition list of the guest
    session = vm.wait_for_login()
    test_obj.old_parts = utils_disk.get_parts_list(session)
    session.close()
    # Attach the new disk
    if encryption_disk:
        secret_disk_dict = eval(params.get("secret_disk_dict", '{}'))
        test_obj.prepare_secret_disk(image_path, secret_disk_dict)
        if not vm.is_alive():
            vm.start()
    else:
        virsh.attach_disk(vm.name, source=image_path, target=device,
                          extra=attach_disk_extra, debug=True,
                          ignore_status=False)
    test_obj.new_dev = device
    # Clean up the copy file
    if os.path.exists(tmp_copy_path):
        process.run('rm -f %s' % tmp_copy_path)
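# Hedged follow-up sketch: a blockcopy that this setup is meant to exercise.
# `tmp_copy_path` comes from the surrounding test; the option string shown
# here ("--wait --verbose --pivot") is illustrative and not necessarily what
# the original case passes.
ret = virsh.blockcopy(vm.name, test_obj.new_dev, tmp_copy_path,
                      "--wait --verbose --pivot", debug=True)
libvirt.check_exit_status(ret)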
def hotplug_disk(disk_name):
    """
    Hotplug a disk to the guest

    :param disk_name: the name of the disk to be hotplugged
    """
    device_source = os.path.join(data_dir.get_tmp_dir(), disk_name)
    libvirt.create_local_disk("file", device_source, size='1')
    try:
        res = virsh.attach_disk(vm_name, device_source,
                                disk_target_name, debug=True)
        utils_misc.wait_for(
            lambda: (res.stdout == "Disk attached successfully"), 10)
    except Exception:
        test.error("Can not attach %s to the guest" % disk_target_name)
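# Hedged follow-up sketch: one way to confirm the hotplug landed, by checking
# that the target shows up in `virsh domblklist`. `disk_target_name` and
# `test` are the module-level names the helper above already relies on.
blklist = virsh.domblklist(vm_name, debug=True).stdout.strip()
if disk_target_name not in blklist:
    test.fail("%s not found in domblklist output" % disk_target_name)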
def attach_disk_iothread_zero(vm_name, device_source, device_target):
    """
    Attempt to attach a disk with an iothread value of 0.
    """
    disk_errors = params.get("virt_disk_iothread_0_errors")
    attach_option = params.get("disk_attach_option_io_0", "--iothread 0")
    ret = virsh.attach_disk(vm_name, device_source, device_target,
                            attach_option, debug=True)
    disk_attach_error = True
    libvirt.check_exit_status(ret, disk_attach_error)
    libvirt.check_result(ret, expected_fails=disk_errors)
def attach_additional_disk(vm, disksize, targetdev):
    """
    Create a disk with disksize, then attach it to given vm.

    @param vm: Libvirt VM object.
    @param disksize: size of attached disk
    @param targetdev: target of disk device
    """
    logging.info("Attaching disk...")
    disk_path = os.path.join(data_dir.get_tmp_dir(), targetdev)
    cmd = "qemu-img create %s %s" % (disk_path, disksize)
    status, output = commands.getstatusoutput(cmd)
    if status:
        return (False, output)

    # Detach first to make sure the target device is not already attached.
    virsh.detach_disk(vm.name, targetdev, extra="--config")

    attach_result = virsh.attach_disk(vm.name, disk_path, targetdev,
                                      extra="--config", debug=True)
    if attach_result.exit_status:
        return (False, attach_result)
    return (True, disk_path)
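# Hedged usage sketch: attach a 1G scratch disk as vdb and clean it up again.
# The size and target are examples only; on success the helper returns the
# created image path, on failure it returns the error output or CmdResult.
ok, value = attach_additional_disk(vm, "1G", "vdb")
if not ok:
    logging.error("Attach failed: %s" % value)
else:
    virsh.detach_disk(vm.name, "vdb", extra="--config", debug=True)
    os.remove(value)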
def prepare_block_device(params, vm_name):
    """
    Prepare block_device for test

    :param params: dict, parameters used
    :param vm_name: vm name
    """
    target_dev = params.get("target_dev")
    block_device = params.get("nfs_mount_dir") + '/' + params.get(
        "block_device_name")
    libvirt.create_local_disk("file", block_device, '1', "qcow2")
    source_loop_dev = setup_loop_dev(block_device)
    target_loop_dev = setup_loop_dev(block_device, remote_params=params)
    # Attach block device to guest
    result = virsh.attach_disk(vm_name, source_loop_dev, target_dev,
                               debug=True)
    libvirt.check_exit_status(result)
    return (source_loop_dev, target_loop_dev)
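# setup_loop_dev() is referenced above but defined elsewhere in the test. A
# minimal, hypothetical local sketch of what the local-host branch might do
# (ignoring the remote_params path) could look like this:
def setup_loop_dev_sketch(image_path):
    """
    Bind image_path to the first free loop device and return its name.
    """
    # `losetup --find --show` allocates a free loop device and prints its
    # name, e.g. /dev/loop0
    result = process.run("losetup --find --show %s" % image_path, shell=True)
    return result.stdout_text.strip()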
def run(test, params, env): """ Test command: virsh qemu-agent-command. """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) status_cmd = params.get("status_cmd", "") freeze_cmd = params.get("freeze_cmd", "") thaw_cmd = params.get("thaw_cmd", "") xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) try: def get_dirty(session, frozen=False): """ Get dirty data of guest """ try: data_cmd = "cat /proc/meminfo | grep Dirty" if not frozen: result = utils_misc.wait_for(lambda: int(session. cmd_output(data_cmd). strip().split()[1]) != 0, 60) if result: return int(session.cmd_output(data_cmd).strip(). split()[1]) else: return 0 dirty_info = session.cmd_output(data_cmd).strip() return int(dirty_info.split()[1]) else: result = utils_misc.wait_for(lambda: int(session. cmd_output(data_cmd). strip().split()[1]) == 0, 60) if result: return 0 else: return int(session.cmd_output(data_cmd).strip(). split()[1]) except (IndexError, ValueError) as details: test.fail("Get dirty info failed: %s" % details) device_source_path = os.path.join(data_dir.get_tmp_dir(), "disk.img") device_source = libvirt.create_local_disk("file", path=device_source_path, disk_format="qcow2") vm.prepare_guest_agent() # Do operation before freeze guest filesystem session = vm.wait_for_login() tmp_file = "/mnt/test.file" try: # Create extra image and attach to guest, then mount old_parts = libvirt.get_parts_list(session) ret = virsh.attach_disk(vm_name, device_source, "vdd") if ret.exit_status: test.fail("Attaching device failed before testing agent:%s" % ret.stdout.strip()) time.sleep(1) new_parts = libvirt.get_parts_list(session) added_part = list(set(new_parts).difference(set(old_parts))) session.cmd("mkfs.ext3 -F /dev/{0} && mount /dev/{0} /mnt".format(added_part[0])) # Generate dirty memory session.cmd("rm -f %s" % tmp_file) session.cmd_output("cp /dev/zero %s 2>/dev/null &" % tmp_file) # Get original dirty data org_dirty_info = get_dirty(session) fz_cmd_result = virsh.qemu_agent_command(vm_name, freeze_cmd, ignore_status=True, debug=True) libvirt.check_exit_status(fz_cmd_result) # Get frozen dirty data fz_dirty_info = get_dirty(session, True) st_cmd_result = virsh.qemu_agent_command(vm_name, status_cmd, ignore_status=True, debug=True) libvirt.check_exit_status(st_cmd_result) if not st_cmd_result.stdout.strip().count("frozen"): test.fail("Guest filesystem status is not frozen: %s" % st_cmd_result.stdout.strip()) tw_cmd_result = virsh.qemu_agent_command(vm_name, thaw_cmd, ignore_status=True, debug=True) libvirt.check_exit_status(tw_cmd_result) # Get thawed dirty data tw_dirty_info = get_dirty(session) st_cmd_result = virsh.qemu_agent_command(vm_name, status_cmd, ignore_status=True, debug=True) libvirt.check_exit_status(st_cmd_result) if not st_cmd_result.stdout.strip().count("thawed"): test.fail("Guest filesystem status is not thawed: %s" % st_cmd_result.stdout.strip()) logging.info("Original dirty data: %s" % org_dirty_info) logging.info("Frozen dirty data: %s" % fz_dirty_info) logging.info("Thawed dirty data: %s" % tw_dirty_info) if not tw_dirty_info or not org_dirty_info: test.fail("The thawed dirty data should not be 0!") if fz_dirty_info: test.fail("The frozen dirty data should be 0!") finally: # Thaw the file system that remove action can be done virsh.qemu_agent_command(vm_name, thaw_cmd, ignore_status=True) session.cmd("rm -f %s" % tmp_file) session.close() finally: xml_backup.sync() os.remove(device_source_path)
auditd_service = Factory.create_service("auditd")
if not auditd_service.status():
    auditd_service.start()
logging.info("Auditd service status: %s" % auditd_service.status())

# If we are testing a cdrom device, we need to detach hdc in the VM first.
if device == "cdrom":
    if vm.is_alive():
        vm.destroy(gracefully=False)
    s_detach = virsh.detach_disk(vm_name, device_target, "--config")
    if not s_detach:
        logging.error("Detach hdc failed before test.")

# If we are testing detach-disk, we need to attach a certain device first.
if test_cmd == "detach-disk" and no_attach != "yes":
    s_attach = virsh.attach_disk(vm_name, device_source, device_target,
                                 "--driver qemu --config").exit_status
    if s_attach != 0:
        logging.error("Attaching device failed before testing detach-disk")

    if test_twice:
        device_target2 = params.get("at_dt_disk_device_target2",
                                    device_target)
        create_device_file(device_source)
        s_attach = virsh.attach_disk(vm_name, device_source, device_target2,
                                     "--driver qemu --config").exit_status
        if s_attach != 0:
            logging.error("Attaching device failed before testing "
                          "detach-disk test_twice")

vm.start()
def trigger_events(events_list=[]):
    """
    Trigger various events in events_list
    """
    expected_events_list = []
    tmpdir = data_dir.get_tmp_dir()
    save_path = os.path.join(tmpdir, "vm_event.save")
    new_disk = os.path.join(tmpdir, "new_disk.img")
    try:
        for event in events_list:
            if event in ["start", "restore"]:
                if vm.is_alive():
                    vm.destroy()
            else:
                if not vm.is_alive():
                    vm.start()
                    vm.wait_for_login().close()
            if event == "start":
                virsh.start(vm_name, **virsh_dargs)
                expected_events_list.append("'lifecycle' for %s:"
                                            " Started Booted")
                vm.wait_for_login().close()
            elif event == "save":
                virsh.save(vm_name, save_path, **virsh_dargs)
                expected_events_list.append("'lifecycle' for %s:"
                                            " Stopped Saved")
            elif event == "restore":
                if not os.path.exists(save_path):
                    logging.error("%s not exist", save_path)
                else:
                    virsh.restore(save_path, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Started Restored")
            elif event == "destroy":
                virsh.destroy(vm_name, **virsh_dargs)
                expected_events_list.append("'lifecycle' for %s:"
                                            " Stopped Destroyed")
            elif event == "reset":
                virsh.reset(vm_name, **virsh_dargs)
                expected_events_list.append("'reboot' for %s")
            elif event == "vcpupin":
                virsh.vcpupin(vm_name, "0", "0", **virsh_dargs)
                expected_events_list.append("'tunable' for %s:"
                                            "\n\tcputune.vcpupin0: 0")
            elif event == "emulatorpin":
                virsh.emulatorpin(vm_name, "0", **virsh_dargs)
                expected_events_list.append("'tunable' for %s:"
                                            "\n\tcputune.emulatorpin: 0")
            elif event == "setmem":
                virsh.setmem(vm_name, 1048576, **virsh_dargs)
                expected_events_list.append("'balloon-change' for %s:")
            elif event == "detach-disk":
                if not os.path.exists(new_disk):
                    open(new_disk, "a").close()
                # Attach the disk first; this event will not be caught
                virsh.attach_disk(vm_name, new_disk, "vdb", **virsh_dargs)
                virsh.detach_disk(vm_name, "vdb", **virsh_dargs)
                expected_events_list.append("'device-removed' for %s:"
                                            " virtio-disk1")
            else:
                raise error.TestError("Unsupported event: %s" % event)
            # The event may not be received immediately
            time.sleep(3)
    finally:
        if os.path.exists(save_path):
            os.unlink(save_path)
        if os.path.exists(new_disk):
            os.unlink(new_disk)
        return expected_events_list
def run(test, params, env): """ Test virsh {at|de}tach-disk command for lxc. The command can attach new disk/detach disk. 1.Prepare test environment,destroy or suspend a VM. 2.Perform virsh attach/detach-disk operation. 3.Recover test environment. 4.Confirm the test result. """ vm_ref = params.get("at_dt_disk_vm_ref", "name") at_options = params.get("at_dt_disk_at_options", "") dt_options = params.get("at_dt_disk_dt_options", "") pre_vm_state = params.get("at_dt_disk_pre_vm_state", "running") status_error = "yes" == params.get("status_error", 'no') no_attach = params.get("at_dt_disk_no_attach", 'no') # Get test command. test_cmd = params.get("at_dt_disk_test_cmd", "attach-disk") # Disk specific attributes. device_source = params.get("at_dt_disk_device_source", "/dev/sdc1") device_target = params.get("at_dt_disk_device_target", "vdd") test_twice = "yes" == params.get("at_dt_disk_test_twice", "no") test_audit = "yes" == params.get("at_dt_disk_check_audit", "no") serial = params.get("at_dt_disk_serial", "") address = params.get("at_dt_disk_address", "") address2 = params.get("at_dt_disk_address2", "") if serial: at_options += (" --serial %s" % serial) if address2: at_options_twice = at_options + (" --address %s" % address2) if address: at_options += (" --address %s" % address) vm_name = params.get("main_vm") vm = env.get_vm(vm_name) if vm.is_alive(): vm.destroy(gracefully=False) # Back up xml file. backup_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) # Create virtual device file if user doesn't prepare a partition. test_block_dev = False if device_source.count("ENTER"): device_source = libvirt.setup_or_cleanup_iscsi(True) test_block_dev = True if not device_source: # We should skip this case raise error.TestNAError("Can not get iscsi device name in host") if vm.is_alive(): vm.destroy(gracefully=False) # if we are testing audit, we need to start audit servcie first. if test_audit: auditd_service = Factory.create_service("auditd") if not auditd_service.status(): auditd_service.start() logging.info("Auditd service status: %s" % auditd_service.status()) # If we are testing detach-disk, we need to attach certain device first. if test_cmd == "detach-disk" and no_attach != "yes": s_attach = virsh.attach_disk(vm_name, device_source, device_target, "--config").exit_status if s_attach != 0: logging.error("Attaching device failed before testing detach-disk") if test_twice: device_target2 = params.get("at_dt_disk_device_target2", device_target) s_attach = virsh.attach_disk(vm_name, device_source, device_target2, "--config").exit_status if s_attach != 0: logging.error("Attaching device failed before testing " "detach-disk test_twice") vm.start() # Turn VM into certain state. if pre_vm_state == "paused": logging.info("Suspending %s..." % vm_name) if vm.is_alive(): vm.pause() elif pre_vm_state == "shut off": logging.info("Shuting down %s..." % vm_name) if vm.is_alive(): vm.destroy(gracefully=False) # Get disk count before test. disk_count_before_cmd = vm_xml.VMXML.get_disk_count(vm_name) # Test. domid = vm.get_id() domuuid = vm.get_uuid() # Confirm how to reference a VM. 
if vm_ref == "name": vm_ref = vm_name elif vm_ref.find("invalid") != -1: vm_ref = params.get(vm_ref) elif vm_ref == "id": vm_ref = domid elif vm_ref == "hex_id": vm_ref = hex(int(domid)) elif vm_ref == "uuid": vm_ref = domuuid else: vm_ref = "" if test_cmd == "attach-disk": status = virsh.attach_disk(vm_ref, device_source, device_target, at_options, debug=True).exit_status elif test_cmd == "detach-disk": status = virsh.detach_disk(vm_ref, device_target, dt_options, debug=True).exit_status if test_twice: device_target2 = params.get("at_dt_disk_device_target2", device_target) if test_cmd == "attach-disk": if address2: at_options = at_options_twice status = virsh.attach_disk(vm_ref, device_source, device_target2, at_options, debug=True).exit_status elif test_cmd == "detach-disk": status = virsh.detach_disk(vm_ref, device_target2, dt_options, debug=True).exit_status # Resume guest after command. On newer libvirt this is fixed as it has # been a bug. The change in xml file is done after the guest is resumed. if pre_vm_state == "paused": vm.resume() # Check audit log check_audit_after_cmd = True if test_audit: grep_audit = ('grep "%s" /var/log/audit/audit.log' % test_cmd.split("-")[0]) cmd = (grep_audit + ' | ' + 'grep "%s" | tail -n1 | grep "res=success"' % device_source) if utils.run(cmd).exit_status: logging.error("Audit check failed") check_audit_after_cmd = False # Check disk count after command. check_count_after_cmd = True disk_count_after_cmd = vm_xml.VMXML.get_disk_count(vm_name) if test_cmd == "attach-disk": if disk_count_after_cmd == disk_count_before_cmd: check_count_after_cmd = False elif test_cmd == "detach-disk": if disk_count_after_cmd < disk_count_before_cmd: check_count_after_cmd = False # Recover VM state. if pre_vm_state == "shut off": vm.start() # Check disk type after attach. check_disk_type = True try: check_disk_type = vm_xml.VMXML.check_disk_type(vm_name, device_source, "block") except xcepts.LibvirtXMLError: # No disk found check_disk_type = False # Check disk serial after attach. check_disk_serial = True if serial: disk_serial = vm_xml.VMXML.get_disk_serial(vm_name, device_target) if serial != disk_serial: check_disk_serial = False # Check disk address after attach. check_disk_address = True if address: disk_address = vm_xml.VMXML.get_disk_address(vm_name, device_target) if utils_test.canonicalize_disk_address(address) !=\ utils_test.canonicalize_disk_address(disk_address): check_disk_address = False # Check multifunction address after attach. check_disk_address2 = True if address2: disk_address2 = vm_xml.VMXML.get_disk_address(vm_name, device_target2) if utils_test.canonicalize_disk_address(address2) !=\ utils_test.canonicalize_disk_address(disk_address2): check_disk_address2 = False # Destroy VM. vm.destroy(gracefully=False) # Check disk count after VM shutdown (with --config). check_count_after_shutdown = True disk_count_after_shutdown = vm_xml.VMXML.get_disk_count(vm_name) if test_cmd == "attach-disk": if disk_count_after_shutdown == disk_count_before_cmd: check_count_after_shutdown = False elif test_cmd == "detach-disk": if disk_count_after_shutdown < disk_count_before_cmd: check_count_after_shutdown = False # Recover VM. if vm.is_alive(): vm.destroy(gracefully=False) backup_xml.sync() if test_block_dev: libvirt.setup_or_cleanup_iscsi(False) # Check results. if status_error: if not status: raise error.TestFail("virsh %s exit with unexpected value." % test_cmd) else: if status: raise error.TestFail("virsh %s failed." 
% test_cmd) if test_cmd == "attach-disk": if at_options.count("config"): if not check_count_after_shutdown: raise error.TestFail("Cannot see config attached device " "in xml file after VM shutdown.") if not check_disk_serial: raise error.TestFail("Serial set failed after attach") if not check_disk_address: raise error.TestFail("Address set failed after attach") if not check_disk_address2: raise error.TestFail("Address(multifunction) set failed" " after attach") else: if not check_count_after_cmd: raise error.TestFail("Cannot see device in xml file" " after attach.") if not check_disk_type: raise error.TestFail("Check disk type failed after" " attach.") if not check_audit_after_cmd: raise error.TestFail("Audit hotplug failure after attach") if at_options.count("persistent"): if not check_count_after_shutdown: raise error.TestFail("Cannot see device attached " "with persistent after " "VM shutdown.") else: if check_count_after_shutdown: raise error.TestFail("See non-config attached device " "in xml file after VM shutdown.") elif test_cmd == "detach-disk": if dt_options.count("config"): if check_count_after_shutdown: raise error.TestFail("See config detached device in " "xml file after VM shutdown.") else: if check_count_after_cmd: raise error.TestFail("See device in xml file " "after detach.") if not check_audit_after_cmd: raise error.TestFail("Audit hotunplug failure " "after detach") if dt_options.count("persistent"): if check_count_after_shutdown: raise error.TestFail("See device deattached " "with persistent after " "VM shutdown.") else: if not check_count_after_shutdown: raise error.TestFail("See non-config detached " "device in xml file after " "VM shutdown.") else: raise error.TestError("Unknown command %s." % test_cmd)
def run(test, params, env): """ Test migration of multi vms. """ vm_names = params.get("migrate_vms").split() if len(vm_names) < 2: raise exceptions.TestSkipError("No multi vms provided.") # Prepare parameters method = params.get("virsh_migrate_method") jobabort = "yes" == params.get("virsh_migrate_jobabort", "no") options = params.get("virsh_migrate_options", "") status_error = "yes" == params.get("status_error", "no") remote_host = params.get("remote_host", "DEST_HOSTNAME.EXAMPLE.COM") local_host = params.get("local_host", "SOURCE_HOSTNAME.EXAMPLE.COM") host_user = params.get("host_user", "root") host_passwd = params.get("host_password", "PASSWORD") nfs_shared_disk = params.get("nfs_shared_disk", True) migration_type = params.get("virsh_migration_type", "simultaneous") migrate_timeout = int(params.get("virsh_migrate_thread_timeout", 900)) migration_time = int(params.get("virsh_migrate_timeout", 60)) # Params for NFS and SSH setup params["server_ip"] = params.get("migrate_dest_host") params["server_user"] = "******" params["server_pwd"] = params.get("migrate_dest_pwd") params["client_ip"] = params.get("migrate_source_host") params["client_user"] = "******" params["client_pwd"] = params.get("migrate_source_pwd") params["nfs_client_ip"] = params.get("migrate_dest_host") params["nfs_server_ip"] = params.get("migrate_source_host") desturi = libvirt_vm.get_uri_with_transport(transport="ssh", dest_ip=remote_host) srcuri = libvirt_vm.get_uri_with_transport(transport="ssh", dest_ip=local_host) # Don't allow the defaults. if srcuri.count('///') or srcuri.count('EXAMPLE'): raise exceptions.TestSkipError("The srcuri '%s' is invalid" % srcuri) if desturi.count('///') or desturi.count('EXAMPLE'): raise exceptions.TestSkipError("The desturi '%s' is invalid" % desturi) # Config ssh autologin for remote host ssh_key.setup_remote_ssh_key(remote_host, host_user, host_passwd, port=22, public_key="rsa") # Prepare local session and remote session localrunner = remote.RemoteRunner(host=remote_host, username=host_user, password=host_passwd) remoterunner = remote.RemoteRunner(host=remote_host, username=host_user, password=host_passwd) # Configure NFS in remote host if nfs_shared_disk: nfs_client = nfs.NFSClient(params) nfs_client.setup() # Prepare MigrationHelper instance vms = [] for vm_name in vm_names: vm = env.get_vm(vm_name) vms.append(vm) try: option = make_migration_options(method, options, migration_time) # make sure cache=none if "unsafe" not in options: device_target = params.get("virsh_device_target", "sda") for vm in vms: if vm.is_alive(): vm.destroy() for each_vm in vm_names: logging.info("configure cache=none") vmxml = vm_xml.VMXML.new_from_dumpxml(each_vm) device_source = str(vmxml.get_disk_attr(each_vm, device_target, 'source', 'file')) ret_detach = virsh.detach_disk(each_vm, device_target, "--config") status = ret_detach.exit_status output = ret_detach.stdout.strip() logging.info("Status:%s", status) logging.info("Output:\n%s", output) if not ret_detach: raise exceptions.TestError("Detach disks fails") subdriver = utils_test.get_image_info(device_source)['format'] ret_attach = virsh.attach_disk(each_vm, device_source, device_target, "--driver qemu " "--config --cache none " "--subdriver %s" % subdriver) status = ret_attach.exit_status output = ret_attach.stdout.strip() logging.info("Status:%s", status) logging.info("Output:\n%s", output) if not ret_attach: raise exceptions.TestError("Attach disks fails") for vm in vms: if vm.is_dead(): vm.start() vm.wait_for_login() multi_migration(vms, srcuri, 
                        desturi, option, migration_type, migrate_timeout,
                        jobabort, lrunner=localrunner, rrunner=remoterunner)
    except Exception as info:
        logging.error("Test failed: %s" % info)
        flag_migration = False
def run(test, params, env):
    """
    Test command: virsh domblklist.
    1.Prepare test environment.
    2.Run domblklist and check
    3.Do attach disk and rerun domblklist with check
    4.Clean test environment.
    """

    def domblklist_test():
        """
        Run domblklist and check result, raise error if check fail.
        """
        output_disk_info = {}
        result = virsh.domblklist(vm_ref, options, ignore_status=True,
                                  debug=True)
        status = result.exit_status
        output = result.stdout.strip()
        # Check status_error
        if status_error == "yes":
            if status == 0:
                raise error.TestFail("Run successfully with wrong command!")
        elif status_error == "no":
            if status == 1:
                raise error.TestFail("Run failed with right command")
        # Check disk information.
        disk_info = get_disk_info(vm_name, options)
        logging.debug("The disk info dict from xml is: %s" % disk_info)
        output_list = output.split('\n')
        for i in range(2, len(output_list)):
            output_disk_info[i - 2] = output_list[i].split()
        logging.debug("The disk info dict from command output is: %s"
                      % output_disk_info)
        if "--details" in options:
            if disk_info != output_disk_info:
                raise error.TestFail("The output did not match with disk"
                                     " info from xml")
        else:
            for i in range(len(disk_info.keys())):
                disk_info[i] = disk_info[i][2:]
            if disk_info != output_disk_info:
                raise error.TestFail("The output did not match with disk"
                                     " info from xml")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    # Get all parameters from configuration.
    vm_ref = params.get("domblklist_vm_ref")
    options = params.get("domblklist_options", "")
    status_error = params.get("status_error", "no")
    front_dev = params.get("domblkinfo_front_dev", "vdd")
    test_attach_disk = os.path.join(test.virtdir, "tmp.img")
    extra = ""

    domid = vm.get_id()
    domuuid = vm.get_uuid()
    vm_state = vm.state()

    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = vm_name
    elif vm_ref == "uuid":
        vm_ref = domuuid

    # run domblklist and check
    domblklist_test()

    if status_error == "no":
        try:
            # attach disk and check
            source_file = open(test_attach_disk, 'wb')
            source_file.seek((512 * 1024 * 1024) - 1)
            source_file.write(str(0))
            source_file.close()
            # since bug 1049529, --config will work with detach when
            # domain is running, so change it back using --config here
            if "--inactive" in options or vm_state == "shut off":
                extra = "--config"
            virsh.attach_disk(vm_name, test_attach_disk, front_dev, extra,
                              debug=True)
            domblklist_test()
        finally:
            virsh.detach_disk(vm_name, front_dev, extra, debug=True)
            if os.path.exists(test_attach_disk):
                os.remove(test_attach_disk)
vm = libvirt_vm.VM(new_vm_name, vm.params, vm.root_dir, vm.address_cache)

# Change the disk of the vm to shared disk
# Detach existing devices
devices = vm.get_blk_devices()
for device in devices:
    s_detach = virsh.detach_disk(vm.name, device, "--config", debug=True)
    if not s_detach:
        raise error.TestError("Detach %s failed before test." % device)

# Attach system image as vda
# Then added scsi disks will be sda,sdb...
attach_args = "--subdriver %s --config" % sys_image_fmt
virsh.attach_disk(vm.name, sys_image_source, "vda", attach_args, debug=True)

vms = [vm]

def start_check_vm(vm):
    try:
        vm.start()
    except virt_vm.VMStartError as detail:
        if status_error:
            logging.debug("Expected failure:%s", detail)
            return None, None
        else:
            raise
    vm.wait_for_login()
    # Confirm VM can be accessed through network.
def run(test, params, env): """ Test command: virsh qemu-agent-command. """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) status_cmd = params.get("status_cmd", "") freeze_cmd = params.get("freeze_cmd", "") thaw_cmd = params.get("thaw_cmd", "") xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) try: def get_dirty(session, frozen=False): """ Get dirty data of guest """ try: data_cmd = "cat /proc/meminfo | grep Dirty" if not frozen: result = utils_misc.wait_for( lambda: int( session.cmd_output(data_cmd).strip().split()[1]) != 0, 60) if result: return int( session.cmd_output(data_cmd).strip().split()[1]) else: return 0 dirty_info = session.cmd_output(data_cmd).strip() return int(dirty_info.split()[1]) else: result = utils_misc.wait_for( lambda: int( session.cmd_output(data_cmd).strip().split()[1]) == 0, 60) if result: return 0 else: return int( session.cmd_output(data_cmd).strip().split()[1]) except (IndexError, ValueError) as details: test.fail("Get dirty info failed: %s" % details) device_source_path = os.path.join(data_dir.get_tmp_dir(), "disk.img") device_source = libvirt.create_local_disk("file", path=device_source_path, disk_format="qcow2") vm.prepare_guest_agent() # Do operation before freeze guest filesystem session = vm.wait_for_login() tmp_file = "/mnt/test.file" try: # Create extra image and attach to guest, then mount old_parts = utils_disk.get_parts_list(session) ret = virsh.attach_disk(vm_name, device_source, "vdd") if ret.exit_status: test.fail("Attaching device failed before testing agent:%s" % ret.stdout.strip()) time.sleep(1) new_parts = utils_disk.get_parts_list(session) added_part = list(set(new_parts).difference(set(old_parts))) session.cmd("mkfs.ext3 -F /dev/{0} && mount /dev/{0} /mnt".format( added_part[0])) # Generate dirty memory session.cmd("rm -f %s" % tmp_file) session.cmd_output("cp /dev/zero %s 2>/dev/null &" % tmp_file) # Get original dirty data org_dirty_info = get_dirty(session) fz_cmd_result = virsh.qemu_agent_command(vm_name, freeze_cmd, ignore_status=True, debug=True) libvirt.check_exit_status(fz_cmd_result) # Get frozen dirty data fz_dirty_info = get_dirty(session, True) st_cmd_result = virsh.qemu_agent_command(vm_name, status_cmd, ignore_status=True, debug=True) libvirt.check_exit_status(st_cmd_result) if not st_cmd_result.stdout.strip().count("frozen"): test.fail("Guest filesystem status is not frozen: %s" % st_cmd_result.stdout.strip()) tw_cmd_result = virsh.qemu_agent_command(vm_name, thaw_cmd, ignore_status=True, debug=True) libvirt.check_exit_status(tw_cmd_result) # Get thawed dirty data tw_dirty_info = get_dirty(session) st_cmd_result = virsh.qemu_agent_command(vm_name, status_cmd, ignore_status=True, debug=True) libvirt.check_exit_status(st_cmd_result) if not st_cmd_result.stdout.strip().count("thawed"): test.fail("Guest filesystem status is not thawed: %s" % st_cmd_result.stdout.strip()) logging.info("Original dirty data: %s" % org_dirty_info) logging.info("Frozen dirty data: %s" % fz_dirty_info) logging.info("Thawed dirty data: %s" % tw_dirty_info) if not tw_dirty_info or not org_dirty_info: test.fail("The thawed dirty data should not be 0!") if fz_dirty_info: test.fail("The frozen dirty data should be 0!") finally: # Thaw the file system that remove action can be done virsh.qemu_agent_command(vm_name, thaw_cmd, ignore_status=True) session.cmd("rm -f %s" % tmp_file) session.close() finally: xml_backup.sync() os.remove(device_source_path)
def run(test, params, env): """ Test rbd disk device. 1.Prepare test environment,destroy or suspend a VM. 2.Prepare disk image. 3.Edit disks xml and start the domain. 4.Perform test operation. 5.Recover test environment. """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) virsh_dargs = {'debug': True, 'ignore_status': True} additional_xml_file = os.path.join(data_dir.get_tmp_dir(), "additional_disk.xml") def config_ceph(): """ Write the configs to the file. """ src_host = disk_src_host.split() src_port = disk_src_port.split() conf_str = "mon_host = " hosts = [] for host, port in zip(src_host, src_port): hosts.append("%s:%s" % (host, port)) with open(disk_src_config, 'w') as f: f.write(conf_str + ','.join(hosts) + '\n') def create_pool(): """ Define and start a pool. """ sp = libvirt_storage.StoragePool() if create_by_xml: p_xml = pool_xml.PoolXML(pool_type=pool_type) p_xml.name = pool_name s_xml = pool_xml.SourceXML() s_xml.vg_name = disk_src_pool source_host = [] for (host_name, host_port) in zip(disk_src_host.split(), disk_src_port.split()): source_host.append({'name': host_name, 'port': host_port}) s_xml.hosts = source_host if auth_type: s_xml.auth_type = auth_type if auth_user: s_xml.auth_username = auth_user if auth_usage: s_xml.secret_usage = auth_usage p_xml.source = s_xml logging.debug("Pool xml: %s", p_xml) p_xml.xmltreefile.write() ret = virsh.pool_define(p_xml.xml, **virsh_dargs) libvirt.check_exit_status(ret) ret = virsh.pool_build(pool_name, **virsh_dargs) libvirt.check_exit_status(ret) ret = virsh.pool_start(pool_name, **virsh_dargs) libvirt.check_exit_status(ret) else: auth_opt = "" if client_name and client_key: auth_opt = ( "--auth-type %s --auth-username %s --secret-usage '%s'" % (auth_type, auth_user, auth_usage)) if not sp.define_rbd_pool( pool_name, mon_host, disk_src_pool, extra=auth_opt): test.fail("Failed to define storage pool") if not sp.build_pool(pool_name): test.fail("Failed to build storage pool") if not sp.start_pool(pool_name): test.fail("Failed to start storage pool") # Check pool operation ret = virsh.pool_refresh(pool_name, **virsh_dargs) libvirt.check_exit_status(ret) ret = virsh.pool_uuid(pool_name, **virsh_dargs) libvirt.check_exit_status(ret) # pool-info pool_info = sp.pool_info(pool_name) if pool_info["Autostart"] != 'no': test.fail("Failed to check pool information") # pool-autostart if not sp.set_pool_autostart(pool_name): test.fail("Failed to set pool autostart") pool_info = sp.pool_info(pool_name) if pool_info["Autostart"] != 'yes': test.fail("Failed to check pool information") # pool-autostart --disable if not sp.set_pool_autostart(pool_name, "--disable"): test.fail("Failed to set pool autostart") # If port is not pre-configured, port value should not be hardcoded in pool information. if "yes" == params.get("rbd_port", "no"): if 'port' in virsh.pool_dumpxml(pool_name): test.fail("port attribute should not be in pool information") # find-storage-pool-sources-as if "yes" == params.get("find_storage_pool_sources_as", "no"): ret = virsh.find_storage_pool_sources_as("rbd", mon_host) libvirt.check_result(ret, skip_if=unsupported_err) def create_vol(vol_params): """ Create volume. :param p_name. Pool name. :param vol_params. Volume parameters dict. :return: True if create successfully. """ pvt = libvirt.PoolVolumeTest(test, params) if create_by_xml: pvt.pre_vol_by_xml(pool_name, **vol_params) else: pvt.pre_vol(vol_name, None, '2G', None, pool_name) def check_vol(vol_params): """ Check volume information. 
""" pv = libvirt_storage.PoolVolume(pool_name) # Supported operation if vol_name not in pv.list_volumes(): test.fail("Volume %s doesn't exist" % vol_name) ret = virsh.vol_dumpxml(vol_name, pool_name) libvirt.check_exit_status(ret) # vol-info if not pv.volume_info(vol_name): test.fail("Can't see volume info") # vol-key ret = virsh.vol_key(vol_name, pool_name) libvirt.check_exit_status(ret) if "%s/%s" % (disk_src_pool, vol_name) not in ret.stdout.strip(): test.fail("Volume key isn't correct") # vol-path ret = virsh.vol_path(vol_name, pool_name) libvirt.check_exit_status(ret) if "%s/%s" % (disk_src_pool, vol_name) not in ret.stdout.strip(): test.fail("Volume path isn't correct") # vol-pool ret = virsh.vol_pool("%s/%s" % (disk_src_pool, vol_name)) libvirt.check_exit_status(ret) if pool_name not in ret.stdout.strip(): test.fail("Volume pool isn't correct") # vol-name ret = virsh.vol_name("%s/%s" % (disk_src_pool, vol_name)) libvirt.check_exit_status(ret) if vol_name not in ret.stdout.strip(): test.fail("Volume name isn't correct") # vol-resize ret = virsh.vol_resize(vol_name, "2G", pool_name) libvirt.check_exit_status(ret) # Not supported operation # vol-clone ret = virsh.vol_clone(vol_name, cloned_vol_name, pool_name) libvirt.check_result(ret, skip_if=unsupported_err) # vol-create-from volxml = vol_xml.VolXML() vol_params.update({"name": "%s" % create_from_cloned_volume}) v_xml = volxml.new_vol(**vol_params) v_xml.xmltreefile.write() ret = virsh.vol_create_from(pool_name, v_xml.xml, vol_name, pool_name) libvirt.check_result(ret, skip_if=unsupported_err) # vol-wipe ret = virsh.vol_wipe(vol_name, pool_name) libvirt.check_result(ret, skip_if=unsupported_err) # vol-upload ret = virsh.vol_upload(vol_name, vm.get_first_disk_devices()['source'], "--pool %s" % pool_name) libvirt.check_result(ret, skip_if=unsupported_err) # vol-download ret = virsh.vol_download(vol_name, cloned_vol_name, "--pool %s" % pool_name) libvirt.check_result(ret, skip_if=unsupported_err) def check_qemu_cmd(): """ Check qemu command line options. """ cmd = ("ps -ef | grep %s | grep -v grep " % vm_name) process.run(cmd, shell=True) if disk_src_name: cmd += " | grep file=rbd:%s:" % disk_src_name if auth_user and auth_key: cmd += ('id=%s:auth_supported=cephx' % auth_user) if disk_src_config: cmd += " | grep 'conf=%s'" % disk_src_config elif mon_host: hosts = '\:6789\;'.join(mon_host.split()) cmd += " | grep 'mon_host=%s'" % hosts if driver_iothread: cmd += " | grep iothread%s" % driver_iothread # Run the command process.run(cmd, shell=True) def check_save_restore(): """ Test save and restore operation """ save_file = os.path.join(data_dir.get_tmp_dir(), "%s.save" % vm_name) ret = virsh.save(vm_name, save_file, **virsh_dargs) libvirt.check_exit_status(ret) ret = virsh.restore(save_file, **virsh_dargs) libvirt.check_exit_status(ret) if os.path.exists(save_file): os.remove(save_file) # Login to check vm status vm.wait_for_login().close() def check_snapshot(snap_option, target_dev='vda'): """ Test snapshot operation. 
""" snap_name = "s1" snap_mem = os.path.join(data_dir.get_tmp_dir(), "rbd.mem") snap_disk = os.path.join(data_dir.get_tmp_dir(), "rbd.disk") xml_snap_exp = [ "disk name='%s' snapshot='external' type='file'" % target_dev ] xml_dom_exp = [ "source file='%s'" % snap_disk, "backingStore type='network' index='1'", "source protocol='rbd' name='%s'" % disk_src_name ] if snap_option.count("disk-only"): options = ("%s --diskspec %s,file=%s --disk-only" % (snap_name, target_dev, snap_disk)) elif snap_option.count("disk-mem"): options = ("%s --memspec file=%s --diskspec %s,file=" "%s" % (snap_name, snap_mem, target_dev, snap_disk)) xml_snap_exp.append("memory snapshot='external' file='%s'" % snap_mem) else: options = snap_name ret = virsh.snapshot_create_as(vm_name, options) if test_disk_internal_snapshot or test_disk_readonly: libvirt.check_result(ret, expected_fails=unsupported_err) else: libvirt.check_result(ret, skip_if=unsupported_err) # check xml file. if not ret.exit_status: snap_xml = virsh.snapshot_dumpxml(vm_name, snap_name, debug=True).stdout.strip() dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip() # Delete snapshots. libvirt.clean_up_snapshots(vm_name) if os.path.exists(snap_mem): os.remove(snap_mem) if os.path.exists(snap_disk): os.remove(snap_disk) if not all([x in snap_xml for x in xml_snap_exp]): test.fail("Failed to check snapshot xml") if not all([x in dom_xml for x in xml_dom_exp]): test.fail("Failed to check domain xml") def check_blockcopy(target): """ Block copy operation test. """ blk_file = os.path.join(data_dir.get_tmp_dir(), "blk.rbd") if os.path.exists(blk_file): os.remove(blk_file) blk_mirror = ("mirror type='file' file='%s' " "format='raw' job='copy'" % blk_file) # Do blockcopy ret = virsh.blockcopy(vm_name, target, blk_file) libvirt.check_result(ret, skip_if=unsupported_err) dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip() if not dom_xml.count(blk_mirror): test.fail("Can't see block job in domain xml") # Abort ret = virsh.blockjob(vm_name, target, "--abort") libvirt.check_exit_status(ret) dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip() if dom_xml.count(blk_mirror): test.fail("Failed to abort block job") if os.path.exists(blk_file): os.remove(blk_file) # Sleep for a while after abort operation. time.sleep(5) # Do blockcopy again ret = virsh.blockcopy(vm_name, target, blk_file) libvirt.check_exit_status(ret) # Wait for complete def wait_func(): ret = virsh.blockjob(vm_name, target, "--info") return ret.stderr.count("Block Copy: [100 %]") timeout = params.get("blockjob_timeout", 600) utils_misc.wait_for(wait_func, int(timeout)) # Pivot ret = virsh.blockjob(vm_name, target, "--pivot") libvirt.check_exit_status(ret) dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip() if not dom_xml.count("source file='%s'" % blk_file): test.fail("Failed to pivot block job") # Remove the disk file. if os.path.exists(blk_file): os.remove(blk_file) def check_in_vm(vm_obj, target, old_parts, read_only=False): """ Check mount/read/write disk in VM. :param vm. VM guest. :param target. Disk dev in VM. :return: True if check successfully. 
""" try: session = vm_obj.wait_for_login() new_parts = utils_disk.get_parts_list(session) added_parts = list(set(new_parts).difference(set(old_parts))) logging.info("Added parts:%s", added_parts) if len(added_parts) != 1: logging.error("The number of new partitions is invalid in VM") return False added_part = None if target.startswith("vd"): if added_parts[0].startswith("vd"): added_part = added_parts[0] elif target.startswith("hd"): if added_parts[0].startswith("sd"): added_part = added_parts[0] if not added_part: logging.error("Can't see added partition in VM") return False cmd = ("mount /dev/{0} /mnt && ls /mnt && (sleep 15;" " touch /mnt/testfile; umount /mnt)".format(added_part)) s, o = session.cmd_status_output(cmd, timeout=60) session.close() logging.info("Check disk operation in VM:\n, %s, %s", s, o) # Readonly fs, check the error messages. # The command may return True, read-only # messges can be found from the command output if read_only: if "Read-only file system" not in o: return False else: return True # Other errors if s != 0: return False return True except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e: logging.error(str(e)) return False def clean_up_volume_snapshots(): """ Get all snapshots for rbd_vol.img volume,unprotect and then clean up them. """ cmd = ("rbd -m {0} {1} info {2}" "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name))) if process.run(cmd, ignore_status=True, shell=True).exit_status: return # Get snapshot list. cmd = ("rbd -m {0} {1} snap" " list {2}" "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name))) snaps_out = process.run(cmd, ignore_status=True, shell=True).stdout_text snap_names = [] if snaps_out: for line in snaps_out.rsplit("\n"): if line.startswith("SNAPID") or line == "": continue snap_line = line.rsplit() if len(snap_line) == 4: snap_names.append(snap_line[1]) logging.debug("Find snapshots: %s", snap_names) # Unprotect snapshot first,otherwise it will fail to purge volume for snap_name in snap_names: cmd = ("rbd -m {0} {1} snap" " unprotect {2}@{3}" "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name), snap_name)) process.run(cmd, ignore_status=True, shell=True) # Purge volume,and then delete volume. cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} snap" " purge {2} && rbd -m {0} {1} rm {2}" "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name))) process.run(cmd, ignore_status=True, shell=True) def make_snapshot(): """ make external snapshots. :return external snapshot path list """ logging.info("Making snapshot...") first_disk_source = vm.get_first_disk_devices()['source'] snapshot_path_list = [] snapshot2_file = os.path.join(data_dir.get_tmp_dir(), "mem.s2") snapshot3_file = os.path.join(data_dir.get_tmp_dir(), "mem.s3") snapshot4_file = os.path.join(data_dir.get_tmp_dir(), "mem.s4") snapshot4_disk_file = os.path.join(data_dir.get_tmp_dir(), "disk.s4") snapshot5_file = os.path.join(data_dir.get_tmp_dir(), "mem.s5") snapshot5_disk_file = os.path.join(data_dir.get_tmp_dir(), "disk.s5") # Attempt to take different types of snapshots. 
snapshots_param_dict = { "s1": "s1 --disk-only --no-metadata", "s2": "s2 --memspec %s --no-metadata" % snapshot2_file, "s3": "s3 --memspec %s --no-metadata --live" % snapshot3_file, "s4": "s4 --memspec %s --diskspec vda,file=%s --no-metadata" % (snapshot4_file, snapshot4_disk_file), "s5": "s5 --memspec %s --diskspec vda,file=%s --live --no-metadata" % (snapshot5_file, snapshot5_disk_file) } for snapshot_name in sorted(snapshots_param_dict.keys()): ret = virsh.snapshot_create_as(vm_name, snapshots_param_dict[snapshot_name], **virsh_dargs) libvirt.check_exit_status(ret) if snapshot_name != 's4' and snapshot_name != 's5': snapshot_path_list.append( first_disk_source.replace('qcow2', snapshot_name)) return snapshot_path_list def get_secret_list(): """ Get secret list. :return secret list """ logging.info("Get secret list ...") secret_list_result = virsh.secret_list() secret_list = results_stdout_52lts( secret_list_result).strip().splitlines() # First two lines contain table header followed by entries # for each secret, such as: # # UUID Usage # -------------------------------------------------------------------------------- # b4e8f6d3-100c-4e71-9f91-069f89742273 ceph client.libvirt secret secret_list = secret_list[2:] result = [] # If secret list is empty. if secret_list: for line in secret_list: # Split on whitespace, assume 1 column linesplit = line.split(None, 1) result.append(linesplit[0]) return result mon_host = params.get("mon_host") disk_src_name = params.get("disk_source_name") disk_src_config = params.get("disk_source_config") disk_src_host = params.get("disk_source_host") disk_src_port = params.get("disk_source_port") disk_src_pool = params.get("disk_source_pool") disk_format = params.get("disk_format", "raw") driver_iothread = params.get("driver_iothread") snap_name = params.get("disk_snap_name") attach_device = "yes" == params.get("attach_device", "no") attach_disk = "yes" == params.get("attach_disk", "no") test_save_restore = "yes" == params.get("test_save_restore", "no") test_snapshot = "yes" == params.get("test_snapshot", "no") test_blockcopy = "yes" == params.get("test_blockcopy", "no") test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no") test_vm_parts = "yes" == params.get("test_vm_parts", "no") additional_guest = "yes" == params.get("additional_guest", "no") create_snapshot = "yes" == params.get("create_snapshot", "no") convert_image = "yes" == params.get("convert_image", "no") create_volume = "yes" == params.get("create_volume", "no") create_by_xml = "yes" == params.get("create_by_xml", "no") client_key = params.get("client_key") client_name = params.get("client_name") auth_key = params.get("auth_key") auth_user = params.get("auth_user") auth_type = params.get("auth_type") auth_usage = params.get("secret_usage") pool_name = params.get("pool_name") pool_type = params.get("pool_type") vol_name = params.get("vol_name") cloned_vol_name = params.get("cloned_volume", "cloned_test_volume") create_from_cloned_volume = params.get("create_from_cloned_volume", "create_from_cloned_test_volume") vol_cap = params.get("vol_cap") vol_cap_unit = params.get("vol_cap_unit") start_vm = "yes" == params.get("start_vm", "no") test_disk_readonly = "yes" == params.get("test_disk_readonly", "no") test_disk_internal_snapshot = "yes" == params.get( "test_disk_internal_snapshot", "no") test_json_pseudo_protocol = "yes" == params.get("json_pseudo_protocol", "no") disk_snapshot_with_sanlock = "yes" == params.get( "disk_internal_with_sanlock", "no") auth_place_in_source = 
params.get("auth_place_in_source") # Prepare a blank params to confirm if delete the configure at the end of the test ceph_cfg = "" # Create config file if it doesn't exist ceph_cfg = ceph.create_config_file(mon_host) # After libvirt 3.9.0, auth element can be put into source part. if auth_place_in_source and not libvirt_version.version_compare(3, 9, 0): test.cancel( "place auth in source is not supported in current libvirt version") # Start vm and get all partions in vm. if vm.is_dead(): vm.start() session = vm.wait_for_login() old_parts = utils_disk.get_parts_list(session) session.close() vm.destroy(gracefully=False) if additional_guest: guest_name = "%s_%s" % (vm_name, '1') timeout = params.get("clone_timeout", 360) utils_libguestfs.virt_clone_cmd(vm_name, guest_name, True, timeout=timeout, ignore_status=False) additional_vm = vm.clone(guest_name) if start_vm: virsh.start(guest_name) # Back up xml file. vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) key_opt = "" secret_uuid = None snapshot_path = None key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key") img_file = os.path.join(data_dir.get_tmp_dir(), "%s_test.img" % vm_name) front_end_img_file = os.path.join(data_dir.get_tmp_dir(), "%s_frontend_test.img" % vm_name) # Construct a unsupported error message list to skip these kind of tests unsupported_err = [] if driver_iothread: unsupported_err.append('IOThreads not supported') if test_snapshot: unsupported_err.append('live disk snapshot not supported') if test_disk_readonly: if not libvirt_version.version_compare(5, 0, 0): unsupported_err.append('Could not create file: Permission denied') unsupported_err.append('Permission denied') else: unsupported_err.append( 'unsupported configuration: external snapshot ' + 'for readonly disk vdb is not supported') if test_disk_internal_snapshot: unsupported_err.append( 'unsupported configuration: internal snapshot for disk ' + 'vdb unsupported for storage type raw') if test_blockcopy: unsupported_err.append('block copy is not supported') if attach_disk: unsupported_err.append('No such file or directory') if create_volume: unsupported_err.append("backing 'volume' disks isn't yet supported") unsupported_err.append('this function is not supported') try: # Clean up dirty secrets in test environments if there have. dirty_secret_list = get_secret_list() if dirty_secret_list: for dirty_secret_uuid in dirty_secret_list: virsh.secret_undefine(dirty_secret_uuid) # Prepare test environment. qemu_config = LibvirtQemuConfig() if disk_snapshot_with_sanlock: # Install necessary package:sanlock,libvirt-lock-sanlock if not utils_package.package_install(["sanlock"]): test.error("fail to install sanlock") if not utils_package.package_install(["libvirt-lock-sanlock"]): test.error("fail to install libvirt-lock-sanlock") # Set virt_use_sanlock result = process.run("setsebool -P virt_use_sanlock 1", shell=True) if result.exit_status: test.error("Failed to set virt_use_sanlock value") # Update lock_manager in qemu.conf qemu_config.lock_manager = 'sanlock' # Update qemu-sanlock.conf. san_lock_config = LibvirtSanLockConfig() san_lock_config.user = '******' san_lock_config.group = 'sanlock' san_lock_config.host_id = 1 san_lock_config.auto_disk_leases = True process.run("mkdir -p /var/lib/libvirt/sanlock", shell=True) san_lock_config.disk_lease_dir = "/var/lib/libvirt/sanlock" san_lock_config.require_lease_for_disks = False # Start sanlock service and restart libvirtd to enforce changes. 
result = process.run("systemctl start wdmd", shell=True) if result.exit_status: test.error("Failed to start wdmd service") result = process.run("systemctl start sanlock", shell=True) if result.exit_status: test.error("Failed to start sanlock service") utils_libvirtd.Libvirtd().restart() # Prepare lockspace and lease file for sanlock in order. sanlock_cmd_dict = OrderedDict() sanlock_cmd_dict[ "truncate -s 1M /var/lib/libvirt/sanlock/TEST_LS"] = "Failed to truncate TEST_LS" sanlock_cmd_dict[ "sanlock direct init -s TEST_LS:0:/var/lib/libvirt/sanlock/TEST_LS:0"] = "Failed to sanlock direct init TEST_LS:0" sanlock_cmd_dict[ "chown sanlock:sanlock /var/lib/libvirt/sanlock/TEST_LS"] = "Failed to chown sanlock TEST_LS" sanlock_cmd_dict[ "restorecon -R -v /var/lib/libvirt/sanlock"] = "Failed to restorecon sanlock" sanlock_cmd_dict[ "truncate -s 1M /var/lib/libvirt/sanlock/test-disk-resource-lock"] = "Failed to truncate test-disk-resource-lock" sanlock_cmd_dict[ "sanlock direct init -r TEST_LS:test-disk-resource-lock:" + "/var/lib/libvirt/sanlock/test-disk-resource-lock:0"] = "Failed to sanlock direct init test-disk-resource-lock" sanlock_cmd_dict[ "chown sanlock:sanlock " + "/var/lib/libvirt/sanlock/test-disk-resource-lock"] = "Failed to chown test-disk-resource-loc" sanlock_cmd_dict[ "sanlock client add_lockspace -s TEST_LS:1:" + "/var/lib/libvirt/sanlock/TEST_LS:0"] = "Failed to client add_lockspace -s TEST_LS:0" for sanlock_cmd in sanlock_cmd_dict.keys(): result = process.run(sanlock_cmd, shell=True) if result.exit_status: test.error(sanlock_cmd_dict[sanlock_cmd]) # Create one lease device and add it to VM. san_lock_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) lease_device = Lease() lease_device.lockspace = 'TEST_LS' lease_device.key = 'test-disk-resource-lock' lease_device.target = { 'path': '/var/lib/libvirt/sanlock/test-disk-resource-lock' } san_lock_vmxml.add_device(lease_device) san_lock_vmxml.sync() # Install ceph-common package which include rbd command if utils_package.package_install(["ceph-common"]): if client_name and client_key: with open(key_file, 'w') as f: f.write("[%s]\n\tkey = %s\n" % (client_name, client_key)) key_opt = "--keyring %s" % key_file # Create secret xml sec_xml = secret_xml.SecretXML("no", "no") sec_xml.usage = auth_type sec_xml.usage_name = auth_usage sec_xml.xmltreefile.write() logging.debug("Secret xml: %s", sec_xml) ret = virsh.secret_define(sec_xml.xml) libvirt.check_exit_status(ret) secret_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+", ret.stdout.strip())[0].lstrip() logging.debug("Secret uuid %s", secret_uuid) if secret_uuid is None: test.error("Failed to get secret uuid") # Set secret value auth_key = params.get("auth_key") ret = virsh.secret_set_value(secret_uuid, auth_key, **virsh_dargs) libvirt.check_exit_status(ret) # Delete the disk if it exists cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm " "{2}".format(mon_host, key_opt, disk_src_name)) process.run(cmd, ignore_status=True, shell=True) else: test.error("Failed to install ceph-common") if disk_src_config: config_ceph() disk_path = ("rbd:%s:mon_host=%s" % (disk_src_name, mon_host)) if auth_user and auth_key: disk_path += (":id=%s:key=%s" % (auth_user, auth_key)) targetdev = params.get("disk_target", "vdb") # To be compatible with create_disk_xml function, # some parameters need to be updated. 
params.update({ "type_name": params.get("disk_type", "network"), "target_bus": params.get("disk_target_bus"), "target_dev": targetdev, "secret_uuid": secret_uuid, "source_protocol": params.get("disk_source_protocol"), "source_name": disk_src_name, "source_host_name": disk_src_host, "source_host_port": disk_src_port }) # Prepare disk image if convert_image: first_disk = vm.get_first_disk_devices() blk_source = first_disk['source'] # Convert the image to remote storage disk_cmd = ("rbd -m %s %s info %s 2> /dev/null|| qemu-img convert" " -O %s %s %s" % (mon_host, key_opt, disk_src_name, disk_format, blk_source, disk_path)) process.run(disk_cmd, ignore_status=False, shell=True) elif create_volume: vol_params = { "name": vol_name, "capacity": int(vol_cap), "capacity_unit": vol_cap_unit, "format": disk_format } create_pool() create_vol(vol_params) check_vol(vol_params) else: # Create an local image and make FS on it. disk_cmd = ("qemu-img create -f %s %s 10M && mkfs.ext4 -F %s" % (disk_format, img_file, img_file)) process.run(disk_cmd, ignore_status=False, shell=True) # Convert the image to remote storage disk_cmd = ( "rbd -m %s %s info %s 2> /dev/null|| qemu-img convert -O" " %s %s %s" % (mon_host, key_opt, disk_src_name, disk_format, img_file, disk_path)) process.run(disk_cmd, ignore_status=False, shell=True) # Create disk snapshot if needed. if create_snapshot: snap_cmd = ("rbd -m %s %s snap create %s@%s" % (mon_host, key_opt, disk_src_name, snap_name)) process.run(snap_cmd, ignore_status=False, shell=True) if test_json_pseudo_protocol: # Create one frontend image with the rbd backing file. json_str = ('json:{"file.driver":"rbd",' '"file.filename":"rbd:%s:mon_host=%s"}' % (disk_src_name, mon_host)) # pass different json string according to the auth config if auth_user and auth_key: json_str = ('%s:id=%s:key=%s"}' % (json_str[:-2], auth_user, auth_key)) disk_cmd = ("qemu-img create -f qcow2 -b '%s' %s" % (json_str, front_end_img_file)) disk_path = front_end_img_file process.run(disk_cmd, ignore_status=False, shell=True) # If hot plug, start VM first, and then wait the OS boot. # Otherwise stop VM if running. if start_vm: if vm.is_dead(): vm.start() vm.wait_for_login().close() else: if not vm.is_dead(): vm.destroy() if attach_device: if create_volume: params.update({"source_pool": pool_name}) params.update({"type_name": "volume"}) # No need auth options for volume if "auth_user" in params: params.pop("auth_user") if "auth_type" in params: params.pop("auth_type") if "secret_type" in params: params.pop("secret_type") if "secret_uuid" in params: params.pop("secret_uuid") if "secret_usage" in params: params.pop("secret_usage") # After 3.9.0,the auth element can be place in source part. if auth_place_in_source: params.update({"auth_in_source": auth_place_in_source}) xml_file = libvirt.create_disk_xml(params) if additional_guest: # Copy xml_file for additional guest VM. 
shutil.copyfile(xml_file, additional_xml_file) opts = params.get("attach_option", "") ret = virsh.attach_device(vm_name, xml_file, flagstr=opts, debug=True) libvirt.check_result(ret, skip_if=unsupported_err) if additional_guest: # Make sure the additional VM is running if additional_vm.is_dead(): additional_vm.start() additional_vm.wait_for_login().close() ret = virsh.attach_device(guest_name, additional_xml_file, "", debug=True) libvirt.check_result(ret, skip_if=unsupported_err) elif attach_disk: opts = params.get("attach_option", "") ret = virsh.attach_disk(vm_name, disk_path, targetdev, opts) libvirt.check_result(ret, skip_if=unsupported_err) elif test_disk_readonly: params.update({'readonly': "yes"}) xml_file = libvirt.create_disk_xml(params) opts = params.get("attach_option", "") ret = virsh.attach_device(vm_name, xml_file, flagstr=opts, debug=True) libvirt.check_result(ret, skip_if=unsupported_err) elif test_disk_internal_snapshot: xml_file = libvirt.create_disk_xml(params) opts = params.get("attach_option", "") ret = virsh.attach_device(vm_name, xml_file, flagstr=opts, debug=True) libvirt.check_result(ret, skip_if=unsupported_err) elif disk_snapshot_with_sanlock: if vm.is_dead(): vm.start() snapshot_path = make_snapshot() if vm.is_alive(): vm.destroy() elif not create_volume: libvirt.set_vm_disk(vm, params) if test_blockcopy: logging.info("Creating %s...", vm_name) vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) if vm.is_alive(): vm.destroy(gracefully=False) vm.undefine() if virsh.create(vmxml_for_test.xml, **virsh_dargs).exit_status: vmxml_backup.define() test.fail("Can't create the domain") elif vm.is_dead(): vm.start() # Wait for vm is running vm.wait_for_login(timeout=600).close() if additional_guest: if additional_vm.is_dead(): additional_vm.start() # Check qemu command line if test_qemu_cmd: check_qemu_cmd() # Check partitions in vm if test_vm_parts: if not check_in_vm( vm, targetdev, old_parts, read_only=create_snapshot): test.fail("Failed to check vm partitions") if additional_guest: if not check_in_vm(additional_vm, targetdev, old_parts): test.fail("Failed to check vm partitions") # Save and restore operation if test_save_restore: check_save_restore() if test_snapshot: snap_option = params.get("snapshot_option", "") check_snapshot(snap_option) if test_blockcopy: check_blockcopy(targetdev) if test_disk_readonly: snap_option = params.get("snapshot_option", "") check_snapshot(snap_option, 'vdb') if test_disk_internal_snapshot: snap_option = params.get("snapshot_option", "") check_snapshot(snap_option, targetdev) # Detach the device. if attach_device: xml_file = libvirt.create_disk_xml(params) ret = virsh.detach_device(vm_name, xml_file) libvirt.check_exit_status(ret) if additional_guest: ret = virsh.detach_device(guest_name, xml_file) libvirt.check_exit_status(ret) elif attach_disk: ret = virsh.detach_disk(vm_name, targetdev) libvirt.check_exit_status(ret) # Check disk in vm after detachment. if attach_device or attach_disk: session = vm.wait_for_login() new_parts = utils_disk.get_parts_list(session) if len(new_parts) != len(old_parts): test.fail("Disk still exists in vm" " after detachment") session.close() except virt_vm.VMStartError as details: for msg in unsupported_err: if msg in str(details): test.cancel(str(details)) else: test.fail("VM failed to start." "Error: %s" % str(details)) finally: # Remove ceph configure file if created. if ceph_cfg: os.remove(ceph_cfg) # Delete snapshots. 
snapshot_lists = virsh.snapshot_list(vm_name) if len(snapshot_lists) > 0: libvirt.clean_up_snapshots(vm_name, snapshot_lists) for snap in snapshot_lists: virsh.snapshot_delete(vm_name, snap, "--metadata") # Recover VM. if vm.is_alive(): vm.destroy(gracefully=False) if additional_guest: virsh.remove_domain(guest_name, "--remove-all-storage", ignore_status=True) # Remove the snapshot. if create_snapshot: cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} snap" " purge {2} && rbd -m {0} {1} rm {2}" "".format(mon_host, key_opt, disk_src_name)) process.run(cmd, ignore_status=True, shell=True) elif create_volume: cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}" "".format(mon_host, key_opt, os.path.join(disk_src_pool, cloned_vol_name))) process.run(cmd, ignore_status=True, shell=True) cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}" "".format( mon_host, key_opt, os.path.join(disk_src_pool, create_from_cloned_volume))) process.run(cmd, ignore_status=True, shell=True) clean_up_volume_snapshots() else: cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}" "".format(mon_host, key_opt, disk_src_name)) process.run(cmd, ignore_status=True, shell=True) # Delete tmp files. if os.path.exists(key_file): os.remove(key_file) if os.path.exists(img_file): os.remove(img_file) # Clean up volume, pool if vol_name and vol_name in str(virsh.vol_list(pool_name).stdout): virsh.vol_delete(vol_name, pool_name) if pool_name and pool_name in virsh.pool_state_dict(): virsh.pool_destroy(pool_name, **virsh_dargs) virsh.pool_undefine(pool_name, **virsh_dargs) # Clean up secret secret_list = get_secret_list() if secret_list: for secret_uuid in secret_list: virsh.secret_undefine(secret_uuid) logging.info("Restoring vm...") vmxml_backup.sync() if disk_snapshot_with_sanlock: # Restore virt_use_sanlock setting. process.run("setsebool -P virt_use_sanlock 0", shell=True) # Restore qemu config qemu_config.restore() utils_libvirtd.Libvirtd().restart() # Force shutdown sanlock service. process.run("sanlock client shutdown -f 1", shell=True) # Clean up lockspace folder process.run("rm -rf /var/lib/libvirt/sanlock/*", shell=True) if snapshot_path is not None: for snapshot in snapshot_path: if os.path.exists(snapshot): os.remove(snapshot)
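# A minimal sketch (not part of the original test) of the rbd cleanup pattern
# repeated in the finally block above: check that the image exists, then purge
# its snapshots and remove it.  The helper name is hypothetical; it assumes the
# same mon_host/key_opt strings used above and process.run from avocado.utils.
from avocado.utils import process


def cleanup_rbd_image(mon_host, key_opt, image_spec):
    """Best-effort removal of an rbd image and all of its snapshots."""
    cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} snap purge {2} && "
           "rbd -m {0} {1} rm {2}".format(mon_host, key_opt, image_spec))
    # ignore_status=True: cleanup must not fail the test if the image is already gone.
    return process.run(cmd, ignore_status=True, shell=True)

# Example with illustrative values:
# cleanup_rbd_image("10.0.0.1:6789", "--keyring /tmp/ceph.key", "libvirt-pool/vol.img")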
def run(test, params, env): """ Test virsh blockresize command for block device of domain. 1) Init the variables from params. 2) Create an image with specified format. 3) Attach a disk image to vm. 4) Test blockresize for the disk 5) Detach the disk """ # MAIN TEST CODE ### # Process cartesian parameters vm_name = params.get("main_vm", "virt-tests-vm1") image_format = params.get("disk_image_format", "qcow2") initial_disk_size = params.get("initial_disk_size", "500K") status_error = "yes" == params.get("status_error", "yes") resize_value = params.get("resize_value") virsh_dargs = {'debug': True} vm = env.get_vm(vm_name) if vm.is_alive(): vm.destroy() vm.start() # Skip 'qed' cases for libvirt version greater than 1.1.0 if libvirt_version.version_compare(1, 1, 0): if image_format == "qed": raise error.TestNAError("QED support changed, check bug: " "https://bugzilla.redhat.com/show_bug.cgi" "?id=731570") # Create an image. tmp_dir = data_dir.get_tmp_dir() image_path = os.path.join(tmp_dir, "blockresize_test") logging.info("Create image: %s, " "size %s, " "format %s", image_path, initial_disk_size, image_format) cmd = "qemu-img create -f %s %s %s" % (image_format, image_path, initial_disk_size) status, output = commands.getstatusoutput(cmd) if status: raise error.TestError("Creating image file %s failed: %s" % (image_path, output)) # Hotplug the image as disk device result = virsh.attach_disk(vm_name, source=image_path, target="vdd", extra=" --subdriver %s" % image_format) if result.exit_status: raise error.TestError("Failed to attach disk %s to VM: %s." % (image_path, result.stderr)) if resize_value == "over_size": # Use byte unit for over_size test resize_value = "%s" % OVER_SIZE + "b" # Run the test try: result = virsh.blockresize(vm_name, image_path, resize_value, **virsh_dargs) status = result.exit_status err = result.stderr.strip() # Check status_error if status_error: if status == 0 or err == "": raise error.TestFail("Expect failure, but run successfully!") # No need to do more test return else: if status != 0 or err != "": # bz 1002813 will result in an error on this err_str = "unable to execute QEMU command 'block_resize': Could not resize: Invalid argument" if resize_value[-2] in "kb" and re.search(err_str, err): raise error.TestNAError("BZ 1002813 not yet applied") else: raise error.TestFail("Run failed with right " "virsh blockresize command") # Although kb should not be used, libvirt/virsh will accept it and # consider it as a 1000 bytes, which caused issues for qed & qcow2 # since they expect a value evenly divisible by 512 (hence bz 1002813). if "kb" in resize_value: value = int(resize_value[:-2]) if image_format in ["qed", "qcow2"]: # qcow2 and qed want a VIR_ROUND_UP value based on 512 byte # sectors - hence this less than visually appealing formula expected_size = (((value * 1000) + 512 - 1) / 512) * 512 else: # Raw images... # Ugh - there's some rather ugly looking math when kb # (or mb, gb, tb, etc.) are used as the scale for the # value to create an image. The blockresize for the # running VM uses a qemu json call which differs from # qemu-img would do - resulting in (to say the least) # awkward sizes. We'll just have to make sure we don't # deviates more than a sector. 
expected_size = value * 1000 elif "kib" in resize_value: value = int(resize_value[:-3]) expected_size = value * 1024 elif resize_value[-1] in "b": expected_size = int(resize_value[:-1]) elif resize_value[-1] in "k": value = int(resize_value[:-1]) expected_size = value * 1024 elif resize_value[-1] == "m": value = int(resize_value[:-1]) expected_size = value * 1024 * 1024 elif resize_value[-1] == "g": value = int(resize_value[:-1]) expected_size = value * 1024 * 1024 * 1024 else: raise error.TestError("Unknown scale value") image_info = utils_misc.get_image_info(image_path) actual_size = int(image_info['vsize']) logging.info( "The expected block size is %s bytes, " "the actual block size is %s bytes", expected_size, actual_size) # See comment above regarding Raw images if image_format == "raw" and resize_value[-2] in "kb": if abs(int(actual_size) - int(expected_size)) > 512: raise error.TestFail("New raw blocksize set by blockresize do " "not match the expected value") else: if int(actual_size) != int(expected_size): raise error.TestFail("New blocksize set by blockresize is " "different from actual size from " "'qemu-img info'") finally: virsh.detach_disk(vm_name, target="vdd") if os.path.exists(image_path): os.remove(image_path)
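# A small sketch (not in the original test) of the expected-size arithmetic used
# above: qcow2/qed round a "kb" (1000-byte) request up to the next 512-byte
# sector, while raw images are expected to land within one sector of value * 1000.
def expected_blockresize_size(value_kb, image_format):
    """Return the byte size blockresize is expected to produce for a 'kb' value."""
    requested = value_kb * 1000
    if image_format in ("qed", "qcow2"):
        # VIR_ROUND_UP to a 512-byte sector boundary.
        return ((requested + 512 - 1) // 512) * 512
    return requested

# Example: 100kb on qcow2 rounds up to 196 sectors, raw stays at 100000 bytes.
assert expected_blockresize_size(100, "qcow2") == 100352
assert expected_blockresize_size(100, "raw") == 100000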
raise error.TestError("Add acpiphp module failed before test.") # If we are testing cdrom device, we need to detach hdc in VM first. if device == "cdrom": if vm.is_alive(): vm.destroy(gracefully=False) s_detach = virsh.detach_disk(vm_name, device_target, "--config") if not s_detach: logging.error("Detach hdc failed before test.") vm.start() # If we are testing detach-disk, we need to attach certain device first. if test_cmd == "detach-disk" and no_attach != "yes": if bus_type == "ide" and vm.is_alive(): vm.destroy(gracefully=False) s_attach = virsh.attach_disk(vm_name, device_source, device_target, "--driver qemu --config").exit_status if s_attach != 0: logging.error("Attaching device failed before testing detach-disk") if vm.is_dead(): vm.start() # Turn VM into certain state. if pre_vm_state == "paused": logging.info("Suspending %s..." % vm_name) if vm.is_alive(): vm.pause() elif pre_vm_state == "shut off": logging.info("Shuting down %s..." % vm_name) if vm.is_alive(): vm.destroy(gracefully=False)
def run(test, params, env): """ Test storage pool and volumes with applications such as: install vms, attached to vms... """ pool_type = params.get("pool_type") pool_name = "test_%s_app" % pool_type pool_target = params.get("pool_target") emulated_img = params.get("emulated_image", "emulated-image") volume_count = int(params.get("volume_count", 1)) volume_size = params.get("volume_size", "1G") emulated_size = "%sG" % (volume_count * int(volume_size[:-1]) + 1) application = params.get("application", "install") disk_target = params.get("disk_target", "vdb") test_message = params.get("test_message", "") vm_name = params.get("main_vm", "avocado-vt-vm1") block_device = params.get("block_device", "/DEV/EXAMPLE") if application == "install": cdrom_path = os.path.join(data_dir.get_data_dir(), params.get("cdrom_cd1")) if not os.path.exists(cdrom_path): raise error.TestNAError("Can't find installation cdrom:%s" % cdrom_path) # Get a nonexist domain name vm_name = "vol_install_test" try: pvtest = utlv.PoolVolumeTest(test, params) pvtest.pre_pool(pool_name, pool_type, pool_target, emulated_img, image_size=emulated_size, pre_disk_vol=[volume_size], device_name=block_device) logging.debug("Current pools:\n%s", libvirt_storage.StoragePool().list_pools()) new_pool = libvirt_storage.PoolVolume(pool_name) if pool_type == "disk": volumes = new_pool.list_volumes() logging.debug("Current volumes:%s", volumes) else: volumes = create_volumes(new_pool, volume_count, volume_size) if application == "attach": vm = env.get_vm(vm_name) session = vm.wait_for_login() virsh.attach_disk(vm_name, volumes.values()[volume_count - 1], disk_target) vm_attach_device = "/dev/%s" % disk_target if session.cmd_status("which parted"): # No parted command, check device only if session.cmd_status("ls %s" % vm_attach_device): raise error.TestFail("Didn't find attached device:%s" % vm_attach_device) return # Test if attached disk can be used normally utlv.mk_part(vm_attach_device, session=session) session.cmd("mkfs.ext4 %s1" % vm_attach_device) session.cmd("mount %s1 /mnt" % vm_attach_device) session.cmd("echo %s > /mnt/test" % test_message) output = session.cmd_output("cat /mnt/test").strip() if output != test_message: raise error.TestFail("%s cannot be used normally!" 
% vm_attach_device) elif application == "install": # Get a nonexist domain name anyway while virsh.domain_exists(vm_name): vm_name += "_test" # Prepare installation parameters params["main_vm"] = vm_name vm = env.create_vm("libvirt", None, vm_name, params, test.bindir) env.register_vm(vm_name, vm) params["image_name"] = volumes.values()[volume_count - 1] params["image_format"] = "raw" params['force_create_image'] = "yes" params['remove_image'] = "yes" params['shutdown_cleanly'] = "yes" params['shutdown_cleanly_timeout'] = 120 params['guest_port_unattended_install'] = 12323 params['inactivity_watcher'] = "error" params['inactivity_treshold'] = 1800 params['image_verify_bootable'] = "no" params['unattended_delivery_method'] = "cdrom" params['drive_index_unattended'] = 1 params['drive_index_cd1'] = 2 params['boot_once'] = "d" params['medium'] = "cdrom" params['wait_no_ack'] = "yes" params['image_raw_device'] = "yes" params['backup_image_before_testing'] = "no" params['kernel_params'] = ("ks=cdrom nicdelay=60 " "console=ttyS0,115200 console=tty0") params['cdroms'] = "unattended cd1" params['redirs'] += " unattended_install" selinux_mode = None try: selinux_mode = utils_selinux.get_status() utils_selinux.set_status("permissive") try: unattended_install.run(test, params, env) except process.CmdError, detail: raise error.TestFail("Guest install failed:%s" % detail) finally: if selinux_mode is not None: utils_selinux.set_status(selinux_mode) env.unregister_vm(vm_name) finally: try: if application == "install": if virsh.domain_exists(vm_name): virsh.remove_domain(vm_name) elif application == "attach": virsh.detach_disk(vm_name, disk_target) finally: pvtest.cleanup_pool(pool_name, pool_type, pool_target, emulated_img, device_name=block_device)
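# A minimal sketch (not part of the original test) of the "attach" application
# check above: partition, format and mount the attached device in the guest and
# verify a message round-trips.  It assumes a virttest shell session object and
# utlv.mk_part as used above; the helper name and mountpoint are illustrative.
from virttest.utils_test import libvirt as utlv


def exercise_attached_disk(session, device, message, mountpoint="/mnt"):
    """Return True if the attached device can be partitioned, formatted and used."""
    utlv.mk_part(device, session=session)
    session.cmd("mkfs.ext4 %s1" % device)
    session.cmd("mount %s1 %s" % (device, mountpoint))
    session.cmd("echo %s > %s/test" % (message, mountpoint))
    output = session.cmd_output("cat %s/test" % mountpoint).strip()
    session.cmd("umount %s" % mountpoint)
    return output == message

# Example: exercise_attached_disk(session, "/dev/vdb", "hello")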
def run(test, params, env): """ Test snapshot-create-as command Make sure that the clean repo can be used because qemu-guest-agent need to be installed in guest The command create a snapshot (disk and RAM) from arguments which including the following point * virsh snapshot-create-as --print-xml --diskspec --name --description * virsh snapshot-create-as --print-xml with multi --diskspec * virsh snapshot-create-as --print-xml --memspec * virsh snapshot-create-as --description * virsh snapshot-create-as --no-metadata * virsh snapshot-create-as --no-metadata --print-xml (negative test) * virsh snapshot-create-as --atomic --disk-only * virsh snapshot-create-as --quiesce --disk-only (positive and negative) * virsh snapshot-create-as --reuse-external * virsh snapshot-create-as --disk-only --diskspec * virsh snapshot-create-as --memspec --reuse-external --atomic(negative) * virsh snapshot-create-as --disk-only and --memspec (negative) * Create multi snapshots with snapshot-create-as * Create snapshot with name a--a a--a--snap1 """ if not virsh.has_help_command('snapshot-create-as'): raise error.TestNAError("This version of libvirt does not support " "the snapshot-create-as test") vm_name = params.get("main_vm") status_error = params.get("status_error", "no") options = params.get("snap_createas_opts") multi_num = params.get("multi_num", "1") diskspec_num = params.get("diskspec_num", "1") bad_disk = params.get("bad_disk") reuse_external = "yes" == params.get("reuse_external", "no") start_ga = params.get("start_ga", "yes") domain_state = params.get("domain_state") memspec_opts = params.get("memspec_opts") config_format = "yes" == params.get("config_format", "no") snapshot_image_format = params.get("snapshot_image_format") diskspec_opts = params.get("diskspec_opts") create_autodestroy = 'yes' == params.get("create_autodestroy", "no") unix_channel = "yes" == params.get("unix_channel", "yes") dac_denial = "yes" == params.get("dac_denial", "no") check_json_no_savevm = "yes" == params.get("check_json_no_savevm", "no") disk_snapshot_attr = params.get('disk_snapshot_attr', 'external') set_snapshot_attr = "yes" == params.get("set_snapshot_attr", "no") # gluster related params replace_vm_disk = "yes" == params.get("replace_vm_disk", "no") disk_src_protocol = params.get("disk_source_protocol") restart_tgtd = params.get("restart_tgtd", "no") vol_name = params.get("vol_name") tmp_dir = data_dir.get_tmp_dir() pool_name = params.get("pool_name", "gluster-pool") brick_path = os.path.join(tmp_dir, pool_name) uri = params.get("virsh_uri") usr = params.get('unprivileged_user') if usr: if usr.count('EXAMPLE'): usr = '******' if disk_src_protocol == 'iscsi': if not libvirt_version.version_compare(1, 0, 4): raise error.TestNAError("'iscsi' disk doesn't support in" " current libvirt version.") if not libvirt_version.version_compare(1, 1, 1): if params.get('setup_libvirt_polkit') == 'yes': raise error.TestNAError("API acl test not supported in current" " libvirt version.") if not libvirt_version.version_compare(1, 2, 7): # As bug 1017289 closed as WONTFIX, the support only # exist on 1.2.7 and higher if disk_src_protocol == 'gluster': raise error.TestNAError("Snapshot on glusterfs not support in " "current version. Check more info with " "https://bugzilla.redhat.com/buglist.cgi?" "bug_id=1017289,1032370") opt_names = locals() if memspec_opts is not None: mem_options = compose_disk_options(test, params, memspec_opts) # if the parameters have the disk without "file=" then we only need to # add testdir for it. 
if mem_options is None: mem_options = os.path.join(test.tmpdir, memspec_opts) options += " --memspec " + mem_options tag_diskspec = 0 dnum = int(diskspec_num) if diskspec_opts is not None: tag_diskspec = 1 opt_names['diskopts_1'] = diskspec_opts # diskspec_opts[n] is used in cfg when more than 1 --diskspec is used if dnum > 1: tag_diskspec = 1 for i in range(1, dnum + 1): opt_names["diskopts_%s" % i] = params.get("diskspec_opts%s" % i) if tag_diskspec == 1: for i in range(1, dnum + 1): disk_options = compose_disk_options(test, params, opt_names["diskopts_%s" % i]) options += " --diskspec " + disk_options logging.debug("options are %s", options) vm = env.get_vm(vm_name) option_dict = {} option_dict = utils_misc.valued_option_dict(options, r' --(?!-)') logging.debug("option_dict is %s", option_dict) # A backup of original vm vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) logging.debug("original xml is %s", vmxml_backup) # Generate empty image for negative test if bad_disk is not None: bad_disk = os.path.join(test.tmpdir, bad_disk) os.open(bad_disk, os.O_RDWR | os.O_CREAT) # Generate external disk if reuse_external: disk_path = '' for i in range(dnum): external_disk = "external_disk%s" % i if params.get(external_disk): disk_path = os.path.join(test.tmpdir, params.get(external_disk)) utils.run("qemu-img create -f qcow2 %s 1G" % disk_path) # Only chmod of the last external disk for negative case if dac_denial: utils.run("chmod 500 %s" % disk_path) qemu_conf = None libvirtd_conf = None libvirtd_log_path = None libvirtd = utils_libvirtd.Libvirtd() try: # Config "snapshot_image_format" option in qemu.conf if config_format: qemu_conf = utils_config.LibvirtQemuConfig() qemu_conf.snapshot_image_format = snapshot_image_format logging.debug("the qemu config file content is:\n %s" % qemu_conf) libvirtd.restart() if check_json_no_savevm: libvirtd_conf = utils_config.LibvirtdConfig() libvirtd_conf["log_level"] = '1' libvirtd_conf["log_filters"] = '"1:json 3:remote 4:event"' libvirtd_log_path = os.path.join(test.tmpdir, "libvirtd.log") libvirtd_conf["log_outputs"] = '"1:file:%s"' % libvirtd_log_path logging.debug("the libvirtd config file content is:\n %s" % libvirtd_conf) libvirtd.restart() if replace_vm_disk: libvirt.set_vm_disk(vm, params, tmp_dir) if set_snapshot_attr: if vm.is_alive(): vm.destroy(gracefully=False) vmxml_new = vm_xml.VMXML.new_from_dumpxml(vm_name) disk_xml = vmxml_backup.get_devices(device_type="disk")[0] vmxml_new.del_device(disk_xml) # set snapshot attribute in disk xml disk_xml.snapshot = disk_snapshot_attr new_disk = disk.Disk(type_name='file') new_disk.xmltreefile = disk_xml.xmltreefile vmxml_new.add_device(new_disk) logging.debug("The vm xml now is: %s" % vmxml_new.xmltreefile) vmxml_new.sync() vm.start() # Start qemu-ga on guest if have --quiesce if unix_channel and options.find("quiesce") >= 0: vm.prepare_guest_agent() session = vm.wait_for_login() if start_ga == "no": # The qemu-ga could be running and should be killed session.cmd("kill -9 `pidof qemu-ga`") # Check if the qemu-ga get killed stat_ps = session.cmd_status("ps aux |grep [q]emu-ga") if not stat_ps: # As managed by systemd and set as autostart, qemu-ga # could be restarted, so use systemctl to stop it. 
session.cmd("systemctl stop qemu-guest-agent") stat_ps = session.cmd_status("ps aux |grep [q]emu-ga") if not stat_ps: raise error.TestNAError("Fail to stop agent in " "guest") if domain_state == "paused": virsh.suspend(vm_name) else: # Remove channel if exist if vm.is_alive(): vm.destroy(gracefully=False) xml_inst = vm_xml.VMXML.new_from_dumpxml(vm_name) xml_inst.remove_agent_channels() vm.start() # Record the previous snapshot-list snaps_before = virsh.snapshot_list(vm_name) # Attach disk before create snapshot if not print xml and multi disks # specified in cfg if dnum > 1 and "--print-xml" not in options: for i in range(1, dnum): disk_path = os.path.join(test.tmpdir, 'disk%s.qcow2' % i) utils.run("qemu-img create -f qcow2 %s 200M" % disk_path) virsh.attach_disk(vm_name, disk_path, 'vd%s' % list(string.lowercase)[i], debug=True) # Run virsh command # May create several snapshots, according to configuration for count in range(int(multi_num)): if create_autodestroy: # Run virsh command in interactive mode vmxml_backup.undefine() vp = virsh.VirshPersistent() vp.create(vmxml_backup['xml'], '--autodestroy') cmd_result = vp.snapshot_create_as(vm_name, options, ignore_status=True, debug=True) vp.close_session() vmxml_backup.define() else: cmd_result = virsh.snapshot_create_as(vm_name, options, unprivileged_user=usr, uri=uri, ignore_status=True, debug=True) # for multi snapshots without specific snapshot name, the # snapshot name is using time string with 1 second # incremental, to avoid get snapshot failure with same name, # sleep 1 second here. if int(multi_num) > 1: time.sleep(1.1) output = cmd_result.stdout.strip() status = cmd_result.exit_status # check status_error if status_error == "yes": if status == 0: raise error.TestFail("Run successfully with wrong command!") else: # Check memspec file should be removed if failed if (options.find("memspec") >= 0 and options.find("atomic") >= 0): if os.path.isfile(option_dict['memspec']): os.remove(option_dict['memspec']) raise error.TestFail("Run failed but file %s exist" % option_dict['memspec']) else: logging.info("Run failed as expected and memspec" " file already been removed") # Check domain xml is not updated if reuse external fail elif reuse_external and dac_denial: output = virsh.dumpxml(vm_name).stdout.strip() if "reuse_external" in output: raise error.TestFail("Domain xml should not be " "updated with snapshot image") else: logging.info("Run failed as expected") elif status_error == "no": if status != 0: raise error.TestFail("Run failed with right command: %s" % output) else: # Check the special options snaps_list = virsh.snapshot_list(vm_name) logging.debug("snaps_list is %s", snaps_list) check_snapslist(vm_name, options, option_dict, output, snaps_before, snaps_list) # For cover bug 872292 if check_json_no_savevm: pattern = "The command savevm has not been found" with open(libvirtd_log_path) as f: for line in f: if pattern in line and "error" in line: raise error.TestFail("'%s' was found: %s" % (pattern, line)) finally: if vm.is_alive(): vm.destroy() # recover domain xml xml_recover(vmxml_backup) path = "/var/lib/libvirt/qemu/snapshot/" + vm_name if os.path.isfile(path): raise error.TestFail("Still can find snapshot metadata") if disk_src_protocol == 'gluster': libvirt.setup_or_cleanup_gluster(False, vol_name, brick_path) libvirtd.restart() if disk_src_protocol == 'iscsi': libvirt.setup_or_cleanup_iscsi(False, restart_tgtd=restart_tgtd) # rm bad disks if bad_disk is not None: os.remove(bad_disk) # rm attach disks and reuse external disks 
if dnum > 1 and "--print-xml" not in options: for i in range(dnum): disk_path = os.path.join(test.tmpdir, 'disk%s.qcow2' % i) if os.path.exists(disk_path): os.unlink(disk_path) if reuse_external: external_disk = "external_disk%s" % i disk_path = os.path.join(test.tmpdir, params.get(external_disk)) if os.path.exists(disk_path): os.unlink(disk_path) # restore config if config_format and qemu_conf: qemu_conf.restore() if libvirtd_conf: libvirtd_conf.restore() if libvirtd_conf or (config_format and qemu_conf): libvirtd.restart() if libvirtd_log_path and os.path.exists(libvirtd_log_path): os.unlink(libvirtd_log_path)
seLinuxBool = SELinuxBoolean(params) seLinuxBool.setup() subdriver = utils_test.get_image_info(shared_storage)['format'] extra_attach = ("--config --driver qemu --subdriver %s --cache %s" % (subdriver, disk_cache)) # Attach a scsi device for special testcases if attach_scsi_disk: shared_dir = os.path.dirname(shared_storage) # This is a workaround: specifying this parameter in the config file # does not take effect, so set it here. params["image_name"] = "scsi_test" scsi_qemuImg = QemuImg(params, shared_dir, '') scsi_disk, _ = scsi_qemuImg.create(params) s_attach = virsh.attach_disk(vm_name, scsi_disk, "sdb", extra_attach, debug=True) if s_attach.exit_status != 0: logging.error("Attach another scsi disk failed.") # Get vcpu and memory info of guest for numa related tests if enable_numa: numa_dict_list = [] vmxml = vm_xml.VMXML.new_from_dumpxml(vm.name) vcpu = vmxml.vcpu max_mem = vmxml.max_mem max_mem_unit = vmxml.max_mem_unit if vcpu < 1: raise error.TestError("%s does not have even 1 vcpu" % vm.name) else: numa_dict_list = create_numa(vcpu, max_mem, max_mem_unit)
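# A minimal sketch of what a create_numa-style helper could look like; the real
# create_numa used above is not shown in this excerpt, so the dict layout and
# two-cell split here are assumptions for illustration only.
def create_numa_cells(vcpu, max_mem, max_mem_unit):
    """Split guest vcpus and memory evenly across two guest NUMA cells."""
    if vcpu < 2:
        return [{"id": 0, "cpus": "0", "memory": max_mem, "unit": max_mem_unit}]
    cells = []
    half_cpu = vcpu // 2
    half_mem = max_mem // 2
    for cell_id in range(2):
        start = cell_id * half_cpu
        end = vcpu - 1 if cell_id else half_cpu - 1
        cells.append({"id": cell_id,
                      "cpus": "%s-%s" % (start, end),
                      "memory": half_mem,
                      "unit": max_mem_unit})
    return cells

# Example: create_numa_cells(4, 2097152, "KiB")
# -> [{'id': 0, 'cpus': '0-1', ...}, {'id': 1, 'cpus': '2-3', ...}]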
def run(test, params, env): """ Test DAC in adding nfs pool disk to VM. (1).Init variables for test. (2).Create nfs pool and vol. (3).Attach the nfs pool vol to VM. (4).Start VM and check result. """ # Get general variables. status_error = ('yes' == params.get("status_error", 'no')) host_sestatus = params.get("dac_nfs_disk_host_selinux", "enforcing") # Get qemu.conf config variables qemu_user = params.get("qemu_user") qemu_group = params.get("qemu_group") dynamic_ownership = "yes" == params.get("dynamic_ownership", "yes") # Get variables about pool vol virt_use_nfs = params.get("virt_use_nfs", "off") nfs_server_dir = params.get("nfs_server_dir", "nfs-server") pool_name = params.get("pool_name") pool_type = params.get("pool_type") pool_target = params.get("pool_target") export_options = params.get("export_options", "rw,async,no_root_squash,fsid=0") emulated_image = params.get("emulated_image") vol_name = params.get("vol_name") vol_format = params.get("vol_format") bk_file_name = params.get("bk_file_name") # Get pool vol variables img_tup = ("img_user", "img_group", "img_mode") img_val = [] for i in img_tup: try: img_val.append(int(params.get(i))) except ValueError: raise error.TestNAError("%s value '%s' is not a number." % (i, params.get(i))) img_user, img_group, img_mode = img_val # Get variables about VM and get a VM object and VMXML instance. vm_name = params.get("main_vm") vm = env.get_vm(vm_name) vmxml = VMXML.new_from_inactive_dumpxml(vm_name) backup_xml = vmxml.copy() # Backup domain disk label disks = vm.get_disk_devices() backup_labels_of_disks = {} for disk in disks.values(): disk_path = disk['source'] f = os.open(disk_path, 0) stat_re = os.fstat(f) backup_labels_of_disks[disk_path] = "%s:%s" % (stat_re.st_uid, stat_re.st_gid) os.close(f) # Backup selinux status of host. backup_sestatus = utils_selinux.get_status() pvt = None snapshot_name = None disk_snap_path = [] qemu_conf = utils_config.LibvirtQemuConfig() libvirtd = utils_libvirtd.Libvirtd() try: # chown domain disk to qemu:qemu to avoid fail on local disk for disk in disks.values(): disk_path = disk['source'] if qemu_user == "root": os.chown(disk_path, 0, 0) elif qemu_user == "qemu": os.chown(disk_path, 107, 107) # Set selinux of host. utils_selinux.set_status(host_sestatus) # set qemu conf qemu_conf.user = qemu_user qemu_conf.group = qemu_user if dynamic_ownership: qemu_conf.dynamic_ownership = 1 else: qemu_conf.dynamic_ownership = 0 logging.debug("the qemu.conf content is: %s" % qemu_conf) libvirtd.restart() # Create dst pool for create attach vol img logging.debug("export_options is: %s" % export_options) pvt = utlv.PoolVolumeTest(test, params) pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image, image_size="1G", pre_disk_vol=["20M"], export_options=export_options) # set virt_use_nfs result = utils.run("setsebool virt_use_nfs %s" % virt_use_nfs) if result.exit_status: raise error.TestNAError("Failed to set virt_use_nfs value") # Init a QemuImg instance and create img on nfs server dir. params['image_name'] = vol_name tmp_dir = data_dir.get_tmp_dir() nfs_path = os.path.join(tmp_dir, nfs_server_dir) image = qemu_storage.QemuImg(params, nfs_path, vol_name) # Create a image. 
server_img_path, result = image.create(params) if params.get("image_name_backing_file"): params['image_name'] = bk_file_name params['has_backing_file'] = "yes" image = qemu_storage.QemuImg(params, nfs_path, bk_file_name) server_img_path, result = image.create(params) # Get vol img path vol_name = server_img_path.split('/')[-1] virsh.pool_refresh(pool_name, debug=True) cmd_result = virsh.vol_path(vol_name, pool_name, debug=True) if cmd_result.exit_status: raise error.TestNAError("Failed to get volume path from pool.") img_path = cmd_result.stdout.strip() # Do the attach action. extra = "--persistent --subdriver qcow2" result = virsh.attach_disk(vm_name, source=img_path, target="vdf", extra=extra, debug=True) if result.exit_status: raise error.TestFail("Failed to attach disk %s to VM." "Detail: %s." % (img_path, result.stderr)) # Change img ownership and mode on nfs server dir os.chown(server_img_path, img_user, img_group) os.chmod(server_img_path, img_mode) img_label_before = check_ownership(server_img_path) if img_label_before: logging.debug("attached image ownership on nfs server before " "start: %s" % img_label_before) # Start VM to check the VM is able to access the image or not. try: vm.start() # Start VM successfully. img_label_after = check_ownership(server_img_path) if img_label_after: logging.debug("attached image ownership on nfs server after" " start: %s" % img_label_after) if status_error: raise error.TestFail('Test succeeded in negative case.') except virt_vm.VMStartError, e: # Starting VM failed. if not status_error: raise error.TestFail("Test failed in positive case." "error: %s" % e) if params.get("image_name_backing_file"): options = "--disk-only" snapshot_result = virsh.snapshot_create(vm_name, options, debug=True) if snapshot_result.exit_status: if not status_error: raise error.TestFail("Failed to create snapshot. Error:%s." % snapshot_result.stderr.strip()) snapshot_name = re.search( "\d+", snapshot_result.stdout.strip()).group(0) if snapshot_name: disks_snap = vm.get_disk_devices() for disk in disks_snap.values(): disk_snap_path.append(disk['source']) virsh.snapshot_delete(vm_name, snapshot_name, "--metadata", debug=True) try: virsh.detach_disk(vm_name, target="vdf", extra="--persistent", debug=True) except error.CmdError: raise error.TestFail("Detach disk 'vdf' from VM %s failed." % vm.name)
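# A hedged sketch of the check_ownership helper referenced above (its real
# implementation is not shown in this excerpt): report uid:gid of the image on
# the NFS server directory so the DAC labels can be compared before and after
# the VM starts.
import os


def check_ownership(path):
    """Return 'uid:gid' of path, or None if the path does not exist."""
    if not os.path.exists(path):
        return None
    stat_re = os.stat(path)
    return "%s:%s" % (stat_re.st_uid, stat_re.st_gid)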
def run(test, params, env): """ Test virsh blockcopy --xml option. 1.Prepare backend storage (file/block/iscsi/ceph/nbd) 2.Start VM 3.Prepare target xml 4.Execute virsh blockcopy --xml command 5.Check VM xml after operation accomplished 6.Clean up test environment """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) virsh_dargs = {'debug': True, 'ignore_status': True} ignore_check = False def check_blockcopy_xml(vm_name, source_image, ignore_check=False): """ Check blockcopy xml in VM. :param vm_name: VM name :param source_image: source image name. :param ignore_check: default is False. """ if ignore_check: return source_imge_list = [] blklist = virsh.domblklist(vm_name).stdout_text.splitlines() for line in blklist: if line.strip().startswith(('hd', 'vd', 'sd', 'xvd')): source_imge_list.append(line.split()[-1]) logging.debug('domblklist %s:\n%s', vm_name, source_imge_list) if not any(source_image in s for s in source_imge_list): test.fail("Cannot find expected source image: %s" % source_image) # Disk specific attributes. device = params.get("virt_disk_device", "disk") device_target = params.get("virt_disk_device_target", "vdd") device_format = params.get("virt_disk_device_format", "raw") device_type = params.get("virt_disk_device_type", "file") device_bus = params.get("virt_disk_device_bus", "virtio") backend_storage_type = params.get("backend_storage_type", "iscsi") blockcopy_option = params.get("blockcopy_option") # Backend storage auth info storage_size = params.get("storage_size", "1G") enable_auth = "yes" == params.get("enable_auth") use_auth_usage = "yes" == params.get("use_auth_usage") auth_sec_usage_type = params.get("auth_sec_usage_type", "iscsi") auth_sec_usage_target = params.get("auth_sec_usage_target", "libvirtiscsi") auth_sec_uuid = "" disk_auth_dict = {} size = "1" status_error = "yes" == params.get("status_error") define_error = "yes" == params.get("define_error") # Initialize one NbdExport object nbd = None img_file = os.path.join(data_dir.get_tmp_dir(), "%s_test.img" % vm_name) # Start VM and get all partitions in VM. if vm.is_dead(): vm.start() session = vm.wait_for_login() old_parts = utils_disk.get_parts_list(session) session.close() vm.destroy(gracefully=False) # Back up xml file. vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) # Additional disk images. disks_img = [] try: # Clean up dirty secrets in test environments if there are. 
utils_secret.clean_up_secrets() # Setup backend storage if backend_storage_type == "file": image_filename = params.get("image_filename", "raw.img") disk_path = os.path.join(data_dir.get_tmp_dir(), image_filename) if blockcopy_option in ['reuse_external']: device_source = libvirt.create_local_disk( backend_storage_type, disk_path, storage_size, device_format) else: device_source = disk_path disks_img.append({ "format": device_format, "source": disk_path, "path": disk_path }) disk_src_dict = { 'attrs': { 'file': device_source, 'type_name': 'file' } } checkout_device_source = image_filename elif backend_storage_type == "iscsi": iscsi_host = params.get("iscsi_host") iscsi_port = params.get("iscsi_port") if device_type == "block": device_source = libvirt.setup_or_cleanup_iscsi(is_setup=True) disk_src_dict = {'attrs': {'dev': device_source}} checkout_device_source = device_source elif device_type == "network": chap_user = params.get("chap_user", "redhat") chap_passwd = params.get("chap_passwd", "password") auth_sec_usage = params.get("auth_sec_usage", "libvirtiscsi") auth_sec_dict = { "sec_usage": "iscsi", "sec_target": auth_sec_usage } auth_sec_uuid = libvirt.create_secret(auth_sec_dict) # Set password of auth secret virsh.secret_set_value(auth_sec_uuid, chap_passwd, encode=True, debug=True) iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi( is_setup=True, is_login=False, image_size=storage_size, chap_user=chap_user, chap_passwd=chap_passwd, portal_ip=iscsi_host) # ISCSI auth attributes for disk xml disk_auth_dict = { "auth_user": chap_user, "secret_type": auth_sec_usage_type, "secret_usage": auth_sec_usage_target } device_source = "iscsi://%s:%s/%s/%s" % ( iscsi_host, iscsi_port, iscsi_target, lun_num) disk_src_dict = { "attrs": { "protocol": "iscsi", "name": "%s/%s" % (iscsi_target, lun_num) }, "hosts": [{ "name": iscsi_host, "port": iscsi_port }] } checkout_device_source = 'emulated-iscsi' elif backend_storage_type == "ceph": ceph_host_ip = params.get("ceph_host_ip", "EXAMPLE_HOSTS") ceph_mon_ip = params.get("ceph_mon_ip", "EXAMPLE_MON_HOST") ceph_host_port = params.get("ceph_host_port", "EXAMPLE_PORTS") ceph_disk_name = params.get("ceph_disk_name", "EXAMPLE_SOURCE_NAME") ceph_client_name = params.get("ceph_client_name") ceph_client_key = params.get("ceph_client_key") ceph_auth_user = params.get("ceph_auth_user") ceph_auth_key = params.get("ceph_auth_key") enable_auth = "yes" == params.get("enable_auth") size = "0.15" key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key") key_opt = "" # Prepare a blank params to confirm whether it needs delete the configure at the end of the test ceph_cfg = "" if not utils_package.package_install(["ceph-common"]): test.error("Failed to install ceph-common") # Create config file if it doesn't exist ceph_cfg = ceph.create_config_file(ceph_mon_ip) # If enable auth, prepare a local file to save key if ceph_client_name and ceph_client_key: with open(key_file, 'w') as f: f.write("[%s]\n\tkey = %s\n" % (ceph_client_name, ceph_client_key)) key_opt = "--keyring %s" % key_file auth_sec_dict = { "sec_usage": auth_sec_usage_type, "sec_name": "ceph_auth_secret" } auth_sec_uuid = libvirt.create_secret(auth_sec_dict) virsh.secret_set_value(auth_sec_uuid, ceph_auth_key, ignore_status=False, debug=True) disk_auth_dict = { "auth_user": ceph_auth_user, "secret_type": auth_sec_usage_type, "secret_uuid": auth_sec_uuid } else: test.error("No ceph client name/key provided.") device_source = "rbd:%s:mon_host=%s:keyring=%s" % ( ceph_disk_name, ceph_mon_ip, key_file) cmd = 
("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm " "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name)) cmd_result = process.run(cmd, ignore_status=True, shell=True) logging.debug("pre clean up rbd disk if exists: %s", cmd_result) if blockcopy_option in ['reuse_external']: # Create an local image and make FS on it. libvirt.create_local_disk("file", img_file, storage_size, device_format) # Convert the image to remote storage disk_path = ("rbd:%s:mon_host=%s" % (ceph_disk_name, ceph_mon_ip)) if ceph_client_name and ceph_client_key: disk_path += (":id=%s:key=%s" % (ceph_auth_user, ceph_auth_key)) rbd_cmd = ( "rbd -m %s %s info %s 2> /dev/null|| qemu-img convert -O" " %s %s %s" % (ceph_mon_ip, key_opt, ceph_disk_name, device_format, img_file, disk_path)) process.run(rbd_cmd, ignore_status=False, shell=True) disk_src_dict = { "attrs": { "protocol": "rbd", "name": ceph_disk_name }, "hosts": [{ "name": ceph_host_ip, "port": ceph_host_port }] } checkout_device_source = ceph_disk_name elif backend_storage_type == "nbd": # Get server hostname. hostname = socket.gethostname().strip() # Setup backend storage nbd_server_host = hostname nbd_server_port = params.get("nbd_server_port") image_path = params.get("emulated_image", "/var/lib/libvirt/images/nbdtest.img") # Create NbdExport object nbd = NbdExport(image_path, image_format=device_format, port=nbd_server_port) nbd.start_nbd_server() # Prepare disk source xml source_attrs_dict = {"protocol": "nbd"} disk_src_dict = {} disk_src_dict.update({"attrs": source_attrs_dict}) disk_src_dict.update({ "hosts": [{ "name": nbd_server_host, "port": nbd_server_port }] }) device_source = "nbd://%s:%s/%s" % (nbd_server_host, nbd_server_port, image_path) checkout_device_source = image_path if blockcopy_option in ['pivot']: ignore_check = True logging.debug("device source is: %s", device_source) # Add disk xml. 
vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disk_xml = Disk(type_name=device_type) disk_xml.device = device disk_xml.target = {"dev": device_target, "bus": device_bus} driver_dict = {"name": "qemu", "type": device_format} disk_xml.driver = driver_dict disk_source = disk_xml.new_disk_source(**disk_src_dict) auth_in_source = True if disk_auth_dict: logging.debug("disk auth dict is: %s" % disk_auth_dict) disk_source.auth = disk_xml.new_auth(**disk_auth_dict) disk_xml.source = disk_source logging.debug("new disk xml is: %s", disk_xml) # Sync VM xml device_source_path = os.path.join(data_dir.get_tmp_dir(), "source.raw") tmp_device_source = libvirt.create_local_disk("file", path=device_source_path, size=size, disk_format="raw") s_attach = virsh.attach_disk(vm_name, tmp_device_source, device_target, "--config", debug=True) libvirt.check_exit_status(s_attach) try: vm.start() vm.wait_for_login().close() except xcepts.LibvirtXMLError as xml_error: if not define_error: test.fail("Failed to define VM:\n%s" % str(xml_error)) except virt_vm.VMStartError as details: # VM cannot be started if status_error: logging.info("VM failed to start as expected: %s", str(details)) else: test.fail("VM should start but failed: %s" % str(details)) # Run blockcopy with the prepared disk XML. options = params.get("options", "--pivot --transient-job --verbose --wait") result = virsh.blockcopy(vm_name, device_target, "--xml %s" % disk_xml.xml, options=options, debug=True) libvirt.check_exit_status(result) check_source_image = None if blockcopy_option in ['pivot']: check_source_image = checkout_device_source else: check_source_image = tmp_device_source check_blockcopy_xml(vm_name, check_source_image, ignore_check) finally: # Delete snapshots. if virsh.domain_exists(vm_name): # To delete snapshots, destroy the VM first. if vm.is_alive(): vm.destroy() libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup) vmxml_backup.sync("--snapshots-metadata") if os.path.exists(img_file): libvirt.delete_local_disk("file", img_file) for img in disks_img: if os.path.exists(img["path"]): libvirt.delete_local_disk("file", img["path"]) # Clean up backend storage if backend_storage_type == "iscsi": libvirt.setup_or_cleanup_iscsi(is_setup=False) elif backend_storage_type == "ceph": # Remove ceph configuration file if created. if ceph_cfg: os.remove(ceph_cfg) cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm " "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name)) cmd_result = process.run(cmd, ignore_status=True, shell=True) logging.debug("result of rbd removal: %s", cmd_result.stdout_text) if os.path.exists(key_file): os.remove(key_file) elif backend_storage_type == "nbd": if nbd: try: nbd.cleanup() except Exception as ndbEx: logging.error("Clean up nbd failed: %s" % str(ndbEx)) # Clean up secrets if auth_sec_uuid: virsh.secret_undefine(auth_sec_uuid)
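# A small usage sketch (not part of the original test) tying the steps above
# together: run blockcopy with a prepared disk XML file and return the domain
# block list so the new source can be checked.  It mirrors the
# virsh.blockcopy/virsh.domblklist calls made above; the helper name and the
# default option string are illustrative.
from virttest import virsh


def blockcopy_with_xml(vm_name, target, disk_xml_path,
                       options="--pivot --transient-job --verbose --wait"):
    """Run 'virsh blockcopy --xml <file>' and return the domblklist output."""
    result = virsh.blockcopy(vm_name, target, "--xml %s" % disk_xml_path,
                             options=options, debug=True)
    if result.exit_status:
        raise RuntimeError("blockcopy failed: %s" % result.stderr)
    return virsh.domblklist(vm_name).stdout_text

# Example: blockcopy_with_xml("avocado-vt-vm1", "vdd", "/tmp/new_disk.xml")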
def vm_stress_events(self, event, vm): """ Stress events :param event: event name :param vm: vm object """ dargs = {'ignore_status': True, 'debug': True} for itr in range(self.iterations): if "vcpupin" in event: for vcpu in range(int(self.current_vcpu)): result = virsh.vcpupin(vm.name, vcpu, random.choice(self.host_cpu_list), **dargs) if not self.ignore_status: libvirt.check_exit_status(result) elif "emulatorpin" in event: for vcpu in range(int(self.current_vcpu)): result = virsh.emulatorpin(vm.name, random.choice( self.host_cpu_list), **dargs) if not self.ignore_status: libvirt.check_exit_status(result) elif "suspend" in event: result = virsh.suspend(vm.name, **dargs) if not self.ignore_status: libvirt.check_exit_status(result) time.sleep(self.event_sleep_time) result = virsh.resume(vm.name, **dargs) if not self.ignore_status: libvirt.check_exit_status(result) elif "cpuhotplug" in event: result = virsh.setvcpus(vm.name, self.max_vcpu, "--live", **dargs) if not self.ignore_status: libvirt.check_exit_status(result) exp_vcpu = {'max_config': self.max_vcpu, 'max_live': self.max_vcpu, 'cur_config': self.current_vcpu, 'cur_live': self.max_vcpu, 'guest_live': self.max_vcpu} utils_hotplug.check_vcpu_value( vm, exp_vcpu, option="--live") time.sleep(self.event_sleep_time) result = virsh.setvcpus(vm.name, self.current_vcpu, "--live", **dargs) if not self.ignore_status: libvirt.check_exit_status(result) exp_vcpu = {'max_config': self.max_vcpu, 'max_live': self.max_vcpu, 'cur_config': self.current_vcpu, 'cur_live': self.current_vcpu, 'guest_live': self.current_vcpu} utils_hotplug.check_vcpu_value( vm, exp_vcpu, option="--live") elif "reboot" in event: vm.reboot() elif "nethotplug" in event: for iface_num in range(int(self.iface_num)): logging.debug("Try to attach interface %d" % iface_num) mac = utils_net.generate_mac_address_simple() options = ("%s %s --model %s --mac %s %s" % (self.iface_type, self.iface_source['network'], self.iface_model, mac, self.attach_option)) logging.debug("VM name: %s , Options for Network attach: %s", vm.name, options) ret = virsh.attach_interface(vm.name, options, ignore_status=True) time.sleep(self.event_sleep_time) if not self.ignore_status: libvirt.check_exit_status(ret) if self.detach_option: options = ("--type %s --mac %s %s" % (self.iface_type, mac, self.detach_option)) logging.debug("VM name: %s , Options for Network detach: %s", vm.name, options) ret = virsh.detach_interface(vm.name, options, ignore_status=True) if not self.ignore_status: libvirt.check_exit_status(ret) elif "diskhotplug" in event: for disk_num in range(len(self.device_source_names)): disk = {} disk_attach_error = False disk_name = os.path.join(self.path, vm.name, self.device_source_names[disk_num]) device_source = libvirt.create_local_disk( self.disk_type, disk_name, self.disk_size, disk_format=self.disk_format) disk.update({"format": self.disk_format, "source": device_source}) disk_xml = Disk(self.disk_type) disk_xml.device = self.disk_device disk_xml.driver = {"name": self.disk_driver, "type": self.disk_format} ret = virsh.attach_disk(vm.name, disk["source"], self.device_target[disk_num], self.attach_option, debug=True) if not self.ignore_status: libvirt.check_exit_status(ret, disk_attach_error) if self.detach_option: ret = virsh.detach_disk(vm.name, self.device_target[disk_num], extra=self.detach_option) if not self.ignore_status: libvirt.check_exit_status(ret) libvirt.delete_local_disk(self.disk_type, disk_name) else: raise NotImplementedError
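# A hedged sketch (wrapper name hypothetical) of driving the vm_stress_events
# method above for a list of event names against several VMs, collecting the
# events the class does not implement instead of aborting.
def run_stress_events(stress_obj, events, vms):
    """Call stress_obj.vm_stress_events(event, vm) for every event/VM pair."""
    unsupported = []
    for vm in vms:
        for event in events:
            try:
                stress_obj.vm_stress_events(event, vm)
            except NotImplementedError:
                unsupported.append("unsupported event: %s" % event)
    return unsupported

# Example: run_stress_events(stress_obj, ["suspend", "reboot"], [vm])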
devices = vm.get_blk_devices() for device in devices: s_detach = virsh.detach_disk(vm_name, device, "--config", debug=True) if s_detach.exit_status != 0: logging.error("Detach %s failed before test." % device) subdriver = utils_test.get_image_info(shared_storage)['format'] extra_attach = ("--config --driver qemu --subdriver %s --cache %s" % (subdriver, disk_cache)) s_attach = virsh.attach_disk(vm_name, shared_storage, "vda", extra_attach, debug=True) if s_attach.exit_status != 0: logging.error("Attach vda failed before test.") vm.start() vm.wait_for_login() # Confirm VM can be accessed through network. time.sleep(delay) vm_ip = vm.get_address() s_ping, o_ping = utils_test.ping(vm_ip, count=2, timeout=delay) logging.info(o_ping) if s_ping != 0: raise error.TestError("%s did not respond after %d sec." %
def run(test, params, env): """ Test migration of multi vms. """ vm_names = params.get("migrate_vms").split() if len(vm_names) < 2: raise exceptions.TestSkipError("No multi vms provided.") # Prepare parameters method = params.get("virsh_migrate_method") jobabort = "yes" == params.get("virsh_migrate_jobabort", "no") options = params.get("virsh_migrate_options", "") status_error = "yes" == params.get("status_error", "no") remote_host = params.get("remote_host", "DEST_HOSTNAME.EXAMPLE.COM") local_host = params.get("local_host", "SOURCE_HOSTNAME.EXAMPLE.COM") host_user = params.get("host_user", "root") host_passwd = params.get("host_password", "PASSWORD") nfs_shared_disk = params.get("nfs_shared_disk", True) migration_type = params.get("virsh_migration_type", "simultaneous") migrate_timeout = int(params.get("virsh_migrate_thread_timeout", 900)) migration_time = int(params.get("virsh_migrate_timeout", 60)) # Params for NFS and SSH setup params["server_ip"] = params.get("migrate_dest_host") params["server_user"] = "******" params["server_pwd"] = params.get("migrate_dest_pwd") params["client_ip"] = params.get("migrate_source_host") params["client_user"] = "******" params["client_pwd"] = params.get("migrate_source_pwd") params["nfs_client_ip"] = params.get("migrate_dest_host") params["nfs_server_ip"] = params.get("migrate_source_host") desturi = libvirt_vm.get_uri_with_transport(transport="ssh", dest_ip=remote_host) srcuri = libvirt_vm.get_uri_with_transport(transport="ssh", dest_ip=local_host) # Don't allow the defaults. if srcuri.count('///') or srcuri.count('EXAMPLE'): raise exceptions.TestSkipError("The srcuri '%s' is invalid" % srcuri) if desturi.count('///') or desturi.count('EXAMPLE'): raise exceptions.TestSkipError("The desturi '%s' is invalid" % desturi) # Config ssh autologin for remote host ssh_key.setup_remote_ssh_key(remote_host, host_user, host_passwd, port=22, public_key="rsa") # Prepare local session and remote session localrunner = remote.RemoteRunner(host=remote_host, username=host_user, password=host_passwd) remoterunner = remote.RemoteRunner(host=remote_host, username=host_user, password=host_passwd) # Configure NFS in remote host if nfs_shared_disk: nfs_client = nfs.NFSClient(params) nfs_client.setup() # Prepare MigrationHelper instance vms = [] for vm_name in vm_names: vm = env.get_vm(vm_name) vms.append(vm) try: option = make_migration_options(method, options, migration_time) # make sure cache=none if "unsafe" not in options: device_target = params.get("virsh_device_target", "sda") for vm in vms: if vm.is_alive(): vm.destroy() for each_vm in vm_names: logging.info("configure cache=none") vmxml = vm_xml.VMXML.new_from_dumpxml(each_vm) device_source = str( vmxml.get_disk_attr(each_vm, device_target, 'source', 'file')) ret_detach = virsh.detach_disk(each_vm, device_target, "--config") status = ret_detach.exit_status output = ret_detach.stdout.strip() logging.info("Status:%s", status) logging.info("Output:\n%s", output) if not ret_detach: raise exceptions.TestError("Detach disks fails") subdriver = utils_test.get_image_info(device_source)['format'] ret_attach = virsh.attach_disk( each_vm, device_source, device_target, "--driver qemu " "--config --cache none " "--subdriver %s" % subdriver) status = ret_attach.exit_status output = ret_attach.stdout.strip() logging.info("Status:%s", status) logging.info("Output:\n%s", output) if not ret_attach: raise exceptions.TestError("Attach disks fails") for vm in vms: if vm.is_dead(): vm.start() vm.wait_for_login() multi_migration(vms, 
srcuri, desturi, option, migration_type, migrate_timeout, jobabort, lrunner=localrunner, rrunner=remoterunner, status_error=status_error) except Exception as info: logging.error("Test failed: %s" % info) flag_migration = False # NFS cleanup if nfs_shared_disk: logging.info("NFS cleanup") nfs_client.cleanup(ssh_auto_recover=False) localrunner.session.close() remoterunner.session.close() if not (ret_migration or flag_migration): if not status_error: raise exceptions.TestFail("Migration test failed") if not ret_jobabort: if not status_error: raise exceptions.TestFail("Abort migration failed") if not ret_downtime_tolerable: raise exceptions.TestFail("Downtime during migration is intolerable")
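The migration test above forces cache=none on the guest disk before migrating. The following minimal sketch isolates that detach/re-attach pattern, assuming the avocado-vt wrappers used in these snippets; the helper name and the example domain, target and image path are hypothetical.

from virttest import utils_test, virsh


def reattach_with_cache_none(vm_name, target, image_path):
    """Detach a disk from the persistent config and re-attach it with cache=none."""
    ret = virsh.detach_disk(vm_name, target, extra="--config", debug=True)
    if ret.exit_status != 0:
        raise RuntimeError("Detach %s failed: %s" % (target, ret.stderr))
    subdriver = utils_test.get_image_info(image_path)['format']
    extra = "--config --driver qemu --subdriver %s --cache none" % subdriver
    ret = virsh.attach_disk(vm_name, image_path, target, extra, debug=True)
    if ret.exit_status != 0:
        raise RuntimeError("Attach %s failed: %s" % (target, ret.stderr))

# Example usage (hypothetical values):
# reattach_with_cache_none("avocado-vt-vm1", "vda", "/var/lib/libvirt/images/vm1.qcow2")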
def run(test, params, env): """ Test virsh undefine command. Undefine an inactive domain, or convert persistent to transient. 1.Prepare test environment. 2.Backup the VM's information to a xml file. 3.When the libvirtd == "off", stop the libvirtd service. 4.Perform virsh undefine operation. 5.Recover test environment.(libvirts service,VM) 6.Confirm the test result. """ vm_ref = params.get("undefine_vm_ref", "vm_name") extra = params.get("undefine_extra", "") option = params.get("undefine_option", "") libvirtd_state = params.get("libvirtd", "on") status_error = ("yes" == params.get("status_error", "no")) undefine_twice = ("yes" == params.get("undefine_twice", 'no')) local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM") remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM") remote_user = params.get("remote_user", "user") remote_pwd = params.get("remote_pwd", "password") remote_prompt = params.get("remote_prompt", "#") pool_type = params.get("pool_type") pool_name = params.get("pool_name", "test") pool_target = params.get("pool_target") volume_size = params.get("volume_size", "1G") vol_name = params.get("vol_name", "test_vol") emulated_img = params.get("emulated_img", "emulated_img") emulated_size = "%sG" % (int(volume_size[:-1]) + 1) disk_target = params.get("disk_target", "vdb") wipe_data = "yes" == params.get("wipe_data", "no") if wipe_data: option += " --wipe-storage" vm_name = params.get("main_vm", "virt-tests-vm1") vm = env.get_vm(vm_name) vm_id = vm.get_id() vm_uuid = vm.get_uuid() # polkit acl related params uri = params.get("virsh_uri") unprivileged_user = params.get('unprivileged_user') if unprivileged_user: if unprivileged_user.count('EXAMPLE'): unprivileged_user = '******' if not libvirt_version.version_compare(1, 1, 1): if params.get('setup_libvirt_polkit') == 'yes': raise error.TestNAError("API acl test not supported in current" " libvirt version.") # Back up xml file.Xen host has no guest xml file to define a guset. backup_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) # Confirm how to reference a VM. if vm_ref == "vm_name": vm_ref = vm_name elif vm_ref == "id": vm_ref = vm_id elif vm_ref == "hex_vm_id": vm_ref = hex(int(vm_id)) elif vm_ref == "uuid": vm_ref = vm_uuid elif vm_ref.find("invalid") != -1: vm_ref = params.get(vm_ref) volume = None pvtest = None status3 = None try: save_file = "/var/lib/libvirt/qemu/save/%s.save" % vm_name if option.count("managedsave") and vm.is_alive(): virsh.managedsave(vm_name) if not vm.is_lxc(): snp_list = virsh.snapshot_list(vm_name) if option.count("snapshot"): snp_file_list = [] if not len(snp_list): virsh.snapshot_create(vm_name) logging.debug("Create a snapshot for test!") else: # Backup snapshots for domain for snp_item in snp_list: tmp_file = os.path.join(test.tmpdir, snp_item + ".xml") virsh.snapshot_dumpxml(vm_name, snp_item, to_file=tmp_file) snp_file_list.append(tmp_file) else: if len(snp_list): raise error.TestNAError("This domain has snapshot(s), " "cannot be undefined!") if option.count("remove-all-storage"): pvtest = utlv.PoolVolumeTest(test, params) pvtest.pre_pool(pool_name, pool_type, pool_target, emulated_img, emulated_size=emulated_size) new_pool = libvirt_storage.PoolVolume(pool_name) if not new_pool.create_volume(vol_name, volume_size): raise error.TestFail("Creation of volume %s failed." % vol_name) volumes = new_pool.list_volumes() volume = volumes[vol_name] virsh.attach_disk(vm_name, volume, disk_target, "--config") # Turn libvirtd into certain state. 
if libvirtd_state == "off": utils_libvirtd.libvirtd_stop() # Test virsh undefine command. output = "" if vm_ref != "remote": vm_ref = "%s %s" % (vm_ref, extra) cmdresult = virsh.undefine(vm_ref, option, unprivileged_user=unprivileged_user, uri=uri, ignore_status=True, debug=True) status = cmdresult.exit_status output = cmdresult.stdout.strip() if status: logging.debug("Error status, command output: %s", cmdresult.stderr.strip()) if undefine_twice: status2 = virsh.undefine(vm_ref, ignore_status=True).exit_status else: if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"): raise error.TestNAError("remote_ip and/or local_ip parameters" " not changed from default values") try: uri = libvirt_vm.complete_uri(local_ip) session = remote.remote_login("ssh", remote_ip, "22", remote_user, remote_pwd, remote_prompt) cmd_undefine = "virsh -c %s undefine %s" % (uri, vm_name) status, output = session.cmd_status_output(cmd_undefine) logging.info("Undefine output: %s", output) except (error.CmdError, remote.LoginError, aexpect.ShellError), de: logging.error("Detail: %s", de) status = 1 # Recover libvirtd state. if libvirtd_state == "off": utils_libvirtd.libvirtd_start() # Shutdown VM. if virsh.domain_exists(vm.name): try: if vm.is_alive(): vm.destroy(gracefully=False) except error.CmdError, detail: logging.error("Detail: %s", detail)
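For reference, the core undefine-and-verify flow the test above builds on can be reduced to the sketch below, assuming the avocado-vt virsh wrapper and a persistent, shut-off domain; the helper name and default options are illustrative, not part of the original test.

from virttest import virsh


def undefine_and_verify(vm_name, options="--managed-save --snapshots-metadata"):
    """Undefine a domain and confirm libvirt no longer knows about it."""
    result = virsh.undefine(vm_name, options, ignore_status=True, debug=True)
    if result.exit_status != 0:
        raise RuntimeError("undefine failed: %s" % result.stderr.strip())
    if virsh.domain_exists(vm_name):
        raise RuntimeError("%s is still defined after undefine" % vm_name)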
def run(test, params, env): """ Test virsh snapshot command when disk in all kinds of type. (1). Init the variables from params. (2). Create a image by specifice format. (3). Attach disk to vm. (4). Snapshot create. (5). Snapshot revert. (6). cleanup. """ # Init variables. vm_name = params.get("main_vm", "avocado-vt-vm1") vm = env.get_vm(vm_name) vm_state = params.get("vm_state", "running") image_format = params.get("snapshot_image_format", "qcow2") snapshot_del_test = "yes" == params.get("snapshot_del_test", "no") status_error = ("yes" == params.get("status_error", "no")) snapshot_from_xml = ("yes" == params.get("snapshot_from_xml", "no")) snapshot_current = ("yes" == params.get("snapshot_current", "no")) snapshot_revert_paused = ("yes" == params.get("snapshot_revert_paused", "no")) replace_vm_disk = "yes" == params.get("replace_vm_disk", "no") disk_source_protocol = params.get("disk_source_protocol") vol_name = params.get("vol_name") tmp_dir = data_dir.get_tmp_dir() pool_name = params.get("pool_name", "gluster-pool") brick_path = os.path.join(tmp_dir, pool_name) multi_gluster_disks = "yes" == params.get("multi_gluster_disks", "no") # Pool variables. snapshot_with_pool = "yes" == params.get("snapshot_with_pool", "no") pool_name = params.get("pool_name") pool_type = params.get("pool_type") pool_target = params.get("pool_target") emulated_image = params.get("emulated_image", "emulated-image") vol_format = params.get("vol_format") lazy_refcounts = "yes" == params.get("lazy_refcounts") options = params.get("snapshot_options", "") export_options = params.get("export_options", "rw,no_root_squash,fsid=0") # Set volume xml attribute dictionary, extract all params start with 'vol_' # which are for setting volume xml, except 'lazy_refcounts'. vol_arg = {} for key in params.keys(): if key.startswith('vol_'): if key[4:] in ['capacity', 'allocation', 'owner', 'group']: vol_arg[key[4:]] = int(params[key]) else: vol_arg[key[4:]] = params[key] vol_arg['lazy_refcounts'] = lazy_refcounts supported_pool_list = [ "dir", "fs", "netfs", "logical", "iscsi", "disk", "gluster" ] if snapshot_with_pool: if pool_type not in supported_pool_list: raise error.TestNAError("%s not in support list %s" % (pool_target, supported_pool_list)) # Do xml backup for final recovery vmxml_backup = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name) # Some variable for xmlfile of snapshot. snapshot_memory = params.get("snapshot_memory", "internal") snapshot_disk = params.get("snapshot_disk", "internal") no_memory_snap = "yes" == params.get("no_memory_snap", "no") # Skip 'qed' cases for libvirt version greater than 1.1.0 if libvirt_version.version_compare(1, 1, 0): if vol_format == "qed" or image_format == "qed": raise error.TestNAError("QED support changed, check bug: " "https://bugzilla.redhat.com/show_bug.cgi" "?id=731570") if not libvirt_version.version_compare(1, 2, 7): # As bug 1017289 closed as WONTFIX, the support only # exist on 1.2.7 and higher if disk_source_protocol == 'gluster': raise error.TestNAError("Snapshot on glusterfs not support in " "current version. Check more info with " "https://bugzilla.redhat.com/buglist.cgi?" 
"bug_id=1017289,1032370") # Init snapshot_name snapshot_name = None snapshot_external_disk = [] snapshot_xml_path = None del_status = None image = None pvt = None # Get a tmp dir snap_cfg_path = "/var/lib/libvirt/qemu/snapshot/%s/" % vm_name try: if replace_vm_disk: utlv.set_vm_disk(vm, params, tmp_dir) if multi_gluster_disks: new_params = params.copy() new_params["pool_name"] = "gluster-pool2" new_params["vol_name"] = "gluster-vol2" new_params["disk_target"] = "vdf" new_params["image_convert"] = 'no' utlv.set_vm_disk(vm, new_params, tmp_dir) if snapshot_with_pool: # Create dst pool for create attach vol img pvt = utlv.PoolVolumeTest(test, params) pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image, image_size="1G", pre_disk_vol=["20M"], source_name=vol_name, export_options=export_options) if pool_type in ["iscsi", "disk"]: # iscsi and disk pool did not support create volume in libvirt, # logical pool could use libvirt to create volume but volume # format is not supported and will be 'raw' as default. pv = libvirt_storage.PoolVolume(pool_name) vols = pv.list_volumes().keys() if vols: vol_name = vols[0] else: raise error.TestNAError("No volume in pool: %s" % pool_name) else: # Set volume xml file volxml = libvirt_xml.VolXML() newvol = volxml.new_vol(**vol_arg) vol_xml = newvol['xml'] # Run virsh_vol_create to create vol logging.debug("create volume from xml: %s" % newvol.xmltreefile) cmd_result = virsh.vol_create(pool_name, vol_xml, ignore_status=True, debug=True) if cmd_result.exit_status: raise error.TestNAError("Failed to create attach volume.") cmd_result = virsh.vol_path(vol_name, pool_name, debug=True) if cmd_result.exit_status: raise error.TestNAError("Failed to get volume path from pool.") img_path = cmd_result.stdout.strip() if pool_type in ["logical", "iscsi", "disk"]: # Use qemu-img to format logical, iscsi and disk block device if vol_format != "raw": cmd = "qemu-img create -f %s %s 10M" % (vol_format, img_path) cmd_result = utils.run(cmd, ignore_status=True) if cmd_result.exit_status: raise error.TestNAError("Failed to format volume, %s" % cmd_result.stdout.strip()) extra = "--persistent --subdriver %s" % vol_format else: # Create a image. params['image_name'] = "snapshot_test" params['image_format'] = image_format params['image_size'] = "1M" image = qemu_storage.QemuImg(params, tmp_dir, "snapshot_test") img_path, _ = image.create(params) extra = "--persistent --subdriver %s" % image_format if not multi_gluster_disks: # Do the attach action. out = utils.run("qemu-img info %s" % img_path) logging.debug("The img info is:\n%s" % out.stdout.strip()) result = virsh.attach_disk(vm_name, source=img_path, target="vdf", extra=extra, debug=True) if result.exit_status: raise error.TestNAError("Failed to attach disk %s to VM." "Detail: %s." % (img_path, result.stderr)) # Create snapshot. if snapshot_from_xml: snap_xml = libvirt_xml.SnapshotXML() snapshot_name = "snapshot_test" snap_xml.snap_name = snapshot_name snap_xml.description = "Snapshot Test" if not no_memory_snap: if "--disk-only" not in options: if snapshot_memory == "external": memory_external = os.path.join(tmp_dir, "snapshot_memory") snap_xml.mem_snap_type = snapshot_memory snap_xml.mem_file = memory_external snapshot_external_disk.append(memory_external) else: snap_xml.mem_snap_type = snapshot_memory # Add all disks into xml file. 
vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name) disks = vmxml.devices.by_device_tag('disk') new_disks = [] for src_disk_xml in disks: disk_xml = snap_xml.SnapDiskXML() disk_xml.xmltreefile = src_disk_xml.xmltreefile del disk_xml.device del disk_xml.address disk_xml.snapshot = snapshot_disk disk_xml.disk_name = disk_xml.target['dev'] # Only qcow2 works as external snapshot file format, update it # here driver_attr = disk_xml.driver driver_attr.update({'type': 'qcow2'}) disk_xml.driver = driver_attr if snapshot_disk == 'external': new_attrs = disk_xml.source.attrs if disk_xml.source.attrs.has_key('file'): new_file = "%s.snap" % disk_xml.source.attrs['file'] snapshot_external_disk.append(new_file) new_attrs.update({'file': new_file}) hosts = None elif disk_xml.source.attrs.has_key('name'): new_name = "%s.snap" % disk_xml.source.attrs['name'] new_attrs.update({'name': new_name}) hosts = disk_xml.source.hosts elif (disk_xml.source.attrs.has_key('dev') and disk_xml.type_name == 'block'): # Use local file as external snapshot target for block type. # As block device will be treat as raw format by default, # it's not fit for external disk snapshot target. A work # around solution is use qemu-img again with the target. disk_xml.type_name = 'file' del new_attrs['dev'] new_file = "%s/blk_src_file.snap" % tmp_dir snapshot_external_disk.append(new_file) new_attrs.update({'file': new_file}) hosts = None new_src_dict = {"attrs": new_attrs} if hosts: new_src_dict.update({"hosts": hosts}) disk_xml.source = disk_xml.new_disk_source(**new_src_dict) else: del disk_xml.source new_disks.append(disk_xml) snap_xml.set_disks(new_disks) snapshot_xml_path = snap_xml.xml logging.debug("The snapshot xml is: %s" % snap_xml.xmltreefile) options += " --xmlfile %s " % snapshot_xml_path if vm_state == "shut off": vm.destroy(gracefully=False) snapshot_result = virsh.snapshot_create(vm_name, options, debug=True) out_err = snapshot_result.stderr.strip() if snapshot_result.exit_status: if status_error: return else: if re.search( "live disk snapshot not supported with this " "QEMU binary", out_err): raise error.TestNAError(out_err) if libvirt_version.version_compare(1, 2, 5): # As commit d2e668e in 1.2.5, internal active snapshot # without memory state is rejected. Handle it as SKIP # for now. This could be supportted in future by bug: # https://bugzilla.redhat.com/show_bug.cgi?id=1103063 if re.search( "internal snapshot of a running VM" + " must include the memory state", out_err): raise error.TestNAError("Check Bug #1083345, %s" % out_err) raise error.TestFail( "Failed to create snapshot. Error:%s." % out_err) else: snapshot_result = virsh.snapshot_create(vm_name, options, debug=True) if snapshot_result.exit_status: if status_error: return else: raise error.TestFail( "Failed to create snapshot. Error:%s." % snapshot_result.stderr.strip()) snapshot_name = re.search("\d+", snapshot_result.stdout.strip()).group(0) if snapshot_current: snap_xml = libvirt_xml.SnapshotXML() new_snap = snap_xml.new_from_snapshot_dumpxml( vm_name, snapshot_name) # update an element new_snap.creation_time = snapshot_name snapshot_xml_path = new_snap.xml options += "--redefine %s --current" % snapshot_xml_path snapshot_result = virsh.snapshot_create(vm_name, options, debug=True) if snapshot_result.exit_status: raise error.TestFail("Failed to create snapshot --current." "Error:%s." 
% snapshot_result.stderr.strip()) if status_error: if not snapshot_del_test: raise error.TestFail("Success to create snapshot in negative" " case\nDetail: %s" % snapshot_result) # Touch a file in VM. if vm.is_dead(): vm.start() session = vm.wait_for_login() # Init a unique name for tmp_file. tmp_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"), dir="/tmp") tmp_file_path = tmp_file.name tmp_file.close() echo_cmd = "echo SNAPSHOT_DISK_TEST >> %s" % tmp_file_path status, output = session.cmd_status_output(echo_cmd) logging.debug("The echo output in domain is: '%s'", output) if status: raise error.TestFail("'%s' run failed with '%s'" % (tmp_file_path, output)) status, output = session.cmd_status_output("cat %s" % tmp_file_path) logging.debug("File created with content: '%s'", output) session.close() # As only internal snapshot revert works now, let's only do revert # with internal, and move the all skip external cases back to pass. # After external also supported, just move the following code back. if snapshot_disk == 'internal': # Destroy vm for snapshot revert. if not libvirt_version.version_compare(1, 2, 3): virsh.destroy(vm_name) # Revert snapshot. revert_options = "" if snapshot_revert_paused: revert_options += " --paused" revert_result = virsh.snapshot_revert(vm_name, snapshot_name, revert_options, debug=True) if revert_result.exit_status: # Attempts to revert external snapshots will FAIL with an error # "revert to external disk snapshot not supported yet" or "revert # to external snapshot not supported yet" since d410e6f. Thus, # let's check for that and handle as a SKIP for now. Check bug: # https://bugzilla.redhat.com/show_bug.cgi?id=1071264 if re.search( "revert to external \w* ?snapshot not supported yet", revert_result.stderr): raise error.TestNAError(revert_result.stderr.strip()) else: raise error.TestFail("Revert snapshot failed. %s" % revert_result.stderr.strip()) if vm.is_dead(): raise error.TestFail("Revert snapshot failed.") if snapshot_revert_paused: if vm.is_paused(): vm.resume() else: raise error.TestFail( "Revert command successed, but VM is not " "paused after reverting with --paused" " option.") # login vm. session = vm.wait_for_login() # Check the result of revert. status, output = session.cmd_status_output("cat %s" % tmp_file_path) logging.debug("After revert cat file output='%s'", output) if not status: raise error.TestFail("Tmp file exists, revert failed.") # Close the session. session.close() # Test delete snapshot without "--metadata", delete external disk # snapshot will fail for now. # Only do this when snapshot creat succeed which filtered in cfg file. if snapshot_del_test: if snapshot_name: del_result = virsh.snapshot_delete(vm_name, snapshot_name, debug=True, ignore_status=True) del_status = del_result.exit_status snap_xml_path = snap_cfg_path + "%s.xml" % snapshot_name if del_status: if not status_error: raise error.TestFail("Failed to delete snapshot.") else: if not os.path.exists(snap_xml_path): raise error.TestFail( "Snapshot xml file %s missing" % snap_xml_path) else: if status_error: err_msg = "Snapshot delete succeed but expect fail." 
raise error.TestFail(err_msg) else: if os.path.exists(snap_xml_path): raise error.TestFail("Snapshot xml file %s still" % snap_xml_path + " exist") finally: if vm.is_alive(): vm.destroy(gracefully=False) virsh.detach_disk(vm_name, target="vdf", extra="--persistent") if image: image.remove() if del_status and snapshot_name: virsh.snapshot_delete(vm_name, snapshot_name, "--metadata") for disk in snapshot_external_disk: if os.path.exists(disk): os.remove(disk) vmxml_backup.sync("--snapshots-metadata") libvirtd = utils_libvirtd.Libvirtd() if disk_source_protocol == 'gluster': utlv.setup_or_cleanup_gluster(False, vol_name, brick_path) if multi_gluster_disks: brick_path = os.path.join(tmp_dir, "gluster-pool2") utlv.setup_or_cleanup_gluster(False, "gluster-vol2", brick_path) libvirtd.restart() if snapshot_xml_path: if os.path.exists(snapshot_xml_path): os.unlink(snapshot_xml_path) if pvt: try: pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image, source_name=vol_name) except error.TestFail, detail: libvirtd.restart() logging.error(str(detail))
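The snapshot test above covers many disk types and option combinations; stripped to its core, the create/revert/cleanup cycle looks roughly like the sketch below, assuming the avocado-vt virsh wrapper, an internal-snapshot-capable disk format (e.g. qcow2) and a hypothetical domain name.

import re

from virttest import virsh


def snapshot_roundtrip(vm_name):
    """Create an internal snapshot, revert to it, then drop its metadata."""
    result = virsh.snapshot_create(vm_name, "", debug=True)
    if result.exit_status != 0:
        raise RuntimeError("snapshot-create failed: %s" % result.stderr.strip())
    # Default snapshot names are numeric timestamps printed in the output.
    snap_name = re.search(r"\d+", result.stdout.strip()).group(0)
    result = virsh.snapshot_revert(vm_name, snap_name, "", debug=True)
    if result.exit_status != 0:
        raise RuntimeError("snapshot-revert failed: %s" % result.stderr.strip())
    virsh.snapshot_delete(vm_name, snap_name, "--metadata")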
def run(test, params, env): """ Test virsh snapshot command when disk in all kinds of type. (1). Init the variables from params. (2). Create a image by specifice format. (3). Attach disk to vm. (4). Snapshot create. (5). Snapshot revert. (6). cleanup. """ # Init variables. vm_name = params.get("main_vm", "virt-tests-vm1") vm = env.get_vm(vm_name) image_format = params.get("snapshot_image_format", "qcow2") status_error = ("yes" == params.get("status_error", "no")) snapshot_from_xml = ("yes" == params.get("snapshot_from_xml", "no")) snapshot_current = ("yes" == params.get("snapshot_current", "no")) snapshot_revert_paused = ("yes" == params.get("snapshot_revert_paused", "no")) # Do xml backup for final recovery vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) # Some variable for xmlfile of snapshot. snapshot_memory = params.get("snapshot_memory", "internal") snapshot_disk = params.get("snapshot_disk", "internal") # Get a tmp_dir. tmp_dir = data_dir.get_tmp_dir() # Create a image. params['image_name'] = "snapshot_test" params['image_format'] = image_format params['image_size'] = "1M" image = qemu_storage.QemuImg(params, tmp_dir, "snapshot_test") img_path, _ = image.create(params) # Do the attach action. result = virsh.attach_disk(vm_name, source=img_path, target="vdf", extra="--persistent --subdriver %s" % image_format) if result.exit_status: raise error.TestNAError("Failed to attach disk %s to VM." "Detail: %s." % (img_path, result.stderr)) # Init snapshot_name snapshot_name = None snapshot_external_disk = [] try: # Create snapshot. if snapshot_from_xml: snapshot_name = "snapshot_test" lines = ["<domainsnapshot>\n", "<name>%s</name>\n" % snapshot_name, "<description>Snapshot Test</description>\n"] if snapshot_memory == "external": memory_external = os.path.join(tmp_dir, "snapshot_memory") snapshot_external_disk.append(memory_external) lines.append("<memory snapshot=\'%s\' file='%s'/>\n" % (snapshot_memory, memory_external)) else: lines.append("<memory snapshot='%s'/>\n" % snapshot_memory) # Add all disks into xml file. disks = vm.get_disk_devices().values() lines.append("<disks>\n") for disk in disks: lines.append("<disk name='%s' snapshot='%s'>\n" % (disk['source'], snapshot_disk)) if snapshot_disk == "external": disk_external = os.path.join(tmp_dir, "%s.snap" % os.path.basename(disk['source'])) snapshot_external_disk.append(disk_external) lines.append("<source file='%s'/>\n" % disk_external) lines.append("</disk>\n") lines.append("</disks>\n") lines.append("</domainsnapshot>") snapshot_xml_path = "%s/snapshot_xml" % tmp_dir snapshot_xml_file = open(snapshot_xml_path, "w") snapshot_xml_file.writelines(lines) snapshot_xml_file.close() snapshot_result = virsh.snapshot_create( vm_name, ("--xmlfile %s" % snapshot_xml_path)) if snapshot_result.exit_status: if status_error: return else: raise error.TestFail("Failed to create snapshot. Error:%s." % snapshot_result.stderr.strip()) else: options = "" snapshot_result = virsh.snapshot_create(vm_name, options) if snapshot_result.exit_status: if status_error: return else: raise error.TestFail("Failed to create snapshot. Error:%s." 
% snapshot_result.stderr.strip()) snapshot_name = re.search( "\d+", snapshot_result.stdout.strip()).group(0) if snapshot_current: lines = ["<domainsnapshot>\n", "<description>Snapshot Test</description>\n", "<state>running</state>\n", "<creationTime>%s</creationTime>" % snapshot_name, "</domainsnapshot>"] snapshot_xml_path = "%s/snapshot_xml" % tmp_dir snapshot_xml_file = open(snapshot_xml_path, "w") snapshot_xml_file.writelines(lines) snapshot_xml_file.close() options += "--redefine %s --current" % snapshot_xml_path if snapshot_result.exit_status: raise error.TestFail("Failed to create snapshot --current." "Error:%s." % snapshot_result.stderr.strip()) if status_error: raise error.TestFail("Success to create snapshot in negative case\n" "Detail: %s" % snapshot_result) # Touch a file in VM. if vm.is_dead(): vm.start() session = vm.wait_for_login() # Init a unique name for tmp_file. tmp_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"), dir="/tmp") tmp_file_path = tmp_file.name tmp_file.close() status, output = session.cmd_status_output("touch %s" % tmp_file_path) if status: raise error.TestFail("Touch file in vm failed. %s" % output) session.close() # Destroy vm for snapshot revert. virsh.destroy(vm_name) # Revert snapshot. revert_options = "" if snapshot_revert_paused: revert_options += " --paused" revert_result = virsh.snapshot_revert(vm_name, snapshot_name, revert_options) if revert_result.exit_status: raise error.TestFail( "Revert snapshot failed. %s" % revert_result.stderr.strip()) if vm.is_dead(): raise error.TestFail("Revert snapshot failed.") if snapshot_revert_paused: if vm.is_paused(): vm.resume() else: raise error.TestFail("Revert command successed, but VM is not " "paused after reverting with --paused option.") # login vm. session = vm.wait_for_login() # Check the result of revert. status, output = session.cmd_status_output("cat %s" % tmp_file_path) if not status: raise error.TestFail("Tmp file exists, revert failed.") # Close the session. session.close() finally: virsh.detach_disk(vm_name, target="vdf", extra="--persistent") image.remove() if snapshot_name: virsh.snapshot_delete(vm_name, snapshot_name, "--metadata") for disk in snapshot_external_disk: if os.path.exists(disk): os.remove(disk) vmxml_backup.sync("--snapshots-metadata")
exception = False
try:
    # Change the disk of the vm to shared disk
    if vm.is_alive():
        vm.destroy(gracefully=False)
    devices = vm.get_blk_devices()
    for device in devices:
        s_detach = virsh.detach_disk(vm_name, device, "--config", debug=True)
        if s_detach.exit_status != 0:
            logging.error("Detach %s failed before test.", device)
    subdriver = utils_test.get_image_info(shared_storage)['format']
    extra_attach = ("--config --driver qemu --subdriver %s --cache %s"
                    % (subdriver, disk_cache))
    s_attach = virsh.attach_disk(vm_name, shared_storage, "vda", extra_attach,
                                 debug=True)
    if s_attach.exit_status != 0:
        logging.error("Attach vda failed before test.")

    # Attach a scsi device for special testcases
    if attach_scsi_disk:
        shared_dir = os.path.dirname(shared_storage)
        scsi_disk = "%s/scsi_test.img" % shared_dir
        utils.run("qemu-img create -f qcow2 %s 100M" % scsi_disk)
        s_attach = virsh.attach_disk(vm_name, scsi_disk, "sdb", extra_attach,
                                     debug=True)
        if s_attach.exit_status != 0:
            logging.error("Attach another scsi disk failed.")

    vm.start()
    vm.wait_for_login()
ret = virsh.snapshot_create_as(vm_name, "s1 %s" % snapshot_option)
libvirt.check_exit_status(ret, snapshot_error)

# Start the VM.
vm.start()
if status_error:
    raise error.TestFail("VM started unexpectedly")

# Hotplug the disks.
if device_at_dt_disk:
    for i in range(len(disks)):
        attach_option = ""
        if len(device_attach_option) > i:
            attach_option = device_attach_option[i]
        ret = virsh.attach_disk(vm_name, disks[i]["source"],
                                device_targets[i], attach_option)
        libvirt.check_exit_status(ret)
elif hotplug:
    for i in range(len(disks_xml)):
        disks_xml[i].xmltreefile.write()
        attach_option = ""
        if len(device_attach_option) > i:
            attach_option = device_attach_option[i]
        ret = virsh.attach_device(vm_name, disks_xml[i].xml,
                                  flagstr=attach_option)
        attach_error = False
        if len(device_attach_error) > i:
            attach_error = "yes" == device_attach_error[i]
        libvirt.check_exit_status(ret, attach_error)
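A minimal sketch of the disk hotplug path used above, assuming the avocado-vt wrappers shown in these snippets and a running domain; the helper name, scratch image path and target "vdb" are hypothetical.

from virttest import virsh
from virttest.utils_test import libvirt


def hotplug_scratch_disk(vm_name, image_path, target="vdb"):
    """Create a 1G qcow2 image, hot-attach it, then hot-detach it again."""
    libvirt.create_local_disk("file", path=image_path, size="1G",
                              disk_format="qcow2")
    ret = virsh.attach_disk(vm_name, image_path, target,
                            "--subdriver qcow2 --live", debug=True)
    libvirt.check_exit_status(ret)
    ret = virsh.detach_disk(vm_name, target, extra="--live", debug=True)
    libvirt.check_exit_status(ret)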
def run(test, params, env): """ Test blkdevio tuning Positive test has covered the following combination. ------------------------- | total | read | write | ------------------------- | 0 | 0 | 0 | | non-0 | 0 | 0 | | 0 | non-0 | non-0 | | 0 | non-0 | 0 | | 0 | 0 | non-0 | ------------------------- Negative test has covered unsupported combination and invalid command arguments. NB: only qemu-kvm-rhev supports block I/O throttling on >= RHEL6.5, the qemu-kvm is okay for block I/O throttling on >= RHEL7.0. """ # Run test case vm_name = params.get("main_vm") vm = env.get_vm(vm_name) start_vm = params.get("start_vm", "yes") change_parameters = params.get("change_parameters", "no") attach_disk = "yes" == params.get("attach_disk", "no") attach_before_start = "yes" == params.get("attach_before_start", "yes") disk_type = params.get("disk_type", 'file') disk_format = params.get("disk_format", 'qcow2') disk_bus = params.get("disk_bus", 'virtio') disk_alias = params.get("disk_alias") attach_options = params.get("attach_options") original_vm_xml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name) # Used for default device of blkdeviotune device = params.get("device_name", "vmblk") sys_image_target = vm.get_first_disk_devices()["target"] # Make sure vm is down if start not requested if (start_vm == "no" or attach_before_start) and vm and vm.is_alive(): vm.destroy() if attach_disk: disk_source = tempfile.mktemp(dir=data_dir.get_tmp_dir()) libvirt.create_local_disk(disk_type, path=disk_source, size='1', disk_format=disk_format) attach_extra = "" if disk_alias: attach_extra += " --alias %s" % disk_alias if disk_bus: attach_extra += " --targetbus %s" % disk_bus if disk_format: attach_extra += " --subdriver %s" % disk_format if attach_options: attach_extra += " %s" % attach_options # Coldplug disk if attach_disk and attach_before_start: ret = virsh.attach_disk(vm_name, disk_source, device, extra=attach_extra, debug=True) libvirt.check_exit_status(ret) # Recover previous running guest if vm and not vm.is_alive() and start_vm == "yes": try: vm.start() vm.wait_for_login().close() except (virt_vm.VMError, remote.LoginError) as detail: vm.destroy() test.fail(str(detail)) # Hotplug disk if attach_disk and not attach_before_start: ret = virsh.attach_disk(vm_name, disk_source, device, extra=attach_extra, debug=True) libvirt.check_exit_status(ret) test_dict = dict(params) test_dict['vm'] = vm if device == "vmblk": test_dict['device_name'] = sys_image_target # Make sure libvirtd service is running if not utils_libvirtd.libvirtd_is_running(): test.cancel("libvirt service is not running!") # Positive and negative testing try: if change_parameters == "no": get_blkdevio_parameter(test_dict, test) else: set_blkdevio_parameter(test_dict, test) finally: # Restore guest original_vm_xml.sync()
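The blkdeviotune test above drives the tuning through helper functions; as a rough illustration of the underlying command, here is a sketch using the plain virsh CLI via the avocado process helper. The helper name and the throughput limit are illustrative only.

from avocado.utils import process


def throttle_disk(vm_name, target="vda", total_bytes_sec=10485760):
    """Cap total throughput on one disk of a running domain, then read the settings back."""
    process.run("virsh blkdeviotune %s %s --total-bytes-sec %d"
                % (vm_name, target, total_bytes_sec), shell=True)
    # With no limit options, blkdeviotune prints the current settings.
    result = process.run("virsh blkdeviotune %s %s" % (vm_name, target),
                         shell=True)
    return result.stdout_text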
def run_svirt_attach_disk(test, params, env): """ Test svirt in adding disk to VM. (1).Init variables for test. (2).Create a image to attached to VM. (3).Attach disk. (4).Start VM and check result. """ # Get general variables. status_error = ('yes' == params.get("status_error", 'no')) host_sestatus = params.get("svirt_attach_disk_host_selinux", "enforcing") # Get variables about seclabel for VM. sec_type = params.get("svirt_attach_disk_vm_sec_type", "dynamic") sec_model = params.get("svirt_attach_disk_vm_sec_model", "selinux") sec_label = params.get("svirt_attach_disk_vm_sec_label", None) sec_relabel = params.get("svirt_attach_disk_vm_sec_relabel", "yes") sec_dict = {'type': sec_type, 'model': sec_model, 'label': sec_label, 'relabel': sec_relabel} # Get variables about VM and get a VM object and VMXML instance. vm_name = params.get("main_vm") vm = env.get_vm(vm_name) vmxml = VMXML.new_from_inactive_dumpxml(vm_name) backup_xml = vmxml.copy() # Get varialbles about image. img_label = params.get('svirt_attach_disk_disk_label') img_name = "svirt_disk" # Default label for the other disks. # To ensure VM is able to access other disks. default_label = params.get('svirt_attach_disk_disk_default_label', None) # Set selinux of host. backup_sestatus = utils_selinux.get_status() utils_selinux.set_status(host_sestatus) # Set the default label to other disks of vm. disks = vm.get_disk_devices() for disk in disks.values(): utils_selinux.set_context_of_file(filename=disk['source'], context=default_label) # Init a QemuImg instance. params['image_name'] = img_name tmp_dir = data_dir.get_tmp_dir() image = qemu_storage.QemuImg(params, tmp_dir, img_name) # Create a image. img_path, result = image.create(params) # Set the context of the image. utils_selinux.set_context_of_file(filename=img_path, context=img_label) # Set the context of the VM. vmxml.set_seclabel(sec_dict) vmxml.sync() # Do the attach action. try: virsh.attach_disk(vm_name, source=img_path, target="vdf", extra="--persistent", ignore_status=False) except error.CmdError: raise error.TestFail("Attach disk %s to vdf on VM %s failed." % (img_path, vm.name)) # Check result. try: # Start VM to check the VM is able to access the image or not. try: vm.start() # Start VM successfully. # VM with set seclabel can access the image with the # set context. if status_error: raise error.TestFail('Test successed in negative case.') except virt_vm.VMStartError, e: # Starting VM failed. # VM with set seclabel can not access the image with the # set context. if not status_error: raise error.TestFail("Test failed in positive case." "error: %s" % e) finally: # clean up try: virsh.detach_disk(vm_name, target="vdf", extra="--persistent", ignore_status=False) except error.CmdError: raise error.TestFail("Detach disk 'vdf' from VM %s failed." % vm.name) image.remove() backup_xml.sync() utils_selinux.set_status(backup_sestatus)
def trigger_events(dom, events_list=[]): """ Trigger various events in events_list :param dom: the vm objects corresponding to the domain :return: the expected output that virsh event command prints out """ expected_events_list = [] tmpdir = data_dir.get_tmp_dir() save_path = os.path.join(tmpdir, "%s_event.save" % dom.name) new_disk = os.path.join(tmpdir, "%s_new_disk.img" % dom.name) print dom.name try: for event in events_list: if event in ['start', 'restore']: if dom.is_alive(): dom.destroy() else: if not dom.is_alive(): dom.start() dom.wait_for_login().close() if event == "start": virsh.start(dom.name, **virsh_dargs) expected_events_list.append("'lifecycle' for %s:" " Started Booted") dom.wait_for_login().close() elif event == "save": virsh.save(dom.name, save_path, **virsh_dargs) expected_events_list.append("'lifecycle' for %s:" " Stopped Saved") elif event == "restore": if not os.path.exists(save_path): logging.error("%s not exist", save_path) else: virsh.restore(save_path, **virsh_dargs) expected_events_list.append("'lifecycle' for %s:" " Started Restored") elif event == "destroy": virsh.destroy(dom.name, **virsh_dargs) expected_events_list.append("'lifecycle' for %s:" " Stopped Destroyed") elif event == "reset": virsh.reset(dom.name, **virsh_dargs) expected_events_list.append("'reboot' for %s") elif event == "vcpupin": virsh.vcpupin(dom.name, '0', '0', **virsh_dargs) expected_events_list.append("'tunable' for %s:" "\n\tcputune.vcpupin0: 0") elif event == "emulatorpin": virsh.emulatorpin(dom.name, '0', **virsh_dargs) expected_events_list.append("'tunable' for %s:" "\n\tcputune.emulatorpin: 0") elif event == "setmem": mem_size = int(params.get("mem_size", 512000)) virsh.setmem(dom.name, mem_size, **virsh_dargs) expected_events_list.append("'balloon-change' for %s:") elif event == "detach-disk": if not os.path.exists(new_disk): open(new_disk, 'a').close() # Attach disk firstly, this event will not be catched virsh.attach_disk(dom.name, new_disk, 'vdb', **virsh_dargs) virsh.detach_disk(dom.name, 'vdb', **virsh_dargs) expected_events_list.append("'device-removed' for %s:" " virtio-disk1") else: raise error.TestError("Unsupported event: %s" % event) # Event may not received immediately time.sleep(3) finally: if os.path.exists(save_path): os.unlink(save_path) if os.path.exists(new_disk): os.unlink(new_disk) return [(dom.name, event) for event in expected_events_list]
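The trigger_events helper above generates events; the complementary consumer side can be sketched with the virsh event command, assuming the avocado process helper. The 10-second timeout and the helper name are illustrative.

from avocado.utils import process


def watch_lifecycle_events(vm_name, timeout=10):
    """Block up to `timeout` seconds and return any lifecycle event lines printed."""
    result = process.run("virsh event --domain %s --event lifecycle --timeout %d"
                         % (vm_name, timeout), shell=True, ignore_status=True)
    return result.stdout_text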
def run(test, params, env): """ Test snapshot-create-as command Make sure that the clean repo can be used because qemu-guest-agent need to be installed in guest The command create a snapshot (disk and RAM) from arguments which including the following point * virsh snapshot-create-as --print-xml --diskspec --name --description * virsh snapshot-create-as --print-xml with multi --diskspec * virsh snapshot-create-as --print-xml --memspec * virsh snapshot-create-as --description * virsh snapshot-create-as --no-metadata * virsh snapshot-create-as --no-metadata --print-xml (negative test) * virsh snapshot-create-as --atomic --disk-only * virsh snapshot-create-as --quiesce --disk-only (positive and negative) * virsh snapshot-create-as --reuse-external * virsh snapshot-create-as --disk-only --diskspec * virsh snapshot-create-as --memspec --reuse-external --atomic(negative) * virsh snapshot-create-as --disk-only and --memspec (negative) * Create multi snapshots with snapshot-create-as * Create snapshot with name a--a a--a--snap1 """ if not virsh.has_help_command('snapshot-create-as'): test.cancel("This version of libvirt does not support " "the snapshot-create-as test") vm_name = params.get("main_vm") status_error = params.get("status_error", "no") options = params.get("snap_createas_opts") multi_num = params.get("multi_num", "1") diskspec_num = params.get("diskspec_num", "1") bad_disk = params.get("bad_disk") reuse_external = "yes" == params.get("reuse_external", "no") start_ga = params.get("start_ga", "yes") domain_state = params.get("domain_state") memspec_opts = params.get("memspec_opts") config_format = "yes" == params.get("config_format", "no") snapshot_image_format = params.get("snapshot_image_format") diskspec_opts = params.get("diskspec_opts") create_autodestroy = 'yes' == params.get("create_autodestroy", "no") unix_channel = "yes" == params.get("unix_channel", "yes") dac_denial = "yes" == params.get("dac_denial", "no") check_json_no_savevm = "yes" == params.get("check_json_no_savevm", "no") disk_snapshot_attr = params.get('disk_snapshot_attr', 'external') set_snapshot_attr = "yes" == params.get("set_snapshot_attr", "no") # gluster related params replace_vm_disk = "yes" == params.get("replace_vm_disk", "no") disk_src_protocol = params.get("disk_source_protocol") restart_tgtd = params.get("restart_tgtd", "no") vol_name = params.get("vol_name") tmp_dir = data_dir.get_tmp_dir() pool_name = params.get("pool_name", "gluster-pool") brick_path = os.path.join(tmp_dir, pool_name) uri = params.get("virsh_uri") usr = params.get('unprivileged_user') if usr: if usr.count('EXAMPLE'): usr = '******' if disk_src_protocol == 'iscsi': if not libvirt_version.version_compare(1, 0, 4): test.cancel("'iscsi' disk doesn't support in" " current libvirt version.") if not libvirt_version.version_compare(1, 1, 1): if params.get('setup_libvirt_polkit') == 'yes': test.cancel("API acl test not supported in current" " libvirt version.") if not libvirt_version.version_compare(1, 2, 7): # As bug 1017289 closed as WONTFIX, the support only # exist on 1.2.7 and higher if disk_src_protocol == 'gluster': test.cancel("Snapshot on glusterfs not support in " "current version. Check more info with " "https://bugzilla.redhat.com/buglist.cgi?" "bug_id=1017289,1032370") if libvirt_version.version_compare(5, 5, 0): # libvirt-5.5.0-2 commit 68e1a05f starts to allow --no-metadata and # --print-xml to be used together. 
if "--no-metadata" in options and "--print-xml" in options: logging.info("--no-metadata and --print-xml can be used together " "in this libvirt version. Not expecting a failure.") status_error = "no" opt_names = locals() if memspec_opts is not None: mem_options = compose_disk_options(test, params, memspec_opts) # if the parameters have the disk without "file=" then we only need to # add testdir for it. if mem_options is None: mem_options = os.path.join(data_dir.get_tmp_dir(), memspec_opts) options += " --memspec " + mem_options tag_diskspec = 0 dnum = int(diskspec_num) if diskspec_opts is not None: tag_diskspec = 1 opt_names['diskopts_1'] = diskspec_opts # diskspec_opts[n] is used in cfg when more than 1 --diskspec is used if dnum > 1: tag_diskspec = 1 for i in range(1, dnum + 1): opt_names["diskopts_%s" % i] = params.get("diskspec_opts%s" % i) if tag_diskspec == 1: for i in range(1, dnum + 1): disk_options = compose_disk_options(test, params, opt_names["diskopts_%s" % i]) options += " --diskspec " + disk_options logging.debug("options are %s", options) vm = env.get_vm(vm_name) option_dict = {} option_dict = utils_misc.valued_option_dict(options, r' --(?!-)') logging.debug("option_dict is %s", option_dict) # A backup of original vm vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) logging.debug("original xml is %s", vmxml_backup) # Generate empty image for negative test if bad_disk is not None: bad_disk = os.path.join(data_dir.get_tmp_dir(), bad_disk) with open(bad_disk, 'w') as bad_file: pass # Generate external disk if reuse_external: disk_path = '' for i in range(dnum): external_disk = "external_disk%s" % i if params.get(external_disk): disk_path = os.path.join(data_dir.get_tmp_dir(), params.get(external_disk)) process.run("qemu-img create -f qcow2 %s 1G" % disk_path, shell=True) # Only chmod of the last external disk for negative case if dac_denial: process.run("chmod 500 %s" % disk_path, shell=True) qemu_conf = None libvirtd_conf = None libvirtd_log_path = None libvirtd = utils_libvirtd.Libvirtd() try: # Config "snapshot_image_format" option in qemu.conf if config_format: qemu_conf = utils_config.LibvirtQemuConfig() qemu_conf.snapshot_image_format = snapshot_image_format logging.debug("the qemu config file content is:\n %s" % qemu_conf) libvirtd.restart() if check_json_no_savevm: libvirtd_conf = utils_config.LibvirtdConfig() libvirtd_conf["log_level"] = '1' libvirtd_conf["log_filters"] = '"1:json 3:remote 4:event"' libvirtd_log_path = os.path.join(data_dir.get_tmp_dir(), "libvirtd.log") libvirtd_conf["log_outputs"] = '"1:file:%s"' % libvirtd_log_path logging.debug("the libvirtd config file content is:\n %s" % libvirtd_conf) libvirtd.restart() if replace_vm_disk: libvirt.set_vm_disk(vm, params, tmp_dir) if set_snapshot_attr: if vm.is_alive(): vm.destroy(gracefully=False) vmxml_new = vm_xml.VMXML.new_from_dumpxml(vm_name) disk_xml = vmxml_backup.get_devices(device_type="disk")[0] vmxml_new.del_device(disk_xml) # set snapshot attribute in disk xml disk_xml.snapshot = disk_snapshot_attr new_disk = disk.Disk(type_name='file') new_disk.xmltreefile = disk_xml.xmltreefile vmxml_new.add_device(new_disk) logging.debug("The vm xml now is: %s" % vmxml_new.xmltreefile) vmxml_new.sync() vm.start() # Start qemu-ga on guest if have --quiesce if unix_channel and options.find("quiesce") >= 0: vm.prepare_guest_agent() session = vm.wait_for_login() if start_ga == "no": # The qemu-ga could be running and should be killed session.cmd("kill -9 `pidof qemu-ga`") # Check if the qemu-ga get killed 
stat_ps = session.cmd_status("ps aux |grep [q]emu-ga") if not stat_ps: # As managed by systemd and set as autostart, qemu-ga # could be restarted, so use systemctl to stop it. session.cmd("systemctl stop qemu-guest-agent") stat_ps = session.cmd_status("ps aux |grep [q]emu-ga") if not stat_ps: test.cancel("Fail to stop agent in " "guest") if domain_state == "paused": virsh.suspend(vm_name) else: # Remove channel if exist if vm.is_alive(): vm.destroy(gracefully=False) xml_inst = vm_xml.VMXML.new_from_dumpxml(vm_name) xml_inst.remove_agent_channels() vm.start() # Record the previous snapshot-list snaps_before = virsh.snapshot_list(vm_name) # Attach disk before create snapshot if not print xml and multi disks # specified in cfg if dnum > 1 and "--print-xml" not in options: for i in range(1, dnum): disk_path = os.path.join(data_dir.get_tmp_dir(), 'disk%s.qcow2' % i) process.run("qemu-img create -f qcow2 %s 200M" % disk_path, shell=True) virsh.attach_disk(vm_name, disk_path, 'vd%s' % list(string.ascii_lowercase)[i], debug=True) # Run virsh command # May create several snapshots, according to configuration for count in range(int(multi_num)): if create_autodestroy: # Run virsh command in interactive mode vmxml_backup.undefine() vp = virsh.VirshPersistent() vp.create(vmxml_backup['xml'], '--autodestroy') cmd_result = vp.snapshot_create_as(vm_name, options, ignore_status=True, debug=True) vp.close_session() vmxml_backup.define() else: cmd_result = virsh.snapshot_create_as(vm_name, options, unprivileged_user=usr, uri=uri, ignore_status=True, debug=True) # for multi snapshots without specific snapshot name, the # snapshot name is using time string with 1 second # incremental, to avoid get snapshot failure with same name, # sleep 1 second here. if int(multi_num) > 1: time.sleep(1.1) output = cmd_result.stdout.strip() status = cmd_result.exit_status # check status_error if status_error == "yes": if status == 0: test.fail("Run successfully with wrong command!") else: # Check memspec file should be removed if failed if (options.find("memspec") >= 0 and options.find("atomic") >= 0): if os.path.isfile(option_dict['memspec']): os.remove(option_dict['memspec']) test.fail("Run failed but file %s exist" % option_dict['memspec']) else: logging.info("Run failed as expected and memspec" " file already been removed") # Check domain xml is not updated if reuse external fail elif reuse_external and dac_denial: output = virsh.dumpxml(vm_name).stdout.strip() if "reuse_external" in output: test.fail("Domain xml should not be " "updated with snapshot image") else: logging.info("Run failed as expected") elif status_error == "no": if status != 0: test.fail("Run failed with right command: %s" % output) else: # Check the special options snaps_list = virsh.snapshot_list(vm_name) logging.debug("snaps_list is %s", snaps_list) check_snapslist(test, vm_name, options, option_dict, output, snaps_before, snaps_list) # For cover bug 872292 if check_json_no_savevm: pattern = "The command savevm has not been found" with open(libvirtd_log_path) as f: for line in f: if pattern in line and "error" in line: test.fail("'%s' was found: %s" % (pattern, line)) finally: if vm.is_alive(): vm.destroy() # recover domain xml xml_recover(vmxml_backup) path = "/var/lib/libvirt/qemu/snapshot/" + vm_name if os.path.isfile(path): test.fail("Still can find snapshot metadata") if disk_src_protocol == 'gluster': gluster.setup_or_cleanup_gluster(False, brick_path=brick_path, **params) libvirtd.restart() if disk_src_protocol == 'iscsi': 
libvirt.setup_or_cleanup_iscsi(False, restart_tgtd=restart_tgtd) # rm bad disks if bad_disk is not None: os.remove(bad_disk) # rm attach disks and reuse external disks if dnum > 1 and "--print-xml" not in options: for i in range(dnum): disk_path = os.path.join(data_dir.get_tmp_dir(), 'disk%s.qcow2' % i) if os.path.exists(disk_path): os.unlink(disk_path) if reuse_external: external_disk = "external_disk%s" % i disk_path = os.path.join(data_dir.get_tmp_dir(), params.get(external_disk)) if os.path.exists(disk_path): os.unlink(disk_path) # restore config if config_format and qemu_conf: qemu_conf.restore() if libvirtd_conf: libvirtd_conf.restore() if libvirtd_conf or (config_format and qemu_conf): libvirtd.restart() if libvirtd_log_path and os.path.exists(libvirtd_log_path): os.unlink(libvirtd_log_path)
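Of the many snapshot-create-as variants exercised above, the disk-only external case is the most common; a minimal sketch follows, assuming the avocado-vt wrappers. The snapshot name, target "vda" and overlay path are illustrative.

from virttest import virsh
from virttest.utils_test import libvirt


def disk_only_snapshot(vm_name, overlay_path, snap_name="snap1", target="vda"):
    """Take an external, disk-only snapshot that writes new data to an overlay file."""
    options = ("%s --disk-only --diskspec %s,snapshot=external,file=%s"
               % (snap_name, target, overlay_path))
    ret = virsh.snapshot_create_as(vm_name, options, debug=True)
    libvirt.check_exit_status(ret)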
def run(test, params, env): """ Test virsh blockresize command for block device of domain. 1) Init the variables from params. 2) Create an image with specified format. 3) Attach a disk image to vm. 4) Test blockresize for the disk 5) Detach the disk """ # MAIN TEST CODE ### # Process cartesian parameters vm_name = params.get("main_vm", "virt-tests-vm1") image_format = params.get("disk_image_format", "qcow2") initial_disk_size = params.get("initial_disk_size", "1M") status_error = "yes" == params.get("status_error", "yes") resize_value = params.get("resize_value") virsh_dargs = {'debug': True} # Create an image. tmp_dir = data_dir.get_tmp_dir() image_path = os.path.join(tmp_dir, "blockresize_test") logging.info("Create image: %s, " "size %s, " "format %s", image_path, initial_disk_size, image_format) cmd = "qemu-img create -f %s %s %s" % (image_format, image_path, initial_disk_size) status, output = commands.getstatusoutput(cmd) if status: raise error.TestError("Creating image file %s failed: %s" % \ (image_path, output)) # Hotplug the image as disk device result = virsh.attach_disk(vm_name, source=image_path, target="vdd", extra=" --subdriver %s" % image_format) if result.exit_status: raise error.TestError("Failed to attach disk %s to VM: %s." % (image_path, result.stderr)) if resize_value == "over_size": # Use byte unit for over_size test resize_value = "%s" % OVER_SIZE + "b" # Run the test try: result = virsh.blockresize(vm_name, image_path, resize_value, **virsh_dargs) status = result.exit_status err = result.stderr.strip() # Check status_error if status_error: if status == 0 or err == "": raise error.TestFail("Expect failure, but run successfully!") # No need to do more test return else: if status != 0 or err != "": raise error.TestFail("Run failed with right " "virsh blockresize command") if resize_value[-1] in "bkm": expected_size = 1024*1024 elif resize_value[-1] == "g": expected_size = 1024*1024*1024 else: raise error.TestError("Unknown infomation of unit") image_info = utils_misc.get_image_info(image_path) actual_size = int(image_info['vsize']) logging.info("The expected block size is %s bytes," "the actual block size is %s bytes", expected_size, actual_size) if int(actual_size) != int(expected_size): raise error.TestFail("New blocksize set by blockresize is " "different from actual size from " "'qemu-img info'") finally: virsh.detach_disk(vm_name, target="vdd") if os.path.exists(image_path): os.remove(image_path)
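The blockresize test above can be reduced to the grow-and-verify flow sketched below, assuming the avocado-vt wrappers it already uses; the 2G size, the expected byte count and the helper name are illustrative, and the image must already be attached to the running domain.

from virttest import utils_misc, virsh


def resize_and_check(vm_name, image_path, new_size="2G",
                     expected_bytes=2 * 1024 ** 3):
    """Grow a block device with blockresize and confirm qemu-img reports the new size."""
    result = virsh.blockresize(vm_name, image_path, new_size, debug=True)
    if result.exit_status != 0:
        raise RuntimeError("blockresize failed: %s" % result.stderr.strip())
    actual = int(utils_misc.get_image_info(image_path)['vsize'])
    if actual != expected_bytes:
        raise RuntimeError("expected %d bytes, qemu-img reports %d"
                           % (expected_bytes, actual))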
def run(test, params, env): """ Test virsh {at|de}tach-disk command. The command can attach new disk/detach disk. 1.Prepare test environment,destroy or suspend a VM. 2.Perform virsh attach/detach-disk operation. 3.Recover test environment. 4.Confirm the test result. """ def check_vm_partition(vm, device, os_type, target_name, old_parts): """ Check VM disk's partition. :param vm. VM guest. :param os_type. VM's operation system type. :param target_name. Device target type. :return: True if check successfully. """ logging.info("Checking VM partittion...") if vm.is_dead(): vm.start() try: attached = False if os_type == "linux": session = vm.wait_for_login() new_parts = utils_disk.get_parts_list(session) added_parts = list(set(new_parts).difference(set(old_parts))) logging.debug("Added parts: %s" % added_parts) for i in range(len(added_parts)): if device == "disk": if target_name.startswith("vd"): if added_parts[i].startswith("vd"): attached = True elif target_name.startswith( "hd") or target_name.startswith("sd"): if added_parts[i].startswith("sd"): attached = True elif device == "cdrom": if added_parts[i].startswith("sr"): attached = True session.close() return attached except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e: logging.error(str(e)) return False def acpiphp_module_modprobe(vm, os_type): """ Add acpiphp module if VM's os type is rhle5.* :param vm. VM guest. :param os_type. VM's operation system type. :return: True if operate successfully. """ if vm.is_dead(): vm.start() try: if os_type == "linux": session = vm.wait_for_login() s_rpm, _ = session.cmd_status_output("rpm --version") # If status is different from 0, this # guest OS doesn't support the rpm package # manager if s_rpm: session.close() return True _, o_vd = session.cmd_status_output( "rpm -qa | grep redhat-release") if o_vd.find("5Server") != -1: s_mod, o_mod = session.cmd_status_output( "modprobe acpiphp") del o_mod if s_mod != 0: session.close() return False session.close() return True except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e: logging.error(str(e)) return False def check_shareable(at_with_shareable, test_twice): """ check if current libvirt version support shareable option at_with_shareable: True or False. Whether attach disk with shareable option test_twice: True or False. Whether perform operations twice return: True or cancel the test """ if at_with_shareable or test_twice: if libvirt_version.version_compare(3, 9, 0): return True else: test.cancel( "Current libvirt version doesn't support shareable feature" ) # Get test command. 
test_cmd = params.get("at_dt_disk_test_cmd", "attach-disk") vm_ref = params.get("at_dt_disk_vm_ref", "name") at_options = params.get("at_dt_disk_at_options", "") dt_options = params.get("at_dt_disk_dt_options", "") at_with_shareable = "yes" == params.get("at_with_shareable", 'no') pre_vm_state = params.get("at_dt_disk_pre_vm_state", "running") status_error = "yes" == params.get("status_error", 'no') no_attach = params.get("at_dt_disk_no_attach", 'no') os_type = params.get("os_type", "linux") qemu_file_lock = params.get("qemu_file_lock", "") if qemu_file_lock: if utils_misc.compare_qemu_version(2, 9, 0): logging.info('From qemu-kvm-rhev 2.9.0:' 'QEMU image locking, which should prevent multiple ' 'runs of QEMU or qemu-img when a VM is running.') if test_cmd == "detach-disk" or pre_vm_state == "shut off": test.cancel('This case is not supported.') else: logging.info( 'The expect result is failure as opposed with succeed') status_error = True # Disk specific attributes. device = params.get("at_dt_disk_device", "disk") device_source_name = params.get("at_dt_disk_device_source", "attach.img") device_source_format = params.get("at_dt_disk_device_source_format", "raw") device_target = params.get("at_dt_disk_device_target", "vdd") device_disk_bus = params.get("at_dt_disk_bus_type", "virtio") source_path = "yes" == params.get("at_dt_disk_device_source_path", "yes") create_img = "yes" == params.get("at_dt_disk_create_image", "yes") test_twice = "yes" == params.get("at_dt_disk_test_twice", "no") test_type = "yes" == params.get("at_dt_disk_check_type", "no") test_audit = "yes" == params.get("at_dt_disk_check_audit", "no") test_block_dev = "yes" == params.get("at_dt_disk_iscsi_device", "no") test_logcial_dev = "yes" == params.get("at_dt_disk_logical_device", "no") restart_libvirtd = "yes" == params.get("at_dt_disk_restart_libvirtd", "no") detach_disk_with_print_xml = "yes" == params.get( "detach_disk_with_print_xml", "no") vg_name = params.get("at_dt_disk_vg", "vg_test_0") lv_name = params.get("at_dt_disk_lv", "lv_test_0") serial = params.get("at_dt_disk_serial", "") address = params.get("at_dt_disk_address", "") address2 = params.get("at_dt_disk_address2", "") cache_options = params.get("cache_options", "") time_sleep = params.get("time_sleep", 3) if check_shareable(at_with_shareable, test_twice): at_options += " --mode shareable" if serial: at_options += (" --serial %s" % serial) if address2: at_options_twice = at_options + (" --address %s" % address2) if address: at_options += (" --address %s" % address) if cache_options: if cache_options.count("directsync"): if not libvirt_version.version_compare(1, 0, 0): test.cancel("'directsync' cache option doesn't " "support in current libvirt version.") at_options += (" --cache %s" % cache_options) vm_name = params.get("main_vm") vm = env.get_vm(vm_name) # Start vm and get all partions in vm. if vm.is_dead(): vm.start() session = vm.wait_for_login() old_parts = utils_disk.get_parts_list(session) session.close() vm.destroy(gracefully=False) # Back up xml file. backup_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) # Create virtual device file. 
    device_source_path = os.path.join(data_dir.get_tmp_dir(),
                                      device_source_name)
    if test_block_dev:
        device_source = libvirt.setup_or_cleanup_iscsi(True)
        if not device_source:
            # We should skip this case
            test.cancel("Cannot get an iscsi device name on the host")
        if test_logcial_dev:
            lv_utils.vg_create(vg_name, device_source)
            device_source = libvirt.create_local_disk("lvm",
                                                      size="10M",
                                                      vgname=vg_name,
                                                      lvname=lv_name)
            logging.debug("Newly created volume: %s", lv_name)
    else:
        if source_path and create_img:
            device_source = libvirt.create_local_disk(
                "file", path=device_source_path,
                size="1G", disk_format=device_source_format)
        else:
            device_source = device_source_name

    # If we are testing audit, we need to start the auditd service first.
    if test_audit:
        auditd_service = Factory.create_service("auditd")
        if not auditd_service.status():
            auditd_service.start()
        logging.info("Auditd service status: %s" % auditd_service.status())

    # If we are testing a cdrom device, we need to detach hdc in the VM first.
    if device == "cdrom":
        if vm.is_alive():
            vm.destroy(gracefully=False)
        s_detach = virsh.detach_disk(vm_name, device_target, "--config")
        if not s_detach:
            logging.error("Detaching hdc failed before the test.")

    # If we are testing detach-disk, we need to attach a device first.
    if test_cmd == "detach-disk" and no_attach != "yes":
        s_at_options = "--driver qemu --config"
        # Since the locking feature was introduced in libvirt 3.9.0, the
        # shareable option needs to be set if the disk is attached
        # multiple times.
        if check_shareable(at_with_shareable, test_twice):
            s_at_options += " --mode shareable"
        s_attach = virsh.attach_disk(vm_name, device_source, device_target,
                                     s_at_options, debug=True).exit_status
        if s_attach != 0:
            logging.error("Attaching device failed before testing detach-disk")
        else:
            logging.debug("Attaching device succeeded before testing "
                          "detach-disk")
        if test_twice:
            device_target2 = params.get("at_dt_disk_device_target2",
                                        device_target)
            device_source = libvirt.create_local_disk(
                "file", path=device_source_path,
                size="1", disk_format=device_source_format)
            s_attach = virsh.attach_disk(vm_name, device_source,
                                         device_target2,
                                         s_at_options).exit_status
            if s_attach != 0:
                logging.error("Attaching device failed before testing "
                              "detach-disk test_twice")

    vm.start()
    vm.wait_for_login()

    # Load the acpiphp module before testing if the VM's OS is RHEL 5.*.
    if not acpiphp_module_modprobe(vm, os_type):
        test.error("Loading the acpiphp module failed before the test.")

    # Turn the VM into a certain state.
    if pre_vm_state == "paused":
        logging.info("Suspending %s..." % vm_name)
        if vm.is_alive():
            vm.pause()
    elif pre_vm_state == "shut off":
        logging.info("Shutting down %s..." % vm_name)
        if vm.is_alive():
            vm.destroy(gracefully=False)

    # Get the disk count before the test.
    disk_count_before_cmd = vm_xml.VMXML.get_disk_count(vm_name)

    # Test.
    domid = vm.get_id()
    domuuid = vm.get_uuid()

    # Confirm how to reference the VM.
    if vm_ref == "name":
        vm_ref = vm_name
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref == "uuid":
        vm_ref = domuuid
    else:
        vm_ref = ""

    if test_cmd == "attach-disk":
        status = virsh.attach_disk(vm_ref, device_source, device_target,
                                   at_options, debug=True).exit_status
    elif test_cmd == "detach-disk":
        # With the print-xml option, detach-disk only prints the disk XML
        # and does not actually detach the disk.
        if detach_disk_with_print_xml and libvirt_version.version_compare(
                4, 5, 0):
            ret = virsh.detach_disk(vm_ref, device_target, at_options)
            libvirt.check_exit_status(ret)
            cmd = ("echo \"%s\" | grep -A 16 %s"
                   % (ret.stdout.strip(), device_source_name))
            if process.system(cmd, ignore_status=True, shell=True):
                test.error("Checking the disk by source image name failed")
        status = virsh.detach_disk(vm_ref, device_target, dt_options,
                                   debug=True).exit_status

    if restart_libvirtd:
        libvirtd_serv = utils_libvirtd.Libvirtd()
        libvirtd_serv.restart()

    if test_twice:
        device_target2 = params.get("at_dt_disk_device_target2",
                                    device_target)
        device_source = libvirt.create_local_disk(
            "file", path=device_source_path,
            size="1G", disk_format=device_source_format)
        if test_cmd == "attach-disk":
            if address2:
                at_options = at_options_twice
            status = virsh.attach_disk(vm_ref, device_source,
                                       device_target2, at_options,
                                       debug=True).exit_status
        elif test_cmd == "detach-disk":
            status = virsh.detach_disk(vm_ref, device_target2, dt_options,
                                       debug=True).exit_status

    # Resume the guest after the command. On newer libvirt this is fixed,
    # since it used to be a bug: the change in the xml file is done after
    # the guest is resumed.
    if pre_vm_state == "paused":
        vm.resume()
        time.sleep(5)

    # Check the audit log.
    check_audit_after_cmd = True
    if test_audit:
        grep_audit = ('grep "%s" /var/log/audit/audit.log'
                      % test_cmd.split("-")[0])
        cmd = (grep_audit + ' | ' +
               'grep "%s" | tail -n1 | grep "res=success"' % device_source)
        if process.run(cmd, shell=True).exit_status:
            logging.error("Audit check failed")
            check_audit_after_cmd = False

    # Wait a while for the xml to sync.
    time.sleep(float(time_sleep))

    # Check the disk count after the command.
    check_count_after_cmd = True
    disk_count_after_cmd = vm_xml.VMXML.get_disk_count(vm_name)
    if test_cmd == "attach-disk":
        if disk_count_after_cmd == disk_count_before_cmd:
            check_count_after_cmd = False
    elif test_cmd == "detach-disk":
        if disk_count_after_cmd < disk_count_before_cmd:
            check_count_after_cmd = False

    # Recover the VM state.
    if pre_vm_state == "shut off":
        vm.start()

    # Check inside the VM after the command.
    check_vm_after_cmd = True
    check_vm_after_cmd = check_vm_partition(vm, device, os_type,
                                            device_target, old_parts)

    # Check the disk type after attach.
    check_disk_type = True
    if test_type:
        if test_block_dev:
            check_disk_type = vm_xml.VMXML.check_disk_type(vm_name,
                                                           device_source,
                                                           "block")
        else:
            check_disk_type = vm_xml.VMXML.check_disk_type(vm_name,
                                                           device_source,
                                                           "file")

    # Check the disk serial after attach.
    check_disk_serial = True
    if serial:
        disk_serial = vm_xml.VMXML.get_disk_serial(vm_name, device_target)
        if serial != disk_serial:
            check_disk_serial = False

    # Check the disk address after attach.
    check_disk_address = True
    if address:
        disk_address = vm_xml.VMXML.get_disk_address(vm_name, device_target)
        if address != disk_address:
            check_disk_address = False

    # Check the multifunction address after attach.
    check_disk_address2 = True
    if address2:
        disk_address2 = vm_xml.VMXML.get_disk_address(vm_name, device_target2)
        if address2 != disk_address2:
            check_disk_address2 = False

    # Check the disk cache option after attach.
    check_cache_after_cmd = True
    if cache_options:
        disk_cache = vm_xml.VMXML.get_disk_attr(vm_name, device_target,
                                                "driver", "cache")
        if cache_options == "default":
            if disk_cache is not None:
                check_cache_after_cmd = False
        elif disk_cache != cache_options:
            check_cache_after_cmd = False

    # Eject cdrom test.
    eject_cdrom = "yes" == params.get("at_dt_disk_eject_cdrom", "no")
    save_vm = "yes" == params.get("at_dt_disk_save_vm", "no")
    save_file = os.path.join(data_dir.get_tmp_dir(), "vm.save")
    try:
        if eject_cdrom:
            eject_params = {'type_name': "file",
                            'device_type': "cdrom",
                            'target_dev': device_target,
                            'target_bus': device_disk_bus}
            eject_xml = libvirt.create_disk_xml(eject_params)
            with open(eject_xml) as eject_file:
                logging.debug("Eject CDROM by XML: %s", eject_file.read())
            # Run the command twice to make sure the cdrom tray is opened
            # first (BZ892289).
            # Open the tray.
            virsh.attach_device(domainarg=vm_name, filearg=eject_xml,
                                debug=True)
            # Sleep between the two attach commands.
            if time_sleep:
                time.sleep(float(time_sleep))
            # Eject the cdrom.
            result = virsh.attach_device(domainarg=vm_name, filearg=eject_xml,
                                         debug=True)
            if result.exit_status != 0:
                test.fail("Ejecting the CDROM failed")
            if vm_xml.VMXML.check_disk_exist(vm_name, device_source):
                test.fail("Found %s after the eject" % device_source)

        # Save and restore the VM.
        if save_vm:
            result = virsh.save(vm_name, save_file, debug=True)
            libvirt.check_exit_status(result)
            result = virsh.restore(save_file, debug=True)
            libvirt.check_exit_status(result)
            if vm_xml.VMXML.check_disk_exist(vm_name, device_source):
                test.fail("Found %s after the restore" % device_source)

        # Destroy the VM.
        vm.destroy(gracefully=False)

        # Check the disk count after VM shutdown (with --config).
        check_count_after_shutdown = True
        inactive_vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        disk_count_after_shutdown = len(inactive_vmxml.get_disk_all())
        if test_cmd == "attach-disk":
            if disk_count_after_shutdown == disk_count_before_cmd:
                check_count_after_shutdown = False
        elif test_cmd == "detach-disk":
            if disk_count_after_shutdown < disk_count_before_cmd:
                check_count_after_shutdown = False

    finally:
        # Recover the VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.debug("Restore the VM XML")
        backup_xml.sync()
        if os.path.exists(save_file):
            os.remove(save_file)
        if test_block_dev:
            if test_logcial_dev:
                libvirt.delete_local_disk("lvm", vgname=vg_name,
                                          lvname=lv_name)
                lv_utils.vg_remove(vg_name)
                process.run("pvremove %s" % device_source,
                            shell=True, ignore_status=True)
            libvirt.setup_or_cleanup_iscsi(False)
        else:
            libvirt.delete_local_disk("file", device_source)

    # Check results.
    if status_error:
        if not status:
            test.fail("virsh %s exited with an unexpected value." % test_cmd)
    else:
        if status:
            test.fail("virsh %s failed."
                      % test_cmd)
    if test_cmd == "attach-disk":
        if at_options.count("config"):
            if not check_count_after_shutdown:
                test.fail("Cannot see the --config attached device "
                          "in the xml file after VM shutdown.")
            if not check_disk_serial:
                test.fail("Setting the serial failed after attach")
            if not check_disk_address:
                test.fail("Setting the address failed after attach")
            if not check_disk_address2:
                test.fail("Setting the (multifunction) address failed "
                          "after attach")
        else:
            if not check_count_after_cmd:
                test.fail("Cannot see the device in the xml file "
                          "after attach.")
            if not check_vm_after_cmd:
                test.fail("Cannot see the device in the VM after attach.")
            if not check_disk_type:
                test.fail("Checking the disk type failed after attach.")
            if not check_audit_after_cmd:
                test.fail("Audit hotplug failure after attach")
            if not check_cache_after_cmd:
                test.fail("Checking the cache failed after attach")
            if at_options.count("persistent"):
                if not check_count_after_shutdown:
                    test.fail("Cannot see the device attached with "
                              "persistent after VM shutdown.")
            else:
                if check_count_after_shutdown:
                    test.fail("Still see the non-config attached device "
                              "in the xml file after VM shutdown.")
    elif test_cmd == "detach-disk":
        if dt_options.count("config"):
            if check_count_after_shutdown:
                test.fail("Still see the --config detached device in the "
                          "xml file after VM shutdown.")
        else:
            if check_count_after_cmd:
                test.fail("Still see the device in the xml file "
                          "after detach.")
            if check_vm_after_cmd:
                test.fail("Still see the device in the VM after detach.")
            if not check_audit_after_cmd:
                test.fail("Audit hotunplug failure after detach")
            if dt_options.count("persistent"):
                if check_count_after_shutdown:
                    test.fail("Still see the persistently detached device "
                              "in the xml file after VM shutdown.")
            else:
                if not check_count_after_shutdown:
                    test.fail("The device detached without --config is "
                              "missing from the xml file after VM shutdown.")
    else:
        test.error("Unknown command %s." % test_cmd)
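
# --- Illustrative sketch (not part of the test above) -----------------------
# A minimal attach -> verify -> detach cycle showing the core calls that
# run() exercises, assuming the usual avocado-vt module layout. The helper
# name, the "vdb" target and the image size are placeholders chosen for this
# example, not values used by the test itself.
def _attach_detach_sketch(test, vm, vm_name):
    import os
    from virttest import data_dir, virsh
    from virttest.libvirt_xml import vm_xml
    from virttest.utils_test import libvirt

    if vm.is_dead():
        vm.start()
    image_path = os.path.join(data_dir.get_tmp_dir(), "sketch.img")
    target = "vdb"  # placeholder target device

    # Create a small raw image to attach.
    source = libvirt.create_local_disk("file", path=image_path,
                                       size="100M", disk_format="raw")
    try:
        # Hot-attach the disk to the running guest.
        ret = virsh.attach_disk(vm_name, source, target,
                                "--driver qemu --subdriver raw", debug=True)
        libvirt.check_exit_status(ret)

        # The disk should now appear in the live domain XML.
        if not vm_xml.VMXML.check_disk_exist(vm_name, source):
            test.fail("Attached disk %s not found in domain XML" % source)

        # Hot-detach it again.
        ret = virsh.detach_disk(vm_name, target, debug=True)
        libvirt.check_exit_status(ret)
    finally:
        libvirt.delete_local_disk("file", source)
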
                                      % snapshot_option)
    libvirt.check_exit_status(ret, snapshot_error)

    # Start the VM.
    vm.start()
    if status_error:
        raise error.TestFail("VM started unexpectedly")

    # Hotplug the disks.
    if device_at_dt_disk:
        for i in range(len(disks)):
            attach_option = ""
            if len(device_attach_option) > i:
                attach_option = device_attach_option[i]
            ret = virsh.attach_disk(vm_name, disks[i]["source"],
                                    device_targets[i], attach_option)
            libvirt.check_exit_status(ret)
    elif hotplug:
        for i in range(len(disks_xml)):
            disks_xml[i].xmltreefile.write()
            attach_option = ""
            if len(device_attach_option) > i:
                attach_option = device_attach_option[i]
            ret = virsh.attach_device(vm_name, disks_xml[i].xml,
                                      flagstr=attach_option)
            attach_error = False
            if len(device_attach_error) > i:
                attach_error = "yes" == device_attach_error[i]
            libvirt.check_exit_status(ret, attach_error)
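
# --- Illustrative sketch (assumption, not taken from the snippet above) -----
# Shows the expected-error pattern used by the hotplug loop: when a disk is
# expected to fail to attach, libvirt.check_exit_status() treats a non-zero
# exit status as the passing case. The helper name and arguments are made up
# for illustration only.
def _attach_with_expectation(vm_name, disk_xml_path, expect_error=False):
    from virttest import virsh
    from virttest.utils_test import libvirt

    # Try to hotplug the device described by disk_xml_path.
    ret = virsh.attach_device(vm_name, disk_xml_path, debug=True)

    # expect_error=True means a failed attach is the expected outcome;
    # expect_error=False means the attach must succeed.
    libvirt.check_exit_status(ret, expect_error)
    return ret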