def make_external_disk_snapshots(vm, device_target, postfix_n, snapshot_take):
    """
    Create a chain of external, disk-only snapshots on a single disk.

    :param vm: VM instance
    :param device_target: disk target (e.g. "vda") to snapshot
    :param postfix_n: postfix used to name snapshots and snapshot files
    :param snapshot_take: number of snapshots to take
    :return: list containing absolute paths of the created snapshot files
    """
    first_disk_source = get_first_disk_source(vm)
    root_dir = os.path.dirname(first_disk_source)
    # Snapshot files reuse the image's base name with a numbered postfix.
    diskname = os.path.basename(first_disk_source).split(".")[0]
    external_snapshot_disks = []
    # One external, disk-only snapshot per iteration.
    for index in range(1, snapshot_take + 1):
        disk_external = os.path.join(root_dir,
                                     "%s.%s%s" % (diskname, postfix_n, index))
        external_snapshot_disks.append(disk_external)
        options = ("%s_%s %s%s-desc --diskspec "
                   " %s,snapshot=external,file=%s --disk-only --atomic"
                   % (postfix_n, index, postfix_n, index,
                      device_target, disk_external))
        virsh.snapshot_create_as(vm.name, options,
                                 ignore_status=False, debug=True)
    return external_snapshot_disks
def _generate_backstore_attribute(params):
    """
    Prepare one disk with a backingStore attribute by creating a snapshot.

    :param params: one dict to wrap up parameters
    """
    device_target = params.get("virt_disk_device_target")
    top_image = params.get("top_file_image_name")
    second_image = params.get("second_file_image_name")
    # Register files for later cleanup / backing-chain verification.
    tmp_blkpull_path.append(top_image)
    tmp_blkpull_path.append(second_image)
    backing_chain_list.append(top_image)
    if vm.is_dead():
        vm.start()
    snapshot_tmp_name = "blockpull_tmp_snap"
    options = (" %s --disk-only --diskspec %s,file=%s"
               % (snapshot_tmp_name, 'vda', second_image))
    options += " --diskspec %s,file=%s" % (device_target, top_image)
    virsh.snapshot_create_as(vm_name, options,
                             ignore_status=False, debug=True)
    vm.destroy()
    # Drop only the snapshot metadata; the snapshot files stay in place.
    virsh.snapshot_delete(vm_name, snapshot_tmp_name, "--metadata",
                          ignore_status=False, debug=True)
    vmxml_dir = vm_xml.VMXML.new_from_dumpxml(vm_name)
    logging.debug("backstore prepare readiness :\n%s", vmxml_dir)
def check_snapshot(bgjob=None):
    """
    Do snapshot operation and check the results.

    :param bgjob: optional background job (random data server) to kill
                  once the rng device has been removed
    """
    snapshot_name1 = "snap.s1"
    snapshot_name2 = "snap.s2"
    if not snapshot_vm_running:
        vm.destroy(gracefully=False)
    ret = virsh.snapshot_create_as(vm_name, snapshot_name1)
    libvirt.check_exit_status(ret)
    snap_lists = virsh.snapshot_list(vm_name)
    # BUG FIX: the original checked and reverted "snapshot_name" while the
    # snapshot it just created was "snapshot_name1" (which was otherwise
    # unused) — verify and revert the snapshot actually taken.
    if snapshot_name1 not in snap_lists:
        test.fail("Snapshot %s doesn't exist" % snapshot_name1)

    if snapshot_vm_running:
        options = "--force"
    else:
        options = ""
    ret = virsh.snapshot_revert(
        vm_name, ("%s %s" % (snapshot_name1, options)))
    libvirt.check_exit_status(ret)
    ret = virsh.dumpxml(vm_name)
    if ret.stdout.count("<rng model="):
        test.fail("Found rng device in xml")

    if snapshot_with_rng:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        if bgjob:
            bgjob.kill_func()
        modify_rng_xml(params, False)

    # Start the domain before disk-only snapshot
    if vm.is_dead():
        # Add random server
        if params.get("backend_type") == "tcp":
            cmd = "cat /dev/random | nc -4 -l localhost 1024"
            bgjob = utils.AsyncJob(cmd)
        vm.start()
        vm.wait_for_login().close()
    err_msgs = ("live disk snapshot not supported"
                " with this QEMU binary")
    ret = virsh.snapshot_create_as(vm_name,
                                   "%s --disk-only" % snapshot_name2)
    if ret.exit_status:
        if ret.stderr.count(err_msgs):
            test.skip(err_msgs)
        else:
            test.fail("Failed to create external snapshot")
    snap_lists = virsh.snapshot_list(vm_name)
    if snapshot_name2 not in snap_lists:
        test.fail("Failed to check snapshot list")
    ret = virsh.domblklist(vm_name)
    if not ret.stdout.count(snapshot_name2):
        test.fail("Failed to find snapshot disk")
def check_bootorder_snapshot(disk_name):
    """
    Check VM disk's bootorder option with snapshot.

    :param disk_name: the target disk to be checked
    """
    logging.info("Checking diskorder option with snapshot...")
    snapshot1 = "s1"
    snapshot2 = "s2"
    snapshot2_file = os.path.join(test.tmpdir, "s2")

    result = virsh.snapshot_create(vm_name, "", **virsh_dargs)
    libvirt.check_exit_status(result)

    result = virsh.snapshot_create_as(vm_name, "%s --disk-only" % snapshot1,
                                      **virsh_dargs)
    libvirt.check_exit_status(result)

    # The disk-only snapshot must show up in the snapshot XML.
    result = virsh.snapshot_dumpxml(vm_name, snapshot1)
    libvirt.check_exit_status(result)
    cmd = "echo \"%s\" | grep %s.%s" % (result.stdout, disk_name, snapshot1)
    if process.system(cmd, ignore_status=True, shell=True):
        raise exceptions.TestError("Check snapshot disk failed")

    result = virsh.snapshot_create_as(
        vm_name,
        "%s --memspec file=%s,snapshot=external" % (snapshot2, snapshot2_file),
        **virsh_dargs)
    libvirt.check_exit_status(result)

    # The domain XML must keep the bootorder attribute on the disk.
    result = virsh.dumpxml(vm_name)
    libvirt.check_exit_status(result)
    cmd = ("echo \"%s\" | grep -A 16 %s.%s | grep \"boot order='%s'\""
           % (result.stdout, disk_name, snapshot2, bootorder))
    if process.system(cmd, ignore_status=True, shell=True):
        raise exceptions.TestError("Check snapshot disk with bootorder failed")

    snap_lists = virsh.snapshot_list(vm_name)
    if snapshot1 not in snap_lists or snapshot2 not in snap_lists:
        raise exceptions.TestError("Check snapshot list failed")

    # Check virsh save command after snapshot.
    save_file = "/tmp/%s.save" % vm_name
    result = virsh.save(vm_name, save_file, **virsh_dargs)
    libvirt.check_exit_status(result)

    # Check virsh restore command after snapshot.
    result = virsh.restore(save_file, **virsh_dargs)
    libvirt.check_exit_status(result)

    # Passed all tests; remove the save file.
    os.remove(save_file)
def check_bootorder_snapshot(disk_name):
    """
    Check VM disk's bootorder option with snapshot.

    :param disk_name: the target disk to be checked
    """
    logging.info("Checking diskorder option with snapshot...")
    snapshot1 = "s1"
    snapshot2 = "s2"
    snapshot2_file = os.path.join(test.tmpdir, "s2")

    result = virsh.snapshot_create(vm_name, "", **virsh_dargs)
    libvirt.check_exit_status(result)

    result = virsh.snapshot_create_as(vm_name, "%s --disk-only" % snapshot1,
                                      **virsh_dargs)
    libvirt.check_exit_status(result)

    # The disk-only snapshot must show up in the snapshot XML.
    result = virsh.snapshot_dumpxml(vm_name, snapshot1)
    libvirt.check_exit_status(result)
    cmd = "echo \"%s\" | grep %s.%s" % (result.stdout, disk_name, snapshot1)
    if utils.run(cmd, ignore_status=True).exit_status:
        raise error.TestError("Check snapshot disk failed")

    result = virsh.snapshot_create_as(
        vm_name,
        "%s --memspec file=%s,snapshot=external" % (snapshot2, snapshot2_file),
        **virsh_dargs)
    libvirt.check_exit_status(result)

    # The domain XML must keep the bootorder attribute on the disk.
    result = virsh.dumpxml(vm_name)
    libvirt.check_exit_status(result)
    cmd = ("echo \"%s\" | grep -A 16 %s.%s | grep \"boot order='%s'\""
           % (result.stdout, disk_name, snapshot2, bootorder))
    if utils.run(cmd, ignore_status=True).exit_status:
        raise error.TestError("Check snapshot disk with bootorder failed")

    snap_lists = virsh.snapshot_list(vm_name)
    if snapshot1 not in snap_lists or snapshot2 not in snap_lists:
        raise error.TestError("Check snapshot list failed")

    # Check virsh save command after snapshot.
    save_file = "/tmp/%s.save" % vm_name
    result = virsh.save(vm_name, save_file, **virsh_dargs)
    libvirt.check_exit_status(result)

    # Check virsh restore command after snapshot.
    result = virsh.restore(save_file, **virsh_dargs)
    libvirt.check_exit_status(result)

    # Passed all tests; remove the save file.
    os.remove(save_file)
def create_reuse_external_snapshots(vm, pre_set_root_dir=None,
                                    skip_first_one=False, disk_target="vda",
                                    disk_type="file",
                                    snapshot_chain_lenghth=4):
    """
    Create reuse-external, disk-only snapshots with relative backing files.

    :param vm: VM instance
    :param pre_set_root_dir: preset root directory; derived from the VM's
                             first disk source when None
    :param skip_first_one: whether to skip the first image file
    :param disk_target: disk target, e.g. "vda"
    :param disk_type: disk type, "file" or "block"
    :param snapshot_chain_lenghth: snapshot chain length (parameter name
        keeps its historical misspelling for backward compatibility)
    :return: tuple of (absolute root path of backing files, snapshot list)
    """
    if pre_set_root_dir is None:
        root_dir = os.path.dirname(get_first_disk_source(vm))
    else:
        root_dir = pre_set_root_dir
    meta_options = " --reuse-external --disk-only --no-metadata"
    # Sub-folder names 'a', 'b', 'c', ... — one per snapshot in the chain.
    relative_sub_folders = [
        chr(letter)
        for letter in range(ord('a'), ord('a') + snapshot_chain_lenghth)
    ]
    backing_file_dict = collections.OrderedDict()
    snapshot_external_disks = []
    # FIX: enumerate() instead of "for index in range(len(...))" — same
    # iteration order, idiomatic and no redundant indexing.
    for index, key in enumerate(relative_sub_folders):
        if index == 0 and skip_first_one:
            continue
        backing_file_dict[key] = "%s.img" % key
    for key, value in list(backing_file_dict.items()):
        backing_file_path = os.path.join(root_dir, key)
        external_snap_shot = "%s/%s" % (backing_file_path, value)
        snapshot_external_disks.append(external_snap_shot)
        # Block-backed disks need an explicit storage type in the diskspec.
        if disk_type == "block":
            options = "%s --diskspec %s,file=%s,stype=%s" % (
                meta_options, disk_target, external_snap_shot, disk_type)
        else:
            options = "%s --diskspec %s,file=%s" % (meta_options, disk_target,
                                                    external_snap_shot)
        virsh.snapshot_create_as(vm.name, options,
                                 ignore_status=False, debug=True)
    LOG.debug('reuse external snapshots:%s' % snapshot_external_disks)
    return root_dir, snapshot_external_disks
def create_reuse_external_snapshots(pre_set_root_dir=None):
    """
    Create reuse-external, disk-only snapshots with relative backing files.

    :param pre_set_root_dir: preset root directory; derived from the first
                             disk source when None
    :return: absolute path of the directory holding the backing files
    """
    if pre_set_root_dir is None:
        first_disk_source = get_first_disk_source()
        # FIX: removed the unused 'basename' local the original computed.
        root_dir = os.path.dirname(first_disk_source)
    else:
        root_dir = pre_set_root_dir
    meta_options = " --reuse-external --disk-only --no-metadata"
    # Make three external relative path backing files.
    backing_file_dict = collections.OrderedDict()
    backing_file_dict["b"] = "b.img"
    backing_file_dict["c"] = "c.img"
    backing_file_dict["d"] = "d.img"
    for key, value in list(backing_file_dict.items()):
        backing_file_path = os.path.join(root_dir, key)
        external_snap_shot = "%s/%s" % (backing_file_path, value)
        snapshot_external_disks.append(external_snap_shot)
        options = "%s --diskspec %s,file=%s" % (meta_options, disk_target,
                                                external_snap_shot)
        cmd_result = virsh.snapshot_create_as(vm_name, options,
                                              ignore_status=False, debug=True)
        libvirt.check_exit_status(cmd_result)
    logging.debug('reuse external snapshots:%s' % snapshot_external_disks)
    return root_dir
def do_snapshot(vm_name, expected_str):
    """
    Run snapshot related commands: snapshot-create-as, snapshot-list,
    snapshot-dumpxml, snapshot-revert.

    :param vm_name: vm name
    :param expected_str: expected string in snapshot-dumpxml output
    :raise: test.fail if virsh command failed
    """
    snapshot_name = vm_name + "-snap"
    virsh_dargs = {'debug': True}

    result = virsh.snapshot_create_as(vm_name, snapshot_name, **virsh_dargs)
    libvirt.check_exit_status(result)

    try:
        snapshots = virsh.snapshot_list(vm_name, **virsh_dargs)
    except process.CmdError:
        test.fail("Failed to get snapshots list for %s" % vm_name)
    if snapshot_name not in snapshots:
        test.fail("The snapshot '%s' was not in snapshot-list."
                  % snapshot_name)

    result = virsh.snapshot_dumpxml(vm_name, snapshot_name, **virsh_dargs)
    libvirt.check_result(result, expected_match=expected_str)

    # Revert to the current snapshot to complete the round trip.
    result = virsh.snapshot_revert(vm_name, "", "--current", **virsh_dargs)
    libvirt.check_exit_status(result)
def make_snapshot():
    """
    Make external snapshots of several flavours.

    :return: external snapshot path list
    """
    logging.info("Making snapshot...")
    first_disk_source = vm.get_first_disk_devices()['source']
    snapshot_path_list = []
    tmp_path = data_dir.get_tmp_dir()
    mem_s2 = os.path.join(tmp_path, "mem.s2")
    mem_s3 = os.path.join(tmp_path, "mem.s3")
    mem_s4 = os.path.join(tmp_path, "mem.s4")
    disk_s4 = os.path.join(tmp_path, "disk.s4")
    mem_s5 = os.path.join(tmp_path, "mem.s5")
    disk_s5 = os.path.join(tmp_path, "disk.s5")
    # Attempt to take different types of snapshots.
    snapshots_param_dict = {
        "s1": "s1 --disk-only --no-metadata",
        "s2": "s2 --memspec %s --no-metadata" % mem_s2,
        "s3": "s3 --memspec %s --no-metadata --live" % mem_s3,
        "s4": "s4 --memspec %s --diskspec vda,file=%s --no-metadata"
              % (mem_s4, disk_s4),
        "s5": "s5 --memspec %s --diskspec vda,file=%s --live --no-metadata"
              % (mem_s5, disk_s5),
    }
    for snap_name in sorted(snapshots_param_dict.keys()):
        result = virsh.snapshot_create_as(vm_name,
                                          snapshots_param_dict[snap_name],
                                          **virsh_dargs)
        libvirt.check_exit_status(result)
        # s4/s5 write their disk state to explicit files; the others land
        # next to the original image with the suffix swapped.
        if snap_name not in ('s4', 's5'):
            snapshot_path_list.append(
                first_disk_source.replace('qcow2', snap_name))
    return snapshot_path_list
def check_snapshot():
    """
    Test domain snapshot operation.
    """
    snapshot1 = "s1"
    snapshot2 = "s2"

    # First snapshot is expected to succeed.
    result = virsh.snapshot_create_as(vm_name, snapshot1)
    libvirt.check_exit_status(result)

    # The remaining snapshot attempts are expected to fail
    # (check_exit_status with True asserts a non-zero exit).
    result = virsh.snapshot_create_as(vm_name,
                                      "%s --disk-only --diskspec vda,"
                                      "file=/tmp/testvm-snap1" % snapshot2)
    libvirt.check_exit_status(result, True)

    result = virsh.snapshot_create_as(
        vm_name,
        "%s --memspec file=%s,snapshot=external"
        " --diskspec vda,file=/tmp/testvm-snap2" % (snapshot2, snapshot2),
    )
    libvirt.check_exit_status(result, True)
def do_blockcommit_pivot_repeatedly():
    """
    Repeatedly snapshot, blockcommit with pivot, and delete the snapshot.

    Validates https://bugzilla.redhat.com/show_bug.cgi?id=1857735
    """
    tmp_snapshot_name = "external_snapshot_" + "repeated.qcow2"
    block_target = 'vda'
    # The snapshot file path is loop-invariant; compute it once.
    disk_external = os.path.join(tmp_dir, tmp_snapshot_name)
    options = ("%s --disk-only --atomic"
               " --diskspec %s,snapshot=external,file=%s"
               % (tmp_snapshot_name, block_target, disk_external))
    # Make external snapshot, pivot and delete snapshot file repeatedly.
    for _ in range(5):
        virsh.snapshot_create_as(vm_name, options,
                                 ignore_status=False, debug=True)
        virsh.blockcommit(vm_name, block_target, " --active --pivot ",
                          ignore_status=False, debug=True)
        virsh.snapshot_delete(vm_name, tmp_snapshot_name, " --metadata")
        libvirt.delete_local_disk('file', disk_external)
def prepare_snapshot(self, snap_num=3, option='--disk-only'):
    """
    Prepare domain snapshots, building a backing chain.

    :params snap_num: snapshot number, default value is 3
    :params option: option to create snapshot, default value is
                    '--disk-only'
    """
    # Create backing chain: one snapshot per iteration.
    for idx in range(snap_num):
        name = 'snap%d' % idx
        path = self.tmp_dir + name
        snap_option = "%s %s --diskspec %s,file=%s" % (name, option,
                                                       self.new_dev, path)
        virsh.snapshot_create_as(self.vm.name, snap_option,
                                 ignore_status=False, debug=True)
        self.snap_path_list.append(path)
        self.snap_name_list.append(name)
def check_snapshot():
    """
    Test domain snapshot operation.
    """
    snapshot1 = "s1"
    snapshot2 = "s2"

    # Plain snapshot: expected to succeed.
    result = virsh.snapshot_create_as(vm_name, snapshot1)
    libvirt.check_exit_status(result)

    # Expected failures (check_exit_status with True asserts failure).
    result = virsh.snapshot_create_as(
        vm_name, "%s --disk-only --diskspec vda,"
        "file=/tmp/testvm-snap1" % snapshot2)
    libvirt.check_exit_status(result, True)

    result = virsh.snapshot_create_as(
        vm_name, "%s --memspec file=%s,snapshot=external"
        " --diskspec vda,file=/tmp/testvm-snap2" % (snapshot2, snapshot2))
    libvirt.check_exit_status(result, True)
def test_with_label(vm, params, test):
    """
    Test nvdimm with label setting.

    :param vm: vm object
    :param params: dict, test parameters
    :param test: test object
    :raises: test.fail if checkpoints fail
    """
    test_str = params.get('test_str')
    test_file = params.get('test_file')
    vm_name = params.get('main_vm')
    session = vm.wait_for_login()

    # Create a file on the nvdimm device.
    create_file_within_nvdimm_disk(session, test_file, test_str, test,
                                   block_size=4096)

    # Reboot the guest and remount the nvdimm device, then verify the
    # file written before the reboot is still present.
    session.close()
    virsh.reboot(vm_name, debug=True)
    session = vm.wait_for_login()
    session.cmd('mount -o dax /dev/pmem0 /mnt')
    if test_str not in session.cmd('cat /mnt/foo-label '):
        test.fail('"%s" should be in output' % test_str)
    session.close()

    if params.get('check_life_cycle', 'no') == 'yes':
        # managedsave / start cycle.
        virsh.managedsave(vm_name, ignore_status=False, debug=True)
        vm.start()
        session = vm.wait_for_login()
        check_nvdimm_file(test_str, test_file, session, test)
        session.close()

        # save / restore cycle (vm_s1 doubles as save path and snap name).
        vm_s1 = vm_name + ".s1"
        virsh.save(vm_name, vm_s1, ignore_status=False, debug=True)
        virsh.restore(vm_s1, ignore_status=False, debug=True)
        session = vm.wait_for_login()
        check_nvdimm_file(test_str, test_file, session, test)
        session.close()

        # snapshot create / revert / delete cycle.
        virsh.snapshot_create_as(vm_name, vm_s1,
                                 ignore_status=False, debug=True)
        virsh.snapshot_revert(vm_name, vm_s1,
                              ignore_status=False, debug=True)
        virsh.snapshot_delete(vm_name, vm_s1,
                              ignore_status=False, debug=True)
def create_snap(vm_name, snap_option):
    """
    Create a snapshot using the given options and verify it succeeded.

    :param vm_name: VM name
    :param snap_option: snap option
    :return: CmdResult of the snapshot creation
    """
    result = virsh.snapshot_create_as(vm_name, snap_option,
                                      ignore_status=True, debug=True)
    # ignore_status lets us capture the result; this asserts success.
    libvirt.check_exit_status(result)
    return result
def check_snapshot(snap_option, target_dev='vda'):
    """
    Test snapshot operation and verify snapshot/domain XML contents.

    :param snap_option: snapshot flavour marker ("disk-only", "disk-mem"
                        or anything else for a plain snapshot)
    :param target_dev: disk target to snapshot, defaults to 'vda'
    """
    snap_name = "s1"
    snap_mem = os.path.join(TMP_DATA_DIR, "rbd.mem")
    snap_disk = os.path.join(TMP_DATA_DIR, "rbd.disk")
    xml_snap_exp = ["disk name='%s' snapshot='external' type='file'"
                    % target_dev]
    xml_dom_exp = ["source file='%s'" % snap_disk,
                   "backingStore type='network' index='1'",
                   "source protocol='rbd' name='%s'" % disk_src_name]
    if snap_option.count("disk-only"):
        options = ("%s --diskspec %s,file=%s --disk-only"
                   % (snap_name, target_dev, snap_disk))
    elif snap_option.count("disk-mem"):
        options = ("%s --memspec file=%s --diskspec %s,file="
                   "%s" % (snap_name, snap_mem, target_dev, snap_disk))
        xml_snap_exp.append("memory snapshot='external' file='%s'" % snap_mem)
    else:
        options = snap_name
    ret = virsh.snapshot_create_as(vm_name, options)
    if test_disk_internal_snapshot:
        libvirt.check_result(ret, expected_fails=unsupported_err)
    elif test_disk_readonly:
        # Snapshots of read-only disks only pass on libvirt >= 6.0.0.
        if libvirt_version.version_compare(6, 0, 0):
            libvirt.check_result(ret)
        else:
            libvirt.check_result(ret, expected_fails=unsupported_err)
    else:
        libvirt.check_result(ret, skip_if=unsupported_err)
    if ret.exit_status:
        return
    # Snapshot succeeded: grab both XML documents, clean up, then assert.
    snap_xml = virsh.snapshot_dumpxml(vm_name, snap_name,
                                      debug=True).stdout.strip()
    dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
    # Delete snapshots and temp files before the assertions so cleanup
    # runs regardless of the verification outcome.
    libvirt.clean_up_snapshots(vm_name)
    for tmp_file in (snap_mem, snap_disk):
        if os.path.exists(tmp_file):
            os.remove(tmp_file)
    if not all(x in snap_xml for x in xml_snap_exp):
        test.fail("Failed to check snapshot xml")
    if not all(x in dom_xml for x in xml_dom_exp):
        test.fail("Failed to check domain xml")
def make_disk_snapshot(postfix_n, snapshot_take): """ Make external snapshots for disks only. :param postfix_n: postfix option :param snapshot_take: snapshots taken. """ # Add all disks into command line. disks = vm.get_disk_devices() # Make three external snapshots for disks only for count in range(1, snapshot_take): options = "%s_%s %s%s-desc " % (postfix_n, count, postfix_n, count) options += "--disk-only --atomic --no-metadata" if needs_agent: options += " --quiesce" for disk in disks: disk_detail = disks[disk] basename = os.path.basename(disk_detail['source']) # Remove the original suffix if any, appending # ".postfix_n[0-9]" diskname = basename.split(".")[0] snap_name = "%s.%s%s" % (diskname, postfix_n, count) disk_external = os.path.join(tmp_dir, snap_name) snapshot_external_disks.append(disk_external) options += " %s,snapshot=external,file=%s" % (disk, disk_external) cmd_result = virsh.snapshot_create_as(vm_name, options, ignore_status=True, debug=True) status = cmd_result.exit_status if status != 0: test.fail("Failed to make snapshots for disks!") # Create a file flag in VM after each snapshot flag_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"), dir="/tmp") file_path = flag_file.name flag_file.close() status, output = session.cmd_status_output("touch %s" % file_path) if status: test.fail("Touch file in vm failed. %s" % output) snapshot_flag_files.append(file_path)
def snapshot():  # pylint: disable=W0611
    """
    Sub test for snapshot.
    """
    # Build a three-deep external snapshot chain.
    for idx in range(1, 4):
        result = virsh.snapshot_create_as(vm_name, "sn%s --disk-only" % idx)
        libvirt.check_exit_status(result)
    process.system("systemctl restart libvirtd")

    # Save/restore the domain after the daemon restart.
    save_path = os.path.join(tmp_dir, "test.save")
    result = virsh.save(vm_name, save_path)
    libvirt.check_exit_status(result)
    result = virsh.restore(save_path)
    libvirt.check_exit_status(result)

    # The guest must still be reachable after restore.
    vm.wait_for_login().close()
def domainsnapshot_validate(vm_name, file=None, **virsh_dargs):
    """
    Test for schema domainsnapshot.

    :param vm_name: name of the domain to snapshot
    :param file: unused, kept for interface compatibility
    :raises error.TestFail: if snapshot info does not match expectations
                            or snapshot-info itself fails
    """
    snapshot_name = "snap-%s-%s" % (vm_name, time.time())
    cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name)
    libvirt.check_exit_status(cmd_result)

    def check_info(s1, s2, errorstr="Values differ"):
        # BUG FIX: the original constructed error.TestFail(...) without
        # raising it, so mismatches were silently ignored.
        if s1 != s2:
            raise error.TestFail("%s (%s != %s)" % (errorstr, s1, s2))

    try:
        ss_info = virsh.snapshot_info(vm_name, snapshot_name)
        check_info(ss_info["Name"], snapshot_name, "Incorrect snapshot name")
        check_info(ss_info["Domain"], vm_name, "Incorrect domain name")
    except error.CmdError as e:
        # BUG FIX: raise the failure ("as e" also works on Python 2.6+,
        # unlike the original "except X, e" syntax).
        raise error.TestFail(str(e))
def check_snapshot(snap_option, target_dev='vda'):
    """
    Test snapshot operation and verify snapshot/domain XML contents.

    :param snap_option: snapshot flavour marker ("disk-only", "disk-mem"
                        or anything else for a plain snapshot)
    :param target_dev: disk target to snapshot, defaults to 'vda'
    """
    snap_name = "s1"
    snap_mem = os.path.join(data_dir.get_tmp_dir(), "rbd.mem")
    snap_disk = os.path.join(data_dir.get_tmp_dir(), "rbd.disk")
    xml_snap_exp = ["disk name='%s' snapshot='external' type='file'"
                    % target_dev]
    xml_dom_exp = ["source file='%s'" % snap_disk,
                   "backingStore type='network' index='1'",
                   "source protocol='rbd' name='%s'" % disk_src_name]
    if snap_option.count("disk-only"):
        options = ("%s --diskspec %s,file=%s --disk-only"
                   % (snap_name, target_dev, snap_disk))
    elif snap_option.count("disk-mem"):
        options = ("%s --memspec file=%s --diskspec %s,file="
                   "%s" % (snap_name, snap_mem, target_dev, snap_disk))
        xml_snap_exp.append("memory snapshot='external' file='%s'" % snap_mem)
    else:
        options = snap_name
    ret = virsh.snapshot_create_as(vm_name, options)
    if test_disk_internal_snapshot or test_disk_readonly:
        libvirt.check_result(ret, expected_fails=unsupported_err)
    else:
        libvirt.check_result(ret, skip_if=unsupported_err)
    if ret.exit_status:
        return
    # Snapshot succeeded: grab both XML documents, clean up, then assert.
    snap_xml = virsh.snapshot_dumpxml(vm_name, snap_name,
                                      debug=True).stdout.strip()
    dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
    # Delete snapshots and temp files before the assertions so cleanup
    # runs regardless of the verification outcome.
    libvirt.clean_up_snapshots(vm_name)
    for tmp_file in (snap_mem, snap_disk):
        if os.path.exists(tmp_file):
            os.remove(tmp_file)
    if not all(x in snap_xml for x in xml_snap_exp):
        test.fail("Failed to check snapshot xml")
    if not all(x in dom_xml for x in xml_dom_exp):
        test.fail("Failed to check domain xml")
def check_snapshot(snap_option):
    """
    Test snapshot operation and verify snapshot/domain XML contents.

    :param snap_option: snapshot flavour marker ("disk-only", "disk-mem"
                        or anything else for a plain snapshot)
    """
    snap_name = "s1"
    snap_mem = os.path.join(test.tmpdir, "rbd.mem")
    snap_disk = os.path.join(test.tmpdir, "rbd.disk")
    xml_snap_exp = ["disk name='vda' snapshot='external' type='file'"]
    xml_dom_exp = ["source file='%s'" % snap_disk,
                   "backingStore type='network' index='1'",
                   "source protocol='rbd' name='%s'" % disk_src_name]
    if snap_option.count("disk-only"):
        options = ("%s --diskspec vda,file=%s --disk-only"
                   % (snap_name, snap_disk))
    elif snap_option.count("disk-mem"):
        options = ("%s --memspec file=%s --diskspec vda,file="
                   "%s" % (snap_name, snap_mem, snap_disk))
        xml_snap_exp.append("memory snapshot='external' file='%s'" % snap_mem)
    else:
        options = snap_name
    ret = virsh.snapshot_create_as(vm_name, options)
    libvirt.check_result(ret, skip_if=unsupported_err)
    if ret.exit_status:
        return
    # Snapshot succeeded: grab both XML documents, clean up, then assert.
    snap_xml = virsh.snapshot_dumpxml(vm_name, snap_name,
                                      debug=True).stdout.strip()
    dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
    # Delete snapshots and temp files before the assertions so cleanup
    # runs regardless of the verification outcome.
    libvirt.clean_up_snapshots(vm_name)
    for tmp_file in (snap_mem, snap_disk):
        if os.path.exists(tmp_file):
            os.remove(tmp_file)
    if not all(x in snap_xml for x in xml_snap_exp):
        raise exceptions.TestFail("Failed to check snapshot xml")
    if not all(x in dom_xml for x in xml_dom_exp):
        raise exceptions.TestFail("Failed to check domain xml")
def make_snapshot():
    """
    Make external snapshots of several flavours.

    :return: external snapshot path list
    """
    logging.info("Making snapshot...")
    first_disk_source = vm.get_first_disk_devices()['source']
    snapshot_path_list = []
    tmp_path = data_dir.get_tmp_dir()
    mem_s2 = os.path.join(tmp_path, "mem.s2")
    mem_s3 = os.path.join(tmp_path, "mem.s3")
    mem_s4 = os.path.join(tmp_path, "mem.s4")
    disk_s4 = os.path.join(tmp_path, "disk.s4")
    mem_s5 = os.path.join(tmp_path, "mem.s5")
    disk_s5 = os.path.join(tmp_path, "disk.s5")
    # Attempt to take different types of snapshots.
    snapshots_param_dict = {
        "s1": "s1 --disk-only --no-metadata",
        "s2": "s2 --memspec %s --no-metadata" % mem_s2,
        "s3": "s3 --memspec %s --no-metadata --live" % mem_s3,
        "s4": "s4 --memspec %s --diskspec vda,file=%s --no-metadata"
              % (mem_s4, disk_s4),
        "s5": "s5 --memspec %s --diskspec vda,file=%s --live --no-metadata"
              % (mem_s5, disk_s5),
    }
    for snap_name in sorted(snapshots_param_dict.keys()):
        result = virsh.snapshot_create_as(vm_name,
                                          snapshots_param_dict[snap_name],
                                          **virsh_dargs)
        libvirt.check_exit_status(result)
        # s4/s5 write their disk state to explicit files; the others land
        # next to the original image with the suffix swapped.
        if snap_name not in ('s4', 's5'):
            snapshot_path_list.append(
                first_disk_source.replace('qcow2', snap_name))
    return snapshot_path_list
def make_disk_snapshot():
    """
    Take three external, disk-only snapshots covering every VM disk, and
    drop a flag file inside the guest after each one.
    """
    # Add all disks into commandline.
    disks = vm.get_disk_devices()

    # Make three external snapshots for disks only
    for count in range(1, 4):
        options = "snapshot%s snap%s-desc " \
                  "--disk-only --atomic --no-metadata" % (count, count)
        for disk in disks:
            source_name = os.path.basename(disks[disk]['source'])
            # Remove the original suffix if any, appending ".snap[0-9]"
            disk_external = os.path.join(
                tmp_dir, "%s.snap%s" % (source_name.split(".")[0], count))
            snapshot_external_disks.append(disk_external)
            options += " %s,snapshot=external,file=%s" % (disk,
                                                          disk_external)
        result = virsh.snapshot_create_as(vm_name, options,
                                          ignore_status=True, debug=True)
        if result.exit_status != 0:
            raise error.TestFail("Failed to make snapshots for disks!")

        # Create a file flag in VM after each snapshot
        flag_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"),
                                                dir="/tmp")
        file_path = flag_file.name
        flag_file.close()
        status, output = session.cmd_status_output("touch %s" % file_path)
        if status:
            raise error.TestFail("Touch file in vm failed. %s" % output)
        snapshot_flag_files.append(file_path)
def run(test, params, env):
    """
    Test disk-only snapshot on a fc lun backed virtual disk.

    1. prepare a fc lun with one of following methods
        - create a scsi pool&vol
        - create a vhba
    2. prepare the virtual disk xml, as one of following
        - source = /dev/disk/by-path
        - source = /dev/mapper/mpathX
        - source = pool&vol format
    3. start a vm with above disk as vdb
    4. create disk-only snapshot of vdb
    5. check the snapshot-list and snapshot file's existence
    6. mount vdb and touch file to it
    7. revert the snapshot and check file's existence
    8. delete snapshot
    9. cleanup env.
    """
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    wwpn = params.get("wwpn", "WWPN_EXAMPLE")
    wwnn = params.get("wwnn", "WWNN_EXAMPLE")
    disk_device = params.get("disk_device", "disk")
    disk_type = params.get("disk_type", "file")
    disk_size = params.get("disk_size", "100M")
    device_target = params.get("device_target", "vdb")
    driver_name = params.get("driver_name", "qemu")
    driver_type = params.get("driver_type", "raw")
    target_bus = params.get("target_bus", "virtio")
    vd_format = params.get("vd_format", "")
    snapshot_dir = params.get("snapshot_dir", "/tmp")
    snapshot_name = params.get("snapshot_name", "s1")
    pool_name = params.get("pool_name", "")
    pool_target = params.get("pool_target", "/dev")
    snapshot_disk_only = "yes" == params.get("snapshot_disk_only", "no")
    new_vhbas = []
    current_vhbas = []
    new_vhba = []
    path_to_blk = ""
    lun_sl = []
    new_disk = ""
    pool_ins = None
    old_mpath_conf = ""
    mpath_conf_path = "/etc/multipath.conf"
    original_mpath_conf_exist = os.path.exists(mpath_conf_path)

    vm = env.get_vm(vm_name)
    online_hbas = utils_npiv.find_hbas("hba")
    if not online_hbas:
        raise exceptions.TestSkipError("There is no online hba cards.")
    old_mpath_conf = utils_npiv.prepare_multipath_conf(
        conf_path=mpath_conf_path, replace_existing=True)
    first_online_hba = online_hbas[0]
    old_vhbas = utils_npiv.find_hbas("vhba")
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    virt_vm = libvirt_vm.VM(vm_name, vm.params, vm.root_dir,
                            vm.address_cache)
    old_disks = virt_vm.get_disks()

    if vm.is_alive():
        vm.destroy(gracefully=False)
    if pool_name:
        pool_ins = libvirt_storage.StoragePool()
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    try:
        # prepare a fc lun
        if vd_format in ['scsi_vol']:
            if pool_ins.pool_exists(pool_name):
                raise exceptions.TestFail("Pool %s already exist" % pool_name)
            prepare_scsi_pool(pool_name, wwnn, wwpn,
                              first_online_hba, pool_target)
            utils_misc.wait_for(lambda: utils_npiv.is_vhbas_added(old_vhbas),
                                timeout=_TIMEOUT)
            if not utils_npiv.is_vhbas_added(old_vhbas):
                raise exceptions.TestFail("vHBA not successfully created")
            current_vhbas = utils_npiv.find_hbas("vhba")
            new_vhba = list(set(current_vhbas).difference(set(old_vhbas)))[0]
            new_vhbas.append(new_vhba)
            # Keep only the digits of the vhba name to get its scsi bus id.
            new_vhba_scsibus = re.sub(r"\D", "", new_vhba)
            utils_misc.wait_for(lambda: get_blks_by_scsi(new_vhba_scsibus),
                                timeout=_TIMEOUT)
            new_blks = get_blks_by_scsi(new_vhba_scsibus)
            if not new_blks:
                # Fixed: the value must be %-formatted into the message, not
                # passed as a second positional argument to TestFail.
                raise exceptions.TestFail("block device not found with scsi_%s"
                                          % new_vhba_scsibus)
            vol_list = utlv.get_vol_list(pool_name, vol_check=True,
                                         timeout=_TIMEOUT * 3)
            path_to_blk = list(vol_list.values())[0]
        elif vd_format in ['mpath', 'by_path']:
            old_mpath_devs = utils_npiv.find_mpath_devs()
            new_vhba = utils_npiv.nodedev_create_from_xml(
                {"nodedev_parent": first_online_hba,
                 "scsi_wwnn": wwnn,
                 "scsi_wwpn": wwpn})
            utils_misc.wait_for(lambda: utils_npiv.is_vhbas_added(old_vhbas),
                                timeout=_TIMEOUT * 2)
            if not new_vhba:
                raise exceptions.TestFail("vHBA not successfully generated.")
            new_vhbas.append(new_vhba)
            if vd_format == "mpath":
                utils_misc.wait_for(
                    lambda: utils_npiv.is_mpath_devs_added(old_mpath_devs),
                    timeout=_TIMEOUT * 5)
                if not utils_npiv.is_mpath_devs_added(old_mpath_devs):
                    raise exceptions.TestFail("mpath dev not generated.")
                cur_mpath_devs = utils_npiv.find_mpath_devs()
                new_mpath_devs = list(
                    set(cur_mpath_devs).difference(set(old_mpath_devs)))
                logging.debug("The newly added mpath dev is: %s",
                              new_mpath_devs)
                path_to_blk = "/dev/mapper/" + new_mpath_devs[0]
            elif vd_format == "by_path":
                new_vhba_scsibus = re.sub(r"\D", "", new_vhba)
                utils_misc.wait_for(lambda: get_blks_by_scsi(new_vhba_scsibus),
                                    timeout=_TIMEOUT)
                new_blks = get_blks_by_scsi(new_vhba_scsibus)
                if not new_blks:
                    raise exceptions.TestFail("blk dev not found with scsi_%s"
                                              % new_vhba_scsibus)
                first_blk_dev = new_blks[0]
                utils_misc.wait_for(
                    lambda: get_symbols_by_blk(first_blk_dev),
                    timeout=_TIMEOUT)
                lun_sl = get_symbols_by_blk(first_blk_dev)
                if not lun_sl:
                    raise exceptions.TestFail(
                        "lun symbolic links not found in "
                        "/dev/disk/by-path/ for %s" % first_blk_dev)
                lun_dev = lun_sl[0]
                path_to_blk = os.path.join(_BY_PATH_DIR, lun_dev)
            else:
                pass
        else:
            # Fixed missing space between the concatenated message pieces.
            raise exceptions.TestSkipError("Not provided how to pass "
                                           "virtual disk to VM.")

        # create qcow2 file on the block device with specified size
        if path_to_blk:
            cmd = "qemu-img create -f qcow2 %s %s" % (path_to_blk, disk_size)
            try:
                process.run(cmd, shell=True)
            # Fixed: avocado raises process.CmdError (capital C); the old
            # `process.cmdError` name would itself raise AttributeError.
            except process.CmdError as detail:
                raise exceptions.TestFail(
                    "Fail to create qcow2 on blk dev: %s" % detail)
        else:
            raise exceptions.TestFail("Don't have a valid path to blk dev.")

        # prepare disk xml
        if "vol" in vd_format:
            vol_list = utlv.get_vol_list(pool_name, vol_check=True,
                                         timeout=_TIMEOUT * 3)
            test_vol = list(vol_list.keys())[0]
            disk_params = {'type_name': disk_type,
                           'target_dev': device_target,
                           'target_bus': target_bus,
                           'source_pool': pool_name,
                           'source_volume': test_vol,
                           'driver_type': driver_type}
        else:
            disk_params = {'type_name': disk_type,
                           'device': disk_device,
                           'driver_name': driver_name,
                           'driver_type': driver_type,
                           'source_file': path_to_blk,
                           'target_dev': device_target,
                           'target_bus': target_bus}
        if vm.is_alive():
            vm.destroy(gracefully=False)
        new_disk = disk.Disk()
        new_disk.xml = open(utlv.create_disk_xml(disk_params)).read()

        # start vm with the virtual disk
        # NOTE(review): list-style .append() usually returns None — confirm
        # VMXMLDevices.append returns the device list; otherwise this assigns
        # None to vmxml.devices.
        vmxml.devices = vmxml.devices.append(new_disk)
        vmxml.sync()
        vm.start()
        session = vm.wait_for_login()
        cur_disks = virt_vm.get_disks()
        # Symmetric difference: the one disk present only after hot-add.
        mount_disk = "".join(list(set(old_disks) ^ set(cur_disks)))

        # mkfs and mount disk in vm, create a file on that disk.
        if not mount_disk:
            logging.debug("old_disk: %s, new_disk: %s", old_disks, cur_disks)
            raise exceptions.TestFail("No new disk found in vm.")
        mkfs_and_mount(session, mount_disk)
        create_file_in_vm(session, "/mnt/before_snapshot.txt", "before")

        # virsh snapshot-create-as vm s --disk-only --diskspec vda,file=path
        if snapshot_disk_only:
            vm_blks = list(vm.get_disk_devices().keys())
            options = "%s --disk-only" % snapshot_name
            for vm_blk in vm_blks:
                snapshot_file = snapshot_dir + "/" + vm_blk + "." + snapshot_name
                if os.path.exists(snapshot_file):
                    os.remove(snapshot_file)
                options = options + " --diskspec %s,file=%s" % (vm_blk,
                                                                snapshot_file)
        else:
            options = snapshot_name
        utlv.check_exit_status(virsh.snapshot_create_as(vm_name, options))

        # check virsh snapshot-list
        logging.debug("Running: snapshot-list %s", vm_name)
        snapshot_list = virsh.snapshot_list(vm_name)
        logging.debug("snapshot list is: %s", snapshot_list)
        if not snapshot_list:
            raise exceptions.TestFail("snapshots not found after creation.")

        # snapshot-revert doesn't support external snapshot for now. so
        # only check this with internal snapshot.
        if not snapshot_disk_only:
            create_file_in_vm(session, "/mnt/after_snapshot.txt", "after")
            logging.debug("Running: snapshot-revert %s %s",
                          vm_name, snapshot_name)
            utlv.check_exit_status(
                virsh.snapshot_revert(vm_name, snapshot_name))
            session = vm.wait_for_login()
            # The file created after the snapshot must be gone after revert.
            file_existence, file_content = get_file_in_vm(
                session, "/mnt/after_snapshot.txt")
            logging.debug("file exist = %s, file content = %s",
                          file_existence, file_content)
            if file_existence:
                raise exceptions.TestFail("The file created "
                                          "after snapshot still exists.")
            # The file created before the snapshot must survive the revert.
            file_existence, file_content = get_file_in_vm(
                session, "/mnt/before_snapshot.txt")
            logging.debug("file eixst = %s, file content = %s",
                          file_existence, file_content)
            if ((not file_existence) or (file_content.strip() != "before")):
                raise exceptions.TestFail("The file created "
                                          "before snapshot is lost.")

        # delete snapshots
        # if diskonly, delete --metadata and remove files
        # if not diskonly, delete snapshot
        if snapshot_disk_only:
            options = "--metadata"
        else:
            options = ""
        for snap in snapshot_list:
            logging.debug("deleting snapshot %s with options %s",
                          snap, options)
            result = virsh.snapshot_delete(vm_name, snap, options)
            logging.debug("result of snapshot-delete: %s",
                          result.stdout.strip())
            if snapshot_disk_only:
                vm_blks = list(vm.get_disk_devices().keys())
                for vm_blk in vm_blks:
                    # NOTE(review): files were created with `snapshot_name`;
                    # this removes "<blk>.<snap>" — the names only match when
                    # snapshot-list returns snapshot_name. Confirm.
                    snapshot_file = snapshot_dir + "/" + vm_blk + "." + snap
                    if os.path.exists(snapshot_file):
                        os.remove(snapshot_file)
        snapshot_list = virsh.snapshot_list(vm_name)
        if snapshot_list:
            raise exceptions.TestFail("Snapshot not deleted: %s"
                                      % snapshot_list)
    except Exception as detail:
        raise exceptions.TestFail("exception happens: %s" % detail)
    finally:
        logging.debug("Start to clean up env...")
        vmxml_backup.sync()
        if pool_ins and pool_ins.pool_exists(pool_name):
            virsh.pool_destroy(pool_name)
        for new_vhba in new_vhbas:
            virsh.nodedev_destroy(new_vhba)
        utils_npiv.restart_multipathd()
        if old_mpath_conf:
            utils_npiv.prepare_multipath_conf(
                conf_path=mpath_conf_path,
                conf_content=old_mpath_conf,
                replace_existing=True)
        if not original_mpath_conf_exist and os.path.exists(mpath_conf_path):
            os.remove(mpath_conf_path)
# Compare the pre/post XML dumps line by line (whitespace-insensitive).
# NOTE(review): map(None, a, b) is Python-2-only zip_longest behavior; on
# Python 3 this raises TypeError — confirm this file targets Python 2.
elems = map(None, pre_xml.splitlines(), after_xml.splitlines())
for pre_line, aft_line in elems:
    # NOTE(review): when the two dumps differ in length, map(None, ...) pads
    # with None, and .lstrip() on None would raise before the None checks
    # below — confirm equal-length dumps are guaranteed here.
    if pre_line.lstrip().strip() != aft_line.lstrip().strip():
        if pre_line is not None:
            logging.debug("diff before='%s'", pre_line.lstrip().strip())
        if aft_line is not None:
            logging.debug("diff after='%s'", aft_line.lstrip().strip())
        raise error.TestFail("Failed xml before/after comparison")

snapshot_oldlist = None
try:
    # Create disk snapshot before all to make the origin image clean
    logging.debug("Create snap-temp --disk-only")
    ret = virsh.snapshot_create_as(vm_name, "snap-temp --disk-only")
    if ret.exit_status != 0:
        raise error.TestFail("Fail to create temp snap, Error: %s"
                             % ret.stderr.strip())

    # Create snapshots with each of the two configured option strings.
    for opt in [snap_create_opt1, snap_create_opt2]:
        logging.debug("...use option %s", opt)
        result = virsh.snapshot_create_as(vm_name, opt)
        if result.exit_status:
            raise error.TestFail("Failed to create snapshot. Error:%s."
                                 % result.stderr.strip())
        # Brief pause so consecutive snapshots get distinct timestamps.
        time.sleep(1)
    snapshot_oldlist = virsh.snapshot_list(vm_name)
def run(test, params, env):
    """
    Test command: virsh blockpull <domain> <path>

    1) Prepare test environment.
    2) Populate a disk from its backing image.
    3) Recover test environment.
    4) Check result.
    """

    def make_disk_snapshot(snapshot_take):
        """
        Make external snapshots for disks only.

        :param snapshot_take: snapshots taken.
        """
        for count in range(1, snapshot_take + 1):
            snap_xml = snapshot_xml.SnapshotXML()
            snapshot_name = "snapshot_test%s" % count
            snap_xml.snap_name = snapshot_name
            snap_xml.description = "Snapshot Test %s" % count

            # Add all disks into xml file.
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            disks = vmxml.devices.by_device_tag('disk')
            new_disks = []
            for src_disk_xml in disks:
                disk_xml = snap_xml.SnapDiskXML()
                disk_xml.xmltreefile = src_disk_xml.xmltreefile

                # Skip cdrom
                if disk_xml.device == "cdrom":
                    continue
                del disk_xml.device
                del disk_xml.address
                disk_xml.snapshot = "external"
                disk_xml.disk_name = disk_xml.target['dev']

                # Only qcow2 works as external snapshot file format, update it
                # here
                driver_attr = disk_xml.driver
                driver_attr.update({'type': 'qcow2'})
                disk_xml.driver = driver_attr

                # Fixed: initialize per disk so a value from a previous
                # iteration can never leak in (and no branch leaves it
                # unbound).
                hosts = None
                new_attrs = disk_xml.source.attrs
                if 'file' in disk_xml.source.attrs:
                    file_name = disk_xml.source.attrs['file']
                    new_file = "%s.snap%s" % (file_name.split('.')[0], count)
                    snapshot_external_disks.append(new_file)
                    new_attrs.update({'file': new_file})
                elif ('name' in disk_xml.source.attrs and
                      disk_src_protocol == 'gluster'):
                    src_name = disk_xml.source.attrs['name']
                    new_name = "%s.snap%s" % (src_name.split('.')[0], count)
                    new_attrs.update({'name': new_name})
                    snapshot_external_disks.append(new_name)
                    hosts = disk_xml.source.hosts
                elif ('dev' in disk_xml.source.attrs or
                      'name' in disk_xml.source.attrs):
                    if (disk_xml.type_name == 'block' or
                            disk_src_protocol in ['iscsi', 'rbd']):
                        # Use local file as external snapshot target for block
                        # and iscsi network type.
                        # As block device will be treat as raw format by
                        # default, it's not fit for external disk snapshot
                        # target. A work around solution is use qemu-img again
                        # with the target.
                        # And external active snapshots are not supported on
                        # 'network' disks using 'iscsi' protocol
                        disk_xml.type_name = 'file'
                        if 'dev' in new_attrs:
                            del new_attrs['dev']
                        elif 'name' in new_attrs:
                            del new_attrs['name']
                            del new_attrs['protocol']
                        new_file = "%s/blk_src_file.snap%s" % (tmp_dir, count)
                        snapshot_external_disks.append(new_file)
                        new_attrs.update({'file': new_file})

                new_src_dict = {"attrs": new_attrs}
                if hosts:
                    new_src_dict.update({"hosts": hosts})
                disk_xml.source = disk_xml.new_disk_source(**new_src_dict)

                new_disks.append(disk_xml)

            snap_xml.set_disks(new_disks)
            snapshot_xml_path = snap_xml.xml
            logging.debug("The snapshot xml is: %s", snap_xml.xmltreefile)

            options = "--disk-only --xmlfile %s " % snapshot_xml_path
            snapshot_result = virsh.snapshot_create(
                vm_name, options, debug=True)
            if snapshot_result.exit_status != 0:
                test.fail(snapshot_result.stderr)

            # Create a file flag in VM after each snapshot
            flag_file = tempfile.NamedTemporaryFile(prefix="snapshot_test_",
                                                    dir="/tmp")
            file_path = flag_file.name
            flag_file.close()

            status, output = session.cmd_status_output("touch %s" % file_path)
            if status:
                test.fail("Touch file in vm failed. %s" % output)

            snapshot_flag_files.append(file_path)

    def get_first_disk_source():
        """
        Get disk source of first device

        :return: first disk of first device.
        """
        first_device = vm.get_first_disk_devices()
        return first_device['source']

    def make_relative_path_backing_files():
        """
        Create backing chain files of relative path.

        :return: absolute path of top active file
        """
        first_disk_source = get_first_disk_source()
        basename = os.path.basename(first_disk_source)
        root_dir = os.path.dirname(first_disk_source)
        cmd = "mkdir -p %s" % os.path.join(root_dir, '{b..d}')
        ret = process.run(cmd, shell=True)
        libvirt.check_exit_status(ret)

        # Make three external relative path backing files.
        backing_file_dict = collections.OrderedDict()
        backing_file_dict["b"] = "../%s" % basename
        backing_file_dict["c"] = "../b/b.img"
        backing_file_dict["d"] = "../c/c.img"
        for key, value in list(backing_file_dict.items()):
            backing_file_path = os.path.join(root_dir, key)
            cmd = ("cd %s && qemu-img create -f qcow2 -o "
                   "backing_file=%s,backing_fmt=qcow2 %s.img"
                   % (backing_file_path, value, key))
            ret = process.run(cmd, shell=True)
            libvirt.check_exit_status(ret)
        return os.path.join(backing_file_path, "d.img")

    def check_chain_backing_files(disk_src_file, expect_backing_file=False):
        """
        Check backing chain files of relative path after blockcommit.

        :param disk_src_file: first disk src file.
        :param expect_backing_file: whether it expect to have backing files.
        """
        first_disk_source = get_first_disk_source()
        # Validate source image need refer to original one after active
        # blockcommit
        if not expect_backing_file and disk_src_file not in first_disk_source:
            test.fail("The disk image path:%s doesn't include the origin "
                      "image: %s" % (first_disk_source, disk_src_file))
        # Validate source image doesn't have backing files after active
        # blockcommit
        cmd = "qemu-img info %s --backing-chain" % first_disk_source
        if qemu_img_locking_feature_support:
            # qemu >= 2.10 locks the image; -U forces a shared read.
            cmd = "qemu-img info -U %s --backing-chain" % first_disk_source
        ret = process.run(cmd, shell=True).stdout_text.strip()
        if expect_backing_file:
            if 'backing file' not in ret:
                test.fail("The disk image doesn't have backing files")
            else:
                logging.debug("The actual qemu-img output:%s\n", ret)
        else:
            if 'backing file' in ret:
                test.fail("The disk image still have backing files")
            else:
                logging.debug("The actual qemu-img output:%s\n", ret)

    # MAIN TEST CODE ###
    # Process cartesian parameters
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    snapshot_take = int(params.get("snapshot_take", '0'))
    needs_agent = "yes" == params.get("needs_agent", "yes")
    replace_vm_disk = "yes" == params.get("replace_vm_disk", "no")
    snap_in_mirror = "yes" == params.get("snap_in_mirror", 'no')
    snap_in_mirror_err = "yes" == params.get("snap_in_mirror_err", 'no')
    bandwidth = params.get("bandwidth", None)
    with_timeout = ("yes" == params.get("with_timeout_option", "no"))
    status_error = ("yes" == params.get("status_error", "no"))
    base_option = params.get("base_option", None)
    keep_relative = "yes" == params.get("keep_relative", 'no')
    virsh_dargs = {'debug': True}

    # Check whether qemu-img need add -U suboption since locking feature was
    # added afterwards qemu-2.10
    qemu_img_locking_feature_support = \
        libvirt_storage.check_qemu_image_lock_support()
    backing_file_relative_path = \
        "yes" == params.get("backing_file_relative_path", "no")

    # Process domain disk device parameters
    disk_type = params.get("disk_type")
    disk_target = params.get("disk_target", 'vda')
    disk_src_protocol = params.get("disk_source_protocol")
    restart_tgtd = params.get("restart_tgtd", "no")
    vol_name = params.get("vol_name")
    tmp_dir = data_dir.get_tmp_dir()
    pool_name = params.get("pool_name", "gluster-pool")
    brick_path = os.path.join(tmp_dir, pool_name)

    # A backup of original vm
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    logging.debug("original xml is %s", vmxml_backup)

    # Abort the test if there are snapshots already
    existing_snaps = virsh.snapshot_list(vm_name)
    if len(existing_snaps) != 0:
        test.fail("There are snapshots created for %s already" % vm_name)

    snapshot_external_disks = []
    # Prepare a blank params to confirm if delete the configure at the end of
    # the test
    ceph_cfg = ""
    try:
        if disk_src_protocol == 'iscsi' and disk_type == 'network':
            if not libvirt_version.version_compare(1, 0, 4):
                test.cancel("'iscsi' disk doesn't support in"
                            " current libvirt version.")
        if disk_src_protocol == 'gluster':
            if not libvirt_version.version_compare(1, 2, 7):
                test.cancel("Snapshot on glusterfs not"
                            " support in current "
                            "version. Check more info "
                            " with https://bugzilla.re"
                            "dhat.com/show_bug.cgi?id="
                            "1017289")

        # Set vm xml and guest agent
        if replace_vm_disk:
            if disk_src_protocol == "rbd" and disk_type == "network":
                src_host = params.get("disk_source_host", "EXAMPLE_HOSTS")
                mon_host = params.get("mon_host", "EXAMPLE_MON_HOST")
                # Create config file if it doesn't exist
                ceph_cfg = ceph.create_config_file(mon_host)
                if src_host.count("EXAMPLE") or mon_host.count("EXAMPLE"):
                    test.cancel("Please provide ceph host first.")
            if backing_file_relative_path:
                if vm.is_alive():
                    vm.destroy(gracefully=False)
                first_src_file = get_first_disk_source()
                blk_source_image = os.path.basename(first_src_file)
                blk_source_folder = os.path.dirname(first_src_file)
                replace_disk_image = make_relative_path_backing_files()
                params.update({'disk_source_name': replace_disk_image,
                               'disk_type': 'file',
                               'disk_src_protocol': 'file'})
                vm.start()
            libvirt.set_vm_disk(vm, params, tmp_dir)

        if needs_agent:
            vm.prepare_guest_agent()

        # The first disk is supposed to include OS
        # We will perform blockpull operation for it.
        first_disk = vm.get_first_disk_devices()
        blk_source = first_disk['source']
        blk_target = first_disk['target']
        snapshot_flag_files = []

        # get a vm session before snapshot
        session = vm.wait_for_login()
        # do snapshot
        make_disk_snapshot(snapshot_take)

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug("The domain xml after snapshot is %s", vmxml)

        # snapshot src file list
        snap_src_lst = [blk_source]
        snap_src_lst += snapshot_external_disks

        if snap_in_mirror:
            blockpull_options = "--bandwidth 1"
        else:
            blockpull_options = "--wait --verbose"

        if with_timeout:
            blockpull_options += " --timeout 1"

        if bandwidth:
            blockpull_options += " --bandwidth %s" % bandwidth

        if base_option == "async":
            blockpull_options += " --async"

        base_image = None
        base_index = None
        if (libvirt_version.version_compare(1, 2, 4) or
                disk_src_protocol == 'gluster'):
            # When libvirt is >= 1.2.4 or the source protocol is gluster, the
            # base image is referenced by disk index, which depends on base
            # option: shallow, base, top respectively
            if base_option == "shallow":
                base_index = 1
                base_image = "%s[%s]" % (disk_target, base_index)
            elif base_option == "base":
                base_index = 2
                base_image = "%s[%s]" % (disk_target, base_index)
            elif base_option == "top":
                base_index = 0
                base_image = "%s[%s]" % (disk_target, base_index)
        else:
            if base_option == "shallow":
                base_image = snap_src_lst[3]
            elif base_option == "base":
                base_image = snap_src_lst[2]
            elif base_option == "top":
                base_image = snap_src_lst[4]

        if base_option and base_image:
            blockpull_options += " --base %s" % base_image

        if keep_relative:
            blockpull_options += " --keep-relative"

        if backing_file_relative_path:
            # Use block commit to shorten previous snapshots.
            blockcommit_options = \
                " --active --verbose --shallow --pivot --keep-relative"
            for count in range(1, snapshot_take + 1):
                res = virsh.blockcommit(vm_name, blk_target,
                                        blockcommit_options, **virsh_dargs)
                libvirt.check_exit_status(res, status_error)

            # Use block pull with --keep-relative flag, and reset base_index
            # to 2.
            base_index = 2
            for count in range(1, snapshot_take):
                # If block pull operations are more than or equal to 3, it
                # need reset base_index to 1.
                if count >= 3:
                    base_index = 1
                base_image = "%s[%s]" % (disk_target, base_index)
                blockpull_options = \
                    " --wait --verbose --base %s --keep-relative" % base_image
                res = virsh.blockpull(vm_name, blk_target,
                                      blockpull_options, **virsh_dargs)
                libvirt.check_exit_status(res, status_error)
            # Check final backing chain files.
            check_chain_backing_files(blk_source_image, True)
            return

        # Run test case
        result = virsh.blockpull(vm_name, blk_target,
                                 blockpull_options, **virsh_dargs)
        status = result.exit_status

        # If pull job aborted as timeout, the exit status is different
        # on RHEL6(0) and RHEL7(1)
        if with_timeout and 'Pull aborted' in result.stdout.strip():
            if libvirt_version.version_compare(1, 1, 1):
                status_error = True
            else:
                status_error = False

        # Check status_error
        libvirt.check_exit_status(result, status_error)

        if not status and not with_timeout:
            if snap_in_mirror:
                snap_mirror_path = "%s/snap_mirror" % tmp_dir
                snap_options = "--diskspec vda,snapshot=external,"
                snap_options += "file=%s --disk-only" % snap_mirror_path
                snapshot_external_disks.append(snap_mirror_path)
                ret = virsh.snapshot_create_as(vm_name, snap_options,
                                               ignore_status=True,
                                               debug=True)
                libvirt.check_exit_status(ret, snap_in_mirror_err)
                return

            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            disks = vmxml.devices.by_device_tag('disk')
            for disk in disks:
                if disk.target['dev'] != blk_target:
                    continue
                else:
                    disk_xml = disk.xmltreefile
                    break

            logging.debug("after pull the disk xml is: %s", disk_xml)
            if libvirt_version.version_compare(1, 2, 4):
                err_msg = "Domain image backing chain check failed"
                if not base_option or "async" in base_option:
                    chain_lst = snap_src_lst[-1:]
                    ret = check_chain_xml(disk_xml, chain_lst)
                    if not ret:
                        test.fail(err_msg)
                # Fixed: the original `elif "base" or "shallow" in
                # base_option:` is always truthy ("base" is a non-empty
                # string); test both memberships explicitly.
                elif "base" in base_option or "shallow" in base_option:
                    chain_lst = snap_src_lst[::-1]
                    if not base_index and base_image:
                        base_index = chain_lst.index(base_image)
                    val_tmp = []
                    for i in range(1, base_index):
                        val_tmp.append(chain_lst[i])
                    for i in val_tmp:
                        chain_lst.remove(i)
                    ret = check_chain_xml(disk_xml, chain_lst)
                    if not ret:
                        test.fail(err_msg)

        # If base image is the top layer of snapshot chain,
        # virsh blockpull should fail, return directly
        if base_option == "top":
            return

        # Check flag files
        for flag in snapshot_flag_files:
            status, output = session.cmd_status_output("cat %s" % flag)
            if status:
                test.fail("blockpull failed: %s" % output)

    finally:
        # Remove ceph configure file if created
        if ceph_cfg:
            os.remove(ceph_cfg)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Recover xml of vm.
        vmxml_backup.sync("--snapshots-metadata")

        if not disk_src_protocol or disk_src_protocol != 'gluster':
            for disk in snapshot_external_disks:
                if os.path.exists(disk):
                    os.remove(disk)

        if backing_file_relative_path:
            libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)
            process.run("cd %s && rm -rf b c d" % blk_source_folder,
                        shell=True)

        libvirtd = utils_libvirtd.Libvirtd()
        if disk_src_protocol == 'iscsi':
            libvirt.setup_or_cleanup_iscsi(is_setup=False,
                                           restart_tgtd=restart_tgtd)
        elif disk_src_protocol == 'gluster':
            libvirt.setup_or_cleanup_gluster(False, vol_name, brick_path)
            libvirtd.restart()
        elif disk_src_protocol == 'netfs':
            restore_selinux = params.get('selinux_status_bak')
            libvirt.setup_or_cleanup_nfs(is_setup=False,
                                         restore_selinux=restore_selinux)
def run(test, params, env): """ Test command: virsh blockcopy. This command can copy a disk backing image chain to dest. 1. Positive testing 1.1 Copy a disk to a new image file. 1.2 Reuse existing destination copy. 1.3 Valid blockcopy timeout and bandwidth test. 2. Negative testing 2.1 Copy a disk to a non-exist directory. 2.2 Copy a disk with invalid options. 2.3 Do block copy for a persistent domain. """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) target = params.get("target_disk", "") replace_vm_disk = "yes" == params.get("replace_vm_disk", "no") disk_source_protocol = params.get("disk_source_protocol") disk_type = params.get("disk_type") pool_name = params.get("pool_name") image_size = params.get("image_size") emu_image = params.get("emulated_image") copy_to_nfs = "yes" == params.get("copy_to_nfs", "no") mnt_path_name = params.get("mnt_path_name") options = params.get("blockcopy_options", "") bandwidth = params.get("blockcopy_bandwidth", "") bandwidth_byte = "yes" == params.get("bandwidth_byte", "no") reuse_external = "yes" == params.get("reuse_external", "no") persistent_vm = params.get("persistent_vm", "no") status_error = "yes" == params.get("status_error", "no") active_error = "yes" == params.get("active_error", "no") active_snap = "yes" == params.get("active_snap", "no") active_save = "yes" == params.get("active_save", "no") check_state_lock = "yes" == params.get("check_state_lock", "no") check_finish_job = "yes" == params.get("check_finish_job", "yes") with_shallow = "yes" == params.get("with_shallow", "no") with_blockdev = "yes" == params.get("with_blockdev", "no") setup_libvirt_polkit = "yes" == params.get('setup_libvirt_polkit') bug_url = params.get("bug_url", "") timeout = int(params.get("timeout", 1200)) relative_path = params.get("relative_path") rerun_flag = 0 blkdev_n = None back_n = 'blockdev-backing-iscsi' snapshot_external_disks = [] snapshots_take = int(params.get("snapshots_take", '0')) external_disk_only_snapshot = "yes" == 
params.get( "external_disk_only_snapshot", "no") enable_iscsi_auth = "yes" == params.get("enable_iscsi_auth", "no") # Skip/Fail early if with_blockdev and not libvirt_version.version_compare(1, 2, 13): raise exceptions.TestSkipError("--blockdev option not supported in " "current version") if not target: raise exceptions.TestSkipError("Require target disk to copy") if setup_libvirt_polkit and not libvirt_version.version_compare(1, 1, 1): raise exceptions.TestSkipError("API acl test not supported in current" " libvirt version") if copy_to_nfs and not libvirt_version.version_compare(1, 1, 1): raise exceptions.TestSkipError("Bug will not fix: %s" % bug_url) if bandwidth_byte and not libvirt_version.version_compare(1, 3, 3): raise exceptions.TestSkipError("--bytes option not supported in " "current version") if relative_path == "yes" and not libvirt_version.version_compare(3, 0, 0): test.cancel( "Forbid using relative path or file name only is added since libvirt-3.0.0" ) if "--transient-job" in options and not libvirt_version.version_compare( 4, 5, 0): test.cancel( "--transient-job option is supported until libvirt 4.5.0 version") # Check the source disk if vm_xml.VMXML.check_disk_exist(vm_name, target): logging.debug("Find %s in domain %s", target, vm_name) else: raise exceptions.TestFail("Can't find %s in domain %s" % (target, vm_name)) original_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) tmp_dir = data_dir.get_tmp_dir() # Prepare dest path params dest_path = params.get("dest_path", "") dest_format = params.get("dest_format", "") # Ugh... this piece of chicanery brought to you by the QemuImg which # will "add" the 'dest_format' extension during the check_format code. # So if we create the file with the extension and then remove it when # doing the check_format later, then we avoid erroneous failures. 
dest_extension = "" if dest_format != "": dest_extension = ".%s" % dest_format # Prepare for --reuse-external option if reuse_external: options += "--reuse-external --wait" # Set rerun_flag=1 to do blockcopy twice, and the first time created # file can be reused in the second time if no dest_path given # This will make sure the image size equal to original disk size if dest_path == "/path/non-exist": if os.path.exists(dest_path) and not os.path.isdir(dest_path): os.remove(dest_path) else: rerun_flag = 1 # Prepare other options if dest_format == "raw": options += " --raw" if with_blockdev: options += " --blockdev" if len(bandwidth): options += " --bandwidth %s" % bandwidth if bandwidth_byte: options += " --bytes" if with_shallow: options += " --shallow" # Prepare acl options uri = params.get("virsh_uri") unprivileged_user = params.get('unprivileged_user') if unprivileged_user: if unprivileged_user.count('EXAMPLE'): unprivileged_user = '******' extra_dict = { 'uri': uri, 'unprivileged_user': unprivileged_user, 'debug': True, 'ignore_status': True, 'timeout': timeout } libvirtd_utl = utils_libvirtd.Libvirtd() libvirtd_log_path = os.path.join(data_dir.get_tmp_dir(), "libvirt_daemons.log") libvirtd_conf_dict = { "log_filter": '"3:json 1:libvirt 1:qemu"', "log_outputs": '"1:file:%s"' % libvirtd_log_path } logging.debug("the libvirtd conf file content is :\n %s" % libvirtd_conf_dict) libvirtd_conf = utl.customize_libvirt_config(libvirtd_conf_dict) def check_format(dest_path, dest_extension, expect): """ Check the image format :param dest_path: Path of the copy to create :param expect: Expect image format """ # And now because the QemuImg will add the extension for us # we have to remove it here. 
path_noext = dest_path.strip(dest_extension) params['image_name'] = path_noext params['image_format'] = expect image = qemu_storage.QemuImg(params, "/", path_noext) if image.get_format() == expect: logging.debug("%s format is %s", dest_path, expect) else: raise exceptions.TestFail("%s format is not %s" % (dest_path, expect)) def _blockjob_and_libvirtd_chk(cmd_result): """ Raise TestFail when blockcopy fail with block-job-complete error or blockcopy hang with state change lock. This is a specific bug verify, so ignore status_error here. """ failure_msg = "" err_msg = "internal error: unable to execute QEMU command" err_msg += " 'block-job-complete'" if err_msg in cmd_result.stderr: failure_msg += "Virsh cmd error happened: %s\n" % err_msg err_pattern = "Timed out during operation: cannot acquire" err_pattern += " state change lock" ret = chk_libvirtd_log(libvirtd_log_path, err_pattern, "error") if ret: failure_msg += "Libvirtd log error happened: %s\n" % err_pattern if failure_msg: if not libvirt_version.version_compare(1, 3, 2): bug_url_ = "https://bugzilla.redhat.com/show_bug.cgi?id=1197592" failure_msg += "Hit on bug: %s " % bug_url_ test.fail(failure_msg) def _make_snapshot(snapshot_numbers_take): """ Make external disk snapshot :param snapshot_numbers_take: snapshot numbers. """ for count in range(0, snapshot_numbers_take): snap_xml = snapshot_xml.SnapshotXML() snapshot_name = "blockcopy_snap" snap_xml.snap_name = snapshot_name + "_%s" % count snap_xml.description = "blockcopy snapshot" # Add all disks into xml file. 
vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) disks = vmxml.devices.by_device_tag('disk') # Remove non-storage disk such as 'cdrom' for disk in disks: if disk.device != 'disk': disks.remove(disk) new_disks = [] src_disk_xml = disks[0] disk_xml = snap_xml.SnapDiskXML() disk_xml.xmltreefile = src_disk_xml.xmltreefile del disk_xml.device del disk_xml.address disk_xml.snapshot = "external" disk_xml.disk_name = disk_xml.target['dev'] # Only qcow2 works as external snapshot file format, update it # here driver_attr = disk_xml.driver driver_attr.update({'type': 'qcow2'}) disk_xml.driver = driver_attr new_attrs = disk_xml.source.attrs if 'file' in disk_xml.source.attrs: new_file = os.path.join(tmp_dir, "blockcopy_shallow_%s.snap" % count) snapshot_external_disks.append(new_file) new_attrs.update({'file': new_file}) hosts = None elif ('dev' in disk_xml.source.attrs or 'name' in disk_xml.source.attrs or 'pool' in disk_xml.source.attrs): if (disk_xml.type_name == 'block' or disk_source_protocol == 'iscsi'): disk_xml.type_name = 'block' if 'name' in new_attrs: del new_attrs['name'] del new_attrs['protocol'] elif 'pool' in new_attrs: del new_attrs['pool'] del new_attrs['volume'] del new_attrs['mode'] back_path = utl.setup_or_cleanup_iscsi( is_setup=True, is_login=True, image_size="1G", emulated_image=back_n) emulated_iscsi.append(back_n) cmd = "qemu-img create -f qcow2 %s 1G" % back_path process.run(cmd, shell=True) new_attrs.update({'dev': back_path}) hosts = None new_src_dict = {"attrs": new_attrs} if hosts: new_src_dict.update({"hosts": hosts}) disk_xml.source = disk_xml.new_disk_source(**new_src_dict) new_disks.append(disk_xml) snap_xml.set_disks(new_disks) snapshot_xml_path = snap_xml.xml logging.debug("The snapshot xml is: %s" % snap_xml.xmltreefile) options = "--disk-only --xmlfile %s " % snapshot_xml_path snapshot_result = virsh.snapshot_create(vm_name, options, debug=True) if snapshot_result.exit_status != 0: raise exceptions.TestFail(snapshot_result.stderr) 
snap_path = '' save_path = '' emulated_iscsi = [] nfs_cleanup = False try: # Prepare dest_path tmp_file = time.strftime("%Y-%m-%d-%H.%M.%S.img") tmp_file += dest_extension if not dest_path: if enable_iscsi_auth: utils_secret.clean_up_secrets() setup_auth_enabled_iscsi_disk(vm, params) dest_path = os.path.join(tmp_dir, tmp_file) elif with_blockdev: blkdev_n = 'blockdev-iscsi' dest_path = utl.setup_or_cleanup_iscsi(is_setup=True, is_login=True, image_size=image_size, emulated_image=blkdev_n) emulated_iscsi.append(blkdev_n) # Make sure the new disk show up utils_misc.wait_for(lambda: os.path.exists(dest_path), 5) else: if copy_to_nfs: tmp_dir = "%s/%s" % (tmp_dir, mnt_path_name) dest_path = os.path.join(tmp_dir, tmp_file) # Domain disk replacement with desire type if replace_vm_disk: # Calling 'set_vm_disk' is bad idea as it left lots of cleanup jobs # after test, such as pool, volume, nfs, iscsi and so on # TODO: remove this function in the future if disk_source_protocol == 'iscsi': emulated_iscsi.append(emu_image) if disk_source_protocol == 'netfs': nfs_cleanup = True utl.set_vm_disk(vm, params, tmp_dir, test) new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) if with_shallow or external_disk_only_snapshot or enable_iscsi_auth: _make_snapshot(snapshots_take) # Prepare transient/persistent vm if persistent_vm == "no" and vm.is_persistent(): vm.undefine() elif persistent_vm == "yes" and not vm.is_persistent(): new_xml.define() # Run blockcopy command to create destination file if rerun_flag == 1: options1 = "--wait %s --finish --verbose" % dest_format if with_blockdev: options1 += " --blockdev" if with_shallow: options1 += " --shallow" cmd_result = virsh.blockcopy(vm_name, target, dest_path, options1, **extra_dict) status = cmd_result.exit_status if status != 0: raise exceptions.TestFail("Run blockcopy command fail: %s" % cmd_result.stdout.strip() + cmd_result.stderr) elif not os.path.exists(dest_path): raise exceptions.TestFail("Cannot find the created copy") 
if "--transient-job" in options: pool = ThreadPool(processes=1) async_result = pool.apply_async( blockcopy_thread, (vm_name, target, dest_path, options)) kill_blockcopy_process() utl.check_blockjob(vm_name, target) return # Run the real testing command cmd_result = virsh.blockcopy(vm_name, target, dest_path, options, **extra_dict) # check BZ#1197592 _blockjob_and_libvirtd_chk(cmd_result) status = cmd_result.exit_status if not libvirtd_utl.is_running(): raise exceptions.TestFail("Libvirtd service is dead") if not status_error: if status == 0: ret = utils_misc.wait_for( lambda: check_xml(vm_name, target, dest_path, options), 5) if not ret: raise exceptions.TestFail("Domain xml not expected after" " blockcopy") if options.count("--bandwidth"): if options.count('--bytes'): bandwidth += 'B' else: bandwidth += 'M' if not (bandwidth in ['0B', '0M']) and not utl.check_blockjob( vm_name, target, "bandwidth", bandwidth): raise exceptions.TestFail("Check bandwidth failed") val = options.count("--pivot") + options.count("--finish") # Don't wait for job finish when using --byte option val += options.count('--bytes') if val == 0 and check_finish_job: try: finish_job(vm_name, target, timeout) except JobTimeout as excpt: raise exceptions.TestFail("Run command failed: %s" % excpt) if options.count("--raw") and not with_blockdev: check_format(dest_path, dest_extension, dest_format) if active_snap: snap_path = "%s/%s.snap" % (tmp_dir, vm_name) snap_opt = "--disk-only --atomic --no-metadata " snap_opt += "vda,snapshot=external,file=%s" % snap_path ret = virsh.snapshot_create_as(vm_name, snap_opt, ignore_status=True, debug=True) utl.check_exit_status(ret, active_error) if active_save: save_path = "%s/%s.save" % (tmp_dir, vm_name) ret = virsh.save(vm_name, save_path, ignore_status=True, debug=True) utl.check_exit_status(ret, active_error) if check_state_lock: # Run blockjob pivot in subprocess as it will hang # for a while, run blockjob info again to check # job state command = "virsh 
blockjob %s %s --pivot" % (vm_name, target) session = aexpect.ShellSession(command) ret = virsh.blockjob(vm_name, target, "--info") err_info = "cannot acquire state change lock" if err_info in ret.stderr: raise exceptions.TestFail("Hit on bug: %s" % bug_url) utl.check_exit_status(ret, status_error) session.close() else: raise exceptions.TestFail(cmd_result.stdout.strip() + cmd_result.stderr) else: if status: logging.debug("Expect error: %s", cmd_result.stderr) else: # Commit id '4c297728' changed how virsh exits when # unexpectedly failing due to timeout from a fail (1) # to a success(0), so we need to look for a different # marker to indicate the copy aborted. As "stdout: Now # in mirroring phase" could be in stdout which fail the # check, so also do check in libvirtd log to confirm. if options.count("--timeout") and options.count("--wait"): log_pattern = "Copy aborted" if (re.search(log_pattern, cmd_result.stdout.strip()) or chk_libvirtd_log(libvirtd_log_path, log_pattern, "debug")): logging.debug("Found success a timed out block copy") else: raise exceptions.TestFail("Expect fail, but run " "successfully: %s" % bug_url) finally: # Recover VM may fail unexpectedly, we need using try/except to # proceed the following cleanup steps try: # Abort exist blockjob to avoid any possible lock error virsh.blockjob(vm_name, target, '--abort', ignore_status=True) vm.destroy(gracefully=False) # It may take a long time to shutdown the VM which has # blockjob running utils_misc.wait_for( lambda: virsh.domstate(vm_name, ignore_status=True). 
exit_status, 180) if virsh.domain_exists(vm_name): if active_snap or with_shallow: option = "--snapshots-metadata" else: option = None original_xml.sync(option) else: original_xml.define() except Exception as e: logging.error(e) for disk in snapshot_external_disks: if os.path.exists(disk): os.remove(disk) # Clean up libvirt pool, which may be created by 'set_vm_disk' if disk_type == 'volume': virsh.pool_destroy(pool_name, ignore_status=True, debug=True) # Restore libvirtd conf and restart libvirtd libvirtd_conf.restore() libvirtd_utl.restart() if libvirtd_log_path and os.path.exists(libvirtd_log_path): os.unlink(libvirtd_log_path) # Clean up NFS try: if nfs_cleanup: utl.setup_or_cleanup_nfs(is_setup=False) except Exception as e: logging.error(e) # Clean up iSCSI try: for iscsi_n in list(set(emulated_iscsi)): utl.setup_or_cleanup_iscsi(is_setup=False, emulated_image=iscsi_n) # iscsid will be restarted, so give it a break before next loop time.sleep(5) except Exception as e: logging.error(e) if os.path.exists(dest_path): os.remove(dest_path) if os.path.exists(snap_path): os.remove(snap_path) if os.path.exists(save_path): os.remove(save_path) # Restart virtlogd service to release VM log file lock try: path.find_command('virtlogd') process.run('systemctl reset-failed virtlogd') process.run('systemctl restart virtlogd ') except path.CmdNotFoundError: pass
def run_virsh_snapshot_dumpxml(test, params, env):
    """
    Test snapshot-dumpxml command, make sure that the xml you get is correct

    Test scenaries:
    1. live snapshot dump
    2. shutoff snapshot dump
    3. dumpxml with security info
    4. readonly mode

    :param test: test object (used implicitly via avocado error classes)
    :param params: test parameters dict
    :param env: test environment providing the VM object
    """

    if not virsh.has_help_command('snapshot-dumpxml'):
        raise error.TestNAError("This version of libvirt does not support "
                                "the snapshot-dumpxml test")

    vm_name = params.get("main_vm")
    status_error = params.get("status_error", "no")
    passwd = params.get("snapshot_passwd")
    secu_opt = params.get("snapshot_secure_option")
    desc_opt = params.get("snapshot_desc_option")
    mem_opt = params.get("snapshot_mem_option")
    disk_opt = params.get("disk_only_snap")
    snap_name = params.get("snapshot_name", "snap_test")
    readonly = params.get("readonly", False)

    # Do the xml backup for final recovery BEFORE entering the try block:
    # originally this was taken inside try after several fallible steps,
    # so an early failure made the finally clause crash with a NameError
    # on vmxml_backup instead of reporting the real error.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        snap_opt = ""
        opt_dict = {}
        # collect all the parameters at one time
        opt_name = locals()
        for opt in ["snap_name", "desc_opt", "mem_opt", "disk_opt"]:
            if opt_name[opt] is not None:
                # Integrate snapshot create options
                snap_opt = snap_opt + " " + opt_name[opt]

        # Add passwd in guest graphics
        if passwd is not None:
            vm = env.get_vm(vm_name)
            if vm.is_alive():
                vm.destroy()
            vm_xml.VMXML.add_security_info(
                vm_xml.VMXML.new_from_dumpxml(vm_name), passwd)
            vm.start()
            if secu_opt is not None:
                opt_dict['passwd'] = passwd

        logging.debug("snapshot create options are %s", snap_opt)

        # Get state to do snapshot xml state check
        dom_state = virsh.domstate(vm_name).stdout.strip()

        # Create disk snapshot before all to make the origin image clean
        virsh.snapshot_create_as(vm_name, "--disk-only")

        # Create snapshot with options
        snapshot_result = virsh.snapshot_create_as(vm_name, snap_opt,
                                                   readonly=readonly)
        if snapshot_result.exit_status:
            if status_error == "no":
                raise error.TestFail("Failed to create snapshot. Error:%s."
                                     % snapshot_result.stderr.strip())
            elif status_error == "yes":
                logging.info("Create snapshot failed as expected, Error:%s.",
                             snapshot_result.stderr.strip())
                return

        ctime = get_snap_createtime(vm_name, snap_name)

        # Run virsh command for snapshot-dumpxml
        dumpxml_result = virsh.snapshot_dumpxml(vm_name, snap_name, secu_opt)
        if dumpxml_result.exit_status:
            if status_error == "no":
                raise error.TestFail("Failed to dump snapshot xml. Error:%s."
                                     % dumpxml_result.stderr.strip())
            elif status_error == "yes":
                logging.info("Dumpxml snapshot failed as expected, Error:%s.",
                             dumpxml_result.stderr.strip())
                return

        # Record all the parameters in dict at one time
        check_name = locals()
        for var in ["vm_name", "snap_name", "desc_opt", "dom_state",
                    "ctime", "disk_opt"]:
            if check_name[var] is not None:
                opt_dict[var] = check_name[var]
        logging.debug("opt_dict is %s", opt_dict)

        output = dumpxml_result.stdout.strip()
        snapshot_dumpxml_check(output, opt_dict)

    finally:
        # Recovery: drop all snapshots and restore the original xml
        utils_test.libvirt.clean_up_snapshots(vm_name)
        vmxml_backup.sync("--snapshots-metadata")
def run(test, params, env): """ Test command: virsh blockcopy. This command can copy a disk backing image chain to dest. 1. Positive testing 1.1 Copy a disk to a new image file. 1.2 Reuse existing destination copy. 1.3 Valid blockcopy timeout and bandwidth test. 2. Negative testing 2.1 Copy a disk to a non-exist directory. 2.2 Copy a disk with invalid options. 2.3 Do block copy for a persistent domain. """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) target = params.get("target_disk", "") replace_vm_disk = "yes" == params.get("replace_vm_disk", "no") disk_source_protocol = params.get("disk_source_protocol") copy_to_nfs = "yes" == params.get("copy_to_nfs", "no") mnt_path_name = params.get("mnt_path_name") # check the source disk if not target: raise error.TestFail("Require target disk to copy") if vm_xml.VMXML.check_disk_exist(vm_name, target): logging.debug("Find %s in domain %s.", target, vm_name) else: raise error.TestFail("Can't find %s in domain %s." % (target, vm_name)) options = params.get("blockcopy_options", "") bandwidth = params.get("blockcopy_bandwidth", "") default_timeout = params.get("default_timeout", "300") reuse_external = "yes" == params.get("reuse_external", "no") persistent_vm = params.get("persistent_vm", "no") status_error = "yes" == params.get("status_error", "no") active_error = "yes" == params.get("active_error", "no") active_snap = "yes" == params.get("active_snap", "no") active_save = "yes" == params.get("active_save", "no") check_state_lock = "yes" == params.get("check_state_lock", "no") bug_url = params.get("bug_url", "") timeout = int(params.get("timeout", 1200)) rerun_flag = 0 original_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) tmp_dir = data_dir.get_tmp_dir() # Prepare dest path params dest_path = params.get("dest_path", "") dest_format = params.get("dest_format", "") # Ugh... this piece of chicanery brought to you by the QemuImg which # will "add" the 'dest_format' extension during the check_format code. 
# So if we create the file with the extension and then remove it when # doing the check_format later, then we avoid erroneous failures. dest_extension = "" if dest_format != "": dest_extension = ".%s" % dest_format if not dest_path: tmp_file = time.strftime("%Y-%m-%d-%H.%M.%S.img") tmp_file += dest_extension if copy_to_nfs: tmp_dir = "%s/%s" % (tmp_dir, mnt_path_name) dest_path = os.path.join(tmp_dir, tmp_file) # Prepare for --reuse-external option if reuse_external: options += "--reuse-external" # Set rerun_flag=1 to do blockcopy twice, and the first time created # file can be reused in the second time if no dest_path given # This will make sure the image size equal to original disk size if dest_path == "/path/non-exist": if os.path.exists(dest_path) and not os.path.isdir(dest_path): os.remove(dest_path) else: rerun_flag = 1 # Prepare other options if dest_format == "raw": options += "--raw" if len(bandwidth): options += "--bandwidth %s" % bandwidth # Prepare acl options uri = params.get("virsh_uri") unprivileged_user = params.get('unprivileged_user') if unprivileged_user: if unprivileged_user.count('EXAMPLE'): unprivileged_user = '******' if not libvirt_version.version_compare(1, 1, 1): if params.get('setup_libvirt_polkit') == 'yes': raise error.TestNAError("API acl test not supported in current" " libvirt version.") if not copy_to_nfs: raise error.TestNAError("Bug will not fix:" " https://bugzilla.redhat.com/show_bug." 
"cgi?id=924151") extra_dict = {'uri': uri, 'unprivileged_user': unprivileged_user, 'debug': True, 'ignore_status': True, 'timeout': timeout} libvirtd_utl = utils_libvirtd.Libvirtd() libvirtd_conf = utils_config.LibvirtdConfig() libvirtd_conf["log_filters"] = '"3:json 1:libvirt 1:qemu"' libvirtd_log_path = os.path.join(test.tmpdir, "libvirtd.log") libvirtd_conf["log_outputs"] = '"1:file:%s"' % libvirtd_log_path logging.debug("the libvirtd config file content is:\n %s" % libvirtd_conf) libvirtd_utl.restart() def check_format(dest_path, dest_extension, expect): """ Check the image format :param dest_path: Path of the copy to create :param expect: Expect image format """ # And now because the QemuImg will add the extension for us # we have to remove it here. path_noext = dest_path.strip(dest_extension) params['image_name'] = path_noext params['image_format'] = expect image = qemu_storage.QemuImg(params, "/", path_noext) if image.get_format() == expect: logging.debug("%s format is %s.", dest_path, expect) else: raise error.TestFail("%s format is not %s." 
% (dest_path, expect)) def blockcopy_chk(): """ Raise TestFail when blockcopy hang with state change lock """ err_pattern = "Timed out during operation: cannot acquire" err_pattern += " state change lock" ret = chk_libvirtd_log(libvirtd_log_path, err_pattern, "error") if ret: raise error.TestFail("Hit on bug: %s" % bug_url) snap_path = '' save_path = '' try: # Domain disk replacement with desire type if replace_vm_disk: utl.set_vm_disk(vm, params, tmp_dir) new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) # Prepare transient/persistent vm if persistent_vm == "no" and vm.is_persistent(): vm.undefine() elif persistent_vm == "yes" and not vm.is_persistent(): new_xml.define() # Run blockcopy command if rerun_flag == 1: options1 = "--wait --raw --finish --verbose" cmd_result = virsh.blockcopy(vm_name, target, dest_path, options1, **extra_dict) status = cmd_result.exit_status if status != 0: raise error.TestFail("Run blockcopy command fail.") elif not os.path.exists(dest_path): raise error.TestFail("Cannot find the created copy.") cmd_result = virsh.blockcopy(vm_name, target, dest_path, options, **extra_dict) status = cmd_result.exit_status if not libvirtd_utl.is_running(): raise error.TestFail("Libvirtd service is dead.") if not status_error: blockcopy_chk() if status == 0: ret = utils_misc.wait_for( lambda: check_xml(vm_name, target, dest_path, options), 5) if not ret: raise error.TestFail("Domain xml not expected after" " blockcopy") if options.count("--bandwidth"): utl.check_blockjob(vm_name, target, "bandwidth", bandwidth) if check_state_lock: # Run blockjob pivot in subprocess as it will hang # for a while, run blockjob info again to check # job state command = "virsh blockjob %s %s --pivot" % (vm_name, target) session = aexpect.ShellSession(command) ret = virsh.blockjob(vm_name, target, "--info") err_info = "cannot acquire state change lock" if err_info in ret.stderr: raise error.TestFail("Hit on bug: %s" % bug_url) utl.check_exit_status(ret, status_error) 
session.close() val = options.count("--pivot") + options.count("--finish") if val == 0: try: finish_job(vm_name, target, default_timeout) except JobTimeout, excpt: raise error.TestFail("Run command failed: %s" % excpt) if options.count("--raw"): check_format(dest_path, dest_extension, dest_format) if active_snap: snap_path = "%s/%s.snap" % (tmp_dir, vm_name) snap_opt = "--disk-only --atomic --no-metadata " snap_opt += "vda,snapshot=external,file=%s" % snap_path ret = virsh.snapshot_create_as(vm_name, snap_opt, ignore_statues=True, debug=True) utl.check_exit_status(ret, active_error) if active_save: save_path = "%s/%s.save" % (tmp_dir, vm_name) ret = virsh.save(vm_name, save_path, ignore_statues=True, debug=True) utl.check_exit_status(ret, active_error) else: err_msg = "internal error: unable to execute QEMU command" err_msg += " 'block-job-complete'" if err_msg in cmd_result.stderr: raise error.TestFail("Hit on bug: %s" % bug_url) raise error.TestFail(cmd_result.stderr) else:
def run(test, params, env):
    """
    Test rng device options.

    1.Prepare test environment, destroy or suspend a VM.
    2.Edit xml and start the domain.
    3.Perform test operation.
    4.Recover test environment.
    5.Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    def modify_rng_xml(dparams, sync=True):
        """
        Modify interface xml options

        :param dparams: dict describing the <rng> device to build
        :param sync: if True, add the device and sync the inactive xml;
                     if False, inject the device via 'virsh edit'
        """
        rng_model = dparams.get("rng_model", "virtio")
        rng_rate = dparams.get("rng_rate")
        backend_model = dparams.get("backend_model", "random")
        backend_type = dparams.get("backend_type")
        backend_dev = dparams.get("backend_dev", "")
        backend_source_list = dparams.get("backend_source", "").split()
        backend_protocol = dparams.get("backend_protocol")
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        rng_xml = rng.Rng()
        rng_xml.rng_model = rng_model
        if rng_rate:
            rng_xml.rate = ast.literal_eval(rng_rate)
        backend = rng.Rng.Backend()
        backend.backend_model = backend_model
        if backend_type:
            backend.backend_type = backend_type
        if backend_dev:
            backend.backend_dev = backend_dev
        if backend_source_list:
            source_list = [ast.literal_eval(source)
                           for source in backend_source_list]
            backend.source = source_list
        if backend_protocol:
            backend.backend_protocol = backend_protocol
        rng_xml.backend = backend
        logging.debug("Rng xml: %s", rng_xml)
        if sync:
            vmxml.add_device(rng_xml)
            vmxml.xmltreefile.write()
            vmxml.sync()
        else:
            # Append the <rng> element inside <devices> with a sed-style
            # virsh edit expression; '/' in the xml must be escaped.
            # r"\/" has the same value as the original "\/" but avoids
            # the invalid-escape SyntaxWarning on modern Python.
            status = libvirt.exec_virsh_edit(
                vm_name, [(r":/<devices>/s/$/%s" %
                           re.findall(r"<rng.*<\/rng>",
                                      str(rng_xml), re.M
                                      )[0].replace("/", r"\/"))])
            if not status:
                test.fail("Failed to edit vm xml")

    def check_qemu_cmd(dparams):
        """
        Verify qemu-kvm command line.
        """
        rng_model = dparams.get("rng_model", "virtio")
        rng_rate = dparams.get("rng_rate")
        backend_type = dparams.get("backend_type")
        backend_source_list = dparams.get("backend_source", "").split()
        cmd = ("ps -ef | grep %s | grep -v grep" % vm_name)
        chardev = src_host = src_port = None
        if backend_type == "tcp":
            chardev = "socket"
        elif backend_type == "udp":
            chardev = "udp"
        for bc_source in backend_source_list:
            source = ast.literal_eval(bc_source)
            if "mode" in source and source['mode'] == "connect":
                src_host = source['host']
                src_port = source['service']
        if chardev and src_host and src_port:
            cmd += (" | grep 'chardev %s,.*host=%s,port=%s'"
                    % (chardev, src_host, src_port))
        if rng_model == "virtio":
            cmd += (" | grep 'device virtio-rng-pci'")
        if rng_rate:
            rate = ast.literal_eval(rng_rate)
            cmd += (" | grep 'max-bytes=%s,period=%s'"
                    % (rate['bytes'], rate['period']))
        if process.run(cmd, ignore_status=True, shell=True).exit_status:
            test.fail("Cann't see rng option"
                      " in command line")

    def check_host():
        """
        Check random device on host
        """
        backend_dev = params.get("backend_dev")
        if backend_dev:
            cmd = "lsof |grep %s" % backend_dev
            ret = process.run(cmd, ignore_status=True, shell=True)
            if ret.exit_status or not ret.stdout.count("qemu"):
                test.fail("Failed to check random device"
                          " on host, command output: %s" % ret.stdout)

    def check_snapshot(bgjob=None):
        """
        Do snapshot operation and check the results

        :param bgjob: background job feeding the tcp random server, killed
                      and restarted when the rng device is re-added
        """
        snapshot_name1 = "snap.s1"
        snapshot_name2 = "snap.s2"
        if not snapshot_vm_running:
            vm.destroy(gracefully=False)
        ret = virsh.snapshot_create_as(vm_name, snapshot_name1)
        libvirt.check_exit_status(ret)
        snap_lists = virsh.snapshot_list(vm_name)
        # snapshot_name (not snapshot_name1) is the pre-rng snapshot taken
        # in the main flow; reverting to it must drop the rng device.
        if snapshot_name not in snap_lists:
            test.fail("Snapshot %s doesn't exist" % snapshot_name)
        if snapshot_vm_running:
            options = "--force"
        else:
            options = ""
        ret = virsh.snapshot_revert(
            vm_name, ("%s %s" % (snapshot_name, options)))
        libvirt.check_exit_status(ret)
        ret = virsh.dumpxml(vm_name)
        if ret.stdout.count("<rng model="):
            test.fail("Found rng device in xml")
        if snapshot_with_rng:
            if vm.is_alive():
                vm.destroy(gracefully=False)
            if bgjob:
                bgjob.kill_func()
            modify_rng_xml(params, False)
        # Start the domain before disk-only snapshot
        if vm.is_dead():
            # Add random server
            if params.get("backend_type") == "tcp":
                cmd = "cat /dev/random | nc -4 -l localhost 1024"
                bgjob = utils.AsyncJob(cmd)
            vm.start()
            vm.wait_for_login().close()
        err_msgs = ("live disk snapshot not supported"
                    " with this QEMU binary")
        ret = virsh.snapshot_create_as(vm_name,
                                       "%s --disk-only" % snapshot_name2)
        if ret.exit_status:
            if ret.stderr.count(err_msgs):
                test.skip(err_msgs)
            else:
                test.fail("Failed to create external snapshot")
        snap_lists = virsh.snapshot_list(vm_name)
        if snapshot_name2 not in snap_lists:
            test.fail("Failed to check snapshot list")
        ret = virsh.domblklist(vm_name)
        if not ret.stdout.count(snapshot_name2):
            test.fail("Failed to find snapshot disk")

    def check_guest(session):
        """
        Check random device on guest

        :param session: logged-in guest shell session
        """
        rng_files = (
            "/sys/devices/virtual/misc/hw_random/rng_available",
            "/sys/devices/virtual/misc/hw_random/rng_current")
        rng_avail = session.cmd_output("cat %s" % rng_files[0],
                                       timeout=600).strip()
        rng_currt = session.cmd_output("cat %s" % rng_files[1],
                                       timeout=600).strip()
        logging.debug("rng avail:%s, current:%s", rng_avail, rng_currt)
        if not rng_currt.count("virtio") or rng_currt not in rng_avail:
            test.fail("Failed to check rng file on guest")
        # Read the random device
        cmd = ("dd if=/dev/hwrng of=rng.test count=100"
               " && rm -f rng.test")
        ret, output = session.cmd_status_output(cmd, timeout=600)
        if ret:
            test.fail("Failed to read the random device")
        rng_rate = params.get("rng_rate")
        if rng_rate:
            rate = ast.literal_eval(rng_rate)
            # Read the fields by key: the original unpacked dict.values(),
            # whose ordering is undefined before Python 3.7 and could
            # silently swap bytes/period.
            rate_bytes = rate['bytes']
            rate_period = rate['period']
            rate_conf = float(rate_bytes) / (float(rate_period) / 1000)
            ret = re.search(r"(\d+) bytes.*copied, (\d+.\d+) s",
                            output, re.M)
            if not ret:
                test.fail("Can't find rate from output")
            rate_real = float(ret.group(1)) / float(ret.group(2))
            logging.debug("Find rate: %s, config rate: %s",
                          rate_real, rate_conf)
            # Allow 20% headroom over the configured rate limit
            if rate_real > rate_conf * 1.2:
                test.fail("The rate of reading exceed"
                          " the limitation of configuration")
        if device_num > 1:
            rng_dev = rng_avail.split()
            if len(rng_dev) != device_num:
                test.skip("Multiple virtio-rng devices are not"
                          " supported on this guest kernel. "
                          "Bug: https://bugzilla.redhat.com/"
                          "show_bug.cgi?id=915335")
            session.cmd("echo -n %s > %s" % (rng_dev[1], rng_files[1]))
            # Read the random device
            if session.cmd_status(cmd, timeout=120):
                test.fail("Failed to read the random device")

    start_error = "yes" == params.get("start_error", "no")

    test_host = "yes" == params.get("test_host", "no")
    test_guest = "yes" == params.get("test_guest", "no")
    test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no")
    test_snapshot = "yes" == params.get("test_snapshot", "no")
    snapshot_vm_running = "yes" == params.get("snapshot_vm_running",
                                              "no")
    snapshot_with_rng = "yes" == params.get("snapshot_with_rng", "no")
    snapshot_name = params.get("snapshot_name")
    device_num = int(params.get("device_num", 1))
    if device_num > 1 and not libvirt_version.version_compare(1, 2, 7):
        test.skip("Multiple virtio-rng devices not "
                  "supported on this libvirt version")

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Try to install rng-tools on host, it can speed up random rate
    # if installation failed, ignore the error and continue the test
    if utils_package.package_install(["rng-tools"]):
        rngd_conf = "/etc/sysconfig/rngd"
        rngd_srv = "/usr/lib/systemd/system/rngd.service"
        if os.path.exists(rngd_conf):
            # For rhel6 host, add extraoptions
            with open(rngd_conf, 'w') as f_rng:
                f_rng.write('EXTRAOPTIONS="--rng-device /dev/urandom"')
        elif os.path.exists(rngd_srv):
            # For rhel7 host, modify start options
            rngd_srv_conf = "/etc/systemd/system/rngd.service"
            if not os.path.exists(rngd_srv_conf):
                shutil.copy(rngd_srv, rngd_srv_conf)
            process.run("sed -i -e 's#^ExecStart=.*#ExecStart=/sbin/rngd"
                        " -f -r /dev/urandom -o /dev/random#' %s"
                        % rngd_srv_conf, shell=True)
            process.run('systemctl daemon-reload')
        process.run("service rngd start")

    # Build the xml and run test.
    try:
        bgjob = None
        # Take snapshot if needed
        if snapshot_name:
            if snapshot_vm_running:
                vm.start()
                vm.wait_for_login().close()
            ret = virsh.snapshot_create_as(vm_name, snapshot_name)
            libvirt.check_exit_status(ret)

        # Destroy VM first
        if vm.is_alive():
            vm.destroy(gracefully=False)

        # Build vm xml.
        dparams = {}
        if device_num > 1:
            # range (not py2-only xrange): block already relies on py3
            # syntax elsewhere ('except ... as details')
            for i in range(device_num):
                dparams[i] = {"rng_model": params.get(
                    "rng_model_%s" % i, "virtio")}
                dparams[i].update({"backend_model": params.get(
                    "backend_model_%s" % i, "random")})
                bk_type = params.get("backend_type_%s" % i)
                if bk_type:
                    dparams[i].update({"backend_type": bk_type})
                bk_dev = params.get("backend_dev_%s" % i)
                if bk_dev:
                    dparams[i].update({"backend_dev": bk_dev})
                bk_src = params.get("backend_source_%s" % i)
                if bk_src:
                    dparams[i].update({"backend_source": bk_src})
                bk_pro = params.get("backend_protocol_%s" % i)
                if bk_pro:
                    dparams[i].update({"backend_protocol": bk_pro})
                modify_rng_xml(dparams[i], False)
        else:
            modify_rng_xml(params, not test_snapshot)

        try:
            # Add random server
            if params.get("backend_type") == "tcp":
                cmd = "cat /dev/random | nc -4 -l localhost 1024"
                bgjob = utils.AsyncJob(cmd)

            # Start the VM.
            vm.start()
            if start_error:
                test.fail("VM started unexpectedly")

            if test_qemu_cmd:
                if device_num > 1:
                    for i in range(device_num):
                        check_qemu_cmd(dparams[i])
                else:
                    check_qemu_cmd(params)
            if test_host:
                check_host()
            session = vm.wait_for_login()
            if test_guest:
                check_guest(session)
            session.close()
            if test_snapshot:
                check_snapshot(bgjob)
        except virt_vm.VMStartError as details:
            logging.info(str(details))
            if not start_error:
                test.fail('VM failed to start, '
                          'please refer to https://bugzilla.'
                          'redhat.com/show_bug.cgi?id=1220252:'
                          '\n%s' % details)
    finally:
        # Delete snapshots.
        snapshot_lists = virsh.snapshot_list(vm_name)
        if len(snapshot_lists) > 0:
            libvirt.clean_up_snapshots(vm_name, snapshot_lists)
            for snapshot in snapshot_lists:
                virsh.snapshot_delete(vm_name, snapshot, "--metadata")

        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.info("Restoring vm...")
        vmxml_backup.sync()
        if bgjob:
            bgjob.kill_func()
def run(test, params, env): """ Attach/Detach an iscsi network/volume disk to domain 1. For secret usage testing: 1.1. Setup an iscsi target with CHAP authentication. 1.2. Define a secret for iscsi target usage 1.3. Set secret value 2. Create 4. Create an iscsi network disk XML 5. Attach disk with the XML file and check the disk inside the VM 6. Detach the disk """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) disk_device = params.get("disk_device", "disk") disk_type = params.get("disk_type", "network") disk_src_protocal = params.get("disk_source_protocal", "iscsi") disk_src_host = params.get("disk_source_host", "127.0.0.1") disk_src_port = params.get("disk_source_port", "3260") disk_src_pool = params.get("disk_source_pool") disk_src_mode = params.get("disk_source_mode", "host") pool_type = params.get("pool_type", "iscsi") pool_src_host = params.get("pool_source_host", "127.0.0.1") disk_target = params.get("disk_target", "vdb") disk_target_bus = params.get("disk_target_bus", "virtio") disk_readonly = params.get("disk_readonly", "no") chap_auth = "yes" == params.get("chap_auth", "no") chap_user = params.get("chap_username", "") chap_passwd = params.get("chap_password", "") secret_usage_target = params.get("secret_usage_target") secret_ephemeral = params.get("secret_ephemeral", "no") secret_private = params.get("secret_private", "yes") status_error = "yes" == params.get("status_error", "no") if disk_type == "volume": if not libvirt_version.version_compare(1, 0, 5): raise error.TestNAError("'volume' type disk doesn't support in" + " current libvirt version.") # Back VM XML vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) virsh_dargs = {'debug': True, 'ignore_status': True} try: if chap_auth: # Create a secret xml to define it secret_xml = SecretXML(secret_ephemeral, secret_private) secret_xml.auth_type = "chap" secret_xml.auth_username = chap_user secret_xml.usage = disk_src_protocal secret_xml.target = secret_usage_target 
logging.debug("Define secret by XML: %s", open(secret_xml.xml).read()) # Define secret cmd_result = virsh.secret_define(secret_xml.xml, **virsh_dargs) libvirt.check_exit_status(cmd_result) # Get secret uuid try: secret_uuid = cmd_result.stdout.strip().split()[1] except IndexError: raise error.TestError("Fail to get new created secret uuid") # Set secret value secret_string = base64.b64encode(chap_passwd) cmd_result = virsh.secret_set_value(secret_uuid, secret_string, **virsh_dargs) libvirt.check_exit_status(cmd_result) else: # Set chap_user and chap_passwd to empty to avoid setup # CHAP authentication when export iscsi target chap_user = "" chap_passwd = "" # Setup iscsi target iscsi_target = libvirt.setup_or_cleanup_iscsi(is_setup=True, is_login=False, chap_user=chap_user, chap_passwd=chap_passwd) # Create iscsi pool if disk_type == "volume": # Create an iscsi pool xml to create it pool_src_xml = pool_xml.SourceXML() pool_src_xml.hostname = pool_src_host pool_src_xml.device_path = iscsi_target poolxml = pool_xml.PoolXML(pool_type=pool_type) poolxml.name = disk_src_host poolxml.set_source(pool_src_xml) poolxml.target_path = "/dev/disk/by-path" # Create iscsi pool cmd_result = virsh.pool_create(poolxml.xml, **virsh_dargs) libvirt.check_exit_status(cmd_result) # Get volume name cmd_result = virsh.vol_list(disk_src_pool, **virsh_dargs) libvirt.check_exit_status(cmd_result) try: vol_name = re.findall(r"(\S+)\ +(\S+)[\ +\n]", str(cmd_result.stdout))[1][0] except IndexError: raise error.TestError("Fail to get volume name") # Create iscsi network disk XML disk_params = {'device_type': disk_device, 'type_name': disk_type, 'target_dev': disk_target, 'target_bus': disk_target_bus, 'readonly': disk_readonly} disk_params_src = {} if disk_type == "network": disk_params_src = {'source_protocol': disk_src_protocal, 'source_name': iscsi_target + "/1", 'source_host_name': disk_src_host, 'source_host_port': disk_src_port} elif disk_type == "volume": disk_params_src = {'source_pool': 
disk_src_pool, 'source_volume': vol_name, 'source_mode': disk_src_mode} else: error.TestNAError("Unsupport disk type in this test") disk_params.update(disk_params_src) if chap_auth: disk_params_auth = {'auth_user': chap_user, 'secret_type': disk_src_protocal, 'secret_usage': secret_xml.target} disk_params.update(disk_params_auth) disk_xml = libvirt.create_disk_xml(disk_params) start_vm = "yes" == params.get("start_vm", "yes") if start_vm: if vm.is_dead(): vm.start() else: if not vm.is_dead(): vm.destroy() attach_option = params.get("attach_option", "") # Attach the iscsi network disk to domain logging.debug("Attach disk by XML: %s", open(disk_xml).read()) cmd_result = virsh.attach_device(domainarg=vm_name, filearg=disk_xml, flagstrs=attach_option, dargs=virsh_dargs) libvirt.check_exit_status(cmd_result, status_error) if vm.is_dead(): vm.start() cmd_result = virsh.start(vm_name, **virsh_dargs) libvirt.check_exit_status(cmd_result) domain_operation = params.get("domain_operation", "") if domain_operation == "save": save_file = os.path.join(test.tmpdir, "vm.save") cmd_result = virsh.save(vm_name, save_file, **virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result = virsh.restore(save_file) libvirt.check_exit_status(cmd_result) if os.path.exists(save_file): os.remove(save_file) elif domain_operation == "snapshot": # Run snapshot related commands: snapshot-create-as, snapshot-list # snapshot-info, snapshot-dumpxml, snapshot-create snapshot_name1 = "snap1" snapshot_name2 = "snap2" cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name1, **virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result = virsh.snapshot_info(vm_name, snapshot_name1, **virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name1, **virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result = virsh.snapshot_create(vm_name, 
**virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result = virsh.snapshot_current(vm_name, **virsh_dargs) libvirt.check_exit_status(cmd_result) sn_create_op = "%s --disk_ony %s" % (snapshot_name2, disk_target) cmd_result = virsh.snapshot_create_as(vm_name, sn_create_op, **virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result = virsh.snapshot_revert(vm_name, snapshot_name1, **virsh_dargs) cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result = virsh.snapshot_delete(vm_name, snapshot_name2, **virsh_dargs) libvirt.check_exit_status(cmd_result) pass else: logging.error("Unsupport operation %s in this case, so skip it", domain_operation) def find_attach_disk(expect=True): """ Find attached disk inside the VM """ found_disk = False if vm.is_dead(): raise error.TestError("Domain %s is not running" % vm_name) else: try: session = vm.wait_for_login() cmd = "grep %s /proc/partitions" % disk_target s, o = session.cmd_status_output(cmd) logging.info("%s output: %s", cmd, o) session.close() if s == 0: found_disk = True except (LoginError, VMError, ShellError), e: logging.error(str(e)) if found_disk == expect: logging.debug("Check disk inside the VM PASS as expected") else: raise error.TestError("Check disk inside the VM FAIL") # Check disk inside the VM, expect is False if status_error=True find_attach_disk(not status_error) # Detach disk cmd_result = virsh.detach_disk(vm_name, disk_target) libvirt.check_exit_status(cmd_result, status_error) # Check disk inside the VM find_attach_disk(False)
def run(test, params, env):
    """
    Test command: virsh blockcopy.

    This command can copy a disk backing image chain to dest.
    1. Positive testing
        1.1 Copy a disk to a new image file.
        1.2 Reuse existing destination copy.
        1.3 Valid blockcopy timeout and bandwidth test.
    2. Negative testing
        2.1 Copy a disk to a non-exist directory.
        2.2 Copy a disk with invalid options.
        2.3 Do block copy for a persistent domain.

    :param test: avocado test object (used for cancel/fail reporting)
    :param params: cartesian test parameters
    :param env: test environment holding the VM object
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    target = params.get("target_disk", "")
    replace_vm_disk = "yes" == params.get("replace_vm_disk", "no")
    disk_source_protocol = params.get("disk_source_protocol")
    disk_type = params.get("disk_type")
    pool_name = params.get("pool_name")
    image_size = params.get("image_size")
    emu_image = params.get("emulated_image")
    copy_to_nfs = "yes" == params.get("copy_to_nfs", "no")
    mnt_path_name = params.get("mnt_path_name")
    options = params.get("blockcopy_options", "")
    bandwidth = params.get("blockcopy_bandwidth", "")
    bandwidth_byte = "yes" == params.get("bandwidth_byte", "no")
    reuse_external = "yes" == params.get("reuse_external", "no")
    persistent_vm = params.get("persistent_vm", "no")
    status_error = "yes" == params.get("status_error", "no")
    active_error = "yes" == params.get("active_error", "no")
    active_snap = "yes" == params.get("active_snap", "no")
    active_save = "yes" == params.get("active_save", "no")
    check_state_lock = "yes" == params.get("check_state_lock", "no")
    with_shallow = "yes" == params.get("with_shallow", "no")
    with_blockdev = "yes" == params.get("with_blockdev", "no")
    setup_libvirt_polkit = "yes" == params.get('setup_libvirt_polkit')
    bug_url = params.get("bug_url", "")
    timeout = int(params.get("timeout", 1200))
    relative_path = params.get("relative_path")
    rerun_flag = 0
    blkdev_n = None
    back_n = 'blockdev-backing-iscsi'
    snapshot_external_disks = []

    # Skip/Fail early
    if with_blockdev and not libvirt_version.version_compare(1, 2, 13):
        raise exceptions.TestSkipError("--blockdev option not supported in "
                                       "current version")
    if not target:
        raise exceptions.TestSkipError("Require target disk to copy")
    if setup_libvirt_polkit and not libvirt_version.version_compare(1, 1, 1):
        raise exceptions.TestSkipError("API acl test not supported in current"
                                       " libvirt version")
    if copy_to_nfs and not libvirt_version.version_compare(1, 1, 1):
        raise exceptions.TestSkipError("Bug will not fix: %s" % bug_url)
    if bandwidth_byte and not libvirt_version.version_compare(1, 3, 3):
        raise exceptions.TestSkipError("--bytes option not supported in "
                                       "current version")
    if relative_path == "yes" and not libvirt_version.version_compare(3, 0, 0):
        test.cancel("Forbid using relative path or file name only is added since libvirt-3.0.0")

    # Check the source disk
    if vm_xml.VMXML.check_disk_exist(vm_name, target):
        logging.debug("Find %s in domain %s", target, vm_name)
    else:
        raise exceptions.TestFail("Can't find %s in domain %s" %
                                  (target, vm_name))

    original_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    tmp_dir = data_dir.get_tmp_dir()

    # Prepare dest path params
    dest_path = params.get("dest_path", "")
    dest_format = params.get("dest_format", "")
    # Ugh... this piece of chicanery brought to you by the QemuImg which
    # will "add" the 'dest_format' extension during the check_format code.
    # So if we create the file with the extension and then remove it when
    # doing the check_format later, then we avoid erroneous failures.
    dest_extension = ""
    if dest_format != "":
        dest_extension = ".%s" % dest_format

    # Prepare for --reuse-external option
    if reuse_external:
        options += "--reuse-external --wait"
        # Set rerun_flag=1 to do blockcopy twice, and the first time created
        # file can be reused in the second time if no dest_path given
        # This will make sure the image size equal to original disk size
        if dest_path == "/path/non-exist":
            if os.path.exists(dest_path) and not os.path.isdir(dest_path):
                os.remove(dest_path)
        else:
            rerun_flag = 1

    # Prepare other options
    if dest_format == "raw":
        options += " --raw"
    if with_blockdev:
        options += " --blockdev"
    if len(bandwidth):
        options += " --bandwidth %s" % bandwidth
    if bandwidth_byte:
        options += " --bytes"
    if with_shallow:
        options += " --shallow"

    # Prepare acl options
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    extra_dict = {'uri': uri, 'unprivileged_user': unprivileged_user,
                  'debug': True, 'ignore_status': True, 'timeout': timeout}

    libvirtd_utl = utils_libvirtd.Libvirtd()
    libvirtd_conf = utils_config.LibvirtdConfig()
    libvirtd_conf["log_filters"] = '"3:json 1:libvirt 1:qemu"'
    libvirtd_log_path = os.path.join(data_dir.get_tmp_dir(), "libvirtd.log")
    libvirtd_conf["log_outputs"] = '"1:file:%s"' % libvirtd_log_path
    logging.debug("the libvirtd config file content is:\n %s" %
                  libvirtd_conf)
    libvirtd_utl.restart()

    def check_format(dest_path, dest_extension, expect):
        """
        Check the image format

        :param dest_path: Path of the copy to create
        :param dest_extension: extension (including leading dot) to remove
        :param expect: Expect image format
        """
        # And now because the QemuImg will add the extension for us
        # we have to remove it here.
        # NOTE: str.strip(chars) strips a *character set* from both ends,
        # not a suffix, so it could eat extra trailing characters (e.g. a
        # name ending in 'raw' before the '.raw' extension). Remove the
        # exact suffix instead.
        if dest_extension and dest_path.endswith(dest_extension):
            path_noext = dest_path[:-len(dest_extension)]
        else:
            path_noext = dest_path
        params['image_name'] = path_noext
        params['image_format'] = expect
        image = qemu_storage.QemuImg(params, "/", path_noext)
        if image.get_format() == expect:
            logging.debug("%s format is %s", dest_path, expect)
        else:
            raise exceptions.TestFail("%s format is not %s" %
                                      (dest_path, expect))

    def _blockjob_and_libvirtd_chk(cmd_result):
        """
        Raise TestFail when blockcopy fail with block-job-complete error or
        blockcopy hang with state change lock.

        This is a specific bug verify, so ignore status_error here.
        """
        bug_url_ = "https://bugzilla.redhat.com/show_bug.cgi?id=1197592"
        err_msg = "internal error: unable to execute QEMU command"
        err_msg += " 'block-job-complete'"
        if err_msg in cmd_result.stderr:
            raise exceptions.TestFail("Hit on bug: %s" % bug_url_)

        err_pattern = "Timed out during operation: cannot acquire"
        err_pattern += " state change lock"
        ret = chk_libvirtd_log(libvirtd_log_path, err_pattern, "error")
        if ret:
            raise exceptions.TestFail("Hit on bug: %s" % bug_url_)

    def _make_snapshot():
        """
        Make external disk snapshot
        """
        snap_xml = snapshot_xml.SnapshotXML()
        snapshot_name = "blockcopy_snap"
        snap_xml.snap_name = snapshot_name
        snap_xml.description = "blockcopy snapshot"

        # Add all disks into xml file.
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        disks = vmxml.devices.by_device_tag('disk')
        # Remove non-storage disk such as 'cdrom'
        for disk in disks:
            if disk.device != 'disk':
                disks.remove(disk)
        new_disks = []
        src_disk_xml = disks[0]
        disk_xml = snap_xml.SnapDiskXML()
        disk_xml.xmltreefile = src_disk_xml.xmltreefile
        del disk_xml.device
        del disk_xml.address
        disk_xml.snapshot = "external"
        disk_xml.disk_name = disk_xml.target['dev']

        # Only qcow2 works as external snapshot file format, update it
        # here
        driver_attr = disk_xml.driver
        driver_attr.update({'type': 'qcow2'})
        disk_xml.driver = driver_attr

        new_attrs = disk_xml.source.attrs
        if 'file' in disk_xml.source.attrs:
            new_file = os.path.join(tmp_dir, "blockcopy_shallow.snap")
            snapshot_external_disks.append(new_file)
            new_attrs.update({'file': new_file})
            hosts = None
        elif ('dev' in disk_xml.source.attrs or
              'name' in disk_xml.source.attrs or
              'pool' in disk_xml.source.attrs):
            if (disk_xml.type_name == 'block' or
                    disk_source_protocol == 'iscsi'):
                disk_xml.type_name = 'block'
                if 'name' in new_attrs:
                    del new_attrs['name']
                    del new_attrs['protocol']
                elif 'pool' in new_attrs:
                    del new_attrs['pool']
                    del new_attrs['volume']
                    del new_attrs['mode']
                back_path = utl.setup_or_cleanup_iscsi(is_setup=True,
                                                       is_login=True,
                                                       image_size="1G",
                                                       emulated_image=back_n)
                emulated_iscsi.append(back_n)
                cmd = "qemu-img create -f qcow2 %s 1G" % back_path
                process.run(cmd, shell=True)
                new_attrs.update({'dev': back_path})
                hosts = None

        new_src_dict = {"attrs": new_attrs}
        if hosts:
            new_src_dict.update({"hosts": hosts})
        disk_xml.source = disk_xml.new_disk_source(**new_src_dict)

        new_disks.append(disk_xml)

        snap_xml.set_disks(new_disks)
        snapshot_xml_path = snap_xml.xml
        logging.debug("The snapshot xml is: %s" % snap_xml.xmltreefile)

        options = "--disk-only --xmlfile %s " % snapshot_xml_path

        snapshot_result = virsh.snapshot_create(
            vm_name, options, debug=True)

        if snapshot_result.exit_status != 0:
            raise exceptions.TestFail(snapshot_result.stderr)

    snap_path = ''
    save_path = ''
    emulated_iscsi = []
    nfs_cleanup = False
    try:
        # Prepare dest_path
        tmp_file = time.strftime("%Y-%m-%d-%H.%M.%S.img")
        tmp_file += dest_extension
        if not dest_path:
            if with_blockdev:
                blkdev_n = 'blockdev-iscsi'
                dest_path = utl.setup_or_cleanup_iscsi(is_setup=True,
                                                       is_login=True,
                                                       image_size=image_size,
                                                       emulated_image=blkdev_n)
                emulated_iscsi.append(blkdev_n)
                # Make sure the new disk show up
                utils_misc.wait_for(lambda: os.path.exists(dest_path), 5)
            else:
                if copy_to_nfs:
                    tmp_dir = "%s/%s" % (tmp_dir, mnt_path_name)
                dest_path = os.path.join(tmp_dir, tmp_file)

        # Domain disk replacement with desire type
        if replace_vm_disk:
            # Calling 'set_vm_disk' is bad idea as it left lots of cleanup jobs
            # after test, such as pool, volume, nfs, iscsi and so on
            # TODO: remove this function in the future
            if disk_source_protocol == 'iscsi':
                emulated_iscsi.append(emu_image)
            if disk_source_protocol == 'netfs':
                nfs_cleanup = True
            utl.set_vm_disk(vm, params, tmp_dir, test)
            new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

        if with_shallow:
            _make_snapshot()

        # Prepare transient/persistent vm
        if persistent_vm == "no" and vm.is_persistent():
            vm.undefine("--nvram")
        elif persistent_vm == "yes" and not vm.is_persistent():
            new_xml.define()

        # Run blockcopy command to create destination file
        if rerun_flag == 1:
            options1 = "--wait %s --finish --verbose" % dest_format
            if with_blockdev:
                options1 += " --blockdev"
            if with_shallow:
                options1 += " --shallow"
            cmd_result = virsh.blockcopy(vm_name, target,
                                         dest_path, options1,
                                         **extra_dict)
            status = cmd_result.exit_status
            if status != 0:
                raise exceptions.TestFail("Run blockcopy command fail: %s" %
                                          cmd_result.stdout.strip() + cmd_result.stderr)
            elif not os.path.exists(dest_path):
                raise exceptions.TestFail("Cannot find the created copy")

        # Run the real testing command
        cmd_result = virsh.blockcopy(vm_name, target, dest_path,
                                     options, **extra_dict)

        # check BZ#1197592
        _blockjob_and_libvirtd_chk(cmd_result)
        status = cmd_result.exit_status

        if not libvirtd_utl.is_running():
            raise exceptions.TestFail("Libvirtd service is dead")

        if not status_error:
            if status == 0:
                ret = utils_misc.wait_for(
                    lambda: check_xml(vm_name, target, dest_path, options), 5)
                if not ret:
                    raise exceptions.TestFail("Domain xml not expected after"
                                              " blockcopy")
                if options.count("--bandwidth"):
                    if options.count('--bytes'):
                        bandwidth += 'B'
                    else:
                        bandwidth += 'M'
                    if not utl.check_blockjob(vm_name, target, "bandwidth",
                                              bandwidth):
                        raise exceptions.TestFail("Check bandwidth failed")
                val = options.count("--pivot") + options.count("--finish")
                # Don't wait for job finish when using --byte option
                val += options.count('--bytes')
                if val == 0:
                    try:
                        finish_job(vm_name, target, timeout)
                    except JobTimeout as excpt:
                        raise exceptions.TestFail("Run command failed: %s" %
                                                  excpt)
                if options.count("--raw") and not with_blockdev:
                    check_format(dest_path, dest_extension, dest_format)
                if active_snap:
                    snap_path = "%s/%s.snap" % (tmp_dir, vm_name)
                    snap_opt = "--disk-only --atomic --no-metadata "
                    snap_opt += "vda,snapshot=external,file=%s" % snap_path
                    ret = virsh.snapshot_create_as(vm_name, snap_opt,
                                                   ignore_status=True,
                                                   debug=True)
                    utl.check_exit_status(ret, active_error)
                if active_save:
                    save_path = "%s/%s.save" % (tmp_dir, vm_name)
                    ret = virsh.save(vm_name, save_path,
                                     ignore_status=True,
                                     debug=True)
                    utl.check_exit_status(ret, active_error)
                if check_state_lock:
                    # Run blockjob pivot in subprocess as it will hang
                    # for a while, run blockjob info again to check
                    # job state
                    command = "virsh blockjob %s %s --pivot" % (vm_name,
                                                                target)
                    session = aexpect.ShellSession(command)
                    ret = virsh.blockjob(vm_name, target, "--info")
                    err_info = "cannot acquire state change lock"
                    if err_info in ret.stderr:
                        raise exceptions.TestFail("Hit on bug: %s" % bug_url)
                    utl.check_exit_status(ret, status_error)
                    session.close()
            else:
                raise exceptions.TestFail(cmd_result.stdout.strip() +
                                          cmd_result.stderr)
        else:
            if status:
                logging.debug("Expect error: %s", cmd_result.stderr)
            else:
                # Commit id '4c297728' changed how virsh exits when
                # unexpectedly failing due to timeout from a fail (1)
                # to a success(0), so we need to look for a different
                # marker to indicate the copy aborted. As "stdout: Now
                # in mirroring phase" could be in stdout which fail the
                # check, so also do check in libvirtd log to confirm.
                if options.count("--timeout") and options.count("--wait"):
                    log_pattern = "Copy aborted"
                    if (re.search(log_pattern, cmd_result.stdout.strip()) or
                            chk_libvirtd_log(libvirtd_log_path,
                                             log_pattern, "debug")):
                        logging.debug("Found success a timed out block copy")
                else:
                    raise exceptions.TestFail("Expect fail, but run "
                                              "successfully: %s" % bug_url)
    finally:
        # Recover VM may fail unexpectedly, we need using try/except to
        # proceed the following cleanup steps
        try:
            # Abort exist blockjob to avoid any possible lock error
            virsh.blockjob(vm_name, target, '--abort', ignore_status=True)
            vm.destroy(gracefully=False)
            # It may take a long time to shutdown the VM which has
            # blockjob running
            utils_misc.wait_for(
                lambda: virsh.domstate(vm_name,
                                       ignore_status=True).exit_status, 180)
            if virsh.domain_exists(vm_name):
                if active_snap or with_shallow:
                    option = "--snapshots-metadata"
                else:
                    option = None
                original_xml.sync(option)
            else:
                original_xml.define()
        except Exception as e:
            logging.error(e)
        for disk in snapshot_external_disks:
            if os.path.exists(disk):
                os.remove(disk)
        # Clean up libvirt pool, which may be created by 'set_vm_disk'
        if disk_type == 'volume':
            virsh.pool_destroy(pool_name, ignore_status=True, debug=True)
        # Restore libvirtd conf and restart libvirtd
        libvirtd_conf.restore()
        libvirtd_utl.restart()
        if libvirtd_log_path and os.path.exists(libvirtd_log_path):
            os.unlink(libvirtd_log_path)
        # Clean up NFS
        try:
            if nfs_cleanup:
                utl.setup_or_cleanup_nfs(is_setup=False)
        except Exception as e:
            logging.error(e)
        # Clean up iSCSI
        try:
            for iscsi_n in list(set(emulated_iscsi)):
                utl.setup_or_cleanup_iscsi(is_setup=False,
                                           emulated_image=iscsi_n)
                # iscsid will be restarted, so give it a break before next loop
                time.sleep(5)
        except Exception as e:
            logging.error(e)
        if os.path.exists(dest_path):
            os.remove(dest_path)
        if os.path.exists(snap_path):
            os.remove(snap_path)
        if os.path.exists(save_path):
            os.remove(save_path)
        # Restart virtlogd service to release VM log file lock
        try:
            path.find_command('virtlogd')
            process.run('systemctl reset-failed virtlogd')
            process.run('systemctl restart virtlogd ')
        except path.CmdNotFoundError:
            pass
mkfs_and_mount(session, mount_disk) create_file_in_vm(session, "/mnt/before_snapshot.txt", "before") # virsh snapshot-create-as vm s --disk-only --diskspec vda,file=path if snapshot_disk_only: vm_blks = get_vm_blks(vm_name) options = "%s --disk-only" % snapshot_name for vm_blk in vm_blks: snapshot_file = snapshot_dir + "/" + vm_blk + "." + snapshot_name if os.path.exists(snapshot_file): os.remove(snapshot_file) options = options + " --diskspec %s,file=%s" % (vm_blk, snapshot_file) else: options = snapshot_name utlv.check_exit_status(virsh.snapshot_create_as(vm_name, options)) # check virsh snapshot-list logging.debug("Running: snapshot-list %s", vm_name) snapshot_list = virsh.snapshot_list(vm_name) logging.debug("snapshot list is: %s", snapshot_list) if not snapshot_list: raise exceptions.TestFail("snapshots not found after creation.") # snapshot-revert doesn't support external snapshot for now. so # only check this with internal snapshot. if not snapshot_disk_only: create_file_in_vm(session, "/mnt/after_snapshot.txt", "after") logging.debug("Running: snapshot-revert %s %s", vm_name, snapshot_name) utlv.check_exit_status(virsh.snapshot_revert(vm_name, snapshot_name))
def run(test, params, env):
    """
    Test command: virsh blockcommit <domain> <path>

    1) Prepare test environment.
    2) Commit changes from a snapshot down to its backing image.
    3) Recover test environment.
    4) Check result.

    :param test: avocado test object (used for cancel/fail reporting)
    :param params: cartesian test parameters
    :param env: test environment holding the VM object
    """

    def make_disk_snapshot(postfix_n, snapshot_take):
        """
        Make external snapshots for disks only.

        :param postfix_n: postfix option
        :param snapshot_take: snapshots taken.
        """
        # Add all disks into command line.
        disks = vm.get_disk_devices()

        # Make three external snapshots for disks only
        for count in range(1, snapshot_take):
            options = "%s_%s %s%s-desc " % (postfix_n, count,
                                            postfix_n, count)
            options += "--disk-only --atomic --no-metadata"
            if needs_agent:
                options += " --quiesce"

            for disk in disks:
                disk_detail = disks[disk]
                basename = os.path.basename(disk_detail['source'])

                # Remove the original suffix if any, appending
                # ".postfix_n[0-9]"
                diskname = basename.split(".")[0]
                snap_name = "%s.%s%s" % (diskname, postfix_n, count)
                disk_external = os.path.join(tmp_dir, snap_name)

                snapshot_external_disks.append(disk_external)
                options += " %s,snapshot=external,file=%s" % (disk,
                                                              disk_external)

            cmd_result = virsh.snapshot_create_as(vm_name, options,
                                                  ignore_status=True,
                                                  debug=True)
            status = cmd_result.exit_status
            if status != 0:
                test.fail("Failed to make snapshots for disks!")

            # Create a file flag in VM after each snapshot
            flag_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"),
                                                    dir="/tmp")
            file_path = flag_file.name
            flag_file.close()

            status, output = session.cmd_status_output("touch %s" % file_path)
            if status:
                test.fail("Touch file in vm failed. %s" % output)

            snapshot_flag_files.append(file_path)

    def get_first_disk_source():
        """
        Get disk source of first device

        :return: first disk of first device.
        """
        first_device = vm.get_first_disk_devices()
        first_disk_src = first_device['source']
        return first_disk_src

    def make_relative_path_backing_files():
        """
        Create backing chain files of relative path.

        :return: absolute path of top active file
        """
        first_disk_source = get_first_disk_source()
        basename = os.path.basename(first_disk_source)
        root_dir = os.path.dirname(first_disk_source)
        cmd = "mkdir -p %s" % os.path.join(root_dir, '{b..d}')
        ret = process.run(cmd, shell=True)
        libvirt.check_exit_status(ret)

        # Make three external relative path backing files.
        backing_file_dict = collections.OrderedDict()
        backing_file_dict["b"] = "../%s" % basename
        backing_file_dict["c"] = "../b/b.img"
        backing_file_dict["d"] = "../c/c.img"
        for key, value in list(backing_file_dict.items()):
            backing_file_path = os.path.join(root_dir, key)
            cmd = ("cd %s && qemu-img create -f qcow2 -o backing_file=%s,backing_fmt=qcow2 %s.img"
                   % (backing_file_path, value, key))
            ret = process.run(cmd, shell=True)
            libvirt.check_exit_status(ret)
        return os.path.join(backing_file_path, "d.img")

    def check_chain_backing_files(disk_src_file, expect_backing_file=False):
        """
        Check backing chain files of relative path after blockcommit.

        :param disk_src_file: first disk src file.
        :param expect_backing_file: whether it expect to have backing files.
        """
        first_disk_source = get_first_disk_source()
        # Validate source image need refer to original one after active blockcommit
        if not expect_backing_file and disk_src_file not in first_disk_source:
            test.fail("The disk image path:%s doesn't include the origin image: %s"
                      % (first_disk_source, disk_src_file))
        # Validate source image doesn't have backing files after active blockcommit
        cmd = "qemu-img info %s --backing-chain" % first_disk_source
        if qemu_img_locking_feature_support:
            cmd = "qemu-img info -U %s --backing-chain" % first_disk_source
        ret = process.run(cmd, shell=True).stdout_text.strip()
        if expect_backing_file:
            if 'backing file' not in ret:
                test.fail("The disk image doesn't have backing files")
            else:
                logging.debug("The actual qemu-img output:%s\n", ret)
        else:
            if 'backing file' in ret:
                test.fail("The disk image still have backing files")
            else:
                logging.debug("The actual qemu-img output:%s\n", ret)

    # MAIN TEST CODE ###
    # Process cartesian parameters
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    snapshot_take = int(params.get("snapshot_take", '0'))
    vm_state = params.get("vm_state", "running")
    needs_agent = "yes" == params.get("needs_agent", "yes")
    replace_vm_disk = "yes" == params.get("replace_vm_disk", "no")
    top_inactive = ("yes" == params.get("top_inactive"))
    with_timeout = ("yes" == params.get("with_timeout_option", "no"))
    status_error = ("yes" == params.get("status_error", "no"))
    base_option = params.get("base_option", "none")
    middle_base = "yes" == params.get("middle_base", "no")
    pivot_opt = "yes" == params.get("pivot_opt", "no")
    snap_in_mirror = "yes" == params.get("snap_in_mirror", "no")
    snap_in_mirror_err = "yes" == params.get("snap_in_mirror_err", "no")
    with_active_commit = "yes" == params.get("with_active_commit", "no")
    multiple_chain = "yes" == params.get("multiple_chain", "no")
    virsh_dargs = {'debug': True}

    # Check whether qemu-img need add -U suboption since locking feature was added afterwards qemu-2.10
    qemu_img_locking_feature_support = libvirt_storage.check_qemu_image_lock_support()
    backing_file_relative_path = "yes" == params.get("backing_file_relative_path", "no")

    # Process domain disk device parameters
    disk_type = params.get("disk_type")
    disk_src_protocol = params.get("disk_source_protocol")
    restart_tgtd = params.get("restart_tgtd", 'no')
    vol_name = params.get("vol_name")
    tmp_dir = data_dir.get_tmp_dir()
    pool_name = params.get("pool_name", "gluster-pool")
    brick_path = os.path.join(tmp_dir, pool_name)

    if not top_inactive:
        if not libvirt_version.version_compare(1, 2, 4):
            test.cancel("live active block commit is not supported"
                        " in current libvirt version.")

    # A backup of original vm
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Abort the test if there are snapshots already
    existing_snaps = virsh.snapshot_list(vm_name)
    if len(existing_snaps) != 0:
        test.fail("There are snapshots created for %s already" %
                  vm_name)

    snapshot_external_disks = []
    cmd_session = None
    try:
        if disk_src_protocol == 'iscsi' and disk_type == 'network':
            if not libvirt_version.version_compare(1, 0, 4):
                test.cancel("'iscsi' disk doesn't support in"
                            " current libvirt version.")

        # Set vm xml and guest agent
        if replace_vm_disk:
            if disk_src_protocol == "rbd" and disk_type == "network":
                src_host = params.get("disk_source_host", "EXAMPLE_HOSTS")
                mon_host = params.get("mon_host", "EXAMPLE_MON_HOST")
                if src_host.count("EXAMPLE") or mon_host.count("EXAMPLE"):
                    test.cancel("Please provide rbd host first.")
            if backing_file_relative_path:
                if vm.is_alive():
                    vm.destroy(gracefully=False)
                first_src_file = get_first_disk_source()
                blk_source_image = os.path.basename(first_src_file)
                blk_source_folder = os.path.dirname(first_src_file)
                replace_disk_image = make_relative_path_backing_files()
                params.update({'disk_source_name': replace_disk_image,
                               'disk_type': 'file',
                               'disk_src_protocol': 'file'})
                vm.start()
            libvirt.set_vm_disk(vm, params, tmp_dir)

        if needs_agent:
            vm.prepare_guest_agent()

        # The first disk is supposed to include OS
        # We will perform blockcommit operation for it.
        first_disk = vm.get_first_disk_devices()
        blk_source = first_disk['source']
        blk_target = first_disk['target']
        snapshot_flag_files = []

        # get a vm session before snapshot
        session = vm.wait_for_login()
        # do snapshot
        postfix_n = 'snap'
        make_disk_snapshot(postfix_n, snapshot_take)

        basename = os.path.basename(blk_source)
        diskname = basename.split(".")[0]
        snap_src_lst = [blk_source]
        if multiple_chain:
            snap_name = "%s.%s1" % (diskname, postfix_n)
            snap_top = os.path.join(tmp_dir, snap_name)
            top_index = snapshot_external_disks.index(snap_top) + 1
            omit_list = snapshot_external_disks[top_index:]
            vm.destroy(gracefully=False)
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            disk_xml = ''
            disk_xmls = vmxml.get_devices(device_type="disk")
            for disk in disk_xmls:
                if disk.get('device_tag') == 'disk':
                    disk_xml = disk
                    break

            vmxml.del_device(disk_xml)
            disk_dict = {'attrs': {'file': snap_top}}
            disk_xml.source = disk_xml.new_disk_source(**disk_dict)
            vmxml.add_device(disk_xml)
            vmxml.sync()
            vm.start()
            session = vm.wait_for_login()
            postfix_n = 'new_snap'
            make_disk_snapshot(postfix_n, snapshot_take)
            snap_src_lst = [blk_source]
            snap_src_lst += snapshot_external_disks
            logging.debug("omit list is %s", omit_list)
            for i in omit_list:
                snap_src_lst.remove(i)
        else:
            # snapshot src file list
            snap_src_lst += snapshot_external_disks
        backing_chain = ''
        for i in reversed(list(range(snapshot_take))):
            if i == 0:
                backing_chain += "%s" % snap_src_lst[i]
            else:
                backing_chain += "%s -> " % snap_src_lst[i]

        logging.debug("The backing chain is: %s" % backing_chain)

        # check snapshot disk xml backingStore is expected
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disks = vmxml.devices.by_device_tag('disk')
        disk_xml = None
        for disk in disks:
            if disk.target['dev'] != blk_target:
                continue
            else:
                if disk.device != 'disk':
                    continue
                disk_xml = disk.xmltreefile
                logging.debug("the target disk xml after snapshot is %s",
                              disk_xml)
                break

        if not disk_xml:
            test.fail("Can't find disk xml with target %s" %
                      blk_target)
        elif libvirt_version.version_compare(1, 2, 4):
            # backingStore element introuduced in 1.2.4
            chain_lst = snap_src_lst[::-1]
            ret = check_chain_xml(disk_xml, chain_lst)
            if not ret:
                test.fail("Domain image backing chain check failed")

        # set blockcommit_options
        top_image = None
        blockcommit_options = "--wait --verbose"

        if with_timeout:
            blockcommit_options += " --timeout 1"

        if base_option == "shallow":
            blockcommit_options += " --shallow"
        elif base_option == "base":
            if middle_base:
                snap_name = "%s.%s1" % (diskname, postfix_n)
                blk_source = os.path.join(tmp_dir, snap_name)
            blockcommit_options += " --base %s" % blk_source

        if top_inactive:
            snap_name = "%s.%s2" % (diskname, postfix_n)
            top_image = os.path.join(tmp_dir, snap_name)
            blockcommit_options += " --top %s" % top_image
        else:
            blockcommit_options += " --active"
            if pivot_opt:
                blockcommit_options += " --pivot"

        if vm_state == "shut off":
            vm.destroy(gracefully=True)

        if with_active_commit:
            # inactive commit follow active commit will fail with bug 1135339
            cmd = "virsh blockcommit %s %s --active --pivot" % (vm_name,
                                                                blk_target)
            cmd_session = aexpect.ShellSession(cmd)

        if backing_file_relative_path:
            blockcommit_options = " --active --verbose --shallow --pivot --keep-relative"
            block_commit_index = snapshot_take
            expect_backing_file = False
            # Do block commit using --active
            for count in range(1, snapshot_take):
                res = virsh.blockcommit(vm_name, blk_target,
                                        blockcommit_options, **virsh_dargs)
                libvirt.check_exit_status(res, status_error)
            if top_inactive:
                blockcommit_options = " --wait --verbose --top vda[1] --base vda[2] --keep-relative"
                block_commit_index = snapshot_take - 1
                expect_backing_file = True
            # Do block commit with --wait if top_inactive
            for count in range(1, block_commit_index):
                res = virsh.blockcommit(vm_name, blk_target,
                                        blockcommit_options, **virsh_dargs)
                libvirt.check_exit_status(res, status_error)
            check_chain_backing_files(blk_source_image, expect_backing_file)
            return

        # Run test case
        # Active commit does not support on rbd based disk with bug 1200726
        result = virsh.blockcommit(vm_name, blk_target,
                                   blockcommit_options, **virsh_dargs)

        # Check status_error
        libvirt.check_exit_status(result, status_error)
        if result.exit_status and status_error:
            return

        while True:
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            disks = vmxml.devices.by_device_tag('disk')
            for disk in disks:
                if disk.target['dev'] != blk_target:
                    continue
                else:
                    disk_xml = disk.xmltreefile
                    break

            if not top_inactive:
                disk_mirror = disk_xml.find('mirror')
                if '--pivot' not in blockcommit_options:
                    if disk_mirror is not None:
                        job_type = disk_mirror.get('job')
                        job_ready = disk_mirror.get('ready')
                        src_element = disk_mirror.find('source')
                        disk_src_file = None
                        for elem in ('file', 'name', 'dev'):
                            elem_val = src_element.get(elem)
                            if elem_val:
                                disk_src_file = elem_val
                                break
                        err_msg = "blockcommit base source "
                        err_msg += "%s not expected" % disk_src_file
                        if '--shallow' in blockcommit_options:
                            if not multiple_chain:
                                if disk_src_file != snap_src_lst[2]:
                                    test.fail(err_msg)
                            else:
                                if disk_src_file != snap_src_lst[3]:
                                    test.fail(err_msg)
                        else:
                            if disk_src_file != blk_source:
                                test.fail(err_msg)
                        if libvirt_version.version_compare(1, 2, 7):
                            # The job attribute mentions which API started the
                            # operation since 1.2.7.
                            if job_type != 'active-commit':
                                test.fail("blockcommit job type '%s'"
                                          " not expected" % job_type)
                            if job_ready != 'yes':
                                # The attribute ready, if present, tracks
                                # progress of the job: yes if the disk is known
                                # to be ready to pivot, or, since 1.2.7, abort
                                # or pivot if the job is in the process of
                                # completing.
                                continue
                            else:
                                logging.debug("after active block commit job "
                                              "ready for pivot, the target disk"
                                              " xml is %s", disk_xml)
                                break
                        else:
                            break
                    else:
                        break
                else:
                    if disk_mirror is None:
                        logging.debug(disk_xml)
                        if "--shallow" in blockcommit_options:
                            chain_lst = snap_src_lst[::-1]
                            chain_lst.pop(0)
                            ret = check_chain_xml(disk_xml, chain_lst)
                            if not ret:
                                test.fail("Domain image backing "
                                          "chain check failed")
                            cmd_result = virsh.blockjob(vm_name, blk_target, '',
                                                        ignore_status=True,
                                                        debug=True)
                            libvirt.check_exit_status(cmd_result)
                        elif "--base" in blockcommit_options:
                            chain_lst = snap_src_lst[::-1]
                            base_index = chain_lst.index(blk_source)
                            chain_lst = chain_lst[base_index:]
                            ret = check_chain_xml(disk_xml, chain_lst)
                            if not ret:
                                test.fail("Domain image backing "
                                          "chain check failed")
                        break
                    else:
                        # wait pivot after commit is synced
                        continue
            else:
                logging.debug("after inactive commit the disk xml is: %s"
                              % disk_xml)
                if libvirt_version.version_compare(1, 2, 4):
                    if "--shallow" in blockcommit_options:
                        chain_lst = snap_src_lst[::-1]
                        chain_lst.remove(top_image)
                        ret = check_chain_xml(disk_xml, chain_lst)
                        if not ret:
                            test.fail("Domain image backing chain "
                                      "check failed")
                    elif "--base" in blockcommit_options:
                        chain_lst = snap_src_lst[::-1]
                        top_index = chain_lst.index(top_image)
                        base_index = chain_lst.index(blk_source)
                        val_tmp = []
                        for i in range(top_index, base_index):
                            val_tmp.append(chain_lst[i])
                        for i in val_tmp:
                            chain_lst.remove(i)
                        ret = check_chain_xml(disk_xml, chain_lst)
                        if not ret:
                            test.fail("Domain image backing chain "
                                      "check failed")
                    break
                else:
                    break

        # Check flag files
        if not vm_state == "shut off" and not multiple_chain:
            for flag in snapshot_flag_files:
                status, output = session.cmd_status_output("cat %s" % flag)
                if status:
                    test.fail("blockcommit failed: %s" % output)

        if not pivot_opt and snap_in_mirror:
            # do snapshot during mirror phase
            snap_path = "%s/%s.snap" % (tmp_dir, vm_name)
            snap_opt = "--disk-only --atomic --no-metadata "
            snap_opt += "vda,snapshot=external,file=%s" % snap_path
            snapshot_external_disks.append(snap_path)
            # NOTE: was 'ignore_statues=True' (typo); 'ignore_status=True' is
            # the kwarg every other virsh call in this file uses, and is
            # required here since the snapshot is expected to fail when
            # snap_in_mirror_err is set.
            cmd_result = virsh.snapshot_create_as(vm_name, snap_opt,
                                                  ignore_status=True,
                                                  debug=True)
            libvirt.check_exit_status(cmd_result, snap_in_mirror_err)

    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Recover xml of vm.
        vmxml_backup.sync("--snapshots-metadata")

        if cmd_session:
            cmd_session.close()
        for disk in snapshot_external_disks:
            if os.path.exists(disk):
                os.remove(disk)

        if backing_file_relative_path:
            libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)
            process.run("cd %s && rm -rf b c d" % blk_source_folder,
                        shell=True)
        if disk_src_protocol == 'iscsi':
            libvirt.setup_or_cleanup_iscsi(is_setup=False,
                                           restart_tgtd=restart_tgtd)
        elif disk_src_protocol == 'gluster':
            libvirt.setup_or_cleanup_gluster(False, vol_name, brick_path)
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
        elif disk_src_protocol == 'netfs':
            restore_selinux = params.get('selinux_status_bak')
            libvirt.setup_or_cleanup_nfs(is_setup=False,
                                         restore_selinux=restore_selinux)
def run(test, params, env):
    """
    Test snapshot-create-as command
    Make sure that the clean repo can be used because qemu-guest-agent need
    to be installed in guest

    The command create a snapshot (disk and RAM) from arguments which including
    the following point
    * virsh snapshot-create-as --print-xml --diskspec --name --description
    * virsh snapshot-create-as --print-xml with multi --diskspec
    * virsh snapshot-create-as --print-xml --memspec
    * virsh snapshot-create-as --description
    * virsh snapshot-create-as --no-metadata
    * virsh snapshot-create-as --no-metadata --print-xml (negative test)
    * virsh snapshot-create-as --atomic --disk-only
    * virsh snapshot-create-as --quiesce --disk-only (positive and negative)
    * virsh snapshot-create-as --reuse-external
    * virsh snapshot-create-as --disk-only --diskspec
    * virsh snapshot-create-as --memspec --reuse-external --atomic(negative)
    * virsh snapshot-create-as --disk-only and --memspec (negative)
    * Create multi snapshots with snapshot-create-as
    * Create snapshot with name a--a a--a--snap1

    :param test: avocado test object (provides cancel/fail reporting)
    :param params: dict-like test parameters driving the scenario
    :param env: test environment; supplies the VM object via env.get_vm()
    """
    if not virsh.has_help_command('snapshot-create-as'):
        test.cancel("This version of libvirt does not support "
                    "the snapshot-create-as test")

    # Scenario knobs, all read from the cfg-supplied params.
    vm_name = params.get("main_vm")
    status_error = params.get("status_error", "no")
    options = params.get("snap_createas_opts")
    multi_num = params.get("multi_num", "1")
    diskspec_num = params.get("diskspec_num", "1")
    bad_disk = params.get("bad_disk")
    reuse_external = "yes" == params.get("reuse_external", "no")
    start_ga = params.get("start_ga", "yes")
    domain_state = params.get("domain_state")
    memspec_opts = params.get("memspec_opts")
    config_format = "yes" == params.get("config_format", "no")
    snapshot_image_format = params.get("snapshot_image_format")
    diskspec_opts = params.get("diskspec_opts")
    create_autodestroy = 'yes' == params.get("create_autodestroy", "no")
    unix_channel = "yes" == params.get("unix_channel", "yes")
    dac_denial = "yes" == params.get("dac_denial", "no")
    check_json_no_savevm = "yes" == params.get("check_json_no_savevm", "no")
    disk_snapshot_attr = params.get('disk_snapshot_attr', 'external')
    set_snapshot_attr = "yes" == params.get("set_snapshot_attr", "no")

    # gluster related params
    replace_vm_disk = "yes" == params.get("replace_vm_disk", "no")
    disk_src_protocol = params.get("disk_source_protocol")
    restart_tgtd = params.get("restart_tgtd", "no")
    vol_name = params.get("vol_name")
    tmp_dir = data_dir.get_tmp_dir()
    pool_name = params.get("pool_name", "gluster-pool")
    brick_path = os.path.join(tmp_dir, pool_name)

    uri = params.get("virsh_uri")
    usr = params.get('unprivileged_user')
    if usr:
        if usr.count('EXAMPLE'):
            # NOTE(review): '******' looks like a redacted placeholder for the
            # real unprivileged-user account name — confirm intended value.
            usr = '******'

    if disk_src_protocol == 'iscsi':
        if not libvirt_version.version_compare(1, 0, 4):
            test.cancel("'iscsi' disk doesn't support in"
                        " current libvirt version.")

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    if not libvirt_version.version_compare(1, 2, 7):
        # As bug 1017289 closed as WONTFIX, the support only
        # exist on 1.2.7 and higher
        if disk_src_protocol == 'gluster':
            test.cancel("Snapshot on glusterfs not support in "
                        "current version. Check more info with "
                        "https://bugzilla.redhat.com/buglist.cgi?"
                        "bug_id=1017289,1032370")

    if libvirt_version.version_compare(5, 5, 0):
        # libvirt-5.5.0-2 commit 68e1a05f starts to allow --no-metadata and
        # --print-xml to be used together.
        if "--no-metadata" in options and "--print-xml" in options:
            logging.info("--no-metadata and --print-xml can be used together "
                         "in this libvirt version. Not expecting a failure.")
            status_error = "no"

    # locals() is captured so cfg-driven names like diskopts_1 can be
    # injected and looked up dynamically below.
    opt_names = locals()
    if memspec_opts is not None:
        mem_options = compose_disk_options(test, params, memspec_opts)
        # if the parameters have the disk without "file=" then we only need to
        # add testdir for it.
        if mem_options is None:
            mem_options = os.path.join(data_dir.get_tmp_dir(), memspec_opts)
        options += " --memspec " + mem_options

    tag_diskspec = 0
    dnum = int(diskspec_num)
    if diskspec_opts is not None:
        tag_diskspec = 1
        opt_names['diskopts_1'] = diskspec_opts

    # diskspec_opts[n] is used in cfg when more than 1 --diskspec is used
    if dnum > 1:
        tag_diskspec = 1
        for i in range(1, dnum + 1):
            opt_names["diskopts_%s" % i] = params.get("diskspec_opts%s" % i)

    # Append one --diskspec per configured disk to the command line.
    if tag_diskspec == 1:
        for i in range(1, dnum + 1):
            disk_options = compose_disk_options(test, params,
                                                opt_names["diskopts_%s" % i])
            options += " --diskspec " + disk_options

    logging.debug("options are %s", options)

    vm = env.get_vm(vm_name)
    option_dict = {}
    option_dict = utils_misc.valued_option_dict(options, r' --(?!-)')
    logging.debug("option_dict is %s", option_dict)

    # A backup of original vm
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    logging.debug("original xml is %s", vmxml_backup)

    # Generate empty image for negative test
    if bad_disk is not None:
        bad_disk = os.path.join(data_dir.get_tmp_dir(), bad_disk)
        with open(bad_disk, 'w') as bad_file:
            pass

    # Generate external disk
    if reuse_external:
        disk_path = ''
        for i in range(dnum):
            external_disk = "external_disk%s" % i
            if params.get(external_disk):
                disk_path = os.path.join(data_dir.get_tmp_dir(),
                                         params.get(external_disk))
                process.run("qemu-img create -f qcow2 %s 1G" % disk_path,
                            shell=True)
        # Only chmod of the last external disk for negative case
        if dac_denial:
            process.run("chmod 500 %s" % disk_path, shell=True)

    qemu_conf = None
    libvirtd_conf = None
    libvirtd_log_path = None
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        # Config "snapshot_image_format" option in qemu.conf
        if config_format:
            qemu_conf = utils_config.LibvirtQemuConfig()
            qemu_conf.snapshot_image_format = snapshot_image_format
            logging.debug("the qemu config file content is:\n %s" % qemu_conf)
            libvirtd.restart()

        # Raise libvirtd log verbosity and redirect it to a file so the
        # "savevm has not been found" check below can grep it.
        if check_json_no_savevm:
            libvirtd_conf = utils_config.LibvirtdConfig()
            libvirtd_conf["log_level"] = '1'
            libvirtd_conf["log_filters"] = '"1:json 3:remote 4:event"'
            libvirtd_log_path = os.path.join(data_dir.get_tmp_dir(),
                                             "libvirtd.log")
            libvirtd_conf["log_outputs"] = '"1:file:%s"' % libvirtd_log_path
            logging.debug("the libvirtd config file content is:\n %s" %
                          libvirtd_conf)
            libvirtd.restart()

        if replace_vm_disk:
            libvirt.set_vm_disk(vm, params, tmp_dir)

        # Rebuild the first disk with an explicit snapshot= attribute.
        if set_snapshot_attr:
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vmxml_new = vm_xml.VMXML.new_from_dumpxml(vm_name)
            disk_xml = vmxml_backup.get_devices(device_type="disk")[0]
            vmxml_new.del_device(disk_xml)
            # set snapshot attribute in disk xml
            disk_xml.snapshot = disk_snapshot_attr
            new_disk = disk.Disk(type_name='file')
            new_disk.xmltreefile = disk_xml.xmltreefile
            vmxml_new.add_device(new_disk)
            logging.debug("The vm xml now is: %s" % vmxml_new.xmltreefile)
            vmxml_new.sync()
            vm.start()

        # Start qemu-ga on guest if have --quiesce
        if unix_channel and options.find("quiesce") >= 0:
            vm.prepare_guest_agent()
            session = vm.wait_for_login()
            if start_ga == "no":
                # The qemu-ga could be running and should be killed
                session.cmd("kill -9 `pidof qemu-ga`")
                # Check if the qemu-ga get killed
                stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
                if not stat_ps:
                    # As managed by systemd and set as autostart, qemu-ga
                    # could be restarted, so use systemctl to stop it.
                    session.cmd("systemctl stop qemu-guest-agent")
                    stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
                    if not stat_ps:
                        test.cancel("Fail to stop agent in "
                                    "guest")

            if domain_state == "paused":
                virsh.suspend(vm_name)
        else:
            # Remove channel if exist
            if vm.is_alive():
                vm.destroy(gracefully=False)
            xml_inst = vm_xml.VMXML.new_from_dumpxml(vm_name)
            xml_inst.remove_agent_channels()
            vm.start()

        # Record the previous snapshot-list
        snaps_before = virsh.snapshot_list(vm_name)

        # Attach disk before create snapshot if not print xml and multi disks
        # specified in cfg
        if dnum > 1 and "--print-xml" not in options:
            for i in range(1, dnum):
                disk_path = os.path.join(data_dir.get_tmp_dir(),
                                         'disk%s.qcow2' % i)
                process.run("qemu-img create -f qcow2 %s 200M" % disk_path,
                            shell=True)
                virsh.attach_disk(vm_name, disk_path,
                                  'vd%s' % list(string.ascii_lowercase)[i],
                                  debug=True)

        # Run virsh command
        # May create several snapshots, according to configuration
        for count in range(int(multi_num)):
            if create_autodestroy:
                # Run virsh command in interactive mode
                vmxml_backup.undefine()
                vp = virsh.VirshPersistent()
                vp.create(vmxml_backup['xml'], '--autodestroy')
                cmd_result = vp.snapshot_create_as(vm_name, options,
                                                   ignore_status=True,
                                                   debug=True)
                vp.close_session()
                vmxml_backup.define()
            else:
                cmd_result = virsh.snapshot_create_as(vm_name, options,
                                                      unprivileged_user=usr,
                                                      uri=uri,
                                                      ignore_status=True,
                                                      debug=True)
                # for multi snapshots without specific snapshot name, the
                # snapshot name is using time string with 1 second
                # incremental, to avoid get snapshot failure with same name,
                # sleep 1 second here.
                if int(multi_num) > 1:
                    time.sleep(1.1)

            output = cmd_result.stdout.strip()
            status = cmd_result.exit_status

            # check status_error
            if status_error == "yes":
                if status == 0:
                    test.fail("Run successfully with wrong command!")
                else:
                    # Check memspec file should be removed if failed
                    if (options.find("memspec") >= 0 and
                            options.find("atomic") >= 0):
                        if os.path.isfile(option_dict['memspec']):
                            os.remove(option_dict['memspec'])
                            test.fail("Run failed but file %s exist"
                                      % option_dict['memspec'])
                        else:
                            logging.info("Run failed as expected and memspec"
                                         " file already been removed")
                    # Check domain xml is not updated if reuse external fail
                    elif reuse_external and dac_denial:
                        output = virsh.dumpxml(vm_name).stdout.strip()
                        if "reuse_external" in output:
                            test.fail("Domain xml should not be "
                                      "updated with snapshot image")
                    else:
                        logging.info("Run failed as expected")

            elif status_error == "no":
                if status != 0:
                    test.fail("Run failed with right command: %s" % output)
                else:
                    # Check the special options
                    snaps_list = virsh.snapshot_list(vm_name)
                    logging.debug("snaps_list is %s", snaps_list)
                    check_snapslist(test, vm_name, options, option_dict,
                                    output, snaps_before, snaps_list)

                    # For cover bug 872292
                    if check_json_no_savevm:
                        pattern = "The command savevm has not been found"
                        with open(libvirtd_log_path) as f:
                            for line in f:
                                if pattern in line and "error" in line:
                                    test.fail("'%s' was found: %s"
                                              % (pattern, line))

    finally:
        # Best-effort environment restoration; runs whether or not the
        # checks above failed.
        if vm.is_alive():
            vm.destroy()
        # recover domain xml
        xml_recover(vmxml_backup)
        path = "/var/lib/libvirt/qemu/snapshot/" + vm_name
        if os.path.isfile(path):
            test.fail("Still can find snapshot metadata")

        if disk_src_protocol == 'gluster':
            gluster.setup_or_cleanup_gluster(False, brick_path=brick_path,
                                             **params)
            libvirtd.restart()

        if disk_src_protocol == 'iscsi':
            libvirt.setup_or_cleanup_iscsi(False, restart_tgtd=restart_tgtd)

        # rm bad disks
        if bad_disk is not None:
            os.remove(bad_disk)
        # rm attach disks and reuse external disks
        if dnum > 1 and "--print-xml" not in options:
            for i in range(dnum):
                disk_path = os.path.join(data_dir.get_tmp_dir(),
                                         'disk%s.qcow2' % i)
                if os.path.exists(disk_path):
                    os.unlink(disk_path)
                if reuse_external:
                    external_disk = "external_disk%s" % i
                    disk_path = os.path.join(data_dir.get_tmp_dir(),
                                             params.get(external_disk))
                    if os.path.exists(disk_path):
                        os.unlink(disk_path)

        # restore config
        if config_format and qemu_conf:
            qemu_conf.restore()
        if libvirtd_conf:
            libvirtd_conf.restore()
        if libvirtd_conf or (config_format and qemu_conf):
            libvirtd.restart()
        if libvirtd_log_path and os.path.exists(libvirtd_log_path):
            os.unlink(libvirtd_log_path)
def run(test, params, env):
    """
    Do virsh snapshot-parent and virsh snapshot-current test
    with all parameters in readonly/readwrite mode

    :param test: avocado test object (provides fail/cancel reporting)
    :param params: dict-like test parameters driving the scenario
    :param env: test environment; supplies the VM object via env.get_vm()
    """

    vm_name = params.get("main_vm")
    pstatus_error = params.get("snapshot_parent_status_error", "no")
    cstatus_error = params.get("snapshot_current_status_error", "no")
    snap_parent_opt = params.get("snapshot_parent_option")
    snap_cur_opt = params.get("snapshot_current_option")
    passwd = params.get("snapshot_current_passwd")
    snap_num = int(params.get("snapshot_num"))
    readonly = ("yes" == params.get("readonly", "no"))
    without_snapshot = "yes" == params.get("without_snapshot", "no")

    # Build the snapshot-create-as option string for each snapshot,
    # substituting the SNAPSHOT_TMPFILE placeholder with a real tmp path.
    snap_opt = []
    for i in range(1, snap_num + 1):
        screate_opt = params.get("screate_opt%s" % i)
        if "SNAPSHOT_TMPFILE" in screate_opt:
            tmp_file = os.path.join(data_dir.get_tmp_dir(), "tmpfile")
            screate_opt = re.sub("SNAPSHOT_TMPFILE", tmp_file, screate_opt)
        snap_opt.append(screate_opt)

    # Do xml backup for final recovery
    vmxml_backup = vm_xml.VMXML.new_from_dumpxml(vm_name)

    # Add passwd for snapshot-current --security-info testing
    if snap_cur_opt is not None and "security-info" in snap_cur_opt:
        vm = env.get_vm(vm_name)
        if vm.is_alive():
            vm.destroy()
        vm_xml.VMXML.add_security_info(vmxml_backup.copy(), passwd)
        vm.start()

    def current_snapshot_test():
        """
        Do current snapshot test and xml check
        """
        output = virsh.snapshot_current(vm_name, snap_cur_opt,
                                        ignore_status=True, debug=True,
                                        readonly=readonly)
        # If run fail with cstatus_error = no, then error will raise in command
        if cstatus_error == "yes":
            if output.exit_status == 0:
                test.fail("Unexpected snapshot-current success")
            else:
                logging.info("Failed to run snapshot-current as expected:%s",
                             output.stderr)
                return

        # Check if snapshot xml have security info
        if "--security-info" in snap_cur_opt and \
                "--name" not in snap_cur_opt:
            devices = vm_xml.VMXML.new_from_dumpxml(vm_name,
                                                    "--security-info").devices
            first_graphic = devices.by_device_tag('graphics')[0]
            try:
                if passwd == first_graphic.passwd:
                    logging.info("Success to check current snapshot with"
                                 " security info")
                else:
                    test.fail("Passwd is not same as set")
            except KeyError:
                test.fail("Can not find passwd in snapshot xml")

        # Check if --snapshotname may change current snapshot
        if "--snapshotname" in snap_cur_opt:
            cmd_result = virsh.snapshot_current(vm_name, ignore_status=True,
                                                debug=True, readonly=readonly)
            current_snap = cmd_result.stdout.strip()
            if current_snap == snap_cur_opt.split()[1]:
                logging.info("Success to check current snapshot changed to %s",
                             current_snap)
            else:
                test.fail("Failed to change current snapshot to %s,"
                          "current is %s" % (snap_cur_opt.split()[1],
                                             current_snap))

    def parent_snapshot_check(snap_parent):
        """
        Do parent snapshot check

        :params: snap_parent: parent snapshot name that need to check
        """
        # get snapshot name which is parent snapshot's child
        if "--current" in snap_parent_opt:
            cmd_result = virsh.snapshot_current(vm_name)
            snap_name = cmd_result.stdout.strip()
        else:
            snap_name = snap_parent_opt.split()[-1]

        # check parent snapshot in snapshot-list: scan the same table rows the
        # test always inspected (rows 2 .. snap_num + 2 skip the header), but
        # fail cleanly instead of raising NameError/IndexError when the child
        # snapshot row is missing or the output is shorter than expected.
        output = virsh.command("snapshot-list %s --parent" % vm_name).stdout
        expect_name = None
        for line in output.splitlines()[2:snap_num + 3]:
            fields = line.split()
            if fields and fields[0] == snap_name:
                # last column of the matching row is the parent name
                expect_name = fields[-1]
                break
        if expect_name is None:
            test.fail("Snapshot %s not found in snapshot-list --parent output"
                      % snap_name)

        if snap_parent == expect_name:
            logging.info("Success to check parent snapshot")
        else:
            test.fail("Failed to check parent "
                      "snapshot, expect %s, get %s"
                      % (expect_name, snap_parent))

    def parent_snapshot_test():
        """
        Do parent snapshot test
        """
        cmd_result = virsh.snapshot_parent(vm_name, snap_parent_opt,
                                           debug=True, readonly=readonly)
        # check status
        if pstatus_error == "yes":
            if cmd_result.exit_status == 0:
                test.fail("Unexpected success")
            else:
                logging.info("Run failed as expected:%s", cmd_result.stderr)
        elif cmd_result.exit_status != 0:
            test.fail("Run failed with right command:%s" % cmd_result.stderr)
        else:
            parent_snapshot_check(cmd_result.stdout.strip())

    try:
        if not without_snapshot:
            # Create disk snapshot before all to make the origin image clean
            ret = virsh.snapshot_create_as(vm_name, "snap-temp --disk-only")
            if ret.exit_status != 0:
                test.fail("Fail to create temp snap, Error: %s"
                          % ret.stderr.strip())

            # Create snapshots
            for opt in snap_opt:
                result = virsh.snapshot_create_as(vm_name, opt)
                if result.exit_status:
                    test.fail("Failed to create snapshot. Error:%s."
                              % result.stderr.strip())
                # default snapshot names are time-based; space them out
                time.sleep(1)

        # Do parent snapshot test
        if snap_parent_opt is not None:
            parent_snapshot_test()

        # Do current snapshot test
        if snap_cur_opt is not None:
            current_snapshot_test()

    finally:
        if not without_snapshot:
            utils_test.libvirt.clean_up_snapshots(vm_name)
        vmxml_backup.sync("--snapshots-metadata")
        try:
            os.remove(tmp_file)
        except (OSError, NameError):
            # tmp_file defined inside conditional
            pass
def run(test, params, env):
    """
    Attach/Detach an iscsi network/volume disk to domain

    1. For secret usage testing:
        1.1. Setup an iscsi target with CHAP authentication.
        1.2. Define a secret for iscsi target usage
        1.3. Set secret value
    2. Setup an iscsi target (and, for volume disks, an iscsi pool on it)
    3. Create an iscsi network/volume disk XML
    4. Attach disk with the XML file and check the disk inside the VM
    5. Optionally run a domain operation (save/restore or snapshots)
    6. Detach the disk

    :param test: avocado test object (provides error/fail/cancel reporting)
    :param params: dict-like test parameters driving the scenario
    :param env: test environment; supplies the VM object via env.get_vm()
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    # Disk, pool and CHAP-secret configuration from cfg.
    disk_device = params.get("disk_device", "disk")
    disk_type = params.get("disk_type", "network")
    disk_src_protocol = params.get("disk_source_protocol", "iscsi")
    disk_src_host = params.get("disk_source_host", "127.0.0.1")
    disk_src_port = params.get("disk_source_port", "3260")
    disk_src_pool = params.get("disk_source_pool")
    disk_src_mode = params.get("disk_source_mode", "host")
    pool_type = params.get("pool_type", "iscsi")
    pool_src_host = params.get("pool_source_host", "127.0.0.1")
    disk_target = params.get("disk_target", "vdb")
    disk_target_bus = params.get("disk_target_bus", "virtio")
    disk_readonly = params.get("disk_readonly", "no")
    chap_auth = "yes" == params.get("chap_auth", "no")
    chap_user = params.get("chap_username", "")
    chap_passwd = params.get("chap_password", "")
    secret_usage_target = params.get("secret_usage_target")
    secret_ephemeral = params.get("secret_ephemeral", "no")
    secret_private = params.get("secret_private", "yes")
    status_error = "yes" == params.get("status_error", "no")

    # Indicate the PPC platform
    on_ppc = False
    if platform.platform().count('ppc64'):
        on_ppc = True

    if disk_src_protocol == 'iscsi':
        if not libvirt_version.version_compare(1, 0, 4):
            test.cancel("'iscsi' disk doesn't support in"
                        " current libvirt version.")
    if disk_type == "volume":
        if not libvirt_version.version_compare(1, 0, 5):
            test.cancel("'volume' type disk doesn't support in"
                        " current libvirt version.")
    # Back VM XML
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    virsh_dargs = {'debug': True, 'ignore_status': True}
    try:
        start_vm = "yes" == params.get("start_vm", "yes")
        if start_vm:
            if vm.is_dead():
                vm.start()
            vm.wait_for_login()
        else:
            if not vm.is_dead():
                vm.destroy()

        if chap_auth:
            # Create a secret xml to define it
            secret_xml = SecretXML(secret_ephemeral, secret_private)
            secret_xml.auth_type = "chap"
            secret_xml.auth_username = chap_user
            secret_xml.usage = disk_src_protocol
            secret_xml.target = secret_usage_target
            with open(secret_xml.xml) as f:
                logging.debug("Define secret by XML: %s", f.read())
            # Define secret
            cmd_result = virsh.secret_define(secret_xml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            # Get secret uuid
            try:
                secret_uuid = cmd_result.stdout.strip().split()[1]
            except IndexError:
                test.error("Fail to get new created secret uuid")

            # Set secret value
            encoding = locale.getpreferredencoding()
            secret_string = base64.b64encode(chap_passwd.encode(encoding)).decode(encoding)
            cmd_result = virsh.secret_set_value(secret_uuid, secret_string,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
        else:
            # Set chap_user and chap_passwd to empty to avoid setup
            # CHAP authentication when export iscsi target
            chap_user = ""
            chap_passwd = ""

        # Setup iscsi target
        iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(is_setup=True,
                                                               is_login=False,
                                                               image_size='1G',
                                                               chap_user=chap_user,
                                                               chap_passwd=chap_passwd,
                                                               portal_ip=disk_src_host)
        # Create iscsi pool
        if disk_type == "volume":
            # Create an iscsi pool xml to create it
            pool_src_xml = pool_xml.SourceXML()
            pool_src_xml.host_name = pool_src_host
            pool_src_xml.device_path = iscsi_target
            poolxml = pool_xml.PoolXML(pool_type=pool_type)
            poolxml.name = disk_src_pool
            poolxml.set_source(pool_src_xml)
            poolxml.target_path = "/dev/disk/by-path"
            # Create iscsi pool
            cmd_result = virsh.pool_create(poolxml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            def get_vol():
                """Get the volume info"""
                # Refresh the pool
                cmd_result = virsh.pool_refresh(disk_src_pool)
                libvirt.check_exit_status(cmd_result)
                # Get volume name
                cmd_result = virsh.vol_list(disk_src_pool, **virsh_dargs)
                libvirt.check_exit_status(cmd_result)
                vol_list = []
                # row 0 of vol-list output is the header; row 1 is the volume
                vol_list = re.findall(r"(\S+)\ +(\S+)",
                                      str(cmd_result.stdout.strip()))
                if len(vol_list) > 1:
                    return vol_list[1]
                else:
                    return None

            # Wait for a while so that we can get the volume info
            vol_info = utils_misc.wait_for(get_vol, 10)
            if vol_info:
                vol_name, vol_path = vol_info
            else:
                test.error("Failed to get volume info")
            # Snapshot doesn't support raw disk format, create a qcow2 volume
            # disk for snapshot operation.
            process.run('qemu-img create -f qcow2 %s %s' % (vol_path, '100M'),
                        shell=True)

        # Create iscsi network disk XML
        disk_params = {'device_type': disk_device,
                       'type_name': disk_type,
                       'target_dev': disk_target,
                       'target_bus': disk_target_bus,
                       'readonly': disk_readonly}
        disk_params_src = {}
        if disk_type == "network":
            disk_params_src = {'source_protocol': disk_src_protocol,
                               'source_name': iscsi_target + "/%s" % lun_num,
                               'source_host_name': disk_src_host,
                               'source_host_port': disk_src_port}
        elif disk_type == "volume":
            disk_params_src = {'source_pool': disk_src_pool,
                               'source_volume': vol_name,
                               'driver_type': 'qcow2',
                               'source_mode': disk_src_mode}
        else:
            test.cancel("Unsupport disk type in this test")
        disk_params.update(disk_params_src)
        if chap_auth:
            disk_params_auth = {'auth_user': chap_user,
                                'secret_type': disk_src_protocol,
                                'secret_usage': secret_xml.target}
            disk_params.update(disk_params_auth)
        disk_xml = libvirt.create_disk_xml(disk_params)
        attach_option = params.get("attach_option", "")
        # NOTE(review): passing dargs=virsh_dargs sends the whole dict as a
        # single keyword; other call sites here use **virsh_dargs — confirm
        # whether this was meant to be unpacked.
        cmd_result = virsh.attach_device(domainarg=vm_name, filearg=disk_xml,
                                         flagstr=attach_option,
                                         dargs=virsh_dargs)
        libvirt.check_exit_status(cmd_result, status_error)

        if vm.is_dead():
            cmd_result = virsh.start(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

        # Wait for domain is stable
        vm.wait_for_login().close()
        domain_operation = params.get("domain_operation", "")
        if domain_operation == "save":
            save_file = os.path.join(data_dir.get_tmp_dir(), "vm.save")
            cmd_result = virsh.save(vm_name, save_file, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.restore(save_file)
            libvirt.check_exit_status(cmd_result)
            if os.path.exists(save_file):
                os.remove(save_file)
        elif domain_operation == "snapshot":
            # Run snapshot related commands: snapshot-create-as, snapshot-list
            # snapshot-info, snapshot-dumpxml, snapshot-create
            snapshot_name1 = "snap1"
            snapshot_name2 = "snap2"
            cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name1,
                                                  **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            try:
                virsh.snapshot_list(vm_name, **virsh_dargs)
            except process.CmdError:
                test.fail("Failed getting snapshots list for %s" % vm_name)

            try:
                virsh.snapshot_info(vm_name, snapshot_name1, **virsh_dargs)
            except process.CmdError:
                test.fail("Failed getting snapshots info for %s" % vm_name)

            cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name1,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_create(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_current(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            snapshot_file = os.path.join(data_dir.get_tmp_dir(),
                                         snapshot_name2)
            sn_create_op = ("%s --disk-only --diskspec %s,file=%s"
                            % (snapshot_name2, disk_target, snapshot_file))
            cmd_result = virsh.snapshot_create_as(vm_name, sn_create_op,
                                                  **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.snapshot_revert(vm_name, snapshot_name1,
                                               **virsh_dargs)

            cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs)
            if snapshot_name2 not in cmd_result:
                test.error("Snapshot %s not found" % snapshot_name2)
        elif domain_operation == "":
            logging.debug("No domain operation provided, so skip it")
        else:
            logging.error("Unsupport operation %s in this case, so skip it",
                          domain_operation)

        def find_attach_disk(expect=True):
            """
            Find attached disk inside the VM

            :param expect: True if the disk is expected to be visible
                           in the guest's /proc/partitions
            """
            found_disk = False
            if vm.is_dead():
                test.error("Domain %s is not running" % vm_name)
            else:
                try:
                    session = vm.wait_for_login()
                    # Here the script needs wait for a while for the guest to
                    # recognize the hotplugged disk on PPC
                    if on_ppc:
                        time.sleep(10)
                    cmd = "grep %s /proc/partitions" % disk_target
                    s, o = session.cmd_status_output(cmd)
                    logging.info("%s output: %s", cmd, o)
                    session.close()
                    if s == 0:
                        found_disk = True
                except (LoginError, VMError, ShellError) as e:
                    logging.error(str(e))

            if found_disk == expect:
                logging.debug("Check disk inside the VM PASS as expected")
            else:
                test.error("Check disk inside the VM FAIL")

        # Check disk inside the VM, expect is False if status_error=True
        find_attach_disk(not status_error)

        # Detach disk
        cmd_result = virsh.detach_disk(vm_name, disk_target)
        libvirt.check_exit_status(cmd_result, status_error)

        # Check disk inside the VM
        find_attach_disk(False)

    finally:
        # Clean up snapshot
        # Shut down before cleaning up snapshots
        if vm.is_alive():
            vm.destroy()
        libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)
        # Restore vm
        vmxml_backup.sync("--snapshots-metadata")
        # Destroy pool and undefine secret, which may not exist
        try:
            if disk_type == "volume":
                virsh.pool_destroy(disk_src_pool)
            if chap_auth:
                virsh.secret_undefine(secret_uuid)
        except Exception:
            pass
        libvirt.setup_or_cleanup_iscsi(is_setup=False)
def run_virsh_snapshot_create_as(test, params, env): """ Test snapshot-create-as command Make sure that the clean repo can be used because qemu-guest-agent need to be installed in guest The command create a snapshot (disk and RAM) from arguments which including the following point * virsh snapshot-create-as --print-xml --diskspec --name --description * virsh snapshot-create-as --print-xml with multi --diskspec * virsh snapshot-create-as --print-xml --memspec * virsh snapshot-create-as --description * virsh snapshot-create-as --no-metadata * virsh snapshot-create-as --no-metadata --print-xml (negtive test) * virsh snapshot-create-as --atomic --disk-only * virsh snapshot-create-as --quiesce --disk-only (positive and negtive) * virsh snapshot-create-as --reuse-external * virsh snapshot-create-as --disk-only --diskspec * virsh snapshot-create-as --memspec --reuse-external --atomic(negtive) * virsh snapshot-create-as --disk-only and --memspec (negtive) * Create multi snapshots with snapshot-create-as * Create snapshot with name a--a a--a--snap1 """ if not virsh.has_help_command('snapshot-create-as'): raise error.TestNAError("This version of libvirt does not support " "the snapshot-create-as test") vm_name = params.get("main_vm") status_error = params.get("status_error", "no") options = params.get("snap_createas_opts") multi_num = params.get("multi_num", "1") diskspec_num = params.get("diskspec_num", "1") bad_disk = params.get("bad_disk") external_disk = params.get("external_disk") start_ga = params.get("start_ga", "yes") domain_state = params.get("domain_state") memspec_opts = params.get("memspec_opts") diskspec_opts = params.get("diskspec_opts") opt_names = locals() if memspec_opts is not None: mem_options = compose_disk_options(test, params, memspec_opts) # if the parameters have the disk without "file=" then we only need to # add testdir for it. 
if mem_options is None: mem_options = os.path.join(test.virtdir, memspec_opts) options += " --memspec " + mem_options tag_diskspec = 0 dnum = int(diskspec_num) if diskspec_opts is not None: tag_diskspec = 1 opt_names['diskopts_1'] = diskspec_opts # diskspec_opts[n] is used in cfg when more than 1 --diskspec is used if dnum > 1: tag_diskspec = 1 for i in range(1, dnum + 1): opt_names["diskopts_%s" % i] = params.get("diskspec_opts%s" % i) if tag_diskspec == 1: for i in range(1, dnum + 1): disk_options = compose_disk_options(test, params, opt_names["diskopts_%s" % i]) options += " --diskspec " + disk_options logging.debug("options are %s", options) vm = env.get_vm(vm_name) option_dict = {} option_dict = utils_misc.valued_option_dict(options, r' --(?!-)') logging.debug("option_dict is %s", option_dict) # A backup of original vm vmxml_backup = vm_xml.VMXML.new_from_dumpxml(vm_name) logging.debug("original xml is %s", vmxml_backup) # Generate empty image for negtive test if bad_disk is not None: bad_disk = os.path.join(test.virtdir, bad_disk) os.open(bad_disk, os.O_RDWR | os.O_CREAT) # Gererate external disk if external_disk is not None: external_disk = os.path.join(test.virtdir, external_disk) commands.getoutput("qemu-img create -f qcow2 %s 1G" % external_disk) # Start qemu-ga on guest if have --quiesce if options.find("quiesce") >= 0: if vm.is_alive(): vm.destroy() virt_xml_obj = libvirt_xml.VMXML(virsh_instance=virsh) virt_xml_obj.set_agent_channel(vm_name) vm.start() if start_ga == "yes": session = vm.wait_for_login() # Check if qemu-ga already started automatically cmd = "rpm -q qemu-guest-agent || yum install -y qemu-guest-agent" stat_install = session.cmd_status(cmd, 300) if stat_install != 0: xml_recover(vmxml_backup) raise error.TestFail("Fail to install qemu-guest-agent, make" "sure that you have usable repo in guest") # Check if qemu-ga already started stat_ps = session.cmd_status("ps aux |grep [q]emu-ga") if stat_ps != 0: session.cmd("qemu-ga -d") # Check if 
the qemu-ga really started stat_ps = session.cmd_status("ps aux |grep [q]emu-ga") if stat_ps != 0: xml_recover(vmxml_backup) raise error.TestFail("Fail to run qemu-ga in guest") if domain_state == "paused": virsh.suspend(vm_name) # Record the previous snapshot-list snaps_before = virsh.snapshot_list(vm_name) # Run virsh command # May create several snapshots, according to configuration for count in range(int(multi_num)): cmd_result = virsh.snapshot_create_as(vm_name, options, ignore_status=True, debug=True) output = cmd_result.stdout.strip() status = cmd_result.exit_status # check status_error if status_error == "yes": if status == 0: xml_recover(vmxml_backup) raise error.TestFail("Run successfully with wrong command!") else: # Check memspec file should be removed if failed if (options.find("memspec") >= 0 and options.find("atomic") >= 0): if os.path.isfile(option_dict['memspec']): os.remove(option_dict['memspec']) xml_recover(vmxml_backup) raise error.TestFail("Run failed but file %s exist" % option_dict['memspec']) else: logging.info("Run failed as expected and memspec file" " already beed removed") else: logging.info("Run failed as expected") elif status_error == "no": if status != 0: xml_recover(vmxml_backup) raise error.TestFail("Run failed with right command: %s" % output) else: # Check the special options snaps_list = virsh.snapshot_list(vm_name) logging.debug("snaps_list is %s", snaps_list) no_metadata = options.find("--no-metadata") fdisks = "disks" # command with print-xml will not really create snapshot if options.find("print-xml") >= 0: xtf = xml_utils.XMLTreeFile(output) # With --print-xml there isn't new snapshot created if len(snaps_before) != len(snaps_list): xml_recover(vmxml_backup) raise error.TestFail("--print-xml create new snapshot") else: # The following does not check with print-xml get_sname = output.split()[2] # check domain/snapshot xml depends on if have metadata if no_metadata < 0: output_dump = virsh.snapshot_dumpxml(vm_name, 
get_sname) else: output_dump = virsh.dumpxml(vm_name) fdisks = "devices" xtf = xml_utils.XMLTreeFile(output_dump) find = 0 for snap in snaps_list: if snap == get_sname: find = 1 break # Should find snap in snaplist without --no-metadata if (find == 0 and no_metadata < 0): xml_recover(vmxml_backup) raise error.TestFail("Can not find snapshot %s!" % get_sname) # Should not find snap in list without metadata elif (find == 1 and no_metadata >= 0): xml_recover(vmxml_backup) raise error.TestFail("Can find snapshot metadata even " "if have --no-metadata") elif (find == 0 and no_metadata >= 0): logging.info("Can not find snapshot %s as no-metadata " "is given" % get_sname) # Check snapshot only in qemu-img if (options.find("--disk-only") < 0 and options.find("--memspec") < 0): ret = check_snap_in_image(vm_name, get_sname) if ret == False: xml_recover(vmxml_backup) raise error.TestFail("No snap info in image") else: logging.info("Find snapshot %s in snapshot list." % get_sname) # Check if the disk file exist when disk-only is given if options.find("disk-only") >= 0: for disk in xtf.find(fdisks).findall('disk'): diskpath = disk.find('source').get('file') if os.path.isfile(diskpath): logging.info("disk file %s exist" % diskpath) os.remove(diskpath) else: xml_recover(vmxml_backup) raise error.TestFail("Can not find disk %s" % diskpath) # Check if the guest is halted when 'halt' is given if options.find("halt") >= 0: domstate = virsh.domstate(vm_name) if re.match("shut off", domstate.stdout): logging.info("Domain is halted after create " "snapshot") else: xml_recover(vmxml_backup) raise error.TestFail("Domain is not halted after " "snapshot created") # Check the snapshot xml regardless of having print-xml or not if (options.find("name") >= 0 and no_metadata < 0): if xtf.findtext('name') == option_dict["name"]: logging.info("get snapshot name same as set") else: xml_recover(vmxml_backup) raise error.TestFail("Get wrong snapshot name %s" % xtf.findtext('name')) if 
(options.find("description") >= 0 and no_metadata < 0): desc = xtf.findtext('description') if desc == option_dict["description"]: logging.info("get snapshot description same as set") else: xml_recover(vmxml_backup) raise error.TestFail("Get wrong description on xml") if options.find("diskspec") >= 0: if isinstance(option_dict['diskspec'], list): index = len(option_dict['diskspec']) else: index = 1 disks = xtf.find(fdisks).findall('disk') for num in range(index): if isinstance(option_dict['diskspec'], list): option_disk = option_dict['diskspec'][num] else: option_disk = option_dict['diskspec'] option_disk = "name=" + option_disk disk_dict = utils_misc.valued_option_dict(option_disk, ",", 0, "=") logging.debug("disk_dict is %s", disk_dict) # For no metadata snapshot do not check name and # snapshot if no_metadata < 0: dname = disks[num].get('name') logging.debug("dname is %s", dname) if dname == disk_dict['name']: logging.info("get disk%d name same as set in " "diskspec", num) else: xml_recover(vmxml_backup) raise error.TestFail("Get wrong disk%d name %s" % num, dname) if option_disk.find('snapshot=') >= 0: dsnap = disks[num].get('snapshot') logging.debug("dsnap is %s", dsnap) if dsnap == disk_dict['snapshot']: logging.info("get disk%d snapshot type same" " as set in diskspec", num) else: xml_recover(vmxml_backup) raise error.TestFail("Get wrong disk%d " "snapshot type %s" % num, dsnap) if option_disk.find('driver=') >= 0: dtype = disks[num].find('driver').get('type') if dtype == disk_dict['driver']: logging.info("get disk%d driver type same as " "set in diskspec", num) else: xml_recover(vmxml_backup) raise error.TestFail("Get wrong disk%d driver " "type %s" % num, dtype) if option_disk.find('file=') >=0: sfile = disks[num].find('source').get('file') if sfile == disk_dict['file']: logging.info("get disk%d source file same as " "set in diskspec", num) else: xml_recover(vmxml_backup) raise error.TestFail("Get wrong disk%d source " "file %s" % num, sfile) # For memspec 
check if the xml is same as setting # Also check if the mem file exists if options.find("memspec") >= 0: memspec = option_dict['memspec'] if re.search('file=', option_dict['memspec']) < 0: memspec = 'file=' + option_dict['memspec'] mem_dict = utils_misc.valued_option_dict(memspec, ",", 0, "=") logging.debug("mem_dict is %s", mem_dict) if no_metadata < 0: if memspec.find('snapshot=') >= 0: snap = xtf.find('memory').get('snapshot') if snap == mem_dict['snapshot']: logging.info("get memory snapshot type same as" " set in diskspec") else: xml_recover(vmxml_backup) raise error.TestFail("Get wrong memory snapshot" " type on print xml") memfile = xtf.find('memory').get('file') if memfile == mem_dict['file']: logging.info("get memory file same as set in " "diskspec") else: xml_recover(vmxml_backup) raise error.TestFail("Get wrong memory file on " "print xml %s", memfile) if options.find("print-xml") < 0: if os.path.isfile(mem_dict['file']): logging.info("memory file generated") os.remove(mem_dict['file']) else: xml_recover(vmxml_backup) raise error.TestFail("Fail to generate memory file" " %s", mem_dict['file']) # Environment clean if options.find("quiesce") >= 0 and start_ga == "yes": session.cmd("rpm -e qemu-guest-agent") # recover domain xml xml_recover(vmxml_backup) path = "/var/lib/libvirt/qemu/snapshot/" + vm_name if os.path.isfile(path): raise error.TestFail("Still can find snapshot metadata") # rm bad disks if bad_disk is not None: os.remove(bad_disk)
def run(test, params, env):
    """
    Test command: virsh blockpull <domain> <path>

    1) Prepare test environment.
    2) Populate a disk from its backing image.
    3) Recover test environment.
    4) Check result.

    :param test: test object (used for raising result exceptions indirectly
                 via error.TestFail/TestNAError in this legacy-style test)
    :param params: cartesian config parameters for this test variant
    :param env: test environment holding the VM objects
    """

    def make_disk_snapshot():
        """
        Create four external disk-only snapshots of every non-cdrom disk,
        and after each snapshot touch a flag file inside the guest so the
        blockpull result can be verified later via snapshot_flag_files.
        """
        # Make four external snapshots for disks only
        for count in range(1, 5):
            snap_xml = snapshot_xml.SnapshotXML()
            snapshot_name = "snapshot_test%s" % count
            snap_xml.snap_name = snapshot_name
            snap_xml.description = "Snapshot Test %s" % count

            # Add all disks into xml file.
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            disks = vmxml.devices.by_device_tag('disk')
            new_disks = []
            for src_disk_xml in disks:
                disk_xml = snap_xml.SnapDiskXML()
                disk_xml.xmltreefile = src_disk_xml.xmltreefile

                # Skip cdrom
                if disk_xml.device == "cdrom":
                    continue
                del disk_xml.device
                del disk_xml.address
                disk_xml.snapshot = "external"
                disk_xml.disk_name = disk_xml.target['dev']

                # Only qcow2 works as external snapshot file format, update it
                # here
                driver_attr = disk_xml.driver
                driver_attr.update({'type': 'qcow2'})
                disk_xml.driver = driver_attr

                # Build the new snapshot target <source> depending on the
                # original disk source type (file / gluster / block / iscsi /
                # rbd).  NOTE: uses dict.has_key(), so this is Python 2 code.
                new_attrs = disk_xml.source.attrs
                if disk_xml.source.attrs.has_key('file'):
                    file_name = disk_xml.source.attrs['file']
                    new_file = "%s.snap%s" % (file_name.split('.')[0],
                                              count)
                    snapshot_external_disks.append(new_file)
                    new_attrs.update({'file': new_file})
                    hosts = None
                elif (disk_xml.source.attrs.has_key('name') and
                      disk_src_protocol == 'gluster'):
                    src_name = disk_xml.source.attrs['name']
                    new_name = "%s.snap%s" % (src_name.split('.')[0],
                                              count)
                    new_attrs.update({'name': new_name})
                    snapshot_external_disks.append(new_name)
                    hosts = disk_xml.source.hosts
                elif (disk_xml.source.attrs.has_key('dev') or
                      disk_xml.source.attrs.has_key('name')):
                    if (disk_xml.type_name == 'block' or
                            disk_src_protocol in ['iscsi', 'rbd']):
                        # Use local file as external snapshot target for block
                        # and iscsi network type.
                        # As block device will be treat as raw format by
                        # default, it's not fit for external disk snapshot
                        # target. A work around solution is use qemu-img again
                        # with the target.
                        # And external active snapshots are not supported on
                        # 'network' disks using 'iscsi' protocol
                        disk_xml.type_name = 'file'
                        if new_attrs.has_key('dev'):
                            del new_attrs['dev']
                        elif new_attrs.has_key('name'):
                            del new_attrs['name']
                            del new_attrs['protocol']
                        new_file = "%s/blk_src_file.snap%s" % (tmp_dir, count)
                        snapshot_external_disks.append(new_file)
                        new_attrs.update({'file': new_file})
                        hosts = None

                new_src_dict = {"attrs": new_attrs}
                if hosts:
                    new_src_dict.update({"hosts": hosts})
                disk_xml.source = disk_xml.new_disk_source(**new_src_dict)

                new_disks.append(disk_xml)

            snap_xml.set_disks(new_disks)
            snapshot_xml_path = snap_xml.xml
            logging.debug("The snapshot xml is: %s" % snap_xml.xmltreefile)

            options = "--disk-only --xmlfile %s " % snapshot_xml_path

            snapshot_result = virsh.snapshot_create(
                vm_name, options, debug=True)

            if snapshot_result.exit_status != 0:
                raise error.TestFail(snapshot_result.stderr)

            # Create a file flag in VM after each snapshot
            flag_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"),
                                                    dir="/tmp")
            file_path = flag_file.name
            flag_file.close()

            status, output = session.cmd_status_output("touch %s" % file_path)
            if status:
                raise error.TestFail("Touch file in vm failed. %s" % output)
            snapshot_flag_files.append(file_path)

    # MAIN TEST CODE ###
    # Process cartesian parameters
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    needs_agent = "yes" == params.get("needs_agent", "yes")
    replace_vm_disk = "yes" == params.get("replace_vm_disk", "no")
    snap_in_mirror = "yes" == params.get("snap_in_mirror", 'no')
    snap_in_mirror_err = "yes" == params.get("snap_in_mirror_err", 'no')
    bandwidth = params.get("bandwidth", None)
    with_timeout = ("yes" == params.get("with_timeout_option", "no"))
    status_error = ("yes" == params.get("status_error", "no"))
    base_option = params.get("base_option", None)
    keep_relative = "yes" == params.get("keep_relative", 'no')
    virsh_dargs = {'debug': True}

    # Process domain disk device parameters
    disk_type = params.get("disk_type")
    disk_target = params.get("disk_target", 'vda')
    disk_src_protocol = params.get("disk_source_protocol")
    restart_tgtd = params.get("restart_tgtd", "no")
    vol_name = params.get("vol_name")
    tmp_dir = data_dir.get_tmp_dir()
    pool_name = params.get("pool_name", "gluster-pool")
    brick_path = os.path.join(tmp_dir, pool_name)

    # A backup of original vm
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    logging.debug("original xml is %s", vmxml_backup)

    # Abort the test if there are snapshots already
    exsiting_snaps = virsh.snapshot_list(vm_name)
    if len(exsiting_snaps) != 0:
        raise error.TestFail("There are snapshots created for %s already" %
                             vm_name)

    snapshot_external_disks = []
    try:
        if disk_src_protocol == 'iscsi' and disk_type == 'network':
            if not libvirt_version.version_compare(1, 0, 4):
                raise error.TestNAError("'iscsi' disk doesn't support in"
                                        " current libvirt version.")
        if disk_src_protocol == 'gluster':
            if not libvirt_version.version_compare(1, 2, 7):
                raise error.TestNAError("Snapshot on glusterfs not"
                                        " support in current "
                                        "version. Check more info "
                                        " with https://bugzilla.re"
                                        "dhat.com/show_bug.cgi?id="
                                        "1017289")

        # Set vm xml and guest agent
        if replace_vm_disk:
            if disk_src_protocol == "rbd" and disk_type == "network":
                src_host = params.get("disk_source_host", "EXAMPLE_HOSTS")
                mon_host = params.get("mon_host", "EXAMPLE_MON_HOST")
                if src_host.count("EXAMPLE") or mon_host.count("EXAMPLE"):
                    raise error.TestNAError("Please provide ceph host first.")
            libvirt.set_vm_disk(vm, params, tmp_dir)

        if needs_agent:
            vm.prepare_guest_agent()

        # The first disk is supposed to include OS
        # We will perform blockpull operation for it.
        first_disk = vm.get_first_disk_devices()
        blk_source = first_disk['source']
        blk_target = first_disk['target']
        snapshot_flag_files = []

        # get a vm session before snapshot
        session = vm.wait_for_login()
        # do snapshot
        make_disk_snapshot()

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug("The domain xml after snapshot is %s" % vmxml)

        # snapshot src file list: base image first, then each snapshot layer
        # in creation order (top of chain is the last element)
        snap_src_lst = [blk_source]
        snap_src_lst += snapshot_external_disks

        if snap_in_mirror:
            blockpull_options = "--bandwidth 1"
        else:
            blockpull_options = "--wait --verbose"

        if with_timeout:
            blockpull_options += " --timeout 1"

        if bandwidth:
            blockpull_options += " --bandwidth %s" % bandwidth

        if base_option == "async":
            blockpull_options += " --async"

        base_image = None
        base_index = None
        # libvirt >= 1.2.4 accepts an indexed base spec like "vda[2]";
        # older versions need the actual backing file path instead.
        if (libvirt_version.version_compare(1, 2, 4) or
                disk_src_protocol == 'gluster'):
            if base_option == "shallow":
                base_index = 1
                base_image = "%s[%s]" % (disk_target, base_index)
            elif base_option == "base":
                base_index = 2
                base_image = "%s[%s]" % (disk_target, base_index)
            elif base_option == "top":
                base_index = 0
                base_image = "%s[%s]" % (disk_target, base_index)
        else:
            if base_option == "shallow":
                base_image = snap_src_lst[3]
            elif base_option == "base":
                base_image = snap_src_lst[2]
            elif base_option == "top":
                base_image = snap_src_lst[4]

        if base_option and base_image:
            blockpull_options += " --base %s" % base_image

        if keep_relative:
            blockpull_options += " --keep-relative"

        # Run test case
        result = virsh.blockpull(vm_name, blk_target,
                                 blockpull_options, **virsh_dargs)
        status = result.exit_status

        # If pull job aborted as timeout, the exit status is different
        # on RHEL6(0) and RHEL7(1)
        if with_timeout and 'Pull aborted' in result.stdout:
            if libvirt_version.version_compare(1, 1, 1):
                status_error = True
            else:
                status_error = False

        # Check status_error
        libvirt.check_exit_status(result, status_error)

        if not status and not with_timeout:
            if snap_in_mirror:
                # Verify a new external snapshot can (or cannot, per
                # snap_in_mirror_err) be taken while the pull job mirrors.
                snap_mirror_path = "%s/snap_mirror" % tmp_dir
                snap_options = "--diskspec vda,snapshot=external,"
                snap_options += "file=%s --disk-only" % snap_mirror_path
                snapshot_external_disks.append(snap_mirror_path)
                ret = virsh.snapshot_create_as(vm_name, snap_options,
                                               ignore_status=True,
                                               debug=True)
                libvirt.check_exit_status(ret, snap_in_mirror_err)
                return

            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            disks = vmxml.devices.by_device_tag('disk')
            for disk in disks:
                if disk.target['dev'] != blk_target:
                    continue
                else:
                    disk_xml = disk.xmltreefile
                    break

            logging.debug("after pull the disk xml is: %s"
                          % disk_xml)
            if libvirt_version.version_compare(1, 2, 4):
                err_msg = "Domain image backing chain check failed"
                if not base_option or "async" in base_option:
                    chain_lst = snap_src_lst[-1:]
                    ret = check_chain_xml(disk_xml, chain_lst)
                    if not ret:
                        raise error.TestFail(err_msg)
                # NOTE(review): '"base" or "shallow" in base_option' is always
                # truthy (it evaluates as '"base" or (...)'), so this branch
                # catches EVERY remaining base_option value, including "top".
                # The intent was probably 'base_option in ("base", "shallow")'
                # — confirm against the test variants before changing.
                elif "base" or "shallow" in base_option:
                    chain_lst = snap_src_lst[::-1]
                    if not base_index and base_image:
                        base_index = chain_lst.index(base_image)
                    # Drop the layers between the top and the base that
                    # the pull is expected to have flattened away.
                    val_tmp = []
                    for i in range(1, base_index):
                        val_tmp.append(chain_lst[i])
                    for i in val_tmp:
                        chain_lst.remove(i)
                    ret = check_chain_xml(disk_xml, chain_lst)
                    if not ret:
                        raise error.TestFail(err_msg)

        # If base image is the top layer of snapshot chain,
        # virsh blockpull should fail, return directly
        if base_option == "top":
            return

        # Check flag files
        for flag in snapshot_flag_files:
            status, output = session.cmd_status_output("cat %s" % flag)
            if status:
                raise error.TestFail("blockpull failed: %s" % output)

    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Recover xml of vm.
        vmxml_backup.sync("--snapshots-metadata")

        if not disk_src_protocol or disk_src_protocol != 'gluster':
            for disk in snapshot_external_disks:
                if os.path.exists(disk):
                    os.remove(disk)

        libvirtd = utils_libvirtd.Libvirtd()
        if disk_src_protocol == 'iscsi':
            libvirt.setup_or_cleanup_iscsi(is_setup=False,
                                           restart_tgtd=restart_tgtd)
        elif disk_src_protocol == 'gluster':
            libvirt.setup_or_cleanup_gluster(False, vol_name, brick_path)
            libvirtd.restart()
        elif disk_src_protocol == 'netfs':
            restore_selinux = params.get('selinux_status_bak')
            libvirt.setup_or_cleanup_nfs(is_setup=False,
                                         restore_selinux=restore_selinux)
def run(test, params, env):
    """
    Test command: virsh rename.

    The command can rename a domain.
    1.Prepare test environment.
    2.Perform virsh rename operation.
    3.Recover test environment.
    4.Confirm the test result.

    :param test: test object, used to report fail/cancel
    :param params: cartesian config parameters for this test variant
    :param env: test environment holding the VM objects
    """
    # Get specific parameter value
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    domuuid = vm.get_uuid()
    vm_ref = params.get("domrename_vm_ref", "name")
    status_error = "yes" == params.get("status_error", "no")
    new_name = params.get("vm_new_name", "new")
    pre_vm_state = params.get("domrename_vm_state", "shutoff")
    domain_option = params.get("dom_opt", "")
    new_name_option = params.get("newname_opt", "")
    add_vm = "yes" == params.get("add_vm", "no")

    # Replace the varaiables
    if vm_ref == "name":
        vm_ref = vm_name
    elif vm_ref == "uuid":
        vm_ref = domuuid

    # "vm2_name" means: rename to the name of a second, cloned domain
    # (derived from vm_name) to exercise the name-collision case.
    if new_name == "vm2_name":
        vm2_name = ("%s" % vm_name[:-1])+"2"
        new_name = vm2_name

    # Build input params
    dom_param = ' '.join([domain_option, vm_ref])
    new_name_param = ' '.join([new_name_option, new_name])

    # Backup for recovery.
    vmxml_backup = libvirt_xml.vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    logging.debug("vm xml is %s", vmxml_backup)

    # Clone additional vms if needed
    # NOTE(review): vm2_name is only assigned when new_name == "vm2_name";
    # variants enabling add_vm presumably also set that — confirm in config.
    if add_vm:
        try:
            utils_path.find_command("virt-clone")
        except utils_path.CmdNotFoundError:
            if not utils_package.package_install(["virt-install"]):
                test.cancel("Failed to install virt-install on host")
        ret_clone = utils_libguestfs.virt_clone_cmd(vm_name, vm2_name,
                                                    True, timeout=360)
        if ret_clone.exit_status:
            test.fail("Error occured when clone a second vm!")
        vm2 = libvirt_vm.VM(vm2_name, vm.params, vm.root_dir, vm.address_cache)
        virsh.dom_list("--name --all", debug=True)

    # Create object instance for renamed domain
    new_vm = libvirt_vm.VM(new_name, vm.params, vm.root_dir, vm.address_cache)

    # Prepare vm state
    if pre_vm_state != "shutoff":
        vm.start()
        if pre_vm_state == "paused":
            vm.pause()
            logging.debug("Domain state is now: %s", vm.state())
        elif pre_vm_state == "managed_saved":
            vm.managedsave()
        elif pre_vm_state == "with_snapshot":
            virsh.snapshot_create_as(vm_name, "snap1 --disk-only", debug=True)
            vm.destroy(gracefully=False)

    try:
        result = virsh.domrename(dom_param, new_name_param,
                                 ignore_status=True, debug=True)

        # Raise unexpected pass or fail
        libvirt.check_exit_status(result, status_error)

        # Return expected failure for negative tests
        if status_error:
            logging.debug("Expected failure: %s", result.stderr)
            return

        # Checkpoints after domrename succeed
        else:
            list_ret = virsh.dom_list("--name --all", debug=True).stdout
            domname_ret = virsh.domname(domuuid, debug=True).stdout.strip()
            if new_name not in list_ret or vm_name in list_ret:
                test.fail("New name does not affect in virsh list")
            if domname_ret != new_name:
                test.fail("New domain name does not affect in virsh domname uuid")

            # Try to start vm with the new name
            new_vm.start()

    finally:
        # Remove additional vms
        # NOTE(review): if virsh.domrename itself raised, 'result' is unbound
        # here and this line would raise NameError; 'vm2' is likewise only
        # bound when add_vm is true (guarded by the same flag) — confirm.
        # The cloned vm2 is only removed when the rename FAILED; on success
        # the rename target IS vm2_name and is undefined as new_vm below.
        if add_vm and vm2.exists() and result.exit_status:
            virsh.remove_domain(vm2_name, "--remove-all-storage")

        # Undefine newly renamed domain
        if new_vm.exists():
            if new_vm.is_alive():
                new_vm.destroy(gracefully=False)
            new_vm.undefine()

        # Recover domain state
        if pre_vm_state != "shutoff":
            if pre_vm_state == "with_snapshot":
                libvirt.clean_up_snapshots(vm_name)
            else:
                if pre_vm_state == "managed_saved":
                    vm.start()
                vm.destroy(gracefully=False)

        # Restore VM
        vmxml_backup.sync()
def make_disk_snapshot(postfix_n, snapshot_take, is_check_snapshot_tree=False):
    """
    Make external snapshots for disks only.

    Creates snapshot_take - 1 external disk-only snapshots named
    "<postfix_n>_<count>", touching a flag file in the guest after each one
    (collected in snapshot_flag_files for later verification).

    Relies on outer-scope names: vm, needs_agent, tmp_dir,
    snapshot_external_disks, vm_name, test, session, snapshot_flag_files.

    :param postfix_n: postfix option
    :param snapshot_take: snapshots taken.
    :param is_check_snapshot_tree: if True, keep snapshot metadata and verify
                                   the names reported by
                                   'virsh snapshot-list --tree'
    """
    # Add all disks into command line.
    disks = vm.get_disk_devices()

    # Make three external snapshots for disks only
    for count in range(1, snapshot_take):
        options = "%s_%s %s%s-desc " % (postfix_n, count,
                                        postfix_n, count)

        options += "--disk-only --atomic --no-metadata"
        if needs_agent:
            options += " --quiesce"

        for disk in disks:
            disk_detail = disks[disk]
            basename = os.path.basename(disk_detail['source'])

            # Remove the original suffix if any, appending
            # ".postfix_n[0-9]"
            diskname = basename.split(".")[0]
            snap_name = "%s.%s%s" % (diskname, postfix_n, count)
            disk_external = os.path.join(tmp_dir, snap_name)

            snapshot_external_disks.append(disk_external)
            options += " %s,snapshot=external,file=%s" % (disk,
                                                          disk_external)
        # Snapshot metadata must be kept for snapshot-list --tree to work.
        if is_check_snapshot_tree:
            options = options.replace("--no-metadata", "")
        cmd_result = virsh.snapshot_create_as(vm_name, options,
                                              ignore_status=True,
                                              debug=True)
        status = cmd_result.exit_status
        if status != 0:
            test.fail("Failed to make snapshots for disks!")

        # Create a file flag in VM after each snapshot
        flag_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"),
                                                dir="/tmp")
        file_path = flag_file.name
        flag_file.close()

        status, output = session.cmd_status_output("touch %s" % file_path)
        if status:
            test.fail("Touch file in vm failed. %s" % output)

        snapshot_flag_files.append(file_path)

    def check_snapshot_tree():
        """
        Check whether predefined snapshot names are equals to
        snapshot names by virsh snapshot-list --tree
        """
        predefined_snapshot_name_list = []
        for count in range(1, snapshot_take):
            predefined_snapshot_name_list.append("%s_%s" % (postfix_n, count))
        snapshot_list_cmd = "virsh snapshot-list %s --tree" % vm_name
        result_output = process.run(snapshot_list_cmd,
                                    ignore_status=True, shell=True).stdout_text
        virsh_snapshot_name_list = []
        # Tree connector lines contain "|"; real snapshot names do not.
        for line in result_output.splitlines():
            strip_line = line.strip()
            if strip_line and "|" not in strip_line:
                virsh_snapshot_name_list.append(strip_line)
        # Compare two lists in their order and values, all need to be same.
        # An explicit length check is required because zip() silently
        # truncates to the shorter list: without it, virsh reporting FEWER
        # snapshots than expected would wrongly pass.
        if len(virsh_snapshot_name_list) != len(predefined_snapshot_name_list):
            test.fail("snapshot tree not correctly returned.")
        compare_list = [out_p for out_p, out_v in
                        zip(predefined_snapshot_name_list,
                            virsh_snapshot_name_list)
                        if out_p not in out_v]
        if compare_list:
            test.fail("snapshot tree not correctly returned.")

    # If check_snapshot_tree is True, check snapshot tree output.
    if is_check_snapshot_tree:
        check_snapshot_tree()
def run(test, params, env): """ Test the tpm virtual devices 1. prepare a guest with different tpm devices 2. check whether the guest can be started 3. check the xml and qemu cmd line, even swtpm for vtpm 4. check tpm usage in guest os """ # Tpm passthrough supported since libvirt 1.0.5. if not libvirt_version.version_compare(1, 0, 5): test.cancel("Tpm device is not supported " "on current libvirt version.") # Tpm passthrough supported since qemu 2.12.0-49. if not utils_misc.compare_qemu_version(2, 9, 0, is_rhev=False): test.cancel("Tpm device is not supported " "on current qemu version.") tpm_model = params.get("tpm_model") backend_type = params.get("backend_type") backend_version = params.get("backend_version") device_path = params.get("device_path") tpm_num = int(params.get("tpm_num", 1)) # After first start of vm with vtpm, do operations, check it still works vm_operate = params.get("vm_operate") # Sub-operation(e.g.domrename) under vm_operate(e.g.restart) vm_oprt = params.get("vm_oprt") secret_uuid = params.get("secret_uuid") secret_value = params.get("secret_value") # Change encryption state: from plain to encrypted, or reverse. 
encrypt_change = params.get("encrypt_change") secret_uuid = params.get("secret_uuid") prepare_secret = ("yes" == params.get("prepare_secret", "no")) remove_dev = ("yes" == params.get("remove_dev", "no")) multi_vms = ("yes" == params.get("multi_vms", "no")) # Remove swtpm state file rm_statefile = ("yes" == params.get("rm_statefile", "no")) test_suite = ("yes" == params.get("test_suite", "no")) restart_libvirtd = ("yes" == params.get("restart_libvirtd", "no")) no_backend = ("yes" == params.get("no_backend", "no")) status_error = ("yes" == params.get("status_error", "no")) err_msg = params.get("xml_errmsg", "") loader = params.get("loader", "") nvram = params.get("nvram", "") uefi_disk_url = params.get("uefi_disk_url", "") download_file_path = os.path.join(data_dir.get_tmp_dir(), "uefi_disk.qcow2") # Check tpm chip on host for passthrough testing if backend_type == "passthrough": dmesg_info = process.getoutput("dmesg|grep tpm -wi", shell=True) logging.debug("dmesg info about tpm:\n %s", dmesg_info) dmesg_error = re.search("No TPM chip found|TPM is disabled", dmesg_info) if dmesg_error: test.cancel(dmesg_error.group()) else: # Try to check host tpm chip version tpm_v = None if re.search("2.0 TPM", dmesg_info): tpm_v = "2.0" if not utils_package.package_install("tpm2-tools"): # package_install() return 'True' if succeed test.error("Failed to install tpm2-tools on host") else: if re.search("1.2 TPM", dmesg_info): tpm_v = "1.2" # If "1.2 TPM" or no version info in dmesg, try to test a tpm1.2 at first if not utils_package.package_install("tpm-tools"): test.error("Failed to install tpm-tools on host") # Check host env for vtpm testing elif backend_type == "emulator": if not utils_misc.compare_qemu_version(4, 0, 0, is_rhev=False): test.cancel("vtpm(emulator backend) is not supported " "on current qemu version.") # Install swtpm pkgs on host for vtpm emulation if not utils_package.package_install("swtpm*"): test.error("Failed to install swtpm swtpm-tools on host") def 
replace_os_disk(vm_xml, vm_name, nvram): """ Replace os(nvram) and disk(uefi) for x86 vtpm test :param vm_xml: current vm's xml :param vm_name: current vm name :param nvram: nvram file path of vm """ # Add loader, nvram in <os> nvram = nvram.replace("<VM_NAME>", vm_name) dict_os_attrs = {"loader_readonly": "yes", "secure": "yes", "loader_type": "pflash", "loader": loader, "nvram": nvram} vm_xml.set_os_attrs(**dict_os_attrs) logging.debug("Set smm=on in VMFeaturesXML") # Add smm in <features> features_xml = vm_xml.features features_xml.smm = "on" vm_xml.features = features_xml vm_xml.sync() # Replace disk with an uefi image if not utils_package.package_install("wget"): test.error("Failed to install wget on host") if uefi_disk_url.count("EXAMPLE"): test.error("Please provide the URL %s" % uefi_disk_url) else: download_cmd = ("wget %s -O %s" % (uefi_disk_url, download_file_path)) process.system(download_cmd, verbose=False, shell=True) vm = env.get_vm(vm_name) uefi_disk = {'disk_source_name': download_file_path} libvirt.set_vm_disk(vm, uefi_disk) vm_names = params.get("vms").split() vm_name = vm_names[0] vm = env.get_vm(vm_name) vm_xml = VMXML.new_from_inactive_dumpxml(vm_name) vm_xml_backup = vm_xml.copy() os_xml = getattr(vm_xml, "os") host_arch = platform.machine() if backend_type == "emulator" and host_arch == 'x86_64': if not utils_package.package_install("OVMF"): test.error("Failed to install OVMF or edk2-ovmf pkgs on host") if os_xml.xmltreefile.find('nvram') is None: replace_os_disk(vm_xml, vm_name, nvram) vm_xml = VMXML.new_from_inactive_dumpxml(vm_name) if vm.is_alive(): vm.destroy() vm2 = None if multi_vms: if len(vm_names) > 1: vm2_name = vm_names[1] vm2 = env.get_vm(vm2_name) vm2_xml = VMXML.new_from_inactive_dumpxml(vm2_name) vm2_xml_backup = vm2_xml.copy() else: # Clone additional vms if needed try: utils_path.find_command("virt-clone") except utils_path.CmdNotFoundError: if not utils_package.package_install(["virt-install"]): test.cancel("Failed to 
install virt-install on host") vm2_name = "vm2_" + utils_misc.generate_random_string(5) ret_clone = utils_libguestfs.virt_clone_cmd(vm_name, vm2_name, True, timeout=360, debug=True) if ret_clone.exit_status: test.error("Need more than one domains, but error occured when virt-clone.") vm2 = vm.clone(vm2_name) vm2_xml = VMXML.new_from_inactive_dumpxml(vm2_name) if vm2.is_alive(): vm2.destroy() service_mgr = service.ServiceManager() def check_dumpxml(vm_name): """ Check whether the added devices are shown in the guest xml :param vm_name: current vm name """ logging.info("------Checking guest dumpxml------") if tpm_model: pattern = '<tpm model="%s">' % tpm_model else: # The default tpm model is "tpm-tis" pattern = '<tpm model="tpm-tis">' # Check tpm model xml_after_adding_device = VMXML.new_from_dumpxml(vm_name) logging.debug("xml after add tpm dev is %s", xml_after_adding_device) if pattern not in astring.to_text(xml_after_adding_device): test.fail("Can not find the %s tpm device xml " "in the guest xml file." % tpm_model) # Check backend type pattern = '<backend type="%s"' % backend_type if pattern not in astring.to_text(xml_after_adding_device): test.fail("Can not find the %s backend type xml for tpm dev " "in the guest xml file." % backend_type) # Check backend version if backend_version: check_ver = backend_version if backend_version != 'none' else '2.0' pattern = '"emulator" version="%s"' % check_ver if pattern not in astring.to_text(xml_after_adding_device): test.fail("Can not find the %s backend version xml for tpm dev " "in the guest xml file." % check_ver) # Check device path if backend_type == "passthrough": pattern = '<device path="/dev/tpm0"' if pattern not in astring.to_text(xml_after_adding_device): test.fail("Can not find the %s device path xml for tpm dev " "in the guest xml file." 
% device_path) # Check encryption secret if prepare_secret: pattern = '<encryption secret="%s" />' % encryption_uuid if pattern not in astring.to_text(xml_after_adding_device): test.fail("Can not find the %s secret uuid xml for tpm dev " "in the guest xml file." % encryption_uuid) logging.info('------PASS on guest dumpxml check------') def check_qemu_cmd_line(vm, vm_name, domid): """ Check whether the added devices are shown in the qemu cmd line :param vm: current vm :param vm_name: current vm name :param domid: domain id for checking vtpm socket file """ logging.info("------Checking qemu cmd line------") if not vm.get_pid(): test.fail('VM pid file missing.') with open('/proc/%s/cmdline' % vm.get_pid()) as cmdline_file: cmdline = cmdline_file.read() logging.debug("Qemu cmd line info:\n %s", cmdline) # Check tpm model pattern_list = ["-device.%s" % tpm_model] # Check backend type if backend_type == "passthrough": dev_num = re.search(r"\d+", device_path).group() backend_segment = "id=tpm-tpm%s" % dev_num else: # emulator backend backend_segment = "id=tpm-tpm0,chardev=chrtpm" pattern_list.append("-tpmdev.%s,%s" % (backend_type, backend_segment)) # Check chardev socket for vtpm if backend_type == "emulator": pattern_list.append("-chardev.socket,id=chrtpm," "path=.*/run/libvirt/qemu/swtpm/%s-%s-swtpm.sock" % (domid, vm_name)) for pattern in pattern_list: if not re.search(pattern, cmdline): if not remove_dev: test.fail("Can not find the %s for tpm device " "in qemu cmd line." % pattern) elif remove_dev: test.fail("%s still exists after remove vtpm and restart" % pattern) logging.info("------PASS on qemu cmd line check------") def check_swtpm(domid, domuuid, vm_name): """ Check swtpm cmdline and files for vtpm. 
:param domid: domain id for checking vtpm files :param domuuid: domain uuid for checking vtpm state file :param vm_name: current vm name """ logging.info("------Checking swtpm cmdline and files------") # Check swtpm cmdline swtpm_pid = utils_misc.get_pid("%s-swtpm.pid" % vm_name) if not swtpm_pid: if not remove_dev: test.fail('swtpm pid file missing.') else: return elif remove_dev: test.fail('swtpm pid file still exists after remove vtpm and restart') with open('/proc/%s/cmdline' % swtpm_pid) as cmdline_file: cmdline = cmdline_file.read() logging.debug("Swtpm cmd line info:\n %s", cmdline) pattern_list = ["--daemon", "--ctrl", "--tpmstate", "--log", "--tpm2", "--pid"] if prepare_secret: pattern_list.extend(["--key", "--migration-key"]) for pattern in pattern_list: if not re.search(pattern, cmdline): test.fail("Can not find the %s for tpm device " "in swtpm cmd line." % pattern) # Check swtpm files file_list = ["/var/run/libvirt/qemu/swtpm/%s-%s-swtpm.sock" % (domid, vm_name)] file_list.append("/var/lib/libvirt/swtpm/%s/tpm2" % domuuid) file_list.append("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % vm_name) file_list.append("/var/run/libvirt/qemu/swtpm/%s-%s-swtpm.pid" % (domid, vm_name)) for swtpm_file in file_list: if not os.path.exists(swtpm_file): test.fail("Swtpm file: %s does not exist" % swtpm_file) logging.info("------PASS on Swtpm cmdline and files check------") def get_tpm2_tools_cmd(session=None): """ Get tpm2-tools pkg version and return corresponding getrandom cmd :session: guest console session :return: tpm2_getrandom cmd usage """ cmd = 'rpm -q tpm2-tools' get_v_tools = session.cmd(cmd) if session else process.run(cmd).stdout_text v_tools_list = get_v_tools.strip().split('-') if session: logging.debug("The tpm2-tools version is %s", v_tools_list[2]) v_tools = int(v_tools_list[2].split('.')[0]) return "tpm2_getrandom 8" if v_tools < 4 else "tpm2_getrandom -T device:/dev/tpm0 8 --hex" def get_host_tpm_bef(tpm_v): """ Test host tpm function and identify 
its real version before passthrough Since sometimes dmesg info doesn't include tpm msg, need use tpm-tool or tpm2-tools to try the function. :param tpm_v: host tpm version get from dmesg info :return: host tpm version """ logging.info("------Checking host tpm device before passthrough------") # Try tcsd tool for suspected tpm1.2 chip on host tpm_real_v = tpm_v if tpm_v != "2.0": if not service_mgr.start('tcsd'): # service_mgr.start() return 'True' if succeed if tpm_v == "1.2": test.fail("Host tcsd.serivce start failed") else: # Means tpm_v got nothing from dmesg, log failure here and # go to next 'if' to try tpm2.0 tools. logging.info("Host tcsd.serivce start failed") else: tpm_real_v = "1.2" logging.info("Host tpm version info:") result = process.run("tpm_version", ignore_status=False) logging.debug("[host]# tpm_version\n %s", result.stdout) time.sleep(2) service_mgr.stop('tcsd') if tpm_v != "1.2": # Try tpm2.0 tools if not utils_package.package_install("tpm2-tools"): test.error("Failed to install tpm2-tools on host") tpm2_getrandom_cmd = get_tpm2_tools_cmd() if process.run(tpm2_getrandom_cmd, ignore_status=True).exit_status: test.cancel("Both tcsd and tpm2-tools can not work, " "pls check your host tpm version and test env.") else: tpm_real_v = "2.0" logging.info("------PASS on host tpm device check------") return tpm_real_v def test_host_tpm_aft(tpm_real_v): """ Test host tpm function after passthrough :param tpm_real_v: host tpm real version indentified from testing """ logging.info("------Checking host tpm device after passthrough------") if tpm_real_v == "1.2": if service_mgr.start('tcsd'): time.sleep(2) service_mgr.stop('tcsd') test.fail("Host tpm should not work after passthrough to guest.") else: logging.info("Expected failure: Tpm is being used by guest.") elif tpm_real_v == "2.0": tpm2_getrandom_cmd = get_tpm2_tools_cmd() if not process.run(tpm2_getrandom_cmd, ignore_status=True).exit_status: test.fail("Host tpm should not work after passthrough to 
guest.") else: logging.info("Expected failure: Tpm is being used by guest.") logging.info("------PASS on host tpm device check------") def test_guest_tpm(expect_version, session, expect_fail): """ Test tpm function in guest :param expect_version: guest tpm version, as host version, or emulator specified :param session: Guest session to be tested :param expect_fail: guest tpm is expectedly fail to work """ logging.info("------Checking guest tpm device work------") if expect_version == "1.2": # Install tpm-tools and test by tcsd method if not utils_package.package_install(["tpm-tools"], session, 360): test.error("Failed to install tpm-tools package in guest") else: status, output = session.cmd_status_output("systemctl start tcsd") logging.debug("Command output: %s", output) if status: if expect_fail: test.cancel("tpm-crb passthrough only works with host tpm2.0, " "but your host tpm version is 1.2") else: test.fail("Failed to start tcsd.service in guest") else: dev_output = session.cmd_output("ls /dev/|grep tpm") logging.debug("Command output: %s", dev_output) status, output = session.cmd_status_output("tpm_version") logging.debug("Command output: %s", output) if status: test.fail("Guest tpm can not work") else: # If expect_version is tpm2.0, install and test by tpm2-tools if not utils_package.package_install(["tpm2-tools"], session, 360): test.error("Failed to install tpm2-tools package in guest") else: tpm2_getrandom_cmd = get_tpm2_tools_cmd(session) status1, output1 = session.cmd_status_output("ls /dev/|grep tpm") logging.debug("Command output: %s", output1) status2, output2 = session.cmd_status_output(tpm2_getrandom_cmd) logging.debug("Command output: %s", output2) if status1 or status2: if not expect_fail: test.fail("Guest tpm can not work") else: d_status, d_output = session.cmd_status_output("date") if d_status: test.fail("Guest OS doesn't work well") logging.debug("Command output: %s", d_output) elif expect_fail: test.fail("Expect fail but guest tpm still 
works") logging.info("------PASS on guest tpm device work check------") def run_test_suite_in_guest(session): """ Run kernel test suite for guest tpm. :param session: Guest session to be tested """ logging.info("------Checking kernel test suite for guest tpm------") boot_info = session.cmd('uname -r').strip().split('.') kernel_version = '.'.join(boot_info[:2]) # Download test suite per current guest kernel version parent_path = "https://cdn.kernel.org/pub/linux/kernel" if float(kernel_version) < 5.3: major_version = "5" file_version = "5.3" else: major_version = boot_info[0] file_version = kernel_version src_url = "%s/v%s.x/linux-%s.tar.xz" % (parent_path, major_version, file_version) download_cmd = "wget %s -O %s" % (src_url, "/root/linux.tar.xz") output = session.cmd_output(download_cmd, timeout=480) logging.debug("Command output: %s", output) # Install neccessary pkgs to build test suite if not utils_package.package_install(["tar", "make", "gcc", "rsync", "python2"], session, 360): test.fail("Failed to install specified pkgs in guest OS.") # Unzip the downloaded test suite status, output = session.cmd_status_output("tar xvJf /root/linux.tar.xz -C /root") if status: test.fail("Uzip failed: %s" % output) # Specify using python2 to run the test suite per supporting test_path = "/root/linux-%s/tools/testing/selftests" % file_version sed_cmd = "sed -i 's/python -m unittest/python2 -m unittest/g' %s/tpm2/test_*.sh" % test_path output = session.cmd_output(sed_cmd) logging.debug("Command output: %s", output) # Build and and run the .sh files of test suite status, output = session.cmd_status_output("make -C %s TARGETS=tpm2 run_tests" % test_path, timeout=360) logging.debug("Command output: %s", output) if status: test.fail("Failed to run test suite in guest OS.") for test_sh in ["test_smoke.sh", "test_space.sh"]: pattern = "ok .* selftests: tpm2: %s" % test_sh if not re.search(pattern, output) or ("not ok" in output): test.fail("test suite check failed.") 
logging.info("------PASS on kernel test suite check------") def reuse_by_vm2(tpm_dev): """ Try to add same tpm to a second guest, when it's being used by one guest. :param tpm_dev: tpm device to be added into guest xml """ logging.info("------Trying to add same tpm to a second domain------") vm2_xml.remove_all_device_by_type('tpm') vm2_xml.add_device(tpm_dev) vm2_xml.sync() ret = virsh.start(vm2_name, ignore_status=True, debug=True) if backend_type == "passthrough": if ret.exit_status: logging.info("Expected failure when try to passthrough a tpm" " that being used by another guest") return test.fail("Reuse a passthroughed tpm should not succeed.") elif ret.exit_status: # emulator backend test.fail("Vtpm for each guest should not interfere with each other") try: tpm_real_v = None sec_uuids = [] new_name = "" virsh_dargs = {"debug": True, "ignore_status": False} vm_xml.remove_all_device_by_type('tpm') tpm_dev = Tpm() if tpm_model: tpm_dev.tpm_model = tpm_model if not no_backend: backend = tpm_dev.Backend() if backend_type != 'none': backend.backend_type = backend_type if backend_type == "passthrough": tpm_real_v = get_host_tpm_bef(tpm_v) logging.debug("The host tpm real version is %s", tpm_real_v) if device_path: backend.device_path = device_path if backend_type == "emulator": if backend_version != 'none': backend.backend_version = backend_version if prepare_secret: auth_sec_dict = {"sec_ephemeral": "no", "sec_private": "yes", "sec_desc": "sample vTPM secret", "sec_usage": "vtpm", "sec_name": "VTPM_example"} encryption_uuid = libvirt.create_secret(auth_sec_dict) if secret_value != 'none': virsh.secret_set_value(encryption_uuid, "open sesame", encode=True, debug=True) sec_uuids.append(encryption_uuid) if encrypt_change != 'encrpt': # plain_to_encrypt will not add encryption on first start if secret_uuid == 'invalid': encryption_uuid = encryption_uuid[:-1] backend.encryption_secret = encryption_uuid if secret_uuid == "change": auth_sec_dict["sec_desc"] = "sample2 vTPM 
secret" auth_sec_dict["sec_name"] = "VTPM_example2" new_encryption_uuid = libvirt.create_secret(auth_sec_dict) virsh.secret_set_value(new_encryption_uuid, "open sesame", encode=True, debug=True) sec_uuids.append(new_encryption_uuid) if secret_uuid == 'nonexist': backend.encryption_secret = "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee" tpm_dev.backend = backend logging.debug("tpm dev xml to add is:\n %s", tpm_dev) for num in range(tpm_num): vm_xml.add_device(tpm_dev, True) ret = virsh.define(vm_xml.xml, ignore_status=True, debug=True) expected_match = "" if not err_msg: expected_match = "Domain .*%s.* defined from %s" % (vm_name, vm_xml.xml) libvirt.check_result(ret, err_msg, "", False, expected_match) if err_msg: # Stop test when get expected failure return if vm_operate != "restart": check_dumpxml(vm_name) # For default model, no need start guest to test if tpm_model: expect_fail = False try: vm.start() except VMStartError as detail: if secret_value == 'none' or secret_uuid == 'nonexist': logging.debug("Expected failure: %s", detail) return else: test.fail(detail) domuuid = vm.get_uuid() if vm_operate or restart_libvirtd: # Make sure OS works before vm operate or restart libvirtd session = vm.wait_for_login() test_guest_tpm("2.0", session, False) session.close() if restart_libvirtd: utils_libvirtd.libvirtd_restart() swtpm_statedir = "/var/lib/libvirt/swtpm/%s" % domuuid if vm_operate == "resume": virsh.suspend(vm_name, **virsh_dargs) time.sleep(3) virsh.resume(vm_name, **virsh_dargs) elif vm_operate == "snapshot": virsh.snapshot_create_as(vm_name, "sp1 --memspec file=/tmp/testvm_sp1", **virsh_dargs) elif vm_operate in ["restart", "create"]: vm.destroy() if vm_operate == "create": virsh.undefine(vm_name, options="--nvram", **virsh_dargs) if os.path.exists(swtpm_statedir): test.fail("Swtpm state dir: %s still exist after vm undefine" % swtpm_statedir) virsh.create(vm_xml.xml, **virsh_dargs) else: if vm_oprt == "domrename": new_name = "vm_" + 
utils_misc.generate_random_string(5) virsh.domrename(vm_name, new_name, **virsh_dargs) new_vm = libvirt_vm.VM(new_name, vm.params, vm.root_dir, vm.address_cache) vm = new_vm vm_name = new_name elif secret_value == 'change': logging.info("Changing secret value...") virsh.secret_set_value(encryption_uuid, "new sesame", encode=True, debug=True) elif not restart_libvirtd: # remove_dev or do other vm operations during restart vm_xml.remove_all_device_by_type('tpm') if secret_uuid == "change" or encrypt_change: # Change secret uuid, or change encrytion state:from plain to encrypted, or on the contrary if encrypt_change == 'plain': # Change from encrypted state to plain:redefine a tpm dev without encryption tpm_dev = Tpm() tpm_dev.tpm_model = tpm_model backend = tpm_dev.Backend() backend.backend_type = backend_type backend.backend_version = backend_version else: # Use a new secret's uuid if secret_uuid == "change": encryption_uuid = new_encryption_uuid backend.encryption_secret = encryption_uuid tpm_dev.backend = backend logging.debug("The new tpm dev xml to add for restart vm is:\n %s", tpm_dev) vm_xml.add_device(tpm_dev, True) if encrypt_change in ['encrpt', 'plain']: # Avoid sync() undefine removing the state file vm_xml.define() else: vm_xml.sync() if rm_statefile: swtpm_statefile = "%s/tpm2/tpm2-00.permall" % swtpm_statedir logging.debug("Removing state file: %s", swtpm_statefile) os.remove(swtpm_statefile) ret = virsh.start(vm_name, ignore_status=True, debug=True) libvirt.check_exit_status(ret, status_error) if status_error and ret.exit_status != 0: return if not remove_dev: check_dumpxml(vm_name) elif vm_operate == 'managedsave': virsh.managedsave(vm_name, **virsh_dargs) time.sleep(5) if secret_value == 'change': logging.info("Changing secret value...") virsh.secret_set_value(encryption_uuid, "new sesame", encode=True, debug=True) if rm_statefile: swtpm_statefile = "%s/tpm2/tpm2-00.permall" % swtpm_statedir logging.debug("Removing state file: %s", swtpm_statefile) 
os.remove(swtpm_statefile) ret = virsh.start(vm_name, ignore_status=True, debug=True) libvirt.check_exit_status(ret, status_error) if status_error and ret.exit_status != 0: return domid = vm.get_id() check_qemu_cmd_line(vm, vm_name, domid) if backend_type == "passthrough": if tpm_real_v == "1.2" and tpm_model == "tpm-crb": expect_fail = True expect_version = tpm_real_v test_host_tpm_aft(tpm_real_v) else: # emulator backend if remove_dev: expect_fail = True expect_version = backend_version check_swtpm(domid, domuuid, vm_name) session = vm.wait_for_login() if test_suite: run_test_suite_in_guest(session) else: test_guest_tpm(expect_version, session, expect_fail) session.close() if multi_vms: reuse_by_vm2(tpm_dev) if backend_type != "passthrough": #emulator backend check_dumpxml(vm2_name) domid = vm2.get_id() domuuid = vm2.get_uuid() check_qemu_cmd_line(vm2, vm2_name, domid) check_swtpm(domid, domuuid, vm2_name) session = vm2.wait_for_login() test_guest_tpm(backend_version, session, expect_fail) session.close() finally: # Remove renamed domain if it exists if new_name: virsh.remove_domain(new_name, "--nvram", debug=True) if os.path.exists("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % new_name): os.remove("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % new_name) # Remove snapshot if exists if vm_operate == "snapshot": snapshot_lists = virsh.snapshot_list(vm_name) if len(snapshot_lists) > 0: libvirt.clean_up_snapshots(vm_name, snapshot_lists) for snap in snapshot_lists: virsh.snapshot_delete(vm_name, snap, "--metadata") if os.path.exists("/tmp/testvm_sp1"): os.remove("/tmp/testvm_sp1") # Clear guest os if test_suite: session = vm.wait_for_login() logging.info("Removing dir /root/linux-*") output = session.cmd_output("rm -rf /root/linux-*") logging.debug("Command output:\n %s", output) session.close() if vm_operate == "create": vm.define(vm_xml.xml) vm_xml_backup.sync(options="--nvram --managed-save") # Remove swtpm log file in case of impact on later runs if 
os.path.exists("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % vm.name): os.remove("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % vm.name) for sec_uuid in set(sec_uuids): virsh.secret_undefine(sec_uuid, ignore_status=True, debug=True) if vm2: if len(vm_names) > 1: vm2_xml_backup.sync(options="--nvram") else: virsh.remove_domain(vm2_name, "--nvram --remove-all-storage", debug=True) if os.path.exists("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % vm2.name): os.remove("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % vm2.name)
def run(test, params, env):
    """
    Test snapshot-create-as command
    Make sure that the clean repo can be used because qemu-guest-agent need
    to be installed in guest

    The command create a snapshot (disk and RAM) from arguments which
    including the following point
    * virsh snapshot-create-as --print-xml --diskspec --name --description
    * virsh snapshot-create-as --print-xml with multi --diskspec
    * virsh snapshot-create-as --print-xml --memspec
    * virsh snapshot-create-as --description
    * virsh snapshot-create-as --no-metadata
    * virsh snapshot-create-as --no-metadata --print-xml (negative test)
    * virsh snapshot-create-as --atomic --disk-only
    * virsh snapshot-create-as --quiesce --disk-only (positive and negative)
    * virsh snapshot-create-as --reuse-external
    * virsh snapshot-create-as --disk-only --diskspec
    * virsh snapshot-create-as --memspec --reuse-external --atomic(negative)
    * virsh snapshot-create-as --disk-only and --memspec (negative)
    * Create multi snapshots with snapshot-create-as
    * Create snapshot with name a--a a--a--snap1

    :param test: test object
    :param params: test parameters dict
    :param env: test environment object
    """
    if not virsh.has_help_command('snapshot-create-as'):
        raise error.TestNAError("This version of libvirt does not support "
                                "the snapshot-create-as test")

    vm_name = params.get("main_vm")
    status_error = params.get("status_error", "no")
    options = params.get("snap_createas_opts")
    multi_num = params.get("multi_num", "1")
    diskspec_num = params.get("diskspec_num", "1")
    bad_disk = params.get("bad_disk")
    external_disk = params.get("external_disk")
    start_ga = params.get("start_ga", "yes")
    domain_state = params.get("domain_state")
    memspec_opts = params.get("memspec_opts")
    diskspec_opts = params.get("diskspec_opts")

    opt_names = locals()
    if memspec_opts is not None:
        mem_options = compose_disk_options(test, params, memspec_opts)
        # if the parameters have the disk without "file=" then we only need to
        # add testdir for it.
        if mem_options is None:
            mem_options = os.path.join(test.virtdir, memspec_opts)
        options += " --memspec " + mem_options

    tag_diskspec = 0
    dnum = int(diskspec_num)
    if diskspec_opts is not None:
        tag_diskspec = 1
        opt_names['diskopts_1'] = diskspec_opts

    # diskspec_opts[n] is used in cfg when more than 1 --diskspec is used
    if dnum > 1:
        tag_diskspec = 1
        for i in range(1, dnum + 1):
            opt_names["diskopts_%s" % i] = params.get("diskspec_opts%s" % i)

    if tag_diskspec == 1:
        for i in range(1, dnum + 1):
            disk_options = compose_disk_options(test, params,
                                                opt_names["diskopts_%s" % i])
            options += " --diskspec " + disk_options

    logging.debug("options are %s", options)

    vm = env.get_vm(vm_name)
    option_dict = utils_misc.valued_option_dict(options, r' --(?!-)')
    logging.debug("option_dict is %s", option_dict)

    # A backup of original vm
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    logging.debug("original xml is %s", vmxml_backup)

    # Generate empty image for negative test
    if bad_disk is not None:
        bad_disk = os.path.join(test.virtdir, bad_disk)
        # BUGFIX: close the descriptor immediately; the original leaked the
        # fd returned by os.open() for the rest of the test run.
        os.close(os.open(bad_disk, os.O_RDWR | os.O_CREAT))

    # Generate external disk
    if external_disk is not None:
        external_disk = os.path.join(test.virtdir, external_disk)
        commands.getoutput("qemu-img create -f qcow2 %s 1G" % external_disk)

    try:
        # Start qemu-ga on guest if have --quiesce
        if options.find("quiesce") >= 0:
            if vm.is_alive():
                vm.destroy()
            virt_xml_obj = libvirt_xml.VMXML(virsh_instance=virsh)
            virt_xml_obj.set_agent_channel(vm_name)
            vm.start()
            if start_ga == "yes":
                session = vm.wait_for_login()

                # Check if qemu-ga already started automatically
                cmd = "rpm -q qemu-guest-agent || yum install -y qemu-guest-agent"
                stat_install = session.cmd_status(cmd, 300)
                if stat_install != 0:
                    raise error.TestFail("Fail to install qemu-guest-agent, "
                                         "make sure that you have usable repo "
                                         "in guest")

                # Check if qemu-ga already started
                stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
                if stat_ps != 0:
                    session.cmd("qemu-ga -d")
                    # Check if the qemu-ga really started
                    stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
                    if stat_ps != 0:
                        raise error.TestFail("Fail to run qemu-ga in guest")

        if domain_state == "paused":
            virsh.suspend(vm_name)

        # Record the previous snapshot-list
        snaps_before = virsh.snapshot_list(vm_name)

        # Run virsh command
        # May create several snapshots, according to configuration
        for count in range(int(multi_num)):
            cmd_result = virsh.snapshot_create_as(vm_name, options,
                                                  ignore_status=True,
                                                  debug=True)
            output = cmd_result.stdout.strip()
            status = cmd_result.exit_status

            # check status_error
            if status_error == "yes":
                if status == 0:
                    raise error.TestFail("Run successfully with wrong command!")
                else:
                    # Check memspec file should be removed if failed
                    if (options.find("memspec") >= 0
                            and options.find("atomic") >= 0):
                        if os.path.isfile(option_dict['memspec']):
                            os.remove(option_dict['memspec'])
                            raise error.TestFail("Run failed but file %s exist"
                                                 % option_dict['memspec'])
                        else:
                            logging.info("Run failed as expected and memspec"
                                         " file already been removed")
                    else:
                        logging.info("Run failed as expected")
            elif status_error == "no":
                if status != 0:
                    raise error.TestFail("Run failed with right command: %s"
                                         % output)
                else:
                    # Check the special options
                    snaps_list = virsh.snapshot_list(vm_name)
                    logging.debug("snaps_list is %s", snaps_list)
                    check_snapslist(vm_name, options, option_dict, output,
                                    snaps_before, snaps_list)
    finally:
        # Environment clean
        if options.find("quiesce") >= 0 and start_ga == "yes":
            session.cmd("rpm -e qemu-guest-agent")
            # BUGFIX: release the guest session instead of leaking it
            session.close()
        # recover domain xml
        xml_recover(vmxml_backup)
        path = "/var/lib/libvirt/qemu/snapshot/" + vm_name
        # BUGFIX: the metadata path is a directory of per-snapshot XML files;
        # the original os.path.isfile(path) was always False so the check
        # could never fire.  Fail only if metadata files actually remain.
        if os.path.isdir(path) and os.listdir(path):
            raise error.TestFail("Still can find snapshot metadata")

        # rm bad disks
        if bad_disk is not None:
            os.remove(bad_disk)
def check_snapshot():
    """
    Check domain snapshot operations.

    Creates an external disk-only snapshot, verifies it appears in the
    snapshot list, confirms the guest can still create a file, then creates
    an external memory+disk snapshot and re-checks guest write ability.

    Relies on enclosing-scope names: vm, vm_name, test, snapshot_name1,
    snapshot_name2, snapshot_name1_file, snapshot_name2_mem_file,
    snapshot_name2_disk_file.
    """
    # Cleanup dirty data left over from a previous run, if any exists
    for stale_file in (snapshot_name1_file, snapshot_name2_mem_file,
                       snapshot_name2_disk_file):
        if os.path.exists(stale_file):
            os.remove(stale_file)

    device_target = 'vda'
    snapshot_name1_option = ("--diskspec %s,file=%s,snapshot=external "
                             "--disk-only --atomic"
                             % (device_target, snapshot_name1_file))
    ret = virsh.snapshot_create_as(vm_name,
                                   "%s %s" % (snapshot_name1,
                                              snapshot_name1_option),
                                   debug=True)
    libvirt.check_exit_status(ret)
    snap_lists = virsh.snapshot_list(vm_name, debug=True)
    if snapshot_name1 not in snap_lists:
        test.fail("Snapshot %s doesn't exist" % snapshot_name1)

    # Check file can be created after snapshot
    def _check_file_create(filename):
        """
        Check whether file with specified filename can be created in guest.

        :param filename: filename to create under /tmp in the guest
        """
        # BUGFIX: initialize before the try block; otherwise a failed
        # wait_for_login() makes the finally clause raise UnboundLocalError
        # and mask the original exception.
        session = None
        try:
            session = vm.wait_for_login()
            if platform.platform().count('ppc64'):
                time.sleep(10)
            cmd = "echo teststring > /tmp/{0}".format(filename)
            status, output = session.cmd_status_output(cmd)
            if status != 0:
                test.fail("Failed to touch one file on VM internal")
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            raise
        finally:
            if session:
                session.close()

    _check_file_create("disk.txt")

    # Create memory snapshot.
    snapshot_name2_mem_option = ("--memspec file=%s,snapshot=external"
                                 % snapshot_name2_mem_file)
    snapshot_name2_disk_option = ("--diskspec %s,file=%s,snapshot=external"
                                  " --atomic"
                                  % (device_target, snapshot_name2_disk_file))
    snapshot_name2_option = "%s %s" % (snapshot_name2_mem_option,
                                       snapshot_name2_disk_option)
    ret = virsh.snapshot_create_as(vm_name,
                                   "%s %s" % (snapshot_name2,
                                              snapshot_name2_option),
                                   debug=True)
    libvirt.check_exit_status(ret)
    snap_lists = virsh.snapshot_list(vm_name, debug=True)
    if snapshot_name2 not in snap_lists:
        test.fail("Snapshot: %s doesn't exist" % snapshot_name2)
    _check_file_create("mem.txt")
usb_devices.update({"hub": addr_dict}) if dom_iothreads: # Delete cputune/iothreadids section, it may have conflict # with domain iothreads del vmxml.cputune del vmxml.iothreadids vmxml.iothreads = int(dom_iothreads) # After compose the disk xml, redefine the VM xml. vmxml.sync() # Test snapshot before vm start. if test_disk_snapshot: if snapshot_before_start: ret = virsh.snapshot_create_as(vm_name, "s1 %s" % snapshot_option) libvirt.check_exit_status(ret, snapshot_error) # Start the VM. vm.start() if status_error: raise error.TestFail("VM started unexpectedly") # Hotplug the disks. if device_at_dt_disk: for i in range(len(disks)): attach_option = "" if len(device_attach_option) > i: attach_option = device_attach_option[i] ret = virsh.attach_disk(vm_name, disks[i]["source"], device_targets[i],
def run(test, params, env): """ Test command: virsh blockcopy. This command can copy a disk backing image chain to dest. 1. Positive testing 1.1 Copy a disk to a new image file. 1.2 Reuse existing destination copy. 1.3 Valid blockcopy timeout and bandwidth test. 2. Negative testing 2.1 Copy a disk to a non-exist directory. 2.2 Copy a disk with invalid options. 2.3 Do block copy for a persistent domain. """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) target = params.get("target_disk", "") replace_vm_disk = "yes" == params.get("replace_vm_disk", "no") disk_source_protocol = params.get("disk_source_protocol") disk_type = params.get("disk_type") pool_name = params.get("pool_name") image_size = params.get("image_size") emu_image = params.get("emulated_image") copy_to_nfs = "yes" == params.get("copy_to_nfs", "no") mnt_path_name = params.get("mnt_path_name") options = params.get("blockcopy_options", "") bandwidth = params.get("blockcopy_bandwidth", "") default_timeout = int(params.get("default_timeout", "300")) reuse_external = "yes" == params.get("reuse_external", "no") persistent_vm = params.get("persistent_vm", "no") status_error = "yes" == params.get("status_error", "no") active_error = "yes" == params.get("active_error", "no") active_snap = "yes" == params.get("active_snap", "no") active_save = "yes" == params.get("active_save", "no") check_state_lock = "yes" == params.get("check_state_lock", "no") with_shallow = "yes" == params.get("with_shallow", "no") with_blockdev = "yes" == params.get("with_blockdev", "no") setup_libvirt_polkit = "yes" == params.get('setup_libvirt_polkit') bug_url = params.get("bug_url", "") timeout = int(params.get("timeout", 1200)) rerun_flag = 0 blkdev_n = None back_n = 'blockdev-backing-iscsi' snapshot_external_disks = [] # Skip/Fail early if with_blockdev and not libvirt_version.version_compare(1, 2, 13): raise exceptions.TestSkipError("--blockdev option not supported in " "current version") if not target: raise 
exceptions.TestSkipError("Require target disk to copy") if setup_libvirt_polkit and not libvirt_version.version_compare(1, 1, 1): raise exceptions.TestSkipError("API acl test not supported in current" " libvirt version") if copy_to_nfs and not libvirt_version.version_compare(1, 1, 1): raise exceptions.TestSkipError("Bug will not fix: %s" % bug_url) # Check the source disk if vm_xml.VMXML.check_disk_exist(vm_name, target): logging.debug("Find %s in domain %s", target, vm_name) else: raise exceptions.TestFail("Can't find %s in domain %s" % (target, vm_name)) original_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) tmp_dir = data_dir.get_tmp_dir() # Prepare dest path params dest_path = params.get("dest_path", "") dest_format = params.get("dest_format", "") # Ugh... this piece of chicanery brought to you by the QemuImg which # will "add" the 'dest_format' extension during the check_format code. # So if we create the file with the extension and then remove it when # doing the check_format later, then we avoid erroneous failures. 
dest_extension = "" if dest_format != "": dest_extension = ".%s" % dest_format # Prepare for --reuse-external option if reuse_external: options += "--reuse-external --wait" # Set rerun_flag=1 to do blockcopy twice, and the first time created # file can be reused in the second time if no dest_path given # This will make sure the image size equal to original disk size if dest_path == "/path/non-exist": if os.path.exists(dest_path) and not os.path.isdir(dest_path): os.remove(dest_path) else: rerun_flag = 1 # Prepare other options if dest_format == "raw": options += " --raw" if with_blockdev: options += " --blockdev" if len(bandwidth): options += " --bandwidth %s" % bandwidth if with_shallow: options += " --shallow" # Prepare acl options uri = params.get("virsh_uri") unprivileged_user = params.get('unprivileged_user') if unprivileged_user: if unprivileged_user.count('EXAMPLE'): unprivileged_user = '******' extra_dict = {'uri': uri, 'unprivileged_user': unprivileged_user, 'debug': True, 'ignore_status': True, 'timeout': timeout} libvirtd_utl = utils_libvirtd.Libvirtd() libvirtd_conf = utils_config.LibvirtdConfig() libvirtd_conf["log_filters"] = '"3:json 1:libvirt 1:qemu"' libvirtd_log_path = os.path.join(test.tmpdir, "libvirtd.log") libvirtd_conf["log_outputs"] = '"1:file:%s"' % libvirtd_log_path logging.debug("the libvirtd config file content is:\n %s" % libvirtd_conf) libvirtd_utl.restart() def check_format(dest_path, dest_extension, expect): """ Check the image format :param dest_path: Path of the copy to create :param expect: Expect image format """ # And now because the QemuImg will add the extension for us # we have to remove it here. 
path_noext = dest_path.strip(dest_extension) params['image_name'] = path_noext params['image_format'] = expect image = qemu_storage.QemuImg(params, "/", path_noext) if image.get_format() == expect: logging.debug("%s format is %s", dest_path, expect) else: raise exceptions.TestFail("%s format is not %s" % (dest_path, expect)) def _blockjob_and_libvirtd_chk(cmd_result): """ Raise TestFail when blockcopy fail with block-job-complete error or blockcopy hang with state change lock. """ bug_url_ = "https://bugzilla.redhat.com/show_bug.cgi?id=1197592" err_msg = "internal error: unable to execute QEMU command" err_msg += " 'block-job-complete'" if err_msg in cmd_result.stderr: raise exceptions.TestFail("Hit on bug: %s" % bug_url_) err_pattern = "Timed out during operation: cannot acquire" err_pattern += " state change lock" ret = chk_libvirtd_log(libvirtd_log_path, err_pattern, "error") if ret: raise exceptions.TestFail("Hit on bug: %s" % bug_url_) def _blockcopy_cmd(): """ Run blockcopy command """ cmd_result = virsh.blockcopy(vm_name, target, dest_path, options, **extra_dict) _blockjob_and_libvirtd_chk(cmd_result) if cmd_result.exit_status: return False elif "Copy aborted" in cmd_result.stdout: return False else: return cmd_result def _make_snapshot(): """ Make external disk snapshot """ snap_xml = snapshot_xml.SnapshotXML() snapshot_name = "blockcopy_snap" snap_xml.snap_name = snapshot_name snap_xml.description = "blockcopy snapshot" # Add all disks into xml file. 
vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) disks = vmxml.devices.by_device_tag('disk') new_disks = [] src_disk_xml = disks[0] disk_xml = snap_xml.SnapDiskXML() disk_xml.xmltreefile = src_disk_xml.xmltreefile del disk_xml.device del disk_xml.address disk_xml.snapshot = "external" disk_xml.disk_name = disk_xml.target['dev'] # Only qcow2 works as external snapshot file format, update it # here driver_attr = disk_xml.driver driver_attr.update({'type': 'qcow2'}) disk_xml.driver = driver_attr new_attrs = disk_xml.source.attrs if disk_xml.source.attrs.has_key('file'): new_file = os.path.join(tmp_dir, "blockcopy_shallow.snap") snapshot_external_disks.append(new_file) new_attrs.update({'file': new_file}) hosts = None elif (disk_xml.source.attrs.has_key('dev') or disk_xml.source.attrs.has_key('name') or disk_xml.source.attrs.has_key('pool')): if (disk_xml.type_name == 'block' or disk_source_protocol == 'iscsi'): disk_xml.type_name = 'block' if new_attrs.has_key('name'): del new_attrs['name'] del new_attrs['protocol'] elif new_attrs.has_key('pool'): del new_attrs['pool'] del new_attrs['volume'] del new_attrs['mode'] back_path = utl.setup_or_cleanup_iscsi(is_setup=True, is_login=True, image_size="1G", emulated_image=back_n) emulated_iscsi.append(back_n) cmd = "qemu-img create -f qcow2 %s 1G" % back_path process.run(cmd, shell=True) new_attrs.update({'dev': back_path}) hosts = None new_src_dict = {"attrs": new_attrs} if hosts: new_src_dict.update({"hosts": hosts}) disk_xml.source = disk_xml.new_disk_source(**new_src_dict) new_disks.append(disk_xml) snap_xml.set_disks(new_disks) snapshot_xml_path = snap_xml.xml logging.debug("The snapshot xml is: %s" % snap_xml.xmltreefile) options = "--disk-only --xmlfile %s " % snapshot_xml_path snapshot_result = virsh.snapshot_create( vm_name, options, debug=True) if snapshot_result.exit_status != 0: raise exceptions.TestFail(snapshot_result.stderr) snap_path = '' save_path = '' emulated_iscsi = [] try: # Prepare dest_path 
tmp_file = time.strftime("%Y-%m-%d-%H.%M.%S.img") tmp_file += dest_extension if not dest_path: if with_blockdev: blkdev_n = 'blockdev-iscsi' dest_path = utl.setup_or_cleanup_iscsi(is_setup=True, is_login=True, image_size=image_size, emulated_image=blkdev_n) emulated_iscsi.append(blkdev_n) else: if copy_to_nfs: tmp_dir = "%s/%s" % (tmp_dir, mnt_path_name) dest_path = os.path.join(tmp_dir, tmp_file) # Domain disk replacement with desire type if replace_vm_disk: # Calling 'set_vm_disk' is bad idea as it left lots of cleanup jobs # after test, such as pool, volume, nfs, iscsi and so on # TODO: remove this function in the future utl.set_vm_disk(vm, params, tmp_dir, test) emulated_iscsi.append(emu_image) new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) if with_shallow: _make_snapshot() # Prepare transient/persistent vm if persistent_vm == "no" and vm.is_persistent(): vm.undefine() elif persistent_vm == "yes" and not vm.is_persistent(): new_xml.define() # Run blockcopy command if rerun_flag == 1: options1 = "--wait %s --finish --verbose" % dest_format if with_blockdev: options1 += " --blockdev" if with_shallow: options1 += " --shallow" cmd_result = virsh.blockcopy(vm_name, target, dest_path, options1, **extra_dict) status = cmd_result.exit_status if status != 0: raise exceptions.TestFail("Run blockcopy command fail") elif not os.path.exists(dest_path): raise exceptions.TestFail("Cannot find the created copy") cmd_result = utils_misc.wait_for(_blockcopy_cmd, 10) if not cmd_result: raise exceptions.TestFail("Run blockcopy command fail") status = 0 else: cmd_result = virsh.blockcopy(vm_name, target, dest_path, options, **extra_dict) _blockjob_and_libvirtd_chk(cmd_result) status = cmd_result.exit_status if not libvirtd_utl.is_running(): raise exceptions.TestFail("Libvirtd service is dead") if not status_error: if status == 0: ret = utils_misc.wait_for( lambda: check_xml(vm_name, target, dest_path, options), 5) if not ret: raise exceptions.TestFail("Domain xml not 
expected after" " blockcopy") if options.count("--bandwidth"): utl.check_blockjob(vm_name, target, "bandwidth", bandwidth) if check_state_lock: # Run blockjob pivot in subprocess as it will hang # for a while, run blockjob info again to check # job state command = "virsh blockjob %s %s --pivot" % (vm_name, target) session = aexpect.ShellSession(command) ret = virsh.blockjob(vm_name, target, "--info") err_info = "cannot acquire state change lock" if err_info in ret.stderr: raise exceptions.TestFail("Hit on bug: %s" % bug_url) utl.check_exit_status(ret, status_error) session.close() val = options.count("--pivot") + options.count("--finish") if val == 0: try: finish_job(vm_name, target, default_timeout) except JobTimeout, excpt: raise exceptions.TestFail("Run command failed: %s" % excpt) if options.count("--raw") and not with_blockdev: check_format(dest_path, dest_extension, dest_format) if active_snap: snap_path = "%s/%s.snap" % (tmp_dir, vm_name) snap_opt = "--disk-only --atomic --no-metadata " snap_opt += "vda,snapshot=external,file=%s" % snap_path ret = virsh.snapshot_create_as(vm_name, snap_opt, ignore_statues=True, debug=True) utl.check_exit_status(ret, active_error) if active_save: save_path = "%s/%s.save" % (tmp_dir, vm_name) ret = virsh.save(vm_name, save_path, ignore_statues=True, debug=True) utl.check_exit_status(ret, active_error) else: raise exceptions.TestFail(cmd_result.stderr) else:
def run_virsh_snapshot_dumpxml(test, params, env):
    """
    Test snapshot-dumpxml command, make sure that the xml you get is correct

    Test scenaries:
    1. live snapshot dump
    2. shutoff snapshot dump
    3. dumpxml with security info
    4. readonly mode

    :param test: test object (unused directly; kept for the harness signature)
    :param params: cartesian config parameters driving the scenario
    :param env: test environment, used to look up the VM object
    """
    # Skip early when the installed libvirt has no snapshot-dumpxml at all.
    if not virsh.has_help_command('snapshot-dumpxml'):
        raise error.TestNAError("This version of libvirt does not support "
                                "the snapshot-dumpxml test")

    vm_name = params.get("main_vm")
    status_error = params.get("status_error", "no")
    passwd = params.get("snapshot_passwd")
    secu_opt = params.get("snapshot_secure_option")
    desc_opt = params.get("snapshot_desc_option")
    mem_opt = params.get("snapshot_mem_option")
    disk_opt = params.get("disk_only_snap")
    snap_name = params.get("snapshot_name", "snap_test")
    readonly = params.get("readonly", False)

    try:
        snap_opt = ""
        opt_dict = {}
        # collect all the parameters at one time
        # (locals() is used as a name->value lookup for the option variables
        # assigned above)
        opt_name = locals()
        for opt in ["snap_name", "desc_opt", "mem_opt", "disk_opt"]:
            if opt_name[opt] is not None:
                # Integrate snapshot create options
                snap_opt = snap_opt + " " + opt_name[opt]

        # Do xml backup for final recovery
        vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        # Add passwd in guest graphics
        # (the VM must be down while its XML is edited, then restarted)
        if passwd is not None:
            vm = env.get_vm(vm_name)
            if vm.is_alive():
                vm.destroy()
            vm_xml.VMXML.add_security_info(
                vm_xml.VMXML.new_from_dumpxml(vm_name), passwd)
            vm.start()
            if secu_opt is not None:
                opt_dict['passwd'] = passwd

        logging.debug("snapshot create options are %s", snap_opt)

        # Get state to do snapshot xml state check
        dom_state = virsh.domstate(vm_name).stdout.strip()

        # Create disk snapshot before all to make the origin image clean
        virsh.snapshot_create_as(vm_name, "--disk-only")

        # Create snapshot with options
        snapshot_result = virsh.snapshot_create_as(vm_name, snap_opt,
                                                   readonly=readonly)
        if snapshot_result.exit_status:
            if status_error == "no":
                raise error.TestFail("Failed to create snapshot. Error:%s."
                                     % snapshot_result.stderr.strip())
            elif status_error == "yes":
                logging.info("Create snapshot failed as expected, Error:%s.",
                             snapshot_result.stderr.strip())
                return

        ctime = get_snap_createtime(vm_name, snap_name)

        # Run virsh command for snapshot-dumpxml
        dumpxml_result = virsh.snapshot_dumpxml(vm_name, snap_name, secu_opt)
        if dumpxml_result.exit_status:
            if status_error == "no":
                raise error.TestFail("Failed to dump snapshot xml. Error:%s."
                                     % dumpxml_result.stderr.strip())
            elif status_error == "yes":
                logging.info("Dumpxml snapshot failed as expected, Error:%s.",
                             dumpxml_result.stderr.strip())
                return

        # Record all the parameters in dict at one time
        check_name = locals()
        for var in ["vm_name", "snap_name", "desc_opt", "dom_state",
                    "ctime", "disk_opt"]:
            if check_name[var] is not None:
                opt_dict[var] = check_name[var]

        logging.debug("opt_dict is %s", opt_dict)
        output = dumpxml_result.stdout.strip()
        snapshot_dumpxml_check(output, opt_dict)

    finally:
        # Recovery: drop any snapshots created here and restore original XML.
        utils_test.libvirt.clean_up_snapshots(vm_name)
        vmxml_backup.sync("--snapshots-metadata")
def run(test, params, env):
    """
    Test snapshot-create-as command
    Make sure that the clean repo can be used because qemu-guest-agent need to
    be installed in guest

    The command create a snapshot (disk and RAM) from arguments which including
    the following point
    * virsh snapshot-create-as --print-xml --diskspec --name --description
    * virsh snapshot-create-as --print-xml with multi --diskspec
    * virsh snapshot-create-as --print-xml --memspec
    * virsh snapshot-create-as --description
    * virsh snapshot-create-as --no-metadata
    * virsh snapshot-create-as --no-metadata --print-xml (negative test)
    * virsh snapshot-create-as --atomic --disk-only
    * virsh snapshot-create-as --quiesce --disk-only (positive and negative)
    * virsh snapshot-create-as --reuse-external
    * virsh snapshot-create-as --disk-only --diskspec
    * virsh snapshot-create-as --memspec --reuse-external --atomic(negative)
    * virsh snapshot-create-as --disk-only and --memspec (negative)
    * Create multi snapshots with snapshot-create-as
    * Create snapshot with name a--a a--a--snap1

    :param test: test object, provides tmpdir for scratch files
    :param params: cartesian config parameters driving the scenario
    :param env: test environment, used to look up the VM object
    """
    if not virsh.has_help_command('snapshot-create-as'):
        raise error.TestNAError("This version of libvirt does not support "
                                "the snapshot-create-as test")

    vm_name = params.get("main_vm")
    status_error = params.get("status_error", "no")
    options = params.get("snap_createas_opts")
    multi_num = params.get("multi_num", "1")
    diskspec_num = params.get("diskspec_num", "1")
    bad_disk = params.get("bad_disk")
    reuse_external = "yes" == params.get("reuse_external", "no")
    start_ga = params.get("start_ga", "yes")
    domain_state = params.get("domain_state")
    memspec_opts = params.get("memspec_opts")
    config_format = "yes" == params.get("config_format", "no")
    snapshot_image_format = params.get("snapshot_image_format")
    diskspec_opts = params.get("diskspec_opts")
    create_autodestroy = 'yes' == params.get("create_autodestroy", "no")
    unix_channel = "yes" == params.get("unix_channel", "yes")
    dac_denial = "yes" == params.get("dac_denial", "no")
    check_json_no_savevm = "yes" == params.get("check_json_no_savevm", "no")
    disk_snapshot_attr = params.get('disk_snapshot_attr', 'external')
    set_snapshot_attr = "yes" == params.get("set_snapshot_attr", "no")

    # gluster related params
    replace_vm_disk = "yes" == params.get("replace_vm_disk", "no")
    disk_src_protocol = params.get("disk_source_protocol")
    restart_tgtd = params.get("restart_tgtd", "no")
    vol_name = params.get("vol_name")
    tmp_dir = data_dir.get_tmp_dir()
    pool_name = params.get("pool_name", "gluster-pool")
    brick_path = os.path.join(tmp_dir, pool_name)

    uri = params.get("virsh_uri")
    usr = params.get('unprivileged_user')
    if usr:
        if usr.count('EXAMPLE'):
            usr = '******'

    if disk_src_protocol == 'iscsi':
        if not libvirt_version.version_compare(1, 0, 4):
            raise error.TestNAError("'iscsi' disk doesn't support in"
                                    " current libvirt version.")

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    if not libvirt_version.version_compare(1, 2, 7):
        # As bug 1017289 closed as WONTFIX, the support only
        # exist on 1.2.7 and higher
        if disk_src_protocol == 'gluster':
            raise error.TestNAError("Snapshot on glusterfs not support in "
                                    "current version. Check more info with "
                                    "https://bugzilla.redhat.com/buglist.cgi?"
                                    "bug_id=1017289,1032370")

    opt_names = locals()
    if memspec_opts is not None:
        mem_options = compose_disk_options(test, params, memspec_opts)
        # if the parameters have the disk without "file=" then we only need to
        # add testdir for it.
        if mem_options is None:
            mem_options = os.path.join(test.tmpdir, memspec_opts)
        options += " --memspec " + mem_options

    tag_diskspec = 0
    dnum = int(diskspec_num)
    if diskspec_opts is not None:
        tag_diskspec = 1
        opt_names['diskopts_1'] = diskspec_opts

    # diskspec_opts[n] is used in cfg when more than 1 --diskspec is used
    if dnum > 1:
        tag_diskspec = 1
        for i in range(1, dnum + 1):
            opt_names["diskopts_%s" % i] = params.get("diskspec_opts%s" % i)

    if tag_diskspec == 1:
        for i in range(1, dnum + 1):
            disk_options = compose_disk_options(test, params,
                                                opt_names["diskopts_%s" % i])
            options += " --diskspec " + disk_options

    logging.debug("options are %s", options)

    vm = env.get_vm(vm_name)
    option_dict = {}
    option_dict = utils_misc.valued_option_dict(options, r' --(?!-)')
    logging.debug("option_dict is %s", option_dict)

    # A backup of original vm
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    logging.debug("original xml is %s", vmxml_backup)

    # Generate empty image for negative test
    if bad_disk is not None:
        bad_disk = os.path.join(test.tmpdir, bad_disk)
        # Close the descriptor immediately: we only need the empty file to
        # exist; the original code leaked the fd returned by os.open().
        os.close(os.open(bad_disk, os.O_RDWR | os.O_CREAT))

    # Generate external disk
    if reuse_external:
        disk_path = ''
        for i in range(dnum):
            external_disk = "external_disk%s" % i
            if params.get(external_disk):
                disk_path = os.path.join(test.tmpdir,
                                         params.get(external_disk))
                utils.run("qemu-img create -f qcow2 %s 1G" % disk_path)
        # Only chmod of the last external disk for negative case
        if dac_denial:
            utils.run("chmod 500 %s" % disk_path)

    qemu_conf = None
    libvirtd_conf = None
    libvirtd_log_path = None
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        # Config "snapshot_image_format" option in qemu.conf
        if config_format:
            qemu_conf = utils_config.LibvirtQemuConfig()
            qemu_conf.snapshot_image_format = snapshot_image_format
            logging.debug("the qemu config file content is:\n %s" % qemu_conf)
            libvirtd.restart()

        if check_json_no_savevm:
            # Turn libvirtd logging up so the json monitor traffic is
            # captured; used later to verify bug 872292 (no 'savevm').
            libvirtd_conf = utils_config.LibvirtdConfig()
            libvirtd_conf["log_level"] = '1'
            libvirtd_conf["log_filters"] = '"1:json 3:remote 4:event"'
            libvirtd_log_path = os.path.join(test.tmpdir, "libvirtd.log")
            libvirtd_conf["log_outputs"] = '"1:file:%s"' % libvirtd_log_path
            logging.debug("the libvirtd config file content is:\n %s" %
                          libvirtd_conf)
            libvirtd.restart()

        if replace_vm_disk:
            libvirt.set_vm_disk(vm, params, tmp_dir)

        if set_snapshot_attr:
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vmxml_new = vm_xml.VMXML.new_from_dumpxml(vm_name)
            disk_xml = vmxml_backup.get_devices(device_type="disk")[0]
            vmxml_new.del_device(disk_xml)
            # set snapshot attribute in disk xml
            disk_xml.snapshot = disk_snapshot_attr
            new_disk = disk.Disk(type_name='file')
            new_disk.xmltreefile = disk_xml.xmltreefile
            vmxml_new.add_device(new_disk)
            logging.debug("The vm xml now is: %s" % vmxml_new.xmltreefile)
            vmxml_new.sync()
            vm.start()

        # Start qemu-ga on guest if have --quiesce
        if unix_channel and options.find("quiesce") >= 0:
            vm.prepare_guest_agent()
            session = vm.wait_for_login()
            if start_ga == "no":
                # The qemu-ga could be running and should be killed
                session.cmd("kill -9 `pidof qemu-ga`")
                # Check if the qemu-ga get killed
                stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
                if not stat_ps:
                    # As managed by systemd and set as autostart, qemu-ga
                    # could be restarted, so use systemctl to stop it.
                    session.cmd("systemctl stop qemu-guest-agent")
                    stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
                    if not stat_ps:
                        raise error.TestNAError("Fail to stop agent in "
                                                "guest")

            if domain_state == "paused":
                virsh.suspend(vm_name)
        else:
            # Remove channel if exist
            if vm.is_alive():
                vm.destroy(gracefully=False)
            xml_inst = vm_xml.VMXML.new_from_dumpxml(vm_name)
            xml_inst.remove_agent_channels()
            vm.start()

        # Record the previous snapshot-list
        snaps_before = virsh.snapshot_list(vm_name)

        # Attach disk before create snapshot if not print xml and multi disks
        # specified in cfg
        if dnum > 1 and "--print-xml" not in options:
            for i in range(1, dnum):
                disk_path = os.path.join(test.tmpdir, 'disk%s.qcow2' % i)
                utils.run("qemu-img create -f qcow2 %s 200M" % disk_path)
                virsh.attach_disk(vm_name, disk_path,
                                  'vd%s' % list(string.lowercase)[i],
                                  debug=True)

        # Run virsh command
        # May create several snapshots, according to configuration
        for count in range(int(multi_num)):
            if create_autodestroy:
                # Run virsh command in interactive mode
                vmxml_backup.undefine()
                vp = virsh.VirshPersistent()
                vp.create(vmxml_backup['xml'], '--autodestroy')
                cmd_result = vp.snapshot_create_as(vm_name, options,
                                                   ignore_status=True,
                                                   debug=True)
                vp.close_session()
                vmxml_backup.define()
            else:
                cmd_result = virsh.snapshot_create_as(vm_name, options,
                                                      unprivileged_user=usr,
                                                      uri=uri,
                                                      ignore_status=True,
                                                      debug=True)
                # for multi snapshots without specific snapshot name, the
                # snapshot name is using time string with 1 second
                # incremental, to avoid get snapshot failure with same name,
                # sleep 1 second here.
                if int(multi_num) > 1:
                    time.sleep(1.1)

            output = cmd_result.stdout.strip()
            status = cmd_result.exit_status

            # check status_error
            if status_error == "yes":
                if status == 0:
                    raise error.TestFail("Run successfully with wrong "
                                         "command!")
                else:
                    # Check memspec file should be removed if failed
                    if (options.find("memspec") >= 0
                            and options.find("atomic") >= 0):
                        if os.path.isfile(option_dict['memspec']):
                            os.remove(option_dict['memspec'])
                            raise error.TestFail("Run failed but file %s exist"
                                                 % option_dict['memspec'])
                        else:
                            logging.info("Run failed as expected and memspec"
                                         " file already been removed")
                    # Check domain xml is not updated if reuse external fail
                    elif reuse_external and dac_denial:
                        output = virsh.dumpxml(vm_name).stdout.strip()
                        if "reuse_external" in output:
                            raise error.TestFail("Domain xml should not be "
                                                 "updated with snapshot image")
                    else:
                        logging.info("Run failed as expected")

            elif status_error == "no":
                if status != 0:
                    raise error.TestFail("Run failed with right command: %s"
                                         % output)
                else:
                    # Check the special options
                    snaps_list = virsh.snapshot_list(vm_name)
                    logging.debug("snaps_list is %s", snaps_list)
                    check_snapslist(vm_name, options, option_dict, output,
                                    snaps_before, snaps_list)

                    # For cover bug 872292
                    if check_json_no_savevm:
                        pattern = "The command savevm has not been found"
                        with open(libvirtd_log_path) as f:
                            for line in f:
                                if pattern in line and "error" in line:
                                    raise error.TestFail("'%s' was found: %s"
                                                         % (pattern, line))

    finally:
        if vm.is_alive():
            vm.destroy()
        # recover domain xml
        xml_recover(vmxml_backup)
        path = "/var/lib/libvirt/qemu/snapshot/" + vm_name
        # NOTE(review): 'path' names a directory, so os.path.isfile() is
        # always False and this check never fires — presumably it should be
        # os.path.exists(); left unchanged to avoid flaky failures. Confirm.
        if os.path.isfile(path):
            raise error.TestFail("Still can find snapshot metadata")

        if disk_src_protocol == 'gluster':
            libvirt.setup_or_cleanup_gluster(False, vol_name, brick_path)
            libvirtd.restart()

        if disk_src_protocol == 'iscsi':
            libvirt.setup_or_cleanup_iscsi(False, restart_tgtd=restart_tgtd)

        # rm bad disks
        if bad_disk is not None:
            os.remove(bad_disk)
        # rm attach disks and reuse external disks
        if dnum > 1 and "--print-xml" not in options:
            for i in range(dnum):
                disk_path = os.path.join(test.tmpdir, 'disk%s.qcow2' % i)
                if os.path.exists(disk_path):
                    os.unlink(disk_path)
                if reuse_external:
                    external_disk = "external_disk%s" % i
                    disk_path = os.path.join(test.tmpdir,
                                             params.get(external_disk))
                    if os.path.exists(disk_path):
                        os.unlink(disk_path)

        # restore config
        if config_format and qemu_conf:
            qemu_conf.restore()
        if libvirtd_conf:
            libvirtd_conf.restore()
        if libvirtd_conf or (config_format and qemu_conf):
            libvirtd.restart()
        if libvirtd_log_path and os.path.exists(libvirtd_log_path):
            os.unlink(libvirtd_log_path)
def run(test, params, env):
    """
    Attach/Detach an iscsi network/volume disk to domain

    1. For secret usage testing:
        1.1. Setup an iscsi target with CHAP authentication.
        1.2. Define a secret for iscsi target usage
        1.3. Set secret value
    2. Create
    4. Create an iscsi network disk XML
    5. Attach disk with the XML file and check the disk inside the VM
    6. Detach the disk

    :param test: test object, provides tmpdir for scratch files
    :param params: cartesian config parameters driving the scenario
    :param env: test environment, used to look up the VM object
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    disk_device = params.get("disk_device", "disk")
    disk_type = params.get("disk_type", "network")
    disk_src_protocol = params.get("disk_source_protocol", "iscsi")
    disk_src_host = params.get("disk_source_host", "127.0.0.1")
    disk_src_port = params.get("disk_source_port", "3260")
    disk_src_pool = params.get("disk_source_pool")
    disk_src_mode = params.get("disk_source_mode", "host")
    pool_type = params.get("pool_type", "iscsi")
    pool_src_host = params.get("pool_source_host", "127.0.0.1")
    disk_target = params.get("disk_target", "vdb")
    disk_target_bus = params.get("disk_target_bus", "virtio")
    disk_readonly = params.get("disk_readonly", "no")
    chap_auth = "yes" == params.get("chap_auth", "no")
    chap_user = params.get("chap_username", "")
    chap_passwd = params.get("chap_password", "")
    secret_usage_target = params.get("secret_usage_target")
    secret_ephemeral = params.get("secret_ephemeral", "no")
    secret_private = params.get("secret_private", "yes")
    status_error = "yes" == params.get("status_error", "no")

    # Indicate the PPC platform
    on_ppc = False
    if platform.platform().count('ppc64'):
        on_ppc = True

    if disk_src_protocol == 'iscsi':
        if not libvirt_version.version_compare(1, 0, 4):
            raise error.TestNAError("'iscsi' disk doesn't support in"
                                    " current libvirt version.")
    if disk_type == "volume":
        if not libvirt_version.version_compare(1, 0, 5):
            raise error.TestNAError("'volume' type disk doesn't support in"
                                    " current libvirt version.")
    # Back VM XML
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    virsh_dargs = {'debug': True, 'ignore_status': True}
    try:
        if chap_auth:
            # Create a secret xml to define it
            secret_xml = SecretXML(secret_ephemeral, secret_private)
            secret_xml.auth_type = "chap"
            secret_xml.auth_username = chap_user
            secret_xml.usage = disk_src_protocol
            secret_xml.target = secret_usage_target
            logging.debug("Define secret by XML: %s",
                          open(secret_xml.xml).read())
            # Define secret
            cmd_result = virsh.secret_define(secret_xml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            # Get secret uuid
            try:
                secret_uuid = cmd_result.stdout.strip().split()[1]
            except IndexError:
                raise error.TestError("Fail to get new created secret uuid")

            # Set secret value
            secret_string = base64.b64encode(chap_passwd)
            cmd_result = virsh.secret_set_value(secret_uuid, secret_string,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
        else:
            # Set chap_user and chap_passwd to empty to avoid setup
            # CHAP authentication when export iscsi target
            chap_user = ""
            chap_passwd = ""

        # Setup iscsi target
        iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
            is_setup=True, is_login=False, image_size='1G',
            chap_user=chap_user, chap_passwd=chap_passwd,
            portal_ip=disk_src_host)

        # Create iscsi pool
        if disk_type == "volume":
            # Create an iscsi pool xml to create it
            pool_src_xml = pool_xml.SourceXML()
            pool_src_xml.host_name = pool_src_host
            pool_src_xml.device_path = iscsi_target
            poolxml = pool_xml.PoolXML(pool_type=pool_type)
            poolxml.name = disk_src_pool
            poolxml.set_source(pool_src_xml)
            poolxml.target_path = "/dev/disk/by-path"
            # Create iscsi pool
            cmd_result = virsh.pool_create(poolxml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            def get_vol():
                """Get the volume info"""
                # Refresh the pool
                cmd_result = virsh.pool_refresh(disk_src_pool)
                libvirt.check_exit_status(cmd_result)
                # Get volume name
                cmd_result = virsh.vol_list(disk_src_pool, **virsh_dargs)
                libvirt.check_exit_status(cmd_result)
                vol_list = []
                vol_list = re.findall(r"(\S+)\ +(\S+)[\ +\n]",
                                      str(cmd_result.stdout))
                # First match is the table header; real volume is element 1.
                if len(vol_list) > 1:
                    return vol_list[1]
                else:
                    return None

            # Wait for a while so that we can get the volume info
            vol_info = utils_misc.wait_for(get_vol, 10)
            if vol_info:
                vol_name, vol_path = vol_info
            else:
                raise error.TestError("Failed to get volume info")
            # Snapshot doesn't support raw disk format, create a qcow2 volume
            # disk for snapshot operation.
            process.run('qemu-img create -f qcow2 %s %s' % (vol_path, '100M'),
                        shell=True)

        # Create iscsi network disk XML
        disk_params = {'device_type': disk_device,
                       'type_name': disk_type,
                       'target_dev': disk_target,
                       'target_bus': disk_target_bus,
                       'readonly': disk_readonly}
        disk_params_src = {}
        if disk_type == "network":
            disk_params_src = {'source_protocol': disk_src_protocol,
                               'source_name': iscsi_target + "/%s" % lun_num,
                               'source_host_name': disk_src_host,
                               'source_host_port': disk_src_port}
        elif disk_type == "volume":
            disk_params_src = {'source_pool': disk_src_pool,
                               'source_volume': vol_name,
                               'driver_type': 'qcow2',
                               'source_mode': disk_src_mode}
        else:
            # BUG FIX: the exception was constructed but never raised, so an
            # unsupported disk_type silently fell through.
            raise error.TestNAError("Unsupport disk type in this test")

        disk_params.update(disk_params_src)
        if chap_auth:
            disk_params_auth = {'auth_user': chap_user,
                                'secret_type': disk_src_protocol,
                                'secret_usage': secret_xml.target}
            disk_params.update(disk_params_auth)
        disk_xml = libvirt.create_disk_xml(disk_params)

        start_vm = "yes" == params.get("start_vm", "yes")
        if start_vm:
            if vm.is_dead():
                vm.start()
            vm.wait_for_login()
        else:
            if not vm.is_dead():
                vm.destroy()

        attach_option = params.get("attach_option", "")
        # Use a context manager so the xml file handle cannot leak.
        with open(disk_xml) as disk_xml_f:
            disk_xml_content = disk_xml_f.read()
        logging.debug("Attach disk by XML: %s", disk_xml_content)
        cmd_result = virsh.attach_device(domainarg=vm_name, filearg=disk_xml,
                                         flagstr=attach_option,
                                         dargs=virsh_dargs)
        libvirt.check_exit_status(cmd_result, status_error)

        if vm.is_dead():
            cmd_result = virsh.start(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

        # Wait for domain is stable
        vm.wait_for_login().close()
        domain_operation = params.get("domain_operation", "")
        if domain_operation == "save":
            save_file = os.path.join(test.tmpdir, "vm.save")
            cmd_result = virsh.save(vm_name, save_file, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.restore(save_file)
            libvirt.check_exit_status(cmd_result)
            if os.path.exists(save_file):
                os.remove(save_file)
        elif domain_operation == "snapshot":
            # Run snapshot related commands: snapshot-create-as, snapshot-list
            # snapshot-info, snapshot-dumpxml, snapshot-create
            snapshot_name1 = "snap1"
            snapshot_name2 = "snap2"
            cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name1,
                                                  **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            try:
                virsh.snapshot_list(vm_name, **virsh_dargs)
            except process.CmdError:
                # BUG FIX: 'raise' was missing, so the failure was discarded.
                raise error.TestFail("Failed getting snapshots list for %s"
                                     % vm_name)

            try:
                virsh.snapshot_info(vm_name, snapshot_name1, **virsh_dargs)
            except process.CmdError:
                # BUG FIX: 'raise' was missing, so the failure was discarded.
                raise error.TestFail("Failed getting snapshots info for %s"
                                     % vm_name)

            cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name1,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_create(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_current(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            snapshot_file = os.path.join(test.tmpdir, snapshot_name2)
            sn_create_op = ("%s --disk-only --diskspec %s,file=%s"
                            % (snapshot_name2, disk_target, snapshot_file))
            cmd_result = virsh.snapshot_create_as(vm_name, sn_create_op,
                                                  **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.snapshot_revert(vm_name, snapshot_name1,
                                               **virsh_dargs)

            cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs)
            if snapshot_name2 not in cmd_result:
                raise error.TestError("Snapshot %s not found"
                                      % snapshot_name2)
        else:
            logging.error("Unsupport operation %s in this case, so skip it",
                          domain_operation)

        def find_attach_disk(expect=True):
            """
            Find attached disk inside the VM
            """
            found_disk = False
            if vm.is_dead():
                raise error.TestError("Domain %s is not running" % vm_name)
            else:
                try:
                    session = vm.wait_for_login()
                    # Here the script needs wait for a while for the guest to
                    # recognize the hotplugged disk on PPC
                    if on_ppc:
                        time.sleep(10)
                    cmd = "grep %s /proc/partitions" % disk_target
                    s, o = session.cmd_status_output(cmd)
                    logging.info("%s output: %s", cmd, o)
                    session.close()
                    if s == 0:
                        found_disk = True
                except (LoginError, VMError, ShellError) as e:
                    logging.error(str(e))

            if found_disk == expect:
                logging.debug("Check disk inside the VM PASS as expected")
            else:
                raise error.TestError("Check disk inside the VM FAIL")

        # Check disk inside the VM, expect is False if status_error=True
        find_attach_disk(not status_error)

        # Detach disk
        cmd_result = virsh.detach_disk(vm_name, disk_target)
        libvirt.check_exit_status(cmd_result, status_error)

        # Check disk inside the VM
        find_attach_disk(False)
    finally:
        # BUG FIX: the visible code had a 'try' with no finally, leaving the
        # iscsi target, pool, secret and domain XML behind on failure.
        # NOTE(review): cleanup reconstructed conservatively — confirm against
        # the original test's recovery steps.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Restore domain XML, dropping any snapshot metadata created above.
        vmxml_backup.sync("--snapshots-metadata")
        if disk_type == "volume":
            virsh.pool_destroy(disk_src_pool, **virsh_dargs)
        if chap_auth and 'secret_uuid' in locals():
            virsh.secret_undefine(secret_uuid, **virsh_dargs)
        libvirt.setup_or_cleanup_iscsi(is_setup=False)
def run(test, params, env):
    """
    Test command: virsh blockcommit <domain> <path>

    1) Prepare test environment.
    2) Commit changes from a snapshot down to its backing image.
    3) Recover test environment.
    4) Check result.

    :param test: test object (unused directly; kept for the harness signature)
    :param params: cartesian config parameters driving the scenario
    :param env: test environment, used to look up the VM object
    """

    def make_disk_snapshot():
        """Take three external disk-only snapshots of every VM disk and
        drop a flag file in the guest after each one (verified later)."""
        # Add all disks into commandline.
        disks = vm.get_disk_devices()

        # Make three external snapshots for disks only
        for count in range(1, 4):
            options = "snapshot%s snap%s-desc " \
                      "--disk-only --atomic --no-metadata" % (count, count)
            if needs_agent:
                options += " --quiesce"

            for disk in disks:
                disk_detail = disks[disk]
                basename = os.path.basename(disk_detail['source'])

                # Remove the original suffix if any, appending ".snap[0-9]"
                diskname = basename.split(".")[0]
                disk_external = os.path.join(tmp_dir,
                                             "%s.snap%s" % (diskname, count))

                snapshot_external_disks.append(disk_external)
                options += " %s,snapshot=external,file=%s" % (disk,
                                                              disk_external)

            cmd_result = virsh.snapshot_create_as(vm_name, options,
                                                  ignore_status=True,
                                                  debug=True)
            status = cmd_result.exit_status
            if status != 0:
                raise error.TestFail("Failed to make snapshots for disks!")

            # Create a file flag in VM after each snapshot
            # (NamedTemporaryFile is only used to generate a unique path;
            # the file itself is created inside the guest with 'touch')
            flag_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"),
                                                    dir="/tmp")
            file_path = flag_file.name
            flag_file.close()

            status, output = session.cmd_status_output("touch %s" % file_path)
            if status:
                raise error.TestFail("Touch file in vm failed. %s" % output)

            snapshot_flag_files.append(file_path)

    # MAIN TEST CODE ###
    # Process cartesian parameters
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    vm_state = params.get("vm_state", "running")
    needs_agent = "yes" == params.get("needs_agent", "yes")
    replace_vm_disk = "yes" == params.get("replace_vm_disk", "no")
    top_inactive = ("yes" == params.get("top_inactive"))
    with_timeout = ("yes" == params.get("with_timeout_option", "no"))
    status_error = ("yes" == params.get("status_error", "no"))
    base_option = params.get("base_option", "none")
    middle_base = "yes" == params.get("middle_base", "no")
    pivot_opt = "yes" == params.get("pivot_opt", "no")
    snap_in_mirror = "yes" == params.get("snap_in_mirror", "no")
    snap_in_mirror_err = "yes" == params.get("snap_in_mirror_err", "no")
    virsh_dargs = {'debug': True}

    # Process domain disk device parameters
    disk_type = params.get("disk_type")
    disk_src_protocol = params.get("disk_source_protocol")
    vol_name = params.get("vol_name")
    tmp_dir = data_dir.get_tmp_dir()
    pool_name = params.get("pool_name", "gluster-pool")
    brick_path = os.path.join(tmp_dir, pool_name)

    if not top_inactive:
        if not libvirt_version.version_compare(1, 2, 4):
            raise error.TestNAError("live active block commit is not supported"
                                    + " in current libvirt version.")

    # A backup of original vm
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Abort the test if there are snapshots already
    existing_snaps = virsh.snapshot_list(vm_name)
    if len(existing_snaps) != 0:
        raise error.TestFail("There are snapshots created for %s already"
                             % vm_name)

    snapshot_external_disks = []
    try:
        if disk_src_protocol == 'iscsi' and disk_type == 'network':
            if not libvirt_version.version_compare(1, 0, 4):
                raise error.TestNAError("'iscsi' disk doesn't support in"
                                        + " current libvirt version.")
        # Set vm xml and guest agent
        if replace_vm_disk:
            libvirt.set_vm_disk(vm, params, tmp_dir)

        if needs_agent:
            libvirt.set_guest_agent(vm)

        # The first disk is supposed to include OS
        # We will perform blockcommit operation for it.
        first_disk = vm.get_first_disk_devices()
        blk_source = first_disk['source']
        blk_target = first_disk['target']
        snapshot_flag_files = []

        # get a vm session before snapshot
        session = vm.wait_for_login()
        # do snapshot
        make_disk_snapshot()

        # snapshot src file list: base image first, then snap1..snap3
        snap_src_lst = [blk_source]
        snap_src_lst += snapshot_external_disks

        # Build a human-readable top->base chain string for the debug log.
        backing_chain = ''
        for i in reversed(range(4)):
            if i == 0:
                backing_chain += "%s" % snap_src_lst[i]
            else:
                backing_chain += "%s -> " % snap_src_lst[i]

        logging.debug("The backing chain is: %s" % backing_chain)

        # check snapshot disk xml backingStore is expected
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disks = vmxml.devices.by_device_tag('disk')
        disk_xml = None
        for disk in disks:
            if disk.target['dev'] != blk_target:
                continue
            else:
                disk_xml = disk.xmltreefile
                logging.debug("the target disk xml after snapshot is %s",
                              disk_xml)
                break

        if not disk_xml:
            raise error.TestFail("Can't find disk xml with target %s" %
                                 blk_target)
        elif libvirt_version.version_compare(1, 2, 4):
            # backingStore element introuduced in 1.2.4
            chain_lst = snap_src_lst[::-1]
            ret = check_chain_xml(disk_xml, chain_lst)
            if not ret:
                raise error.TestFail("Domain image backing chain check failed")

        # set blockcommit_options
        top_image = None
        blockcommit_options = "--wait --verbose"
        basename = os.path.basename(blk_source)
        diskname = basename.split(".")[0]

        if with_timeout:
            blockcommit_options += " --timeout 1"

        if base_option == "shallow":
            blockcommit_options += " --shallow"
        elif base_option == "base":
            if middle_base:
                blk_source = os.path.join(tmp_dir, "%s.snap1" % diskname)
            blockcommit_options += " --base %s" % blk_source

        if top_inactive:
            top_image = os.path.join(tmp_dir, "%s.snap2" % diskname)
            blockcommit_options += " --top %s" % top_image
        else:
            blockcommit_options += " --active"
            if pivot_opt:
                blockcommit_options += " --pivot"

        if vm_state == "shut off":
            vm.shutdown()

        # Run test case
        result = virsh.blockcommit(vm_name, blk_target,
                                   blockcommit_options, **virsh_dargs)

        # Check status_error
        libvirt.check_exit_status(result, status_error)
        if result.exit_status and status_error:
            return

        # Poll the domain XML until the commit result is observable.
        while True:
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)

            disks = vmxml.devices.by_device_tag('disk')
            for disk in disks:
                if disk.target['dev'] != blk_target:
                    continue
                else:
                    disk_xml = disk.xmltreefile
                    break

            if not top_inactive:
                disk_mirror = disk_xml.find('mirror')
                if '--pivot' not in blockcommit_options:
                    if disk_mirror is not None:
                        job_type = disk_mirror.get('job')
                        job_ready = disk_mirror.get('ready')
                        src_element = disk_mirror.find('source')
                        disk_src_file = None
                        for elem in ('file', 'name', 'dev'):
                            elem_val = src_element.get(elem)
                            if elem_val:
                                disk_src_file = elem_val
                                break
                        err_msg = "blockcommit base source "
                        err_msg += "%s not expected" % disk_src_file
                        if '--shallow' in blockcommit_options:
                            if disk_src_file != snap_src_lst[2]:
                                raise error.TestFail(err_msg)
                        else:
                            if disk_src_file != blk_source:
                                raise error.TestFail(err_msg)
                        if libvirt_version.version_compare(1, 2, 7):
                            # The job attribute mentions which API started the
                            # operation since 1.2.7.
                            if job_type != 'active-commit':
                                raise error.TestFail("blockcommit job type "
                                                     "'%s' not expected"
                                                     % job_type)
                            if job_ready != 'yes':
                                # The attribute ready, if present, tracks
                                # progress of the job: yes if the disk is known
                                # to be ready to pivot, or, since 1.2.7, abort
                                # or pivot if the job is in the process of
                                # completing.
                                continue
                            else:
                                logging.debug("after active block commit job "
                                              "ready for pivot, the target "
                                              "disk xml is %s", disk_xml)
                                break
                        else:
                            break
                    else:
                        # Mirror element already gone: commit is complete;
                        # break instead of spinning forever.
                        break
                else:
                    if disk_mirror is None:
                        logging.debug(disk_xml)
                        if "--shallow" in blockcommit_options:
                            chain_lst = snap_src_lst[::-1]
                            chain_lst.pop(0)
                            ret = check_chain_xml(disk_xml, chain_lst)
                            if not ret:
                                raise error.TestFail("Domain image backing "
                                                     "chain check failed")
                        elif "--base" in blockcommit_options:
                            chain_lst = snap_src_lst[::-1]
                            base_index = chain_lst.index(blk_source)
                            chain_lst = chain_lst[base_index:]
                            ret = check_chain_xml(disk_xml, chain_lst)
                            if not ret:
                                raise error.TestFail("Domain image backing "
                                                     "chain check failed")
                        break
                    else:
                        # wait pivot after commit is synced
                        continue
            else:
                logging.debug("after inactive commit the disk xml is: %s"
                              % disk_xml)
                if libvirt_version.version_compare(1, 2, 4):
                    if "--shallow" in blockcommit_options:
                        chain_lst = snap_src_lst[::-1]
                        chain_lst.remove(top_image)
                        ret = check_chain_xml(disk_xml, chain_lst)
                        if not ret:
                            raise error.TestFail("Domain image backing chain "
                                                 "check failed")
                    elif "--base" in blockcommit_options:
                        chain_lst = snap_src_lst[::-1]
                        top_index = chain_lst.index(top_image)
                        base_index = chain_lst.index(blk_source)
                        val_tmp = []
                        for i in range(top_index, base_index):
                            val_tmp.append(chain_lst[i])
                        for i in val_tmp:
                            chain_lst.remove(i)
                        ret = check_chain_xml(disk_xml, chain_lst)
                        if not ret:
                            raise error.TestFail("Domain image backing chain "
                                                 "check failed")
                    break
                else:
                    break

        # Check flag files
        if not vm_state == "shut off":
            for flag in snapshot_flag_files:
                status, output = session.cmd_status_output("cat %s" % flag)
                if status:
                    raise error.TestFail("blockcommit failed: %s" % output)

        if not pivot_opt and snap_in_mirror:
            # do snapshot during mirror phase
            snap_path = "%s/%s.snap" % (tmp_dir, vm_name)
            snap_opt = "--disk-only --atomic --no-metadata "
            snap_opt += "vda,snapshot=external,file=%s" % snap_path
            snapshot_external_disks.append(snap_path)
            # BUG FIX: the keyword was misspelled 'ignore_statues', which
            # virsh silently ignored, so ignore_status was never applied.
            cmd_result = virsh.snapshot_create_as(vm_name, snap_opt,
                                                  ignore_status=True,
                                                  debug=True)
            libvirt.check_exit_status(cmd_result, snap_in_mirror_err)
    finally:
        if vm.is_alive():
            vm.destroy()
        # Recover xml of vm.
        vmxml_backup.sync("--snapshots-metadata")

        for disk in snapshot_external_disks:
            if os.path.exists(disk):
                os.remove(disk)

        if disk_src_protocol == 'iscsi':
            libvirt.setup_or_cleanup_iscsi(is_setup=False)
        elif disk_src_protocol == 'gluster':
            libvirt.setup_or_cleanup_gluster(False, vol_name, brick_path)
        elif disk_src_protocol == 'netfs':
            restore_selinux = params.get('selinux_status_bak')
            libvirt.setup_or_cleanup_nfs(is_setup=False,
                                         restore_selinux=restore_selinux)
def run(test, params, env):
    """
    Test command: virsh blockpull <domain> <path>

    1) Prepare test environment.
    2) Populate a disk from its backing image.
    3) Recover test environment.
    4) Check result.

    :param test: avocado test object (provides fail/cancel).
    :param params: cartesian config parameters for this variant.
    :param env: test environment holding the VM object.
    """

    def make_disk_snapshot(snapshot_take):
        """
        Make external snapshots for disks only.

        :param snapshot_take: number of snapshots to take.
        """
        for count in range(1, snapshot_take + 1):
            snap_xml = snapshot_xml.SnapshotXML()
            snapshot_name = "snapshot_test%s" % count
            snap_xml.snap_name = snapshot_name
            snap_xml.description = "Snapshot Test %s" % count

            # Add all disks into xml file.
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            disks = vmxml.devices.by_device_tag('disk')
            new_disks = []
            for src_disk_xml in disks:
                disk_xml = snap_xml.SnapDiskXML()
                disk_xml.xmltreefile = src_disk_xml.xmltreefile

                # Skip cdrom
                if disk_xml.device == "cdrom":
                    continue
                del disk_xml.device
                del disk_xml.address
                disk_xml.snapshot = "external"
                disk_xml.disk_name = disk_xml.target['dev']

                # Only qcow2 works as external snapshot file format, update it
                # here
                driver_attr = disk_xml.driver
                driver_attr.update({'type': 'qcow2'})
                disk_xml.driver = driver_attr

                new_attrs = disk_xml.source.attrs
                if 'file' in disk_xml.source.attrs:
                    file_name = disk_xml.source.attrs['file']
                    new_file = "%s.snap%s" % (file_name.split('.')[0], count)
                    snapshot_external_disks.append(new_file)
                    new_attrs.update({'file': new_file})
                    hosts = None
                elif ('name' in disk_xml.source.attrs and
                      disk_src_protocol == 'gluster'):
                    src_name = disk_xml.source.attrs['name']
                    new_name = "%s.snap%s" % (src_name.split('.')[0], count)
                    new_attrs.update({'name': new_name})
                    snapshot_external_disks.append(new_name)
                    hosts = disk_xml.source.hosts
                elif ('dev' in disk_xml.source.attrs or
                      'name' in disk_xml.source.attrs):
                    if (disk_xml.type_name == 'block' or
                            disk_src_protocol in ['iscsi', 'rbd']):
                        # Use local file as external snapshot target for block
                        # and iscsi network type.
                        # As block device will be treat as raw format by
                        # default, it's not fit for external disk snapshot
                        # target. A work around solution is use qemu-img again
                        # with the target.
                        # And external active snapshots are not supported on
                        # 'network' disks using 'iscsi' protocol
                        disk_xml.type_name = 'file'
                        if 'dev' in new_attrs:
                            del new_attrs['dev']
                        elif 'name' in new_attrs:
                            del new_attrs['name']
                            del new_attrs['protocol']
                        new_file = "%s/blk_src_file.snap%s" % (tmp_dir, count)
                        snapshot_external_disks.append(new_file)
                        new_attrs.update({'file': new_file})
                        hosts = None

                new_src_dict = {"attrs": new_attrs}
                if hosts:
                    new_src_dict.update({"hosts": hosts})
                disk_xml.source = disk_xml.new_disk_source(**new_src_dict)

                new_disks.append(disk_xml)

            snap_xml.set_disks(new_disks)
            snapshot_xml_path = snap_xml.xml
            logging.debug("The snapshot xml is: %s" % snap_xml.xmltreefile)

            options = "--disk-only --xmlfile %s " % snapshot_xml_path

            snapshot_result = virsh.snapshot_create(
                vm_name, options, debug=True)

            if snapshot_result.exit_status != 0:
                test.fail(snapshot_result.stderr)

            # Create a file flag in VM after each snapshot
            flag_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"),
                                                    dir="/tmp")
            file_path = flag_file.name
            flag_file.close()

            status, output = session.cmd_status_output("touch %s" % file_path)
            if status:
                test.fail("Touch file in vm failed. %s" % output)

            snapshot_flag_files.append(file_path)

    def get_first_disk_source():
        """
        Get disk source of first device

        :return: source path of the first disk device.
        """
        first_device = vm.get_first_disk_devices()
        first_disk_src = first_device['source']
        return first_disk_src

    def make_relative_path_backing_files():
        """
        Create backing chain files of relative path.

        Builds b/b.img -> c/c.img -> d/d.img, each referring to its backing
        file via a relative "../" path.

        :return: absolute path of top active file
        """
        first_disk_source = get_first_disk_source()
        basename = os.path.basename(first_disk_source)
        root_dir = os.path.dirname(first_disk_source)
        cmd = "mkdir -p %s" % os.path.join(root_dir, '{b..d}')
        ret = process.run(cmd, shell=True)
        libvirt.check_exit_status(ret)

        # Make three external relative path backing files.
        backing_file_dict = collections.OrderedDict()
        backing_file_dict["b"] = "../%s" % basename
        backing_file_dict["c"] = "../b/b.img"
        backing_file_dict["d"] = "../c/c.img"
        for key, value in list(backing_file_dict.items()):
            backing_file_path = os.path.join(root_dir, key)
            cmd = ("cd %s && qemu-img create -f qcow2 -o backing_file=%s,backing_fmt=qcow2 %s.img"
                   % (backing_file_path, value, key))
            ret = process.run(cmd, shell=True)
            libvirt.check_exit_status(ret)
        return os.path.join(backing_file_path, "d.img")

    def check_chain_backing_files(disk_src_file, expect_backing_file=False):
        """
        Check backing chain files of relative path after blockcommit.

        :param disk_src_file: first disk src file.
        :param expect_backing_file: whether it expect to have backing files.
        """
        first_disk_source = get_first_disk_source()
        # Validate source image need refer to original one after active blockcommit
        if not expect_backing_file and disk_src_file not in first_disk_source:
            test.fail("The disk image path:%s doesn't include the origin image: %s" % (first_disk_source, disk_src_file))
        # Validate source image doesn't have backing files after active blockcommit
        cmd = "qemu-img info %s --backing-chain" % first_disk_source
        if qemu_img_locking_feature_support:
            # qemu >= 2.10 locks images; -U (force share) is needed to
            # inspect an image that is still in use by the guest.
            cmd = "qemu-img info -U %s --backing-chain" % first_disk_source
        ret = process.run(cmd, shell=True).stdout_text.strip()
        if expect_backing_file:
            if 'backing file' not in ret:
                test.fail("The disk image doesn't have backing files")
            else:
                logging.debug("The actual qemu-img output:%s\n", ret)
        else:
            if 'backing file' in ret:
                test.fail("The disk image still have backing files")
            else:
                logging.debug("The actual qemu-img output:%s\n", ret)

    # MAIN TEST CODE ###
    # Process cartesian parameters
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    snapshot_take = int(params.get("snapshot_take", '0'))
    needs_agent = "yes" == params.get("needs_agent", "yes")
    replace_vm_disk = "yes" == params.get("replace_vm_disk", "no")
    snap_in_mirror = "yes" == params.get("snap_in_mirror", 'no')
    snap_in_mirror_err = "yes" == params.get("snap_in_mirror_err", 'no')
    bandwidth = params.get("bandwidth", None)
    with_timeout = ("yes" == params.get("with_timeout_option", "no"))
    status_error = ("yes" == params.get("status_error", "no"))
    base_option = params.get("base_option", None)
    keep_relative = "yes" == params.get("keep_relative", 'no')
    virsh_dargs = {'debug': True}

    # Check whether qemu-img need add -U suboption since locking feature was added afterwards qemu-2.10
    qemu_img_locking_feature_support = libvirt_storage.check_qemu_image_lock_support()
    backing_file_relative_path = "yes" == params.get("backing_file_relative_path", "no")

    # Process domain disk device parameters
    disk_type = params.get("disk_type")
    disk_target = params.get("disk_target", 'vda')
    disk_src_protocol = params.get("disk_source_protocol")
    restart_tgtd = params.get("restart_tgtd", "no")
    vol_name = params.get("vol_name")
    tmp_dir = data_dir.get_tmp_dir()
    pool_name = params.get("pool_name", "gluster-pool")
    brick_path = os.path.join(tmp_dir, pool_name)

    # A backup of original vm
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    logging.debug("original xml is %s", vmxml_backup)

    # Abort the test if there are snapshots already
    existing_snaps = virsh.snapshot_list(vm_name)
    if len(existing_snaps) != 0:
        test.fail("There are snapshots created for %s already" % vm_name)

    snapshot_external_disks = []
    # Prepare a blank params to confirm if delete the configure at the end of the test
    ceph_cfg = ""
    try:
        if disk_src_protocol == 'iscsi' and disk_type == 'network':
            if not libvirt_version.version_compare(1, 0, 4):
                test.cancel("'iscsi' disk doesn't support in"
                            " current libvirt version.")

        if disk_src_protocol == 'gluster':
            if not libvirt_version.version_compare(1, 2, 7):
                test.cancel("Snapshot on glusterfs not"
                            " support in current "
                            "version. Check more info "
                            " with https://bugzilla.re"
                            "dhat.com/show_bug.cgi?id="
                            "1017289")

        # Set vm xml and guest agent
        if replace_vm_disk:
            if disk_src_protocol == "rbd" and disk_type == "network":
                src_host = params.get("disk_source_host", "EXAMPLE_HOSTS")
                mon_host = params.get("mon_host", "EXAMPLE_MON_HOST")
                # Create config file if it doesn't exist
                ceph_cfg = ceph.create_config_file(mon_host)
                if src_host.count("EXAMPLE") or mon_host.count("EXAMPLE"):
                    test.cancel("Please provide ceph host first.")
            if backing_file_relative_path:
                if vm.is_alive():
                    vm.destroy(gracefully=False)
                first_src_file = get_first_disk_source()
                blk_source_image = os.path.basename(first_src_file)
                blk_source_folder = os.path.dirname(first_src_file)
                replace_disk_image = make_relative_path_backing_files()
                params.update({'disk_source_name': replace_disk_image,
                               'disk_type': 'file',
                               'disk_src_protocol': 'file'})
                vm.start()
            libvirt.set_vm_disk(vm, params, tmp_dir)

        if needs_agent:
            vm.prepare_guest_agent()

        # The first disk is supposed to include OS
        # We will perform blockpull operation for it.
        first_disk = vm.get_first_disk_devices()
        blk_source = first_disk['source']
        blk_target = first_disk['target']
        snapshot_flag_files = []

        # get a vm session before snapshot
        session = vm.wait_for_login()
        # do snapshot
        make_disk_snapshot(snapshot_take)

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug("The domain xml after snapshot is %s" % vmxml)

        # snapshot src file list
        snap_src_lst = [blk_source]
        snap_src_lst += snapshot_external_disks

        if snap_in_mirror:
            blockpull_options = "--bandwidth 1"
        else:
            blockpull_options = "--wait --verbose"

        if with_timeout:
            blockpull_options += " --timeout 1"

        if bandwidth:
            blockpull_options += " --bandwidth %s" % bandwidth

        if base_option == "async":
            blockpull_options += " --async"

        base_image = None
        base_index = None
        if (libvirt_version.version_compare(1, 2, 4) or
                disk_src_protocol == 'gluster'):
            # For libvirt is older version than 1.2.4 or source protocol is gluster
            # there are various base image,which depends on base option:shallow,base,top respectively
            if base_option == "shallow":
                base_index = 1
                base_image = "%s[%s]" % (disk_target, base_index)
            elif base_option == "base":
                base_index = 2
                base_image = "%s[%s]" % (disk_target, base_index)
            elif base_option == "top":
                base_index = 0
                base_image = "%s[%s]" % (disk_target, base_index)
        else:
            if base_option == "shallow":
                base_image = snap_src_lst[3]
            elif base_option == "base":
                base_image = snap_src_lst[2]
            elif base_option == "top":
                base_image = snap_src_lst[4]

        if base_option and base_image:
            blockpull_options += " --base %s" % base_image

        if keep_relative:
            blockpull_options += " --keep-relative"

        if backing_file_relative_path:
            # Use block commit to shorten previous snapshots.
            blockcommit_options = " --active --verbose --shallow --pivot --keep-relative"
            for count in range(1, snapshot_take + 1):
                res = virsh.blockcommit(vm_name, blk_target,
                                        blockcommit_options, **virsh_dargs)
                libvirt.check_exit_status(res, status_error)

            # Use block pull with --keep-relative flag,and reset base_index to 2.
            base_index = 2
            for count in range(1, snapshot_take):
                # If block pull operations are more than or equal to 3,it need reset base_index to 1.
                if count >= 3:
                    base_index = 1
                base_image = "%s[%s]" % (disk_target, base_index)
                blockpull_options = " --wait --verbose --base %s --keep-relative" % base_image
                res = virsh.blockpull(vm_name, blk_target,
                                      blockpull_options, **virsh_dargs)
                libvirt.check_exit_status(res, status_error)
            # Check final backing chain files.
            check_chain_backing_files(blk_source_image, True)
            return

        # Run test case
        result = virsh.blockpull(vm_name, blk_target,
                                 blockpull_options, **virsh_dargs)
        status = result.exit_status

        # If pull job aborted as timeout, the exit status is different
        # on RHEL6(0) and RHEL7(1)
        if with_timeout and 'Pull aborted' in result.stdout.strip():
            if libvirt_version.version_compare(1, 1, 1):
                status_error = True
            else:
                status_error = False

        # Check status_error
        libvirt.check_exit_status(result, status_error)

        if not status and not with_timeout:
            if snap_in_mirror:
                snap_mirror_path = "%s/snap_mirror" % tmp_dir
                snap_options = "--diskspec vda,snapshot=external,"
                snap_options += "file=%s --disk-only" % snap_mirror_path
                snapshot_external_disks.append(snap_mirror_path)
                ret = virsh.snapshot_create_as(vm_name, snap_options,
                                               ignore_status=True,
                                               debug=True)
                libvirt.check_exit_status(ret, snap_in_mirror_err)
                return

            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            disks = vmxml.devices.by_device_tag('disk')
            for disk in disks:
                if disk.target['dev'] != blk_target:
                    continue
                else:
                    disk_xml = disk.xmltreefile
                    break

            logging.debug("after pull the disk xml is: %s"
                          % disk_xml)
            if libvirt_version.version_compare(1, 2, 4):
                err_msg = "Domain image backing chain check failed"
                if not base_option or "async" in base_option:
                    chain_lst = snap_src_lst[-1:]
                    ret = check_chain_xml(disk_xml, chain_lst)
                    if not ret:
                        test.fail(err_msg)
                # BUGFIX: the original condition was
                # 'elif "base" or "shallow" in base_option:' which is always
                # true because the literal "base" is truthy; each substring
                # must be tested against base_option separately.
                elif "base" in base_option or "shallow" in base_option:
                    chain_lst = snap_src_lst[::-1]
                    if not base_index and base_image:
                        base_index = chain_lst.index(base_image)
                    val_tmp = []
                    for i in range(1, base_index):
                        val_tmp.append(chain_lst[i])
                    for i in val_tmp:
                        chain_lst.remove(i)
                    ret = check_chain_xml(disk_xml, chain_lst)
                    if not ret:
                        test.fail(err_msg)

            # If base image is the top layer of snapshot chain,
            # virsh blockpull should fail, return directly
            if base_option == "top":
                return

            # Check flag files
            for flag in snapshot_flag_files:
                status, output = session.cmd_status_output("cat %s" % flag)
                if status:
                    test.fail("blockpull failed: %s" % output)

    finally:
        # Remove ceph configure file if created
        if ceph_cfg:
            os.remove(ceph_cfg)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Recover xml of vm.
        vmxml_backup.sync("--snapshots-metadata")

        # Clean ceph image if used in test
        if 'mon_host' in locals():
            if utils_package.package_install(["ceph-common"]):
                disk_source_name = params.get("disk_source_name")
                cmd = ("rbd -m {0} info {1} && rbd -m {0} rm "
                       "{1}".format(mon_host, disk_source_name))
                cmd_result = process.run(cmd, ignore_status=True, shell=True)
                logging.debug("result of rbd removal: %s", cmd_result)
            else:
                logging.debug('Failed to install ceph-common to clean ceph.')

        if not disk_src_protocol or disk_src_protocol != 'gluster':
            for disk in snapshot_external_disks:
                if os.path.exists(disk):
                    os.remove(disk)

        if backing_file_relative_path:
            libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)
            process.run("cd %s && rm -rf b c d" % blk_source_folder,
                        shell=True)

        libvirtd = utils_libvirtd.Libvirtd()
        if disk_src_protocol == 'iscsi':
            libvirt.setup_or_cleanup_iscsi(is_setup=False,
                                           restart_tgtd=restart_tgtd)
        elif disk_src_protocol == 'gluster':
            logging.info("clean gluster env")
            libvirt.setup_or_cleanup_gluster(False, brick_path=brick_path,
                                             **params)
            libvirtd.restart()
        elif disk_src_protocol == 'netfs':
            restore_selinux = params.get('selinux_status_bak')
            libvirt.setup_or_cleanup_nfs(is_setup=False,
                                         restore_selinux=restore_selinux)
for pre_line, aft_line in elems: if pre_line.lstrip().strip() != aft_line.lstrip().strip(): if pre_line is not None: logging.debug("diff before='%s'", pre_line.lstrip().strip()) if aft_line is not None: logging.debug("diff after='%s'", aft_line.lstrip().strip()) raise error.TestFail("Failed xml before/after comparison") time.sleep(20) snapshot_oldlist = None try: # Create disk snapshot before all to make the origin image clean logging.debug("Create snap-temp --disk-only") ret = virsh.snapshot_create_as(vm_name, "snap-temp --disk-only") if ret.exit_status != 0: raise error.TestFail("Fail to create temp snap, Error: %s" % ret.stderr.strip()) # Create snapshots for opt in [snap_create_opt1, snap_create_opt2]: logging.debug("...use option %s", opt) result = virsh.snapshot_create_as(vm_name, opt) if result.exit_status: raise error.TestFail("Failed to create snapshot. Error:%s." % result.stderr.strip()) time.sleep(1) snapshot_oldlist = virsh.snapshot_list(vm_name)
if addr_option != "": d = addr_option.split('=') addr_dict.update({d[0].strip(): d[1].strip()}) if addr_dict: hub_obj.address = hub_obj.new_hub_address( **{"attrs": addr_dict}) vmxml.add_device(hub_obj) usb_devices.update({"hub": addr_dict}) # After compose the disk xml, redefine the VM xml. vmxml.sync() # Test snapshot before vm start. if test_disk_snapshot: if snapshot_before_start: ret = virsh.snapshot_create_as(vm_name, "s1 %s" % snapshot_option) libvirt.check_exit_status(ret, snapshot_error) # Start the VM. vm.start() if status_error: raise error.TestFail("VM started unexpectedly") # Hotplug the disks. if device_at_dt_disk: for i in range(len(disks)): attach_option = "" if len(device_attach_option) > i: attach_option = device_attach_option[i] ret = virsh.attach_disk(vm_name, disks[i]["source"], device_targets[i], attach_option)
def run(test, params, env):
    """
    Test disk-only snapshot on a VM whose extra disk is backed by a fc lun.

    1. prepare a fc lun with one of following methods
        - create a scsi pool&vol
        - create a vhba
    2. prepare the virtual disk xml, as one of following
        - source = /dev/disk/by-path
        - source = /dev/mapper/mpathX
        - source = pool&vol format
    3. start a vm with above disk as vdb
    4. create disk-only snapshot of vdb
    5. check the snapshot-list and snapshot file's existence
    6. mount vdb and touch file to it
    7. revert the snapshot and check file's existence
    8. delete snapshot
    9. cleanup env.

    :param test: avocado test object.
    :param params: cartesian config parameters for this variant.
    :param env: test environment holding the VM object.
    """
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    wwpn = params.get("wwpn", "WWPN_EXAMPLE")
    wwnn = params.get("wwnn", "WWNN_EXAMPLE")
    disk_device = params.get("disk_device", "disk")
    disk_type = params.get("disk_type", "file")
    disk_size = params.get("disk_size", "100M")
    device_target = params.get("device_target", "vdb")
    driver_name = params.get("driver_name", "qemu")
    driver_type = params.get("driver_type", "raw")
    target_bus = params.get("target_bus", "virtio")
    vd_format = params.get("vd_format", "")
    snapshot_dir = params.get("snapshot_dir", "/tmp")
    snapshot_name = params.get("snapshot_name", "s1")
    pool_name = params.get("pool_name", "")
    pool_target = params.get("pool_target", "/dev")
    snapshot_disk_only = "yes" == params.get("snapshot_disk_only", "no")
    new_vhbas = []
    current_vhbas = []
    new_vhba = []
    path_to_blk = ""
    lun_sl = []
    new_disk = ""
    pool_ins = None
    old_mpath_conf = ""
    mpath_conf_path = "/etc/multipath.conf"
    original_mpath_conf_exist = os.path.exists(mpath_conf_path)

    vm = env.get_vm(vm_name)
    online_hbas = utils_npiv.find_hbas("hba")
    if not online_hbas:
        raise exceptions.TestSkipError("There is no online hba cards.")
    old_mpath_conf = utils_npiv.prepare_multipath_conf(conf_path=mpath_conf_path,
                                                       replace_existing=True)
    first_online_hba = online_hbas[0]
    old_vhbas = utils_npiv.find_hbas("vhba")
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    virt_vm = libvirt_vm.VM(vm_name, vm.params, vm.root_dir,
                            vm.address_cache)
    old_disks = virt_vm.get_disks()

    if vm.is_alive():
        vm.destroy(gracefully=False)
    if pool_name:
        pool_ins = libvirt_storage.StoragePool()
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()
    try:
        # prepare a fc lun
        if vd_format in ['scsi_vol']:
            if pool_ins.pool_exists(pool_name):
                raise exceptions.TestFail("Pool %s already exist" % pool_name)
            prepare_scsi_pool(pool_name, wwnn, wwpn,
                              first_online_hba, pool_target)
            utils_misc.wait_for(lambda: utils_npiv.is_vhbas_added(old_vhbas),
                                timeout=_TIMEOUT)
            if not utils_npiv.is_vhbas_added(old_vhbas):
                raise exceptions.TestFail("vHBA not successfully created")
            current_vhbas = utils_npiv.find_hbas("vhba")
            new_vhba = list(set(current_vhbas).difference(
                set(old_vhbas)))[0]
            new_vhbas.append(new_vhba)
            # keep only the digits: vhba name -> scsi bus number
            new_vhba_scsibus = re.sub(r"\D", "", new_vhba)
            utils_misc.wait_for(lambda: get_blks_by_scsi(new_vhba_scsibus),
                                timeout=_TIMEOUT)
            new_blks = get_blks_by_scsi(new_vhba_scsibus)
            if not new_blks:
                # BUGFIX: format args were passed as extra exception args
                # instead of %-formatted into the message
                raise exceptions.TestFail("block device not found with scsi_%s"
                                          % new_vhba_scsibus)
            first_blk_dev = new_blks[0]
            utils_misc.wait_for(
                lambda: get_symbols_by_blk(first_blk_dev),
                timeout=_TIMEOUT)
            lun_sl = get_symbols_by_blk(first_blk_dev)
            if not lun_sl:
                raise exceptions.TestFail("lun symbolic links not found under "
                                          "/dev/disk/by-path/ for blk dev %s" %
                                          first_blk_dev)
            lun_dev = lun_sl[0]
            path_to_blk = os.path.join(_BY_PATH_DIR, lun_dev)
        elif vd_format in ['mpath', 'by_path']:
            old_mpath_devs = utils_npiv.find_mpath_devs()
            new_vhba = utils_npiv.nodedev_create_from_xml(
                {"nodedev_parent": first_online_hba,
                 "scsi_wwnn": wwnn,
                 "scsi_wwpn": wwpn})
            utils_misc.wait_for(
                lambda: utils_npiv.is_vhbas_added(old_vhbas),
                timeout=_TIMEOUT*2)
            if not new_vhba:
                raise exceptions.TestFail("vHBA not successfully generated.")
            new_vhbas.append(new_vhba)
            if vd_format == "mpath":
                utils_misc.wait_for(
                    lambda: utils_npiv.is_mpath_devs_added(old_mpath_devs),
                    timeout=_TIMEOUT*5)
                if not utils_npiv.is_mpath_devs_added(old_mpath_devs):
                    raise exceptions.TestFail("mpath dev not generated.")
                cur_mpath_devs = utils_npiv.find_mpath_devs()
                new_mpath_devs = list(set(cur_mpath_devs).difference(
                    set(old_mpath_devs)))
                logging.debug("The newly added mpath dev is: %s",
                              new_mpath_devs)
                path_to_blk = "/dev/mapper/" + new_mpath_devs[0]
            elif vd_format == "by_path":
                new_vhba_scsibus = re.sub(r"\D", "", new_vhba)
                utils_misc.wait_for(lambda: get_blks_by_scsi(new_vhba_scsibus),
                                    timeout=_TIMEOUT)
                new_blks = get_blks_by_scsi(new_vhba_scsibus)
                if not new_blks:
                    raise exceptions.TestFail("blk dev not found with scsi_%s"
                                              % new_vhba_scsibus)
                first_blk_dev = new_blks[0]
                utils_misc.wait_for(
                    lambda: get_symbols_by_blk(first_blk_dev),
                    timeout=_TIMEOUT)
                lun_sl = get_symbols_by_blk(first_blk_dev)
                if not lun_sl:
                    raise exceptions.TestFail("lun symbolic links not found in "
                                              "/dev/disk/by-path/ for %s" %
                                              first_blk_dev)
                lun_dev = lun_sl[0]
                path_to_blk = os.path.join(_BY_PATH_DIR, lun_dev)
            else:
                pass
        else:
            raise exceptions.TestSkipError("Not provided how to pass"
                                           "virtual disk to VM.")

        # create qcow2 file on the block device with specified size
        if path_to_blk:
            cmd = "qemu-img create -f qcow2 %s %s" % (path_to_blk, disk_size)
            try:
                process.run(cmd, shell=True)
            # BUGFIX: the avocado exception class is CmdError;
            # 'process.cmdError' does not exist and would raise AttributeError
            except process.CmdError as detail:
                raise exceptions.TestFail("Fail to create qcow2 on blk dev: %s"
                                          % detail)
        else:
            raise exceptions.TestFail("Don't have a valid path to blk dev.")

        # prepare disk xml
        if "vol" in vd_format:
            vol_list = utlv.get_vol_list(pool_name, vol_check=True,
                                         timeout=_TIMEOUT*3)
            test_vol = list(vol_list.keys())[0]
            disk_params = {'type_name': disk_type,
                           'target_dev': device_target,
                           'target_bus': target_bus,
                           'source_pool': pool_name,
                           'source_volume': test_vol,
                           'driver_type': driver_type}
        else:
            disk_params = {'type_name': disk_type,
                           'device': disk_device,
                           'driver_name': driver_name,
                           'driver_type': driver_type,
                           'source_file': path_to_blk,
                           'target_dev': device_target,
                           'target_bus': target_bus}
        if vm.is_alive():
            vm.destroy(gracefully=False)
        new_disk = disk.Disk()
        new_disk.xml = open(utlv.create_disk_xml(disk_params)).read()

        # start vm with the virtual disk
        # NOTE(review): relies on VMXMLDevices.append returning the device
        # list (list.append would return None) — confirm against virttest
        vmxml.devices = vmxml.devices.append(new_disk)
        vmxml.sync()
        vm.start()
        session = vm.wait_for_login()
        cur_disks = virt_vm.get_disks()
        mount_disk = "".join(list(set(old_disks) ^ set(cur_disks)))

        # mkfs and mount disk in vm, create a file on that disk.
        if not mount_disk:
            logging.debug("old_disk: %s, new_disk: %s", old_disks, cur_disks)
            raise exceptions.TestFail("No new disk found in vm.")
        mkfs_and_mount(session, mount_disk)
        create_file_in_vm(session, "/mnt/before_snapshot.txt", "before")

        # virsh snapshot-create-as vm s --disk-only --diskspec vda,file=path
        if snapshot_disk_only:
            vm_blks = list(vm.get_disk_devices().keys())
            options = "%s --disk-only" % snapshot_name
            for vm_blk in vm_blks:
                snapshot_file = snapshot_dir + "/" + vm_blk + "." + snapshot_name
                if os.path.exists(snapshot_file):
                    os.remove(snapshot_file)
                options = options + " --diskspec %s,file=%s" % (vm_blk,
                                                                snapshot_file)
        else:
            options = snapshot_name
        utlv.check_exit_status(virsh.snapshot_create_as(vm_name, options))

        # check virsh snapshot-list
        logging.debug("Running: snapshot-list %s", vm_name)
        snapshot_list = virsh.snapshot_list(vm_name)
        logging.debug("snapshot list is: %s", snapshot_list)
        if not snapshot_list:
            raise exceptions.TestFail("snapshots not found after creation.")

        # snapshot-revert doesn't support external snapshot for now. so
        # only check this with internal snapshot.
        if not snapshot_disk_only:
            create_file_in_vm(session, "/mnt/after_snapshot.txt", "after")
            logging.debug("Running: snapshot-revert %s %s",
                          vm_name, snapshot_name)
            utlv.check_exit_status(virsh.snapshot_revert(vm_name,
                                                         snapshot_name))
            session = vm.wait_for_login()
            file_existence, file_content = get_file_in_vm(
                session, "/mnt/after_snapshot.txt")
            logging.debug("file exist = %s, file content = %s",
                          file_existence, file_content)
            if file_existence:
                raise exceptions.TestFail("The file created "
                                          "after snapshot still exists.")
            file_existence, file_content = get_file_in_vm(
                session, "/mnt/before_snapshot.txt")
            logging.debug("file exist = %s, file content = %s",
                          file_existence, file_content)
            if ((not file_existence) or (file_content.strip() != "before")):
                raise exceptions.TestFail("The file created "
                                          "before snapshot is lost.")
        # delete snapshots
        # if diskonly, delete --metadata and remove files
        # if not diskonly, delete snapshot
        if snapshot_disk_only:
            options = "--metadata"
        else:
            options = ""
        for snap in snapshot_list:
            logging.debug("deleting snapshot %s with options %s",
                          snap, options)
            result = virsh.snapshot_delete(vm_name, snap, options)
            logging.debug("result of snapshot-delete: %s",
                          result.stdout.strip())
            if snapshot_disk_only:
                vm_blks = list(vm.get_disk_devices().keys())
                for vm_blk in vm_blks:
                    snapshot_file = snapshot_dir + "/" + vm_blk + "." + snap
                    if os.path.exists(snapshot_file):
                        os.remove(snapshot_file)
        snapshot_list = virsh.snapshot_list(vm_name)
        if snapshot_list:
            raise exceptions.TestFail("Snapshot not deleted: %s"
                                      % snapshot_list)
    except Exception as detail:
        raise exceptions.TestFail("exception happens: %s" % detail)
    finally:
        logging.debug("Start to clean up env...")
        vmxml_backup.sync()
        if pool_ins and pool_ins.pool_exists(pool_name):
            virsh.pool_destroy(pool_name)
        for new_vhba in new_vhbas:
            virsh.nodedev_destroy(new_vhba)
        utils_npiv.restart_multipathd()
        if old_mpath_conf:
            utils_npiv.prepare_multipath_conf(conf_path=mpath_conf_path,
                                              conf_content=old_mpath_conf,
                                              replace_existing=True)
        if not original_mpath_conf_exist and os.path.exists(mpath_conf_path):
            os.remove(mpath_conf_path)
def run(test, params, env): """ Test startupPolicy for CD-ROM/floppy/Volume disks. Steps: 1. Prepare disk media image. 2. Setup startupPolicy for a disk. 3. Start the domain. 4. Save the domain. 5. Remove the disk source file and restore the domain. 6. Update startupPolicy for a disk. 7. Destroy the domain. """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) startup_policy = params.get("policy") def create_iscsi_pool(): """ Setup iSCSI target,and create one iSCSI pool. """ libvirt.setup_or_cleanup_iscsi(is_setup=False) iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(is_setup=True, is_login=False, image_size='1G', chap_user="", chap_passwd="", portal_ip=disk_src_host) # Define an iSCSI pool xml to create it pool_src_xml = pool_xml.SourceXML() pool_src_xml.host_name = pool_src_host pool_src_xml.device_path = iscsi_target poolxml = pool_xml.PoolXML(pool_type=pool_type) poolxml.name = pool_name poolxml.set_source(pool_src_xml) poolxml.target_path = "/dev/disk/by-path" # Create iSCSI pool. virsh.pool_destroy(pool_name, **virsh_dargs) cmd_result = virsh.pool_create(poolxml.xml, **virsh_dargs) libvirt.check_exit_status(cmd_result) def create_volume(pvt, created_vol_name=None): """ Create iSCSI volume. 
:param pvt: PoolVolumeTest object :param created_vol_name: Created volume name """ try: if pool_type == "iscsi": create_iscsi_pool() else: pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image) pvt.pre_vol(vol_name=created_vol_name, vol_format=vol_format, capacity=capacity, allocation=None, pool_name=pool_name) except Exception as pool_exception: pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image, **virsh_dargs) test.error("Error occurred when prepare" + "pool xml with message %s:\n" % str(pool_exception)) def get_vol(): """Get the volume info""" # Refresh the pool cmd_result = virsh.pool_refresh(pool_name) libvirt.check_exit_status(cmd_result) # Get volume name cmd_result = virsh.vol_list(pool_name, **virsh_dargs) libvirt.check_exit_status(cmd_result) vol_list = [] vol_list = re.findall(r"(\S+)\ +(\S+)", str(cmd_result.stdout.strip())) try: return vol_list[1] except IndexError: return None # Wait for a while so that we can get the volume info vol_info = utils_misc.wait_for(get_vol, 10) if vol_info: tmp_vol_name, tmp_vol_path = vol_info else: test.error("Failed to get volume info") process.run('qemu-img create -f qcow2 %s %s' % (tmp_vol_path, '100M'), shell=True) return vol_info def check_disk_source(vm_name, target_dev, expect_value): """ Check the disk source: file and startupPolicy. 
:param vm_name: Domain name :param target_dev: Disk's target device :param expect_value: Expect value of source file and source startupPolicy """ logging.debug("Expect source file is '%s'", expect_value[0]) logging.debug("Expect source startupPolicy is '%s'", expect_value[1]) vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disks = vmxml.get_disk_all() source_value = [] try: disk_source = disks[target_dev].find('source') source_value.append(disk_source.get('file')) source_value.append(disk_source.get('startupPolicy')) except KeyError: test.error("No %s in domain %s" % (target_dev, vm_name)) logging.debug("Actual source file is '%s'", source_value[0]) logging.debug("Actual source startupPolicy is '%s'", source_value[1]) if source_value == expect_value: logging.debug("Domain disk XML check pass") else: test.error("Domain disk XML check fail") def create_disk_xml(): """ Create a disk xml file for attaching to a domain. """ if disk_type == "file": process.run("qemu-img create %s %s" % (media_file, image_size), shell=True) disk_params = {'device_type': device_type, 'type_name': disk_type, 'target_dev': target_dev, 'target_bus': target_bus} if disk_type == "file": disk_params_src = {'source_protocol': "file", 'source_file': media_file, 'source_startupPolicy': startup_policy} elif disk_type == "volume": disk_params_src = {'source_pool': pool_name, 'source_volume': vol_name, 'driver_type': 'qcow2', 'source_startupPolicy': startup_policy} if pool_type == "iscsi": disk_params_src.update({'source_mode': "host"}) disk_params.update(disk_params_src) disk_xml = libvirt.create_disk_xml(disk_params) shutil.copyfile(disk_xml, disk_xml_file) return disk_xml def check_in_vm(old_parts): """ Check mount/read/write disk in VM. :param old_parts: pre-operated partitions in VM. :return: True if check successfully. 
""" try: session = vm.wait_for_login() new_parts = libvirt.get_parts_list(session) logging.debug("new parted:%s", new_parts) added_parts = list(set(new_parts).difference(set(old_parts))) logging.info("Added parts:%s", added_parts) if len(added_parts) != 1: logging.error("The number of new partitions is invalid in VM") return False added_part = added_parts[0] if not added_part: logging.error("Can't see added partition in VM") return False if 'sr' not in added_part and 'fd' not in added_part: cmd = ("fdisk -l /dev/{0} && mkfs.ext4 -F /dev/{0} && " "mkdir -p test && mount /dev/{0} test && echo" " teststring > test/testfile && umount test" .format(added_part)) status, output = session.cmd_status_output(cmd) logging.info("Check disk operation in VM:\n%s", output) if status != 0: return False return True except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e: logging.error(str(e)) return False def check_policy_update(origin_policy, policy_list, xml_policy_file, device_type, flag_str): """ Check updated policy after executing virsh update-device. :param origin_policy: the inherit startup policy value. :param policy_list: updated policy list. :param xml_policy_file: xml file for startupPolicy. :param device_type: device type,cdrom or disk.,etc :param flag_str: it can be --config,--live and --persistent. 
""" for policy in policy_list: xmltreefile = XMLTreeFile(xml_policy_file) try: policy_item = xmltreefile.find('/source') policy_item.set('startupPolicy', policy) except AttributeError as elem_attr: test.error("Fail to find startupPolicy attribute.%s", str(elem_attr)) xmltreefile.write(xml_policy_file, encoding="UTF-8") ret = virsh.update_device(vm_name, xml_policy_file, flagstr=flag_str, debug=True) if all([device_type == "disk", policy == "requisite"]): libvirt.check_exit_status(ret, True) return else: libvirt.check_exit_status(ret) def check_policy_value(active_policy, inactive_policy): """ Check policy value in dumpxml with active or inactive option :param active_policy: active policy attribute value :param inactive_policy: inactive policy attribute value """ vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disk_list = vmxml.devices.by_device_tag("disk") disk = disk_list[len(disk_list)-1] if not active_policy == disk.source.attrs["startupPolicy"]: test.error("Actual policy:%s in active state is not equal to expected:%s" % (active_policy, disk.source.attrs["startupPolicy"])) vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) disk_list = vmxml.devices.by_device_tag("disk") disk = disk_list[len(disk_list)-1] if not inactive_policy == disk.source.attrs["startupPolicy"]: test.error("Actual policy:%s in inactive state is not equal to expected: %s" % (inactive_policy, disk.source.attrs["startupPolicy"])) if flag_str == "--live": check_policy_value(policy, origin_policy) elif flag_str == "--config": check_policy_value(origin_policy, policy) elif flag_str == "--persistent": check_policy_value(policy, policy) def check_source_update(xml_policy_file): """ Update source and policy at the same time,then check those changes. :param xml_policy_file: VM xml policy file """ xmltreefile = XMLTreeFile(xml_policy_file) policy_item = xmltreefile.find('/source') def configure_startup_policy(update=False, policy='optional'): """ Configure startupPolicy attribute value. 
:param update: update value or not :param policy: policy value :return: flag_option and boolean value """ if update: del policy_item.attrib["startupPolicy"] else: policy_item.set("startupPolicy", policy) flag_option = "--live" xmltreefile.write(xml_policy_file, encoding="UTF-8") return flag_option, False # Update source and startUpPolicy attribute value. def update_source_policy(update=True, policy='optional'): """ Update startupPolicy source value. :param update: update value or not :param policy: policy value :return: flag_option and boolean value """ source_file = policy_item.get('file') if update: new_source_file = source_file+".empty" else: new_source_file = source_file+".new" shutil.copyfile(source_file, new_source_file) policy_item.set("file", new_source_file) policy_item.set("startupPolicy", policy) flag_option = "--persistent" xmltreefile.write(xml_policy_file, encoding="UTF-8") return flag_option, False function_list = [configure_startup_policy, update_source_policy, configure_startup_policy, update_source_policy] function_parameter = [False, False, True, True] # Loop all above scenarios to update device. for index in list(range(len(function_list))): try: func = function_list[index] para = function_parameter[index] flag_option, update_error = func(para) ret = virsh.update_device(vm_name, xml_policy_file, flagstr=flag_option, debug=True) libvirt.check_exit_status(ret, expect_error=update_error) except AttributeError as elem_attr: test.error("Fail to remove startupPolicy attribute:%s" % str(elem_attr)) except Exception as update_device_exception: test.error("Fail to update device:%s" % str(update_device_exception)) finally: source_file = policy_item.get('file') new_source_file = source_file+".new" if os.path.exists(new_source_file): os.remove(new_source_file) def rename_file(source_file, target_file, revert=False): """ Rename a file or revert it. :param source_file: The source file name. :param target_file: The target file name. 
:param revert: It can be True or False. """ try: if not revert: os.rename(source_file, target_file) logging.debug("Rename %s to %s", source_file, target_file) else: os.rename(target_file, source_file) logging.debug("Rename %s to %s", target_file, source_file) except OSError as err: test.fail("Rename image failed: %s" % str(err)) # Back VM XML vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) # Start VM and get all partitions in VM. if vm.is_dead(): vm.start() session = vm.wait_for_login() old_parts = libvirt.get_parts_list(session) session.close() vm.destroy(gracefully=False) # Get start,restore configuration parameters. start_error = "yes" == params.get("start_error", "no") restore_error = "yes" == params.get("restore_error", "no") virsh_dargs = {'debug': True, 'ignore_status': True} attach_option = params.get("attach_option") # Create disk xml and attach it. device_type = params.get("device_type") disk_type = params.get("disk_type", "network") disk_src_host = params.get("disk_source_host", "127.0.0.1") target_dev = params.get("target_dev") target_bus = params.get("disk_target_bus", "virtio") image_size = params.get("image_size", "1.44M") emulated_image = "emulated-iscsi" # Storage pool and volume related paramters. pool_name = params.get("pool_name", "iscsi_pool") pool_type = params.get("pool_type") pool_target = params.get("pool_target", "/dev/disk/by-path") pool_src_host = params.get("pool_source_host", "127.0.0.1") vol_name = params.get("volume_name") capacity = params.get("volume_size", "1048576") vol_format = params.get("volume_format") # Source file parameters. media_name = params.get("media_name") media_file = os.path.join(data_dir.get_tmp_dir(), media_name) media_file_new = media_file + ".new" save_file = os.path.join(data_dir.get_tmp_dir(), "vm.save") snapshot_name = "s1" # Policy related paramters. 
disk_xml_file = os.path.join(data_dir.get_tmp_dir(), "attach_disk.xml") disk_xml_policy_file = os.path.join(data_dir.get_tmp_dir(), "attach_policy_disk.xml") update_policy = "yes" == params.get("update_policy", "no") policy_only = "yes" == params.get("policy_only", "no") update_policy_list = params.get("update_policy_list").split() expect_value = [None, startup_policy] try: if disk_type == "volume": pvt = libvirt.PoolVolumeTest(test, params) vol_name, vol_path = create_volume(pvt, vol_name) vol_path_new = vol_path + ".new" # Create disk xml. create_disk_xml() if vm.is_alive(): vm.destroy() try: # Backup disk xml file for policy update if update_policy=True. if update_policy: shutil.copyfile(disk_xml_file, disk_xml_policy_file) result = virsh.attach_device(domainarg=vm_name, filearg=disk_xml_file, flagstr="--config", **virsh_dargs) # For iSCSI pool volume,startupPolicy attribute is not valid for it. # Moreover,setting disk 'requisite' is allowed only for cdrom or floppy. if pool_type == "iscsi" or all([device_type == "disk", startup_policy == "requisite"]): libvirt.check_exit_status(result, expect_error=True) return else: libvirt.check_exit_status(result, expect_error=False) except Exception as attach_device_exception: logging.debug("Attach device throws exception:%s", str(attach_device_exception)) os.remove(media_file) test.error("Attach %s fail" % device_type) # Check update policy operations. if disk_type == "file" and update_policy: vm.start() if policy_only: check_policy_update(startup_policy, update_policy_list, disk_xml_policy_file, device_type, attach_option) else: check_source_update(disk_xml_policy_file) elif disk_type == "file": # Step 1. Start domain and destroy it normally vm.start() vm.destroy() # Step 2. 
Remove the source_file then start the domain rename_file(media_file, media_file_new) result = virsh.start(vm_name, **virsh_dargs) libvirt.check_exit_status(result, expect_error=start_error) # For libvirt version >=2.0.0, feature is updated and startup policy attribute # can not exist alone without source protocol. if not start_error and not libvirt_version.version_compare(2, 0, 0): check_disk_source(vm_name, target_dev, expect_value) # Step 3. Move back the source file and start the domain(if needed). rename_file(media_file, media_file_new, revert=True) if not vm.is_alive(): vm.start() # Step 4. Save the domain normally, then remove the source file # and restore it back vm.save_to_file(save_file) rename_file(media_file, media_file_new) result = virsh.restore(save_file, **virsh_dargs) libvirt.check_exit_status(result, expect_error=restore_error) if not restore_error and not libvirt_version.version_compare(2, 0, 0): check_disk_source(vm_name, target_dev, expect_value) # Step 5. Move back the source file and restore the domain(if needed) rename_file(media_file, media_file_new, revert=True) if not vm.is_alive(): result = virsh.restore(save_file, **virsh_dargs) libvirt.check_exit_status(result, expect_error=False) elif disk_type == "volume": # Step 1. Start domain and destroy it normally. vm.start() # Step 1 Start VM successfully. if not check_in_vm(old_parts): test.fail("Check disk partitions in VM failed") # Step 2 Move the volume to other place, refresh the pool, then reboot the guest. rename_file(vol_path, vol_path_new) cmd_result = virsh.pool_refresh(pool_name) libvirt.check_exit_status(cmd_result) vm.destroy() result = virsh.start(vm_name, **virsh_dargs) libvirt.check_exit_status(result, expect_error=start_error) # Step 3 Move back the source file and start. 
rename_file(vol_path, vol_path_new, revert=True) cmd_result = virsh.pool_refresh(pool_name) libvirt.check_exit_status(cmd_result) if not vm.is_alive(): vm.start() # Step 4 Save the domain normally, then remove the source file,then restore domain. vm.save_to_file(save_file) rename_file(vol_path, vol_path_new) cmd_result = virsh.pool_refresh(pool_name) libvirt.check_exit_status(cmd_result) result = virsh.restore(save_file, **virsh_dargs) libvirt.check_exit_status(result, expect_error=restore_error) # Step 5, Create snapshot,move the source to other place,then revert snapshot. if device_type == "disk": rename_file(vol_path, vol_path_new, revert=True) cmd_result = virsh.pool_refresh(pool_name) libvirt.check_exit_status(cmd_result) if restore_error: result = virsh.restore(save_file, **virsh_dargs) libvirt.check_exit_status(result) ret = virsh.snapshot_create_as(vm_name, snapshot_name, **virsh_dargs) libvirt.check_exit_status(ret) rename_file(vol_path, vol_path_new) ret = virsh.snapshot_revert(vm_name, snapshot_name, **virsh_dargs) # Clean up snapshot. libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup) finally: # Recover VM. if vm.is_alive(): vm.destroy(gracefully=False) vmxml_backup.sync() if disk_type == "volume": pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image, **virsh_dargs) if os.path.exists(save_file): os.remove(save_file) if os.path.exists(disk_xml_file): os.remove(disk_xml_file) if os.path.exists(media_file): os.remove(media_file) if os.path.exists(disk_xml_policy_file): os.remove(disk_xml_policy_file)
def run(test, params, env):
    """
    Test command: snapshot-edit
    Test options: --current, --rename, --clone

    Creates snapshots, edits one of them interactively through
    "virsh snapshot-edit" (driving vi via an aexpect shell session), then
    verifies the edited description/name and the --current flag effect.

    :param test: avocado test object (fail/error reporting)
    :param params: test parameter dictionary
    :param env: test environment object (unused directly here)
    """
    vm_name = params.get("main_vm")
    status_error = params.get("status_error", "no")
    snap_desc = params.get("snapshot_edit_description")
    snap_cur = params.get("snapshot_edit_current", "")
    snap_opt = params.get("snapshot_edit_option", "")
    snap_name = params.get("snapshot_edit_snapname", "")
    snap_newname = params.get("snapshot_edit_newname", "new-snap")
    snap_create_opt1 = params.get("snapshot_create_option1", "")
    snap_create_opt2 = params.get("snapshot_create_option2", "")

    # Do xml backup for final recovery
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    def edit_snap_xml(dom_name, edit_opts, edit_cmd):
        """
        Edit domain snapshot xml interactively (vi inside a shell session).

        :param dom_name: name of domain
        :param edit_opts: snapshot-edit options
        :param edit_cmd: edit command list in interactive mode
        """
        session = aexpect.ShellSession("sudo -s")
        try:
            logging.debug("snapshot-edit options is: %s" % edit_opts)
            logging.debug("edit cmd is: %s" % edit_cmd)
            session.sendline("virsh snapshot-edit %s %s" % (dom_name, edit_opts))
            for i in edit_cmd:
                session.sendline(i)
            # Press ESC
            session.send('\x1b')
            # Save and quit
            session.send('ZZ')
            # Wait for the shell prompt to come back so the edit has
            # completed before we continue.
            remote.handle_prompts(session, None, None, r"[\#\$]\s*$")
            session.close()
            logging.info("Succeed to do snapshot edit")
        except (aexpect.ShellError, aexpect.ExpectError) as details:
            log = session.get_output()
            session.close()
            test.fail("Failed to do snapshot-edit: %s\n%s" % (details, log))

    def snap_xml_compare(pre_xml, after_xml):
        """
        Do xml compare when snapshot-edit have --clone or --name option

        :param pre_xml: snapshot xml before edit
        :param after_xml: snapshot xml after edit
        """
        desc_sec = "<description>%s</description>" % snap_desc
        name_sec = "<name>%s</name>" % snap_newname
        # Raw strings: '\S' in a plain literal is an invalid escape
        # sequence on modern Python (SyntaxWarning).
        name_pat = r"<name>\S+</name>"
        desc_pat = r"<description>.*?</description>"
        # Synthesize the expected post-edit XML from the pre-edit XML by
        # replacing (or inserting) the name/description elements.
        if re.search(r"%s\s+?%s" % (name_pat, desc_pat), pre_xml):
            pre_xml = re.sub(r"%s\s+?%s" % (name_pat, desc_pat),
                             name_sec + '\n' + desc_sec, pre_xml)
        else:
            pre_xml = re.subn(r"%s" % name_pat,
                              name_sec + '\n' + desc_sec, pre_xml, 1)[0]
        # change to list and remove the description element in list
        pre_xml_list = pre_xml.strip().splitlines()
        after_xml_list = after_xml.strip().splitlines()
        pre_list = [pl.strip() for pl in pre_xml_list]
        after_list = [al.strip() for al in after_xml_list]
        if not snap_desc:
            # Build a filtered list instead of calling remove() while
            # iterating, which skips elements adjacent to a removed one.
            pre_list = [line for line in pre_list if desc_sec not in line]
        if pre_list == after_list:
            logging.info("Succeed to check the xml for description and name")
        else:
            # Print just the differences rather than printing both
            # files and forcing the eyeball comparison between lines.
            # map(None, ...) was a Python-2-only zip_longest idiom and
            # raises TypeError on Python 3; pad the shorter side by hand.
            pre_lines = pre_xml.splitlines()
            aft_lines = after_xml.splitlines()
            for idx in range(max(len(pre_lines), len(aft_lines))):
                pre_line = pre_lines[idx] if idx < len(pre_lines) else None
                aft_line = aft_lines[idx] if idx < len(aft_lines) else None
                pre_strip = pre_line.strip() if pre_line is not None else None
                aft_strip = aft_line.strip() if aft_line is not None else None
                if pre_strip != aft_strip:
                    if pre_line is not None:
                        logging.debug("diff before='%s'", pre_strip)
                    if aft_line is not None:
                        logging.debug("diff after='%s'", aft_strip)
            test.fail("Failed xml before/after comparison")

    snapshot_oldlist = None
    try:
        # Create disk snapshot before all to make the origin image clean
        logging.debug("Create snap-temp --disk-only")
        ret = virsh.snapshot_create_as(vm_name, "snap-temp --disk-only")
        if ret.exit_status != 0:
            test.fail("Fail to create temp snap, Error: %s"
                      % ret.stderr.strip())

        # Create snapshots
        for opt in [snap_create_opt1, snap_create_opt2]:
            logging.debug("...use option %s", opt)
            result = virsh.snapshot_create_as(vm_name, opt)
            if result.exit_status:
                test.fail("Failed to create snapshot. Error:%s."
                          % result.stderr.strip())
            time.sleep(1)

        snapshot_oldlist = virsh.snapshot_list(vm_name)

        # Get the snapshot xml before edit
        if len(snap_name) > 0:
            pre_name = check_name = snap_name
        else:
            cmd_result = virsh.snapshot_current(vm_name)
            pre_name = check_name = cmd_result.stdout.strip()
        ret = virsh.snapshot_dumpxml(vm_name, pre_name)
        if ret.exit_status == 0:
            pre_xml = ret.stdout.strip()
        else:
            test.fail("Fail to dumpxml of snapshot %s:%s"
                      % (pre_name, ret.stderr.strip()))

        # Build the vi substitution command that appends a description
        # right after the <name> element (raw strings keep the literal
        # backslashes vi needs; '\r' here is a literal backslash-r).
        edit_cmd = []
        replace_cmd = r'%s<\/name>/%s<\/name>' % (pre_name, pre_name)
        replace_cmd += r'\r<description>%s<\/description>' % snap_desc
        replace_cmd = ":%s/" + replace_cmd + "/"
        edit_cmd.append(replace_cmd)

        # if have --clone or --rename, need to change snapshot name in xml
        if len(snap_opt) > 0:
            edit_cmd.append(":2")
            edit_cmd.append(":s/<name>.*</<name>" + snap_newname + "<")
            check_name = snap_newname

        edit_opts = " " + snap_name + " " + snap_cur + " " + snap_opt

        # Do snapshot edit
        if status_error == "yes":
            output = virsh.snapshot_edit(vm_name, edit_opts)
            if output.exit_status == 0:
                test.fail("Succeed to do the snapshot-edit but expect fail")
            else:
                logging.info("Fail to do snapshot-edit as expect: %s",
                             output.stderr.strip())
                return

        edit_snap_xml(vm_name, edit_opts, edit_cmd)

        # Do edit check
        snapshots = virsh.snapshot_list(vm_name)
        after_xml = virsh.snapshot_dumpxml(vm_name, check_name).stdout
        match_str = "<description>" + snap_desc + "</description>"
        if not re.search(match_str, after_xml.strip("\n")):
            if snap_desc:
                logging.debug("Failed to edit snapshot edit_opts=%s, match=%s",
                              edit_opts, match_str)
                # Only print first 15 lines - they are most relevant.
                # Use splitlines() (not split(), which tokenizes on any
                # whitespace) and bound the range to avoid IndexError on
                # short XML documents.
                pre_lines = pre_xml.splitlines()
                aft_lines = after_xml.splitlines()
                for i in range(min(15, len(pre_lines), len(aft_lines))):
                    logging.debug("before xml=%s", pre_lines[i].lstrip())
                    logging.debug(" after xml=%s", aft_lines[i].lstrip())
                test.fail("Failed to edit snapshot description")

        # Check edit options --clone
        if snap_opt == "--clone":
            if pre_name not in snapshots:
                test.fail("After clone, previous snapshot missing")
            snap_xml_compare(pre_xml, after_xml)

        if snap_opt == "--rename":
            if pre_name in snapshots:
                test.fail("After rename, snapshot %s still exist"
                          % pre_name)
            snap_xml_compare(pre_xml, after_xml)

        # Check if --current effect take effect
        if len(snap_cur) > 0 and len(snap_name) > 0:
            cmd_result = virsh.snapshot_current(vm_name)
            snap_cur = cmd_result.stdout.strip()
            if snap_cur == check_name:
                logging.info("Check current is same as set %s", check_name)
            else:
                test.fail("Fail to check --current, current is %s "
                          "but set is %s" % (snap_cur, check_name))
    finally:
        utils_test.libvirt.clean_up_snapshots(vm_name, snapshot_oldlist)
        vmxml_backup.sync("--snapshots-metadata")