def prepare_gluster_disk(disk_img, disk_format):
    """
    Set up glusterfs and prepare the disk image.
    """
    # Get the image path
    image_source = vm.get_first_disk_devices()['source']

    # Setup gluster
    host_ip = gluster.setup_or_cleanup_gluster(True, brick_path=brick_path,
                                               **params)
    logging.debug("host ip: %s ", host_ip)
    image_info = utils_misc.get_image_info(image_source)
    image_dest = "/mnt/%s" % disk_img

    if image_info["format"] == disk_format:
        disk_cmd = ("cp -f %s %s" % (image_source, image_dest))
    else:
        # Convert the disk format
        disk_cmd = ("qemu-img convert -f %s -O %s %s %s" %
                    (image_info["format"], disk_format, image_source,
                     image_dest))

    # Mount the gluster volume, copy/convert the image onto it, then unmount
    process.run("mount -t glusterfs %s:%s /mnt && "
                "%s && chmod a+rw /mnt/%s && umount /mnt" %
                (host_ip, vol_name, disk_cmd, disk_img), shell=True)
    return host_ip
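# The helper above shells out to "mount -t glusterfs" in a single compound
# command. As a hedged, standalone illustration of the same sequence
# (standard library only; all names below are placeholders, not values from
# the test):
import subprocess

def copy_image_to_gluster(host_ip, vol_name, disk_cmd, disk_img, mnt="/mnt"):
    """Mount a gluster volume, run the prepared copy/convert command, unmount."""
    subprocess.run(["mount", "-t", "glusterfs",
                    "%s:%s" % (host_ip, vol_name), mnt], check=True)
    try:
        subprocess.run(disk_cmd, shell=True, check=True)
        subprocess.run(["chmod", "a+rw", "%s/%s" % (mnt, disk_img)], check=True)
    finally:
        subprocess.run(["umount", mnt], check=True)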
def prepare_gluster_disk(blk_source, test, **kwargs):
    """
    Set up a gluster disk device and replace the domain disk image.

    :param blk_source: The domain disk image path
    :param test: Avocado test object
    :param kwargs: Keyword arguments for the gluster device setup
    :return: host_ip
    """
    vol_name = kwargs.get("vol_name")
    brick_path = kwargs.get("brick_path")
    disk_img = kwargs.get("disk_img")
    disk_format = kwargs.get("disk_format")
    host_ip = gluster.setup_or_cleanup_gluster(True, **kwargs)
    logging.debug("host ip: %s ", host_ip)

    # Copy the domain disk image to the gluster disk path
    image_info = utils_misc.get_image_info(blk_source)
    dest_image = "/mnt/%s" % disk_img
    if image_info["format"] == disk_format:
        disk_cmd = ("cp -f %s %s" % (blk_source, dest_image))
    else:
        disk_cmd = ("qemu-img convert -f %s -O %s %s %s" %
                    (image_info["format"], disk_format, blk_source,
                     dest_image))

    # Mount the gluster disk and create the image
    src_mnt = "%s:%s" % (host_ip, vol_name)
    if not utils_misc.mount(src_mnt, "/mnt", "glusterfs"):
        test.error("glusterfs mount failed")
    process.run("%s && chmod a+rw /mnt/%s && umount /mnt" %
                (disk_cmd, disk_img), shell=True)
    return host_ip
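# A hedged usage sketch for the variant above; the keyword names mirror the
# kwargs read at the top of the function, and every concrete value here is
# illustrative only:
#
#     host_ip = prepare_gluster_disk(blk_source, test,
#                                    vol_name="gluster_vol",
#                                    brick_path="/var/tmp/gluster-pool",
#                                    disk_img="gluster.qcow2",
#                                    disk_format="qcow2")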
def run(test, params, env): """ Test Boot OVMF Guest and Seabios Guest with options Steps: 1) Edit VM xml with specified options 2) For secure boot mode, boot OVMF Guest from cdrom first, enroll the key, then switch boot from hd 3) For normal boot mode, directly boot Guest from given device 4) Verify if Guest can boot as expected """ vm_name = params.get("main_vm", "") vm = env.get_vm(vm_name) username = params.get("username", "root") password = params.get("password", "redhat") test_cmd = params.get("test_cmd", "") expected_output = params.get("expected_output", "") check_point = params.get("checkpoint", "") status_error = "yes" == params.get("status_error", "no") boot_iso_file = os.path.join(data_dir.get_tmp_dir(), "boot.iso") non_release_os_url = params.get("non_release_os_url", "") download_file_path = os.path.join(data_dir.get_tmp_dir(), "non_released_os.qcow2") release_os_url = params.get("release_os_url", "") download_released_file_path = os.path.join(data_dir.get_tmp_dir(), "released_os.qcow2") uefi_iso = params.get("uefi_iso", "") custom_codes = params.get("uefi_custom_codes", "") uefi_target_dev = params.get("uefi_target_dev", "") uefi_device_bus = params.get("uefi_device_bus", "") with_boot = (params.get("with_boot", "no") == "yes") boot_ref = params.get("boot_ref", "dev") boot_order = params.get("boot_order", "1") boot_dev = params.get("boot_dev", "hd") target_dev = params.get("target_dev", "vdb") vol_name = params.get("vol_name") brick_path = os.path.join(test.virtdir, "gluster-pool") boot_type = params.get("boot_type", "seabios") boot_loadparm = params.get("boot_loadparm", None) libvirt_version.is_libvirt_feature_supported(params) # Prepare result checkpoint list check_points = [] if check_point: check_points.append(check_point) # Back VM XML vmxml_backup = vm_xml.VMXML.new_from_dumpxml(vm_name) vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) # Prepare a blank params to confirm if delete the configure at the end of the test ceph_cfg = '' try: # Create config file if it doesn't exist ceph_cfg = ceph.create_config_file(params.get("mon_host")) setup_test_env(params, test) apply_boot_options(vmxml, params, test) blk_source = vm.get_first_disk_devices()['source'] set_domain_disk(vmxml, blk_source, params, test) vmxml.remove_all_boots() if with_boot: boot_kwargs = { "boot_ref": boot_ref, "boot_dev": boot_dev, "boot_order": boot_order, "target_dev": target_dev, "loadparm": boot_loadparm } if "yes" == params.get("two_same_boot_dev", "no"): boot_kwargs.update({"two_same_boot_dev": True}) set_boot_dev_or_boot_order(vmxml, **boot_kwargs) define_error = ("yes" == params.get("define_error", "no")) enable_normal_boot(vmxml, check_points, define_error, test) # Some negative cases failed at virsh.define if define_error: return # Start VM and check result # For boot from cdrom or non_released_os, just verify key words from serial console output # For boot from disk image, run 'test cmd' to verify if OS boot well if boot_dev == "cdrom" or non_release_os_url: if not vm.is_alive(): vm.start() check_prompt = params.get("check_prompt", "") while True: if boot_type == "ovmf": match, text = vm.serial_console.read_until_any_line_matches( [check_prompt], timeout=30.0, internal_timeout=0.5) else: match, text = read_until_any_line_matches( vm.serial_console, [check_prompt], timeout=30.0, internal_timeout=0.5) logging.debug("matches %s", check_prompt) if match == -1: logging.debug("Got check point as expected") break elif boot_dev == "hd": ret = virsh.start(vm_name, timeout=60) utlv.check_result(ret, 
                                  expected_fails=check_points)

            # For no boot options, further check if a boot dev is added
            # automatically
            if not with_boot:
                if re.search(r"<boot dev='hd'/>",
                             virsh.dumpxml(vm_name).stdout.strip()):
                    logging.debug("OS boot dev added automatically")
                else:
                    test.fail("OS boot dev not added as expected")

            if not status_error:
                vm_ip = vm.wait_for_get_address(0, timeout=240)
                remote_session = remote.wait_for_login("ssh", vm_ip, "22",
                                                       username, password,
                                                       r"[\#\$]\s*$")
                if test_cmd:
                    status, output = remote_session.cmd_status_output(test_cmd)
                    logging.debug("CMD '%s' running result is:\n%s",
                                  test_cmd, output)
                    if expected_output:
                        if not re.search(expected_output, output):
                            test.fail("Output '%s' does not match the expected"
                                      " pattern '%s'." % (output,
                                                          expected_output))
                    if status:
                        test.fail("Failed to boot %s from %s"
                                  % (vm_name, vmxml.xml))
                remote_session.close()
        logging.debug("Succeeded to boot %s", vm_name)
    finally:
        # Remove the ceph configuration file if it was created
        if ceph_cfg:
            os.remove(ceph_cfg)
        logging.debug("Start to cleanup")
        if vm.is_alive():
            vm.destroy()
        logging.debug("Restore the VM XML")
        vmxml_backup.sync(options="--nvram")
        if cleanup_gluster:
            process.run("umount /mnt", ignore_status=True, shell=True)
            gluster.setup_or_cleanup_gluster(False, brick_path=brick_path,
                                             **params)
        if cleanup_iscsi:
            utlv.setup_or_cleanup_iscsi(False)
        if cleanup_iso_file:
            process.run("rm -rf %s" % boot_iso_file,
                        shell=True, ignore_status=True)
        if cleanup_image_file:
            process.run("rm -rf %s" % download_file_path,
                        shell=True, ignore_status=True)
        if cleanup_released_image_file:
            process.run("rm -rf %s" % download_released_file_path,
                        shell=True, ignore_status=True)
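# For orientation: the boot selection this test toggles lives either under
# <os> (boot_ref == "dev") or as a per-device <boot order=.../> element
# (boot_ref == "order"). A hedged illustration of the two XML shapes the
# checks above look for; the values are examples, not the test's actual XML:
BOOT_BY_DEV_EXAMPLE = """
<os>
  <type arch='x86_64' machine='q35'>hvm</type>
  <boot dev='hd'/>
</os>
"""

BOOT_BY_ORDER_EXAMPLE = """
<disk type='file' device='disk'>
  <target dev='vdb' bus='virtio'/>
  <boot order='1'/>
</disk>
"""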
def run(test, params, env): """ Test snapshot-create-as command Make sure that the clean repo can be used because qemu-guest-agent need to be installed in guest The command create a snapshot (disk and RAM) from arguments which including the following point * virsh snapshot-create-as --print-xml --diskspec --name --description * virsh snapshot-create-as --print-xml with multi --diskspec * virsh snapshot-create-as --print-xml --memspec * virsh snapshot-create-as --description * virsh snapshot-create-as --no-metadata * virsh snapshot-create-as --no-metadata --print-xml (negative test) * virsh snapshot-create-as --atomic --disk-only * virsh snapshot-create-as --quiesce --disk-only (positive and negative) * virsh snapshot-create-as --reuse-external * virsh snapshot-create-as --disk-only --diskspec * virsh snapshot-create-as --memspec --reuse-external --atomic(negative) * virsh snapshot-create-as --disk-only and --memspec (negative) * Create multi snapshots with snapshot-create-as * Create snapshot with name a--a a--a--snap1 """ if not virsh.has_help_command('snapshot-create-as'): test.cancel("This version of libvirt does not support " "the snapshot-create-as test") vm_name = params.get("main_vm") status_error = params.get("status_error", "no") options = params.get("snap_createas_opts") multi_num = params.get("multi_num", "1") diskspec_num = params.get("diskspec_num", "1") bad_disk = params.get("bad_disk") reuse_external = "yes" == params.get("reuse_external", "no") start_ga = params.get("start_ga", "yes") domain_state = params.get("domain_state") memspec_opts = params.get("memspec_opts") config_format = "yes" == params.get("config_format", "no") snapshot_image_format = params.get("snapshot_image_format") diskspec_opts = params.get("diskspec_opts") create_autodestroy = 'yes' == params.get("create_autodestroy", "no") unix_channel = "yes" == params.get("unix_channel", "yes") dac_denial = "yes" == params.get("dac_denial", "no") check_json_no_savevm = "yes" == params.get("check_json_no_savevm", "no") disk_snapshot_attr = params.get('disk_snapshot_attr', 'external') set_snapshot_attr = "yes" == params.get("set_snapshot_attr", "no") # gluster related params replace_vm_disk = "yes" == params.get("replace_vm_disk", "no") disk_src_protocol = params.get("disk_source_protocol") restart_tgtd = params.get("restart_tgtd", "no") vol_name = params.get("vol_name") tmp_dir = data_dir.get_tmp_dir() pool_name = params.get("pool_name", "gluster-pool") brick_path = os.path.join(tmp_dir, pool_name) uri = params.get("virsh_uri") usr = params.get('unprivileged_user') if usr: if usr.count('EXAMPLE'): usr = '******' if disk_src_protocol == 'iscsi': if not libvirt_version.version_compare(1, 0, 4): test.cancel("'iscsi' disk doesn't support in" " current libvirt version.") if not libvirt_version.version_compare(1, 1, 1): if params.get('setup_libvirt_polkit') == 'yes': test.cancel("API acl test not supported in current" " libvirt version.") if not libvirt_version.version_compare(1, 2, 7): # As bug 1017289 closed as WONTFIX, the support only # exist on 1.2.7 and higher if disk_src_protocol == 'gluster': test.cancel("Snapshot on glusterfs not support in " "current version. Check more info with " "https://bugzilla.redhat.com/buglist.cgi?" "bug_id=1017289,1032370") if libvirt_version.version_compare(5, 5, 0): # libvirt-5.5.0-2 commit 68e1a05f starts to allow --no-metadata and # --print-xml to be used together. 
if "--no-metadata" in options and "--print-xml" in options: logging.info("--no-metadata and --print-xml can be used together " "in this libvirt version. Not expecting a failure.") status_error = "no" opt_names = locals() if memspec_opts is not None: mem_options = compose_disk_options(test, params, memspec_opts) # if the parameters have the disk without "file=" then we only need to # add testdir for it. if mem_options is None: mem_options = os.path.join(data_dir.get_tmp_dir(), memspec_opts) options += " --memspec " + mem_options tag_diskspec = 0 dnum = int(diskspec_num) if diskspec_opts is not None: tag_diskspec = 1 opt_names['diskopts_1'] = diskspec_opts # diskspec_opts[n] is used in cfg when more than 1 --diskspec is used if dnum > 1: tag_diskspec = 1 for i in range(1, dnum + 1): opt_names["diskopts_%s" % i] = params.get("diskspec_opts%s" % i) if tag_diskspec == 1: for i in range(1, dnum + 1): disk_options = compose_disk_options(test, params, opt_names["diskopts_%s" % i]) options += " --diskspec " + disk_options logging.debug("options are %s", options) vm = env.get_vm(vm_name) option_dict = {} option_dict = utils_misc.valued_option_dict(options, r' --(?!-)') logging.debug("option_dict is %s", option_dict) # A backup of original vm vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) logging.debug("original xml is %s", vmxml_backup) # Generate empty image for negative test if bad_disk is not None: bad_disk = os.path.join(data_dir.get_tmp_dir(), bad_disk) with open(bad_disk, 'w') as bad_file: pass # Generate external disk if reuse_external: disk_path = '' for i in range(dnum): external_disk = "external_disk%s" % i if params.get(external_disk): disk_path = os.path.join(data_dir.get_tmp_dir(), params.get(external_disk)) process.run("qemu-img create -f qcow2 %s 1G" % disk_path, shell=True) # Only chmod of the last external disk for negative case if dac_denial: process.run("chmod 500 %s" % disk_path, shell=True) qemu_conf = None libvirtd_conf = None libvirtd_log_path = None libvirtd = utils_libvirtd.Libvirtd() try: # Config "snapshot_image_format" option in qemu.conf if config_format: qemu_conf = utils_config.LibvirtQemuConfig() qemu_conf.snapshot_image_format = snapshot_image_format logging.debug("the qemu config file content is:\n %s" % qemu_conf) libvirtd.restart() if check_json_no_savevm: libvirtd_conf = utils_config.LibvirtdConfig() libvirtd_conf["log_level"] = '1' libvirtd_conf["log_filters"] = '"1:json 3:remote 4:event"' libvirtd_log_path = os.path.join(data_dir.get_tmp_dir(), "libvirtd.log") libvirtd_conf["log_outputs"] = '"1:file:%s"' % libvirtd_log_path logging.debug("the libvirtd config file content is:\n %s" % libvirtd_conf) libvirtd.restart() if replace_vm_disk: libvirt.set_vm_disk(vm, params, tmp_dir) if set_snapshot_attr: if vm.is_alive(): vm.destroy(gracefully=False) vmxml_new = vm_xml.VMXML.new_from_dumpxml(vm_name) disk_xml = vmxml_backup.get_devices(device_type="disk")[0] vmxml_new.del_device(disk_xml) # set snapshot attribute in disk xml disk_xml.snapshot = disk_snapshot_attr new_disk = disk.Disk(type_name='file') new_disk.xmltreefile = disk_xml.xmltreefile vmxml_new.add_device(new_disk) logging.debug("The vm xml now is: %s" % vmxml_new.xmltreefile) vmxml_new.sync() vm.start() # Start qemu-ga on guest if have --quiesce if unix_channel and options.find("quiesce") >= 0: vm.prepare_guest_agent() session = vm.wait_for_login() if start_ga == "no": # The qemu-ga could be running and should be killed session.cmd("kill -9 `pidof qemu-ga`") # Check if the qemu-ga get killed 
stat_ps = session.cmd_status("ps aux |grep [q]emu-ga") if not stat_ps: # As managed by systemd and set as autostart, qemu-ga # could be restarted, so use systemctl to stop it. session.cmd("systemctl stop qemu-guest-agent") stat_ps = session.cmd_status("ps aux |grep [q]emu-ga") if not stat_ps: test.cancel("Fail to stop agent in " "guest") if domain_state == "paused": virsh.suspend(vm_name) else: # Remove channel if exist if vm.is_alive(): vm.destroy(gracefully=False) xml_inst = vm_xml.VMXML.new_from_dumpxml(vm_name) xml_inst.remove_agent_channels() vm.start() # Record the previous snapshot-list snaps_before = virsh.snapshot_list(vm_name) # Attach disk before create snapshot if not print xml and multi disks # specified in cfg if dnum > 1 and "--print-xml" not in options: for i in range(1, dnum): disk_path = os.path.join(data_dir.get_tmp_dir(), 'disk%s.qcow2' % i) process.run("qemu-img create -f qcow2 %s 200M" % disk_path, shell=True) virsh.attach_disk(vm_name, disk_path, 'vd%s' % list(string.ascii_lowercase)[i], debug=True) # Run virsh command # May create several snapshots, according to configuration for count in range(int(multi_num)): if create_autodestroy: # Run virsh command in interactive mode vmxml_backup.undefine() vp = virsh.VirshPersistent() vp.create(vmxml_backup['xml'], '--autodestroy') cmd_result = vp.snapshot_create_as(vm_name, options, ignore_status=True, debug=True) vp.close_session() vmxml_backup.define() else: cmd_result = virsh.snapshot_create_as(vm_name, options, unprivileged_user=usr, uri=uri, ignore_status=True, debug=True) # for multi snapshots without specific snapshot name, the # snapshot name is using time string with 1 second # incremental, to avoid get snapshot failure with same name, # sleep 1 second here. if int(multi_num) > 1: time.sleep(1.1) output = cmd_result.stdout.strip() status = cmd_result.exit_status # check status_error if status_error == "yes": if status == 0: test.fail("Run successfully with wrong command!") else: # Check memspec file should be removed if failed if (options.find("memspec") >= 0 and options.find("atomic") >= 0): if os.path.isfile(option_dict['memspec']): os.remove(option_dict['memspec']) test.fail("Run failed but file %s exist" % option_dict['memspec']) else: logging.info("Run failed as expected and memspec" " file already been removed") # Check domain xml is not updated if reuse external fail elif reuse_external and dac_denial: output = virsh.dumpxml(vm_name).stdout.strip() if "reuse_external" in output: test.fail("Domain xml should not be " "updated with snapshot image") else: logging.info("Run failed as expected") elif status_error == "no": if status != 0: test.fail("Run failed with right command: %s" % output) else: # Check the special options snaps_list = virsh.snapshot_list(vm_name) logging.debug("snaps_list is %s", snaps_list) check_snapslist(test, vm_name, options, option_dict, output, snaps_before, snaps_list) # For cover bug 872292 if check_json_no_savevm: pattern = "The command savevm has not been found" with open(libvirtd_log_path) as f: for line in f: if pattern in line and "error" in line: test.fail("'%s' was found: %s" % (pattern, line)) finally: if vm.is_alive(): vm.destroy() # recover domain xml xml_recover(vmxml_backup) path = "/var/lib/libvirt/qemu/snapshot/" + vm_name if os.path.isfile(path): test.fail("Still can find snapshot metadata") if disk_src_protocol == 'gluster': gluster.setup_or_cleanup_gluster(False, brick_path=brick_path, **params) libvirtd.restart() if disk_src_protocol == 'iscsi': 
            libvirt.setup_or_cleanup_iscsi(False, restart_tgtd=restart_tgtd)

        # rm bad disks
        if bad_disk is not None:
            os.remove(bad_disk)
        # rm attach disks and reuse external disks
        if dnum > 1 and "--print-xml" not in options:
            for i in range(dnum):
                disk_path = os.path.join(data_dir.get_tmp_dir(),
                                         'disk%s.qcow2' % i)
                if os.path.exists(disk_path):
                    os.unlink(disk_path)
                if reuse_external:
                    external_disk = "external_disk%s" % i
                    disk_path = os.path.join(data_dir.get_tmp_dir(),
                                             params.get(external_disk))
                    if os.path.exists(disk_path):
                        os.unlink(disk_path)

        # restore config
        if config_format and qemu_conf:
            qemu_conf.restore()
        if libvirtd_conf:
            libvirtd_conf.restore()
        if libvirtd_conf or (config_format and qemu_conf):
            libvirtd.restart()
        if libvirtd_log_path and os.path.exists(libvirtd_log_path):
            os.unlink(libvirtd_log_path)
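# A minimal, hedged sketch of one positive invocation that the option
# composition above can produce (an external, disk-only snapshot of vda);
# the snapshot name, description and file path are placeholders:
def example_disk_only_snapshot(vm_name):
    options = ("snap1 'disk only snapshot' --disk-only "
               "--diskspec vda,snapshot=external,file=/var/tmp/vda.snap1")
    result = virsh.snapshot_create_as(vm_name, options,
                                      ignore_status=True, debug=True)
    libvirt.check_exit_status(result)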
def run(test, params, env): """ Test the pull-mode backup function Steps: 1. craete a vm with extra disk vdb 2. create some data on vdb 3. start a pull mode full backup on vdb 4. create some data on vdb 5. start a pull mode incremental backup 6. repeat step 5 to 7 7. check the full/incremental backup file data """ # Basic case config hotplug_disk = "yes" == params.get("hotplug_disk", "no") original_disk_size = params.get("original_disk_size", "100M") original_disk_type = params.get("original_disk_type", "local") original_disk_target = params.get("original_disk_target", "vdb") local_hostname = params.get("loal_hostname", "localhost") local_ip = params.get("local_ip", "127.0.0.1") local_user_name = params.get("local_user_name", "root") local_user_password = params.get("local_user_password", "redhat") tmp_dir = data_dir.get_tmp_dir() # Backup config scratch_type = params.get("scratch_type", "file") reuse_scratch_file = "yes" == params.get("reuse_scratch_file") prepare_scratch_file = "yes" == params.get("prepare_scratch_file") scratch_blkdev_path = params.get("scratch_blkdev_path") scratch_blkdev_size = params.get("scratch_blkdev_size", original_disk_size) prepare_scratch_blkdev = "yes" == params.get("prepare_scratch_blkdev") backup_rounds = int(params.get("backup_rounds", 3)) backup_error = "yes" == params.get("backup_error") expect_backup_canceled = "yes" == params.get("expect_backup_canceled") # NBD service config nbd_protocol = params.get("nbd_protocol", "unix") nbd_socket = params.get("nbd_socket", "/tmp/pull_backup.socket") nbd_tcp_port = params.get("nbd_tcp_port", "10809") nbd_hostname = local_hostname set_exportname = "yes" == params.get("set_exportname") set_exportbitmap = "yes" == params.get("set_exportbitmap") # TLS service config tls_enabled = "yes" == params.get("tls_enabled") tls_x509_verify = "yes" == params.get("tls_x509_verify") custom_pki_path = "yes" == params.get("custom_pki_path") tls_client_ip = tls_server_ip = local_ip tls_client_cn = tls_server_cn = local_hostname tls_client_user = tls_server_user = local_user_name tls_client_pwd = tls_server_pwd = local_user_password tls_provide_client_cert = "yes" == params.get("tls_provide_client_cert") tls_error = "yes" == params.get("tls_error") # LUKS config scratch_luks_encrypted = "yes" == params.get("scratch_luks_encrypted") luks_passphrase = params.get("luks_passphrase", "password") # Cancel the test if libvirt support related functions if not libvirt_version.version_compare(6, 0, 0): test.cancel("Current libvirt version doesn't support " "incremental backup.") if tls_enabled and not libvirt_version.version_compare(6, 6, 0): test.cancel("Current libvirt version doesn't support pull mode " "backup with tls nbd.") try: vm_name = params.get("main_vm") vm = env.get_vm(vm_name) # Make sure there is no checkpoint metadata before test utils_backup.clean_checkpoints(vm_name) # Backup vm xml vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) vmxml_backup = vmxml.copy() utils_backup.enable_inc_backup_for_vm(vm) # Prepare tls env if tls_enabled: # Prepare pki tls_config = { "qemu_tls": "yes", "auto_recover": "yes", "client_ip": tls_client_ip, "server_ip": tls_server_ip, "client_cn": tls_client_cn, "server_cn": tls_server_cn, "client_user": tls_client_user, "server_user": tls_server_user, "client_pwd": tls_client_pwd, "server_pwd": tls_server_pwd, } if custom_pki_path: pki_path = os.path.join(tmp_dir, "inc_bkup_pki") else: pki_path = "/etc/pki/libvirt-backup/" if tls_x509_verify: tls_config["client_ip"] = tls_client_ip 
tls_config["custom_pki_path"] = pki_path tls_obj = TLSConnection(tls_config) tls_obj.conn_setup(True, tls_provide_client_cert) logging.debug("TLS certs in: %s" % pki_path) # Set qemu.conf qemu_config = LibvirtQemuConfig() if tls_x509_verify: qemu_config.backup_tls_x509_verify = True else: qemu_config.backup_tls_x509_verify = False if custom_pki_path: qemu_config.backup_tls_x509_cert_dir = pki_path utils_libvirtd.Libvirtd().restart() # Prepare libvirt secret if scratch_luks_encrypted: utils_secret.clean_up_secrets() luks_secret_uuid = libvirt.create_secret(params) virsh.secret_set_value(luks_secret_uuid, luks_passphrase, encode=True, debug=True) # Prepare the disk to be backuped. disk_params = {} disk_path = "" if original_disk_type == "local": image_name = "{}_image.qcow2".format(original_disk_target) disk_path = os.path.join(tmp_dir, image_name) libvirt.create_local_disk("file", disk_path, original_disk_size, "qcow2") disk_params = { "device_type": "disk", "type_name": "file", "driver_type": "qcow2", "target_dev": original_disk_target, "source_file": disk_path } if original_disk_target: disk_params["target_dev"] = original_disk_target elif original_disk_type == "iscsi": iscsi_host = '127.0.0.1' iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi( is_setup=True, is_login=False, image_size=original_disk_size, portal_ip=iscsi_host) disk_path = ("iscsi://[%s]/%s/%s" % (iscsi_host, iscsi_target, lun_num)) process.run("qemu-img create -f qcow2 %s %s" % (disk_path, original_disk_size), shell=True, verbose=True) disk_params = { 'device_type': 'disk', 'type_name': 'network', "driver_type": "qcow2", 'target_dev': original_disk_target } disk_params_src = { 'source_protocol': 'iscsi', 'source_name': iscsi_target + "/%s" % lun_num, 'source_host_name': iscsi_host, 'source_host_port': '3260' } disk_params.update(disk_params_src) elif original_disk_type == "gluster": gluster_vol_name = "gluster_vol" gluster_pool_name = "gluster_pool" gluster_img_name = "gluster.qcow2" gluster_host_ip = gluster.setup_or_cleanup_gluster( is_setup=True, vol_name=gluster_vol_name, pool_name=gluster_pool_name, **params) disk_path = 'gluster://%s/%s/%s' % ( gluster_host_ip, gluster_vol_name, gluster_img_name) process.run("qemu-img create -f qcow2 %s %s" % (disk_path, original_disk_size), shell=True, verbose=True) disk_params = { 'device_type': 'disk', 'type_name': 'network', "driver_type": "qcow2", 'target_dev': original_disk_target } disk_params_src = { 'source_protocol': 'gluster', 'source_name': gluster_vol_name + "/%s" % gluster_img_name, 'source_host_name': gluster_host_ip, 'source_host_port': '24007' } disk_params.update(disk_params_src) else: test.error("The disk type '%s' not supported in this script.", original_disk_type) if hotplug_disk: vm.start() session = vm.wait_for_login().close() disk_xml = libvirt.create_disk_xml(disk_params) virsh.attach_device(vm_name, disk_xml, debug=True) else: disk_xml = libvirt.create_disk_xml(disk_params) virsh.attach_device(vm.name, disk_xml, flagstr="--config", debug=True) vm.start() session = vm.wait_for_login() new_disks_in_vm = list(utils_disk.get_linux_disks(session).keys()) session.close() if len(new_disks_in_vm) != 1: test.fail("Test disk not prepared in vm") # Use the newly added disk as the test disk test_disk_in_vm = "/dev/" + new_disks_in_vm[0] vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) vm_disks = list(vmxml.get_disk_all().keys()) checkpoint_list = [] is_incremental = False backup_file_list = [] for backup_index in range(backup_rounds): # Prepare backup xml 
backup_params = {"backup_mode": "pull"} if backup_index > 0: is_incremental = True backup_params["backup_incremental"] = "checkpoint_" + str( backup_index - 1) # Set libvirt default nbd export name and bitmap name nbd_export_name = original_disk_target nbd_bitmap_name = "backup-" + original_disk_target backup_server_dict = {} if nbd_protocol == "unix": backup_server_dict["transport"] = "unix" backup_server_dict["socket"] = nbd_socket else: backup_server_dict["name"] = nbd_hostname backup_server_dict["port"] = nbd_tcp_port if tls_enabled: backup_server_dict["tls"] = "yes" backup_params["backup_server"] = backup_server_dict backup_disk_xmls = [] for vm_disk in vm_disks: backup_disk_params = {"disk_name": vm_disk} if vm_disk != original_disk_target: backup_disk_params["enable_backup"] = "no" else: backup_disk_params["enable_backup"] = "yes" backup_disk_params["disk_type"] = scratch_type # Custom nbd export name and bitmap name if required if set_exportname: nbd_export_name = original_disk_target + "_custom_exp" backup_disk_params["exportname"] = nbd_export_name if set_exportbitmap: nbd_bitmap_name = original_disk_target + "_custom_bitmap" backup_disk_params["exportbitmap"] = nbd_bitmap_name # Prepare nbd scratch file/dev params scratch_params = {"attrs": {}} scratch_path = None if scratch_type == "file": scratch_file_name = "scratch_file_%s" % backup_index scratch_path = os.path.join(tmp_dir, scratch_file_name) if prepare_scratch_file: libvirt.create_local_disk("file", scratch_path, original_disk_size, "qcow2") scratch_params["attrs"]["file"] = scratch_path elif scratch_type == "block": if prepare_scratch_blkdev: scratch_path = libvirt.setup_or_cleanup_iscsi( is_setup=True, image_size=scratch_blkdev_size) scratch_params["attrs"]["dev"] = scratch_path else: test.fail( "We do not support backup scratch type: '%s'" % scratch_type) if scratch_luks_encrypted: encryption_dict = { "encryption": "luks", "secret": { "type": "passphrase", "uuid": luks_secret_uuid } } scratch_params["encryption"] = encryption_dict logging.debug("scratch params: %s", scratch_params) backup_disk_params["backup_scratch"] = scratch_params backup_disk_xml = utils_backup.create_backup_disk_xml( backup_disk_params) backup_disk_xmls.append(backup_disk_xml) logging.debug("disk list %s", backup_disk_xmls) backup_xml = utils_backup.create_backup_xml( backup_params, backup_disk_xmls) logging.debug("ROUND_%s Backup Xml: %s", backup_index, backup_xml) # Prepare checkpoint xml checkpoint_name = "checkpoint_%s" % backup_index checkpoint_list.append(checkpoint_name) cp_params = {"checkpoint_name": checkpoint_name} cp_params["checkpoint_desc"] = params.get( "checkpoint_desc", "desc of cp_%s" % backup_index) disk_param_list = [] for vm_disk in vm_disks: cp_disk_param = {"name": vm_disk} if vm_disk != original_disk_target: cp_disk_param["checkpoint"] = "no" else: cp_disk_param["checkpoint"] = "bitmap" cp_disk_bitmap = params.get("cp_disk_bitmap") if cp_disk_bitmap: cp_disk_param["bitmap"] = cp_disk_bitmap + str( backup_index) disk_param_list.append(cp_disk_param) checkpoint_xml = utils_backup.create_checkpoint_xml( cp_params, disk_param_list) logging.debug("ROUND_%s Checkpoint Xml: %s", backup_index, checkpoint_xml) # Create some data in vdb dd_count = "1" dd_seek = str(backup_index * 10 + 10) dd_bs = "1M" session = vm.wait_for_login() utils_disk.dd_data_to_vm_disk(session, test_disk_in_vm, dd_bs, dd_seek, dd_count) session.close() # Start backup backup_options = backup_xml.xml + " " + checkpoint_xml.xml if reuse_scratch_file: 
backup_options += " --reuse-external" backup_result = virsh.backup_begin(vm_name, backup_options, ignore_status=True, debug=True) if backup_result.exit_status: raise utils_backup.BackupBeginError( backup_result.stderr.strip()) # If required, do some error operations during backup job error_operation = params.get("error_operation") if error_operation: if "destroy_vm" in error_operation: vm.destroy(gracefully=False) if "kill_qemu" in error_operation: utils_misc.safe_kill(vm.get_pid(), signal.SIGKILL) if utils_misc.wait_for( lambda: utils_backup.is_backup_canceled(vm_name), timeout=5): raise utils_backup.BackupCanceledError() elif expect_backup_canceled: test.fail("Backup job should be canceled but not.") backup_file_path = os.path.join( tmp_dir, "backup_file_%s.qcow2" % str(backup_index)) backup_file_list.append(backup_file_path) nbd_params = { "nbd_protocol": nbd_protocol, "nbd_export": nbd_export_name } if nbd_protocol == "unix": nbd_params["nbd_socket"] = nbd_socket elif nbd_protocol == "tcp": nbd_params["nbd_hostname"] = nbd_hostname nbd_params["nbd_tcp_port"] = nbd_tcp_port if tls_enabled: nbd_params["tls_dir"] = pki_path nbd_params["tls_server_ip"] = tls_server_ip if not is_incremental: # Do full backup try: utils_backup.pull_full_backup_to_file( nbd_params, backup_file_path) except Exception as details: if tls_enabled and tls_error: raise utils_backup.BackupTLSError(details) else: test.fail("Fail to get full backup data: %s" % details) logging.debug("Full backup to: %s", backup_file_path) else: # Do incremental backup utils_backup.pull_incremental_backup_to_file( nbd_params, backup_file_path, nbd_bitmap_name, original_disk_size) # Check if scratch file encrypted if scratch_luks_encrypted and scratch_path: cmd = "qemu-img info -U %s" % scratch_path result = process.run(cmd, shell=True, verbose=True).stdout_text.strip() if (not re.search("format.*luks", result, re.IGNORECASE) or not re.search("encrypted.*yes", result, re.IGNORECASE)): test.fail("scratch file/dev is not encrypted by LUKS") virsh.domjobabort(vm_name, debug=True) for checkpoint_name in checkpoint_list: virsh.checkpoint_delete(vm_name, checkpoint_name, debug=True) if vm.is_alive(): vm.destroy(gracefully=False) # Compare the backup data and original data original_data_file = os.path.join(tmp_dir, "original_data.qcow2") cmd = "qemu-img convert -f qcow2 %s -O qcow2 %s" % (disk_path, original_data_file) process.run(cmd, shell=True, verbose=True) for backup_file in backup_file_list: if not utils_backup.cmp_backup_data(original_data_file, backup_file): test.fail("Backup and original data are not identical for" "'%s' and '%s'" % (disk_path, backup_file)) else: logging.debug("'%s' contains correct backup data", backup_file) except utils_backup.BackupBeginError as detail: if backup_error: logging.debug("Backup failed as expected.") else: test.fail("Backup failed to start: %s" % detail) except utils_backup.BackupTLSError as detail: if tls_error: logging.debug("Failed to get backup data as expected.") else: test.fail("Failed to get tls backup data: %s" % detail) except utils_backup.BackupCanceledError as detail: if expect_backup_canceled: logging.debug("Backup canceled as expected.") if not vm.is_alive(): logging.debug("Check if vm can be started again when backup " "canceled.") vm.start() vm.wait_for_login().close() else: test.fail("Backup job canceled: %s" % detail) finally: # Remove checkpoints clean_checkpoint_metadata = not vm.is_alive() if "error_operation" in locals() and error_operation is not None: if "kill_qemu" in 
error_operation: clean_checkpoint_metadata = True utils_backup.clean_checkpoints( vm_name, clean_metadata=clean_checkpoint_metadata) if vm.is_alive(): vm.destroy(gracefully=False) # Restoring vm vmxml_backup.sync() # Remove iscsi devices if original_disk_type == "iscsi" or scratch_type == "block": libvirt.setup_or_cleanup_iscsi(False) # Remove gluster devices if original_disk_type == "gluster": gluster.setup_or_cleanup_gluster(is_setup=False, vol_name=gluster_vol_name, pool_name=gluster_pool_name, **params) # Recover qemu.conf if "qemu_config" in locals(): qemu_config.restore() # Remove tls object if "tls_obj" in locals(): del tls_obj # Remove libvirt secret if "luks_secret_uuid" in locals(): virsh.secret_undefine(luks_secret_uuid, ignore_status=True)
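# Pull-mode backups expose the data over NBD and the client copies it out;
# utils_backup handles that internally. A hedged, standalone sketch of pulling
# a full backup through the unix socket used above (standard library plus
# qemu-img; the export name and paths are placeholders):
import subprocess

def pull_full_backup_over_unix_socket(nbd_socket, export_name, target_path):
    """Copy an NBD-exported full backup into a local qcow2 file."""
    nbd_uri = "nbd+unix:///%s?socket=%s" % (export_name, nbd_socket)
    subprocess.run(["qemu-img", "convert", "-f", "raw", nbd_uri,
                    "-O", "qcow2", target_path], check=True)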
def run(test, params, env): """ Test migration with glusterfs. """ def create_or_clean_backend_dir(g_uri, params, session=None, is_clean=False): """ Create/cleanup backend directory :params g_uri: glusterfs uri :params params: the parameters to be checked :params session: VM/remote session object :params is_cleanup: True for cleanup backend directory; False for create one. :return: gluster_img if is_clean is equal to True """ mount_point = params.get("gluster_mount_dir") is_symlink = params.get("gluster_create_symlink") == "yes" symlink_name = params.get("gluster_symlink") gluster_img = None if not is_clean: if not utils_misc.check_exists(mount_point, session): utils_misc.make_dirs(mount_point, session) if gluster.glusterfs_is_mounted(mount_point, session): gluster.glusterfs_umount(g_uri, mount_point, session) gluster.glusterfs_mount(g_uri, mount_point, session) gluster_img = os.path.join(mount_point, disk_img) if is_symlink: utils_misc.make_symlink(mount_point, symlink_name) utils_misc.make_symlink(mount_point, symlink_name, remote_session) gluster_img = os.path.join(symlink_name, disk_img) return gluster_img else: if is_symlink: utils_misc.rm_link(symlink_name, session) gluster.glusterfs_umount(g_uri, mount_point, session) if utils_misc.check_exists(mount_point, session): utils_misc.safe_rmdir(gluster_mount_dir, session=session) # Local variables virsh_args = {"debug": True} server_ip = params["server_ip"] = params.get("remote_ip") server_user = params["server_user"] = params.get("remote_user", "root") server_pwd = params["server_pwd"] = params.get("remote_pwd") client_ip = params["client_ip"] = params.get("local_ip") client_pwd = params["client_pwd"] = params.get("local_pwd") extra = params.get("virsh_migrate_extra") options = params.get("virsh_migrate_options", "--live --p2p --verbose") virsh_options = params.get("virsh_options", "") vol_name = params.get("vol_name") disk_format = params.get("disk_format", "qcow2") gluster_mount_dir = params.get("gluster_mount_dir") status_error = "yes" == params.get("status_error", "no") err_msg = params.get("err_msg") host_ip = params.get("gluster_server_ip", "") migrate_vm_back = params.get("migrate_vm_back", "no") == "yes" selinux_local = params.get('set_sebool_local', 'yes') == "yes" selinux_remote = params.get('set_sebool_remote', 'no') == "yes" sebool_fusefs_local = params.get('set_sebool_fusefs_local', 'yes') sebool_fusefs_remote = params.get('set_sebool_fusefs_remote', 'yes') test_dict = dict(params) test_dict["local_boolean_varible"] = "virt_use_fusefs" test_dict["remote_boolean_varible"] = "virt_use_fusefs" remote_dargs = { 'server_ip': server_ip, 'server_user': server_user, 'server_pwd': server_pwd, 'file_path': "/etc/libvirt/libvirt.conf" } remove_pkg = False seLinuxBool = None seLinuxfusefs = None gluster_uri = None mig_result = None remove_dict = {} remote_libvirt_file = None src_libvirt_file = None # Make sure all of parameters are assigned a valid value migrate_test = migration.MigrationTest() migrate_test.check_parameters(params) extra_args = migrate_test.update_virsh_migrate_extra_args(params) # params for migration connection params["virsh_migrate_desturi"] = libvirt_vm.complete_uri( params.get("migrate_dest_host")) params["virsh_migrate_connect_uri"] = libvirt_vm.complete_uri( params.get("migrate_source_host")) src_uri = params.get("virsh_migrate_connect_uri") dest_uri = params.get("virsh_migrate_desturi") # For --postcopy enable postcopy_options = params.get("postcopy_options") if postcopy_options: extra = "%s %s" % 
(virsh_options, postcopy_options) func_name = virsh.migrate_postcopy vm_name = params.get("migrate_main_vm") vm = env.get_vm(vm_name) vm.verify_alive() # Back up xml file. new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) orig_config_xml = new_xml.copy() try: # Create a remote runner for later use runner_on_target = remote.RemoteRunner(host=server_ip, username=server_user, password=server_pwd) # Configure selinux if selinux_local or selinux_remote: seLinuxBool = utils_misc.SELinuxBoolean(params) seLinuxBool.setup() if sebool_fusefs_local or sebool_fusefs_remote: seLinuxfusefs = utils_misc.SELinuxBoolean(test_dict) seLinuxfusefs.setup() # Setup glusterfs disk_img = "gluster.%s" % disk_format params['disk_img'] = disk_img host_ip = gluster.setup_or_cleanup_gluster(is_setup=True, **params) logging.debug("host ip: %s ", host_ip) # Check if gluster server is deployed locally if not host_ip: logging.debug("Enable port 24007 and 49152:49216") migrate_test.migrate_pre_setup(src_uri, params, ports="24007") migrate_test.migrate_pre_setup(src_uri, params) gluster_uri = "{}:{}".format(client_ip, vol_name) else: gluster_uri = "{}:{}".format(host_ip, vol_name) remote_session = remote.wait_for_login('ssh', server_ip, '22', server_user, server_pwd, r"[\#\$]\s*$") if gluster_mount_dir: # The package 'glusterfs-fuse' is not installed on target # which makes issue when trying to 'mount -t glusterfs' pkg_name = 'glusterfs-fuse' logging.debug("Check if glusterfs-fuse is installed") pkg_mgr = utils_package.package_manager(remote_session, pkg_name) if not pkg_mgr.is_installed(pkg_name): logging.debug("glusterfs-fuse will be installed") if not pkg_mgr.install(): test.error("Package '%s' installation fails" % pkg_name) else: remove_pkg = True gluster_img = create_or_clean_backend_dir(gluster_uri, params) create_or_clean_backend_dir(gluster_uri, params, remote_session) # Get the image path image_source = vm.get_first_disk_devices()['source'] image_info = utils_misc.get_image_info(image_source) if image_info["format"] == disk_format: disk_cmd = "cp -f %s %s" % (image_source, gluster_img) else: # Convert the disk format disk_cmd = ("qemu-img convert -f %s -O %s %s %s" % (image_info["format"], disk_format, image_source, gluster_img)) process.run("%s; chmod a+rw %s" % (disk_cmd, gluster_mount_dir), shell=True) logging.debug("Gluster Image is %s", gluster_img) gluster_backend_disk = {'disk_source_name': gluster_img} # Update disk xml with gluster image in backend dir libvirt.set_vm_disk(vm, gluster_backend_disk) remote_session.close() vm_xml_cxt = virsh.dumpxml(vm_name).stdout_text.strip() logging.debug("The VM XML with gluster disk source: \n%s", vm_xml_cxt) vm.wait_for_login().close() migrate_test.ping_vm(vm, params) remove_dict = {"do_search": '{"%s": "ssh:/"}' % dest_uri} src_libvirt_file = libvirt_config.remove_key_for_modular_daemon( remove_dict) vms = [vm] migrate_test.do_migration(vms, None, dest_uri, 'orderly', options, thread_timeout=900, ignore_status=True, virsh_opt=virsh_options, extra_opts=extra, **extra_args) migrate_test.ping_vm(vm, params, dest_uri) if migrate_vm_back: ssh_connection = utils_conn.SSHConnection(server_ip=client_ip, server_pwd=client_pwd, client_ip=server_ip, client_pwd=server_pwd) try: ssh_connection.conn_check() except utils_conn.ConnectionError: ssh_connection.conn_setup() ssh_connection.conn_check() # Pre migration setup for local machine migrate_test.migrate_pre_setup(src_uri, params) remove_dict = {"do_search": ('{"%s": "ssh:/"}' % src_uri)} remote_libvirt_file = 
            libvirt_config \
                .remove_key_for_modular_daemon(remove_dict, remote_dargs)

            cmd = "virsh migrate %s %s %s %s" % (vm_name, options,
                                                 virsh_options, src_uri)
            logging.debug("Start migrating: %s", cmd)
            cmd_result = remote.run_remote_cmd(cmd, params, runner_on_target)
            logging.info(cmd_result)
            if cmd_result.exit_status:
                destroy_cmd = "virsh destroy %s" % vm_name
                remote.run_remote_cmd(destroy_cmd, params, runner_on_target,
                                      ignore_status=False)
                test.fail("Failed to run '%s' on remote: %s"
                          % (cmd, cmd_result))
    finally:
        logging.info("Recover test environment")
        migrate_test.cleanup_vm(vm, dest_uri)
        orig_config_xml.sync()

        if src_libvirt_file:
            src_libvirt_file.restore()
        if remote_libvirt_file:
            del remote_libvirt_file

        # Clean up the pre-migration setup for the local machine
        if migrate_vm_back:
            if 'ssh_connection' in locals():
                ssh_connection.auto_recover = True
            migrate_test.migrate_pre_setup(src_uri, params, cleanup=True)

        # Clean up the SELinux configuration
        if seLinuxBool:
            seLinuxBool.cleanup()
        if seLinuxfusefs:
            seLinuxfusefs.cleanup()

        # Disable ports 24007 and 49152:49216
        if not host_ip:
            logging.debug("Disable 24007 and 49152:49216 in Firewall")
            migrate_test.migrate_pre_setup(src_uri, params, cleanup=True,
                                           ports="24007")
            migrate_test.migrate_pre_setup(src_uri, params, cleanup=True)

        gluster.setup_or_cleanup_gluster(False, **params)

        # Cleanup backend directory/symlink
        if gluster_mount_dir and gluster_uri:
            remote_session = remote.wait_for_login('ssh', server_ip, '22',
                                                   server_user, server_pwd,
                                                   r"[\#\$]\s*$")
            create_or_clean_backend_dir(gluster_uri, params, is_clean=True)
            create_or_clean_backend_dir(gluster_uri, params, remote_session,
                                        True)
            if remove_pkg:
                pkg_mgr = utils_package.package_manager(remote_session,
                                                        pkg_name)
                if pkg_mgr.is_installed(pkg_name):
                    logging.debug("glusterfs-fuse will be uninstalled")
                    if not pkg_mgr.remove():
                        logging.error("Package '%s' un-installation fails",
                                      pkg_name)
            remote_session.close()
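# The migrate_pre_setup() calls above open the gluster management port (24007)
# and the migration port range (49152:49216 in the comments). As a hedged
# illustration of the equivalent manual step with firewall-cmd, assuming
# firewalld is in use (this is not the helper's actual implementation):
import subprocess

for _port in ("24007/tcp", "49152-49216/tcp"):
    subprocess.run(["firewall-cmd", "--add-port=%s" % _port], check=True)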
def run(test, params, env): """ Test command: virsh blockpull <domain> <path> 1) Prepare test environment. 2) Populate a disk from its backing image. 3) Recover test environment. 4) Check result. """ def make_disk_snapshot(snapshot_take): """ Make external snapshots for disks only. :param snapshot_take: snapshots taken. """ for count in range(1, snapshot_take + 1): snap_xml = snapshot_xml.SnapshotXML() snapshot_name = "snapshot_test%s" % count snap_xml.snap_name = snapshot_name snap_xml.description = "Snapshot Test %s" % count # Add all disks into xml file. vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) disks = vmxml.devices.by_device_tag('disk') new_disks = [] for src_disk_xml in disks: disk_xml = snap_xml.SnapDiskXML() disk_xml.xmltreefile = src_disk_xml.xmltreefile # Skip cdrom if disk_xml.device == "cdrom": continue del disk_xml.device del disk_xml.address disk_xml.snapshot = "external" disk_xml.disk_name = disk_xml.target['dev'] # Only qcow2 works as external snapshot file format, update it # here driver_attr = disk_xml.driver driver_attr.update({'type': 'qcow2'}) disk_xml.driver = driver_attr new_attrs = disk_xml.source.attrs if 'file' in disk_xml.source.attrs: file_name = disk_xml.source.attrs['file'] new_file = "%s.snap%s" % (file_name.split('.')[0], count) snapshot_external_disks.append(new_file) new_attrs.update({'file': new_file}) hosts = None elif ('name' in disk_xml.source.attrs and disk_src_protocol == 'gluster'): src_name = disk_xml.source.attrs['name'] new_name = "%s.snap%s" % (src_name.split('.')[0], count) new_attrs.update({'name': new_name}) snapshot_external_disks.append(new_name) hosts = disk_xml.source.hosts elif ('dev' in disk_xml.source.attrs or 'name' in disk_xml.source.attrs): if (disk_xml.type_name == 'block' or disk_src_protocol in ['iscsi', 'rbd']): # Use local file as external snapshot target for block # and iscsi network type. # As block device will be treat as raw format by # default, it's not fit for external disk snapshot # target. A work around solution is use qemu-img again # with the target. # And external active snapshots are not supported on # 'network' disks using 'iscsi' protocol disk_xml.type_name = 'file' if 'dev' in new_attrs: del new_attrs['dev'] elif 'name' in new_attrs: del new_attrs['name'] del new_attrs['protocol'] new_file = "%s/blk_src_file.snap%s" % (tmp_dir, count) snapshot_external_disks.append(new_file) new_attrs.update({'file': new_file}) hosts = None new_src_dict = {"attrs": new_attrs} if hosts: new_src_dict.update({"hosts": hosts}) disk_xml.source = disk_xml.new_disk_source(**new_src_dict) new_disks.append(disk_xml) snap_xml.set_disks(new_disks) snapshot_xml_path = snap_xml.xml logging.debug("The snapshot xml is: %s" % snap_xml.xmltreefile) options = "--disk-only --xmlfile %s " % snapshot_xml_path snapshot_result = virsh.snapshot_create( vm_name, options, debug=True) if snapshot_result.exit_status != 0: test.fail(snapshot_result.stderr) # Create a file flag in VM after each snapshot flag_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"), dir="/tmp") file_path = flag_file.name flag_file.close() status, output = session.cmd_status_output("touch %s" % file_path) if status: test.fail("Touch file in vm failed. %s" % output) snapshot_flag_files.append(file_path) def get_first_disk_source(): """ Get disk source of first device :return: first disk of first device. 
""" first_device = vm.get_first_disk_devices() firt_disk_src = first_device['source'] return firt_disk_src def make_relative_path_backing_files(): """ Create backing chain files of relative path. :return: absolute path of top active file """ first_disk_source = get_first_disk_source() basename = os.path.basename(first_disk_source) root_dir = os.path.dirname(first_disk_source) cmd = "mkdir -p %s" % os.path.join(root_dir, '{b..d}') ret = process.run(cmd, shell=True) libvirt.check_exit_status(ret) # Make three external relative path backing files. backing_file_dict = collections.OrderedDict() backing_file_dict["b"] = "../%s" % basename backing_file_dict["c"] = "../b/b.img" backing_file_dict["d"] = "../c/c.img" for key, value in list(backing_file_dict.items()): backing_file_path = os.path.join(root_dir, key) cmd = ("cd %s && qemu-img create -f qcow2 -o backing_file=%s,backing_fmt=qcow2 %s.img" % (backing_file_path, value, key)) ret = process.run(cmd, shell=True) libvirt.check_exit_status(ret) return os.path.join(backing_file_path, "d.img") def check_chain_backing_files(disk_src_file, expect_backing_file=False): """ Check backing chain files of relative path after blockcommit. :param disk_src_file: first disk src file. :param expect_backing_file: whether it expect to have backing files. """ first_disk_source = get_first_disk_source() # Validate source image need refer to original one after active blockcommit if not expect_backing_file and disk_src_file not in first_disk_source: test.fail("The disk image path:%s doesn't include the origin image: %s" % (first_disk_source, disk_src_file)) # Validate source image doesn't have backing files after active blockcommit cmd = "qemu-img info %s --backing-chain" % first_disk_source if qemu_img_locking_feature_support: cmd = "qemu-img info -U %s --backing-chain" % first_disk_source ret = process.run(cmd, shell=True).stdout_text.strip() if expect_backing_file: if 'backing file' not in ret: test.fail("The disk image doesn't have backing files") else: logging.debug("The actual qemu-img output:%s\n", ret) else: if 'backing file' in ret: test.fail("The disk image still have backing files") else: logging.debug("The actual qemu-img output:%s\n", ret) # MAIN TEST CODE ### # Process cartesian parameters vm_name = params.get("main_vm") vm = env.get_vm(vm_name) snapshot_take = int(params.get("snapshot_take", '0')) needs_agent = "yes" == params.get("needs_agent", "yes") replace_vm_disk = "yes" == params.get("replace_vm_disk", "no") snap_in_mirror = "yes" == params.get("snap_in_mirror", 'no') snap_in_mirror_err = "yes" == params.get("snap_in_mirror_err", 'no') bandwidth = params.get("bandwidth", None) with_timeout = ("yes" == params.get("with_timeout_option", "no")) status_error = ("yes" == params.get("status_error", "no")) base_option = params.get("base_option", None) keep_relative = "yes" == params.get("keep_relative", 'no') virsh_dargs = {'debug': True} # Check whether qemu-img need add -U suboption since locking feature was added afterwards qemu-2.10 qemu_img_locking_feature_support = libvirt_storage.check_qemu_image_lock_support() backing_file_relative_path = "yes" == params.get("backing_file_relative_path", "no") # Process domain disk device parameters disk_type = params.get("disk_type") disk_target = params.get("disk_target", 'vda') disk_src_protocol = params.get("disk_source_protocol") restart_tgtd = params.get("restart_tgtd", "no") vol_name = params.get("vol_name") tmp_dir = data_dir.get_data_dir() pool_name = params.get("pool_name", "gluster-pool") brick_path 
= os.path.join(tmp_dir, pool_name) # A backup of original vm vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) logging.debug("original xml is %s", vmxml_backup) # Abort the test if there are snapshots already exsiting_snaps = virsh.snapshot_list(vm_name) if len(exsiting_snaps) != 0: test.fail("There are snapshots created for %s already" % vm_name) snapshot_external_disks = [] # Prepare a blank params to confirm if delete the configure at the end of the test ceph_cfg = "" try: if disk_src_protocol == 'iscsi' and disk_type == 'network': if not libvirt_version.version_compare(1, 0, 4): test.cancel("'iscsi' disk doesn't support in" " current libvirt version.") if disk_src_protocol == 'gluster': if not libvirt_version.version_compare(1, 2, 7): test.cancel("Snapshot on glusterfs not" " support in current " "version. Check more info " " with https://bugzilla.re" "dhat.com/show_bug.cgi?id=" "1017289") # Set vm xml and guest agent if replace_vm_disk: if disk_src_protocol == "rbd" and disk_type == "network": src_host = params.get("disk_source_host", "EXAMPLE_HOSTS") mon_host = params.get("mon_host", "EXAMPLE_MON_HOST") # Create config file if it doesn't exist ceph_cfg = ceph.create_config_file(mon_host) if src_host.count("EXAMPLE") or mon_host.count("EXAMPLE"): test.cancel("Please provide ceph host first.") detected_distro = distro.detect() rbd_img_prefix = '_'.join(['rbd', detected_distro.name, detected_distro.version, detected_distro.release, detected_distro.arch]) params.update( {"disk_source_name": os.path.join( pool_name, rbd_img_prefix + '.img')}) if utils_package.package_install(["ceph-common"]): ceph.rbd_image_rm( mon_host, *params.get("disk_source_name").split('/')) else: test.error('Failed to install ceph-common package.') if backing_file_relative_path: if vm.is_alive(): vm.destroy(gracefully=False) first_src_file = get_first_disk_source() blk_source_image = os.path.basename(first_src_file) blk_source_folder = os.path.dirname(first_src_file) replace_disk_image = make_relative_path_backing_files() params.update({'disk_source_name': replace_disk_image, 'disk_type': 'file', 'disk_src_protocol': 'file'}) vm.start() libvirt.set_vm_disk(vm, params, tmp_dir) if needs_agent: vm.prepare_guest_agent() # The first disk is supposed to include OS # We will perform blockpull operation for it. 
first_disk = vm.get_first_disk_devices() blk_source = first_disk['source'] blk_target = first_disk['target'] snapshot_flag_files = [] # get a vm session before snapshot session = vm.wait_for_login() # do snapshot make_disk_snapshot(snapshot_take) vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) logging.debug("The domain xml after snapshot is %s" % vmxml) # snapshot src file list snap_src_lst = [blk_source] snap_src_lst += snapshot_external_disks if snap_in_mirror: blockpull_options = "--bandwidth 1" else: blockpull_options = "--wait --verbose" if with_timeout: blockpull_options += " --timeout 1" if bandwidth: blockpull_options += " --bandwidth %s" % bandwidth if base_option == "async": blockpull_options += " --async" base_image = None base_index = None if (libvirt_version.version_compare(1, 2, 4) or disk_src_protocol == 'gluster'): # For libvirt is older version than 1.2.4 or source protocol is gluster # there are various base image,which depends on base option:shallow,base,top respectively if base_option == "shallow": base_index = 1 base_image = "%s[%s]" % (disk_target, base_index) elif base_option == "base": base_index = 2 base_image = "%s[%s]" % (disk_target, base_index) elif base_option == "top": base_index = 0 base_image = "%s[%s]" % (disk_target, base_index) else: if base_option == "shallow": base_image = snap_src_lst[3] elif base_option == "base": base_image = snap_src_lst[2] elif base_option == "top": base_image = snap_src_lst[4] if base_option and base_image: blockpull_options += " --base %s" % base_image if keep_relative: blockpull_options += " --keep-relative" if backing_file_relative_path: # Use block commit to shorten previous snapshots. blockcommit_options = " --active --verbose --shallow --pivot --keep-relative" for count in range(1, snapshot_take + 1): res = virsh.blockcommit(vm_name, blk_target, blockcommit_options, **virsh_dargs) libvirt.check_exit_status(res, status_error) #Use block pull with --keep-relative flag,and reset base_index to 2. base_index = 2 for count in range(1, snapshot_take): if count >= 3: if libvirt_version.version_compare(6, 0, 0): break # If block pull operations are more than or equal to 3, # it need reset base_index to 1. It only affects the test # of libvirt < 6.0.0 base_index = 1 base_image = "%s[%s]" % (disk_target, base_index) blockpull_options = " --wait --verbose --base %s --keep-relative" % base_image res = virsh.blockpull(vm_name, blk_target, blockpull_options, **virsh_dargs) libvirt.check_exit_status(res, status_error) if libvirt_version.version_compare(6, 0, 0): base_index += 1 # Check final backing chain files. 
check_chain_backing_files(blk_source_image, True) return # Run test case result = virsh.blockpull(vm_name, blk_target, blockpull_options, **virsh_dargs) status = result.exit_status # If pull job aborted as timeout, the exit status is different # on RHEL6(0) and RHEL7(1) if with_timeout and 'Pull aborted' in result.stdout.strip(): if libvirt_version.version_compare(1, 1, 1): status_error = True else: status_error = False # Check status_error libvirt.check_exit_status(result, status_error) if not status and not with_timeout: if snap_in_mirror: snap_mirror_path = "%s/snap_mirror" % tmp_dir snap_options = "--diskspec vda,snapshot=external," snap_options += "file=%s --disk-only" % snap_mirror_path snapshot_external_disks.append(snap_mirror_path) ret = virsh.snapshot_create_as(vm_name, snap_options, ignore_status=True, debug=True) libvirt.check_exit_status(ret, snap_in_mirror_err) return vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disks = vmxml.devices.by_device_tag('disk') for disk in disks: if disk.target['dev'] != blk_target: continue else: disk_xml = disk.xmltreefile break logging.debug("after pull the disk xml is: %s" % disk_xml) if libvirt_version.version_compare(1, 2, 4): err_msg = "Domain image backing chain check failed" if not base_option or "async" in base_option: chain_lst = snap_src_lst[-1:] ret = check_chain_xml(disk_xml, chain_lst) if not ret: test.fail(err_msg) elif "base" or "shallow" in base_option: if not base_index and base_image: base_index = chain_lst.index(base_image) chain_lst = snap_src_lst[:base_index][::-1] if not libvirt_version.version_compare(6, 0, 0): chain_lst = snap_src_lst[::-1][base_index:] chain_lst.insert(0, snap_src_lst[-1]) ret = check_chain_xml(disk_xml, chain_lst) if not ret: test.fail(err_msg) # If base image is the top layer of snapshot chain, # virsh blockpull should fail, return directly if base_option == "top": return # Check flag files for flag in snapshot_flag_files: status, output = session.cmd_status_output("cat %s" % flag) if status: test.fail("blockpull failed: %s" % output) finally: # Remove ceph configure file if created if ceph_cfg: os.remove(ceph_cfg) if vm.is_alive(): vm.destroy(gracefully=False) # Recover xml of vm. vmxml_backup.sync("--snapshots-metadata") # Clean ceph image if used in test if 'mon_host' in locals(): if utils_package.package_install(["ceph-common"]): disk_source_name = params.get("disk_source_name") cmd = ("rbd -m {0} info {1} && rbd -m {0} rm " "{1}".format(mon_host, disk_source_name)) cmd_result = process.run(cmd, ignore_status=True, shell=True) logging.debug("result of rbd removal: %s", cmd_result) else: logging.debug('Failed to install ceph-common to clean ceph.') if not disk_src_protocol or disk_src_protocol != 'gluster': for disk in snapshot_external_disks: if os.path.exists(disk): os.remove(disk) if backing_file_relative_path: libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup) process.run("cd %s && rm -rf b c d" % blk_source_folder, shell=True) libvirtd = utils_libvirtd.Libvirtd() if disk_src_protocol == 'iscsi': libvirt.setup_or_cleanup_iscsi(is_setup=False, restart_tgtd=restart_tgtd) elif disk_src_protocol == 'gluster': logging.info("clean gluster env") gluster.setup_or_cleanup_gluster(False, brick_path=brick_path, **params) libvirtd.restart() elif disk_src_protocol == 'netfs': restore_selinux = params.get('selinux_status_bak') libvirt.setup_or_cleanup_nfs(is_setup=False, restore_selinux=restore_selinux)
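# A minimal, hedged sketch of the core operation under test: pulling one
# layer of an external snapshot chain on target vda down into the active
# image. The domain name and the "vda[1]" index are placeholders that follow
# the base_image convention used above:
def example_blockpull(vm_name):
    res = virsh.blockpull(vm_name, "vda",
                          "--wait --verbose --base vda[1]",
                          debug=True)
    libvirt.check_exit_status(res)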
def run(test, params, env): """ Test virsh snapshot command when disk in all kinds of type. (1). Init the variables from params. (2). Create a image by specifice format. (3). Attach disk to vm. (4). Snapshot create. (5). Snapshot revert. (6). cleanup. """ # Init variables. vm_name = params.get("main_vm", "avocado-vt-vm1") vm = env.get_vm(vm_name) vm_state = params.get("vm_state", "running") image_format = params.get("snapshot_image_format", "qcow2") snapshot_del_test = "yes" == params.get("snapshot_del_test", "no") status_error = ("yes" == params.get("status_error", "no")) snapshot_from_xml = ("yes" == params.get("snapshot_from_xml", "no")) snapshot_current = ("yes" == params.get("snapshot_current", "no")) snapshot_revert_paused = ("yes" == params.get("snapshot_revert_paused", "no")) replace_vm_disk = "yes" == params.get("replace_vm_disk", "no") disk_source_protocol = params.get("disk_source_protocol") vol_name = params.get("vol_name") tmp_dir = data_dir.get_tmp_dir() pool_name = params.get("pool_name", "gluster-pool") brick_path = os.path.join(tmp_dir, pool_name) multi_gluster_disks = "yes" == params.get("multi_gluster_disks", "no") transport = params.get("transport", "") # Pool variables. snapshot_with_pool = "yes" == params.get("snapshot_with_pool", "no") pool_name = params.get("pool_name") pool_type = params.get("pool_type") pool_target = params.get("pool_target") emulated_image = params.get("emulated_image", "emulated-image") vol_format = params.get("vol_format") lazy_refcounts = "yes" == params.get("lazy_refcounts") options = params.get("snapshot_options", "") export_options = params.get("export_options", "rw,no_root_squash") # Set volume xml attribute dictionary, extract all params start with 'vol_' # which are for setting volume xml, except 'lazy_refcounts'. vol_arg = {} for key in list(params.keys()): if key.startswith('vol_'): if key[4:] in ['capacity', 'allocation', 'owner', 'group']: vol_arg[key[4:]] = int(params[key]) else: vol_arg[key[4:]] = params[key] vol_arg['lazy_refcounts'] = lazy_refcounts supported_pool_list = [ "dir", "fs", "netfs", "logical", "iscsi", "disk", "gluster" ] if snapshot_with_pool: if pool_type not in supported_pool_list: test.cancel("%s not in support list %s" % (pool_target, supported_pool_list)) # Do xml backup for final recovery vmxml_backup = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name) # Some variable for xmlfile of snapshot. snapshot_memory = params.get("snapshot_memory", "internal") snapshot_disk = params.get("snapshot_disk", "internal") no_memory_snap = "yes" == params.get("no_memory_snap", "no") # Skip 'qed' cases for libvirt version greater than 1.1.0 if libvirt_version.version_compare(1, 1, 0): if vol_format == "qed" or image_format == "qed": test.cancel("QED support changed, check bug: " "https://bugzilla.redhat.com/show_bug.cgi" "?id=731570") if not libvirt_version.version_compare(1, 2, 7): # As bug 1017289 closed as WONTFIX, the support only # exist on 1.2.7 and higher if disk_source_protocol == 'gluster': test.cancel("Snapshot on glusterfs not support in " "current version. Check more info with " "https://bugzilla.redhat.com/buglist.cgi?" 
"bug_id=1017289,1032370") # This is brought by new feature:block-dev if libvirt_version.version_compare(6, 0, 0) and transport == "rdma": test.cancel("transport protocol 'rdma' is not yet supported") # Init snapshot_name snapshot_name = None snapshot_external_disk = [] snapshot_xml_path = None del_status = None image = None pvt = None # Get a tmp dir snap_cfg_path = "/var/lib/libvirt/qemu/snapshot/%s/" % vm_name try: if vm.is_dead(): vm.start() vm.wait_for_login().close() if replace_vm_disk: utlv.set_vm_disk(vm, params, tmp_dir) if multi_gluster_disks: new_params = params.copy() new_params["pool_name"] = "gluster-pool2" new_params["vol_name"] = "gluster-vol2" new_params["disk_target"] = "vdf" new_params["image_convert"] = 'no' utlv.set_vm_disk(vm, new_params, tmp_dir) if snapshot_with_pool: # Create dst pool for create attach vol img pvt = utlv.PoolVolumeTest(test, params) kwargs = params.copy() update_items = { "image_size": "1G", "emulated_image": emulated_image, "source_name": vol_name, "pre_disk_vol": ["20M"], "export_options": export_options } kwargs.update(update_items) kwargs.pop("vol_name") pvt.pre_pool(**kwargs) if pool_type in ["iscsi", "disk"]: # iscsi and disk pool did not support create volume in libvirt, # logical pool could use libvirt to create volume but volume # format is not supported and will be 'raw' as default. pv = libvirt_storage.PoolVolume(pool_name) vols = list(pv.list_volumes().keys()) if vols: vol_name = vols[0] else: test.cancel("No volume in pool: %s" % pool_name) else: # Set volume xml file volxml = libvirt_xml.VolXML() newvol = volxml.new_vol(**vol_arg) vol_xml = newvol['xml'] # Run virsh_vol_create to create vol logging.debug("create volume from xml: %s" % newvol.xmltreefile) cmd_result = virsh.vol_create(pool_name, vol_xml, ignore_status=True, debug=True) if cmd_result.exit_status: test.cancel("Failed to create attach volume.") cmd_result = virsh.vol_path(vol_name, pool_name, debug=True) if cmd_result.exit_status: test.cancel("Failed to get volume path from pool.") img_path = cmd_result.stdout.strip() if pool_type in ["logical", "iscsi", "disk"]: # Use qemu-img to format logical, iscsi and disk block device if vol_format != "raw": cmd = "qemu-img create -f %s %s 10M" % (vol_format, img_path) cmd_result = process.run(cmd, ignore_status=True, shell=True) if cmd_result.exit_status: test.cancel("Failed to format volume, %s" % cmd_result.stdout_text.strip()) extra = "--persistent --subdriver %s" % vol_format else: # Create a image. params['image_name'] = "snapshot_test" params['image_format'] = image_format params['image_size'] = "1M" image = qemu_storage.QemuImg(params, tmp_dir, "snapshot_test") img_path, _ = image.create(params) extra = "--persistent --subdriver %s" % image_format if not multi_gluster_disks: # Do the attach action. out = process.run("qemu-img info %s" % img_path, shell=True) logging.debug("The img info is:\n%s" % out.stdout.strip()) result = virsh.attach_disk(vm_name, source=img_path, target="vdf", extra=extra, debug=True) if result.exit_status: test.error("Failed to attach disk %s to VM." "Detail: %s." % (img_path, result.stderr)) # Create snapshot. 
if snapshot_from_xml: snap_xml = libvirt_xml.SnapshotXML() snapshot_name = "snapshot_test" snap_xml.snap_name = snapshot_name snap_xml.description = "Snapshot Test" if not no_memory_snap: if "--disk-only" not in options: if snapshot_memory == "external": memory_external = os.path.join(tmp_dir, "snapshot_memory") snap_xml.mem_snap_type = snapshot_memory snap_xml.mem_file = memory_external snapshot_external_disk.append(memory_external) else: snap_xml.mem_snap_type = snapshot_memory # Add all disks into xml file. vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name) disks = vmxml.devices.by_device_tag('disk') # Remove non-storage disk such as 'cdrom' for disk in disks: if disk.device != 'disk': disks.remove(disk) new_disks = [] for src_disk_xml in disks: disk_xml = snap_xml.SnapDiskXML() disk_xml.xmltreefile = src_disk_xml.xmltreefile del disk_xml.device del disk_xml.address disk_xml.snapshot = snapshot_disk disk_xml.disk_name = disk_xml.target['dev'] # Only qcow2 works as external snapshot file format, update it # here driver_attr = disk_xml.driver driver_attr.update({'type': 'qcow2'}) disk_xml.driver = driver_attr if snapshot_disk == 'external': new_attrs = disk_xml.source.attrs if 'file' in disk_xml.source.attrs: new_file = "%s.snap" % disk_xml.source.attrs['file'] snapshot_external_disk.append(new_file) new_attrs.update({'file': new_file}) hosts = None elif 'name' in disk_xml.source.attrs: new_name = "%s.snap" % disk_xml.source.attrs['name'] new_attrs.update({'name': new_name}) hosts = disk_xml.source.hosts elif ('dev' in disk_xml.source.attrs and disk_xml.type_name == 'block'): # Use local file as external snapshot target for block type. # As block device will be treat as raw format by default, # it's not fit for external disk snapshot target. A work # around solution is use qemu-img again with the target. disk_xml.type_name = 'file' del new_attrs['dev'] new_file = "%s/blk_src_file.snap" % tmp_dir snapshot_external_disk.append(new_file) new_attrs.update({'file': new_file}) hosts = None new_src_dict = {"attrs": new_attrs} if hosts: new_src_dict.update({"hosts": hosts}) disk_xml.source = disk_xml.new_disk_source(**new_src_dict) else: del disk_xml.source new_disks.append(disk_xml) snap_xml.set_disks(new_disks) snapshot_xml_path = snap_xml.xml logging.debug("The snapshot xml is: %s" % snap_xml.xmltreefile) options += " --xmlfile %s " % snapshot_xml_path if vm_state == "shut off": vm.destroy(gracefully=False) snapshot_result = virsh.snapshot_create(vm_name, options, debug=True) out_err = snapshot_result.stderr.strip() if snapshot_result.exit_status: if status_error: return else: if re.search( "live disk snapshot not supported with this " "QEMU binary", out_err): test.cancel(out_err) if libvirt_version.version_compare(1, 2, 5): # As commit d2e668e in 1.2.5, internal active snapshot # without memory state is rejected. Handle it as SKIP # for now. This could be supportted in future by bug: # https://bugzilla.redhat.com/show_bug.cgi?id=1103063 if re.search( "internal snapshot of a running VM" + " must include the memory state", out_err): logging.warning("Got expected error. Please check " "Bug #1083345, %s" % out_err) return test.fail("Failed to create snapshot. Error:%s." % out_err) else: snapshot_result = virsh.snapshot_create(vm_name, options, debug=True) if snapshot_result.exit_status: if status_error: return else: test.fail("Failed to create snapshot. Error:%s." 
% snapshot_result.stderr.strip()) snapshot_name = re.search("\d+", snapshot_result.stdout.strip()).group(0) if snapshot_current: snap_xml = libvirt_xml.SnapshotXML() new_snap = snap_xml.new_from_snapshot_dumpxml( vm_name, snapshot_name) # update an element new_snap.creation_time = snapshot_name snapshot_xml_path = new_snap.xml options += "--redefine %s --current" % snapshot_xml_path snapshot_result = virsh.snapshot_create(vm_name, options, debug=True) if snapshot_result.exit_status: test.fail("Failed to create snapshot --current." "Error:%s." % snapshot_result.stderr.strip()) if status_error: if not snapshot_del_test: test.fail("Success to create snapshot in negative" " case\nDetail: %s" % snapshot_result) # Touch a file in VM. if vm.is_dead(): vm.start() session = vm.wait_for_login() # Init a unique name for tmp_file. tmp_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"), dir="/tmp") tmp_file_path = tmp_file.name tmp_file.close() echo_cmd = "echo SNAPSHOT_DISK_TEST >> %s" % tmp_file_path status, output = session.cmd_status_output(echo_cmd) logging.debug("The echo output in domain is: '%s'", output) if status: test.fail("'%s' run failed with '%s'" % (tmp_file_path, output)) status, output = session.cmd_status_output("cat %s" % tmp_file_path) logging.debug("File created with content: '%s'", output) session.close() # As only internal snapshot revert works now, let's only do revert # with internal, and move the all skip external cases back to pass. # After external also supported, just move the following code back. if snapshot_disk == 'internal': # Destroy vm for snapshot revert. if not libvirt_version.version_compare(1, 2, 3): virsh.destroy(vm_name) # Revert snapshot. revert_options = "" if snapshot_revert_paused: revert_options += " --paused" revert_result = virsh.snapshot_revert(vm_name, snapshot_name, revert_options, debug=True) if revert_result.exit_status: # Attempts to revert external snapshots will FAIL with an error # "revert to external disk snapshot not supported yet" or "revert # to external snapshot not supported yet" since d410e6f. Thus, # let's check for that and handle as a SKIP for now. Check bug: # https://bugzilla.redhat.com/show_bug.cgi?id=1071264 if re.search( "revert to external \w* ?snapshot not supported yet", revert_result.stderr): test.cancel(revert_result.stderr.strip()) else: test.fail("Revert snapshot failed. %s" % revert_result.stderr.strip()) if vm.is_dead(): test.fail("Revert snapshot failed.") if snapshot_revert_paused: if vm.is_paused(): vm.resume() else: test.fail("Revert command successed, but VM is not " "paused after reverting with --paused" " option.") # login vm. session = vm.wait_for_login() # Check the result of revert. status, output = session.cmd_status_output("cat %s" % tmp_file_path) logging.debug("After revert cat file output='%s'", output) if not status: test.fail("Tmp file exists, revert failed.") # Close the session. session.close() # Test delete snapshot without "--metadata", delete external disk # snapshot will fail for now. # Only do this when snapshot creat succeed which filtered in cfg file. 
if snapshot_del_test: if snapshot_name: del_result = virsh.snapshot_delete(vm_name, snapshot_name, debug=True, ignore_status=True) del_status = del_result.exit_status snap_xml_path = snap_cfg_path + "%s.xml" % snapshot_name if del_status: if not status_error: test.fail("Failed to delete snapshot.") else: if not os.path.exists(snap_xml_path): test.fail("Snapshot xml file %s missing" % snap_xml_path) else: if status_error: err_msg = "Snapshot delete succeed but expect fail." test.fail(err_msg) else: if os.path.exists(snap_xml_path): test.fail("Snapshot xml file %s still" % snap_xml_path + " exist") finally: if vm.is_alive(): vm.destroy(gracefully=False) virsh.detach_disk(vm_name, target="vdf", extra="--persistent") if image: image.remove() if del_status and snapshot_name: virsh.snapshot_delete(vm_name, snapshot_name, "--metadata") for disk in snapshot_external_disk: if os.path.exists(disk): os.remove(disk) vmxml_backup.sync("--snapshots-metadata") libvirtd = utils_libvirtd.Libvirtd() if disk_source_protocol == 'gluster': gluster.setup_or_cleanup_gluster(False, brick_path=brick_path, **params) if multi_gluster_disks: brick_path = os.path.join(tmp_dir, "gluster-pool2") mul_kwargs = params.copy() mul_kwargs.update({"vol_name": "gluster-vol2"}) gluster.setup_or_cleanup_gluster(False, brick_path=brick_path, **mul_kwargs) libvirtd.restart() if snapshot_xml_path: if os.path.exists(snapshot_xml_path): os.unlink(snapshot_xml_path) if pvt: try: pvt.cleanup_pool(**kwargs) except exceptions.TestFail as detail: libvirtd.restart() logging.error(str(detail))
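# --------------------------------------------------------------------------
# Illustrative sketch (not part of the test above): the external, disk-only
# snapshot flow that the test drives through a full SnapshotXML can also be
# expressed with a single --diskspec entry.  The helper name, the default
# target and the overlay directory are assumptions for illustration only.
import os

from virttest import virsh


def take_external_disk_snapshot(vm_name, snap_name, target="vda",
                                snap_dir="/var/tmp"):
    """Create an external disk-only snapshot and return the overlay path."""
    overlay = os.path.join(snap_dir, "%s_%s.snap" % (vm_name, snap_name))
    options = ("%s --disk-only --diskspec %s,snapshot=external,file=%s"
               % (snap_name, target, overlay))
    result = virsh.snapshot_create_as(vm_name, options, debug=True)
    if result.exit_status:
        # in a real test this would be test.fail()
        raise RuntimeError("snapshot-create-as failed: %s"
                           % result.stderr.strip())
    return overlay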
def run(test, params, env): """ Test virsh domblkthreshold option. 1.Prepare backend storage (file/luks/iscsi/gluster/ceph/nbd) 2.Start VM 3.Set domblkthreshold on target device in VM 4.Trigger one threshold event 5.Check threshold event is received as expected 6.Clean up test environment """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) virsh_dargs = {'debug': True, 'ignore_status': True} block_threshold_timeout = params.get("block_threshold_timeout", "120") event_type = params.get("event_type", "block-threshold") block_threshold_option = params.get("block_threshold_option", "--loop") def set_vm_block_domblkthreshold(vm_name, target_device, threshold, **dargs): """ Set VM block threshold on specific target device. :param vm_name: VM name. :param target_device: target device in VM :param threshold: threshold value with specific unit such as 100M :param dargs: mutable parameter dict """ ret = virsh.domblkthreshold(vm_name, target_device, threshold, **dargs) libvirt.check_exit_status(ret) def trigger_block_threshold_event(vm_domain, target): """ Trigger block threshold event. :param vm_domain: VM name :param target: Disk dev in VM. """ try: session = vm_domain.wait_for_login() time.sleep(10) cmd = ("fdisk -l /dev/{0} && mkfs.ext4 -F /dev/{0} && " " mount /dev/{0} /mnt && " " dd if=/dev/urandom of=/mnt/bigfile bs=1M count=101" .format(target)) status, output = session.cmd_status_output(cmd) if status: test.error("Failed to mount and fill data in VM: %s" % output) except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e: logging.error(str(e)) raise def check_threshold_event(vm_name, event_type, event_timeout, options, **dargs): """ Check threshold event. :param vm_name: VM name :param event_type: event type. :param event_timeout: event timeout value :param options: event option :dargs: dynamic parameters. """ ret = virsh.event(vm_name, event_type, event_timeout, options, **dargs) logging.debug(ret.stdout_text) libvirt.check_exit_status(ret) def create_vol(p_name, vol_params): """ Create volume. :param p_name: Pool name. :param vol_params: Volume parameters dict. """ # Clean up dirty volumes if pool has. pv = libvirt_storage.PoolVolume(p_name) vol_name_list = pv.list_volumes() for vol_name in vol_name_list: pv.delete_volume(vol_name) volxml = vol_xml.VolXML() v_xml = volxml.new_vol(**vol_params) v_xml.xmltreefile.write() ret = virsh.vol_create(p_name, v_xml.xml, **virsh_dargs) libvirt.check_exit_status(ret) def trigger_block_commit(vm_name, target, blockcommit_options, **virsh_dargs): """ Trigger blockcommit. :param vm_name: VM name :param target: Disk dev in VM. :param blockcommit_options: blockcommit option :param virsh_dargs: additional parameters """ result = virsh.blockcommit(vm_name, target, blockcommit_options, ignore_status=False, **virsh_dargs) def trigger_block_copy(vm_name, target, dest_path, blockcopy_options, **virsh_dargs): """ Trigger blockcopy :param vm_name: string, VM name :param target: string, target disk :param dest_path: string, the path of copied disk :param blockcopy_options: string, some options applied :param virsh_dargs: additional options """ result = virsh.blockcopy(vm_name, target, dest_path, blockcopy_options, **virsh_dargs) libvirt.check_exit_status(result) def trigger_mirror_threshold_event(vm_domain, target): """ Trigger mirror mode block threshold event. :param vm_domain: VM name :param target: Disk target in VM. 
""" try: session = vm_domain.wait_for_login() # Sleep 10 seconds to let wait for events thread start first in main thread time.sleep(10) cmd = ("dd if=/dev/urandom of=file bs=1G count=3") status, output = session.cmd_status_output(cmd) if status: test.error("Failed to fill data in VM target: %s with %s" % (target, output)) except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e: logging.error(str(e)) raise except Exception as ex: raise def get_mirror_source_index(vm_name, dev_index=0): """ Get mirror source index :param vm_name: VM name :param dev_index: Disk device index. :return mirror source index in integer """ disk_list = vm_xml.VMXML.get_disk_source(vm_name) disk_mirror = disk_list[dev_index].find('mirror') if disk_mirror is None: test.fail("Failed to get disk mirror") disk_mirror_source = disk_mirror.find('source') return int(disk_mirror_source.get('index')) # Disk specific attributes. device = params.get("virt_disk_device", "disk") device_target = params.get("virt_disk_device_target", "vdd") device_format = params.get("virt_disk_device_format", "raw") device_type = params.get("virt_disk_device_type", "file") device_bus = params.get("virt_disk_device_bus", "virtio") backend_storage_type = params.get("backend_storage_type", "iscsi") # Backend storage auth info storage_size = params.get("storage_size", "1G") enable_auth = "yes" == params.get("enable_auth") use_auth_usage = "yes" == params.get("use_auth_usage") auth_sec_usage_type = params.get("auth_sec_usage_type", "iscsi") auth_sec_usage_target = params.get("auth_sec_usage_target", "libvirtiscsi") auth_sec_uuid = "" luks_sec_uuid = "" disk_auth_dict = {} disk_encryption_dict = {} status_error = "yes" == params.get("status_error") define_error = "yes" == params.get("define_error") mirror_mode_blockcommit = "yes" == params.get("mirror_mode_blockcommit", "no") mirror_mode_blockcopy = "yes" == params.get("mirror_mode_blockcopy", "no") default_snapshot_test = "yes" == params.get("default_snapshot_test", "no") block_threshold_value = params.get("block_threshold_value", "100M") snapshot_external_disks = [] tmp_dir = data_dir.get_tmp_dir() dest_path = params.get("dest_path", "/var/lib/libvirt/images/newclone") pvt = None # Initialize one NbdExport object nbd = None img_file = os.path.join(data_dir.get_tmp_dir(), "%s_test.img" % vm_name) if ((backend_storage_type == "luks") and not libvirt_version.version_compare(3, 9, 0)): test.cancel("Cannot support <encryption> inside disk in this libvirt version.") # Start VM and get all partitions in VM. if vm.is_dead(): vm.start() session = vm.wait_for_login() old_parts = utils_disk.get_parts_list(session) session.close() vm.destroy(gracefully=False) # Back up xml file. vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) # Additional disk images. disks_img = [] try: # Clean up dirty secrets in test environments if there are. 
utils_secret.clean_up_secrets() # Setup backend storage if backend_storage_type == "file": image_filename = params.get("image_filename", "raw.img") disk_path = os.path.join(data_dir.get_tmp_dir(), image_filename) device_source = libvirt.create_local_disk(backend_storage_type, disk_path, storage_size, device_format) disks_img.append({"format": device_format, "source": disk_path, "path": disk_path}) disk_src_dict = {'attrs': {'file': device_source, 'type_name': 'file'}} # Setup backend storage elif backend_storage_type == "luks": luks_encrypt_passwd = params.get("luks_encrypt_passwd", "password") luks_secret_passwd = params.get("luks_secret_passwd", "password") # Create secret luks_sec_uuid = libvirt.create_secret(params) logging.debug("A secret created with uuid = '%s'", luks_sec_uuid) virsh.secret_set_value(luks_sec_uuid, luks_secret_passwd, encode=True, ignore_status=False, debug=True) image_filename = params.get("image_filename", "raw.img") device_source = os.path.join(data_dir.get_tmp_dir(), image_filename) disks_img.append({"format": device_format, "source": device_source, "path": device_source}) disk_src_dict = {'attrs': {'file': device_source, 'type_name': 'file'}} disk_encryption_dict = {"encryption": "luks", "secret": {"type": "passphrase", "uuid": luks_sec_uuid}} cmd = ("qemu-img create -f luks " "--object secret,id=sec0,data=`printf '%s' | base64`,format=base64 " "-o key-secret=sec0 %s %s" % (luks_encrypt_passwd, device_source, storage_size)) if process.system(cmd, shell=True): test.error("Can't create a luks encrypted img by qemu-img") elif backend_storage_type == "iscsi": iscsi_host = params.get("iscsi_host") iscsi_port = params.get("iscsi_port") if device_type == "block": device_source = libvirt.setup_or_cleanup_iscsi(is_setup=True) disk_src_dict = {'attrs': {'dev': device_source}} elif device_type == "network": chap_user = params.get("chap_user", "redhat") chap_passwd = params.get("chap_passwd", "password") auth_sec_usage = params.get("auth_sec_usage", "libvirtiscsi") auth_sec_dict = {"sec_usage": "iscsi", "sec_target": auth_sec_usage} auth_sec_uuid = libvirt.create_secret(auth_sec_dict) # Set password of auth secret (not luks encryption secret) virsh.secret_set_value(auth_sec_uuid, chap_passwd, encode=True, ignore_status=False, debug=True) iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi( is_setup=True, is_login=False, image_size=storage_size, chap_user=chap_user, chap_passwd=chap_passwd, portal_ip=iscsi_host) # ISCSI auth attributes for disk xml disk_auth_dict = {"auth_user": chap_user, "secret_type": auth_sec_usage_type, "secret_usage": auth_sec_usage_target} device_source = "iscsi://%s:%s/%s/%s" % (iscsi_host, iscsi_port, iscsi_target, lun_num) disk_src_dict = {"attrs": {"protocol": "iscsi", "name": "%s/%s" % (iscsi_target, lun_num)}, "hosts": [{"name": iscsi_host, "port": iscsi_port}]} elif backend_storage_type == "gluster": gluster_vol_name = params.get("gluster_vol_name", "gluster_vol1") gluster_pool_name = params.get("gluster_pool_name", "gluster_pool1") gluster_img_name = params.get("gluster_img_name", "gluster1.img") gluster_host_ip = gluster.setup_or_cleanup_gluster( is_setup=True, vol_name=gluster_vol_name, pool_name=gluster_pool_name, **params) device_source = "gluster://%s/%s/%s" % (gluster_host_ip, gluster_vol_name, gluster_img_name) cmd = ("qemu-img create -f %s " "%s %s" % (device_format, device_source, storage_size)) if process.system(cmd, shell=True): test.error("Can't create a gluster type img by qemu-img") disk_src_dict = {"attrs": {"protocol": 
"gluster", "name": "%s/%s" % (gluster_vol_name, gluster_img_name)}, "hosts": [{"name": gluster_host_ip, "port": "24007"}]} elif backend_storage_type == "ceph": ceph_host_ip = params.get("ceph_host_ip", "EXAMPLE_HOSTS") ceph_mon_ip = params.get("ceph_mon_ip", "EXAMPLE_MON_HOST") ceph_host_port = params.get("ceph_host_port", "EXAMPLE_PORTS") ceph_disk_name = params.get("ceph_disk_name", "EXAMPLE_SOURCE_NAME") ceph_client_name = params.get("ceph_client_name") ceph_client_key = params.get("ceph_client_key") ceph_auth_user = params.get("ceph_auth_user") ceph_auth_key = params.get("ceph_auth_key") enable_auth = "yes" == params.get("enable_auth") key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key") key_opt = "" # Prepare a blank params to confirm if delete the configure at the end of the test ceph_cfg = "" if not utils_package.package_install(["ceph-common"]): test.error("Failed to install ceph-common") # Create config file if it doesn't exist ceph_cfg = ceph.create_config_file(ceph_mon_ip) # If enable auth, prepare a local file to save key if ceph_client_name and ceph_client_key: with open(key_file, 'w') as f: f.write("[%s]\n\tkey = %s\n" % (ceph_client_name, ceph_client_key)) key_opt = "--keyring %s" % key_file auth_sec_dict = {"sec_usage": auth_sec_usage_type, "sec_name": "ceph_auth_secret"} auth_sec_uuid = libvirt.create_secret(auth_sec_dict) virsh.secret_set_value(auth_sec_uuid, ceph_auth_key, debug=True) disk_auth_dict = {"auth_user": ceph_auth_user, "secret_type": auth_sec_usage_type, "secret_uuid": auth_sec_uuid} else: test.error("No ceph client name/key provided.") device_source = "rbd:%s:mon_host=%s:keyring=%s" % (ceph_disk_name, ceph_mon_ip, key_file) cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm " "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name)) cmd_result = process.run(cmd, ignore_status=True, shell=True) logging.debug("pre clean up rbd disk if exists: %s", cmd_result) # Create an local image and make FS on it. 
disk_cmd = ("qemu-img create -f %s %s %s" % (device_format, img_file, storage_size)) process.run(disk_cmd, ignore_status=False, shell=True) # Convert the image to remote storage disk_path = ("rbd:%s:mon_host=%s" % (ceph_disk_name, ceph_mon_ip)) if ceph_client_name and ceph_client_key: disk_path += (":id=%s:key=%s" % (ceph_auth_user, ceph_auth_key)) rbd_cmd = ("rbd -m %s %s info %s 2> /dev/null|| qemu-img convert -O" " %s %s %s" % (ceph_mon_ip, key_opt, ceph_disk_name, device_format, img_file, disk_path)) process.run(rbd_cmd, ignore_status=False, shell=True) disk_src_dict = {"attrs": {"protocol": "rbd", "name": ceph_disk_name}, "hosts": [{"name": ceph_host_ip, "port": ceph_host_port}]} elif backend_storage_type == "nfs": pool_name = params.get("pool_name", "nfs_pool") pool_target = params.get("pool_target", "nfs_mount") pool_type = params.get("pool_type", "netfs") nfs_server_dir = params.get("nfs_server_dir", "nfs_server") emulated_image = params.get("emulated_image") image_name = params.get("nfs_image_name", "nfs.img") tmp_dir = data_dir.get_tmp_dir() pvt = libvirt.PoolVolumeTest(test, params) pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image) # Set virt_use_nfs virt_use_nfs = params.get("virt_use_nfs", "off") result = process.run("setsebool virt_use_nfs %s" % virt_use_nfs, shell=True) if result.exit_status: test.error("Failed to set virt_use_nfs value") nfs_mount_dir = os.path.join(tmp_dir, pool_target) device_source = nfs_mount_dir + image_name # Create one image on nfs server libvirt.create_local_disk("file", device_source, '1', "raw") disks_img.append({"format": device_format, "source": device_source, "path": device_source}) disk_src_dict = {'attrs': {'file': device_source, 'type_name': 'file'}} # Create dir based pool,and then create one volume on it. elif backend_storage_type == "dir": pool_name = params.get("pool_name", "dir_pool") pool_target = params.get("pool_target") pool_type = params.get("pool_type") emulated_image = params.get("emulated_image") image_name = params.get("dir_image_name", "luks_1.img") # Create and start dir_based pool. pvt = libvirt.PoolVolumeTest(test, params) if not os.path.exists(pool_target): os.mkdir(pool_target) pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image) sp = libvirt_storage.StoragePool() if not sp.is_pool_active(pool_name): sp.set_pool_autostart(pool_name) sp.start_pool(pool_name) # Create one volume on the pool. volume_name = params.get("vol_name") volume_alloc = params.get("vol_alloc") volume_cap_unit = params.get("vol_cap_unit") volume_cap = params.get("vol_cap") volume_target_path = params.get("sec_volume") volume_target_format = params.get("target_format") volume_target_encypt = params.get("target_encypt", "") volume_target_label = params.get("target_label") vol_params = {"name": volume_name, "capacity": int(volume_cap), "allocation": int(volume_alloc), "format": volume_target_format, "path": volume_target_path, "label": volume_target_label, "capacity_unit": volume_cap_unit} try: # If Libvirt version is lower than 2.5.0 # Creating luks encryption volume is not supported,so skip it. create_vol(pool_name, vol_params) except AssertionError as info: err_msgs = ("create: invalid option") if str(info).count(err_msgs): test.cancel("Creating luks encryption volume " "is not supported on this libvirt version") else: test.error("Failed to create volume." "Error: %s" % str(info)) disk_src_dict = {'attrs': {'file': volume_target_path}} device_source = volume_target_path elif backend_storage_type == "nbd": # Get server hostname. 
hostname = process.run('hostname', ignore_status=False, shell=True, verbose=True).stdout_text.strip() # Setup backend storage nbd_server_host = hostname nbd_server_port = params.get("nbd_server_port") image_path = params.get("emulated_image", "/var/lib/libvirt/images/nbdtest.img") # Create NbdExport object nbd = NbdExport(image_path, image_format=device_format, port=nbd_server_port) nbd.start_nbd_server() # Prepare disk source xml source_attrs_dict = {"protocol": "nbd"} disk_src_dict = {} disk_src_dict.update({"attrs": source_attrs_dict}) disk_src_dict.update({"hosts": [{"name": nbd_server_host, "port": nbd_server_port}]}) device_source = "nbd://%s:%s/%s" % (nbd_server_host, nbd_server_port, image_path) logging.debug("device source is: %s", device_source) # Add disk xml. vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disk_xml = Disk(type_name=device_type) disk_xml.device = device disk_xml.target = {"dev": device_target, "bus": device_bus} driver_dict = {"name": "qemu", "type": device_format} disk_xml.driver = driver_dict disk_source = disk_xml.new_disk_source(**disk_src_dict) if disk_auth_dict: logging.debug("disk auth dict is: %s" % disk_auth_dict) disk_xml.auth = disk_xml.new_auth(**disk_auth_dict) if disk_encryption_dict: disk_encryption_dict = {"encryption": "luks", "secret": {"type": "passphrase", "uuid": luks_sec_uuid}} disk_encryption = disk_xml.new_encryption(**disk_encryption_dict) disk_xml.encryption = disk_encryption disk_xml.source = disk_source logging.debug("new disk xml is: %s", disk_xml) # Sync VM xml except mirror_mode_blockcommit or mirror_mode_blockcopy if (not mirror_mode_blockcommit and not mirror_mode_blockcopy): vmxml.add_device(disk_xml) try: vmxml.sync() vm.start() vm.wait_for_login().close() except xcepts.LibvirtXMLError as xml_error: if not define_error: test.fail("Failed to define VM:\n%s", str(xml_error)) except virt_vm.VMStartError as details: # When use wrong password in disk xml for cold plug cases, # VM cannot be started if status_error: logging.info("VM failed to start as expected: %s", str(details)) else: test.fail("VM should start but failed: %s" % str(details)) func_name = trigger_block_threshold_event # Additional operations before set block threshold if backend_storage_type == "file": logging.info("Create snapshot...") snap_opt = " %s --disk-only " snap_opt += "%s,snapshot=external,file=%s" if default_snapshot_test: for index in range(1, 5): snapshot_name = "snapshot_%s" % index snap_path = "%s/%s_%s.snap" % (tmp_dir, vm_name, index) snapshot_external_disks.append(snap_path) snap_option = snap_opt % (snapshot_name, device_target, snap_path) virsh.snapshot_create_as(vm_name, snap_option, ignore_status=False, debug=True) if mirror_mode_blockcommit: if not libvirt_version.version_compare(6, 6, 0): test.cancel("Set threshold for disk mirroring feature is not supported on current version") vmxml.del_device(disk_xml) virsh.snapshot_create_as(vm_name, "--disk-only --no-metadata", ignore_status=False, debug=True) # Do active blockcommit in background. blockcommit_options = "--active" mirror_blockcommit_thread = threading.Thread(target=trigger_block_commit, args=(vm_name, 'vda', blockcommit_options,), kwargs={'debug': True}) mirror_blockcommit_thread.start() device_target = "vda[1]" func_name = trigger_mirror_threshold_event if mirror_mode_blockcopy: if not libvirt_version.version_compare(6, 6, 0): test.cancel("Set threshold for disk mirroring feature is not supported on current version") # Do transient blockcopy in backgroud. 
blockcopy_options = "--transient-job " # Do cleanup if os.path.exists(dest_path): libvirt.delete_local_disk("file", dest_path) mirror_blockcopy_thread = threading.Thread(target=trigger_block_copy, args=(vm_name, 'vda', dest_path, blockcopy_options,), kwargs={'debug': True}) mirror_blockcopy_thread.start() mirror_blockcopy_thread.join(10) device_target = "vda[%d]" % get_mirror_source_index(vm_name) func_name = trigger_mirror_threshold_event set_vm_block_domblkthreshold(vm_name, device_target, block_threshold_value, **{"debug": True}) cli_thread = threading.Thread(target=func_name, args=(vm, device_target)) cli_thread.start() check_threshold_event(vm_name, event_type, block_threshold_timeout, block_threshold_option, **{"debug": True}) finally: # Delete snapshots. if virsh.domain_exists(vm_name): #To delete snapshot, destroy VM first. if vm.is_alive(): vm.destroy() libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup) vmxml_backup.sync("--snapshots-metadata") if os.path.exists(img_file): libvirt.delete_local_disk("file", img_file) for img in disks_img: if os.path.exists(img["path"]): libvirt.delete_local_disk("file", img["path"]) for disk in snapshot_external_disks: libvirt.delete_local_disk('file', disk) if os.path.exists(dest_path): libvirt.delete_local_disk("file", dest_path) # Clean up backend storage if backend_storage_type == "iscsi": libvirt.setup_or_cleanup_iscsi(is_setup=False) elif backend_storage_type == "gluster": gluster.setup_or_cleanup_gluster(is_setup=False, vol_name=gluster_vol_name, pool_name=gluster_pool_name, **params) elif backend_storage_type == "ceph": # Remove ceph configure file if created. if ceph_cfg: os.remove(ceph_cfg) cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm " "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name)) cmd_result = process.run(cmd, ignore_status=True, shell=True) logging.debug("result of rbd removal: %s", cmd_result) if os.path.exists(key_file): os.remove(key_file) elif backend_storage_type == "nfs": result = process.run("setsebool virt_use_nfs off", shell=True) if result.exit_status: logging.info("Failed to restore virt_use_nfs value") elif backend_storage_type == "nbd": if nbd: try: nbd.cleanup() except Exception as ndbEx: logging.info("Clean Up nbd failed: %s" % str(ndbEx)) # Clean up secrets if auth_sec_uuid: virsh.secret_undefine(auth_sec_uuid) if luks_sec_uuid: virsh.secret_undefine(luks_sec_uuid) # Clean up pools if pvt: pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image)
def run(test, params, env): """ Test vm backingchain, blockcopy """ vm_name = params.get('main_vm') vm = env.get_vm(vm_name) status_error = 'yes' == params.get('status_error', 'no') error_msg = params.get('error_msg', '') case = params.get('case', '') blockcommand = params.get('blockcommand', '') blk_top = int(params.get('top', 0)) blk_base = int(params.get('base', 0)) opts = params.get('opts', '--verbose --wait') check_func = params.get('check_func', '') disk_type = params.get('disk_type', '') disk_src = params.get('disk_src', '') driver_type = params.get('driver_type', 'qcow2') vol_name = params.get('vol_name', 'vol_blockpull') pool_name = params.get('pool_name', '') brick_path = os.path.join(data_dir.get_tmp_dir(), pool_name) vg_name = params.get('vg_name', 'HostVG') vol_size = params.get('vol_size', '10M') vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) bkxml = vmxml.copy() # List to collect paths to delete after test file_to_del = [] virsh_dargs = {'debug': True, 'ignore_status': False} try: all_disks = vmxml.get_disk_source(vm_name) if not all_disks: test.error('Not found any disk file in vm.') image_file = all_disks[0].find('source').get('file') logging.debug('Image file of vm: %s', image_file) # Get all dev of virtio disks to calculate the dev of new disk all_vdisks = [disk for disk in all_disks if disk.find('target').get('dev').startswith('vd')] disk_dev = all_vdisks[-1].find('target').get('dev') new_dev = disk_dev[:-1] + chr(ord(disk_dev[-1]) + 1) # Setup iscsi target if disk_src == 'iscsi': disk_target = libvirt.setup_or_cleanup_iscsi( is_setup=True, is_login=True, image_size='1G') logging.debug('ISCSI target: %s', disk_target) # Setup lvm elif disk_src == 'lvm': # Stop multipathd to avoid vgcreate fail multipathd = service.Factory.create_service("multipathd") multipathd_status = multipathd.status() if multipathd_status: multipathd.stop() # Setup iscsi target device_name = libvirt.setup_or_cleanup_iscsi( is_setup=True, is_login=True, image_size='1G') logging.debug('ISCSI target for lvm: %s', device_name) # Create logical device logical_device = device_name lv_utils.vg_create(vg_name, logical_device) vg_created = True # Create logical volume as backing store vol_bk, vol_disk = 'vol1', 'vol2' lv_utils.lv_create(vg_name, vol_bk, vol_size) disk_target = '/dev/%s/%s' % (vg_name, vol_bk) src_vol = '/dev/%s/%s' % (vg_name, vol_disk) # Setup gluster elif disk_src == 'gluster': host_ip = gluster.setup_or_cleanup_gluster( is_setup=True, brick_path=brick_path, **params) logging.debug(host_ip) gluster_img = 'test.img' img_create_cmd = "qemu-img create -f raw /mnt/%s 10M" % gluster_img process.run("mount -t glusterfs %s:%s /mnt; %s; umount /mnt" % (host_ip, vol_name, img_create_cmd), shell=True) disk_target = 'gluster://%s/%s/%s' % (host_ip, vol_name, gluster_img) else: test.error('Wrong disk source, unsupported by this test.') new_image = os.path.join(os.path.split(image_file)[0], 'test.img') params['snapshot_list'] = ['s%d' % i for i in range(1, 5)] if disk_src == 'lvm': new_image = src_vol if disk_type == 'block': new_image = disk_target for i in range(2, 6): lv_utils.lv_create(vg_name, 'vol%s' % i, vol_size) snapshot_image_list = ['/dev/%s/vol%s' % (vg_name, i) for i in range(2, 6)] else: file_to_del.append(new_image) snapshot_image_list = [new_image.replace('img', i) for i in params['snapshot_list']] cmd_create_img = 'qemu-img create -f %s -b %s %s -F raw' % (driver_type, disk_target, new_image) if disk_type == 'block' and driver_type == 'raw': pass else: 
process.run(cmd_create_img, verbose=True, shell=True) info_new = utils_misc.get_image_info(new_image) logging.debug(info_new) # Create xml of new disk and add it to vmxml if disk_type: new_disk = Disk() new_disk.xml = libvirt.create_disk_xml({ 'type_name': disk_type, 'driver_type': driver_type, 'target_dev': new_dev, 'source_file': new_image }) logging.debug(new_disk.xml) vmxml.devices = vmxml.devices.append(new_disk) vmxml.xmltreefile.write() logging.debug(vmxml) vmxml.sync() vm.start() logging.debug(virsh.dumpxml(vm_name)) # Create backing chain for i in range(len(params['snapshot_list'])): virsh.snapshot_create_as( vm_name, '%s --disk-only --diskspec %s,file=%s,stype=%s' % (params['snapshot_list'][i], new_dev, snapshot_image_list[i], disk_type), **virsh_dargs ) # Get path of each snapshot file snaps = virsh.domblklist(vm_name, debug=True).stdout.splitlines() for line in snaps: if line.lstrip().startswith(('hd', 'sd', 'vd')): file_to_del.append(line.split()[-1]) qemu_img_cmd = 'qemu-img info --backing-chain %s' % snapshot_image_list[-1] if libvirt_storage.check_qemu_image_lock_support(): qemu_img_cmd += " -U" bc_info = process.run(qemu_img_cmd, verbose=True, shell=True).stdout_text if not disk_type == 'block': bc_chain = snapshot_image_list[::-1] + [new_image, disk_target] else: bc_chain = snapshot_image_list[::-1] + [new_image] bc_result = check_backingchain(bc_chain, bc_info) if not bc_result: test.fail('qemu-img info output of backing chain is not correct: %s' % bc_info) # Generate blockpull/blockcommit options virsh_blk_cmd = eval('virsh.%s' % blockcommand) if blockcommand == 'blockpull' and blk_base != 0: opts += '--base {dev}[{}]'.format(blk_base, dev=new_dev) elif blockcommand == 'blockcommit': opt_top = ' --top {dev}[{}]'.format(blk_top, dev=new_dev) if blk_top != 0 else '' opt_base = ' --base {dev}[{}]'.format(blk_base, dev=new_dev) if blk_base != 0 else '' opts += opt_top + opt_base + ' --active' if blk_top == 0 else '' # Do blockpull/blockcommit virsh_blk_cmd(vm_name, new_dev, opts, **virsh_dargs) if blockcommand == 'blockcommit': virsh.blockjob(vm_name, new_dev, '--pivot', **virsh_dargs) vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) logging.debug("XML after %s: %s" % (blockcommand, vmxml)) # Check backing chain after blockpull/blockcommit check_bc_func_name = 'check_bc_%s' % check_func if check_bc_func_name in globals(): check_bc = eval(check_bc_func_name) if not callable(check_bc): logging.warning('Function "%s" is not callable.', check_bc_func_name) if not check_bc(blockcommand, vmxml, new_dev, bc_chain): test.fail('Backing chain check after %s failed' % blockcommand) else: logging.warning('Function "%s" is not implemented.', check_bc_func_name) virsh.dumpxml(vm_name, debug=True) # Check whether login is successful try: vm.wait_for_login().close() except Exception as e: test.fail('Vm login failed') finally: logging.info('Start cleaning up.') for ss in params.get('snapshot_list', []): virsh.snapshot_delete(vm_name, '%s --metadata' % ss, debug=True) bkxml.sync() for path in file_to_del: logging.debug('Remove %s', path) if os.path.exists(path): os.remove(path) if disk_src == 'iscsi': libvirt.setup_or_cleanup_iscsi(is_setup=False) elif disk_src == 'lvm': process.run('rm -rf /dev/%s/%s' % (vg_name, vol_disk), ignore_status=True) if 'vol_bk' in locals(): lv_utils.lv_remove(vg_name, vol_bk) if 'vg_created' in locals() and vg_created: lv_utils.vg_remove(vg_name) cmd = "pvs |grep %s |awk '{print $1}'" % vg_name pv_name = process.system_output(cmd, shell=True, verbose=True).strip() 
            # process.system_output() may return bytes depending on the
            # avocado version; normalize before interpolating into pvremove
            if isinstance(pv_name, bytes):
                pv_name = pv_name.decode()
            if pv_name:
                process.run("pvremove %s" % pv_name, verbose=True,
                            ignore_status=True)
            libvirt.setup_or_cleanup_iscsi(is_setup=False)
        elif disk_src == 'gluster':
            gluster.setup_or_cleanup_gluster(
                is_setup=False, brick_path=brick_path, **params)
        if 'multipathd_status' in locals() and multipathd_status:
            multipathd.start()
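# --------------------------------------------------------------------------
# Illustrative sketch (not part of the test above): check_backingchain() is
# not shown in this file; one possible implementation compares the expected
# image list with the "image:" lines that "qemu-img info --backing-chain"
# prints for every layer, newest first.  The helper below is an assumption
# for illustration only.
import re

from avocado.utils import process


def verify_backing_chain(expected_chain, top_image, force_share=False):
    """Return True if qemu-img reports exactly the expected backing chain."""
    cmd = "qemu-img info --backing-chain %s" % top_image
    if force_share:
        # "-U" lets qemu-img read images that a running VM holds locked
        cmd += " -U"
    info = process.run(cmd, shell=True, verbose=True).stdout_text
    reported = re.findall(r"^image:\s*(\S+)", info, re.M)
    return reported == list(expected_chain)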
def run(test, params, env): """ Test disk encryption option. 1.Prepare backend storage (blkdev/iscsi/gluster/ceph) 2.Use luks format to encrypt the backend storage 3.Prepare a disk xml indicating to the backend storage with valid/invalid luks password 4.Start VM with disk hot/cold plugged 5.Check some disk operations in VM 6.Check backend storage is still in luks format 7.Recover test environment """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) virsh_dargs = {'debug': True, 'ignore_status': True} def encrypt_dev(device, params): """ Encrypt device with luks format :param device: Storage deivce to be encrypted. :param params: From the dict to get encryption password. """ password = params.get("luks_encrypt_passwd", "password") size = params.get("luks_size", "500M") cmd = ( "qemu-img create -f luks " "--object secret,id=sec0,data=`printf '%s' | base64`,format=base64 " "-o key-secret=sec0 %s %s" % (password, device, size)) if process.system(cmd, shell=True): test.fail("Can't create a luks encrypted img by qemu-img") def check_dev_format(device, fmt="luks"): """ Check if device is in luks format :param device: Storage deivce to be checked. :param fmt: Expected disk format. :return: If device's format equals to fmt, return True, else return False. """ cmd_result = process.run("qemu-img" + ' -h', ignore_status=True, shell=True, verbose=False) if b'-U' in cmd_result.stdout: cmd = ("qemu-img info -U %s| grep -i 'file format' " "| grep -i %s" % (device, fmt)) else: cmd = ("qemu-img info %s| grep -i 'file format' " "| grep -i %s" % (device, fmt)) cmd_result = process.run(cmd, ignore_status=True, shell=True) if cmd_result.exit_status: test.fail("device %s is not in %s format. err is: %s" % (device, fmt, cmd_result.stderr)) def check_in_vm(target, old_parts): """ Check mount/read/write disk in VM. :param target: Disk dev in VM. :param old_parts: Original disk partitions in VM. :return: True if check successfully. """ try: session = vm.wait_for_login() if platform.platform().count('ppc64'): time.sleep(10) new_parts = utils_disk.get_parts_list(session) added_parts = list(set(new_parts).difference(set(old_parts))) logging.info("Added parts:%s", added_parts) if len(added_parts) != 1: logging.error("The number of new partitions is invalid in VM") return False else: added_part = added_parts[0] cmd = ("fdisk -l /dev/{0} && mkfs.ext4 -F /dev/{0} && " "mkdir -p test && mount /dev/{0} test && echo" " teststring > test/testfile && umount test".format( added_part)) status, output = session.cmd_status_output(cmd) logging.debug("Disk operation in VM:\nexit code:\n%s\noutput:\n%s", status, output) return status == 0 except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e: logging.error(str(e)) return False def create_vol(p_name, target_encrypt_params, vol_params): """ Create volume. :param p_name: Pool name. :param target_encrypt_params: encrypt parameters in dict. :param vol_params: Volume parameters dict. """ # Clean up dirty volumes if pool has. pv = libvirt_storage.PoolVolume(p_name) vol_name_list = pv.list_volumes() for vol_name in vol_name_list: pv.delete_volume(vol_name) volxml = vol_xml.VolXML() v_xml = volxml.new_vol(**vol_params) v_xml.encryption = volxml.new_encryption(**target_encrypt_params) v_xml.xmltreefile.write() ret = virsh.vol_create(p_name, v_xml.xml, **virsh_dargs) libvirt.check_exit_status(ret) def get_secret_list(): """ Get secret list. 
:return secret list """ logging.info("Get secret list ...") secret_list_result = virsh.secret_list() secret_list = secret_list_result.stdout.strip().splitlines() # First two lines contain table header followed by entries # for each secret, such as: # # UUID Usage # -------------------------------------------------------------------------------- # b4e8f6d3-100c-4e71-9f91-069f89742273 ceph client.libvirt secret secret_list = secret_list[2:] result = [] # If secret list is empty. if secret_list: for line in secret_list: # Split on whitespace, assume 1 column linesplit = line.split(None, 1) result.append(linesplit[0]) return result # Disk specific attributes. device = params.get("virt_disk_device", "disk") device_target = params.get("virt_disk_device_target", "vdd") device_format = params.get("virt_disk_device_format", "raw") device_type = params.get("virt_disk_device_type", "file") device_bus = params.get("virt_disk_device_bus", "virtio") backend_storage_type = params.get("backend_storage_type", "iscsi") # Backend storage options. storage_size = params.get("storage_size", "1G") enable_auth = "yes" == params.get("enable_auth") # Luks encryption info, luks_encrypt_passwd is the password used to encrypt # luks image, and luks_secret_passwd is the password set to luks secret, you # can set a wrong password to luks_secret_passwd for negative tests luks_encrypt_passwd = params.get("luks_encrypt_passwd", "password") luks_secret_passwd = params.get("luks_secret_passwd", "password") # Backend storage auth info use_auth_usage = "yes" == params.get("use_auth_usage") if use_auth_usage: use_auth_uuid = False else: use_auth_uuid = "yes" == params.get("use_auth_uuid", "yes") auth_sec_usage_type = params.get("auth_sec_usage_type", "iscsi") auth_sec_usage_target = params.get("auth_sec_usage_target", "libvirtiscsi") status_error = "yes" == params.get("status_error") define_error = "yes" == params.get("define_error") check_partitions = "yes" == params.get("virt_disk_check_partitions", "yes") hotplug_disk = "yes" == params.get("hotplug_disk", "no") encryption_in_source = "yes" == params.get("encryption_in_source", "no") auth_in_source = "yes" == params.get("auth_in_source", "no") auth_sec_uuid = "" luks_sec_uuid = "" disk_auth_dict = {} disk_encryption_dict = {} pvt = None duplicated_encryption = "yes" == params.get("duplicated_encryption", "no") if ((encryption_in_source or auth_in_source) and not libvirt_version.version_compare(3, 9, 0)): test.cancel("Cannot put <encryption> or <auth> inside disk <source> " "in this libvirt version.") # Start VM and get all partions in VM. if vm.is_dead(): vm.start() session = vm.wait_for_login() old_parts = utils_disk.get_parts_list(session) session.close() vm.destroy(gracefully=False) # Back up xml file. vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) try: # Clean up dirty secrets in test environments if there are. 
dirty_secret_list = get_secret_list() if dirty_secret_list: for dirty_secret_uuid in dirty_secret_list: virsh.secret_undefine(dirty_secret_uuid) # Create secret luks_sec_uuid = libvirt.create_secret(params) logging.debug("A secret created with uuid = '%s'", luks_sec_uuid) ret = virsh.secret_set_value(luks_sec_uuid, luks_secret_passwd, encode=True, debug=True) libvirt.check_exit_status(ret) # Setup backend storage if backend_storage_type == "iscsi": iscsi_host = params.get("iscsi_host") iscsi_port = params.get("iscsi_port") if device_type == "block": device_source = libvirt.setup_or_cleanup_iscsi(is_setup=True) disk_src_dict = {'attrs': {'dev': device_source}} elif device_type == "network": if enable_auth: chap_user = params.get("chap_user", "redhat") chap_passwd = params.get("chap_passwd", "password") auth_sec_usage = params.get("auth_sec_usage", "libvirtiscsi") auth_sec_dict = { "sec_usage": "iscsi", "sec_target": auth_sec_usage } auth_sec_uuid = libvirt.create_secret(auth_sec_dict) # Set password of auth secret (not luks encryption secret) virsh.secret_set_value(auth_sec_uuid, chap_passwd, encode=True, debug=True) iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi( is_setup=True, is_login=False, image_size=storage_size, chap_user=chap_user, chap_passwd=chap_passwd, portal_ip=iscsi_host) # ISCSI auth attributes for disk xml if use_auth_uuid: disk_auth_dict = { "auth_user": chap_user, "secret_type": auth_sec_usage_type, "secret_uuid": auth_sec_uuid } elif use_auth_usage: disk_auth_dict = { "auth_user": chap_user, "secret_type": auth_sec_usage_type, "secret_usage": auth_sec_usage_target } else: iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi( is_setup=True, is_login=False, image_size=storage_size, portal_ip=iscsi_host) device_source = "iscsi://%s:%s/%s/%s" % ( iscsi_host, iscsi_port, iscsi_target, lun_num) disk_src_dict = { "attrs": { "protocol": "iscsi", "name": "%s/%s" % (iscsi_target, lun_num) }, "hosts": [{ "name": iscsi_host, "port": iscsi_port }] } elif backend_storage_type == "gluster": gluster_vol_name = params.get("gluster_vol_name", "gluster_vol1") gluster_pool_name = params.get("gluster_pool_name", "gluster_pool1") gluster_img_name = params.get("gluster_img_name", "gluster1.img") gluster_host_ip = gluster.setup_or_cleanup_gluster( is_setup=True, vol_name=gluster_vol_name, pool_name=gluster_pool_name, **params) device_source = "gluster://%s/%s/%s" % ( gluster_host_ip, gluster_vol_name, gluster_img_name) disk_src_dict = { "attrs": { "protocol": "gluster", "name": "%s/%s" % (gluster_vol_name, gluster_img_name) }, "hosts": [{ "name": gluster_host_ip, "port": "24007" }] } elif backend_storage_type == "ceph": ceph_host_ip = params.get("ceph_host_ip", "EXAMPLE_HOSTS") ceph_mon_ip = params.get("ceph_mon_ip", "EXAMPLE_MON_HOST") ceph_host_port = params.get("ceph_host_port", "EXAMPLE_PORTS") ceph_disk_name = params.get("ceph_disk_name", "EXAMPLE_SOURCE_NAME") ceph_client_name = params.get("ceph_client_name") ceph_client_key = params.get("ceph_client_key") ceph_auth_user = params.get("ceph_auth_user") ceph_auth_key = params.get("ceph_auth_key") enable_auth = "yes" == params.get("enable_auth") key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key") key_opt = "" # Prepare a blank params to confirm if delete the configure at the end of the test ceph_cfg = "" if not utils_package.package_install(["ceph-common"]): test.error("Failed to install ceph-common") # Create config file if it doesn't exist ceph_cfg = ceph.create_config_file(ceph_mon_ip) if enable_auth: # If enable auth, 
prepare a local file to save key if ceph_client_name and ceph_client_key: with open(key_file, 'w') as f: f.write("[%s]\n\tkey = %s\n" % (ceph_client_name, ceph_client_key)) key_opt = "--keyring %s" % key_file auth_sec_dict = { "sec_usage": auth_sec_usage_type, "sec_name": "ceph_auth_secret" } auth_sec_uuid = libvirt.create_secret(auth_sec_dict) virsh.secret_set_value(auth_sec_uuid, ceph_auth_key, debug=True) disk_auth_dict = { "auth_user": ceph_auth_user, "secret_type": auth_sec_usage_type, "secret_uuid": auth_sec_uuid } else: test.error("No ceph client name/key provided.") device_source = "rbd:%s:mon_host=%s:keyring=%s" % ( ceph_disk_name, ceph_mon_ip, key_file) else: device_source = "rbd:%s:mon_host=%s" % (ceph_disk_name, ceph_mon_ip) cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm " "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name)) cmd_result = process.run(cmd, ignore_status=True, shell=True) logging.debug("pre clean up rbd disk if exists: %s", cmd_result) disk_src_dict = { "attrs": { "protocol": "rbd", "name": ceph_disk_name }, "hosts": [{ "name": ceph_host_ip, "port": ceph_host_port }] } elif backend_storage_type == "nfs": pool_name = params.get("pool_name", "nfs_pool") pool_target = params.get("pool_target", "nfs_mount") pool_type = params.get("pool_type", "netfs") nfs_server_dir = params.get("nfs_server_dir", "nfs_server") emulated_image = params.get("emulated_image") image_name = params.get("nfs_image_name", "nfs.img") tmp_dir = data_dir.get_tmp_dir() pvt = libvirt.PoolVolumeTest(test, params) pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image) nfs_mount_dir = os.path.join(tmp_dir, pool_target) device_source = nfs_mount_dir + image_name disk_src_dict = { 'attrs': { 'file': device_source, 'type_name': 'file' } } # Create dir based pool,and then create one volume on it. elif backend_storage_type == "dir": pool_name = params.get("pool_name", "dir_pool") pool_target = params.get("pool_target") pool_type = params.get("pool_type") emulated_image = params.get("emulated_image") image_name = params.get("dir_image_name", "luks_1.img") # Create and start dir_based pool. pvt = libvirt.PoolVolumeTest(test, params) if not os.path.exists(pool_target): os.mkdir(pool_target) pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image) sp = libvirt_storage.StoragePool() if not sp.is_pool_active(pool_name): sp.set_pool_autostart(pool_name) sp.start_pool(pool_name) # Create one volume on the pool. volume_name = params.get("vol_name") volume_alloc = params.get("vol_alloc") volume_cap_unit = params.get("vol_cap_unit") volume_cap = params.get("vol_cap") volume_target_path = params.get("sec_volume") volume_target_format = params.get("target_format") volume_target_encypt = params.get("target_encypt", "") volume_target_label = params.get("target_label") vol_params = { "name": volume_name, "capacity": int(volume_cap), "allocation": int(volume_alloc), "format": volume_target_format, "path": volume_target_path, "label": volume_target_label, "capacity_unit": volume_cap_unit } vol_encryption_params = {} vol_encryption_params.update({"format": "luks"}) vol_encryption_params.update( {"secret": { "type": "passphrase", "uuid": luks_sec_uuid }}) try: # If Libvirt version is lower than 2.5.0 # Creating luks encryption volume is not supported,so skip it. 
create_vol(pool_name, vol_encryption_params, vol_params) except AssertionError as info: err_msgs = ("create: invalid option") if str(info).count(err_msgs): test.cancel("Creating luks encryption volume " "is not supported on this libvirt version") else: test.error("Failed to create volume." "Error: %s" % str(info)) disk_src_dict = {'attrs': {'file': volume_target_path}} device_source = volume_target_path else: test.cancel("Only iscsi/gluster/rbd/nfs can be tested for now.") logging.debug("device source is: %s", device_source) if backend_storage_type != "dir": encrypt_dev(device_source, params) # Add disk xml. vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disk_xml = Disk(type_name=device_type) disk_xml.device = device disk_xml.target = {"dev": device_target, "bus": device_bus} driver_dict = {"name": "qemu", "type": device_format} disk_xml.driver = driver_dict disk_source = disk_xml.new_disk_source(**disk_src_dict) if disk_auth_dict: logging.debug("disk auth dict is: %s" % disk_auth_dict) if auth_in_source: disk_source.auth = disk_xml.new_auth(**disk_auth_dict) else: disk_xml.auth = disk_xml.new_auth(**disk_auth_dict) disk_encryption_dict = { "encryption": "luks", "secret": { "type": "passphrase", "uuid": luks_sec_uuid } } disk_encryption = disk_xml.new_encryption(**disk_encryption_dict) if encryption_in_source: disk_source.encryption = disk_encryption else: disk_xml.encryption = disk_encryption if duplicated_encryption: disk_xml.encryption = disk_encryption disk_xml.source = disk_source logging.debug("new disk xml is: %s", disk_xml) # Sync VM xml if not hotplug_disk: vmxml.add_device(disk_xml) try: vmxml.sync() vm.start() vm.wait_for_login() except xcepts.LibvirtXMLError as xml_error: if not define_error: test.fail("Failed to define VM:\n%s" % str(xml_error)) except virt_vm.VMStartError as details: # When use wrong password in disk xml for cold plug cases, # VM cannot be started if status_error and not hotplug_disk: logging.info("VM failed to start as expected: %s" % str(details)) else: test.fail("VM should start but failed: %s" % str(details)) if hotplug_disk: result = virsh.attach_device(vm_name, disk_xml.xml, ignore_status=True, debug=True) libvirt.check_exit_status(result, status_error) if check_partitions and not status_error: if not check_in_vm(device_target, old_parts): test.fail("Check disk partitions in VM failed") check_dev_format(device_source) finally: # Recover VM. if vm.is_alive(): vm.destroy(gracefully=False) vmxml_backup.sync("--snapshots-metadata") # Clean up backend storage if backend_storage_type == "iscsi": libvirt.setup_or_cleanup_iscsi(is_setup=False) elif backend_storage_type == "gluster": gluster.setup_or_cleanup_gluster(is_setup=False, vol_name=gluster_vol_name, pool_name=gluster_pool_name, **params) elif backend_storage_type == "ceph": # Remove ceph configure file if created. if ceph_cfg: os.remove(ceph_cfg) cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm " "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name)) cmd_result = process.run(cmd, ignore_status=True, shell=True) logging.debug("result of rbd removal: %s", cmd_result) if os.path.exists(key_file): os.remove(key_file) # Clean up secrets if auth_sec_uuid: virsh.secret_undefine(auth_sec_uuid) if luks_sec_uuid: virsh.secret_undefine(luks_sec_uuid) # Clean up pools if pvt: pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image)
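# --------------------------------------------------------------------------
# Illustrative sketch (not part of the test above): the two qemu-img calls
# the test relies on (encrypt_dev and check_dev_format), combined into one
# standalone snippet - create a LUKS image from a passphrase passed as a
# qemu secret object, then confirm the file really reports the luks format.
# Path, passphrase and size defaults are assumptions for illustration only.
from avocado.utils import process


def create_and_verify_luks_image(path, passphrase="password", size="500M"):
    """Create a LUKS image with qemu-img and check its reported format."""
    create_cmd = (
        "qemu-img create -f luks "
        "--object secret,id=sec0,data=`printf '%s' | base64`,format=base64 "
        "-o key-secret=sec0 %s %s" % (passphrase, path, size))
    process.run(create_cmd, shell=True)
    info = process.run("qemu-img info %s" % path, shell=True).stdout_text
    return "file format: luks" in info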
def run(test, params, env): """ Test multiple disks attachment. 1.Prepare test environment,destroy or suspend a VM. 2.Prepare disk image. 3.Edit disks xml and start the domain. 4.Perform test operation. 5.Recover test environment. 6.Confirm the test result. """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) virsh_dargs = {'debug': True, 'ignore_status': True} gluster_server_name = params.get("gluster_server_name") # If gluster_server is specified from config file, just use this gluster server. if 'EXAMPLE' not in gluster_server_name: params.update({'gluster_server_ip': gluster_server_name}) def prepare_gluster_disk(disk_img, disk_format): """ Setup glusterfs and prepare disk image. """ # Get the image path image_source = vm.get_first_disk_devices()['source'] # Setup gluster host_ip = gluster.setup_or_cleanup_gluster(True, brick_path=brick_path, **params) logging.debug("host ip: %s ", host_ip) image_info = utils_misc.get_image_info(image_source) image_dest = "/mnt/%s" % disk_img if image_info["format"] == disk_format: disk_cmd = ("cp -f %s %s" % (image_source, image_dest)) else: # Convert the disk format disk_cmd = ( "qemu-img convert -f %s -O %s %s %s" % (image_info["format"], disk_format, image_source, image_dest)) # Mount the gluster disk and create the image. process.run("mount -t glusterfs %s:%s /mnt && " "%s && chmod a+rw /mnt/%s && umount /mnt" % (host_ip, vol_name, disk_cmd, disk_img), shell=True) return host_ip def build_disk_xml(disk_img, disk_format, host_ip): """ Try to rebuild disk xml """ if default_pool: disk_xml = Disk(type_name="file") else: disk_xml = Disk(type_name="network") disk_xml.device = "disk" driver_dict = {"name": "qemu", "type": disk_format, "cache": "none"} if driver_iothread: driver_dict.update({"iothread": driver_iothread}) disk_xml.driver = driver_dict disk_xml.target = {"dev": "vdb", "bus": "virtio"} if default_pool: utils_misc.mount("%s:%s" % (host_ip, vol_name), default_pool, "glusterfs") process.run("setsebool virt_use_fusefs on", shell=True) source_dict = {"file": "%s/%s" % (default_pool, disk_img)} disk_xml.source = disk_xml.new_disk_source( **{"attrs": source_dict}) else: source_dict = { "protocol": "gluster", "name": "%s/%s" % (vol_name, disk_img) } host_dict = [{"name": host_ip, "port": "24007"}] # If mutiple_hosts is True, attempt to add multiple hosts. if multiple_hosts: host_dict.append({ "name": params.get("dummy_host1"), "port": "24007" }) host_dict.append({ "name": params.get("dummy_host2"), "port": "24007" }) if transport: host_dict[0]['transport'] = transport disk_xml.source = disk_xml.new_disk_source(**{ "attrs": source_dict, "hosts": host_dict }) return disk_xml def test_pmsuspend(vm_name): """ Test pmsuspend command. """ if vm.is_dead(): vm.start() vm.wait_for_login() # Create swap partition if necessary. 
if not vm.has_swap(): swap_path = os.path.join(data_dir.get_data_dir(), 'swap.img') vm.create_swap_partition(swap_path) ret = virsh.dompmsuspend(vm_name, "disk", **virsh_dargs) libvirt.check_exit_status(ret) # wait for vm to shutdown if not utils_misc.wait_for(lambda: vm.state() == "shut off", 60): test.fail("vm is still alive after S4 operation") # Wait for vm and qemu-ga service to start vm.start() # Prepare guest agent and start guest try: vm.prepare_guest_agent() except (remote.LoginError, virt_vm.VMError) as detail: test.fail("failed to prepare agent:\n%s" % detail) #TODO This step may hang for rhel6 guest ret = virsh.dompmsuspend(vm_name, "mem", **virsh_dargs) libvirt.check_exit_status(ret) # Check vm state if not utils_misc.wait_for(lambda: vm.state() == "pmsuspended", 60): test.fail("vm isn't suspended after S3 operation") ret = virsh.dompmwakeup(vm_name, **virsh_dargs) libvirt.check_exit_status(ret) if not vm.is_alive(): test.fail("vm is not alive after dompmwakeup") # Disk specific attributes. pm_enabled = "yes" == params.get("pm_enabled", "no") gluster_disk = "yes" == params.get("gluster_disk", "no") disk_format = params.get("disk_format", "qcow2") vol_name = params.get("vol_name") transport = params.get("transport", "") default_pool = params.get("default_pool", "") pool_name = params.get("pool_name") driver_iothread = params.get("driver_iothread") dom_iothreads = params.get("dom_iothreads") brick_path = os.path.join(test.virtdir, pool_name) test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no") # Gluster server multiple hosts flag. multiple_hosts = "yes" == params.get("multiple_hosts", "no") pre_vm_state = params.get("pre_vm_state", "running") # Destroy VM first. if vm.is_alive(): vm.destroy(gracefully=False) # Back up xml file. vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) vmxml = vmxml_backup.copy() mnt_src = "" # This is brought by new feature:block-dev if transport == "rdma": test.cancel("transport protocol 'rdma' is not yet supported") try: # Build new vm xml. if pm_enabled: vm_xml.VMXML.set_pm_suspend(vm_name) vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name) logging.debug("Attempting to set guest agent channel") vmxml.set_agent_channel() vmxml.sync() if gluster_disk: # Setup glusterfs and disk xml. disk_img = "gluster.%s" % disk_format host_ip = prepare_gluster_disk(disk_img, disk_format) mnt_src = "%s:%s" % (host_ip, vol_name) global custom_disk custom_disk = build_disk_xml(disk_img, disk_format, host_ip) start_vm = "yes" == params.get("start_vm", "yes") # set domain options if dom_iothreads: try: vmxml.iothreads = int(dom_iothreads) vmxml.sync() except ValueError: # 'iothreads' may not invalid number in negative tests logging.debug("Can't convert '%s' to integer type", dom_iothreads) # If hot plug, start VM first, otherwise stop VM if running. if start_vm: if vm.is_dead(): vm.start() vm.wait_for_login().close() else: if not vm.is_dead(): vm.destroy() # If gluster_disk is True, use attach_device. attach_option = params.get("attach_option", "") if gluster_disk: cmd_result = virsh.attach_device(domainarg=vm_name, filearg=custom_disk.xml, flagstr=attach_option, dargs=virsh_dargs, debug=True) libvirt.check_exit_status(cmd_result) # Turn VM into certain state. 
if pre_vm_state == "running": logging.info("Starting %s...", vm_name) if vm.is_dead(): vm.start() elif pre_vm_state == "transient": logging.info("Creating %s...", vm_name) vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) vm.undefine() if virsh.create(vmxml_for_test.xml, **virsh_dargs).exit_status: vmxml_backup.define() test.skip("can't create the domain") # Run the tests. if pm_enabled: # Makesure the guest agent is started try: vm.prepare_guest_agent() except (remote.LoginError, virt_vm.VMError) as detail: test.fail("failed to prepare agent: %s" % detail) # Run dompmsuspend command. test_pmsuspend(vm_name) # After block-dev introduced in libvirt 6.0.0 afterwards, gluster+%s.*format information is not provided from qemu output if libvirt_version.version_compare(6, 0, 0): test_qemu_cmd = False if test_qemu_cmd: # Check qemu-kvm command line cmd = ("ps -ef | grep %s | grep -v grep " % vm_name) if transport == "rdma": cmd += " | grep gluster+%s.*format=%s" % (transport, disk_format) else: cmd += " | grep gluster.*format=%s" % disk_format if driver_iothread: cmd += " | grep iothread=iothread%s" % driver_iothread if process.run(cmd, ignore_status=True, shell=True).exit_status: test.fail("Can't see gluster option '%s' " "in command line" % cmd) # Detach hot plugged device. if start_vm and not default_pool: if gluster_disk: ret = virsh.detach_device(vm_name, custom_disk.xml, flagstr=attach_option, dargs=virsh_dargs, wait_for_event=True) libvirt.check_exit_status(ret) finally: # Recover VM. if vm.is_alive(): vm.destroy(gracefully=False) logging.info("Restoring vm...") vmxml_backup.sync() if utils_misc.is_mounted(mnt_src, default_pool, 'fuse.glusterfs', verbose=True): process.run("umount %s" % default_pool, ignore_status=True, shell=True) if gluster_disk: gluster.setup_or_cleanup_gluster(False, brick_path=brick_path, **params)
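# --- Illustrative sketch (not part of the test above) ---
# Rough shape of the <source> element that build_disk_xml() asks
# Disk.new_disk_source() to generate for the network (gluster) case:
# protocol "gluster", name "<volume>/<image>", and one or more <host>
# children on port 24007, optionally carrying a transport attribute.
# Volume, image and host values below are placeholders, not test values.
from xml.etree import ElementTree as ET


def example_gluster_source_xml(vol_name, disk_img, host_ip, transport=None):
    source = ET.Element("source", protocol="gluster",
                        name="%s/%s" % (vol_name, disk_img))
    host_attrs = {"name": host_ip, "port": "24007"}
    if transport:
        host_attrs["transport"] = transport
    ET.SubElement(source, "host", **host_attrs)
    return ET.tostring(source, encoding="unicode")

# e.g. example_gluster_source_xml("vol0", "gluster.qcow2", "192.0.2.10", "tcp")
# produces something like:
#   <source protocol="gluster" name="vol0/gluster.qcow2">
#     <host name="192.0.2.10" port="24007" transport="tcp" /></source>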
def run(test, params, env): """ Test command: virsh blockcommit <domain> <path> 1) Prepare test environment. 2) Commit changes from a snapshot down to its backing image. 3) Recover test environment. 4) Check result. """ def make_disk_snapshot(postfix_n, snapshot_take, is_check_snapshot_tree=False, is_create_image_file_in_vm=False): """ Make external snapshots for disks only. :param postfix_n: postfix option :param snapshot_take: snapshots taken. :param is_create_image_file_in_vm: create image file in VM. """ # Add all disks into command line. disks = vm.get_disk_devices() # Make three external snapshots for disks only for count in range(1, snapshot_take): options = "%s_%s %s%s-desc " % (postfix_n, count, postfix_n, count) options += "--disk-only --atomic --no-metadata" if needs_agent: options += " --quiesce" for disk in disks: disk_detail = disks[disk] basename = os.path.basename(disk_detail['source']) # Remove the original suffix if any, appending # ".postfix_n[0-9]" diskname = basename.split(".")[0] snap_name = "%s.%s%s" % (diskname, postfix_n, count) disk_external = os.path.join(tmp_dir, snap_name) snapshot_external_disks.append(disk_external) options += " %s,snapshot=external,file=%s" % (disk, disk_external) if is_check_snapshot_tree: options = options.replace("--no-metadata", "") cmd_result = virsh.snapshot_create_as(vm_name, options, ignore_status=True, debug=True) status = cmd_result.exit_status if status != 0: test.fail("Failed to make snapshots for disks!") if is_create_image_file_in_vm: create_file_cmd = "dd if=/dev/urandom of=/mnt/snapshot_%s.img bs=1M count=2" % count session.cmd_status_output(create_file_cmd) created_image_files_in_vm.append("snapshot_%s.img" % count) # Create a file flag in VM after each snapshot flag_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"), dir="/tmp") file_path = flag_file.name flag_file.close() status, output = session.cmd_status_output("touch %s" % file_path) if status: test.fail("Touch file in vm failed. %s" % output) snapshot_flag_files.append(file_path) def check_snapshot_tree(): """ Check whether predefined snapshot names are equals to snapshot names by virsh snapshot-list --tree """ predefined_snapshot_name_list = [] for count in range(1, snapshot_take): predefined_snapshot_name_list.append("%s_%s" % (postfix_n, count)) snapshot_list_cmd = "virsh snapshot-list %s --tree" % vm_name result_output = process.run(snapshot_list_cmd, ignore_status=True, shell=True).stdout_text virsh_snapshot_name_list = [] for line in result_output.rsplit("\n"): strip_line = line.strip() if strip_line and "|" not in strip_line: virsh_snapshot_name_list.append(strip_line) # Compare two lists in their order and values, all need to be same. compare_list = [ out_p for out_p, out_v in zip(predefined_snapshot_name_list, virsh_snapshot_name_list) if out_p not in out_v ] if compare_list: test.fail("snapshot tree not correctly returned.") # If check_snapshot_tree is True, check snapshot tree output. if is_check_snapshot_tree: check_snapshot_tree() def get_first_disk_source(): """ Get disk source of first device :return: first disk of first device. """ first_device = vm.get_first_disk_devices() first_disk_src = first_device['source'] return first_disk_src def make_relative_path_backing_files(pre_set_root_dir=None): """ Create backing chain files of relative path. 
:param pre_set_root_dir: preset root dir :return: absolute path of top active file """ first_disk_source = get_first_disk_source() basename = os.path.basename(first_disk_source) if pre_set_root_dir is None: root_dir = os.path.dirname(first_disk_source) else: root_dir = pre_set_root_dir cmd = "mkdir -p %s" % os.path.join(root_dir, '{b..d}') ret = process.run(cmd, shell=True) libvirt.check_exit_status(ret) # Make three external relative path backing files. backing_file_dict = collections.OrderedDict() backing_file_dict["b"] = "../%s" % basename backing_file_dict["c"] = "../b/b.img" backing_file_dict["d"] = "../c/c.img" if pre_set_root_dir: backing_file_dict["b"] = "%s" % first_disk_source backing_file_dict["c"] = "%s/b/b.img" % root_dir backing_file_dict["d"] = "%s/c/c.img" % root_dir disk_format = params.get("disk_format", "qcow2") for key, value in list(backing_file_dict.items()): backing_file_path = os.path.join(root_dir, key) cmd = ( "cd %s && qemu-img create -f %s -o backing_file=%s,backing_fmt=%s %s.img" % (backing_file_path, "qcow2", value, disk_format, key)) ret = process.run(cmd, shell=True) disk_format = "qcow2" libvirt.check_exit_status(ret) return os.path.join(backing_file_path, "d.img") def check_chain_backing_files(disk_src_file, expect_backing_file=False): """ Check backing chain files of relative path after blockcommit. :param disk_src_file: first disk src file. :param expect_backing_file: whether it expect to have backing files. """ first_disk_source = get_first_disk_source() # Validate source image need refer to original one after active blockcommit if not expect_backing_file and disk_src_file not in first_disk_source: test.fail( "The disk image path:%s doesn't include the origin image: %s" % (first_disk_source, disk_src_file)) # Validate source image doesn't have backing files after active blockcommit cmd = "qemu-img info %s --backing-chain" % first_disk_source if qemu_img_locking_feature_support: cmd = "qemu-img info -U %s --backing-chain" % first_disk_source ret = process.run(cmd, shell=True).stdout_text.strip() if expect_backing_file: if 'backing file' not in ret: test.fail("The disk image doesn't have backing files") else: logging.debug("The actual qemu-img output:%s\n", ret) else: if 'backing file' in ret: test.fail("The disk image still have backing files") else: logging.debug("The actual qemu-img output:%s\n", ret) def create_reuse_external_snapshots(pre_set_root_dir=None): """ Create reuse external snapshots :param pre_set_root_dir: preset root directory :return: absolute path of base file """ if pre_set_root_dir is None: first_disk_source = get_first_disk_source() basename = os.path.basename(first_disk_source) root_dir = os.path.dirname(first_disk_source) else: root_dir = pre_set_root_dir meta_options = " --reuse-external --disk-only --no-metadata" # Make three external relative path backing files. 
backing_file_dict = collections.OrderedDict() backing_file_dict["b"] = "b.img" backing_file_dict["c"] = "c.img" backing_file_dict["d"] = "d.img" for key, value in list(backing_file_dict.items()): backing_file_path = os.path.join(root_dir, key) external_snap_shot = "%s/%s" % (backing_file_path, value) snapshot_external_disks.append(external_snap_shot) options = "%s --diskspec %s,file=%s" % (meta_options, disk_target, external_snap_shot) cmd_result = virsh.snapshot_create_as(vm_name, options, ignore_status=False, debug=True) libvirt.check_exit_status(cmd_result) logging.debug('reuse external snapshots:%s' % snapshot_external_disks) return root_dir def check_file_in_vm(): """ Check whether certain image files exists in VM internal. """ for img_file in created_image_files_in_vm: status, output = session.cmd_status_output("ls -l /mnt/%s" % img_file) logging.debug(output) if status: test.fail( "blockcommit from top to base failed when ls image file in VM: %s" % output) def do_blockcommit_pivot_repeatedly(): """ Validate bugzilla:https://bugzilla.redhat.com/show_bug.cgi?id=1857735 """ # Make external snapshot,pivot and delete snapshot file repeatedly. tmp_snapshot_name = "external_snapshot_" + "repeated.qcow2" block_target = 'vda' for count in range(0, 5): options = "%s " % tmp_snapshot_name options += "--disk-only --atomic" disk_external = os.path.join(tmp_dir, tmp_snapshot_name) options += " --diskspec %s,snapshot=external,file=%s" % ( block_target, disk_external) virsh.snapshot_create_as(vm_name, options, ignore_status=False, debug=True) virsh.blockcommit(vm_name, block_target, " --active --pivot ", ignore_status=False, debug=True) virsh.snapshot_delete(vm_name, tmp_snapshot_name, " --metadata") libvirt.delete_local_disk('file', disk_external) # MAIN TEST CODE ### # Process cartesian parameters vm_name = params.get("main_vm") vm = env.get_vm(vm_name) snapshot_take = int(params.get("snapshot_take", '0')) vm_state = params.get("vm_state", "running") needs_agent = "yes" == params.get("needs_agent", "yes") replace_vm_disk = "yes" == params.get("replace_vm_disk", "no") top_inactive = ("yes" == params.get("top_inactive")) with_timeout = ("yes" == params.get("with_timeout_option", "no")) cmd_timeout = params.get("cmd_timeout", "1") status_error = ("yes" == params.get("status_error", "no")) base_option = params.get("base_option", "none") middle_base = "yes" == params.get("middle_base", "no") pivot_opt = "yes" == params.get("pivot_opt", "no") snap_in_mirror = "yes" == params.get("snap_in_mirror", "no") snap_in_mirror_err = "yes" == params.get("snap_in_mirror_err", "no") with_active_commit = "yes" == params.get("with_active_commit", "no") multiple_chain = "yes" == params.get("multiple_chain", "no") virsh_dargs = {'debug': True} check_snapshot_tree = "yes" == params.get("check_snapshot_tree", "no") bandwidth = params.get("blockcommit_bandwidth", "") bandwidth_byte = "yes" == params.get("bandwidth_byte", "no") disk_target = params.get("disk_target", "vda") disk_format = params.get("disk_format", "qcow2") reuse_external_snapshot = "yes" == params.get("reuse_external_snapshot", "no") restart_vm_before_commit = "yes" == params.get("restart_vm_before_commit", "no") check_image_file_in_vm = "yes" == params.get("check_image_file_in_vm", "no") pre_set_root_dir = None blk_source_folder = None convert_qcow2_image_to_raw = "yes" == params.get( "convert_qcow2_image_to_raw", "no") repeatedly_do_blockcommit_pivot = "yes" == params.get( "repeatedly_do_blockcommit_pivot", "no") from_top_without_active_option = "yes" == 
params.get( "from_top_without_active_option", "no") top_to_middle_keep_overlay = "yes" == params.get( "top_to_middle_keep_overlay", "no") block_disk_type_based_on_file_backing_file = "yes" == params.get( "block_disk_type_based_on_file_backing_file", "no") block_disk_type_based_on_gluster_backing_file = "yes" == params.get( "block_disk_type_based_on_gluster_backing_file", "no") # Check whether qemu-img need add -U suboption since locking feature was added afterwards qemu-2.10 qemu_img_locking_feature_support = libvirt_storage.check_qemu_image_lock_support( ) backing_file_relative_path = "yes" == params.get( "backing_file_relative_path", "no") # Process domain disk device parameters disk_type = params.get("disk_type") disk_src_protocol = params.get("disk_source_protocol") restart_tgtd = params.get("restart_tgtd", 'no') vol_name = params.get("vol_name") tmp_dir = data_dir.get_tmp_dir() pool_name = params.get("pool_name", "gluster-pool") brick_path = os.path.join(tmp_dir, pool_name) if not top_inactive: if not libvirt_version.version_compare(1, 2, 4): test.cancel("live active block commit is not supported" " in current libvirt version.") # This is brought by new feature:block-dev if (libvirt_version.version_compare(6, 0, 0) and params.get("transport", "") == "rdma"): test.cancel("If blockdev is enabled, the transport protocol 'rdma' is " "not yet supported.") # A backup of original vm vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) # Abort the test if there are snapshots already exsiting_snaps = virsh.snapshot_list(vm_name) if len(exsiting_snaps) != 0: test.fail("There are snapshots created for %s already" % vm_name) snapshot_external_disks = [] cmd_session = None # Prepare a blank params to confirm if delete the configure at the end of the test ceph_cfg = '' try: if disk_src_protocol == 'iscsi' and disk_type == 'block' and reuse_external_snapshot: first_disk = vm.get_first_disk_devices() pre_set_root_dir = os.path.dirname(first_disk['source']) if disk_src_protocol == 'iscsi' and disk_type == 'network': if not libvirt_version.version_compare(1, 0, 4): test.cancel("'iscsi' disk doesn't support in" " current libvirt version.") # Set vm xml and guest agent if replace_vm_disk: if disk_src_protocol == "rbd" and disk_type == "network": src_host = params.get("disk_source_host", "EXAMPLE_HOSTS") mon_host = params.get("mon_host", "EXAMPLE_MON_HOST") # Create config file if it doesn't exist ceph_cfg = ceph.create_config_file(mon_host) if src_host.count("EXAMPLE") or mon_host.count("EXAMPLE"): test.cancel("Please provide rbd host first.") params.update({ "disk_source_name": os.path.join( pool_name, 'rbd_' + utils_misc.generate_random_string(4) + '.img') }) if utils_package.package_install(["ceph-common"]): ceph.rbd_image_rm( mon_host, *params.get("disk_source_name").split('/')) else: test.error('Failed to install ceph-common to clean image.') if backing_file_relative_path: if vm.is_alive(): vm.destroy(gracefully=False) first_src_file = get_first_disk_source() blk_source_image = os.path.basename(first_src_file) blk_source_folder = os.path.dirname(first_src_file) replace_disk_image = make_relative_path_backing_files() params.update({ 'disk_source_name': replace_disk_image, 'disk_type': 'file', 'disk_src_protocol': 'file' }) vm.start() if convert_qcow2_image_to_raw: if vm.is_alive(): vm.destroy(gracefully=False) first_src_file = get_first_disk_source() blk_source_image = os.path.basename(first_src_file) blk_source_folder = os.path.dirname(first_src_file) blk_source_image_after_converted = 
"%s/converted_%s" % ( blk_source_folder, blk_source_image) # Convert the image from qcow2 to raw convert_disk_cmd = ("qemu-img convert" " -O %s %s %s" % (disk_format, first_src_file, blk_source_image_after_converted)) process.run(convert_disk_cmd, ignore_status=False, shell=True) params.update({ 'disk_source_name': blk_source_image_after_converted, 'disk_type': 'file', 'disk_src_protocol': 'file' }) libvirt.set_vm_disk(vm, params, tmp_dir) if needs_agent: vm.prepare_guest_agent() if repeatedly_do_blockcommit_pivot: do_blockcommit_pivot_repeatedly() # Create block type disk on file backing file if block_disk_type_based_on_file_backing_file or block_disk_type_based_on_gluster_backing_file: if not vm.is_alive(): vm.start() first_src_file = get_first_disk_source() libvirt.setup_or_cleanup_iscsi(is_setup=False) iscsi_target = libvirt.setup_or_cleanup_iscsi(is_setup=True) block_type_backstore = iscsi_target if block_disk_type_based_on_file_backing_file: first_src_file = get_first_disk_source() if block_disk_type_based_on_gluster_backing_file: first_src_file = "gluster://%s/%s/gluster.qcow2" % ( params.get("gluster_server_ip"), params.get("vol_name")) backing_file_create_cmd = ( "qemu-img create -f %s -o backing_file=%s,backing_fmt=%s %s" % ("qcow2", first_src_file, "qcow2", block_type_backstore)) process.run(backing_file_create_cmd, ignore_status=False, shell=True) meta_options = " --reuse-external --disk-only --no-metadata" options = "%s --diskspec %s,file=%s" % (meta_options, 'vda', block_type_backstore) virsh.snapshot_create_as(vm_name, options, ignore_status=False, debug=True) # The first disk is supposed to include OS # We will perform blockcommit operation for it. first_disk = vm.get_first_disk_devices() blk_source = first_disk['source'] blk_target = first_disk['target'] snapshot_flag_files = [] created_image_files_in_vm = [] # get a vm session before snapshot session = vm.wait_for_login() # do snapshot postfix_n = 'snap' if reuse_external_snapshot: make_relative_path_backing_files(pre_set_root_dir) blk_source_folder = create_reuse_external_snapshots( pre_set_root_dir) else: make_disk_snapshot(postfix_n, snapshot_take, check_snapshot_tree, check_image_file_in_vm) basename = os.path.basename(blk_source) diskname = basename.split(".")[0] snap_src_lst = [blk_source] if multiple_chain: snap_name = "%s.%s1" % (diskname, postfix_n) snap_top = os.path.join(tmp_dir, snap_name) top_index = snapshot_external_disks.index(snap_top) + 1 omit_list = snapshot_external_disks[top_index:] vm.destroy(gracefully=False) vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disk_xml = '' disk_xmls = vmxml.get_devices(device_type="disk") for disk in disk_xmls: if disk.get('device_tag') == 'disk': disk_xml = disk break vmxml.del_device(disk_xml) disk_dict = {'attrs': {'file': snap_top}} disk_xml.source = disk_xml.new_disk_source(**disk_dict) if libvirt_version.version_compare(6, 0, 0): bs_source = {'file': blk_source} bs_dict = { "type": params.get("disk_type", "file"), "format": { 'type': params.get("disk_format", "qcow2") } } new_bs = disk_xml.new_backingstore(**bs_dict) new_bs["source"] = disk_xml.backingstore.new_source( **bs_source) disk_xml.backingstore = new_bs vmxml.add_device(disk_xml) vmxml.sync() vm.start() session = vm.wait_for_login() postfix_n = 'new_snap' make_disk_snapshot(postfix_n, snapshot_take) snap_src_lst = [blk_source] snap_src_lst += snapshot_external_disks logging.debug("omit list is %s", omit_list) for i in omit_list: snap_src_lst.remove(i) else: # snapshot src file list snap_src_lst += 
snapshot_external_disks backing_chain = '' for i in reversed(list(range(snapshot_take))): if i == 0: backing_chain += "%s" % snap_src_lst[i] else: backing_chain += "%s -> " % snap_src_lst[i] logging.debug("The backing chain is: %s" % backing_chain) # check snapshot disk xml backingStore is expected vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disks = vmxml.devices.by_device_tag('disk') disk_xml = None for disk in disks: if disk.target['dev'] != blk_target: continue else: if disk.device != 'disk': continue disk_xml = disk.xmltreefile logging.debug("the target disk xml after snapshot is %s", disk_xml) break if not disk_xml: test.fail("Can't find disk xml with target %s" % blk_target) elif libvirt_version.version_compare(1, 2, 4): # backingStore element introduced in 1.2.4 chain_lst = snap_src_lst[::-1] ret = check_chain_xml(disk_xml, chain_lst) if not ret: test.fail("Domain image backing chain check failed") # set blockcommit_options top_image = None blockcommit_options = "--wait --verbose" if with_timeout: blockcommit_options += " --timeout %s" % cmd_timeout if base_option == "shallow": blockcommit_options += " --shallow" elif base_option == "base": if middle_base: snap_name = "%s.%s1" % (diskname, postfix_n) blk_source = os.path.join(tmp_dir, snap_name) blockcommit_options += " --base %s" % blk_source if len(bandwidth): blockcommit_options += " --bandwidth %s" % bandwidth if bandwidth_byte: blockcommit_options += " --bytes" if top_inactive: snap_name = "%s.%s2" % (diskname, postfix_n) top_image = os.path.join(tmp_dir, snap_name) if reuse_external_snapshot: index = len(snapshot_external_disks) - 2 top_image = snapshot_external_disks[index] blockcommit_options += " --top %s" % top_image else: blockcommit_options += " --active" if pivot_opt: blockcommit_options += " --pivot" if from_top_without_active_option: blockcommit_options = blockcommit_options.replace("--active", "") if top_to_middle_keep_overlay: blockcommit_options = blockcommit_options.replace("--active", "") blockcommit_options = blockcommit_options.replace("--pivot", "") blockcommit_options += " --keep-overlay" if restart_vm_before_commit: top = 2 base = len(snapshot_external_disks) blockcommit_options = ( "--top %s[%d] --base %s[%d] --verbose --wait --keep-relative" % (disk_target, top, disk_target, base)) vm.destroy(gracefully=True) vm.start() if vm_state == "shut off": vm.destroy(gracefully=True) if with_active_commit: # inactive commit follow active commit will fail with bug 1135339 cmd = "virsh blockcommit %s %s --active --pivot" % (vm_name, blk_target) cmd_session = aexpect.ShellSession(cmd) if backing_file_relative_path: blockcommit_options = " --active --verbose --shallow --pivot --keep-relative" block_commit_index = snapshot_take expect_backing_file = False # Do block commit using --active for count in range(1, snapshot_take): res = virsh.blockcommit(vm_name, blk_target, blockcommit_options, **virsh_dargs) libvirt.check_exit_status(res, status_error) if top_inactive: vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disk_xml = '' disk_xmls = vmxml.get_devices(device_type="disk") for disk in disk_xmls: if disk.get('device_tag') == 'disk': disk_xml = disk break top_index = 1 try: top_index = disk_xml.backingstore.index except AttributeError: pass else: top_index = int(top_index) block_commit_index = snapshot_take - 1 expect_backing_file = True for count in range(1, block_commit_index): # Do block commit with --wait if top_inactive if top_inactive: blockcommit_options = (" --wait --verbose --top vda[%d] " "--base vda[%d] 
--keep-relative" % (top_index, top_index + 1)) if not libvirt_version.version_compare(6, 0, 0): top_index = 1 else: top_index += 1 res = virsh.blockcommit(vm_name, blk_target, blockcommit_options, **virsh_dargs) libvirt.check_exit_status(res, status_error) check_chain_backing_files(blk_source_image, expect_backing_file) return if reuse_external_snapshot and not top_inactive: block_commit_index = len(snapshot_external_disks) - 1 for index in range(block_commit_index): # Do block commit with --shallow --wait external_blockcommit_options = ( " --shallow --wait --verbose --top %s " % (snapshot_external_disks[index])) res = virsh.blockcommit(vm_name, blk_target, external_blockcommit_options, **virsh_dargs) libvirt.check_exit_status(res, status_error) # Do blockcommit with top active result = virsh.blockcommit(vm_name, blk_target, blockcommit_options, **virsh_dargs) # Check status_error libvirt.check_exit_status(result, status_error) return # Start one thread to check the bandwidth in output if bandwidth and bandwidth_byte: bandwidth += 'B' pool = ThreadPool(processes=1) pool.apply_async( check_bandwidth_thread, (libvirt.check_blockjob, vm_name, blk_target, bandwidth, test)) # Run test case # Active commit does not support on rbd based disk with bug 1200726 result = virsh.blockcommit(vm_name, blk_target, blockcommit_options, **virsh_dargs) # Check status_error libvirt.check_exit_status(result, status_error) # Skip check chain file as per test case description if restart_vm_before_commit: return if check_image_file_in_vm: check_file_in_vm() if result.exit_status and status_error: return while True: vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disks = vmxml.devices.by_device_tag('disk') for disk in disks: if disk.target['dev'] != blk_target: continue else: disk_xml = disk.xmltreefile break if not top_inactive: disk_mirror = disk_xml.find('mirror') if '--pivot' not in blockcommit_options: if disk_mirror is not None: job_type = disk_mirror.get('job') job_ready = disk_mirror.get('ready') src_element = disk_mirror.find('source') disk_src_file = None for elem in ('file', 'name', 'dev'): elem_val = src_element.get(elem) if elem_val: disk_src_file = elem_val break err_msg = "blockcommit base source " err_msg += "%s not expected" % disk_src_file if '--shallow' in blockcommit_options: if not multiple_chain: if disk_src_file != snap_src_lst[2]: test.fail(err_msg) else: if disk_src_file != snap_src_lst[3]: test.fail(err_msg) else: if disk_src_file != blk_source: test.fail(err_msg) if libvirt_version.version_compare(1, 2, 7): # The job attribute mentions which API started the # operation since 1.2.7. if job_type != 'active-commit': test.fail("blockcommit job type '%s'" " not expected" % job_type) if job_ready != 'yes': # The attribute ready, if present, tracks # progress of the job: yes if the disk is known # to be ready to pivot, or, since 1.2.7, abort # or pivot if the job is in the process of # completing. 
continue else: logging.debug( "after active block commit job " "ready for pivot, the target disk" " xml is %s", disk_xml) break else: break else: break else: if disk_mirror is None: logging.debug(disk_xml) if "--shallow" in blockcommit_options: chain_lst = snap_src_lst[::-1] chain_lst.pop(0) ret = check_chain_xml(disk_xml, chain_lst) if not ret: test.fail("Domain image backing " "chain check failed") cmd_result = virsh.blockjob(vm_name, blk_target, '', ignore_status=True, debug=True) libvirt.check_exit_status(cmd_result) elif "--base" in blockcommit_options: chain_lst = snap_src_lst[::-1] base_index = chain_lst.index(blk_source) chain_lst = chain_lst[base_index:] ret = check_chain_xml(disk_xml, chain_lst) if not ret: test.fail("Domain image backing " "chain check failed") break else: # wait pivot after commit is synced continue else: logging.debug("after inactive commit the disk xml is: %s" % disk_xml) if libvirt_version.version_compare(1, 2, 4): if "--shallow" in blockcommit_options: chain_lst = snap_src_lst[::-1] chain_lst.remove(top_image) ret = check_chain_xml(disk_xml, chain_lst) if not ret: test.fail("Domain image backing chain " "check failed") elif "--base" in blockcommit_options: chain_lst = snap_src_lst[::-1] top_index = chain_lst.index(top_image) base_index = chain_lst.index(blk_source) val_tmp = [] for i in range(top_index, base_index): val_tmp.append(chain_lst[i]) for i in val_tmp: chain_lst.remove(i) ret = check_chain_xml(disk_xml, chain_lst) if not ret: test.fail("Domain image backing chain " "check failed") break else: break # Check flag files if not vm_state == "shut off" and not multiple_chain: for flag in snapshot_flag_files: status, output = session.cmd_status_output("cat %s" % flag) if status: test.fail("blockcommit failed: %s" % output) if not pivot_opt and snap_in_mirror: # do snapshot during mirror phase snap_path = "%s/%s.snap" % (tmp_dir, vm_name) snap_opt = "--disk-only --atomic --no-metadata " snap_opt += "vda,snapshot=external,file=%s" % snap_path snapshot_external_disks.append(snap_path) cmd_result = virsh.snapshot_create_as(vm_name, snap_opt, ignore_statues=True, debug=True) libvirt.check_exit_status(cmd_result, snap_in_mirror_err) finally: if vm.is_alive(): vm.destroy(gracefully=False) # Clean ceph image if used in test if 'mon_host' in locals(): if utils_package.package_install(["ceph-common"]): disk_source_name = params.get("disk_source_name") cmd = ("rbd -m {0} info {1} && rbd -m {0} rm " "{1}".format(mon_host, disk_source_name)) cmd_result = process.run(cmd, ignore_status=True, shell=True) logging.debug("result of rbd removal: %s", cmd_result) else: logging.debug('Failed to install ceph-common to clean ceph.') # Recover xml of vm. 
vmxml_backup.sync("--snapshots-metadata") # Remove ceph configure file if created if ceph_cfg: os.remove(ceph_cfg) if cmd_session: cmd_session.close() for disk in snapshot_external_disks: if os.path.exists(disk): os.remove(disk) if backing_file_relative_path or reuse_external_snapshot: libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup) if blk_source_folder: process.run("cd %s && rm -rf b c d" % blk_source_folder, shell=True) if disk_src_protocol == 'iscsi' or 'iscsi_target' in locals(): libvirt.setup_or_cleanup_iscsi(is_setup=False, restart_tgtd=restart_tgtd) elif disk_src_protocol == 'gluster': gluster.setup_or_cleanup_gluster(False, brick_path=brick_path, **params) libvirtd = utils_libvirtd.Libvirtd() libvirtd.restart() elif disk_src_protocol == 'netfs': restore_selinux = params.get('selinux_status_bak') libvirt.setup_or_cleanup_nfs(is_setup=False, restore_selinux=restore_selinux) # Recover images xattr if having some dirty_images = get_images_with_xattr(vm) if dirty_images: clean_images_with_xattr(dirty_images) test.fail("VM's image(s) having xattr left")
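# --- Illustrative sketch (not part of the test above) ---
# The blockcommit test mainly exercises two option strings: an active commit
# of the top image that pivots the domain back onto the base, and an inactive
# commit of one intermediate overlay into the image below it.  The helper
# below only mirrors how those strings are assembled; image paths are
# placeholders, and other flags used by the test (--shallow, --bandwidth,
# --timeout, --keep-relative) are omitted for brevity.
def example_blockcommit_options(active=True, top_image=None, base_image=None):
    options = "--wait --verbose"
    if active:
        # Commit the active layer and pivot the domain back onto the base.
        options += " --active --pivot"
    else:
        # Commit one intermediate overlay (top) into its backing file (base).
        if top_image:
            options += " --top %s" % top_image
        if base_image:
            options += " --base %s" % base_image
    return options

# e.g. "virsh blockcommit <domain> vda " + example_blockcommit_options(
#          active=False, top_image="/tmp/rhel.snap2", base_image="/tmp/rhel.snap1")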
def run(test, params, env): """ Test migration with glusterfs. """ def create_or_clean_backend_dir(g_uri, params, session=None, is_clean=False): """ Create/cleanup backend directory :params g_uri: glusterfs uri :params params: the parameters to be checked :params session: VM/remote session object :params is_cleanup: True for cleanup backend directory; False for create one. :return: gluster_img if is_clean is equal to True """ mount_point = params.get("gluster_mount_dir") is_symlink = params.get("gluster_create_symlink") == "yes" symlink_name = params.get("gluster_symlink") gluster_img = None if not is_clean: if not utils_misc.check_exists(mount_point, session): utils_misc.make_dirs(mount_point, session) if gluster.glusterfs_is_mounted(mount_point, session): gluster.glusterfs_umount(g_uri, mount_point, session) gluster.glusterfs_mount(g_uri, mount_point, session) gluster_img = os.path.join(mount_point, disk_img) if is_symlink: utils_misc.make_symlink(mount_point, symlink_name) utils_misc.make_symlink(mount_point, symlink_name, remote_session) gluster_img = os.path.join(symlink_name, disk_img) return gluster_img else: if is_symlink: utils_misc.rm_link(symlink_name, session) gluster.glusterfs_umount(g_uri, mount_point, session) if utils_misc.check_exists(mount_point, session): utils_misc.safe_rmdir(gluster_mount_dir, session=session) def do_migration(vm, dest_uri, options, extra): """ Execute the migration with given parameters :param vm: the guest to be migrated :param dest_uri: the destination uri for migration :param options: options next to 'migrate' command :param extra: options in the end of the migrate command line :return: CmdResult object """ # Migrate the guest. virsh_args.update({"ignore_status": True}) migration_res = vm.migrate(dest_uri, options, extra, **virsh_args) if int(migration_res.exit_status) != 0: logging.error("Migration failed for %s.", vm_name) return migration_res if vm.is_alive(): logging.info("VM is alive on destination %s.", dest_uri) else: test.fail("VM is not alive on destination %s" % dest_uri) # Throws exception if console shows panic message vm.verify_kernel_crash() return migration_res def check_migration_res(result): """ Check if the migration result is as expected :param result: the output of migration :raise: test.fail if test is failed """ if not result: test.error("No migration result is returned.") logging.info("Migration out: %s", result.stdout_text.strip()) logging.info("Migration error: %s", result.stderr_text.strip()) if status_error: # Migration should fail if err_msg: # Special error messages are expected if not re.search(err_msg, result.stderr_text.strip()): test.fail("Can not find the expected patterns '%s' in " "output '%s'" % (err_msg, result.stderr_text.strip())) else: logging.debug("It is the expected error message") else: if int(result.exit_status) != 0: logging.debug("Migration failure is expected result") else: test.fail("Migration success is unexpected result") else: if int(result.exit_status) != 0: test.fail(result.stderr_text.strip()) # Local variables virsh_args = {"debug": True} server_ip = params["server_ip"] = params.get("remote_ip") server_user = params["server_user"] = params.get("remote_user", "root") server_pwd = params["server_pwd"] = params.get("remote_pwd") client_ip = params["client_ip"] = params.get("local_ip") client_pwd = params["client_pwd"] = params.get("local_pwd") extra = params.get("virsh_migrate_extra") options = params.get("virsh_migrate_options") virsh_options = params.get("virsh_options", "--verbose --live") 
vol_name = params.get("vol_name") disk_format = params.get("disk_format", "qcow2") gluster_mount_dir = params.get("gluster_mount_dir") status_error = "yes" == params.get("status_error", "no") err_msg = params.get("err_msg") host_ip = params.get("gluster_server_ip", "") migr_vm_back = params.get("migrate_vm_back", "no") == "yes" selinux_local = params.get('set_sebool_local', 'yes') == "yes" selinux_remote = params.get('set_sebool_remote', 'no') == "yes" sebool_fusefs_local = params.get('set_sebool_fusefs_local', 'yes') sebool_fusefs_remote = params.get('set_sebool_fusefs_remote', 'yes') test_dict = dict(params) test_dict["local_boolean_varible"] = "virt_use_fusefs" test_dict["remote_boolean_varible"] = "virt_use_fusefs" remove_pkg = False seLinuxBool = None seLinuxfusefs = None gluster_uri = None mig_result = None # Make sure all of parameters are assigned a valid value check_parameters(test, params) # params for migration connection params["virsh_migrate_desturi"] = libvirt_vm.complete_uri( params.get("migrate_dest_host")) params["virsh_migrate_connect_uri"] = libvirt_vm.complete_uri( params.get("migrate_source_host")) src_uri = params.get("virsh_migrate_connect_uri") dest_uri = params.get("virsh_migrate_desturi") # For --postcopy enable postcopy_options = params.get("postcopy_options") if postcopy_options: virsh_options = "%s %s" % (virsh_options, postcopy_options) params['virsh_options'] = virsh_options vm_name = params.get("migrate_main_vm") vm = env.get_vm(vm_name) vm.verify_alive() # Back up xml file. new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) orig_config_xml = new_xml.copy() migrate_setup = libvirt.MigrationTest() try: # Create a remote runner for later use runner_on_target = remote.RemoteRunner(host=server_ip, username=server_user, password=server_pwd) # Configure selinux if selinux_local or selinux_remote: seLinuxBool = utils_misc.SELinuxBoolean(params) seLinuxBool.setup() if sebool_fusefs_local or sebool_fusefs_remote: seLinuxfusefs = utils_misc.SELinuxBoolean(test_dict) seLinuxfusefs.setup() # Setup glusterfs and disk xml. 
disk_img = "gluster.%s" % disk_format params['disk_img'] = disk_img libvirt.set_vm_disk(vm, params) vm_xml_cxt = virsh.dumpxml(vm_name).stdout_text.strip() logging.debug("The VM XML with gluster disk source: \n%s", vm_xml_cxt) # Check if gluster server is deployed locally if not host_ip: logging.debug("Enable port 24007 and 49152:49216") migrate_setup.migrate_pre_setup(src_uri, params, ports="24007") migrate_setup.migrate_pre_setup(src_uri, params) gluster_uri = "{}:{}".format(client_ip, vol_name) else: gluster_uri = "{}:{}".format(host_ip, vol_name) remote_session = remote.wait_for_login('ssh', server_ip, '22', server_user, server_pwd, r"[\#\$]\s*$") if gluster_mount_dir: # The package 'glusterfs-fuse' is not installed on target # which makes issue when trying to 'mount -t glusterfs' pkg_name = 'glusterfs-fuse' logging.debug("Check if glusterfs-fuse is installed") pkg_mgr = utils_package.package_manager(remote_session, pkg_name) if not pkg_mgr.is_installed(pkg_name): logging.debug("glusterfs-fuse will be installed") if not pkg_mgr.install(): test.error("Package '%s' installation fails" % pkg_name) else: remove_pkg = True gluster_img = create_or_clean_backend_dir(gluster_uri, params) create_or_clean_backend_dir(gluster_uri, params, remote_session) logging.debug("Gluster Image is %s", gluster_img) gluster_backend_disk = {'disk_source_name': gluster_img} # Update disk xml with gluster image in backend dir libvirt.set_vm_disk(vm, gluster_backend_disk) remote_session.close() mig_result = do_migration(vm, dest_uri, options, extra) check_migration_res(mig_result) if migr_vm_back: ssh_connection = utils_conn.SSHConnection(server_ip=client_ip, server_pwd=client_pwd, client_ip=server_ip, client_pwd=server_pwd) try: ssh_connection.conn_check() except utils_conn.ConnectionError: ssh_connection.conn_setup() ssh_connection.conn_check() # Pre migration setup for local machine migrate_setup.migrate_pre_setup(src_uri, params) cmd = "virsh migrate %s %s %s" % (vm_name, virsh_options, src_uri) logging.debug("Start migrating: %s", cmd) cmd_result = remote.run_remote_cmd(cmd, params, runner_on_target) logging.info(cmd_result) if cmd_result.exit_status: destroy_cmd = "virsh destroy %s" % vm_name remote.run_remote_cmd(destroy_cmd, params, runner_on_target, ignore_status=False) test.fail("Failed to run '%s' on remote: %s" % (cmd, cmd_result)) finally: logging.info("Recovery test environment") orig_config_xml.sync() # Clean up of pre migration setup for local machine if migr_vm_back: if 'ssh_connection' in locals(): ssh_connection.auto_recover = True migrate_setup.migrate_pre_setup(src_uri, params, cleanup=True) # Cleanup selinu configuration if seLinuxBool: seLinuxBool.cleanup() if seLinuxfusefs: seLinuxfusefs.cleanup() # Disable ports 24007 and 49152:49216 if not host_ip: logging.debug("Disable 24007 and 49152:49216 in Firewall") migrate_setup.migrate_pre_setup(src_uri, params, cleanup=True, ports="24007") migrate_setup.migrate_pre_setup(src_uri, params, cleanup=True) gluster.setup_or_cleanup_gluster(False, **params) # Cleanup backend directory/symlink if gluster_mount_dir and gluster_uri: remote_session = remote.wait_for_login('ssh', server_ip, '22', server_user, server_pwd, r"[\#\$]\s*$") create_or_clean_backend_dir(gluster_uri, params, is_clean=True) create_or_clean_backend_dir(gluster_uri, params, remote_session, True) if remove_pkg: pkg_mgr = utils_package.package_manager(remote_session, pkg_name) if pkg_mgr.is_installed(pkg_name): logging.debug("glusterfs-fuse will be uninstalled") if not 
pkg_mgr.remove(): logging.error("Package '%s' uninstallation failed", pkg_name) remote_session.close()
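# --- Illustrative sketch (not part of the test above) ---
# Layout of the backend directory the migration test prepares on both hosts:
# the gluster volume "host:vol" is FUSE-mounted at gluster_mount_dir and the
# domain image sits inside it, optionally reached through a symlink.  Host,
# volume, mount point and image name below are placeholders.
import os


def example_backend_image_path(host_ip, vol_name, mount_dir, disk_img,
                               symlink=None):
    gluster_uri = "%s:%s" % (host_ip, vol_name)           # e.g. 192.0.2.10:vol0
    mount_cmd = "mount -t glusterfs %s %s" % (gluster_uri, mount_dir)
    base_dir = symlink if symlink else mount_dir
    return mount_cmd, os.path.join(base_dir, disk_img)

# e.g. example_backend_image_path("192.0.2.10", "vol0",
#                                 "/var/lib/libvirt/migrate", "gluster.qcow2",
#                                 symlink="/tmp/vm_img_link")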
def run(test, params, env): """ Test DAC setting in both domain xml and qemu.conf. (1) Init variables for test. (2) Set VM xml and qemu.conf with proper DAC label, also set monitor socket parent dir with propoer ownership and mode. (3) Start VM and check the context. """ # Get general variables. status_error = ('yes' == params.get("status_error", 'no')) host_sestatus = params.get("host_selinux", "enforcing") # Get variables about seclabel for VM. sec_type = params.get("vm_sec_type", "dynamic") vm_sec_model = params.get("vm_sec_model", "dac") vm_sec_label = params.get("vm_sec_label", None) vm_sec_relabel = params.get("vm_sec_relabel", "yes") sec_dict = { 'type': sec_type, 'model': vm_sec_model, 'relabel': vm_sec_relabel } if vm_sec_label: sec_dict['label'] = vm_sec_label set_qemu_conf = "yes" == params.get("set_qemu_conf", "no") # Get per-img seclabel variables disk_type = params.get("disk_type") disk_target = params.get('disk_target') disk_src_protocol = params.get("disk_source_protocol") vol_name = params.get("vol_name") tmp_dir = data_dir.get_tmp_dir() pool_name = params.get("pool_name", "gluster-pool") brick_path = os.path.join(tmp_dir, pool_name) invalid_label = 'yes' == params.get("invalid_label", "no") relabel = params.get("per_img_sec_relabel") sec_label = params.get("per_img_sec_label") per_sec_model = params.get("per_sec_model", 'dac') per_img_dict = { 'sec_model': per_sec_model, 'relabel': relabel, 'sec_label': sec_label } params.update(per_img_dict) # Get qemu.conf config variables qemu_user = params.get("qemu_user", 'qemu') qemu_group = params.get("qemu_group", 'qemu') dynamic_ownership = "yes" == params.get("dynamic_ownership", "yes") # Get variables about VM and get a VM object and VMXML instance. vm_name = params.get("main_vm") vm = env.get_vm(vm_name) vmxml = VMXML.new_from_inactive_dumpxml(vm_name) backup_xml = vmxml.copy() # Set selinux of host. backup_sestatus = utils_selinux.get_status() if backup_sestatus == "disabled": test.cancel("SELinux is in Disabled " "mode. it must be in Enforcing " "mode to run this test") utils_selinux.set_status(host_sestatus) qemu_sock_mod = False qemu_sock_path = '/var/lib/libvirt/qemu/' qemu_conf = utils_config.LibvirtQemuConfig() libvirtd = utils_libvirtd.Libvirtd() try: if set_qemu_conf: # Set qemu.conf for user and group if qemu_user: qemu_conf.user = qemu_user if qemu_group: qemu_conf.group = qemu_group if dynamic_ownership: qemu_conf.dynamic_ownership = 1 else: qemu_conf.dynamic_ownership = 0 logging.debug("the qemu.conf content is: %s" % qemu_conf) libvirtd.restart() st = os.stat(qemu_sock_path) if not bool(st.st_mode & stat.S_IWGRP): # chmod g+w os.chmod(qemu_sock_path, st.st_mode | stat.S_IWGRP) qemu_sock_mod = True # Set the context of the VM. logging.debug("sec_dict is %s" % sec_dict) vmxml.set_seclabel([sec_dict]) vmxml.sync() # Get per-image seclabel in id string if sec_label: per_img_usr, per_img_grp = sec_label.split(':') sec_label_id = format_user_group_str(per_img_usr, per_img_grp) # Start VM to check the qemu process and image. try: # Set per-img sec context and start vm utlv.set_vm_disk(vm, params) # Start VM successfully. if status_error: if invalid_label: # invalid label should fail, more info in bug 1165485 logging.debug( "The guest failed to start as expected," "details see bug: bugzilla.redhat.com/show_bug.cgi" "?id=1165485") else: test.fail("Test succeeded in negative case.") # Get vm process label when VM is running. 
vm_pid = vm.get_pid() pid_stat = os.stat("/proc/%d" % vm_pid) vm_process_uid = pid_stat.st_uid vm_process_gid = pid_stat.st_gid vm_context = "%s:%s" % (vm_process_uid, vm_process_gid) logging.debug("vm process label is: %s", vm_context) # Get vm image label when VM is running if disk_type != "network": disks = vm.get_blk_devices() if libvirt_version.version_compare(3, 1, 0) and disk_type == "block": output = astring.to_text( process.system_output( "nsenter -t %d -m -- ls -l %s" % (vm_pid, disks[disk_target]['source']))) owner, group = output.strip().split()[2:4] disk_context = format_user_group_str(owner, group) else: stat_re = os.stat(disks[disk_target]['source']) disk_context = "%s:%s" % (stat_re.st_uid, stat_re.st_gid) logging.debug("The disk dac label after vm start is: %s", disk_context) if sec_label and relabel == 'yes': if disk_context != sec_label_id: test.fail("The disk label is not equal to " "'%s'." % sec_label_id) except virt_vm.VMStartError as e: # Starting VM failed. if not status_error: test.fail("Test failed in positive case." "error: %s" % e) finally: # clean up if vm.is_alive(): vm.destroy(gracefully=False) backup_xml.sync() if qemu_sock_mod: st = os.stat(qemu_sock_path) os.chmod(qemu_sock_path, st.st_mode ^ stat.S_IWGRP) if set_qemu_conf: qemu_conf.restore() libvirtd.restart() utils_selinux.set_status(backup_sestatus) if disk_src_protocol == 'iscsi': utlv.setup_or_cleanup_iscsi(is_setup=False) elif disk_src_protocol == 'gluster': gluster.setup_or_cleanup_gluster(False, brick_path=brick_path, **params) libvirtd.restart() elif disk_src_protocol == 'netfs': utlv.setup_or_cleanup_nfs(is_setup=False, restore_selinux=backup_sestatus)
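# --- Hypothetical sketch (assumption, not the test library's code) ---
# format_user_group_str() is referenced above but not defined here; it is
# assumed to turn "user:group" names (e.g. "qemu:qemu") into the numeric
# "uid:gid" string that the test compares against os.stat() results.  A
# minimal stand-in might look like this:
import grp
import pwd


def example_format_user_group_str(user, group):
    uid = int(user) if user.isdigit() else pwd.getpwnam(user).pw_uid
    gid = int(group) if group.isdigit() else grp.getgrnam(group).gr_gid
    return "%s:%s" % (uid, gid)

# e.g. example_format_user_group_str("root", "root") -> "0:0"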
def run(test, params, env): """ Test the pull-mode backup function Steps: 1. craete a vm with extra disk vdb 2. create some data on vdb 3. start a pull mode full backup on vdb 4. create some data on vdb 5. start a pull mode incremental backup 6. repeat step 5 to 7 7. check the full/incremental backup file data """ # Cancel the test if libvirt version is too low if not libvirt_version.version_compare(6, 0, 0): test.cancel("Current libvirt version doesn't support " "incremental backup.") hotplug_disk = "yes" == params.get("hotplug_disk", "no") original_disk_size = params.get("original_disk_size", "100M") original_disk_type = params.get("original_disk_type", "local") original_disk_target = params.get("original_disk_target", "vdb") scratch_type = params.get("scratch_type", "file") reuse_scratch_file = "yes" == params.get("reuse_scratch_file") prepare_scratch_file = "yes" == params.get("prepare_scratch_file") scratch_blkdev_path = params.get("scratch_blkdev_path") scratch_blkdev_size = params.get("scratch_blkdev_size", original_disk_size) prepare_scratch_blkdev = "yes" == params.get("prepare_scratch_blkdev") nbd_protocol = params.get("nbd_protocol", "unix") nbd_socket = params.get("nbd_socket", "/tmp/pull_backup.socket") nbd_tcp_port = params.get("nbd_tcp_port", "10809") set_exportname = "yes" == params.get("set_exportname") set_exportbitmap = "yes" == params.get("set_exportbitmap") backup_rounds = int(params.get("backup_rounds", 3)) backup_error = "yes" == params.get("backup_error") tmp_dir = data_dir.get_tmp_dir() try: vm_name = params.get("main_vm") vm = env.get_vm(vm_name) # Backup vm xml vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) vmxml_backup = vmxml.copy() # Enable vm incremental backup capability. This is only a workaround # to make sure incremental backup can work for the vm. Code needs to # be removded immediately when the function enabled by default, which # is tracked by bz1799015 tree = ET.parse(vmxml.xml) root = tree.getroot() for elem in root.iter('domain'): elem.set('xmlns:qemu', 'http://libvirt.org/schemas/domain/qemu/1.0') qemu_cap = ET.Element("qemu:capabilities") elem.insert(-1, qemu_cap) incbackup_cap = ET.Element("qemu:add") incbackup_cap.set('capability', 'incremental-backup') qemu_cap.insert(1, incbackup_cap) vmxml.undefine() tmp_vm_xml = os.path.join(tmp_dir, "tmp_vm.xml") tree.write(tmp_vm_xml) virsh.define(tmp_vm_xml) vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) logging.debug("Script insert xml elements to make sure vm can support " "incremental backup. This should be removded when " "bz 1799015 fixed.") # Prepare the disk to be backuped. 
disk_params = {} disk_path = "" if original_disk_type == "local": image_name = "{}_image.qcow2".format(original_disk_target) disk_path = os.path.join(tmp_dir, image_name) libvirt.create_local_disk("file", disk_path, original_disk_size, "qcow2") disk_params = { "device_type": "disk", "type_name": "file", "driver_type": "qcow2", "target_dev": original_disk_target, "source_file": disk_path } if original_disk_target: disk_params["target_dev"] = original_disk_target elif original_disk_type == "iscsi": iscsi_host = '127.0.0.1' iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi( is_setup=True, is_login=False, image_size=original_disk_size, portal_ip=iscsi_host) disk_path = ("iscsi://[%s]/%s/%s" % (iscsi_host, iscsi_target, lun_num)) process.run("qemu-img create -f qcow2 %s %s" % (disk_path, original_disk_size), shell=True, verbose=True) disk_params = { 'device_type': 'disk', 'type_name': 'network', "driver_type": "qcow2", 'target_dev': original_disk_target } disk_params_src = { 'source_protocol': 'iscsi', 'source_name': iscsi_target + "/%s" % lun_num, 'source_host_name': iscsi_host, 'source_host_port': '3260' } disk_params.update(disk_params_src) elif original_disk_type == "gluster": gluster_vol_name = "gluster_vol" gluster_pool_name = "gluster_pool" gluster_img_name = "gluster.qcow2" gluster_host_ip = gluster.setup_or_cleanup_gluster( is_setup=True, vol_name=gluster_vol_name, pool_name=gluster_pool_name, **params) disk_path = 'gluster://%s/%s/%s' % ( gluster_host_ip, gluster_vol_name, gluster_img_name) process.run("qemu-img create -f qcow2 %s %s" % (disk_path, original_disk_size), shell=True, verbose=True) disk_params = { 'device_type': 'disk', 'type_name': 'network', "driver_type": "qcow2", 'target_dev': original_disk_target } disk_params_src = { 'source_protocol': 'gluster', 'source_name': gluster_vol_name + "/%s" % gluster_img_name, 'source_host_name': gluster_host_ip, 'source_host_port': '24007' } disk_params.update(disk_params_src) else: test.error("The disk type '%s' not supported in this script.", original_disk_type) if hotplug_disk: vm.start() session = vm.wait_for_login().close() disk_xml = libvirt.create_disk_xml(disk_params) virsh.attach_device(vm_name, disk_xml, debug=True) else: disk_xml = libvirt.create_disk_xml(disk_params) virsh.attach_device(vm.name, disk_xml, flagstr="--config", debug=True) vm.start() session = vm.wait_for_login() new_disks_in_vm = list(utils_disk.get_linux_disks(session).keys()) session.close() if len(new_disks_in_vm) != 1: test.fail("Test disk not prepared in vm") # Use the newly added disk as the test disk test_disk_in_vm = "/dev/" + new_disks_in_vm[0] vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) vm_disks = list(vmxml.get_disk_all().keys()) checkpoint_list = [] is_incremental = False backup_file_list = [] for backup_index in range(backup_rounds): # Prepare backup xml backup_params = {"backup_mode": "pull"} if backup_index > 0: is_incremental = True backup_params["backup_incremental"] = "checkpoint_" + str( backup_index - 1) # Set libvirt default nbd export name and bitmap name nbd_export_name = original_disk_target nbd_bitmap_name = "backup-" + original_disk_target backup_server_dict = {} if nbd_protocol == "unix": backup_server_dict["transport"] = "unix" backup_server_dict["socket"] = nbd_socket else: backup_server_dict["name"] = "localhost" backup_server_dict["port"] = nbd_tcp_port backup_params["backup_server"] = backup_server_dict backup_disk_xmls = [] for vm_disk in vm_disks: backup_disk_params = {"disk_name": vm_disk} if vm_disk != 
original_disk_target: backup_disk_params["enable_backup"] = "no" else: backup_disk_params["enable_backup"] = "yes" backup_disk_params["disk_type"] = scratch_type # Custom nbd export name and bitmap name if required if set_exportname: nbd_export_name = original_disk_target + "_custom_exp" backup_disk_params["exportname"] = nbd_export_name if set_exportbitmap: nbd_bitmap_name = original_disk_target + "_custom_bitmap" backup_disk_params["exportbitmap"] = nbd_bitmap_name # Prepare nbd scratch file/dev params scratch_params = {} if scratch_type == "file": scratch_file_name = "scratch_file_%s" % backup_index scratch_file_path = os.path.join( tmp_dir, scratch_file_name) if prepare_scratch_file: libvirt.create_local_disk("file", scratch_file_path, original_disk_size, "qcow2") scratch_params["file"] = scratch_file_path logging.debug("scratch_params: %s", scratch_params) elif scratch_type == "block": if prepare_scratch_blkdev: scratch_blkdev_path = libvirt.setup_or_cleanup_iscsi( is_setup=True, image_size=scratch_blkdev_size) logging.debug("abcd scratch_blkdev_path:%s", scratch_blkdev_path) scratch_params["dev"] = scratch_blkdev_path else: test.fail( "We do not support backup scratch type: '%s'" % scratch_type) backup_disk_params["backup_scratch"] = scratch_params backup_disk_xml = utils_backup.create_backup_disk_xml( backup_disk_params) backup_disk_xmls.append(backup_disk_xml) logging.debug("disk list %s", backup_disk_xmls) backup_xml = utils_backup.create_backup_xml( backup_params, backup_disk_xmls) logging.debug("ROUND_%s Backup Xml: %s", backup_index, backup_xml) # Prepare checkpoint xml checkpoint_name = "checkpoint_%s" % backup_index checkpoint_list.append(checkpoint_name) cp_params = {"checkpoint_name": checkpoint_name} cp_params["checkpoint_desc"] = params.get( "checkpoint_desc", "desc of cp_%s" % backup_index) disk_param_list = [] for vm_disk in vm_disks: cp_disk_param = {"name": vm_disk} if vm_disk != original_disk_target: cp_disk_param["checkpoint"] = "no" else: cp_disk_param["checkpoint"] = "bitmap" cp_disk_bitmap = params.get("cp_disk_bitmap") if cp_disk_bitmap: cp_disk_param["bitmap"] = cp_disk_bitmap + str( backup_index) disk_param_list.append(cp_disk_param) checkpoint_xml = utils_backup.create_checkpoint_xml( cp_params, disk_param_list) logging.debug("ROUND_%s Checkpoint Xml: %s", backup_index, checkpoint_xml) # Start backup backup_options = backup_xml.xml + " " + checkpoint_xml.xml # Create some data in vdb dd_count = "1" dd_seek = str(backup_index * 10 + 10) dd_bs = "1M" utils_backup.dd_data_to_vm_disk(vm, test_disk_in_vm, dd_bs, dd_seek, dd_count) if reuse_scratch_file: backup_options += " --reuse-external" backup_result = virsh.backup_begin(vm_name, backup_options, debug=True) if backup_result.exit_status: raise utils_backup.BackupBeginError( backup_result.stderr.strip()) backup_file_path = os.path.join( tmp_dir, "backup_file_%s.qcow2" % str(backup_index)) backup_file_list.append(backup_file_path) if not is_incremental: # Do full backup if nbd_protocol == "unix": nbd_export = ("nbd+unix:///%s?socket=%s" % (nbd_export_name, nbd_socket)) elif nbd_protocol == "tcp": nbd_export = ("nbd://localhost:%s/%s" % (nbd_tcp_port, nbd_export_name)) utils_backup.pull_full_backup_to_file(nbd_export, backup_file_path) logging.debug("Full backup to: %s", backup_file_path) else: # Do incremental backup nbd_params = { "nbd_protocol": nbd_protocol, "nbd_export": nbd_export_name } if nbd_protocol == "tcp": nbd_params["nbd_tcp_port"] = nbd_tcp_port elif nbd_protocol == "unix": nbd_params["nbd_socket"] 
= nbd_socket utils_backup.pull_incremental_backup_to_file( nbd_params, backup_file_path, nbd_bitmap_name, original_disk_size) virsh.domjobabort(vm_name, debug=True) for checkpoint_name in checkpoint_list: virsh.checkpoint_delete(vm_name, checkpoint_name, debug=True) if vm.is_alive(): vm.destroy(gracefully=False) # Compare the backup data and original data original_data_file = os.path.join(tmp_dir, "original_data.qcow2") cmd = "qemu-img convert -f qcow2 %s -O qcow2 %s" % (disk_path, original_data_file) process.run(cmd, shell=True, verbose=True) for backup_file in backup_file_list: if not utils_backup.cmp_backup_data(original_data_file, backup_file): test.fail("Backup and original data are not identical for" "'%s' and '%s'" % (disk_path, backup_file)) else: logging.debug("'%s' contains correct backup data", backup_file) except utils_backup.BackupBeginError as details: if backup_error: logging.debug("Backup failed as expected.") else: test.fail(details) finally: # Remove checkpoints if "checkpoint_list" in locals() and checkpoint_list: for checkpoint_name in checkpoint_list: virsh.checkpoint_delete(vm_name, checkpoint_name) if vm.is_alive(): vm.destroy(gracefully=False) # Restoring vm vmxml_backup.sync() # Remove iscsi devices if original_disk_type == "iscsi" or scratch_type == "block": libvirt.setup_or_cleanup_iscsi(False) # Remove gluster devices if original_disk_type == "gluster": gluster.setup_or_cleanup_gluster(is_setup=False, vol_name=gluster_vol_name, pool_name=gluster_pool_name, **params)
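# --- Illustrative sketch (not part of the test above) ---
# The pull-mode backup job exposes each enabled disk as an NBD export; the
# test reads it either over a unix socket or over TCP.  The helper below only
# shows the two URL forms used above; default socket path and port mirror the
# test defaults, and the qemu-img command in the comment is illustrative.
def example_nbd_export_url(export_name, nbd_protocol="unix",
                           socket_path="/tmp/pull_backup.socket",
                           tcp_port="10809"):
    if nbd_protocol == "unix":
        return "nbd+unix:///%s?socket=%s" % (export_name, socket_path)
    return "nbd://localhost:%s/%s" % (tcp_port, export_name)

# e.g. example_nbd_export_url("vdb")
#   -> "nbd+unix:///vdb?socket=/tmp/pull_backup.socket"
# A client could then pull the export, e.g. (illustrative):
#   qemu-img convert -O qcow2 \
#       "nbd+unix:///vdb?socket=/tmp/pull_backup.socket" backup.qcow2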