def _extend_blkcpy_execution(sub_option,
                             sub_status_error,
                             pre_created=False):
    """
    Wrap up blockcopy execution combined with various options

    :param sub_option: the option string substituted into params["options"]
    :param sub_status_error: whether an error is expected
    :param pre_created: whether the copy target file is pre-created
    """
    tmp_copy_path = os.path.join(data_dir.get_data_dir(),
                                 "%s_%s.img" % (vm_name, sub_option))
    if os.path.exists(tmp_copy_path):
        libvirt.delete_local_disk('file', tmp_copy_path)
    if pre_created:
        libvirt.create_local_disk('file', tmp_copy_path, '10M', 'qcow2')
    tmp_option = params.get("options") % sub_option
    if "default" in tmp_option:
        tmp_option = " --wait --verbose"
    result = virsh.blockcopy(vm_name,
                             device_target,
                             tmp_copy_path,
                             options=tmp_option,
                             ignore_status=True,
                             debug=True)
    logging.debug(sub_status_error)
    libvirt.check_exit_status(result, expect_error=sub_status_error)
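
A minimal driver sketch for the helper above (an assumed invocation pattern,
not taken from the original test; the option strings and error expectations
are illustrative):

# Hypothetical loop exercising _extend_blkcpy_execution(); an option such as
# "--reuse-external" needs the copy target to exist beforehand, hence
# pre_created=True for it.
for opt, expect_err in [("--shallow", False), ("--reuse-external", False)]:
    _extend_blkcpy_execution(opt, expect_err,
                             pre_created=(opt == "--reuse-external"))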
def start_pivot_blkcpy_on_transient_vm():
    """
    Start blockcopy with pivot option
    """
    external_snapshot_disks = libvirt_disk.make_external_disk_snapshots(
        vm, device_target, "trans_snapshot", snapshot_take)
    logging.debug("external snapshots:%s\n", external_snapshot_disks)
    external_snapshot_disks.pop()
    for sub_option in ["--shallow --pivot", "--pivot"]:
        tmp_copy_path = os.path.join(
            data_dir.get_data_dir(),
            "%s_%s.img" % (vm_name, sub_option[2:5]))
        tmp_blkcopy_path.append(tmp_copy_path)
        if os.path.exists(tmp_copy_path):
            libvirt.delete_local_disk('file', tmp_copy_path)
        virsh.blockcopy(vm_name,
                        device_target,
                        tmp_copy_path,
                        options=sub_option,
                        ignore_status=False,
                        debug=True)
        back_chain_files = libvirt_disk.get_chain_backing_files(
            tmp_copy_path)
        back_chain_files = back_chain_files[1:]
        logging.debug("debug blockcopy xml restore:%s and %s\n",
                      external_snapshot_disks, back_chain_files)
        if back_chain_files != external_snapshot_disks:
            test.fail("can not get identical backing chain")
        utils_misc.wait_for(
            lambda: libvirt.check_blockjob(vm_name, device_target), 5)
        # After pivot, no backing chain exists
        external_snapshot_disks = []
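
# Note on the loop above (illustrative): the "--shallow --pivot" copy keeps
# the external snapshot files as its backing chain, which can be inspected
# manually with `qemu-img info --backing-chain <copy.img>`; the plain
# "--pivot" copy on the second iteration is flattened, so no backing chain
# remains and external_snapshot_disks is reset to [] before the comparison.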
Example No. 3
def run(test, params, env):
    """
    Run tests with disk target configurations
    """

    libvirt_version.is_libvirt_feature_supported(params)
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    at_dt = params.get('at_dt')
    cmds_in_guest = eval(params.get('cmds_in_guest'))
    target_rotation = params.get('target_rotation')

    backup_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        if not at_dt:
            libvirt.set_vm_disk(vm, params)

        if not vm.is_alive():
            vm.start()

        logging.debug(vm_xml.VMXML.new_from_dumpxml(vm_name))
        vm_session = vm.wait_for_login()

        pkg_list = params.get("install_pkgs")
        if pkg_list:
            install_pkg(test, eval(pkg_list), vm_session)

        if at_dt:
            old_parts = utils_disk.get_parts_list(vm_session)
            disk_xml = create_second_disk(params)
            virsh.attach_device(vm_name, disk_xml, debug=True, ignore_status=False)
            pat_in_dumpxml = params.get('pattern_in_dumpxml')
            libvirt_vmxml.check_guest_xml(vm_name, pat_in_dumpxml, status_error=False)
            time.sleep(10)
            added_parts = utils_disk.get_added_parts(vm_session, old_parts)
            if not added_parts or len(added_parts) != 1:
                test.error("Only one new partition is expected in the VM, "
                           "but found {}".format(added_parts))
            cmd = cmds_in_guest[0] % added_parts[0]
            run_cmd_in_guest(test, vm_session, cmd)
            virsh.detach_device(vm_name, disk_xml, debug=True, ignore_status=False)
            cmd = cmds_in_guest[1] % added_parts[0]
            run_cmd_in_guest(test, vm_session, cmd, any_error=True)
            libvirt_vmxml.check_guest_xml(vm_name, pat_in_dumpxml, status_error=True)
        else:
            if cmds_in_guest:
                for cmd_index, cmd_in_guest in enumerate(cmds_in_guest):
                    any_error = not target_rotation and cmd_index == 0
                    run_cmd_in_guest(test, vm_session, cmd_in_guest,
                                     any_error=any_error)
    finally:
        backup_xml.sync()
        source_file = params.get('source_file')
        if source_file:
            libvirt.delete_local_disk('file', source_file)
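
The create_second_disk() helper called above is not shown in this example;
the following is a minimal sketch of what it might look like, assuming a
file-backed qcow2 image and avocado-vt's libvirt.create_disk_xml() helper
(the file name and size are illustrative):

def create_second_disk(params):
    """
    Hypothetical helper: create a small backing image and build a disk
    XML file from the test parameters.
    """
    source_file = params.get("source_file",
                             os.path.join(data_dir.get_data_dir(),
                                          "second_disk.img"))
    libvirt.create_local_disk('file', source_file, '100M', 'qcow2')
    params["source_file"] = source_file
    # create_disk_xml() returns the path of a generated device XML file,
    # which virsh.attach_device() accepts directly
    return libvirt.create_disk_xml(params)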
Example No. 4
def clean_up_lvm(device_source, vg_name, lv_name):
    """
    Clean up lvm.

    :param device_source: source file
    :param vg_name: volume group name
    :param lv_name: logical volume name
    """
    libvirt.delete_local_disk("lvm", vgname=vg_name, lvname=lv_name)
    lv_utils.vg_remove(vg_name)
    process.system("pvremove %s" % device_source,
                   ignore_status=True,
                   shell=True)
    process.system("rm -rf /dev/%s" % vg_name, ignore_status=True, shell=True)
Example No. 5
def do_blockcommit_pivot_repeatedly():
    """
    Validate bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1857735
    """
    # Make an external snapshot, pivot, and delete the snapshot file repeatedly.
    tmp_snapshot_name = "external_snapshot_repeated.qcow2"
    block_target = 'vda'
    for count in range(0, 5):
        options = "%s " % tmp_snapshot_name
        options += "--disk-only --atomic"
        disk_external = os.path.join(tmp_dir, tmp_snapshot_name)
        options += " --diskspec %s,snapshot=external,file=%s" % (block_target, disk_external)
        virsh.snapshot_create_as(vm_name, options,
                                 ignore_status=False, debug=True)
        virsh.blockcommit(vm_name, block_target,
                          "--active --pivot", ignore_status=False, debug=True)
        virsh.snapshot_delete(vm_name, tmp_snapshot_name, "--metadata")
        libvirt.delete_local_disk('file', disk_external)
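
# The loop above repeats this virsh sequence five times (illustrative CLI
# rendering of the exact calls the helper makes):
#   virsh snapshot-create-as <vm> external_snapshot_repeated.qcow2 \
#         --disk-only --atomic \
#         --diskspec vda,snapshot=external,file=<tmp_dir>/external_snapshot_repeated.qcow2
#   virsh blockcommit <vm> vda --active --pivot
#   virsh snapshot-delete <vm> external_snapshot_repeated.qcow2 --metadata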
Example No. 6
    # it causes the sync to fail during the internal backup.
    vm.destroy()
    vm.undefine()
    orig_config_xml.define()

    # cleanup hugepages
    if enable_HP or enable_HP_pin:
        logging.info("Cleanup Hugepages")
        # cleaning source hugepages
        hugepage_assign("0")
        # cleaning destination hugepages
        hugepage_assign(
            "0", target_ip=server_ip, user=server_user, password=server_pwd)

    if attach_scsi_disk:
        libvirt.delete_local_disk("file", path=scsi_disk)

    if seLinuxBool:
        logging.info("Recover virt NFS SELinux boolean on target host...")
        # keep .ssh/authorized_keys for NFS cleanup later
        seLinuxBool.cleanup(True)

    if nfs_client:
        logging.info("Cleanup NFS client environment...")
        nfs_client.cleanup()

    logging.info("Remove the NFS image...")
    source_file = params.get("source_file")
    libvirt.delete_local_disk("file", path=source_file)

    logging.info("Cleanup NFS server environment...")
def run(test, params, env):
    """
    Test virsh migrate-setmaxdowntime command.

    1) Prepare migration environment
    2) Start migration and set migrate-maxdowntime
    3) Cleanup environment(migrated vm on destination)
    4) Check result
    """
    dest_uri = params.get("virsh_migrate_dest_uri",
                          "qemu+ssh://MIGRATE_EXAMPLE/system")
    src_uri = params.get("virsh_migrate_src_uri",
                         "qemu+ssh://MIGRATE_EXAMPLE/system")
    if dest_uri.count('///') or dest_uri.count('MIGRATE_EXAMPLE'):
        test.cancel("Set your destination uri first.")
    if src_uri.count('MIGRATE_EXAMPLE'):
        test.cancel("Set your source uri first.")
    if src_uri == dest_uri:
        test.cancel("The source and destination URIs must not be the same.")
    vm_ref = params.get("setmmdt_vm_ref", "domname")
    pre_vm_state = params.get("pre_vm_state", "running")
    status_error = "yes" == params.get("status_error", "no")
    do_migrate = "yes" == params.get("do_migrate", "yes")
    migrate_maxdowntime = params.get("migrate_maxdowntime", 1.000)
    if migrate_maxdowntime == "":
        downtime = ""
    else:
        # Convert the downtime from seconds to milliseconds without
        # truncating fractional values (e.g. 0.3 -> 300 ms)
        downtime = int(float(migrate_maxdowntime) * 1000)
    extra = params.get("setmmdt_extra")

    # For --postcopy enable
    postcopy_options = params.get("postcopy_options", "")

    # A delay between threads
    delay_time = int(params.get("delay_time", 1))
    # timeout of threads
    thread_timeout = 180

    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    domuuid = vm.get_uuid()

    grep_str_local = params.get("grep_str_from_local_libvirt_log", "")

    # For safety reasons, we'd better back up original guest xml
    orig_config_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    if not orig_config_xml:
        test.error("Backing up xmlfile failed.")

    # Clean up daemon log
    log_file = params.get("log_file")
    cleanup_daemon_log(log_file)

    # Config daemon log
    log_conf_dict = eval(params.get("log_conf_dict", '{}'))
    log_conf_type = params.get("log_conf_type")
    log_conf = None
    log_conf = update_config_file(log_conf_type,
                                  log_conf_dict,
                                  scp_to_remote=False,
                                  remote_params=None)

    # Params to update disk using shared storage
    params["disk_type"] = "file"
    params["disk_source_protocol"] = "netfs"
    params["mnt_path_name"] = params.get("nfs_mount_dir")

    # Params to setup SSH connection
    params["server_ip"] = params.get("migrate_dest_host")
    params["server_pwd"] = params.get("migrate_dest_pwd")
    params["client_ip"] = params.get("migrate_source_host")
    params["client_pwd"] = params.get("migrate_source_pwd")
    params["nfs_client_ip"] = params.get("migrate_dest_host")
    params["nfs_server_ip"] = params.get("migrate_source_host")

    # Params to enable SELinux boolean on remote host
    params["remote_boolean_varible"] = "virt_use_nfs"
    params["remote_boolean_value"] = "on"
    params["set_sebool_remote"] = "yes"

    remote_host = params.get("migrate_dest_host")
    username = params.get("migrate_dest_user", "root")
    password = params.get("migrate_dest_pwd")
    # Config ssh autologin for remote host
    ssh_key.setup_ssh_key(remote_host, username, password, port=22)

    setmmdt_dargs = {'debug': True, 'ignore_status': True, 'uri': src_uri}
    migrate_dargs = {
        'debug': True,
        'ignore_status': True,
        'postcopy_options': postcopy_options
    }

    try:
        # Update the disk using shared storage
        libvirt.set_vm_disk(vm, params)

        if not vm.is_alive():
            vm.start()
        vm.wait_for_login()
        domid = vm.get_id()

        # Confirm how to reference a VM.
        if vm_ref == "domname":
            vm_ref = vm_name
        elif vm_ref == "domid":
            vm_ref = domid
        elif vm_ref == "domuuid":
            vm_ref = domuuid

        # Prepare vm state
        if pre_vm_state == "paused":
            vm.pause()
        elif pre_vm_state == "shutoff":
            vm.destroy(gracefully=False)
            # Ensure VM in 'shut off' status
            utils_misc.wait_for(lambda: vm.state() == "shut off", 30)

        # Setting max migration downtime must happen while migration is
        # in progress, so threads are used for synchronization
        threads = []
        if do_migrate:
            threads.append(
                threading.Thread(target=thread_func_live_migration,
                                 args=(vm, dest_uri, migrate_dargs)))

        threads.append(
            threading.Thread(target=thread_func_setmmdt,
                             args=(vm_ref, downtime, extra, setmmdt_dargs)))
        for thread in threads:
            thread.start()
            # Migration must be executing before setting maxdowntime
            time.sleep(delay_time)
        # Wait until thread is over
        for thread in threads:
            thread.join(thread_timeout)

        if not status_error or not do_migrate:
            logging.debug("To match the expected pattern '%s' ...",
                          grep_str_local)
            cmd = "grep -E '%s' %s" % (grep_str_local, log_file)
            cmdResult = process.run(cmd, shell=True, verbose=False)
            logging.debug(cmdResult)

    finally:
        # Clean up.
        if do_migrate:
            logging.debug("Cleanup VM on remote host...")
            cleanup_dest(vm, src_uri, dest_uri)

        if orig_config_xml:
            logging.debug("Recover VM XML...")
            orig_config_xml.sync()

        logging.info("Remove the NFS image...")
        source_file = params.get("source_file")
        libvirt.delete_local_disk("file", path=source_file)

        # Recover libvirtd service configuration on local
        if log_conf:
            logging.debug("Recover local libvirtd configuration...")
            libvirt.customize_libvirt_config(None,
                                             remote_host=False,
                                             extra_params=params,
                                             is_recover=True,
                                             config_object=log_conf)
            cleanup_daemon_log(log_file)

    # Check results.
    if status_error:
        if ret_setmmdt:
            if not do_migrate and libvirt_version.version_compare(1, 2, 9):
                # https://bugzilla.redhat.com/show_bug.cgi?id=1146618
                # Commit fe808d9 fix it and allow setting migration
                # max downtime any time since libvirt-1.2.9
                logging.info("Libvirt version is newer than 1.2.9,"
                             "Allow set maxdowntime while VM isn't migrating")
            else:
                test.fail("virsh migrate-setmaxdowntime succeed "
                          "but not expected.")
    else:
        if do_migrate and not ret_migration:
            test.fail("Migration failed.")

        if not ret_setmmdt:
            test.fail("virsh migrate-setmaxdowntime failed.")
def run(test, params, env):
    """
    Test virsh migrate command.
    """

    def set_feature(vmxml, feature, value):
        """
        Set guest features for PPC

        :param vmxml: guest xml
        :param feature: the feature to set, 'hpt' or 'htm'
        :param value: the value to set for the feature
        """
        features_xml = vm_xml.VMFeaturesXML()
        if feature == 'hpt':
            features_xml.hpt_resizing = value
        elif feature == 'htm':
            features_xml.htm = value
        vmxml.features = features_xml
        vmxml.sync()

    def trigger_hpt_resize(session):
        """
        Check the HPT order file and dmesg

        :param session: the session to guest

        :raise: test.fail if required message is not found
        """
        hpt_order_path = "/sys/kernel/debug/powerpc/hpt_order"
        hpt_order = session.cmd_output('cat %s' % hpt_order_path).strip()
        hpt_order = int(hpt_order)
        logging.info('Current hpt_order is %d', hpt_order)
        hpt_order += 1
        cmd = 'echo %d > %s' % (hpt_order, hpt_order_path)
        cmd_result = session.cmd_status_output(cmd)
        result = process.CmdResult(stderr=cmd_result[1],
                                   stdout=cmd_result[1],
                                   exit_status=cmd_result[0])
        libvirt.check_exit_status(result)
        dmesg = session.cmd('dmesg')
        dmesg_content = params.get('dmesg_content').split('|')
        for content in dmesg_content:
            if content % hpt_order not in dmesg:
                test.fail("'%s' is missing in dmesg" % (content % hpt_order))
            else:
                logging.info("'%s' is found in dmesg", content % hpt_order)

    def check_vm_network_accessed(session=None):
        """
        The operations to the VM need to be done before or after
        migration happens

        :param session: The session object to the host

        :raise: test.error when ping fails
        """
        # Confirm local/remote VM can be accessed through network.
        logging.info("Check VM network connectivity")
        s_ping, _ = utils_test.ping(vm.get_address(),
                                    count=10,
                                    timeout=20,
                                    output_func=logging.debug,
                                    session=session)
        if s_ping != 0:
            if session:
                session.close()
            test.fail("%s did not respond after %d sec." % (vm.name, 20))

    def check_virsh_command_and_option(command, option=None):
        """
        Check if virsh command exists

        :param command: the command to be checked
        :param option: the command option to be checked
        """
        msg = "This version of libvirt does not support "
        if not virsh.has_help_command(command):
            test.cancel(msg + "virsh command '%s'" % command)

        if option and not virsh.has_command_help_match(command, option):
            test.cancel(msg + "virsh command '%s' with option '%s'" % (command,
                                                                       option))

    def add_ctrls(vm_xml, dev_type="pci", dev_index="0", dev_model="pci-root"):
        """
        Add multiple controller devices

        :param vm_xml: the guest XML to be modified
        :param dev_type: the type of the device to be added
        :param dev_index: the maximum index of the device to be added
        :param dev_model: the model of the device to be added
        """
        for inx in range(0, int(dev_index) + 1):
            newcontroller = Controller("controller")
            newcontroller.type = dev_type
            newcontroller.index = inx
            newcontroller.model = dev_model
            logging.debug("New device is added:\n%s", newcontroller)
            vm_xml.add_device(newcontroller)
        vm_xml.sync()

    def do_migration(vm, dest_uri, options, extra):
        """
        Execute the migration with given parameters
        :param vm: the guest to be migrated
        :param dest_uri: the destination uri for migration
        :param options: options next to 'migrate' command
        :param extra: options in the end of the migrate command line

        :return: CmdResult object
        """
        logging.info("Sleeping 10 seconds before migration")
        time.sleep(10)
        # Migrate the guest.
        virsh_args.update({"ignore_status": True})
        migration_res = vm.migrate(dest_uri, options, extra, **virsh_args)
        if int(migration_res.exit_status) != 0:
            logging.error("Migration failed for %s.", vm_name)
            return migration_res

        if vm.is_alive():  # vm.connect_uri was updated
            logging.info("VM is alive on destination %s.", dest_uri)
        else:
            test.fail("VM is not alive on destination %s" % dest_uri)

        # Throws exception if console shows panic message
        vm.verify_kernel_crash()
        return migration_res

    def cleanup_libvirtd_log(log_file):
        """
        Remove existing libvirtd log file on source and target host.

        :param log_file: log file with absolute path
        """
        if os.path.exists(log_file):
            logging.debug("Delete local libvirt log file '%s'", log_file)
            os.remove(log_file)
        cmd = "rm -f %s" % log_file
        logging.debug("Delete remote libvirt log file '%s'", log_file)
        cmd_parms = {'server_ip': server_ip, 'server_user': server_user, 'server_pwd': server_pwd}
        remote.run_remote_cmd(cmd, cmd_parms, runner_on_target)

    def cleanup_dest(vm):
        """
        Clean up the destination host environment
        when doing the uni-direction migration.

        :param vm: the guest to be cleaned up
        """
        logging.info("Cleaning up VMs on %s", vm.connect_uri)
        try:
            if virsh.domain_exists(vm.name, uri=vm.connect_uri):
                vm_state = vm.state()
                if vm_state == "paused":
                    vm.resume()
                elif vm_state == "shut off":
                    vm.start()
                vm.destroy(gracefully=False)

                if vm.is_persistent():
                    vm.undefine()

        except Exception as detail:
            logging.error("Cleaning up destination failed.\n%s", detail)

    def run_stress_in_vm():
        """
        The function to load stress in VM
        """
        stress_args = params.get("stress_args", "--cpu 8 --io 4 "
                                 "--vm 2 --vm-bytes 128M "
                                 "--timeout 20s")
        try:
            vm_session.cmd('stress %s' % stress_args)
        except Exception as detail:
            logging.debug(detail)

    def control_migrate_speed(to_speed=1):
        """
        Control migration duration

        :param to_speed: the speed value in Mbps to be set for migration
        :return int: the new migration speed after setting
        """
        virsh_args.update({"ignore_status": False})
        old_speed = virsh.migrate_getspeed(vm_name, **virsh_args)
        logging.debug("Current migration speed is %s MiB/s\n", old_speed.stdout.strip())
        logging.debug("Set migration speed to %d MiB/s\n", to_speed)
        cmd_result = virsh.migrate_setspeed(vm_name, to_speed, "", **virsh_args)
        actual_speed = virsh.migrate_getspeed(vm_name, **virsh_args)
        logging.debug("New migration speed is %s MiB/s\n", actual_speed.stdout.strip())
        return int(actual_speed.stdout.strip())

    def check_setspeed(params):
        """
        Set/get migration speed

        :param params: the parameters used
        :raise: test.fail if speed set does not take effect
        """
        expected_value = int(params.get("migrate_speed", '41943040')) // (1024 * 1024)
        actual_value = control_migrate_speed(to_speed=expected_value)
        params.update({'compare_to_value': actual_value})
        if actual_value != expected_value:
            test.fail("Migration speed is expected to be '%d MiB/s', but '%d MiB/s' "
                      "found" % (expected_value, actual_value))

    def check_domjobinfo(params, option=""):
        """
        Check given item in domjobinfo of the guest is as expected

        :param params: the parameters used
        :param option: options for domjobinfo
        :raise: test.fail if the value of given item is unexpected
        """
        def search_jobinfo(jobinfo):
            """
            Find value of given item in domjobinfo

            :param jobinfo: cmdResult object
            :raise: test.fail if not found
            """
            for item in jobinfo.stdout.splitlines():
                if item.count(jobinfo_item):
                    groups = re.findall(r'[0-9.]+', item.strip())
                    logging.debug("In '%s' search '%s'\n", item, groups[0])
                    # Compare the relative difference against the allowed
                    # diff_rate
                    if (math.fabs(float(groups[0]) - float(compare_to_value)) /
                       float(compare_to_value) > diff_rate):
                        test.fail("{} {} has too much difference from "
                                  "{}".format(jobinfo_item,
                                              groups[0],
                                              compare_to_value))
                    break

        jobinfo_item = params.get("jobinfo_item")
        compare_to_value = params.get("compare_to_value")
        logging.debug("compare_to_value:%s", compare_to_value)
        diff_rate = float(params.get("diff_rate", "0"))
        if not jobinfo_item or not compare_to_value:
            return
        vm_ref = '{}{}'.format(vm_name, option)
        jobinfo = virsh.domjobinfo(vm_ref, **virsh_args)
        search_jobinfo(jobinfo)

        check_domjobinfo_remote = params.get("check_domjobinfo_remote")
        if check_domjobinfo_remote:
            remote_virsh_session = virsh.VirshPersistent(**remote_virsh_dargs)
            jobinfo = remote_virsh_session.domjobinfo(vm_ref, **virsh_args)
            search_jobinfo(jobinfo)
            remote_virsh_session.close_session()
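        # Worked example (illustrative): with compare_to_value=40 and
        # diff_rate=0.2, a jobinfo line reporting 45 passes because
        # abs(45 - 40) / 40 = 0.125 <= 0.2, while 50 would fail (0.25 > 0.2).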

    def check_maxdowntime(params):
        """
        Set/get migration maxdowntime

        :param params: the parameters used
        :raise: test.fail if maxdowntime set does not take effect
        """
        expected_value = int(float(params.get("migrate_maxdowntime", '0.3')) * 1000)
        virsh_args.update({"ignore_status": False})
        old_value = int(virsh.migrate_getmaxdowntime(vm_name).stdout.strip())
        logging.debug("Current migration maxdowntime is %d ms", old_value)
        logging.debug("Set migration maxdowntime to %d ms", expected_value)
        virsh.migrate_setmaxdowntime(vm_name, expected_value, **virsh_args)
        actual_value = int(virsh.migrate_getmaxdowntime(vm_name).stdout.strip())
        logging.debug("New migration maxdowntime is %d ms", actual_value)
        if actual_value != expected_value:
            test.fail("Migration maxdowntime is expected to be '%d ms', but '%d ms' "
                      "found" % (expected_value, actual_value))
        params.update({'compare_to_value': actual_value})

    def do_actions_during_migrate(params):
        """
        The entry point to execute action list during migration

        :param params: the parameters used
        """
        actions_during_migration = params.get("actions_during_migration")
        if not actions_during_migration:
            return
        for action in actions_during_migration.split(","):
            if action == 'setspeed':
                check_setspeed(params)
            elif action == 'domjobinfo':
                check_domjobinfo(params)
            elif action == 'setmaxdowntime':
                check_maxdowntime(params)
            time.sleep(3)

    def attach_channel_xml():
        """
        Create channel xml and attach it to guest configuration
        """
        # Check if pty channel exists already
        for elem in new_xml.devices.by_device_tag('channel'):
            if elem.type_name == channel_type_name:
                logging.debug("{0} channel already exists in guest. "
                              "No need to add new one".format(channel_type_name))
                return

        params = {'channel_type_name': channel_type_name,
                  'target_type': target_type,
                  'target_name': target_name}
        channel_xml = libvirt.create_channel_xml(params)
        virsh.attach_device(domain_opt=vm_name, file_opt=channel_xml.xml,
                            flagstr="--config", ignore_status=False)
        logging.debug("New VMXML with channel:\n%s", virsh.dumpxml(vm_name))

    def check_timeout_postcopy(params):
        """
        Check the vm state on target host after timeout
        when --postcopy and --timeout-postcopy are used.
        The vm state is expected as running.

        :param params: the parameters used
        """
        timeout = int(params.get("timeout_postcopy", 10))
        time.sleep(timeout + 1)
        remote_virsh_session = virsh.VirshPersistent(**remote_virsh_dargs)
        vm_state = results_stdout_52lts(remote_virsh_session.domstate(vm_name)).strip()
        if vm_state != "running":
            remote_virsh_session.close_session()
            test.fail("After timeout '%s' seconds, "
                      "the vm state on target host should "
                      "be 'running', but '%s' found",
                      timeout, vm_state)
        remote_virsh_session.close_session()

    def get_usable_compress_cache(pagesize):
        """
        Get a number which is bigger than pagesize and is a power of two.

        :param pagesize: the given integer
        :return: an integer satisfying the criteria
        """
        def calculate(num):
            result = num & (num - 1)
            return (result == 0)

        item = pagesize
        found = False
        while not found:
            item += 1
            found = calculate(item)
        logging.debug("%d is the smallest power of 2 bigger than '%s'",
                      item, pagesize)
        return item
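    # Worked example (illustrative): for pagesize=4096 the loop starts
    # checking at 4097 and stops at 8192, the next power of two, since
    # 8192 & 8191 == 0 makes calculate() return True.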

    def check_migration_res(result):
        """
        Check if the migration result is as expected

        :param result: the output of migration
        :raise: test.fail if test is failed
        """
        logging.info("Migration out: %s", results_stdout_52lts(result).strip())
        logging.info("Migration error: %s", results_stderr_52lts(result).strip())

        if status_error:  # Migration should fail
            if err_msg:   # Special error messages are expected
                if not re.search(err_msg, results_stderr_52lts(result).strip()):
                    test.fail("Can not find the expected patterns '%s' in "
                              "output '%s'" % (err_msg,
                                               results_stderr_52lts(result).strip()))
                else:
                    logging.debug("It is the expected error message")
            else:
                if int(result.exit_status) != 0:
                    logging.debug("Migration failure is expected result")
                else:
                    test.fail("Migration success is unexpected result")
        else:
            if int(result.exit_status) != 0:
                test.fail(results_stderr_52lts(result).strip())

    check_parameters(test, params)

    # Params for NFS shared storage
    shared_storage = params.get("migrate_shared_storage", "")
    if shared_storage == "":
        default_guest_asset = defaults.get_default_guest_os_info()['asset']
        default_guest_asset = "%s.qcow2" % default_guest_asset
        shared_storage = os.path.join(params.get("nfs_mount_dir"),
                                      default_guest_asset)
        logging.debug("shared_storage:%s", shared_storage)

    # params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
                                       params.get("migrate_dest_host"))
    # Params to update disk using shared storage
    params["disk_type"] = "file"
    params["disk_source_protocol"] = "netfs"
    params["mnt_path_name"] = params.get("nfs_mount_dir")

    # Local variables
    virsh_args = {"debug": True}
    virsh_opt = params.get("virsh_opt", "")
    server_ip = params.get("server_ip")
    server_user = params.get("server_user", "root")
    server_pwd = params.get("server_pwd")
    extra = params.get("virsh_migrate_extra")
    options = params.get("virsh_migrate_options")
    src_uri = params.get("virsh_migrate_connect_uri")
    dest_uri = params.get("virsh_migrate_desturi")
    log_file = params.get("libvirt_log", "/var/log/libvirt/libvirtd.log")
    check_complete_job = "yes" == params.get("check_complete_job", "no")
    config_libvirtd = "yes" == params.get("config_libvirtd", "no")
    contrl_index = params.get("new_contrl_index", None)
    asynch_migration = "yes" == params.get("asynch_migrate", "no")
    grep_str_remote_log = params.get("grep_str_remote_log", "")
    grep_str_local_log = params.get("grep_str_local_log", "")
    disable_verify_peer = "yes" == params.get("disable_verify_peer", "no")
    status_error = "yes" == params.get("status_error", "no")
    stress_in_vm = "yes" == params.get("stress_in_vm", "no")
    low_speed = params.get("low_speed", None)
    remote_virsh_dargs = {'remote_ip': server_ip, 'remote_user': server_user,
                          'remote_pwd': server_pwd, 'unprivileged_user': None,
                          'ssh_remote_auth': True}

    hpt_resize = params.get("hpt_resize", None)
    htm_state = params.get("htm_state", None)

    # For pty channel test
    add_channel = "yes" == params.get("add_channel", "no")
    channel_type_name = params.get("channel_type_name", None)
    target_type = params.get("target_type", None)
    target_name = params.get("target_name", None)
    cmd_run_in_remote_guest = params.get("cmd_run_in_remote_guest", None)
    cmd_run_in_remote_guest_1 = params.get("cmd_run_in_remote_guest_1", None)
    cmd_run_in_remote_host = params.get("cmd_run_in_remote_host", None)
    cmd_run_in_remote_host_1 = params.get("cmd_run_in_remote_host_1", None)
    cmd_run_in_remote_host_2 = params.get("cmd_run_in_remote_host_2", None)

    # For qemu command line checking
    qemu_check = params.get("qemu_check", None)

    xml_check_after_mig = params.get("guest_xml_check_after_mig", None)

    # params for cache matrix test
    cache = params.get("cache")
    remove_cache = "yes" == params.get("remove_cache", "no")
    err_msg = params.get("err_msg")

    arch = platform.machine()
    if any([hpt_resize, contrl_index, htm_state]) and 'ppc64' not in arch:
        test.cancel("The case is PPC only.")

    # For TLS
    tls_recovery = params.get("tls_auto_recovery", "yes")
    # qemu config
    qemu_conf_dict = None
    # libvirtd config
    libvirtd_conf_dict = None

    remote_virsh_session = None
    vm = None
    vm_session = None
    libvirtd_conf = None
    qemu_conf = None
    mig_result = None
    test_exception = None
    is_TestError = False
    is_TestFail = False
    is_TestSkip = False

    # Objects to be cleaned up in the end
    objs_list = []
    tls_obj = None

    # Local variables
    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()

    # For safety reasons, we'd better back up the XML file.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()
    if not orig_config_xml:
        test.error("Backing up xmlfile failed.")

    try:
        # Create a remote runner for later use
        runner_on_target = remote.RemoteRunner(host=server_ip,
                                               username=server_user,
                                               password=server_pwd)

        # Change the configuration files if needed before starting guest
        # For qemu.conf
        if extra.count("--tls"):
            # Setup TLS
            tls_obj = TLSConnection(params)
            if tls_recovery == "yes":
                objs_list.append(tls_obj)
                tls_obj.auto_recover = True
                tls_obj.conn_setup()
            if not disable_verify_peer:
                qemu_conf_dict = {"migrate_tls_x509_verify": "1"}
                # Setup qemu configure
                logging.debug("Configure the qemu")
                cleanup_libvirtd_log(log_file)
                qemu_conf = libvirt.customize_libvirt_config(qemu_conf_dict,
                                                             config_type="qemu",
                                                             remote_host=True,
                                                             extra_params=params)
        # Setup libvirtd
        if config_libvirtd:
            logging.debug("Configure the libvirtd")
            cleanup_libvirtd_log(log_file)
            libvirtd_conf_dict = setup_libvirtd_conf_dict(params)
            libvirtd_conf = libvirt.customize_libvirt_config(libvirtd_conf_dict,
                                                             remote_host=True,
                                                             extra_params=params)
        # Prepare required guest xml before starting guest
        if contrl_index:
            new_xml.remove_all_device_by_type('controller')
            logging.debug("After removing controllers, current XML:\n%s\n", new_xml)
            add_ctrls(new_xml, dev_index=contrl_index)

        if add_channel:
            attach_channel_xml()

        if hpt_resize:
            set_feature(new_xml, 'hpt', hpt_resize)

        if htm_state:
            set_feature(new_xml, 'htm', htm_state)

        if cache:
            params["driver_cache"] = cache
        if remove_cache:
            params["enable_cache"] = "no"

        # Change the disk of the vm to shared disk and then start VM
        libvirt.set_vm_disk(vm, params)

        if not vm.is_alive():
            vm.start()

        logging.debug("Guest xml after starting:\n%s", vm_xml.VMXML.new_from_dumpxml(vm_name))

        # Check qemu command line after guest is started
        if qemu_check:
            check_content = qemu_check
            if hpt_resize:
                check_content = "%s%s" % (qemu_check, hpt_resize)
            if htm_state:
                check_content = "%s%s" % (qemu_check, htm_state)
            libvirt.check_qemu_cmd_line(check_content)

        # Check local guest network connection before migration
        vm_session = vm.wait_for_login()
        check_vm_network_accessed()

        # Preparation for the running guest before migration
        if hpt_resize and hpt_resize != 'disabled':
            trigger_hpt_resize(vm_session)

        if low_speed:
            control_migrate_speed(int(low_speed))

        if stress_in_vm:
            pkg_name = 'stress'
            logging.debug("Check if stress tool is installed")
            pkg_mgr = utils_package.package_manager(vm_session, pkg_name)
            if not pkg_mgr.is_installed(pkg_name):
                logging.debug("Stress tool will be installed")
                if not pkg_mgr.install():
                    test.error("Package '%s' installation fails" % pkg_name)

            stress_thread = threading.Thread(target=run_stress_in_vm,
                                             args=())
            stress_thread.start()

        if extra.count("timeout-postcopy"):
            func_name = check_timeout_postcopy
        if params.get("actions_during_migration"):
            func_name = do_actions_during_migrate
        if extra.count("comp-xbzrle-cache"):
            cache = get_usable_compress_cache(memory.get_page_size())
            extra = "%s %s" % (extra, cache)

        # For --postcopy enable
        postcopy_options = params.get("postcopy_options")
        if postcopy_options:
            extra = "%s %s" % (extra, postcopy_options)

        # Execute migration process
        if not asynch_migration:
            mig_result = do_migration(vm, dest_uri, options, extra)
        else:
            migration_test = libvirt.MigrationTest()

            logging.debug("vm.connect_uri=%s", vm.connect_uri)
            vms = [vm]
            try:
                migration_test.do_migration(vms, None, dest_uri, 'orderly',
                                            options, thread_timeout=900,
                                            ignore_status=True, virsh_opt=virsh_opt,
                                            func=func_name, extra_opts=extra,
                                            func_params=params)
                mig_result = migration_test.ret
            except exceptions.TestFail as fail_detail:
                test.fail(fail_detail)
            except exceptions.TestSkipError as skip_detail:
                test.cancel(skip_detail)
            except exceptions.TestError as error_detail:
                test.error(error_detail)
            except Exception as details:
                mig_result = migration_test.ret
                logging.error(details)

        check_migration_res(mig_result)

        if add_channel:
            # Get the channel device source path of remote guest
            if not remote_virsh_session:
                remote_virsh_session = virsh.VirshPersistent(**remote_virsh_dargs)
            file_path = tempfile.mktemp(dir=data_dir.get_tmp_dir())
            remote_virsh_session.dumpxml(vm_name, to_file=file_path,
                                         debug=True,
                                         ignore_status=True)
            local_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            local_vmxml.xmltreefile = xml_utils.XMLTreeFile(file_path)
            host_source = None
            for elem in local_vmxml.devices.by_device_tag('channel'):
                logging.debug("Found channel device {}".format(elem))
                if elem.type_name == channel_type_name:
                    host_source = elem.source.get('path')
                    logging.debug("Remote guest uses {} for channel device".format(host_source))
                    break
            remote_virsh_session.close_session()
            if not host_source:
                test.fail("Can not find source for %s channel on remote host" % channel_type_name)

            # Prepare to wait for message on remote host from the channel
            cmd_parms = {'server_ip': server_ip, 'server_user': server_user, 'server_pwd': server_pwd}
            cmd_result = remote.run_remote_cmd(cmd_run_in_remote_host % host_source,
                                               cmd_parms,
                                               runner_on_target)

            # Send message from remote guest to the channel file
            remote_vm_obj = utils_test.RemoteVMManager(cmd_parms)
            vm_ip = vm.get_address()
            vm_pwd = params.get("password")
            remote_vm_obj.setup_ssh_auth(vm_ip, vm_pwd)
            cmd_result = remote_vm_obj.run_command(vm_ip, cmd_run_in_remote_guest_1)
            remote_vm_obj.run_command(vm_ip, cmd_run_in_remote_guest % results_stdout_52lts(cmd_result).strip())
            logging.debug("Sending message is done")

            # Check message on remote host from the channel
            remote.run_remote_cmd(cmd_run_in_remote_host_1, cmd_parms, runner_on_target)
            logging.debug("Receiving message is done")
            remote.run_remote_cmd(cmd_run_in_remote_host_2, cmd_parms, runner_on_target)

        if check_complete_job:
            opts = " --completed"
            check_virsh_command_and_option("domjobinfo", opts)
            if extra.count("comp-xbzrle-cache"):
                params.update({'compare_to_value': cache // 1024})
            check_domjobinfo(params, option=opts)

        if grep_str_local_log:
            cmd = "grep -E '%s' %s" % (grep_str_local_log, log_file)
            cmdRes = process.run(cmd, shell=True, ignore_status=True)
            if cmdRes.exit_status:
                test.fail(results_stderr_52lts(cmdRes).strip())
        if grep_str_remote_log:
            cmd = "grep -E '%s' %s" % (grep_str_remote_log, log_file)
            cmd_parms = {'server_ip': server_ip, 'server_user': server_user, 'server_pwd': server_pwd}
            remote.run_remote_cmd(cmd, cmd_parms, runner_on_target)

        if xml_check_after_mig:
            if not remote_virsh_session:
                remote_virsh_session = virsh.VirshPersistent(**remote_virsh_dargs)
            target_guest_dumpxml = results_stdout_52lts(
                remote_virsh_session.dumpxml(vm_name,
                                             debug=True,
                                             ignore_status=True)).strip()
            if hpt_resize:
                check_str = hpt_resize
            elif htm_state:
                check_str = htm_state
            if hpt_resize or htm_state:
                xml_check_after_mig = "%s'%s'" % (xml_check_after_mig, check_str)
                if not re.search(xml_check_after_mig, target_guest_dumpxml):
                    remote_virsh_session.close_session()
                    test.fail("Fail to search '%s' in target guest XML:\n%s"
                              % (xml_check_after_mig, target_guest_dumpxml))

            if contrl_index:
                all_ctrls = re.findall(xml_check_after_mig, target_guest_dumpxml)
                if len(all_ctrls) != int(contrl_index) + 1:
                    remote_virsh_session.close_session()
                    test.fail("%s pci-root controllers are expected in guest XML, "
                              "but found %s" % (int(contrl_index) + 1, len(all_ctrls)))
            remote_virsh_session.close_session()

        if int(mig_result.exit_status) == 0:
            server_session = remote.wait_for_login('ssh', server_ip, '22',
                                                   server_user, server_pwd,
                                                   r"[\#\$]\s*$")
            check_vm_network_accessed(server_session)
            server_session.close()
    except exceptions.TestFail as details:
        is_TestFail = True
        test_exception = details
    except exceptions.TestSkipError as details:
        is_TestSkip = True
        test_exception = details
    except exceptions.TestError as details:
        is_TestError = True
        test_exception = details
    except Exception as details:
        test_exception = details
    finally:
        logging.debug("Recover test environment")
        try:
            # Clean VM on destination
            vm.connect_uri = dest_uri
            cleanup_dest(vm)
            vm.connect_uri = src_uri

            logging.info("Recovery VM XML configration")
            orig_config_xml.sync()
            logging.debug("The current VM XML:\n%s", orig_config_xml.xmltreefile)

            if remote_virsh_session:
                remote_virsh_session.close_session()

            if extra.count("--tls") and not disable_verify_peer:
                logging.debug("Recover the qemu configuration")
                libvirt.customize_libvirt_config(None,
                                                 config_type="qemu",
                                                 remote_host=True,
                                                 extra_params=params,
                                                 is_recover=True,
                                                 config_object=qemu_conf)

            if config_libvirtd:
                logging.debug("Recover the libvirtd configuration")
                libvirt.customize_libvirt_config(None,
                                                 remote_host=True,
                                                 extra_params=params,
                                                 is_recover=True,
                                                 config_object=libvirtd_conf)

            logging.info("Remove local NFS image")
            source_file = params.get("source_file")
            libvirt.delete_local_disk("file", path=source_file)

            if objs_list:
                for obj in objs_list:
                    logging.debug("Clean up local objs")
                    del obj

        except Exception as exception_detail:
            if (not test_exception and not is_TestError and
               not is_TestFail and not is_TestSkip):
                raise exception_detail
            else:
                # if any of the above exceptions has been raised, only print
                # an error log here to avoid hiding the original issue
                logging.error(exception_detail)
    # Check result
    if is_TestFail:
        test.fail(test_exception)
    if is_TestSkip:
        test.cancel(test_exception)
    if is_TestError:
        test.error(test_exception)
    if not test_exception:
        logging.info("Case execution is done.")
    else:
        test.error(test_exception)
Example No. 9
def run(test, params, env):
    """
    Test disk IOThread.

    1.Prepare test environment, destroy or suspend a VM.
    2.Perform test operation.
    3.Recover test environment.
    4.Confirm the test result.
    """
    # Step 1. Prepare test environment.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = vmxml_backup.copy()

    def prepare_vmxml_and_test_disk():
        """
        Configure the XML definition of the VM under test to include
        the required starting iothread value, and prepare
        a test disk for usage.
        """
        iothreads = params.get("iothreads", 5)
        try:
            iothreads = int(iothreads)
        except ValueError:
            # 'iothreads' must be convertible to an integer
            logging.debug("Can't convert %s to integer type", iothreads)

        # Set iothreads first
        vmxml_backup.iothreads = iothreads
        logging.debug("Pre-test xml is %s", vmxml_backup)
        vmxml_backup.sync()

        device_source_name = \
            params.get("virt_disk_device_source", "attach.img")
        device_source_format = \
            params.get("virt_disk_device_source_format", "qcow2")
        device_source_path = \
            os.path.join(data_dir.get_data_dir(), device_source_name)
        device_source = libvirt.create_local_disk(
            "file",
            path=device_source_path,
            size="1",
            disk_format=device_source_format)

        # Disk specific attributes.
        device_target = params.get("virt_disk_device_target", "vdb")

        return device_source_path, device_source, device_target

    def attach_disk_iothread_zero(vm_name, device_source, device_target):
        """
        Attempt to attach a disk with an iothread value of 0.
        """
        disk_errors = \
            params.get("virt_disk_iothread_0_errors")
        attach_option = params.get("disk_attach_option_io_0", "--iothread 0")
        ret = virsh.attach_disk(vm_name,
                                device_source,
                                device_target,
                                attach_option,
                                debug=True)
        disk_attach_error = True
        libvirt.check_exit_status(ret, disk_attach_error)
        libvirt.check_result(ret, expected_fails=disk_errors)

    def attach_disk_iothread_two(vm_name, device_source, device_target):
        """
        Attach a disk with an iothread value of 2.
        """
        disk_attach_success = params.get("virt_disk_attach_success")
        attach_option = params.get("disk_attach_option_io_2")
        ret = virsh.attach_disk(vm_name,
                                device_source,
                                device_target,
                                attach_option,
                                debug=True)
        disk_attach_error = False
        libvirt.check_exit_status(ret, disk_attach_error)
        libvirt.check_result(ret, expected_match=disk_attach_success)
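        # Illustrative CLI equivalent of the attach call above (the actual
        # option string comes from disk_attach_option_io_2, typically
        # "--iothread 2"):
        #   virsh attach-disk <vm> <device_source> vdb --iothread 2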

    def check_iothread_in_xml(vm_name):
        """
        Check that the iothread value has been set in the XML file.
        """
        dom_xml = virsh.dumpxml(vm_name, debug=False).stdout_text.strip()
        iothread_str = \
            params.get("xml_iothread_block")
        if iothread_str not in dom_xml:
            test.fail("IOThread value was not set to 2 in %s" % dom_xml)

    def delete_busy_iothread(vm_name):
        """
        Attempt to delete a busy iothread disk.
        """
        thread_id = params.get("virt_disk_thread_id", "--id 2")
        ret = virsh.iothreaddel(vm_name, thread_id)
        iothread_error = True
        disk_errors = \
            params.get("virt_disk_iothread_in_use_error")
        libvirt.check_exit_status(ret, iothread_error)
        libvirt.check_result(ret, expected_fails=disk_errors)

    def detach_disk(vm_name, device_target):
        """
        Detach a virtual disk.
        """
        disk_detach_success = params.get("virt_disk_detach_success")
        ret = virsh.detach_disk(vm_name, device_target)
        disk_detach_error = False
        libvirt.check_exit_status(ret, disk_detach_error)
        libvirt.check_result(ret, expected_match=disk_detach_success)

        def _check_disk_detach():
            try:
                if device_target not in utils_disk.get_parts_list():
                    return True
                else:
                    logging.debug("Target device is still "
                                  "present after detach command")
            except Exception:
                return False

        # If disk_detach_error is False, then wait a few seconds
        # to let detach operation complete
        if not disk_detach_error:
            utils_misc.wait_for(_check_disk_detach, timeout=20)

    try:
        if vm.is_alive():
            vm.destroy()

        device_source_path, device_source, device_target = \
            prepare_vmxml_and_test_disk()

        # Restart the vm.
        if not vm.is_alive():
            vm.start()
            vm.wait_for_login()

        # Step 2. Perform test operations.
        # Test operation 1.
        # Attach disk with --iothread 0
        attach_disk_iothread_zero(vm_name, device_source, device_target)

        # Test operation 2.
        # Attach disk of type qcow2 --iothread 2
        attach_disk_iothread_two(vm_name, device_source, device_target)

        # Test operation 3.
        # Check iothread has been set to 2 in XML.
        check_iothread_in_xml(vm_name)

        # Test operation 4.
        # Delete an IOThread which was assigned to a disk.
        delete_busy_iothread(vm_name)

        # Test operation 5. Detach a disk.
        # On hardware, the previous step causes the power indicator
        # light on the PCIE slot to blink for 5 seconds, giving the
        # user time to cancel the command. This makes the following
        # detach command fail, so we must wait 6 seconds until the
        # blinking has stopped.
        time.sleep(6)
        detach_disk(vm_name, device_target)

    finally:
        # Step 3. Recover Test environment.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        orig_config_xml.sync()
        libvirt.delete_local_disk("file", device_source)
Example No. 10
def run(test, params, env):
    """
    Test virsh blockcopy --xml option.

    1.Prepare backend storage (file/block/iscsi/ceph/nbd)
    2.Start VM
    3.Prepare target xml
    4.Execute virsh blockcopy --xml command
    5.Check VM xml after operation accomplished
    6.Clean up test environment
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}
    ignore_check = False

    def check_blockcopy_xml(vm_name, source_image, ignore_check=False):
        """
        Check blockcopy xml in VM.

        :param vm_name: VM name
        :param source_image: source image name.
        :param ignore_check: if True, skip the check (default False)
        """
        if ignore_check:
            return
        source_image_list = []
        blklist = virsh.domblklist(vm_name).stdout_text.splitlines()
        for line in blklist:
            if line.strip().startswith(('hd', 'vd', 'sd', 'xvd')):
                source_image_list.append(line.split()[-1])
        logging.debug('domblklist %s:\n%s', vm_name, source_image_list)
        if not any(source_image in s for s in source_image_list):
            test.fail("Cannot find expected source image: %s" % source_image)

    # Disk specific attributes.
    device = params.get("virt_disk_device", "disk")
    device_target = params.get("virt_disk_device_target", "vdd")
    device_format = params.get("virt_disk_device_format", "raw")
    device_type = params.get("virt_disk_device_type", "file")
    device_bus = params.get("virt_disk_device_bus", "virtio")
    backend_storage_type = params.get("backend_storage_type", "iscsi")
    blockcopy_option = params.get("blockcopy_option")

    # Backend storage auth info
    storage_size = params.get("storage_size", "1G")
    enable_auth = "yes" == params.get("enable_auth")
    use_auth_usage = "yes" == params.get("use_auth_usage")
    auth_sec_usage_type = params.get("auth_sec_usage_type", "iscsi")
    auth_sec_usage_target = params.get("auth_sec_usage_target", "libvirtiscsi")
    auth_sec_uuid = ""
    disk_auth_dict = {}
    size = "1"

    status_error = "yes" == params.get("status_error")
    define_error = "yes" == params.get("define_error")

    # Initialize one NbdExport object
    nbd = None
    img_file = os.path.join(data_dir.get_tmp_dir(), "%s_test.img" % vm_name)
    # Start VM and get all partitions in VM.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = utils_disk.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Additional disk images.
    disks_img = []
    try:
        # Clean up dirty secrets in the test environment, if any.
        utils_secret.clean_up_secrets()
        # Setup backend storage
        if backend_storage_type == "file":
            image_filename = params.get("image_filename", "raw.img")
            disk_path = os.path.join(data_dir.get_tmp_dir(), image_filename)
            if blockcopy_option in ['reuse_external']:
                device_source = libvirt.create_local_disk(
                    backend_storage_type, disk_path, storage_size,
                    device_format)
            else:
                device_source = disk_path
            disks_img.append({
                "format": device_format,
                "source": disk_path,
                "path": disk_path
            })
            disk_src_dict = {
                'attrs': {
                    'file': device_source,
                    'type_name': 'file'
                }
            }
            checkout_device_source = image_filename
        elif backend_storage_type == "iscsi":
            iscsi_host = params.get("iscsi_host")
            iscsi_port = params.get("iscsi_port")
            if device_type == "block":
                device_source = libvirt.setup_or_cleanup_iscsi(is_setup=True)
                disk_src_dict = {'attrs': {'dev': device_source}}
                checkout_device_source = device_source
            elif device_type == "network":
                chap_user = params.get("chap_user", "redhat")
                chap_passwd = params.get("chap_passwd", "password")
                auth_sec_usage = params.get("auth_sec_usage", "libvirtiscsi")
                auth_sec_dict = {
                    "sec_usage": "iscsi",
                    "sec_target": auth_sec_usage
                }
                auth_sec_uuid = libvirt.create_secret(auth_sec_dict)
                # Set password of auth secret
                virsh.secret_set_value(auth_sec_uuid,
                                       chap_passwd,
                                       encode=True,
                                       debug=True)
                iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
                    is_setup=True,
                    is_login=False,
                    image_size=storage_size,
                    chap_user=chap_user,
                    chap_passwd=chap_passwd,
                    portal_ip=iscsi_host)
                # ISCSI auth attributes for disk xml
                disk_auth_dict = {
                    "auth_user": chap_user,
                    "secret_type": auth_sec_usage_type,
                    "secret_usage": auth_sec_usage_target
                }
                device_source = "iscsi://%s:%s/%s/%s" % (
                    iscsi_host, iscsi_port, iscsi_target, lun_num)
                disk_src_dict = {
                    "attrs": {
                        "protocol": "iscsi",
                        "name": "%s/%s" % (iscsi_target, lun_num)
                    },
                    "hosts": [{
                        "name": iscsi_host,
                        "port": iscsi_port
                    }]
                }
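                # The dict above maps to disk source XML roughly like this
                # (illustrative):
                #   <source protocol='iscsi' name='<iscsi_target>/<lun_num>'>
                #     <host name='<iscsi_host>' port='<iscsi_port>'/>
                #   </source>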
                checkout_device_source = 'emulated-iscsi'
        elif backend_storage_type == "ceph":
            ceph_host_ip = params.get("ceph_host_ip", "EXAMPLE_HOSTS")
            ceph_mon_ip = params.get("ceph_mon_ip", "EXAMPLE_MON_HOST")
            ceph_host_port = params.get("ceph_host_port", "EXAMPLE_PORTS")
            ceph_disk_name = params.get("ceph_disk_name",
                                        "EXAMPLE_SOURCE_NAME")
            ceph_client_name = params.get("ceph_client_name")
            ceph_client_key = params.get("ceph_client_key")
            ceph_auth_user = params.get("ceph_auth_user")
            ceph_auth_key = params.get("ceph_auth_key")
            enable_auth = "yes" == params.get("enable_auth")
            size = "0.15"

            key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key")
            key_opt = ""
            # Initialize to an empty value so cleanup can tell whether the config file was created
            ceph_cfg = ""
            if not utils_package.package_install(["ceph-common"]):
                test.error("Failed to install ceph-common")
            # Create config file if it doesn't exist
            ceph_cfg = ceph.create_config_file(ceph_mon_ip)
            # If auth is enabled, prepare a local file to save the key
            if ceph_client_name and ceph_client_key:
                with open(key_file, 'w') as f:
                    f.write("[%s]\n\tkey = %s\n" %
                            (ceph_client_name, ceph_client_key))
                key_opt = "--keyring %s" % key_file
                auth_sec_dict = {
                    "sec_usage": auth_sec_usage_type,
                    "sec_name": "ceph_auth_secret"
                }
                auth_sec_uuid = libvirt.create_secret(auth_sec_dict)
                virsh.secret_set_value(auth_sec_uuid,
                                       ceph_auth_key,
                                       ignore_status=False,
                                       debug=True)
                disk_auth_dict = {
                    "auth_user": ceph_auth_user,
                    "secret_type": auth_sec_usage_type,
                    "secret_uuid": auth_sec_uuid
                }
            else:
                test.error("No ceph client name/key provided.")
            device_source = "rbd:%s:mon_host=%s:keyring=%s" % (
                ceph_disk_name, ceph_mon_ip, key_file)
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                   "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name))
            cmd_result = process.run(cmd, ignore_status=True, shell=True)
            logging.debug("pre clean up rbd disk if exists: %s", cmd_result)
            if blockcopy_option in ['reuse_external']:
                # Create a local image file.
                libvirt.create_local_disk("file", img_file, storage_size,
                                          device_format)
                # Convert the image to remote storage
                disk_path = ("rbd:%s:mon_host=%s" %
                             (ceph_disk_name, ceph_mon_ip))
                if ceph_client_name and ceph_client_key:
                    disk_path += (":id=%s:key=%s" %
                                  (ceph_auth_user, ceph_auth_key))
                rbd_cmd = (
                    "rbd -m %s %s info %s 2> /dev/null|| qemu-img convert -O"
                    " %s %s %s" % (ceph_mon_ip, key_opt, ceph_disk_name,
                                   device_format, img_file, disk_path))
                process.run(rbd_cmd, ignore_status=False, shell=True)
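                # The `rbd info ... || qemu-img convert` idiom above only
                # uploads the local image when it is not already present in
                # the rbd pool.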
            disk_src_dict = {
                "attrs": {
                    "protocol": "rbd",
                    "name": ceph_disk_name
                },
                "hosts": [{
                    "name": ceph_host_ip,
                    "port": ceph_host_port
                }]
            }
            checkout_device_source = ceph_disk_name
        elif backend_storage_type == "nbd":
            # Get server hostname.
            hostname = socket.gethostname().strip()
            # Setup backend storage
            nbd_server_host = hostname
            nbd_server_port = params.get("nbd_server_port")
            image_path = params.get("emulated_image",
                                    "/var/lib/libvirt/images/nbdtest.img")
            # Create NbdExport object
            nbd = NbdExport(image_path,
                            image_format=device_format,
                            port=nbd_server_port)
            nbd.start_nbd_server()
            # Prepare disk source xml
            source_attrs_dict = {"protocol": "nbd"}
            disk_src_dict = {}
            disk_src_dict.update({"attrs": source_attrs_dict})
            disk_src_dict.update({
                "hosts": [{
                    "name": nbd_server_host,
                    "port": nbd_server_port
                }]
            })
            device_source = "nbd://%s:%s/%s" % (nbd_server_host,
                                                nbd_server_port, image_path)
            checkout_device_source = image_path
            if blockcopy_option in ['pivot']:
                ignore_check = True

        logging.debug("device source is: %s", device_source)
        # Add disk xml.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disk_xml = Disk(type_name=device_type)
        disk_xml.device = device
        disk_xml.target = {"dev": device_target, "bus": device_bus}
        driver_dict = {"name": "qemu", "type": device_format}
        disk_xml.driver = driver_dict
        disk_source = disk_xml.new_disk_source(**disk_src_dict)
        # Embed auth in the disk <source> element when auth info is provided
        if disk_auth_dict:
            logging.debug("disk auth dict is: %s", disk_auth_dict)
            disk_source.auth = disk_xml.new_auth(**disk_auth_dict)
        disk_xml.source = disk_source
        logging.debug("new disk xml is: %s", disk_xml)
        # Sync VM xml
        device_source_path = os.path.join(data_dir.get_tmp_dir(), "source.raw")
        tmp_device_source = libvirt.create_local_disk("file",
                                                      path=device_source_path,
                                                      size=size,
                                                      disk_format="raw")
        s_attach = virsh.attach_disk(vm_name,
                                     tmp_device_source,
                                     device_target,
                                     "--config",
                                     debug=True)
        libvirt.check_exit_status(s_attach)
        try:
            vm.start()
            vm.wait_for_login().close()
        except xcepts.LibvirtXMLError as xml_error:
            if not define_error:
                test.fail("Failed to define VM:\n%s", str(xml_error))
        except virt_vm.VMStartError as details:
            # VM cannot be started
            if status_error:
                logging.info("VM failed to start as expected: %s",
                             str(details))
            else:
                test.fail("VM should start but failed: %s" % str(details))
        # Execute blockcopy with the prepared disk XML as the copy destination
        options = params.get("options",
                             "--pivot --transient-job --verbose --wait")
        result = virsh.blockcopy(vm_name,
                                 device_target,
                                 "--xml %s" % disk_xml.xml,
                                 options=options,
                                 debug=True)
        libvirt.check_exit_status(result)
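        # The call above is roughly equivalent to running (illustrative):
        #   virsh blockcopy <vm_name> <device_target> --xml <disk_xml_file> \
        #       --pivot --transient-job --verbose --wait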
        check_source_image = None
        if blockcopy_option in ['pivot']:
            check_source_image = checkout_device_source
        else:
            check_source_image = tmp_device_source
        check_blockcopy_xml(vm_name, check_source_image, ignore_check)
    finally:
        # Delete snapshots.
        if virsh.domain_exists(vm_name):
            # To delete snapshots, destroy the VM first.
            if vm.is_alive():
                vm.destroy()
            libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)

        vmxml_backup.sync("--snapshots-metadata")

        if os.path.exists(img_file):
            libvirt.delete_local_disk("file", img_file)
        for img in disks_img:
            if os.path.exists(img["path"]):
                libvirt.delete_local_disk("file", img["path"])
        # Clean up backend storage
        if backend_storage_type == "iscsi":
            libvirt.setup_or_cleanup_iscsi(is_setup=False)
        elif backend_storage_type == "ceph":
            # Remove ceph configure file if created.
            if ceph_cfg:
                os.remove(ceph_cfg)
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                   "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name))
            cmd_result = process.run(cmd, ignore_status=True, shell=True)
            logging.debug("result of rbd removal: %s", cmd_result.stdout_text)
            if os.path.exists(key_file):
                os.remove(key_file)
        elif backend_storage_type == "nbd":
            if nbd:
                try:
                    nbd.cleanup()
                except Exception as nbd_ex:
                    logging.error("Clean up nbd failed: %s", str(nbd_ex))
        # Clean up secrets
        if auth_sec_uuid:
            virsh.secret_undefine(auth_sec_uuid)
Example #11
def create_or_cleanup_ceph_backend_vm_disk(vm, params, is_setup=True):
    """
    Setup vm ceph disk with given parameters

    :param vm: the vm object
    :param params: dict, dict include setup vm disk xml configurations
    :param is_setup: one parameter indicate whether setup or clean up
    """
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm.name)
    logging.debug("original xml is: %s", vmxml)

    # Device related configurations
    device_format = params.get("virt_disk_device_format", "raw")
    device_bus = params.get("virt_disk_device_bus", "virtio")
    device = params.get("virt_disk_device", "disk")
    device_target = params.get("virt_disk_device_target", "vdb")
    hotplug = "yes" == params.get("virt_disk_device_hotplug", "no")
    keep_raw_image_as = "yes" == params.get("keep_raw_image_as", "no")

    # Ceph related configurations
    ceph_mon_ip = params.get("ceph_mon_ip", "EXAMPLE_MON_HOST")
    ceph_host_port = params.get("ceph_host_port", "EXAMPLE_PORTS")
    ceph_disk_name = params.get("ceph_disk_name", "EXAMPLE_SOURCE_NAME")
    ceph_client_name = params.get("ceph_client_name")
    ceph_client_key = params.get("ceph_client_key")
    ceph_auth_user = params.get("ceph_auth_user")
    ceph_auth_key = params.get("ceph_auth_key")
    auth_sec_usage_type = params.get("ceph_auth_sec_usage_type", "ceph")
    storage_size = params.get("storage_size", "1G")
    img_file = params.get("ceph_image_file")
    attach_option = params.get("virt_device_attach_option", "--live")
    key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key")
    key_opt = ""
    is_local_img_file = img_file is None
    rbd_key_file = None

    # Initialize to an empty value so cleanup can tell whether the config file was created
    ceph_cfg = ""
    disk_auth_dict = None
    auth_sec_uuid = None
    names = ceph_disk_name.split('/')
    pool_name = names[0]
    image_name = names[1]
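    # e.g. a ceph_disk_name of "rbd_pool/rbd_image" splits into pool
    # "rbd_pool" and image "rbd_image" (illustrative values)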
    if not utils_package.package_install(["ceph-common"]):
        raise exceptions.TestError("Failed to install ceph-common")

    # Create config file if it doesn't exist
    ceph_cfg = ceph.create_config_file(ceph_mon_ip)
    # If auth is enabled, prepare a local file to save the key
    if ceph_client_name and ceph_client_key:
        with open(key_file, 'w') as f:
            f.write("[%s]\n\tkey = %s\n" %
                    (ceph_client_name, ceph_client_key))
        key_opt = "--keyring %s" % key_file
        rbd_key_file = key_file
    if is_setup:
        # If auth is enabled, prepare disk auth
        if ceph_client_name and ceph_client_key:
            auth_sec_uuid = _create_secret(auth_sec_usage_type, ceph_auth_key)
            disk_auth_dict = {"auth_user": ceph_auth_user,
                              "secret_type": auth_sec_usage_type,
                              "secret_uuid": auth_sec_uuid}
        # Clean up the image if it already exists
        ceph.rbd_image_rm(ceph_mon_ip, pool_name,
                          image_name, keyfile=rbd_key_file)

        # Create the image file if it does not exist
        _create_image(device_format, img_file, vm.name, storage_size,
                      ceph_disk_name, ceph_mon_ip, key_opt,
                      ceph_auth_user, ceph_auth_key)

        # Disk related config
        disk_src_dict = {"attrs": {"protocol": "rbd",
                                   "name": ceph_disk_name},
                         "hosts":  [{"name": ceph_mon_ip,
                                     "port": ceph_host_port}]}
        # Create network disk
        disk_xml = libvirt_disk.create_primitive_disk_xml("network", device, device_target, device_bus,
                                                          device_format, disk_src_dict, disk_auth_dict)
        if not keep_raw_image_as:
            if hotplug:
                virsh.attach_device(vm.name, disk_xml.xml,
                                    flagstr=attach_option, ignore_status=False, debug=True)
            else:
                vmxml = vm_xml.VMXML.new_from_dumpxml(vm.name)
                vmxml.add_device(disk_xml)
                vmxml.sync()
    else:
        ceph.rbd_image_rm(ceph_mon_ip, pool_name,
                          image_name, keyfile=rbd_key_file)
        # Remove ceph config and key file if created.
        for file_path in [ceph_cfg, key_file]:
            if os.path.exists(file_path):
                os.remove(file_path)
        if is_local_img_file and img_file and os.path.exists(img_file):
            libvirt.delete_local_disk("file", img_file)
        if auth_sec_uuid:
            virsh.secret_undefine(auth_sec_uuid, ignore_status=True)
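# A minimal usage sketch (assuming an avocado-vt test context where `vm` and
# `params` are already prepared; names are illustrative):
#
#     create_or_cleanup_ceph_backend_vm_disk(vm, params, is_setup=True)
#     try:
#         pass  # exercise the ceph-backed disk here
#     finally:
#         create_or_cleanup_ceph_backend_vm_disk(vm, params, is_setup=False)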
Example #12
def run(test, params, env):
    """
    Test virtio/virtio-transitional/virtio-non-transitional model of interface

    :param test: Test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment
    """
    def reboot():
        """
        Shutdown and restart guest, then wait for login
        """
        vm.destroy()
        vm.start()
        return vm.wait_for_login()

    def check_plug_to_pci_bridge(vm_name, mac):
        """
        Check if the nic is plugged onto pcie-to-pci-bridge

        :param vm_name: Vm name
        :param mac:  The mac address of plugged interface
        :return True if plugged onto pcie-to-pci-bridge, otherwise False
        """
        v_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        interface = v_xml.get_iface_all()[mac]
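        # The interface <address> element looks roughly like (illustrative):
        #   <address type='pci' domain='0x0000' bus='0x05' slot='0x01'
        #            function='0x0'/>
        # so the bus below is parsed from a hex string such as '0x05'.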
        bus = int(interface.find('address').get('bus'), 16)
        controllers = vmxml.get_controllers('pci')
        for controller in controllers:
            if controller.get('index') == str(bus):
                if controller.get('model') == 'pcie-to-pci-bridge':
                    return True
                break
        return False

    def detect_new_nic(mac):
        """
        Detect the new interface by domiflist

        :param mac: The mac address of plugged interface
        :return plugged interface name
        """
        def check_mac_exist():
            all_infos = libvirt.get_interface_details(vm_name)
            for nic_info in all_infos:
                if nic_info.get('mac') == mac:
                    return nic_info.get('interface')
            return False

        plugged_nic = utils_misc.wait_for(check_mac_exist, 5)
        if not plugged_nic:
            test.fail("Failed to plug device %s" % mac)
        return plugged_nic

    def renew_ip_address(session, mac, guest_os_type):
        """
        Renew ip for plugged nic

        :param session: Vm session
        :param mac: The mac address of plugged interface
        :param guest_os_type: Guest os type, Linux or Windows
        """
        if guest_os_type == 'Windows':
            utils_net.restart_windows_guest_network_by_key(
                session, "macaddress", mac)
            return
        ifname = utils_net.get_linux_ifname(session, mac)
        utils_net.create_network_script(ifname, mac, 'dhcp', '255.255.255.0')
        utils_net.restart_guest_network(session, mac)
        arp_clean = "arp -n|awk '/^[1-9]/{print \"arp -d \" $1}'|sh"
        session.cmd_output_safe(arp_clean)

    def get_hotplug_nic_ip(vm, nic, session, guest_os_type):
        """
        Get IP of the plugged interface

        :param vm: Vm object
        :param nic: Nic object
        :param session: Vm session
        :param guest_os_type: Guest os type, Linux or Windows
        :return: Nic ip
        """
        def __get_address():
            """
            Get the IP address and return it; configure a new IP if the
            device exists but has no IP

            :return: Ip address if get, otherwise None
            """
            try:
                index = [
                    _idx for _idx, _nic in enumerate(vm.virtnet) if _nic == nic
                ][0]
                return vm.wait_for_get_address(index, timeout=90)
            except IndexError:
                test.error("Nic '%s' not exists in VM '%s'" %
                           (nic["nic_name"], vm.name))
            except (virt_vm.VMIPAddressMissingError,
                    virt_vm.VMAddressVerificationError):
                renew_ip_address(session, nic["mac"], guest_os_type)
            return

        # Wait for ip address is configured for the nic device
        nic_ip = utils_misc.wait_for(__get_address, timeout=360)
        if nic_ip:
            return nic_ip
        cached_ip = vm.address_cache.get(nic["mac"])
        arps = process.system_output("arp -aen").decode()
        logging.debug("Can't get IP address:")
        logging.debug("\tCached IP: %s", cached_ip)
        logging.debug("\tARP table: %s", arps)
        return None

    def check_nic_removed(mac, session):
        """
        Check whether the nic with the given MAC address has been
        removed from the guest.

        :param mac: The mac address of plugged interface
        :param session: Vm session
        """
        except_mesg = ''
        try:
            if guest_os_type == 'Windows':
                except_mesg = "Get nic netconnectionid failed"
                utils_net.restart_windows_guest_network_by_key(
                    session, "macaddress", mac)
            else:
                except_mesg = ("Failed to determine interface"
                               " name with mac %s" % mac)
                utils_net.get_linux_ifname(session, mac)
        except exceptions.TestError as e:
            if except_mesg in str(e):
                return True
        else:
            return False

    def attach_nic():  # pylint: disable=W0611
        """
        Attach interface, by xml or cmd, for both hot and cold plug
        """
        def create_iface_xml(mac):
            """
            Create interface xml file

            :param mac: The mac address of nic device
            """
            iface = Interface(type_name='network')
            iface.source = iface_source
            iface.model = iface_model
            iface.mac_address = mac
            logging.debug("Create new interface xml: %s", iface)
            return iface

        plug_method = params.get('plug_method', 'interface')
        cold_plug = params.get('cold_plug', 'no')
        mac = utils_net.generate_mac_address_simple()
        iface_source = {'network': 'default'}
        iface_model = params["virtio_model"]
        options = ("network %s --model %s --mac %s" %
                   (iface_source['network'], iface_model, mac))
        nic_params = {
            'mac': mac,
            'nettype': params['nettype'],
            'ip_version': 'ipv4'
        }
        if cold_plug == "yes":
            options += ' --config'
        if plug_method == 'interface':  # Hotplug nic via attach-interface
            ret = virsh.attach_interface(vm_name, options, ignore_status=True)
        else:  # Hotplug nic via attach-device
            nic_xml = create_iface_xml(mac)
            nic_xml.xmltreefile.write()
            xml_file = nic_xml.xml
            with open(xml_file) as nic_file:
                logging.debug("Attach device by XML: %s", nic_file.read())
            ret = virsh.attach_device(domainarg=vm_name,
                                      filearg=xml_file,
                                      debug=True)
        libvirt.check_exit_status(ret)
        if cold_plug == "yes":
            reboot()  # Reboot guest if it is cold plug test
        detect_new_nic(mac)
        if plug_method == 'interface' and cold_plug == 'no':
            check_plug_to_pci_bridge(vm_name, mac)
        session = vm.wait_for_login(serial=True)
        # Add nic to VM object for further check
        nic_name = vm.add_nic(**nic_params)["nic_name"]
        nic = vm.virtnet[nic_name]
        # Configure IP inside guest for the newly added nic
        if not utils_misc.wait_for(
                lambda: get_hotplug_nic_ip(vm, nic, session, guest_os_type),
                timeout=30):
            test.fail("Does not find plugged nic %s in guest" % mac)
        options = ("network %s" % mac)
        if cold_plug == "yes":
            options += ' --config'
        # Detach nic device
        if plug_method == 'interface':
            ret = virsh.detach_interface(vm_name, options, ignore_status=True)
        else:
            with open(xml_file) as nic_file:
                logging.debug("Detach device by XML: %s", nic_file.read())
            ret = virsh.detach_device(domainarg=vm_name,
                                      filearg=xml_file,
                                      debug=True)
        libvirt.check_exit_status(ret)
        if cold_plug == "yes":
            session = reboot()  # Reboot guest if it is cold plug test
        # Check if nic is removed from guest
        if not utils_misc.wait_for(lambda: check_nic_removed(mac, session),
                                   timeout=30):
            test.fail("The nic %s still exist in guest after being unplugged" %
                      nic_name)

    def save_restore():  # pylint: disable=W0611
        """
        Sub test for save and restore
        """
        save_path = os.path.join(data_dir.get_tmp_dir(),
                                 '%s.save' % params['os_variant'])
        ret = virsh.save(vm_name, save_path)
        libvirt.check_exit_status(ret)
        ret = virsh.restore(save_path)
        libvirt.check_exit_status(ret)

    def ping_test(restart_network=False):
        """
        Basic ping test for interface

        :param restart_network: True or False. Whether to restart network
        :raise: test.fail if ping test fails
        """
        session = vm.wait_for_login()
        if restart_network:
            utils_net.restart_guest_network(session)

        dest = params.get('ping_dest', 'www.baidu.com')
        status, output = utils_test.ping(dest, 10, session=session, timeout=20)
        session.close()
        if status != 0:
            test.fail("Ping failed, status: %s,"
                      " output: %s" % (status, output))

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(params["main_vm"])
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    guest_src_url = params.get("guest_src_url")
    params['disk_model'] = params['virtio_model']
    guest_os_type = params['os_type']

    target_path = None

    if not libvirt_version.version_compare(5, 0, 0):
        test.cancel("This libvirt version doesn't support "
                    "virtio-transitional model.")

    # Download and replace image when guest_src_url provided
    if guest_src_url:
        image_name = params['image_path']
        target_path = utils_misc.get_path(data_dir.get_data_dir(), image_name)
        if not os.path.exists(target_path):
            download.get_file(guest_src_url, target_path)
        params["blk_source_name"] = target_path
    libvirt.set_vm_disk(vm, params)

    # Add pcie-to-pci-bridge when there is no one
    pci_controllers = vmxml.get_controllers('pci')
    for controller in pci_controllers:
        if controller.get('model') == 'pcie-to-pci-bridge':
            break
    else:
        contr_dict = {
            'controller_type': 'pci',
            'controller_model': 'pcie-to-pci-bridge'
        }
        cntl_add = libvirt.create_controller_xml(contr_dict)
        libvirt.add_controller(vm_name, cntl_add)
    try:  # Update interface model as defined
        iface_params = {'model': params['virtio_model']}
        libvirt.modify_vm_iface(vm_name, "update_iface", iface_params)
        if not vm.is_alive():
            vm.start()
        # Test if nic works well via ping
        ping_test()
        test_step = params.get("sub_test_step")
        if test_step:
            eval(test_step)()
            # Test if nic still works well after sub-step tests
            ping_test(True)
    finally:
        vm.destroy()
        backup_xml.sync()

        if guest_src_url and target_path:
            libvirt.delete_local_disk("file", path=target_path)
Example #13
def run(test, params, env):
    """
    Test pool command: virsh pool_autostart

    1) Define a given type pool
    2) Mark pool as autostart
    3) Restart libvirtd and check pool
    4) Destroy the pool
    5) Unmark pool as autostart
    6) Repeat step (3)
    """

    # Initialize the variables
    pool_name = params.get("pool_name", "temp_pool_1")
    pool_type = params.get("pool_type", "dir")
    pool_target = params.get("pool_target", "")
    source_format = params.get("source_format", "")
    source_name = params.get("pool_source_name", "gluster-vol1")
    source_path = params.get("pool_source_path", "/")
    ip_protocal = params.get("ip_protocal", "ipv4")
    pool_ref = params.get("pool_ref", "name")
    pool_uuid = params.get("pool_uuid", "")
    invalid_source_path = params.get("invalid_source_path", "")
    status_error = "yes" == params.get("status_error", "no")
    readonly_mode = "yes" == params.get("readonly_mode", "no")
    pre_def_pool = "yes" == params.get("pre_def_pool", "yes")
    disk_type = params.get("disk_type", "")
    vg_name = params.get("vg_name", "")
    lv_name = params.get("lv_name", "")
    update_policy = params.get("update_policy")

    # Readonly mode
    ro_flag = False
    if readonly_mode:
        logging.debug("Readonly mode test")
        ro_flag = True

    if pool_target == "":
        pool_target = os.path.join(test.tmpdir, pool_target)

    # The file for dumped pool xml
    p_xml = os.path.join(test.tmpdir, "pool.xml.tmp")

    if not libvirt_version.version_compare(1, 0, 0):
        if pool_type == "gluster":
            test.cancel("Gluster pool is not supported in current"
                        " libvirt version.")

    pool_ins = libvirt_storage.StoragePool()
    if pool_ins.pool_exists(pool_name):
        test.fail("Pool %s already exist" % pool_name)

    def check_pool(pool_name, pool_type, checkpoint,
                   expect_value="", expect_error=False):
        """
        Check the pool after autostart it

        :param pool_name: Name of the pool.
        :param pool_type: Type of the pool.
        :param checkpoint: Which part for checking.
        :param expect_value: Expected value.
        :param expect_error: Boolean value, expect command success or fail
        """
        libvirt_pool = libvirt_storage.StoragePool()
        virsh.pool_list(option="--all", debug=True)
        if checkpoint == 'State':
            actual_value = libvirt_pool.pool_state(pool_name)
        if checkpoint == 'Autostart':
            actual_value = libvirt_pool.pool_autostart(pool_name)
        if actual_value != expect_value:
            if not expect_error:
                if checkpoint == 'State' and pool_type in ("dir", "scsi"):
                    error_msg = "Dir pool should be always active when libvirtd restart. "
                    error_msg += "See https://bugzilla.redhat.com/show_bug.cgi?id=1238610"
                    logging.error(error_msg)
                test.fail("Pool %s isn't %s as expected" % (checkpoint, expect_value))
            else:
                logging.debug("Pool %s is %s as expected", checkpoint, actual_value)

    def change_source_path(new_path, update_policy="set"):
        """
        Set or add a source device path in the pool xml

        :param new_path: The new device path for the pool source
        :param update_policy: "set" to replace the path, "add" to append one
        :return: The updated PoolXML instance
        """
        n_poolxml = pool_xml.PoolXML.new_from_dumpxml(pool_name)
        s_xml = n_poolxml.get_source()
        s_xml.device_path = new_path
        if update_policy == "set":
            n_poolxml.set_source(s_xml)
        elif update_policy == "add":
            n_poolxml.add_source("device", {"path": new_path})
        else:
            test.error("Unsupported policy type")
        logging.debug("After change_source_path:\n%s" %
                      open(n_poolxml.xml).read())
        return n_poolxml
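    # With update_policy="add", the pool <source> ends up with an extra
    # <device> entry, roughly (illustrative):
    #   <source>
    #     <device path='/dev/original'/>
    #     <device path='/dev/new'/>
    #   </source>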

    # Run Testcase
    pvt = utlv.PoolVolumeTest(test, params)
    emulated_image = "emulated-image"
    kwargs = {'image_size': '1G', 'pre_disk_vol': ['100M'],
              'source_name': source_name, 'source_path': source_path,
              'source_format': source_format, 'persistent': True,
              'ip_protocal': ip_protocal}
    pool = pool_name
    clean_mount = False
    new_device = None
    try:
        if pre_def_pool:
            # Step(1)
            # Pool define
            pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image,
                         **kwargs)
            # Remove the partition for disk pool,
            # since a leftover partition can cause pool start to fail
            if pool_type == "disk":
                virsh.pool_build(pool_name, "--overwrite", debug=True)
            # Get pool uuid:
            if pool_ref == "uuid" and not pool_uuid:
                pool = pool_ins.get_pool_uuid(pool_name)

            # Setup logical block device
            # Change pool source path
            # Undefine pool
            # Define pool with new xml
            # Start pool
            if update_policy:
                new_device = utlv.setup_or_cleanup_iscsi(True)
                lv_utils.vg_create(vg_name, new_device)
                new_device = utlv.create_local_disk(disk_type, size="0.5",
                                                    vgname=vg_name, lvname=lv_name)
                new_path = new_device
                if invalid_source_path:
                    new_path = invalid_source_path
                if pool_type == "fs":
                    utlv.mkfs(new_device, source_format)
                n_poolxml = change_source_path(new_path, update_policy)
                p_xml = n_poolxml.xml
                if virsh.pool_undefine(pool_name, ignore_status=True).exit_status:
                    test.fail("Undefine pool %s failed" % pool_name)
                if virsh.pool_define(p_xml, ignore_status=True).exit_status:
                    test.fail("Define pool %s from %s failed" % (pool_name, p_xml))
                logging.debug("Start pool %s" % pool_name)
                result = virsh.pool_start(pool_name, ignore_status=True, debug=True)
                utlv.check_exit_status(result, status_error)
                # Mount a valid fs to pool target
                if pool_type == "fs":
                    source_list = []
                    mnt_cmd = ""
                    pool_target = n_poolxml.target_path
                    if invalid_source_path:
                        source_list.append(new_device)
                    else:
                        s_devices = n_poolxml.xmltreefile.findall("//source/device")
                        for dev in s_devices:
                            source_list.append(dev.get('path'))
                    try:
                        for src in source_list:
                            mnt_cmd = "mount %s %s" % (src, pool_target)
                            if not process.system(mnt_cmd, shell=True):
                                clean_mount = True
                    except process.CmdError:
                        test.error("Failed to run %s" % mnt_cmd)

        # Step(2)
        # Pool autostart
        logging.debug("Try to mark pool %s as autostart" % pool_name)
        result = virsh.pool_autostart(pool, readonly=ro_flag,
                                      ignore_status=True, debug=True)
        if not pre_def_pool:
            utlv.check_exit_status(result, status_error)
        if not result.exit_status:
            check_pool(pool_name, pool_type, checkpoint='Autostart',
                       expect_value="yes", expect_error=status_error)

            # Step(3)
            # Restart libvirtd and check pool status
            logging.info("Try to restart libvirtd")
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
            check_pool(pool_name, pool_type, checkpoint="State",
                       expect_value="active", expect_error=status_error)

            # Step(4)
            # Pool destroy
            if pool_ins.is_pool_active(pool_name):
                virsh.pool_destroy(pool_name)
                logging.debug("Pool %s destroyed" % pool_name)

            # Step(5)
            # Pool autostart disable
            logging.debug("Try to unmark pool %s as autostart" % pool_name)
            result = virsh.pool_autostart(pool, extra="--disable", debug=True,
                                          ignore_status=True)
            if not pre_def_pool:
                utlv.check_exit_status(result, status_error)
            if not result.exit_status:
                check_pool(pool_name, pool_type, checkpoint='Autostart',
                           expect_value="no", expect_error=status_error)

                # Repeat step (3)
                logging.debug("Try to restart libvirtd")
                libvirtd = utils_libvirtd.Libvirtd()
                libvirtd.restart()
                check_pool(pool_name, pool_type, checkpoint='State',
                           expect_value="inactive", expect_error=status_error)
    finally:
        # Clean up
        logging.debug("Try to clean up env")
        try:
            if clean_mount is True:
                for src in source_list:
                    process.system("umount %s" % pool_target)
            if pre_def_pool:
                pvt.cleanup_pool(pool_name, pool_type, pool_target,
                                 emulated_image, **kwargs)
            if new_device:
                utlv.delete_local_disk(disk_type, vgname=vg_name, lvname=lv_name)
                lv_utils.vg_remove(vg_name)
                utlv.setup_or_cleanup_iscsi(False)
            if os.path.exists(p_xml):
                os.remove(p_xml)
        except Exception as details:
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
            logging.error(str(details))
Example #14
def run(test, params, env):
    """
    Test migration with special network settings
    1) migrate guest with bridge type interface connected to ovs bridge
    2) migrate guest with direct type interface when a macvtap device name
        exists on dest host

    :param test: test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def check_vm_network_accessed(ping_dest, session=None):
        """
        The operations to the VM need to be done before or after
        migration happens

        :param ping_dest: The destination to be ping
        :param session: The session object to the host
        :raise: test.fail when ping fails
        """
        # Confirm local/remote VM can be accessed through network.
        logging.info("Check VM network connectivity")
        status, output = utils_net.ping(ping_dest,
                                        count=10,
                                        timeout=20,
                                        output_func=logging.debug,
                                        session=session)
        if status != 0:
            test.fail("Ping failed, status: %s, output: %s" % (status, output))

    def vm_sync(vmxml, vm_name=None, virsh_instance=virsh):
        """
        A wrapper to sync vm xml on localhost and remote host

        :param vmxml: domain VMXML instance
        :param vm_name: The name of VM
        :param virsh_instance: virsh instance object
        """
        if vm_name and virsh_instance != virsh:
            remote.scp_to_remote(server_ip, '22', server_user,
                                 server_pwd,
                                 vmxml.xml, vmxml.xml)
            if virsh_instance.domain_exists(vm_name):
                if virsh_instance.is_alive(vm_name):
                    virsh_instance.destroy(vm_name, ignore_status=True)
                virsh_instance.undefine(vm_name, ignore_status=True)
            virsh_instance.define(vmxml.xml, debug=True)
        else:
            vmxml.sync()

    def update_iface_xml(vm_name, iface_dict, virsh_instance=virsh):
        """
        Update interfaces for guest

        :param vm_name: The name of VM
        :param iface_dict: The interface configurations params
        :param virsh_instance: virsh instance object
        """
        logging.debug("update iface xml")
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(
            vm_name, virsh_instance=virsh_instance)
        vmxml.remove_all_device_by_type('interface')
        vm_sync(vmxml, vm_name, virsh_instance=virsh_instance)
        iface = interface.Interface('network')
        iface.xml = libvirt.modify_vm_iface(vm_name, "get_xml", iface_dict,
                                            virsh_instance=virsh_instance)
        vmxml.add_device(iface)
        vmxml.xmltreefile.write()
        vm_sync(vmxml, vm_name, virsh_instance=virsh_instance)
        logging.debug("VM XML after updating interface: %s" % vmxml)

    def update_net_dict(net_dict, runner=utils_net.local_runner):
        """
        Update network dict

        :param net_dict: The network dict to be updated
        :param runner: Command runner
        :return: Updated network dict
        """
        if net_dict.get("net_name", "") == "direct-macvtap":
            logging.info("Updating network iface name")
            iface_name = utils_net.get_net_if(runner=runner, state="UP")[0]
            net_dict.update({"forward_iface": iface_name})
        else:
            # TODO: support other types
            logging.info("No need to update net_dict. We only support to "
                         "update direct-macvtap type for now.")
        logging.debug("net_dict is %s" % net_dict)
        return net_dict

    def get_remote_direct_mode_vm_mac(vm_name, uri):
        """
        Get mac of remote direct mode VM

        :param vm_name: The name of VM
        :param uri: The uri on destination
        :return: mac
        :raise: test.fail when the result of virsh domiflist is incorrect
        """
        vm_mac = None
        res = virsh.domiflist(
            vm_name, uri=uri, ignore_status=False).stdout_text.strip().split("\n")
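        # `virsh domiflist` output looks roughly like this (illustrative):
        #   Interface   Type     Source   Model    MAC
        #   -----------------------------------------------------
        #   macvtap0    direct   eno1     virtio   52:54:00:xx:yy:zz
        # hence the MAC is the last field of the last line.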
        if len(res) < 2:
            test.fail("Unable to get remote VM's mac: %s" % res)
        else:
            vm_mac = res[-1].split()[-1]
        return vm_mac

    def create_fake_tap(remote_session):
        """
        Create a fake macvtap on destination host.

        :param remote_session: The session to the destination host.
        :return: The new tap device
        """
        tap_cmd = "ls /dev/tap* |awk -F 'tap' '{print $NF}'"
        tap_idx = remote_session.cmd_output(tap_cmd).strip()
        if not tap_idx:
            test.fail("Unable to get tap index using %s."
                      % tap_cmd)
        fake_tap_dest = 'tap'+str(int(tap_idx)+1)
        logging.debug("creating a fake tap %s...", fake_tap_dest)
        cmd = "touch /dev/%s" % fake_tap_dest
        remote_session.cmd(cmd)
        return fake_tap_dest
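    # Creating a plain file at /dev/tapN simulates a pre-existing macvtap
    # device name on the destination host, which is the scenario this test
    # exercises during migration.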

    migration_test = migration.MigrationTest()
    migration_test.check_parameters(params)

    libvirt_version.is_libvirt_feature_supported(params)

    # Params to update disk using shared storage
    params["disk_type"] = "file"
    params["disk_source_protocol"] = "netfs"
    params["mnt_path_name"] = params.get("nfs_mount_dir")

    # Local variables
    virsh_args = {"debug": True}
    server_ip = params.get("server_ip")
    server_user = params.get("server_user", "root")
    server_pwd = params.get("server_pwd")
    client_ip = params.get("client_ip")
    client_pwd = params.get("client_pwd")
    virsh_options = params.get("virsh_options", "")
    extra = params.get("virsh_migrate_extra")
    options = params.get("virsh_migrate_options", "--live --p2p --verbose")
    restart_dhclient = params.get("restart_dhclient", "dhclient -r; dhclient")
    ping_dest = params.get("ping_dest", "www.baidu.com")
    extra_args = migration_test.update_virsh_migrate_extra_args(params)

    migrate_vm_back = "yes" == params.get("migrate_vm_back", "no")

    target_vm_name = params.get("target_vm_name")
    direct_mode = "yes" == params.get("direct_mode", "no")
    check_macvtap_exists = "yes" == params.get("check_macvtap_exists", "no")
    create_fake_tap_dest = "yes" == params.get("create_fake_tap_dest", "no")
    macvtap_cmd = params.get("macvtap_cmd")
    modify_target_vm = "yes" == params.get("modify_target_vm", "no")
    ovs_bridge_name = params.get("ovs_bridge_name")
    network_dict = eval(params.get("network_dict", '{}'))
    iface_dict = eval(params.get("iface_dict", '{}'))
    remote_virsh_dargs = {'remote_ip': server_ip, 'remote_user': server_user,
                          'remote_pwd': server_pwd, 'unprivileged_user': None,
                          'ssh_remote_auth': True}
    cmd_parms = {'server_ip': server_ip, 'server_user': server_user,
                 'server_pwd': server_pwd}

    virsh_session_remote = None
    libvirtd_conf = None
    mig_result = None
    target_org_xml = None
    target_vm_session = None
    target_vm = None
    exp_macvtap = []
    fake_tap_dest = None

    # params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    params["virsh_migrate_connect_uri"] = libvirt_vm.complete_uri(
        params.get("migrate_source_host"))
    src_uri = params.get("virsh_migrate_connect_uri")
    dest_uri = params.get("virsh_migrate_desturi")

    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()
    bk_uri = vm.connect_uri

    postcopy_options = params.get("postcopy_options")
    action_during_mig = None
    if postcopy_options:
        extra = "%s %s" % (extra, postcopy_options)
        action_during_mig = virsh.migrate_postcopy

    # For safety reasons, back up the guest XML before making changes.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()

    try:
        # Create a remote runner for later use
        runner_on_target = remote.RemoteRunner(host=server_ip,
                                               username=server_user,
                                               password=server_pwd)
        remote_session = remote.remote_login("ssh", server_ip, "22",
                                             server_user, server_pwd,
                                             r'[$#%]')
        virsh_session_remote = virsh.VirshPersistent(**remote_virsh_dargs)

        if target_vm_name:
            target_vm = libvirt_vm.VM(target_vm_name, params, vm.root_dir,
                                      vm.address_cache)
            target_vm.connect_uri = dest_uri
            if not virsh_session_remote.domain_exists(target_vm_name):
                test.error("VM %s should be installed on %s."
                           % (target_vm_name, server_ip))
            # Backup guest's xml on remote
            target_org_xml = vm_xml.VMXML.new_from_inactive_dumpxml(
                target_vm_name, virsh_instance=virsh_session_remote)
            # Scp original xml to remote for restoration
            remote.scp_to_remote(server_ip, '22', server_user,
                                 server_pwd,
                                 target_org_xml.xml, target_org_xml.xml)
            logging.debug("target xml is %s" % target_org_xml)

        if ovs_bridge_name:
            status, stdout = utils_net.create_ovs_bridge(ovs_bridge_name)
            if status:
                test.fail("Failed to create ovs bridge on local. Status: %s"
                          "Stdout: %s" % (status, stdout))
            status, stdout = utils_net.create_ovs_bridge(
                ovs_bridge_name, session=remote_session)
            if status:
                test.fail("Failed to create ovs bridge on remote. Status: %s"
                          "Stdout: %s" % (status, stdout))
        if network_dict:
            update_net_dict(network_dict, runner=remote_session.cmd)
            libvirt_network.create_or_del_network(
                network_dict, remote_args=remote_virsh_dargs)
            logging.info("dest: network created")
            update_net_dict(network_dict)
            libvirt_network.create_or_del_network(network_dict)
            logging.info("localhost: network created")

        if target_vm_name:
            if modify_target_vm and iface_dict:
                logging.info("Updating remote VM's interface")
                update_iface_xml(target_vm_name, iface_dict,
                                 virsh_instance=virsh_session_remote)
            target_vm.start()
            target_vm_session = target_vm.wait_for_serial_login(timeout=240)
            check_vm_network_accessed(ping_dest, session=target_vm_session)
            if check_macvtap_exists and macvtap_cmd:
                # Get macvtap device's index on remote after target_vm started
                idx = remote_session.cmd_output(macvtap_cmd).strip()
                if not idx:
                    test.fail("Unable to get macvtap index using %s."
                              % macvtap_cmd)
                # Generate the expected macvtap devices' index list
                exp_macvtap = ['macvtap'+idx, 'macvtap'+str(int(idx)+1)]
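                # After migration the incoming guest is expected to take the
                # next free macvtap index on the destination, hence two
                # devices: the target VM's and the migrated VM's.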
                if create_fake_tap_dest:
                    fake_tap_dest = create_fake_tap(remote_session)

        remote_session.close()
        # Change domain network xml
        if iface_dict:
            if "mac" not in iface_dict:
                mac = utils_net.generate_mac_address_simple()
                iface_dict.update({'mac': mac})
            else:
                mac = iface_dict["mac"]

            update_iface_xml(vm_name, iface_dict)

        # Change the disk of the vm
        libvirt.set_vm_disk(vm, params)

        if not vm.is_alive():
            try:
                vm.start()
            except virt_vm.VMStartError as err:
                test.fail("Failed to start VM: %s" % err)

        logging.debug("Guest xml after starting:\n%s",
                      vm_xml.VMXML.new_from_dumpxml(vm_name))

        # Check local guest network connection before migration
        if vm.serial_console is not None:
            vm.cleanup_serial_console()
        vm.create_serial_console()
        vm_session = vm.wait_for_serial_login(timeout=240)
        if not utils_package.package_install('dhcp-client', session=vm_session):
            test.error("Failed to install dhcp-client on guest.")
        utils_net.restart_guest_network(vm_session)
        vm_ip = utils_net.get_guest_ip_addr(vm_session, mac)
        logging.debug("VM IP Addr: %s", vm_ip)

        if direct_mode:
            check_vm_network_accessed(ping_dest, session=vm_session)
        else:
            check_vm_network_accessed(vm_ip)

        # Execute migration process
        vms = [vm]

        migration_test.do_migration(vms, None, dest_uri, 'orderly',
                                    options, thread_timeout=900,
                                    ignore_status=True, virsh_opt=virsh_options,
                                    func=action_during_mig,
                                    extra_opts=extra,
                                    **extra_args)

        mig_result = migration_test.ret

        # Check network accessibility after migration
        if int(mig_result.exit_status) == 0:
            vm.connect_uri = dest_uri
            if vm.serial_console is not None:
                vm.cleanup_serial_console()
            vm.create_serial_console()
            vm_session_after_mig = vm.wait_for_serial_login(timeout=240)
            vm_session_after_mig.cmd(restart_dhclient)
            check_vm_network_accessed(ping_dest, session=vm_session_after_mig)

            if check_macvtap_exists and macvtap_cmd:
                remote_session = remote.remote_login("ssh", server_ip, "22",
                                                     server_user, server_pwd,
                                                     r'[$#%]')
                # Check macvtap devices' index after migration
                idx = remote_session.cmd_output(macvtap_cmd)
                act_macvtap = ['macvtap'+i for i in idx.strip().split("\n")]
                if act_macvtap != exp_macvtap:
                    test.fail("macvtap devices after migration are incorrect!"
                              " Actual: %s, Expected: %s. "
                              % (act_macvtap, exp_macvtap))
        else:
            if fake_tap_dest:
                res = remote.run_remote_cmd("ls /dev/%s" % fake_tap_dest,
                                            params, runner_on_target)
                libvirt.check_exit_status(res)

        if target_vm_session:
            check_vm_network_accessed(ping_dest, session=target_vm_session)
        # Execute migration from remote
        if migrate_vm_back:
            ssh_connection = utils_conn.SSHConnection(server_ip=client_ip,
                                                      server_pwd=client_pwd,
                                                      client_ip=server_ip,
                                                      client_pwd=server_pwd)
            try:
                ssh_connection.conn_check()
            except utils_conn.ConnectionError:
                ssh_connection.conn_setup()
                ssh_connection.conn_check()

            # Pre migration setup for local machine
            migration_test.migrate_pre_setup(src_uri, params)

            cmd = "virsh migrate %s %s %s" % (vm_name, options, src_uri)
            logging.debug("Start migration: %s", cmd)
            cmd_result = remote.run_remote_cmd(cmd, params, runner_on_target)
            logging.info(cmd_result)
            if cmd_result.exit_status:
                test.fail("Failed to run '%s' on remote: %s" % (cmd, cmd_result))
            logging.debug("VM is migrated back.")

            vm.connect_uri = bk_uri
            if vm.serial_console is not None:
                vm.cleanup_serial_console()
            vm.create_serial_console()
            vm_session_after_mig_bak = vm.wait_for_serial_login(timeout=240)
            vm_session_after_mig_bak.cmd(restart_dhclient)
            check_vm_network_accessed(ping_dest, vm_session_after_mig_bak)
    finally:
        logging.debug("Recover test environment")
        vm.connect_uri = bk_uri
        migration_test.cleanup_vm(vm, dest_uri)

        logging.info("Recovery VM XML configration")
        orig_config_xml.sync()
        remote_session = remote.remote_login("ssh", server_ip, "22",
                                             server_user, server_pwd,
                                             r'[$#%]')
        if target_vm and target_vm.is_alive():
            target_vm.destroy(gracefully=False)

        if target_org_xml and target_vm_name:
            logging.info("Recovery XML configration for %s.", target_vm_name)
            virsh_session_remote = virsh.VirshPersistent(**remote_virsh_dargs)
            vm_sync(target_org_xml, vm_name=target_vm_name,
                    virsh_instance=virsh_session_remote)
            virsh_session_remote.close_session()

        if fake_tap_dest:
            remote_session.cmd_output_safe("rm -rf /dev/%s" % fake_tap_dest)

        if network_dict:
            libvirt_network.create_or_del_network(
                network_dict, is_del=True, remote_args=remote_virsh_dargs)
            libvirt_network.create_or_del_network(network_dict, is_del=True)
        if ovs_bridge_name:
            utils_net.delete_ovs_bridge(ovs_bridge_name)
            utils_net.delete_ovs_bridge(ovs_bridge_name, session=remote_session)

        remote_session.close()
        if target_vm_session:
            target_vm_session.close()

        if virsh_session_remote:
            virsh_session_remote.close_session()

        if migrate_vm_back:
            if 'ssh_connection' in locals():
                ssh_connection.auto_recover = True
            migration_test.migrate_pre_setup(src_uri, params,
                                             cleanup=True)
        logging.info("Remove local NFS image")
        source_file = params.get("source_file")
        if source_file:
            libvirt.delete_local_disk("file", path=source_file)
Example #15
def run(test, params, env):
    """
    Test migration with special network settings
    1) migrate guest with bridge type interface connected to ovs bridge

    :param test: test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def check_vm_network_accessed(ping_dest, session=None):
        """
        Check that the VM is reachable over the network; used both
        before and after migration

        :param ping_dest: The destination to be pinged
        :param session: The session object to the host
        :raise: test.fail when ping fails
        """
        # Confirm local/remote VM can be accessed through network.
        logging.info("Check VM network connectivity")
        status, output = utils_net.ping(ping_dest,
                                        count=10,
                                        timeout=20,
                                        output_func=logging.debug,
                                        session=session)
        if status != 0:
            test.fail("Ping failed, status: %s, output: %s" % (status, output))

    def update_iface_xml(vm_name, iface_dict):
        """
        Update interfaces for guest

        :param vm_name: The name of VM
        :param iface_dict: The interface configurations params
        """
        logging.debug("update iface xml")
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        vmxml.remove_all_device_by_type('interface')
        vmxml.sync()

        iface = interface.Interface('network')
        iface.xml = libvirt.modify_vm_iface(vm.name, "get_xml", iface_dict)
        libvirt.add_vm_device(vmxml, iface)

    migration_test = migration.MigrationTest()
    migration_test.check_parameters(params)

    # Params for NFS shared storage
    shared_storage = params.get("migrate_shared_storage", "")
    if shared_storage == "":
        default_guest_asset = defaults.get_default_guest_os_info()['asset']
        default_guest_asset = "%s.qcow2" % default_guest_asset
        shared_storage = os.path.join(params.get("nfs_mount_dir"),
                                      default_guest_asset)
        logging.debug("shared_storage:%s", shared_storage)

    # Params to update disk using shared storage
    params["disk_type"] = "file"
    params["disk_source_protocol"] = "netfs"
    params["mnt_path_name"] = params.get("nfs_mount_dir")

    # Local variables
    virsh_args = {"debug": True}
    server_ip = params.get("server_ip")
    server_user = params.get("server_user", "root")
    server_pwd = params.get("server_pwd")
    client_ip = params.get("client_ip")
    client_pwd = params.get("client_pwd")
    virsh_options = params.get("virsh_options", "")
    extra = params.get("virsh_migrate_extra")
    options = params.get("virsh_migrate_options", "--live --p2p --verbose")
    func_params_exists = "yes" == params.get("func_params_exists", "no")
    migr_vm_back = "yes" == params.get("migr_vm_back", "no")

    ovs_bridge_name = params.get("ovs_bridge_name")
    network_dict = eval(params.get("network_dict", '{}'))
    iface_dict = eval(params.get("iface_dict", '{}'))
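    # Illustrative shapes for the dicts above (real values come from the
    # test cfg file): network_dict would describe an ovs-backed libvirt
    # network, and iface_dict a bridge-type interface on that ovs
    # bridge, e.g. {'type': 'bridge', 'source': '{"bridge": "ovsbr0"}'}.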
    remote_virsh_dargs = {
        'remote_ip': server_ip,
        'remote_user': server_user,
        'remote_pwd': server_pwd,
        'unprivileged_user': None,
        'ssh_remote_auth': True
    }
    func_name = None
    libvirtd_conf = None
    mig_result = None

    # params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    params["virsh_migrate_connect_uri"] = libvirt_vm.complete_uri(
        params.get("migrate_source_host"))
    src_uri = params.get("virsh_migrate_connect_uri")
    dest_uri = params.get("virsh_migrate_desturi")

    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()

    extra_args = {}
    if func_params_exists:
        extra_args.update({'func_params': params})
    postcopy_options = params.get("postcopy_options")
    if postcopy_options:
        extra = "%s %s" % (extra, postcopy_options)
        func_name = virsh.migrate_postcopy
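        # virsh.migrate_postcopy issues "virsh migrate-postcopy" while
        # the migration thread is running, switching the started live
        # migration into postcopy mode.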

    # Back up the original VM XML so it can be restored on cleanup.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()

    try:
        # Create a remote runner for later use
        runner_on_target = remote.RemoteRunner(host=server_ip,
                                               username=server_user,
                                               password=server_pwd)
        remote_session = remote.remote_login("ssh", server_ip, "22",
                                             server_user, server_pwd, r'[$#%]')
        if ovs_bridge_name:
            status, stdout = utils_net.create_ovs_bridge(ovs_bridge_name)
            if status:
                test.fail("Failed to create ovs bridge on local. Status: %s"
                          "Stdout: %s" % (status, stdout))
            status, stdout = utils_net.create_ovs_bridge(
                ovs_bridge_name, session=remote_session)
            if status:
                test.fail("Failed to create ovs bridge on remote. Status: %s"
                          "Stdout: %s" % (status, stdout))
        if network_dict:
            libvirt_network.create_or_del_network(
                network_dict, remote_args=remote_virsh_dargs)
            libvirt_network.create_or_del_network(network_dict)

        remote_session.close()
        # Change domain network xml
        if iface_dict:
            if "mac" not in iface_dict:
                mac = utils_net.generate_mac_address_simple()
                iface_dict.update({'mac': mac})
            else:
                mac = iface_dict["mac"]

            update_iface_xml(vm_name, iface_dict)

        # Change the disk of the vm
        libvirt.set_vm_disk(vm, params)

        if not vm.is_alive():
            try:
                vm.start()
            except virt_vm.VMStartError as err:
                test.fail("Failed to start VM: %s" % err)

        logging.debug("Guest xml after starting:\n%s",
                      vm_xml.VMXML.new_from_dumpxml(vm_name))

        # Check local guest network connection before migration
        if vm.serial_console is not None:
            vm.cleanup_serial_console()
        vm.create_serial_console()
        vm_session = vm.wait_for_serial_login(timeout=240)
        utils_net.restart_guest_network(vm_session)
        vm_ip = utils_net.get_guest_ip_addr(vm_session, mac)
        logging.debug("VM IP Addr: %s", vm_ip)

        check_vm_network_accessed(vm_ip)

        # Execute migration process
        vms = [vm]

        migration_test.do_migration(vms,
                                    None,
                                    dest_uri,
                                    'orderly',
                                    options,
                                    thread_timeout=900,
                                    ignore_status=True,
                                    virsh_opt=virsh_options,
                                    func=func_name,
                                    extra_opts=extra,
                                    **extra_args)

        mig_result = migration_test.ret
        migration_test.check_result(mig_result, params)

        if int(mig_result.exit_status) == 0:
            remote_session = remote.remote_login("ssh", server_ip, "22",
                                                 server_user, server_pwd,
                                                 r'[$#%]')
            check_vm_network_accessed(vm_ip, session=remote_session)
            remote_session.close()

        # Execute migration from remote
        if migr_vm_back:
            ssh_connection = utils_conn.SSHConnection(server_ip=client_ip,
                                                      server_pwd=client_pwd,
                                                      client_ip=server_ip,
                                                      client_pwd=server_pwd)
            try:
                ssh_connection.conn_check()
            except utils_conn.ConnectionError:
                ssh_connection.conn_setup()
                ssh_connection.conn_check()

            # Pre migration setup for local machine
            migration_test.migrate_pre_setup(src_uri, params)

            cmd = "virsh migrate %s %s %s" % (vm_name, options, src_uri)
            logging.debug("Start migration: %s", cmd)
            cmd_result = remote.run_remote_cmd(cmd, params, runner_on_target)
            logging.info(cmd_result)
            if cmd_result.exit_status:
                test.fail("Failed to run '%s' on remote: %s" %
                          (cmd, cmd_result))
            logging.debug("VM is migrated back.")
            check_vm_network_accessed(vm_ip)
    finally:
        logging.debug("Recover test environment")
        # Clean VM on destination and source
        try:
            migration_test.cleanup_dest_vm(vm, vm.connect_uri, dest_uri)
        except Exception as err:
            logging.error(err)
        if vm.is_alive():
            vm.destroy(gracefully=False)

        logging.info("Recovery VM XML configration")
        orig_config_xml.sync()
        remote_session = remote.remote_login("ssh", server_ip, "22",
                                             server_user, server_pwd, r'[$#%]')
        if network_dict:
            libvirt_network.create_or_del_network(
                network_dict, is_del=True, remote_args=remote_virsh_dargs)
            libvirt_network.create_or_del_network(network_dict, is_del=True)
        if ovs_bridge_name:
            utils_net.delete_ovs_bridge(ovs_bridge_name)
            utils_net.delete_ovs_bridge(ovs_bridge_name,
                                        session=remote_session)

        remote_session.close()
        if migr_vm_back:
            if 'ssh_connection' in locals():
                ssh_connection.auto_recover = True
            migration_test.migrate_pre_setup(src_uri, params, cleanup=True)
        logging.info("Remove local NFS image")
        source_file = params.get("source_file")
        if source_file:
            libvirt.delete_local_disk("file", path=source_file)
Exemplo n.º 16
def run(test, params, env):
    """
    Test virsh domblkthreshold option.

    1.Prepare backend storage (file/luks/iscsi/gluster/ceph/nbd)
    2.Start VM
    3.Set domblkthreshold on target device in VM
    4.Trigger one threshold event
    5.Check threshold event is received as expected
    6.Clean up test environment
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}
    block_threshold_timeout = params.get("block_threshold_timeout", "120")
    event_type = params.get("event_type", "block-threshold")
    block_threshold_option = params.get("block_threshold_option", "--loop")

    def set_vm_block_domblkthreshold(vm_name, target_device, threshold, **dargs):
        """
        Set VM block threshold on specific target device.

        :param vm_name: VM name.
        :param target_device: target device in VM
        :param threshold: threshold value with specific unit such as 100M
        :param dargs: mutable parameter dict
        """
        ret = virsh.domblkthreshold(vm_name, target_device, threshold, **dargs)
        libvirt.check_exit_status(ret)

    def trigger_block_threshold_event(vm_domain, target):
        """
        Trigger block threshold event.

        :param vm_domain: VM name
        :param target: Disk dev in VM.
        """
        try:
            session = vm_domain.wait_for_login()
            # Sleep 10 seconds to let the event-watching thread start
            # first in the main thread
            time.sleep(10)
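            # dd writes 101 x 1M of random data: just enough to cross
            # the default 100M threshold set via domblkthreshold in the
            # main flow.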
            cmd = ("fdisk -l /dev/{0} && mkfs.ext4 -F /dev/{0} && "
                   " mount /dev/{0} /mnt && "
                   " dd if=/dev/urandom of=/mnt/bigfile bs=1M count=101"
                   .format(target))
            status, output = session.cmd_status_output(cmd)
            if status:
                test.error("Failed to mount and fill data in VM: %s" % output)
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            raise

    def check_threshold_event(vm_name, event_type, event_timeout, options, **dargs):
        """
        Check threshold event.

        :param vm_name: VM name
        :param event_type: event type.
        :param event_timeout: event timeout value
        :param options: event option
        :param dargs: dynamic parameters.
        """
        ret = virsh.event(vm_name, event_type, event_timeout, options, **dargs)
        logging.debug(ret.stdout_text)
        libvirt.check_exit_status(ret)

    def create_vol(p_name, vol_params):
        """
        Create volume.

        :param p_name: Pool name.
        :param vol_params: Volume parameters dict.
        """
        # Clean up dirty volumes if pool has.
        pv = libvirt_storage.PoolVolume(p_name)
        vol_name_list = pv.list_volumes()
        for vol_name in vol_name_list:
            pv.delete_volume(vol_name)

        volxml = vol_xml.VolXML()
        v_xml = volxml.new_vol(**vol_params)
        v_xml.xmltreefile.write()

        ret = virsh.vol_create(p_name, v_xml.xml, **virsh_dargs)
        libvirt.check_exit_status(ret)

    def trigger_block_commit(vm_name, target, blockcommit_options, **virsh_dargs):
        """
        Trigger blockcommit.

        :param vm_name: VM name
        :param target: Disk dev in VM.
        :param blockcommit_options: blockcommit option
        :param virsh_dargs: additional parameters
        """
        result = virsh.blockcommit(vm_name, target,
                                   blockcommit_options, ignore_status=False, **virsh_dargs)

    def trigger_block_copy(vm_name, target, dest_path, blockcopy_options, **virsh_dargs):
        """
        Trigger blockcopy

        :param vm_name: string, VM name
        :param target: string, target disk
        :param dest_path: string, the path of copied disk
        :param blockcopy_options: string, some options applied
        :param virsh_dargs: additional options
        """
        result = virsh.blockcopy(vm_name, target, dest_path, blockcopy_options, **virsh_dargs)
        libvirt.check_exit_status(result)

    def trigger_mirror_threshold_event(vm_domain, target):
        """
        Trigger mirror mode block threshold event.

        :param vm_domain: VM name
        :param target: Disk target in VM.
        """
        try:
            session = vm_domain.wait_for_login()
            # Sleep 10 seconds to let the event-watching thread start
            # first in the main thread
            time.sleep(10)
            cmd = ("dd if=/dev/urandom of=file bs=1G count=3")
            status, output = session.cmd_status_output(cmd)
            if status:
                test.error("Failed to fill data in VM target: %s with %s" % (target, output))
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            raise

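    # Once blockcopy starts, the domain XML grows a <mirror> element
    # roughly like (abridged):
    #   <mirror type='file' job='copy'>
    #     <source file='...' index='2'/>
    #   </mirror>
    # The helper below extracts that index so the copy destination can
    # be addressed as e.g. "vda[2]".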
    def get_mirror_source_index(vm_name, dev_index=0):
        """
        Get mirror source index

        :param vm_name: VM name
        :param dev_index: Disk device index.
        :return mirror source index in integer
        """
        disk_list = vm_xml.VMXML.get_disk_source(vm_name)
        disk_mirror = disk_list[dev_index].find('mirror')
        if disk_mirror is None:
            test.fail("Failed to get disk mirror")
        disk_mirror_source = disk_mirror.find('source')
        return int(disk_mirror_source.get('index'))

    # Disk specific attributes.
    device = params.get("virt_disk_device", "disk")
    device_target = params.get("virt_disk_device_target", "vdd")
    device_format = params.get("virt_disk_device_format", "raw")
    device_type = params.get("virt_disk_device_type", "file")
    device_bus = params.get("virt_disk_device_bus", "virtio")
    backend_storage_type = params.get("backend_storage_type", "iscsi")

    # Backend storage auth info
    storage_size = params.get("storage_size", "1G")
    enable_auth = "yes" == params.get("enable_auth")
    use_auth_usage = "yes" == params.get("use_auth_usage")
    auth_sec_usage_type = params.get("auth_sec_usage_type", "iscsi")
    auth_sec_usage_target = params.get("auth_sec_usage_target", "libvirtiscsi")
    auth_sec_uuid = ""
    luks_sec_uuid = ""
    disk_auth_dict = {}
    disk_encryption_dict = {}

    status_error = "yes" == params.get("status_error")
    define_error = "yes" == params.get("define_error")

    mirror_mode_blockcommit = "yes" == params.get("mirror_mode_blockcommit", "no")
    mirror_mode_blockcopy = "yes" == params.get("mirror_mode_blockcopy", "no")
    default_snapshot_test = "yes" == params.get("default_snapshot_test", "no")
    block_threshold_value = params.get("block_threshold_value", "100M")
    snapshot_external_disks = []
    tmp_dir = data_dir.get_tmp_dir()
    dest_path = params.get("dest_path", "/var/lib/libvirt/images/newclone")

    pvt = None
    # Initialize one NbdExport object
    nbd = None
    img_file = os.path.join(data_dir.get_tmp_dir(),
                            "%s_test.img" % vm_name)
    if ((backend_storage_type == "luks") and
            not libvirt_version.version_compare(3, 9, 0)):
        test.cancel("Cannot support <encryption> inside disk in this libvirt version.")

    # Start VM and get all partitions in VM.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = utils_disk.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Additional disk images.
    disks_img = []
    try:
        # Clean up dirty secrets in test environments if there are.
        utils_secret.clean_up_secrets()
        # Setup backend storage
        if backend_storage_type == "file":
            image_filename = params.get("image_filename", "raw.img")
            disk_path = os.path.join(data_dir.get_tmp_dir(), image_filename)
            device_source = libvirt.create_local_disk(backend_storage_type, disk_path, storage_size, device_format)
            disks_img.append({"format": device_format,
                              "source": disk_path, "path": disk_path})
            disk_src_dict = {'attrs': {'file': device_source,
                                       'type_name': 'file'}}
        # Setup backend storage
        elif backend_storage_type == "luks":
            luks_encrypt_passwd = params.get("luks_encrypt_passwd", "password")
            luks_secret_passwd = params.get("luks_secret_passwd", "password")
            # Create secret
            luks_sec_uuid = libvirt.create_secret(params)
            logging.debug("A secret created with uuid = '%s'", luks_sec_uuid)
            virsh.secret_set_value(luks_sec_uuid, luks_secret_passwd,
                                   encode=True, ignore_status=False, debug=True)
            image_filename = params.get("image_filename", "raw.img")
            device_source = os.path.join(data_dir.get_tmp_dir(), image_filename)

            disks_img.append({"format": device_format,
                              "source": device_source, "path": device_source})
            disk_src_dict = {'attrs': {'file': device_source,
                                       'type_name': 'file'}}
            disk_encryption_dict = {"encryption": "luks",
                                    "secret": {"type": "passphrase",
                                               "uuid": luks_sec_uuid}}

            cmd = ("qemu-img create -f luks "
                   "--object secret,id=sec0,data=`printf '%s' | base64`,format=base64 "
                   "-o key-secret=sec0 %s %s" % (luks_encrypt_passwd, device_source, storage_size))
            if process.system(cmd, shell=True):
                test.error("Can't create a luks encrypted img by qemu-img")
        elif backend_storage_type == "iscsi":
            iscsi_host = params.get("iscsi_host")
            iscsi_port = params.get("iscsi_port")
            if device_type == "block":
                device_source = libvirt.setup_or_cleanup_iscsi(is_setup=True)
                disk_src_dict = {'attrs': {'dev': device_source}}
            elif device_type == "network":
                chap_user = params.get("chap_user", "redhat")
                chap_passwd = params.get("chap_passwd", "password")
                auth_sec_usage = params.get("auth_sec_usage",
                                            "libvirtiscsi")
                auth_sec_dict = {"sec_usage": "iscsi",
                                 "sec_target": auth_sec_usage}
                auth_sec_uuid = libvirt.create_secret(auth_sec_dict)
                # Set password of auth secret (not luks encryption secret)
                virsh.secret_set_value(auth_sec_uuid, chap_passwd,
                                       encode=True, ignore_status=False, debug=True)
                iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
                    is_setup=True, is_login=False, image_size=storage_size,
                    chap_user=chap_user, chap_passwd=chap_passwd,
                    portal_ip=iscsi_host)
                # ISCSI auth attributes for disk xml
                disk_auth_dict = {"auth_user": chap_user,
                                  "secret_type": auth_sec_usage_type,
                                  "secret_usage": auth_sec_usage_target}
                device_source = "iscsi://%s:%s/%s/%s" % (iscsi_host, iscsi_port,
                                                         iscsi_target, lun_num)
                disk_src_dict = {"attrs": {"protocol": "iscsi",
                                           "name": "%s/%s" % (iscsi_target, lun_num)},
                                 "hosts": [{"name": iscsi_host, "port": iscsi_port}]}
        elif backend_storage_type == "gluster":
            gluster_vol_name = params.get("gluster_vol_name", "gluster_vol1")
            gluster_pool_name = params.get("gluster_pool_name", "gluster_pool1")
            gluster_img_name = params.get("gluster_img_name", "gluster1.img")
            gluster_host_ip = gluster.setup_or_cleanup_gluster(
                    is_setup=True,
                    vol_name=gluster_vol_name,
                    pool_name=gluster_pool_name,
                    **params)

            device_source = "gluster://%s/%s/%s" % (gluster_host_ip,
                                                    gluster_vol_name,
                                                    gluster_img_name)
            cmd = ("qemu-img create -f %s "
                   "%s %s" % (device_format, device_source, storage_size))
            if process.system(cmd, shell=True):
                test.error("Can't create a gluster type img by qemu-img")
            disk_src_dict = {"attrs": {"protocol": "gluster",
                                       "name": "%s/%s" % (gluster_vol_name,
                                                          gluster_img_name)},
                             "hosts":  [{"name": gluster_host_ip,
                                         "port": "24007"}]}
        elif backend_storage_type == "ceph":
            ceph_host_ip = params.get("ceph_host_ip", "EXAMPLE_HOSTS")
            ceph_mon_ip = params.get("ceph_mon_ip", "EXAMPLE_MON_HOST")
            ceph_host_port = params.get("ceph_host_port", "EXAMPLE_PORTS")
            ceph_disk_name = params.get("ceph_disk_name", "EXAMPLE_SOURCE_NAME")
            ceph_client_name = params.get("ceph_client_name")
            ceph_client_key = params.get("ceph_client_key")
            ceph_auth_user = params.get("ceph_auth_user")
            ceph_auth_key = params.get("ceph_auth_key")
            enable_auth = "yes" == params.get("enable_auth")

            key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key")
            key_opt = ""
            # Keep ceph_cfg empty so cleanup knows whether a config file
            # was created during the test
            ceph_cfg = ""
            if not utils_package.package_install(["ceph-common"]):
                test.error("Failed to install ceph-common")
            # Create config file if it doesn't exist
            ceph_cfg = ceph.create_config_file(ceph_mon_ip)
            # If enable auth, prepare a local file to save key
            if ceph_client_name and ceph_client_key:
                with open(key_file, 'w') as f:
                    f.write("[%s]\n\tkey = %s\n" %
                            (ceph_client_name, ceph_client_key))
                key_opt = "--keyring %s" % key_file
                auth_sec_dict = {"sec_usage": auth_sec_usage_type,
                                 "sec_name": "ceph_auth_secret"}
                auth_sec_uuid = libvirt.create_secret(auth_sec_dict)
                virsh.secret_set_value(auth_sec_uuid, ceph_auth_key,
                                       debug=True)
                disk_auth_dict = {"auth_user": ceph_auth_user,
                                  "secret_type": auth_sec_usage_type,
                                  "secret_uuid": auth_sec_uuid}
            else:
                test.error("No ceph client name/key provided.")
            device_source = "rbd:%s:mon_host=%s:keyring=%s" % (ceph_disk_name,
                                                               ceph_mon_ip,
                                                               key_file)
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                   "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name))
            cmd_result = process.run(cmd, ignore_status=True, shell=True)
            logging.debug("pre clean up rbd disk if exists: %s", cmd_result)
            # Create a local image to be converted to the remote storage.
            disk_cmd = ("qemu-img create -f %s %s %s" %
                        (device_format, img_file, storage_size))
            process.run(disk_cmd, ignore_status=False, shell=True)
            # Convert the image to remote storage
            disk_path = ("rbd:%s:mon_host=%s" %
                         (ceph_disk_name, ceph_mon_ip))
            if ceph_client_name and ceph_client_key:
                disk_path += (":id=%s:key=%s" %
                              (ceph_auth_user, ceph_auth_key))
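            # Convert the local image into the rbd pool only when the
            # rbd image does not already exist ("rbd info || qemu-img
            # convert").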
            rbd_cmd = ("rbd -m %s %s info %s 2> /dev/null|| qemu-img convert -O"
                       " %s %s %s" % (ceph_mon_ip, key_opt, ceph_disk_name,
                                      device_format, img_file, disk_path))
            process.run(rbd_cmd, ignore_status=False, shell=True)
            disk_src_dict = {"attrs": {"protocol": "rbd",
                                       "name": ceph_disk_name},
                             "hosts":  [{"name": ceph_host_ip,
                                         "port": ceph_host_port}]}
        elif backend_storage_type == "nfs":
            pool_name = params.get("pool_name", "nfs_pool")
            pool_target = params.get("pool_target", "nfs_mount")
            pool_type = params.get("pool_type", "netfs")
            nfs_server_dir = params.get("nfs_server_dir", "nfs_server")
            emulated_image = params.get("emulated_image")
            image_name = params.get("nfs_image_name", "nfs.img")
            tmp_dir = data_dir.get_tmp_dir()
            pvt = libvirt.PoolVolumeTest(test, params)
            pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image)
            # Set virt_use_nfs
            virt_use_nfs = params.get("virt_use_nfs", "off")
            result = process.run("setsebool virt_use_nfs %s" % virt_use_nfs, shell=True)
            if result.exit_status:
                test.error("Failed to set virt_use_nfs value")

            nfs_mount_dir = os.path.join(tmp_dir, pool_target)
            device_source = os.path.join(nfs_mount_dir, image_name)
            # Create one image on nfs server
            libvirt.create_local_disk("file", device_source, '1', "raw")
            disks_img.append({"format": device_format,
                              "source": device_source, "path": device_source})
            disk_src_dict = {'attrs': {'file': device_source,
                                       'type_name': 'file'}}
        # Create a dir-based pool, then create one volume on it.
        elif backend_storage_type == "dir":
            pool_name = params.get("pool_name", "dir_pool")
            pool_target = params.get("pool_target")
            pool_type = params.get("pool_type")
            emulated_image = params.get("emulated_image")
            image_name = params.get("dir_image_name", "luks_1.img")
            # Create and start dir_based pool.
            pvt = libvirt.PoolVolumeTest(test, params)
            if not os.path.exists(pool_target):
                os.mkdir(pool_target)
            pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image)
            sp = libvirt_storage.StoragePool()
            if not sp.is_pool_active(pool_name):
                sp.set_pool_autostart(pool_name)
                sp.start_pool(pool_name)
            # Create one volume on the pool.
            volume_name = params.get("vol_name")
            volume_alloc = params.get("vol_alloc")
            volume_cap_unit = params.get("vol_cap_unit")
            volume_cap = params.get("vol_cap")
            volume_target_path = params.get("sec_volume")
            volume_target_format = params.get("target_format")
            volume_target_encypt = params.get("target_encypt", "")
            volume_target_label = params.get("target_label")
            vol_params = {"name": volume_name, "capacity": int(volume_cap),
                          "allocation": int(volume_alloc), "format":
                          volume_target_format, "path": volume_target_path,
                          "label": volume_target_label,
                          "capacity_unit": volume_cap_unit}
            try:
                # Creating a luks encryption volume is not supported on
                # libvirt versions lower than 2.5.0, so skip it.
                create_vol(pool_name, vol_params)
            except AssertionError as info:
                err_msgs = ("create: invalid option")
                if str(info).count(err_msgs):
                    test.cancel("Creating luks encryption volume "
                                "is not supported on this libvirt version")
                else:
                    test.error("Failed to create volume."
                               "Error: %s" % str(info))
            disk_src_dict = {'attrs': {'file': volume_target_path}}
            device_source = volume_target_path
        elif backend_storage_type == "nbd":
            # Get server hostname.
            hostname = process.run('hostname', ignore_status=False, shell=True, verbose=True).stdout_text.strip()
            # Setup backend storage
            nbd_server_host = hostname
            nbd_server_port = params.get("nbd_server_port")
            image_path = params.get("emulated_image", "/var/lib/libvirt/images/nbdtest.img")
            # Create NbdExport object
            nbd = NbdExport(image_path, image_format=device_format,
                            port=nbd_server_port)
            nbd.start_nbd_server()
            # Prepare disk source xml
            source_attrs_dict = {"protocol": "nbd"}
            disk_src_dict = {}
            disk_src_dict.update({"attrs": source_attrs_dict})
            disk_src_dict.update({"hosts": [{"name": nbd_server_host, "port": nbd_server_port}]})
            device_source = "nbd://%s:%s/%s" % (nbd_server_host,
                                                nbd_server_port,
                                                image_path)

        logging.debug("device source is: %s", device_source)

        # Add disk xml.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disk_xml = Disk(type_name=device_type)
        disk_xml.device = device
        disk_xml.target = {"dev": device_target, "bus": device_bus}
        driver_dict = {"name": "qemu", "type": device_format}
        disk_xml.driver = driver_dict
        disk_source = disk_xml.new_disk_source(**disk_src_dict)
        if disk_auth_dict:
            logging.debug("disk auth dict is: %s" % disk_auth_dict)
            disk_xml.auth = disk_xml.new_auth(**disk_auth_dict)
        if disk_encryption_dict:
            disk_encryption = disk_xml.new_encryption(**disk_encryption_dict)
            disk_xml.encryption = disk_encryption
        disk_xml.source = disk_source
        logging.debug("new disk xml is: %s", disk_xml)
        # Attach the new disk unless this is a mirror_mode_blockcommit
        # or mirror_mode_blockcopy case
        if not mirror_mode_blockcommit and not mirror_mode_blockcopy:
            vmxml.add_device(disk_xml)
        try:
            vmxml.sync()
            vm.start()
            vm.wait_for_login().close()
        except xcepts.LibvirtXMLError as xml_error:
            if not define_error:
                test.fail("Failed to define VM:\n%s", str(xml_error))
        except virt_vm.VMStartError as details:
            # When use wrong password in disk xml for cold plug cases,
            # VM cannot be started
            if status_error:
                logging.info("VM failed to start as expected: %s", str(details))
            else:
                test.fail("VM should start but failed: %s" % str(details))
        func_name = trigger_block_threshold_event
        # Additional operations before set block threshold
        if backend_storage_type == "file":
            logging.info("Create snapshot...")
            snap_opt = " %s --disk-only "
            snap_opt += "%s,snapshot=external,file=%s"
            if default_snapshot_test:
                for index in range(1, 5):
                    snapshot_name = "snapshot_%s" % index
                    snap_path = "%s/%s_%s.snap" % (tmp_dir, vm_name, index)
                    snapshot_external_disks.append(snap_path)
                    snap_option = snap_opt % (snapshot_name, device_target, snap_path)
                    virsh.snapshot_create_as(vm_name, snap_option,
                                             ignore_status=False, debug=True)

            if mirror_mode_blockcommit:
                if not libvirt_version.version_compare(6, 6, 0):
                    test.cancel("Set threshold for disk mirroring feature is not supported on current version")
                vmxml.del_device(disk_xml)
                virsh.snapshot_create_as(vm_name, "--disk-only --no-metadata",
                                         ignore_status=False, debug=True)
                # Do active blockcommit in background.
                blockcommit_options = "--active"
                mirror_blockcommit_thread = threading.Thread(target=trigger_block_commit,
                                                             args=(vm_name, 'vda', blockcommit_options,),
                                                             kwargs={'debug': True})
                mirror_blockcommit_thread.start()
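                # "vda[1]" uses libvirt's indexed disk addressing: while
                # the active commit runs, domblkthreshold can watch a
                # specific image in the backing chain by its index.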
                device_target = "vda[1]"
                func_name = trigger_mirror_threshold_event
            if mirror_mode_blockcopy:
                if not libvirt_version.version_compare(6, 6, 0):
                    test.cancel("Set threshold for disk mirroring feature is not supported on current version")
                # Do transient blockcopy in the background.
                blockcopy_options = "--transient-job "
                # Do cleanup
                if os.path.exists(dest_path):
                    libvirt.delete_local_disk("file", dest_path)
                mirror_blockcopy_thread = threading.Thread(target=trigger_block_copy,
                                                           args=(vm_name, 'vda', dest_path, blockcopy_options,),
                                                           kwargs={'debug': True})
                mirror_blockcopy_thread.start()
                mirror_blockcopy_thread.join(10)
                device_target = "vda[%d]" % get_mirror_source_index(vm_name)
                func_name = trigger_mirror_threshold_event
        set_vm_block_domblkthreshold(vm_name, device_target,
                                     block_threshold_value, debug=True)
        cli_thread = threading.Thread(target=func_name,
                                      args=(vm, device_target))
        cli_thread.start()
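        # A matching event printed by "virsh event --loop" looks roughly
        # like (illustrative): event 'block-threshold' for domain 'vm1'
        # dev vdd[1]: threshold '104857600', excess '1048576'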
        check_threshold_event(vm_name, event_type, block_threshold_timeout,
                              block_threshold_option, debug=True)
    finally:
        # Delete snapshots.
        if virsh.domain_exists(vm_name):
            # To delete snapshots, destroy the VM first.
            if vm.is_alive():
                vm.destroy()
            libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)

        vmxml_backup.sync("--snapshots-metadata")

        if os.path.exists(img_file):
            libvirt.delete_local_disk("file", img_file)
        for img in disks_img:
            if os.path.exists(img["path"]):
                libvirt.delete_local_disk("file", img["path"])

        for disk in snapshot_external_disks:
            libvirt.delete_local_disk('file', disk)

        if os.path.exists(dest_path):
            libvirt.delete_local_disk("file", dest_path)

        # Clean up backend storage
        if backend_storage_type == "iscsi":
            libvirt.setup_or_cleanup_iscsi(is_setup=False)
        elif backend_storage_type == "gluster":
            gluster.setup_or_cleanup_gluster(is_setup=False,
                                             vol_name=gluster_vol_name,
                                             pool_name=gluster_pool_name,
                                             **params)
        elif backend_storage_type == "ceph":
            # Remove ceph configure file if created.
            if ceph_cfg:
                os.remove(ceph_cfg)
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                   "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name))
            cmd_result = process.run(cmd, ignore_status=True, shell=True)
            logging.debug("result of rbd removal: %s", cmd_result)
            if os.path.exists(key_file):
                os.remove(key_file)
        elif backend_storage_type == "nfs":
            result = process.run("setsebool virt_use_nfs off",
                                 shell=True)
            if result.exit_status:
                logging.info("Failed to restore virt_use_nfs value")
        elif backend_storage_type == "nbd":
            if nbd:
                try:
                    nbd.cleanup()
                except Exception as nbd_ex:
                    logging.info("Failed to clean up nbd: %s", str(nbd_ex))
        # Clean up secrets
        if auth_sec_uuid:
            virsh.secret_undefine(auth_sec_uuid)
        if luks_sec_uuid:
            virsh.secret_undefine(luks_sec_uuid)

        # Clean up pools
        if pvt:
            pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image)
Exemplo n.º 17
def run(test, params, env):
    """
    Test virtio/virtio-transitional/virtio-non-transitional model of disk

    :param test: Test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment
    """
    def reboot():
        """
        Shutdown and restart guest, then wait for login
        """
        vm.destroy()
        vm.start()
        vm.wait_for_login()

    def attach(xml, device_name, plug_method="hot"):
        """
        Attach device with xml, for both hot and cold plug

        :param xml: Device xml to be attached
        :param device_name: Device name to be attached
        :param plug_method: hot or cold for plug method
        """
        device_before_plug = find_device(vm, params)
        with open(xml) as disk_file:
            logging.debug("Attach disk by XML: %s", disk_file.read())
        file_arg = xml
        if plug_method == "cold":
            file_arg += ' --config'
        s_attach = virsh.attach_device(domainarg=vm_name,
                                       filearg=file_arg,
                                       debug=True)
        libvirt.check_exit_status(s_attach)
        if plug_method == "cold":
            reboot()
        detect_time = params.get("detect_disk_time", 20)
        plug_disks = utils_misc.wait_for(
            lambda: get_new_device(device_before_plug, find_device(vm, params)
                                   ), detect_time)
        if not plug_disks:
            test.fail("Failed to hotplug device %s to guest" % device_name)

    def detach(xml, device_name, unplug_method="hot"):
        """
        Detach device with xml, for both hot and cold unplug

        :param xml: Device xml to be detached
        :param device_name: Device name to be detached
        :param unplug_method: hot or cold for unplug method
        """
        with open(xml) as disk_file:
            logging.debug("Detach device by XML: %s", disk_file.read())
        file_arg = xml
        if unplug_method == "cold":
            file_arg = xml + ' --config'
        s_detach = virsh.detach_device(domainarg=vm_name,
                                       filearg=file_arg,
                                       debug=True)
        if unplug_method == "cold":
            reboot()
        libvirt.check_exit_status(s_detach)

    def attach_disk():  # pylint: disable=W0611
        """
        Sub test for attach disk, including hot and cold plug/unplug
        """
        plug_method = params.get("plug_method", "hot")
        device_source_format = params.get("at_disk_source_format", "raw")
        device_target = params.get("at_disk_target", "vdb")
        device_disk_bus = params.get("at_disk_bus", "virtio")
        device_source_name = params.get("at_disk_source", "attach.img")
        detect_time = params.get("detect_disk_time", 10)
        device_source_path = os.path.join(tmp_dir, device_source_name)
        device_source = libvirt.create_local_disk(
            "file",
            path=device_source_path,
            size="1",
            disk_format=device_source_format)

        def _generate_disk_xml():
            """Generate xml for device hotplug/unplug usage"""
            diskxml = devices.disk.Disk("file")
            diskxml.device = "disk"
            source_params = {"attrs": {'file': device_source}}
            diskxml.source = diskxml.new_disk_source(**source_params)
            diskxml.target = {'dev': device_target, 'bus': device_disk_bus}
            if params.get("disk_model"):
                diskxml.model = params.get("disk_model")
            if pci_bridge_index and device_disk_bus == 'virtio':
                addr = diskxml.new_disk_address('pci')
                addr.set_attrs({'bus': pci_bridge_index, 'slot': slot})
                diskxml.address = addr
            return diskxml.xml

        v_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        slot = get_free_slot(pci_bridge_index, v_xml)
        disk_xml = _generate_disk_xml()
        attach(disk_xml, device_target, plug_method)
        if plug_method == "cold":
            disk_xml = _generate_disk_xml()
        detach(disk_xml, device_target, plug_method)
        if not utils_misc.wait_for(
                lambda: not libvirt.device_exists(vm, device_target),
                detect_time):
            test.fail("Detach disk failed.")

    def attach_controller():  # pylint: disable=W0611
        """
        Sub test for attach controller
        """
        v_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        contr_index = len(v_xml.get_controllers('scsi'))
        contr_type = params.get("controller_type", 'scsi')
        contr_model = params.get("controller_model", "virtio-scsi")
        contr_dict = {
            'controller_type': contr_type,
            'controller_model': contr_model,
            'controller_index': contr_index
        }
        if pci_bridge_index:
            slot = get_free_slot(pci_bridge_index, v_xml)
            addr = '{"bus": %s, "slot": %s}' % (pci_bridge_index, slot)
            contr_dict.update({'controller_addr': addr})
        cntl_add = libvirt.create_controller_xml(contr_dict=contr_dict)
        attach(cntl_add.xml, params['controller_model'])
        cntl_add = libvirt.create_controller_xml(contr_dict=contr_dict)
        detach(cntl_add.xml, params['controller_model'])

    def snapshot():  # pylint: disable=W0611
        """
        Sub test for snapshot
        """
        for i in range(1, 4):
            ret = virsh.snapshot_create_as(vm_name, "sn%s --disk-only" % i)
            libvirt.check_exit_status(ret)
        libvirtd_obj = utils_libvirtd.Libvirtd()
        libvirtd_obj.restart()
        save_path = os.path.join(tmp_dir, "test.save")
        ret = virsh.save(vm_name, save_path)
        libvirt.check_exit_status(ret)
        ret = virsh.restore(save_path)
        libvirt.check_exit_status(ret)
        session = vm.wait_for_login()
        session.close()

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(params["main_vm"])
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    add_pcie_to_pci_bridge = params.get("add_pcie_to_pci_bridge")
    pci_bridge_index = None
    tmp_dir = data_dir.get_tmp_dir()
    guest_src_url = params.get("guest_src_url")
    set_crypto_policy = params.get("set_crypto_policy")

    if not libvirt_version.version_compare(5, 0, 0):
        test.cancel("This libvirt version doesn't support "
                    "virtio-transitional model.")

    if guest_src_url:
        image_name = params['image_path']
        target_path = utils_misc.get_path(data_dir.get_data_dir(), image_name)
        if not os.path.exists(target_path):
            download.get_file(guest_src_url, target_path)
        params["blk_source_name"] = target_path
    if set_crypto_policy:
        utils_conn.update_crypto_policy(set_crypto_policy)
    try:
        if add_pcie_to_pci_bridge:
            pci_controllers = vmxml.get_controllers('pci')
            for controller in pci_controllers:
                if controller.get('model') == 'pcie-to-pci-bridge':
                    pci_bridge = controller
                    break
            else:
                contr_dict = {
                    'controller_type': 'pci',
                    'controller_model': 'pcie-to-pci-bridge'
                }
                pci_bridge = libvirt.create_controller_xml(contr_dict)
                libvirt.add_controller(vm_name, pci_bridge)
                pci_bridge = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)\
                    .get_controllers('pci', 'pcie-to-pci-bridge')[0]
            pci_bridge_index = '%0#4x' % int(pci_bridge.get("index"))
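            # '%0#4x' renders the controller index as zero-padded hex
            # (e.g. 3 -> '0x03'), the form used in <address bus=.../>
            # attributes.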
        if (params["os_variant"] == 'rhel6'
                or 'rhel6' in params.get("shortname")):
            iface_params = {'model': 'virtio-transitional'}
            libvirt.modify_vm_iface(vm_name, "update_iface", iface_params)
        libvirt.set_vm_disk(vm, params)
        if pci_bridge_index:
            v_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            if params.get("disk_target_bus") == "scsi":
                scsi_controllers = v_xml.get_controllers('scsi')
                for index, controller in enumerate(scsi_controllers):
                    controller.find('address').set('bus', pci_bridge_index)
                    controller.find('address').set(
                        'slot', get_free_slot(pci_bridge_index, v_xml))
            else:
                disks = v_xml.get_devices(device_type="disk")
                for index, disk in enumerate(disks):
                    args = {
                        'bus': pci_bridge_index,
                        'slot': get_free_slot(pci_bridge_index, v_xml)
                    }
                    libvirt.set_disk_attr(v_xml, disk.target['dev'], 'address',
                                          args)
            v_xml.xmltreefile.write()
            v_xml.sync()
        if not vm.is_alive():
            vm.start()
        vm.wait_for_login()
        test_step = params.get("sub_test_step")
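        # sub_test_step names one of the sub tests defined above
        # (attach_disk, attach_controller or snapshot); it is resolved
        # and invoked via eval() below.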
        if test_step:
            eval(test_step)()
    finally:
        vm.destroy()
        libvirt.clean_up_snapshots(vm_name)
        backup_xml.sync()
        if guest_src_url and target_path:
            libvirt.delete_local_disk("file", path=target_path)
        if set_crypto_policy:
            utils_conn.update_crypto_policy()
Exemplo n.º 18
def run(test, params, env):
    """
    Test virsh migrate command.
    """
    def check_vm_network_accessed(session=None):
        """
        Check that the VM is reachable over the network; used both
        before and after migration

        :param session: The session object to the host

        :raise: test.fail when ping fails
        """
        # Confirm local/remote VM can be accessed through network.
        logging.info("Check VM network connectivity")
        s_ping, _ = utils_test.ping(vm.get_address(),
                                    count=10,
                                    timeout=20,
                                    output_func=logging.debug,
                                    session=session)
        if s_ping != 0:
            if session:
                session.close()
            test.fail("%s did not respond after %d sec." % (vm.name, 20))

    def check_migration_res(result):
        """
        Check if the migration result is as expected

        :param result: the output of migration
        :raise: test.fail if test is failed
        """
        if not result:
            test.error("No migration result is returned.")

        logging.info("Migration out: %s", result.stdout_text.strip())
        logging.info("Migration error: %s", result.stderr_text.strip())

        if status_error:  # Migration should fail
            if err_msg:  # Special error messages are expected
                if not re.search(err_msg, result.stderr_text.strip()):
                    test.fail("Can not find the expected patterns '%s' in "
                              "output '%s'" %
                              (err_msg, result.stderr_text.strip()))
                else:
                    logging.debug("It is the expected error message")
            else:
                if int(result.exit_status) != 0:
                    logging.debug("Migration failure is expected result")
                else:
                    test.fail("Migration success is unexpected result")
        else:
            if int(result.exit_status) != 0:
                test.fail(result.stderr_text.strip())

    check_parameters(test, params)

    # Params for NFS shared storage
    shared_storage = params.get("migrate_shared_storage", "")
    if shared_storage == "":
        default_guest_asset = defaults.get_default_guest_os_info()['asset']
        default_guest_asset = "%s.qcow2" % default_guest_asset
        shared_storage = os.path.join(params.get("nfs_mount_dir"),
                                      default_guest_asset)
        logging.debug("shared_storage:%s", shared_storage)

    # Params to update disk using shared storage
    params["disk_type"] = "file"
    params["disk_source_protocol"] = "netfs"
    params["mnt_path_name"] = params.get("nfs_mount_dir")

    # Local variables
    virsh_args = {"debug": True}
    virsh_options = params.get("virsh_options", "")

    server_ip = params.get("server_ip")
    server_user = params.get("server_user", "root")
    server_pwd = params.get("server_pwd")
    client_ip = params.get("client_ip")
    client_pwd = params.get("client_pwd")
    extra = params.get("virsh_migrate_extra")
    options = params.get("virsh_migrate_options")

    guest_src_url = params.get("guest_src_url")
    guest_src_path = params.get("guest_src_path",
                                "/var/lib/libvirt/images/guest.img")
    check_disk = "yes" == params.get("check_disk")
    disk_model = params.get("disk_model")
    disk_target = params.get("disk_target", "vda")
    controller_model = params.get("controller_model")

    check_interface = "yes" == params.get("check_interface")
    iface_type = params.get("iface_type", "network")
    iface_model = params.get("iface_model", "virtio")
    iface_params = {
        'type': iface_type,
        'model': iface_model,
        'del_addr': True,
        'source': '{"network": "default"}'
    }

    check_memballoon = "yes" == params.get("check_memballoon")
    membal_model = params.get("membal_model")

    check_rng = "yes" == params.get("check_rng")
    rng_model = params.get("rng_model")

    migr_vm_back = "yes" == params.get("migrate_vm_back", "no")
    status_error = "yes" == params.get("status_error", "no")
    remote_virsh_dargs = {
        'remote_ip': server_ip,
        'remote_user': server_user,
        'remote_pwd': server_pwd,
        'unprivileged_user': None,
        'ssh_remote_auth': True
    }

    xml_check_after_mig = params.get("guest_xml_check_after_mig")

    err_msg = params.get("err_msg")
    vm_session = None
    remote_virsh_session = None
    vm = None
    mig_result = None

    if not libvirt_version.version_compare(5, 0, 0):
        test.cancel("This libvirt version doesn't support "
                    "virtio-transitional model.")

    # params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    params["virsh_migrate_connect_uri"] = libvirt_vm.complete_uri(
        params.get("migrate_source_host"))
    src_uri = params.get("virsh_migrate_connect_uri")
    dest_uri = params.get("virsh_migrate_desturi")

    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()

    # Back up the original VM XML so it can be restored on cleanup.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()

    migration_test = migration.MigrationTest()
    try:
        # Create a remote runner for later use
        runner_on_target = remote.RemoteRunner(host=server_ip,
                                               username=server_user,
                                               password=server_pwd)
        # download guest source and update interface model to keep guest up
        if guest_src_url:
            blk_source = download.get_file(guest_src_url, guest_src_path)
            if not blk_source:
                test.error("Fail to download image.")
            params["blk_source_name"] = blk_source
            if (not check_interface) and iface_model:
                iface_dict = {'model': iface_model}
                libvirt.modify_vm_iface(vm_name, "update_iface", iface_dict)
            if not check_disk:
                params["disk_model"] = "virtio-transitional"

        if check_interface:
            libvirt.modify_vm_iface(vm_name, "update_iface", iface_params)

        if check_memballoon:
            membal_dict = {'membal_model': membal_model}
            dom_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            libvirt.update_memballoon_xml(dom_xml, membal_dict)

        if check_rng:
            rng_dict = {'rng_model': rng_model}
            rng_xml = libvirt.create_rng_xml(rng_dict)
            dom_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            libvirt.add_vm_device(dom_xml, rng_xml)

        # Change the disk of the vm
        libvirt.set_vm_disk(vm, params)

        if not vm.is_alive():
            vm.start()

        logging.debug("Guest xml after starting:\n%s",
                      vm_xml.VMXML.new_from_dumpxml(vm_name))

        # Check local guest network connection before migration
        vm_session = vm.wait_for_login(restart_network=True)
        check_vm_network_accessed()

        # Execute migration process
        vms = [vm]
        migration_test.do_migration(vms,
                                    None,
                                    dest_uri,
                                    'orderly',
                                    options,
                                    thread_timeout=900,
                                    ignore_status=True,
                                    virsh_opt=virsh_options,
                                    extra_opts=extra)
        mig_result = migration_test.ret

        check_migration_res(mig_result)

        if int(mig_result.exit_status) == 0:
            server_session = remote.wait_for_login('ssh', server_ip, '22',
                                                   server_user, server_pwd,
                                                   r"[\#\$]\s*$")
            check_vm_network_accessed(server_session)
            server_session.close()

        if xml_check_after_mig:
            if not remote_virsh_session:
                remote_virsh_session = virsh.VirshPersistent(
                    **remote_virsh_dargs)
            target_guest_dumpxml = (remote_virsh_session.dumpxml(
                vm_name, debug=True, ignore_status=True).stdout_text.strip())
            if check_disk:
                check_str = disk_model if disk_model else controller_model
            if check_interface:
                check_str = iface_model
            if check_memballoon:
                check_str = membal_model
            if check_rng:
                check_str = rng_model

            xml_check_after_mig = "%s'%s'" % (xml_check_after_mig, check_str)
            if not re.search(xml_check_after_mig, target_guest_dumpxml):
                test.fail("Fail to search '%s' in target guest XML:\n%s" %
                          (xml_check_after_mig, target_guest_dumpxml))
            remote_virsh_session.close_session()

        # Execute migration from remote
        if migr_vm_back:
            ssh_connection = utils_conn.SSHConnection(server_ip=client_ip,
                                                      server_pwd=client_pwd,
                                                      client_ip=server_ip,
                                                      client_pwd=server_pwd)
            try:
                ssh_connection.conn_check()
            except utils_conn.ConnectionError:
                ssh_connection.conn_setup()
                ssh_connection.conn_check()

            # Pre migration setup for local machine
            migration_test.migrate_pre_setup(src_uri, params)
            cmd = "virsh migrate %s %s %s" % (vm_name, virsh_options, src_uri)
            logging.debug("Start migration: %s", cmd)
            cmd_result = remote.run_remote_cmd(cmd, params, runner_on_target)
            logging.info(cmd_result)
            if cmd_result.exit_status:
                test.fail("Failed to run '%s' on remote: %s" %
                          (cmd, cmd_result))

    finally:
        logging.debug("Recover test environment")
        # Clean VM on destination
        vm.connect_uri = ''
        migration_test.cleanup_dest_vm(vm, src_uri, dest_uri)

        logging.info("Recovery VM XML configration")
        orig_config_xml.sync()
        logging.debug("The current VM XML:\n%s", orig_config_xml.xmltreefile)

        # Clean up of pre migration setup for local machine
        if migr_vm_back:
            if 'ssh_connection' in locals():
                ssh_connection.auto_recover = True
            migration_test.migrate_pre_setup(src_uri, params, cleanup=True)
        if remote_virsh_session:
            remote_virsh_session.close_session()

        logging.info("Remove local NFS image")
        source_file = params.get("source_file")
        libvirt.delete_local_disk("file", path=source_file)
        if guest_src_url and blk_source:
            libvirt.delete_local_disk("file", path=blk_source)
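
# A hedged, minimal sketch of the post-migration XML check used in the
# example above: dump the guest XML and search it for the expected device
# model. The helper name and the quoting pattern are illustrative
# assumptions, not part of the original test.
import re

from virttest import virsh


def device_model_in_dumpxml(vm_name, expected_model):
    """Return True if expected_model appears in the guest dumpxml."""
    xml_text = virsh.dumpxml(vm_name, ignore_status=True).stdout_text
    return re.search("'%s'" % expected_model, xml_text) is not None
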
            elif abnormal_type == "migration_interupted":
                params["thread_timeout"] = 120
            # Raise after cleanup
            if fail_flag:
                raise

            # Migrate it again to confirm failed reason
            copied_migration(vms, params)
    finally:
        # Recover created vm
        if vm.is_alive():
            vm.destroy()
        if disks_count and vm.name == new_vm_name:
            vm.undefine()
        for disk in added_disks_list:
            utlv.delete_local_disk(disk_type, disk)
            rdm.remove_path(disk_type, disk)
        rdm.remove_path("file", file_path)
        if pool_created:
            pool_destroyed = create_destroy_pool_on_remote("destroy", params)
            if not pool_destroyed:
                raise error.TestError("Destroy pool on remote host '%s' failed."
                                      % remote_host)

        if disk_type == "lvm":
            rdm.remove_vg(vgname)
            rdm.iscsi_login_setup(local_host, target2, is_login=False)
            try:
                lv_utils.vg_remove(vgname)
            except:
                pass    # let it go to confirm cleanup iscsi device
Example No. 20
def run(test, params, env):
    """
    Test KVM migration scenarios
    """
    migrate_options = params.get("migrate_options", "")
    migrate_postcopy = params.get("migrate_postcopy", "")
    migrate_dest_ip = params.get("migrate_dest_host")
    nfs_mount_path = params.get("nfs_mount_dir")
    migrate_start_state = params.get("migrate_start_state", "paused")
    postcopy_func = None
    if migrate_postcopy:
        postcopy_func = virsh.migrate_postcopy
    migrate_type = params.get("migrate_type", "orderly")
    vm_state = params.get("migrate_vm_state", "running")
    ping_count = int(params.get("ping_count", 15))

    vms = params.get("vms").split()
    vm_list = env.get_all_vms()

    # Params to update disk using shared storage
    params["disk_type"] = "file"
    params["disk_source_protocol"] = "netfs"
    params["mnt_path_name"] = nfs_mount_path

    src_uri = "qemu:///system"
    dest_uri = libvirt_vm.complete_uri(params["server_ip"])

    vmxml_dict = {}

    migrate_setup = libvirt.MigrationTest()
    try:
        for vm in vm_list:
            vmxml_dict[vm.name] = vm_xml.VMXML.new_from_dumpxml(vm.name)
            params["source_dist_img"] = "%s-nfs-img" % vm.name
            if vm.is_alive():
                vm.destroy()
            libvirt.set_vm_disk(vm, params)
            migrate_setup.ping_vm(vm, test, params, ping_count=ping_count)
        try:
            migrate_setup.do_migration(vm_list, src_uri, dest_uri,
                                       migrate_type, migrate_options,
                                       func=postcopy_func,
                                       migrate_start_state=migrate_start_state)
        except Exception as info:
            test.fail(info)
        for vm in vm_list:
            if not migrate_setup.check_vm_state(vm.name, vm_state, dest_uri):
                test.fail("Migrated VMs failed to be in %s state at "
                          "destination" % vm_state)
            logging.info("Guest state is '%s' at destination is as expected",
                         vm_state)
            migrate_setup.ping_vm(vm, test, params, uri=dest_uri, ping_count=ping_count)
    finally:
        logging.debug("cleanup the migration setup in source/destination")
        for vm in vm_list:
            if migrate_setup:
                migrate_setup.cleanup_dest_vm(vm, src_uri, dest_uri)
            if vm.exists() and vm.is_persistent():
                vm.undefine()
            if vm.is_alive():
                vm.destroy()
        for source_file in params.get("source_file_list", []):
            libvirt.delete_local_disk("file", path=source_file)

        if vmxml_dict:
            for key in vmxml_dict:
                vmxml_dict[key].define()
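
# Hedged illustration of how the migration URIs above are composed from
# host addresses; the exact scheme complete_uri() returns depends on the
# environment, and the qemu+ssh form shown is an assumption.
from virttest import libvirt_vm

dest_uri = libvirt_vm.complete_uri("192.168.122.10")
# expected to look like "qemu+ssh://192.168.122.10/system" in a typical
# ssh-transport setup, alongside the qemu:///system source URI used above
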
Example No. 21
            elif abnormal_type == "migration_interupted":
                params["thread_timeout"] = 120
            # Raise after cleanup
            if fail_flag:
                raise

            # Migrate it again to confirm failed reason
            copied_migration(vms, params)
    finally:
        # Recover created vm
        if vm.is_alive():
            vm.destroy()
        if disks_count and vm.name == new_vm_name:
            vm.undefine()
        for disk in added_disks_list:
            utlv.delete_local_disk(disk_type, disk)
            rdm.remove_path(disk_type, disk)
        rdm.remove_path("file", file_path)
        if pool_created:
            pool_destroyed = create_destroy_pool_on_remote("destroy", params)
            if not pool_destroyed:
                raise error.TestError(
                    "Destroy pool on remote host '%s' failed." % remote_host)

        if disk_type == "lvm":
            rdm.remove_vg(vgname)
            rdm.iscsi_login_setup(local_host, target2, is_login=False)
            try:
                lv_utils.vg_remove(vgname)
            except Exception:
                pass  # let it go, to confirm cleanup of the iscsi device
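
# A hedged sketch of the LVM-over-iSCSI teardown order used in the cleanup
# above: remove the volume group while the backing device is still logged
# in, then close the iSCSI session. The helper name is illustrative, and
# the lv_utils import path is an assumption based on the calls above.
from virttest import lv_utils
from virttest.utils_test import libvirt as utlv


def teardown_lvm_over_iscsi(vg_name):
    try:
        lv_utils.vg_remove(vg_name)
    except Exception:
        pass  # keep going so the iSCSI session is still cleaned up
    utlv.setup_or_cleanup_iscsi(False)
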
Example No. 22
                check_count_after_shutdown = False
        elif test_cmd == "detach-disk":
            if disk_count_after_shutdown < disk_count_before_cmd:
                check_count_after_shutdown = False

    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
        if os.path.exists(save_file):
            os.remove(save_file)
        if test_block_dev:
            if test_logcial_dev:
                libvirt.delete_local_disk("lvm",
                                          vgname=vg_name,
                                          lvname=lv_name)
                lv_utils.vg_remove(vg_name)
                utils.system("pvremove %s" % device_source, ignore_status=True)
            libvirt.setup_or_cleanup_iscsi(False)
        else:
            libvirt.delete_local_disk("file", device_source)

    # Check results.
    if status_error:
        if not status:
            raise error.TestFail("virsh %s exit with unexpected value." %
                                 test_cmd)
    else:
        if status:
            raise error.TestFail("virsh %s failed." % test_cmd)
def run(test, params, env):
    """
    Test virsh migrate-setmaxdowntime command.

    1) Prepare migration environment
    2) Start migration and set migrate-maxdowntime
    3) Cleanup environment(migrated vm on destination)
    4) Check result
    """
    dest_uri = params.get(
        "virsh_migrate_dest_uri", "qemu+ssh://MIGRATE_EXAMPLE/system")
    src_uri = params.get(
        "virsh_migrate_src_uri", "qemu+ssh://MIGRATE_EXAMPLE/system")
    if dest_uri.count('///') or dest_uri.count('MIGRATE_EXAMPLE'):
        raise error.TestNAError("Set your destination uri first.")
    if src_uri.count('MIGRATE_EXAMPLE'):
        raise error.TestNAError("Set your source uri first.")
    if src_uri == dest_uri:
        raise error.TestNAError("You should not set dest uri same as local.")
    vm_ref = params.get("setmmdt_vm_ref", "domname")
    pre_vm_state = params.get("pre_vm_state", "running")
    status_error = "yes" == params.get("status_error", "no")
    do_migrate = "yes" == params.get("do_migrate", "yes")
    migrate_maxdowntime = params.get("migrate_maxdowntime", 1.000)
    if migrate_maxdowntime == "":
        downtime = ""
    else:
        # virsh expects milliseconds; multiply before truncating so that
        # fractional values such as 0.3 become 300 rather than 0
        downtime = int(float(migrate_maxdowntime) * 1000)
    extra = params.get("setmmdt_extra")

    # A delay between threads
    delay_time = int(params.get("delay_time", 1))
    # timeout of threads
    thread_timeout = 180

    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    domuuid = vm.get_uuid()

    grep_str_local = params.get("grep_str_from_local_libvirt_log", "")

    # For safety reasons, we'd better back up original guest xml
    orig_config_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    if not orig_config_xml:
        raise error.TestError("Backing up xmlfile failed.")

    # Params to configure libvirtd.conf
    log_file = "/var/log/libvirt/libvirtd.log"
    log_level = "1"
    log_filters = '"1:json 1:libvirt 1:qemu 1:monitor 3:remote 4:event"'
    libvirtd_conf_dict = {"log_level": log_level,
                          "log_filters": log_filters,
                          "log_outputs": '"%s:file:%s"' % (log_level, log_file)}

    # Update libvirtd config with new parameters
    libvirtd = utils_libvirtd.Libvirtd()
    libvirtd_conf = config_libvirt(libvirtd_conf_dict)
    libvirtd.restart()

    # Params to update disk using shared storage
    params["disk_type"] = "file"
    params["disk_source_protocol"] = "netfs"
    params["mnt_path_name"] = params.get("nfs_mount_dir")

    # Params to setup SSH connection
    params["server_ip"] = params.get("migrate_dest_host")
    params["server_pwd"] = params.get("migrate_dest_pwd")
    params["client_ip"] = params.get("migrate_source_host")
    params["client_pwd"] = params.get("migrate_source_pwd")
    params["nfs_client_ip"] = params.get("migrate_dest_host")
    params["nfs_server_ip"] = params.get("migrate_source_host")

    # Params to enable SELinux boolean on remote host
    params["remote_boolean_varible"] = "virt_use_nfs"
    params["remote_boolean_value"] = "on"
    params["set_sebool_remote"] = "yes"

    remote_host = params.get("migrate_dest_host")
    username = params.get("migrate_dest_user", "root")
    password = params.get("migrate_dest_pwd")
    # Config ssh autologin for remote host
    ssh_key.setup_ssh_key(remote_host, username, password, port=22)

    setmmdt_dargs = {'debug': True, 'ignore_status': True, 'uri': src_uri}
    migrate_dargs = {'debug': True, 'ignore_status': True}

    seLinuxBool = None
    nfs_client = None
    local_selinux_bak = ""

    try:
        # Update the disk using shared storage
        libvirt.set_vm_disk(vm, params)

        # Backup the SELinux status on local host for recovering
        local_selinux_bak = params.get("selinux_status_bak", "")

        # Configure NFS client on remote host
        nfs_client = nfs.NFSClient(params)
        nfs_client.setup()

        logging.info("Enable virt NFS SELinux boolean on target host")
        seLinuxBool = utils_misc.SELinuxBoolean(params)
        seLinuxBool.setup()

        if not vm.is_alive():
            vm.start()
        vm.wait_for_login()
        domid = vm.get_id()

        # Confirm how to reference a VM.
        if vm_ref == "domname":
            vm_ref = vm_name
        elif vm_ref == "domid":
            vm_ref = domid
        elif vm_ref == "domuuid":
            vm_ref = domuuid

        # Prepare vm state
        if pre_vm_state == "paused":
            vm.pause()
        elif pre_vm_state == "shutoff":
            vm.destroy(gracefully=False)
            # Ensure VM in 'shut off' status
            utils_misc.wait_for(lambda: vm.state() == "shut off", 30)

        # Set max migration downtime must be during migration
        # Using threads for synchronization
        threads = []
        if do_migrate:
            threads.append(threading.Thread(target=thread_func_live_migration,
                                            args=(vm, dest_uri,
                                                  migrate_dargs)))

        threads.append(threading.Thread(target=thread_func_setmmdt,
                                        args=(vm_ref, downtime, extra,
                                              setmmdt_dargs)))
        for thread in threads:
            thread.start()
            # Migration must be executing before setting maxdowntime
            time.sleep(delay_time)
        # Wait until thread is over
        for thread in threads:
            thread.join(thread_timeout)

        if not status_error or not do_migrate:
            logging.debug("To match the expected pattern '%s' ...",
                          grep_str_local)
            cmd = "grep -E '%s' %s" % (grep_str_local, log_file)
            cmdResult = process.run(cmd, shell=True, verbose=False)
            logging.debug(cmdResult)

    finally:
        # Clean up.
        if do_migrate:
            logging.debug("Cleanup VM on remote host...")
            cleanup_dest(vm, src_uri, dest_uri)

        if orig_config_xml:
            logging.debug("Recover VM XML...")
            orig_config_xml.sync()

        if seLinuxBool:
            logging.info("Recover NFS SELinux boolean on remote host...")
            seLinuxBool.cleanup(True)

        if nfs_client:
            logging.info("Cleanup NFS client environment...")
            nfs_client.cleanup()

        logging.info("Remove the NFS image...")
        source_file = params.get("source_file")
        libvirt.delete_local_disk("file", path=source_file)

        logging.info("Cleanup NFS server environment...")
        exp_dir = params.get("export_dir")
        mount_dir = params.get("mnt_path_name")
        libvirt.setup_or_cleanup_nfs(False, export_dir=exp_dir,
                                     mount_dir=mount_dir,
                                     restore_selinux=local_selinux_bak)

        # Recover libvirtd service configuration on local
        if libvirtd_conf:
            logging.debug("Recover local libvirtd configuration...")
            libvirtd_conf.restore()
            libvirtd.restart()
            os.remove(log_file)

    # Check results.
    if status_error:
        if ret_setmmdt:
            if not do_migrate and libvirt_version.version_compare(1, 2, 9):
                # https://bugzilla.redhat.com/show_bug.cgi?id=1146618
                # Commit fe808d9 fixed it and allows setting the migration
                # max downtime at any time since libvirt-1.2.9
                logging.info("Libvirt version is 1.2.9 or newer, "
                             "so setting maxdowntime while the VM isn't "
                             "migrating is allowed")
            else:
                raise error.TestFail("virsh migrate-setmaxdowntime succeed "
                                     "but not expected.")
    else:
        if do_migrate and not ret_migration:
            raise error.TestFail("Migration failed.")

        if not ret_setmmdt:
            raise error.TestFail("virsh migrate-setmaxdowntime failed.")
Example No. 24
def run(test, params, env):
    """
    Test pool command:virsh pool_autostart

    1) Define a given type pool
    2) Mark pool as autostart
    3) Restart libvirtd and check pool
    4) Destroy the pool
    5) Unmark pool as autostart
    6) Repeat step (3)
    """

    # Initialize the variables
    pool_name = params.get("pool_name", "temp_pool_1")
    pool_type = params.get("pool_type", "dir")
    pool_target = params.get("pool_target", "")
    source_format = params.get("source_format", "")
    source_name = params.get("pool_source_name", "gluster-vol1")
    source_path = params.get("pool_source_path", "/")
    ip_protocal = params.get("ip_protocal", "ipv4")
    pool_ref = params.get("pool_ref", "name")
    pool_uuid = params.get("pool_uuid", "")
    invalid_source_path = params.get("invalid_source_path", "")
    status_error = "yes" == params.get("status_error", "no")
    readonly_mode = "yes" == params.get("readonly_mode", "no")
    pre_def_pool = "yes" == params.get("pre_def_pool", "yes")
    disk_type = params.get("disk_type", "")
    vg_name = params.get("vg_name", "")
    lv_name = params.get("lv_name", "")
    update_policy = params.get("update_policy")

    # Readonly mode
    ro_flag = False
    if readonly_mode:
        logging.debug("Readonly mode test")
        ro_flag = True

    if pool_target is "":
        pool_target = os.path.join(test.tmpdir, pool_target)

    # The file for dumped pool xml
    p_xml = os.path.join(test.tmpdir, "pool.xml.tmp")

    if not libvirt_version.version_compare(1, 0, 0):
        if pool_type == "gluster":
            test.cancel("Gluster pool is not supported in current"
                        " libvirt version.")

    pool_ins = libvirt_storage.StoragePool()
    if pool_ins.pool_exists(pool_name):
        test.fail("Pool %s already exist" % pool_name)

    def check_pool(pool_name,
                   pool_type,
                   checkpoint,
                   expect_value="",
                   expect_error=False):
        """
        Check the pool after autostart it

        :param pool_name: Name of the pool.
        :param pool_type: Type of the pool.
        :param checkpoint: Which part for checking.
        :param expect_value: Expected value.
        :param expect_error: Boolean value, expect command success or failure
        """
        libvirt_pool = libvirt_storage.StoragePool()
        virsh.pool_list(option="--all", debug=True)
        if checkpoint == 'State':
            actual_value = libvirt_pool.pool_state(pool_name)
        if checkpoint == 'Autostart':
            actual_value = libvirt_pool.pool_autostart(pool_name)
        if actual_value != expect_value:
            if not expect_error:
                if checkpoint == 'State' and pool_type in ("dir", "scsi"):
                    debug_msg = "Dir pool should be always active when libvirtd restart. "
                    debug_msg += "See https://bugzilla.redhat.com/show_bug.cgi?id=1238610"
                    logging.debug(debug_msg)
                else:
                    test.fail("Pool %s isn't %s as expected" %
                              (checkpoint, expect_value))
            else:
                logging.debug("Pool %s is %s as expected", checkpoint,
                              actual_value)

    def change_source_path(new_path, update_policy="set"):
        n_poolxml = pool_xml.PoolXML()
        n_poolxml = n_poolxml.new_from_dumpxml(pool_name)
        s_xml = n_poolxml.get_source()
        s_xml.device_path = new_path
        if update_policy == "set":
            n_poolxml.set_source(s_xml)
        elif update_policy == "add":
            n_poolxml.add_source("device", {"path": new_path})
        else:
            test.error("Unsupported policy type")
        logging.debug("After change_source_path:\n%s" %
                      open(n_poolxml.xml).read())
        return n_poolxml

    # Run Testcase
    pvt = utlv.PoolVolumeTest(test, params)
    kwargs = {
        'image_size': '1G',
        'pre_disk_vol': ['100M'],
        'source_name': source_name,
        'source_path': source_path,
        'source_format': source_format,
        'persistent': True,
        'ip_protocal': ip_protocal,
        'emulated_image': "emulated-image",
        'pool_target': pool_target
    }
    params.update(kwargs)
    pool = pool_name
    clean_mount = False
    new_device = None
    try:
        if pre_def_pool:
            # Step(1)
            # Pool define
            pvt.pre_pool(**params)
            # Remove the partition for disk pool
            # For sometimes the partition will cause pool start failed
            if pool_type == "disk":
                virsh.pool_build(pool_name, "--overwrite", debug=True)
            # Get pool uuid:
            if pool_ref == "uuid" and not pool_uuid:
                pool = pool_ins.get_pool_uuid(pool_name)

            # Setup logical block device
            # Change pool source path
            # Undefine pool
            # Define pool with new xml
            # Start pool
            if update_policy:
                new_device = utlv.setup_or_cleanup_iscsi(True)
                lv_utils.vg_create(vg_name, new_device)
                new_device = utlv.create_local_disk(disk_type,
                                                    size="0.5",
                                                    vgname=vg_name,
                                                    lvname=lv_name)
                new_path = new_device
                if invalid_source_path:
                    new_path = invalid_source_path
                if pool_type == "fs":
                    utlv.mkfs(new_device, source_format)
                n_poolxml = change_source_path(new_path, update_policy)
                p_xml = n_poolxml.xml
                # virsh wrappers return a CmdResult; check its exit status
                if virsh.pool_undefine(pool_name).exit_status:
                    test.fail("Undefine pool %s failed" % pool_name)
                if virsh.pool_define(p_xml).exit_status:
                    test.fail("Define pool %s from %s failed" %
                              (pool_name, p_xml))
                logging.debug("Start pool %s" % pool_name)
                result = virsh.pool_start(pool_name,
                                          ignore_status=True,
                                          debug=True)
                utlv.check_exit_status(result, status_error)
                # Mount a valid fs to pool target
                if pool_type == "fs":
                    source_list = []
                    mnt_cmd = ""
                    pool_target = n_poolxml.target_path
                    if invalid_source_path:
                        source_list.append(new_device)
                    else:
                        s_devices = n_poolxml.xmltreefile.findall(
                            "//source/device")
                        for dev in s_devices:
                            source_list.append(dev.get('path'))
                    try:
                        for src in source_list:
                            mnt_cmd = "mount %s %s" % (src, pool_target)
                            if not process.system(mnt_cmd, shell=True):
                                clean_mount = True
                    except process.CmdError:
                        test.error("Failed to run %s" % mnt_cmd)

        # Step(2)
        # Pool autostart
        logging.debug("Try to mark pool %s as autostart" % pool_name)
        result = virsh.pool_autostart(pool,
                                      readonly=ro_flag,
                                      ignore_status=True,
                                      debug=True)
        if not pre_def_pool:
            utlv.check_exit_status(result, status_error)
        if not result.exit_status:
            check_pool(pool_name,
                       pool_type,
                       checkpoint='Autostart',
                       expect_value="yes",
                       expect_error=status_error)

            # Step(3)
            # Restart libvirtd and check pool status
            logging.info("Try to restart libvirtd")
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
            check_pool(pool_name,
                       pool_type,
                       checkpoint="State",
                       expect_value="active",
                       expect_error=status_error)

            # Step(4)
            # Pool destroy
            if pool_ins.is_pool_active(pool_name):
                virsh.pool_destroy(pool_name)
                logging.debug("Pool %s destroyed" % pool_name)

            # Step(5)
            # Pool autostart disable
            logging.debug("Try to unmark pool %s as autostart" % pool_name)
            result = virsh.pool_autostart(pool,
                                          extra="--disable",
                                          debug=True,
                                          ignore_status=True)
            if not pre_def_pool:
                utlv.check_exit_status(result, status_error)
            if not result.exit_status:
                check_pool(pool_name,
                           pool_type,
                           checkpoint='Autostart',
                           expect_value="no",
                           expect_error=status_error)

                # Repeat step (3)
                logging.debug("Try to restart libvirtd")
                libvirtd = utils_libvirtd.Libvirtd()
                libvirtd.restart()
                check_pool(pool_name,
                           pool_type,
                           checkpoint='State',
                           expect_value="inactive",
                           expect_error=status_error)
    finally:
        # Clean up
        logging.debug("Try to clean up env")
        try:
            if clean_mount:
                # one umount per stacked mount on the pool target
                for _ in source_list:
                    process.system("umount %s" % pool_target)
            if pre_def_pool:
                pvt.cleanup_pool(**params)
            if new_device:
                utlv.delete_local_disk(disk_type,
                                       vgname=vg_name,
                                       lvname=lv_name)
                lv_utils.vg_remove(vg_name)
                utlv.setup_or_cleanup_iscsi(False)
            if os.path.exists(p_xml):
                os.remove(p_xml)
        except exceptions.TestFail as details:
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
            logging.error(str(details))
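
# A hedged sketch of the autostart round trip exercised above; the
# "yes"/"no" return convention follows the checks in this example, and the
# helper name is illustrative.
from virttest import libvirt_storage, virsh


def toggle_and_check_autostart(pool_name, enable=True):
    extra = "" if enable else "--disable"
    virsh.pool_autostart(pool_name, extra=extra, ignore_status=True)
    expected = "yes" if enable else "no"
    return libvirt_storage.StoragePool().pool_autostart(pool_name) == expected
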
Example No. 25
def run(test, params, env):
    """
    Test virsh migrate command.
    """
    def set_feature(vmxml, feature, value):
        """
        Set guest features for PPC

        :param vmxml: guest xml
        :param feature: the feature to set, either 'hpt' or 'htm'
        :param value: the value to set for the feature
        """
        features_xml = vm_xml.VMFeaturesXML()
        if feature == 'hpt':
            features_xml.hpt_resizing = value
        elif feature == 'htm':
            features_xml.htm = value
        vmxml.features = features_xml
        vmxml.sync()
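    # For reference, set_feature(vmxml, 'hpt', 'required') is expected to
    # produce a <features> fragment like the following (an illustration
    # based on libvirt's documented syntax, not captured output):
    #   <features>
    #     <hpt resizing='required'/>
    #   </features>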

    def trigger_hpt_resize(session):
        """
        Check the HPT order file and dmesg

        :param session: the session to guest

        :raise: test.fail if required message is not found
        """
        hpt_order_path = "/sys/kernel/debug/powerpc/hpt_order"
        hpt_order = session.cmd_output('cat %s' % hpt_order_path).strip()
        hpt_order = int(hpt_order)
        logging.info('Current hpt_order is %d', hpt_order)
        hpt_order += 1
        cmd = 'echo %d > %s' % (hpt_order, hpt_order_path)
        cmd_result = session.cmd_status_output(cmd)
        # cmd_status_output() returns (status, combined output), so the
        # combined text serves as both stdout and stderr here
        result = process.CmdResult(stderr=cmd_result[1],
                                   stdout=cmd_result[1],
                                   exit_status=cmd_result[0])
        libvirt.check_exit_status(result)
        dmesg = session.cmd('dmesg')
        dmesg_content = params.get('dmesg_content').split('|')
        for content in dmesg_content:
            if content % hpt_order not in dmesg:
                test.fail("'%s' is missing in dmesg" % (content % hpt_order))
            else:
                logging.info("'%s' is found in dmesg", content % hpt_order)
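
    # Note: hpt_order is the log2 of the hashed page table size, so the
    # increment above asks the guest kernel to double its HPT; the dmesg
    # check then confirms the resize actually happened.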

    def check_vm_network_accessed(session=None):
        """
        Check that the VM is reachable over the network; this runs
        both before and after migration happens

        :param session: The session object to the host

        :raise: test.error when ping fails
        """
        # Confirm local/remote VM can be accessed through network.
        logging.info("Check VM network connectivity")
        s_ping, _ = utils_test.ping(vm.get_address(),
                                    count=10,
                                    timeout=20,
                                    output_func=logging.debug,
                                    session=session)
        if s_ping != 0:
            if session:
                session.close()
            test.fail("%s did not respond after %d sec." % (vm.name, 20))

    def check_virsh_command_and_option(command, option=None):
        """
        Check if virsh command exists

        :param command: the command to be checked
        :param option: the command option to be checked
        """
        msg = "This version of libvirt does not support "
        if not virsh.has_help_command(command):
            test.cancel(msg + "virsh command '%s'" % command)

        if option and not virsh.has_command_help_match(command, option):
            test.cancel(msg + "virsh command '%s' with option '%s'" %
                        (command, option))

    def add_ctrls(vm_xml, dev_type="pci", dev_index="0", dev_model="pci-root"):
        """
        Add multiple controller devices to the guest XML

        :param vm_xml: the guest XML to be modified
        :param dev_type: the type of the device to be added
        :param dev_index: the maximum index of the device to be added
        :param dev_model: the model of the device to be added
        """
        for inx in range(0, int(dev_index) + 1):
            newcontroller = Controller("controller")
            newcontroller.type = dev_type
            newcontroller.index = inx
            newcontroller.model = dev_model
            logging.debug("New device is added:\n%s", newcontroller)
            vm_xml.add_device(newcontroller)
        vm_xml.sync()

    def do_migration(vm, dest_uri, options, extra):
        """
        Execute the migration with given parameters
        :param vm: the guest to be migrated
        :param dest_uri: the destination uri for migration
        :param options: options next to 'migrate' command
        :param extra: options in the end of the migrate command line

        :return: CmdResult object
        """
        logging.info("Sleeping 10 seconds before migration")
        time.sleep(10)
        # Migrate the guest.
        virsh_args.update({"ignore_status": True})
        migration_res = vm.migrate(dest_uri, options, extra, **virsh_args)
        if int(migration_res.exit_status) != 0:
            logging.error("Migration failed for %s.", vm_name)
            return migration_res

        if vm.is_alive():  # vm.connect_uri was updated
            logging.info("VM is alive on destination %s.", dest_uri)
        else:
            test.fail("VM is not alive on destination %s" % dest_uri)

        # Throws exception if console shows panic message
        vm.verify_kernel_crash()
        return migration_res

    def cleanup_libvirtd_log(log_file):
        """
        Remove existing libvirtd log file on source and target host.

        :param log_file: log file with absolute path
        """
        if os.path.exists(log_file):
            logging.debug("Delete local libvirt log file '%s'", log_file)
            os.remove(log_file)
        cmd = "rm -f %s" % log_file
        logging.debug("Delete remote libvirt log file '%s'", log_file)
        cmd_parms = {
            'server_ip': server_ip,
            'server_user': server_user,
            'server_pwd': server_pwd
        }
        remote.run_remote_cmd(cmd, cmd_parms, runner_on_target)

    def cleanup_dest(vm):
        """
        Clean up the destination host environment
        when doing a uni-directional migration.

        :param vm: the guest to be cleaned up
        """
        logging.info("Cleaning up VMs on %s", vm.connect_uri)
        try:
            if virsh.domain_exists(vm.name, uri=vm.connect_uri):
                vm_state = vm.state()
                if vm_state == "paused":
                    vm.resume()
                elif vm_state == "shut off":
                    vm.start()
                vm.destroy(gracefully=False)

                if vm.is_persistent():
                    vm.undefine()

        except Exception as detail:
            logging.error("Cleaning up destination failed.\n%s", detail)

    def run_stress_in_vm():
        """
        The function to load stress in VM
        """
        stress_args = params.get(
            "stress_args", "--cpu 8 --io 4 "
            "--vm 2 --vm-bytes 128M "
            "--timeout 20s")
        try:
            vm_session.cmd('stress %s' % stress_args)
        except Exception as detail:
            logging.debug(detail)

    def control_migrate_speed(to_speed=1):
        """
        Control migration duration

        :param to_speed: the speed value in MiB/s to be set for migration
        :return int: the new migration speed after setting
        """
        virsh_args.update({"ignore_status": False})
        old_speed = virsh.migrate_getspeed(vm_name, **virsh_args)
        logging.debug("Current migration speed is %s MiB/s\n",
                      old_speed.stdout.strip())
        logging.debug("Set migration speed to %d MiB/s\n", to_speed)
        cmd_result = virsh.migrate_setspeed(vm_name, to_speed, "",
                                            **virsh_args)
        actual_speed = virsh.migrate_getspeed(vm_name, **virsh_args)
        logging.debug("New migration speed is %s MiB/s\n",
                      actual_speed.stdout.strip())
        return int(actual_speed.stdout.strip())

    def check_setspeed(params):
        """
        Set/get migration speed

        :param params: the parameters used
        :raise: test.fail if speed set does not take effect
        """
        expected_value = int(params.get("migrate_speed",
                                        '41943040')) // (1024 * 1024)
        actual_value = control_migrate_speed(to_speed=expected_value)
        params.update({'compare_to_value': actual_value})
        if actual_value != expected_value:
            test.fail(
                "Migration speed is expected to be '%d MiB/s', but '%d MiB/s' "
                "found" % (expected_value, actual_value))

    def check_domjobinfo(params, option=""):
        """
        Check given item in domjobinfo of the guest is as expected

        :param params: the parameters used
        :param option: options for domjobinfo
        :raise: test.fail if the value of given item is unexpected
        """
        def search_jobinfo(jobinfo):
            """
            Find value of given item in domjobinfo

            :param jobinfo: cmdResult object
            :raise: test.fail if not found
            """
            for item in jobinfo.stdout.splitlines():
                if item.count(jobinfo_item):
                    groups = re.findall(r'[0-9.]+', item.strip())
                    logging.debug("In '%s' search '%s'\n", item, groups[0])
                    # use true division: compare the relative difference
                    # against diff_rate
                    if (math.fabs(float(groups[0]) - float(compare_to_value))
                            / float(compare_to_value) > diff_rate):
                        test.fail("{} {} has too much difference from "
                                  "{}".format(jobinfo_item, groups[0],
                                              compare_to_value))
                    # stop after the first matching line
                    break

        jobinfo_item = params.get("jobinfo_item")
        compare_to_value = params.get("compare_to_value")
        logging.debug("compare_to_value:%s", compare_to_value)
        diff_rate = float(params.get("diff_rate", "0"))
        if not jobinfo_item or not compare_to_value:
            return
        vm_ref = '{}{}'.format(vm_name, option)
        jobinfo = virsh.domjobinfo(vm_ref, **virsh_args)
        search_jobinfo(jobinfo)

        check_domjobinfo_remote = params.get("check_domjobinfo_remote")
        if check_domjobinfo_remote:
            remote_virsh_session = virsh.VirshPersistent(**remote_virsh_dargs)
            jobinfo = remote_virsh_session.domjobinfo(vm_ref, **virsh_args)
            search_jobinfo(jobinfo)
            remote_virsh_session.close_session()
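
        # Example of the tolerance check in search_jobinfo(): with
        # compare_to_value=300 and diff_rate=0.2, a reported value of 350
        # passes (|350 - 300| / 300 is about 0.17) while 400 fails (0.33).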

    def check_maxdowntime(params):
        """
        Set/get migration maxdowntime

        :param params: the parameters used
        :raise: test.fail if maxdowntime set does not take effect
        """
        expected_value = int(
            float(params.get("migrate_maxdowntime", '0.3')) * 1000)
        virsh_args.update({"ignore_status": False})
        old_value = int(virsh.migrate_getmaxdowntime(vm_name).stdout.strip())
        logging.debug("Current migration maxdowntime is %d ms", old_value)
        logging.debug("Set migration maxdowntime to %d ms", expected_value)
        virsh.migrate_setmaxdowntime(vm_name, expected_value, **virsh_args)
        actual_value = int(
            virsh.migrate_getmaxdowntime(vm_name).stdout.strip())
        logging.debug("New migration maxdowntime is %d ms", actual_value)
        if actual_value != expected_value:
            test.fail(
                "Migration maxdowntime is expected to be '%d ms', but '%d ms' "
                "found" % (expected_value, actual_value))
        params.update({'compare_to_value': actual_value})

    def do_actions_during_migrate(params):
        """
        The entry point to execute action list during migration

        :param params: the parameters used
        """
        actions_during_migration = params.get("actions_during_migration")
        if not actions_during_migration:
            return
        for action in actions_during_migration.split(","):
            if action == 'setspeed':
                check_setspeed(params)
            elif action == 'domjobinfo':
                check_domjobinfo(params)
            elif action == 'setmaxdowntime':
                check_maxdowntime(params)
            time.sleep(3)

    def attach_channel_xml():
        """
        Create channel xml and attach it to guest configuration
        """
        # Check if pty channel exists already
        for elem in new_xml.devices.by_device_tag('channel'):
            if elem.type_name == channel_type_name:
                logging.debug(
                    "{0} channel already exists in guest. "
                    "No need to add new one".format(channel_type_name))
                return

        params = {
            'channel_type_name': channel_type_name,
            'target_type': target_type,
            'target_name': target_name
        }
        channel_xml = libvirt.create_channel_xml(params)
        virsh.attach_device(domain_opt=vm_name,
                            file_opt=channel_xml.xml,
                            flagstr="--config",
                            ignore_status=False)
        logging.debug("New VMXML with channel:\n%s", virsh.dumpxml(vm_name))

    def check_timeout_postcopy(params):
        """
        Check the vm state on target host after timeout
        when --postcopy and --timeout-postcopy are used.
        The vm state is expected to be 'running'.

        :param params: the parameters used
        """
        timeout = int(params.get("timeout_postcopy", 10))
        time.sleep(timeout + 1)
        remote_virsh_session = virsh.VirshPersistent(**remote_virsh_dargs)
        vm_state = results_stdout_52lts(
            remote_virsh_session.domstate(vm_name)).strip()
        if vm_state != "running":
            remote_virsh_session.close_session()
            test.fail("After timeout '%s' seconds, "
                      "the vm state on target host should "
                      "be 'running', but '%s' found" % (timeout, vm_state))
        remote_virsh_session.close_session()

    def get_usable_compress_cache(pagesize):
        """
        Get a number which is bigger than pagesize and is a power of two.

        :param pagesize: the given integer
        :return: an integer satisfying the criteria
        """
        def calculate(num):
            result = num & (num - 1)
            return (result == 0)

        item = pagesize
        found = False
        while (not found):
            item += 1
            found = calculate(item)
        logging.debug(
            "%d is the smallest number that is bigger than '%s' and "
            "is a power of 2", item, pagesize)
        return item
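
    # An equivalent closed form for the search above (illustrative only,
    # not used by the test): the smallest power of two strictly greater
    # than n is 1 << n.bit_length(); e.g. for a 4096-byte page it is 8192.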

    def check_migration_res(result):
        """
        Check if the migration result is as expected

        :param result: the output of migration
        :raise: test.fail if test is failed
        """
        logging.info("Migration out: %s", results_stdout_52lts(result).strip())
        logging.info("Migration error: %s",
                     results_stderr_52lts(result).strip())

        if status_error:  # Migration should fail
            if err_msg:  # Special error messages are expected
                if not re.search(err_msg,
                                 results_stderr_52lts(result).strip()):
                    test.fail("Can not find the expected patterns '%s' in "
                              "output '%s'" %
                              (err_msg, results_stderr_52lts(result).strip()))
                else:
                    logging.debug("It is the expected error message")
            else:
                if int(result.exit_status) != 0:
                    logging.debug("Migration failure is expected result")
                else:
                    test.fail("Migration success is unexpected result")
        else:
            if int(result.exit_status) != 0:
                test.fail(results_stderr_52lts(result).strip())

    check_parameters(test, params)

    # Params for NFS shared storage
    shared_storage = params.get("migrate_shared_storage", "")
    if shared_storage == "":
        default_guest_asset = defaults.get_default_guest_os_info()['asset']
        default_guest_asset = "%s.qcow2" % default_guest_asset
        shared_storage = os.path.join(params.get("nfs_mount_dir"),
                                      default_guest_asset)
        logging.debug("shared_storage:%s", shared_storage)

    # params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    # Params to update disk using shared storage
    params["disk_type"] = "file"
    params["disk_source_protocol"] = "netfs"
    params["mnt_path_name"] = params.get("nfs_mount_dir")

    # Local variables
    virsh_args = {"debug": True}
    virsh_opt = params.get("virsh_opt", "")
    server_ip = params.get("server_ip")
    server_user = params.get("server_user", "root")
    server_pwd = params.get("server_pwd")
    extra = params.get("virsh_migrate_extra")
    options = params.get("virsh_migrate_options")
    src_uri = params.get("virsh_migrate_connect_uri")
    dest_uri = params.get("virsh_migrate_desturi")
    log_file = params.get("libvirt_log", "/var/log/libvirt/libvirtd.log")
    check_complete_job = "yes" == params.get("check_complete_job", "no")
    config_libvirtd = "yes" == params.get("config_libvirtd", "no")
    contrl_index = params.get("new_contrl_index", None)
    asynch_migration = "yes" == params.get("asynch_migrate", "no")
    grep_str_remote_log = params.get("grep_str_remote_log", "")
    grep_str_local_log = params.get("grep_str_local_log", "")
    disable_verify_peer = "yes" == params.get("disable_verify_peer", "no")
    status_error = "yes" == params.get("status_error", "no")
    stress_in_vm = "yes" == params.get("stress_in_vm", "no")
    low_speed = params.get("low_speed", None)
    remote_virsh_dargs = {
        'remote_ip': server_ip,
        'remote_user': server_user,
        'remote_pwd': server_pwd,
        'unprivileged_user': None,
        'ssh_remote_auth': True
    }

    hpt_resize = params.get("hpt_resize", None)
    htm_state = params.get("htm_state", None)

    # For pty channel test
    add_channel = "yes" == params.get("add_channel", "no")
    channel_type_name = params.get("channel_type_name", None)
    target_type = params.get("target_type", None)
    target_name = params.get("target_name", None)
    cmd_run_in_remote_guest = params.get("cmd_run_in_remote_guest", None)
    cmd_run_in_remote_guest_1 = params.get("cmd_run_in_remote_guest_1", None)
    cmd_run_in_remote_host = params.get("cmd_run_in_remote_host", None)
    cmd_run_in_remote_host_1 = params.get("cmd_run_in_remote_host_1", None)
    cmd_run_in_remote_host_2 = params.get("cmd_run_in_remote_host_2", None)

    # For qemu command line checking
    qemu_check = params.get("qemu_check", None)

    xml_check_after_mig = params.get("guest_xml_check_after_mig", None)

    # params for cache matrix test
    cache = params.get("cache")
    remove_cache = "yes" == params.get("remove_cache", "no")
    err_msg = params.get("err_msg")

    arch = platform.machine()
    if any([hpt_resize, contrl_index, htm_state]) and 'ppc64' not in arch:
        test.cancel("The case is PPC only.")

    # For TLS
    tls_recovery = params.get("tls_auto_recovery", "yes")
    # qemu config
    qemu_conf_dict = None
    # libvirtd config
    libvirtd_conf_dict = None

    remote_virsh_session = None
    vm = None
    vm_session = None
    libvirtd_conf = None
    qemu_conf = None
    mig_result = None
    test_exception = None
    is_TestError = False
    is_TestFail = False
    is_TestSkip = False

    # Objects to be cleaned up in the end
    objs_list = []
    tls_obj = None

    # Local variables
    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()

    # For safety reasons, we'd better back up the guest XML file.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()
    if not orig_config_xml:
        test.error("Backing up xmlfile failed.")

    try:
        # Create a remote runner for later use
        runner_on_target = remote.RemoteRunner(host=server_ip,
                                               username=server_user,
                                               password=server_pwd)

        # Change the configuration files if needed before starting guest
        # For qemu.conf
        if extra.count("--tls"):
            # Setup TLS
            tls_obj = TLSConnection(params)
            if tls_recovery == "yes":
                objs_list.append(tls_obj)
                tls_obj.auto_recover = True
                tls_obj.conn_setup()
            if not disable_verify_peer:
                qemu_conf_dict = {"migrate_tls_x509_verify": "1"}
                # Setup qemu configure
                logging.debug("Configure the qemu")
                cleanup_libvirtd_log(log_file)
                qemu_conf = libvirt.customize_libvirt_config(
                    qemu_conf_dict,
                    config_type="qemu",
                    remote_host=True,
                    extra_params=params)
        # Setup libvirtd
        if config_libvirtd:
            logging.debug("Configure the libvirtd")
            cleanup_libvirtd_log(log_file)
            libvirtd_conf_dict = setup_libvirtd_conf_dict(params)
            libvirtd_conf = libvirt.customize_libvirt_config(
                libvirtd_conf_dict, remote_host=True, extra_params=params)
        # Prepare required guest xml before starting guest
        if contrl_index:
            new_xml.remove_all_device_by_type('controller')
            logging.debug("After removing controllers, current XML:\n%s\n",
                          new_xml)
            add_ctrls(new_xml, dev_index=contrl_index)

        if add_channel:
            attach_channel_xml()

        if hpt_resize:
            set_feature(new_xml, 'hpt', hpt_resize)

        if htm_state:
            set_feature(new_xml, 'htm', htm_state)

        if cache:
            params["driver_cache"] = cache
        if remove_cache:
            params["enable_cache"] = "no"

        # Change the disk of the vm to shared disk and then start VM
        libvirt.set_vm_disk(vm, params)

        if not vm.is_alive():
            vm.start()

        logging.debug("Guest xml after starting:\n%s",
                      vm_xml.VMXML.new_from_dumpxml(vm_name))

        # Check qemu command line after guest is started
        if qemu_check:
            check_content = qemu_check
            if hpt_resize:
                check_content = "%s%s" % (qemu_check, hpt_resize)
            if htm_state:
                check_content = "%s%s" % (qemu_check, htm_state)
            libvirt.check_qemu_cmd_line(check_content)

        # Check local guest network connection before migration
        vm_session = vm.wait_for_login()
        check_vm_network_accessed()

        # Preparation for the running guest before migration
        if hpt_resize and hpt_resize != 'disabled':
            trigger_hpt_resize(vm_session)

        if low_speed:
            control_migrate_speed(int(low_speed))

        if stress_in_vm:
            pkg_name = 'stress'
            logging.debug("Check if stress tool is installed")
            pkg_mgr = utils_package.package_manager(vm_session, pkg_name)
            if not pkg_mgr.is_installed(pkg_name):
                logging.debug("Stress tool will be installed")
                if not pkg_mgr.install():
                    test.error("Package '%s' installation fails" % pkg_name)

            stress_thread = threading.Thread(target=run_stress_in_vm, args=())
            stress_thread.start()

        if extra.count("timeout-postcopy"):
            func_name = check_timeout_postcopy
        if params.get("actions_during_migration"):
            func_name = do_actions_during_migrate
        if extra.count("comp-xbzrle-cache"):
            cache = get_usable_compress_cache(memory.get_page_size())
            extra = "%s %s" % (extra, cache)

        # For --postcopy enable
        postcopy_options = params.get("postcopy_options")
        if postcopy_options:
            extra = "%s %s" % (extra, postcopy_options)

        # Execute migration process
        if not asynch_migration:
            mig_result = do_migration(vm, dest_uri, options, extra)
        else:
            migration_test = libvirt.MigrationTest()

            logging.debug("vm.connect_uri=%s", vm.connect_uri)
            vms = [vm]
            try:
                migration_test.do_migration(vms,
                                            None,
                                            dest_uri,
                                            'orderly',
                                            options,
                                            thread_timeout=900,
                                            ignore_status=True,
                                            virsh_opt=virsh_opt,
                                            func=func_name,
                                            extra_opts=extra,
                                            func_params=params)
                mig_result = migration_test.ret
            except exceptions.TestFail as fail_detail:
                test.fail(fail_detail)
            except exceptions.TestSkipError as skip_detail:
                test.cancel(skip_detail)
            except exceptions.TestError as error_detail:
                test.error(error_detail)
            except Exception as details:
                mig_result = migration_test.ret
                logging.error(details)

        check_migration_res(mig_result)

        if add_channel:
            # Get the channel device source path of remote guest
            if not remote_virsh_session:
                remote_virsh_session = virsh.VirshPersistent(
                    **remote_virsh_dargs)
            file_path = tempfile.mktemp(dir=data_dir.get_tmp_dir())
            remote_virsh_session.dumpxml(vm_name,
                                         to_file=file_path,
                                         debug=True,
                                         ignore_status=True)
            local_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            local_vmxml.xmltreefile = xml_utils.XMLTreeFile(file_path)
            host_source = None
            for elem in local_vmxml.devices.by_device_tag('channel'):
                logging.debug("Found channel device {}".format(elem))
                if elem.type_name == channel_type_name:
                    host_source = elem.source.get('path')
                    logging.debug(
                        "Remote guest uses {} for channel device".format(
                            host_source))
                    break
            remote_virsh_session.close_session()
            if not host_source:
                test.fail("Can not find source for %s channel on remote host" %
                          channel_type_name)

            # Prepare to wait for message on remote host from the channel
            cmd_parms = {
                'server_ip': server_ip,
                'server_user': server_user,
                'server_pwd': server_pwd
            }
            cmd_result = remote.run_remote_cmd(
                cmd_run_in_remote_host % host_source, cmd_parms,
                runner_on_target)

            # Send message from remote guest to the channel file
            remote_vm_obj = utils_test.RemoteVMManager(cmd_parms)
            vm_ip = vm.get_address()
            vm_pwd = params.get("password")
            remote_vm_obj.setup_ssh_auth(vm_ip, vm_pwd)
            cmd_result = remote_vm_obj.run_command(vm_ip,
                                                   cmd_run_in_remote_guest_1)
            remote_vm_obj.run_command(
                vm_ip, cmd_run_in_remote_guest %
                results_stdout_52lts(cmd_result).strip())
            logging.debug("Sending message is done")

            # Check message on remote host from the channel
            remote.run_remote_cmd(cmd_run_in_remote_host_1, cmd_parms,
                                  runner_on_target)
            logging.debug("Receiving message is done")
            remote.run_remote_cmd(cmd_run_in_remote_host_2, cmd_parms,
                                  runner_on_target)

        if check_complete_job:
            opts = " --completed"
            check_virsh_command_and_option("domjobinfo", opts)
            if extra.count("comp-xbzrle-cache"):
                params.update({'compare_to_value': cache // 1024})
            check_domjobinfo(params, option=opts)

        if grep_str_local_log:
            cmd = "grep -E '%s' %s" % (grep_str_local_log, log_file)
            cmdRes = process.run(cmd, shell=True, ignore_status=True)
            if cmdRes.exit_status:
                test.fail(results_stderr_52lts(cmdRes).strip())
        if grep_str_remote_log:
            cmd = "grep -E '%s' %s" % (grep_str_remote_log, log_file)
            cmd_parms = {
                'server_ip': server_ip,
                'server_user': server_user,
                'server_pwd': server_pwd
            }
            remote.run_remote_cmd(cmd, cmd_parms, runner_on_target)

        if xml_check_after_mig:
            if not remote_virsh_session:
                remote_virsh_session = virsh.VirshPersistent(
                    **remote_virsh_dargs)
            target_guest_dumpxml = results_stdout_52lts(
                remote_virsh_session.dumpxml(vm_name,
                                             debug=True,
                                             ignore_status=True)).strip()
            if hpt_resize:
                check_str = hpt_resize
            elif htm_state:
                check_str = htm_state
            if hpt_resize or htm_state:
                xml_check_after_mig = "%s'%s'" % (xml_check_after_mig,
                                                  check_str)
                if not re.search(xml_check_after_mig, target_guest_dumpxml):
                    remote_virsh_session.close_session()
                    test.fail("Fail to search '%s' in target guest XML:\n%s" %
                              (xml_check_after_mig, target_guest_dumpxml))

            if contrl_index:
                all_ctrls = re.findall(xml_check_after_mig,
                                       target_guest_dumpxml)
                if len(all_ctrls) != int(contrl_index) + 1:
                    remote_virsh_session.close_session()
                    test.fail(
                        "%s pci-root controllers are expected in guest XML, "
                        "but found %s" %
                        (int(contrl_index) + 1, len(all_ctrls)))
            remote_virsh_session.close_session()

        if int(mig_result.exit_status) == 0:
            server_session = remote.wait_for_login('ssh', server_ip, '22',
                                                   server_user, server_pwd,
                                                   r"[\#\$]\s*$")
            check_vm_network_accessed(server_session)
            server_session.close()
    except exceptions.TestFail as details:
        is_TestFail = True
        test_exception = details
    except exceptions.TestSkipError as details:
        is_TestSkip = True
        test_exception = details
    except exceptions.TestError as details:
        is_TestError = True
        test_exception = details
    except Exception as details:
        test_exception = details
    finally:
        logging.debug("Recover test environment")
        try:
            # Clean VM on destination
            vm.connect_uri = dest_uri
            cleanup_dest(vm)
            vm.connect_uri = src_uri

            logging.info("Recovery VM XML configration")
            orig_config_xml.sync()
            logging.debug("The current VM XML:\n%s",
                          orig_config_xml.xmltreefile)

            if remote_virsh_session:
                remote_virsh_session.close_session()

            if extra.count("--tls") and not disable_verify_peer:
                logging.debug("Recover the qemu configuration")
                libvirt.customize_libvirt_config(None,
                                                 config_type="qemu",
                                                 remote_host=True,
                                                 extra_params=params,
                                                 is_recover=True,
                                                 config_object=qemu_conf)

            if config_libvirtd:
                logging.debug("Recover the libvirtd configuration")
                libvirt.customize_libvirt_config(None,
                                                 remote_host=True,
                                                 extra_params=params,
                                                 is_recover=True,
                                                 config_object=libvirtd_conf)

            logging.info("Remove local NFS image")
            source_file = params.get("source_file")
            libvirt.delete_local_disk("file", path=source_file)

            if objs_list:
                logging.debug("Clean up local objects")
                for obj in objs_list:
                    del obj

        except Exception as exception_detail:
            if (not test_exception and not is_TestError and not is_TestFail
                    and not is_TestSkip):
                raise exception_detail
            else:
                # if any of the above exceptions was raised, only log the
                # error here to avoid hiding the original issue
                logging.error(exception_detail)
    # Check result
    if is_TestFail:
        test.fail(test_exception)
    if is_TestSkip:
        test.cancel(test_exception)
    if is_TestError:
        test.error(test_exception)
    if not test_exception:
        logging.info("Case execution is done.")
    else:
        test.error(test_exception)
Exemplo n.º 26
def vm_stress_events(self, event, vm):
    """
    Stress events
    :param event: event name
    :param vm: vm object
    """
    dargs = {'ignore_status': True, 'debug': True}
    for itr in range(self.iterations):
        if "vcpupin" in event:
            for vcpu in range(int(self.current_vcpu)):
                result = virsh.vcpupin(vm.name, vcpu,
                                       random.choice(self.host_cpu_list),
                                       **dargs)
                if not self.ignore_status:
                    libvirt.check_exit_status(result)
        elif "emulatorpin" in event:
            for vcpu in range(int(self.current_vcpu)):
                result = virsh.emulatorpin(vm.name,
                                           random.choice(
                                               self.host_cpu_list),
                                           **dargs)
                if not self.ignore_status:
                    libvirt.check_exit_status(result)
        elif "suspend" in event:
            result = virsh.suspend(vm.name, **dargs)
            if not self.ignore_status:
                libvirt.check_exit_status(result)
            time.sleep(self.event_sleep_time)
            result = virsh.resume(vm.name, **dargs)
            if not self.ignore_status:
                libvirt.check_exit_status(result)
        elif "cpuhotplug" in event:
            result = virsh.setvcpus(vm.name, self.max_vcpu, "--live",
                                    **dargs)
            if not self.ignore_status:
                libvirt.check_exit_status(result)
                exp_vcpu = {'max_config': self.max_vcpu,
                            'max_live': self.max_vcpu,
                            'cur_config': self.current_vcpu,
                            'cur_live': self.max_vcpu,
                            'guest_live': self.max_vcpu}
                utils_hotplug.check_vcpu_value(
                    vm, exp_vcpu, option="--live")
            time.sleep(self.event_sleep_time)
            result = virsh.setvcpus(vm.name, self.current_vcpu, "--live",
                                    **dargs)
            if not self.ignore_status:
                libvirt.check_exit_status(result)
                exp_vcpu = {'max_config': self.max_vcpu,
                            'max_live': self.max_vcpu,
                            'cur_config': self.current_vcpu,
                            'cur_live': self.current_vcpu,
                            'guest_live': self.current_vcpu}
                utils_hotplug.check_vcpu_value(
                    vm, exp_vcpu, option="--live")
        elif "reboot" in event:
            vm.reboot()
        elif "nethotplug" in event:
            for iface_num in range(int(self.iface_num)):
                logging.debug("Try to attach interface %d" % iface_num)
                mac = utils_net.generate_mac_address_simple()
                options = ("%s %s --model %s --mac %s %s" %
                           (self.iface_type, self.iface_source['network'],
                            self.iface_model, mac, self.attach_option))
                logging.debug("VM name: %s , Options for Network attach: %s",
                              vm.name, options)
                ret = virsh.attach_interface(vm.name, options,
                                             ignore_status=True)
                time.sleep(self.event_sleep_time)
                if not self.ignore_status:
                    libvirt.check_exit_status(ret)
                if self.detach_option:
                    options = ("--type %s --mac %s %s" %
                               (self.iface_type, mac, self.detach_option))
                    logging.debug("VM name: %s , Options for Network detach: %s",
                                  vm.name, options)
                    ret = virsh.detach_interface(vm.name, options,
                                                 ignore_status=True)
                    if not self.ignore_status:
                        libvirt.check_exit_status(ret)
        elif "diskhotplug" in event:
            for disk_num in range(len(self.device_source_names)):
                disk = {}
                disk_attach_error = False
                disk_name = os.path.join(self.path, vm.name,
                                         self.device_source_names[disk_num])
                device_source = libvirt.create_local_disk(
                    self.disk_type, disk_name, self.disk_size,
                    disk_format=self.disk_format)
                disk.update({"format": self.disk_format,
                             "source": device_source})
                disk_xml = Disk(self.disk_type)
                disk_xml.device = self.disk_device
                disk_xml.driver = {"name": self.disk_driver,
                                   "type": self.disk_format}
                ret = virsh.attach_disk(vm.name, disk["source"],
                                        self.device_target[disk_num],
                                        self.attach_option, debug=True)
                if not self.ignore_status:
                    libvirt.check_exit_status(ret, disk_attach_error)
                if self.detach_option:
                    ret = virsh.detach_disk(vm.name,
                                            self.device_target[disk_num],
                                            extra=self.detach_option)
                    if not self.ignore_status:
                        libvirt.check_exit_status(ret)
                    libvirt.delete_local_disk(self.disk_type, disk_name)
        else:
            raise NotImplementedError
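
# Hypothetical usage sketch: vm_stress_events is written as a method (note the
# `self` parameter), so it presumably lives on a stress-helper class carrying
# the attributes it reads (iterations, current_vcpu, host_cpu_list,
# ignore_status, event_sleep_time, ...). A minimal driver, with assumed names,
# could look like:
#
#     stressor = VMStressEvents(params, env)   # assumed class name
#     for event in params.get("stress_events", "reboot").split(","):
#         stressor.vm_stress_events(event, env.get_vm(params["main_vm"]))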
Exemplo n.º 27
            data_dir.clean_tmp_files()
            logging.debug("Cleanup mem hotplug xml")

    # cleanup hugepages
    if enable_HP or enable_HP_pin:
        logging.info("Cleanup Hugepages")
        # cleaning source hugepages
        hugepage_assign("0")
        # cleaning destination hugepages
        hugepage_assign("0",
                        target_ip=server_ip,
                        user=server_user,
                        password=server_pwd)

    if attach_scsi_disk:
        libvirt.delete_local_disk("file", path=scsi_disk)

    if seLinuxBool:
        logging.info("Recover virt NFS SELinux boolean on target host...")
        # keep .ssh/authorized_keys for NFS cleanup later
        seLinuxBool.cleanup(True)

    if nfs_client:
        logging.info("Cleanup NFS client environment...")
        nfs_client.cleanup()

    logging.info("Remove the NFS image...")
    source_file = params.get("source_file")
    libvirt.delete_local_disk("file", path=source_file)

    logging.info("Cleanup NFS server environment...")
Exemplo n.º 28
def run(test, params, env):
    """
    Test virsh blockcopy with various option based on transient_guest.

    1.Prepare backend storage (iscsi)
    2.Start VM
    3.Execute virsh blockcopy target command
    4.Check status after operation accomplished
    5.Clean up test environment
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    def setup_file_backend_env(params):
        """
        Setup iscsi test environment

        :param params: one dict to wrap up parameters
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        type_name = params.get("virt_disk_device_type")
        disk_device = params.get("virt_disk_device")
        device_target = params.get("virt_disk_device_target")
        device_bus = params.get("virt_disk_device_bus")
        device_format = params.get("virt_disk_device_format")
        blockcopy_image_name = params.get("blockcopy_image_name")
        emulated_size = int(params.get("emulated_size", "2"))

        libvirt.create_local_disk("file", blockcopy_image_name, emulated_size,
                                  "qcow2")

        disk_src_dict = {"attrs": {"file": blockcopy_image_name}}

        file_disk = libvirt_disk.create_primitive_disk_xml(
            type_name, disk_device, device_target, device_bus, device_format,
            disk_src_dict, None)
        logging.debug("guest xml after undefined and recreated:\n%s",
                      file_disk)
        return file_disk

    def start_pivot_blkcpy_on_transient_vm():
        """
        Start blockcopy with pivot option
        """
        external_snapshot_disks = libvirt_disk.make_external_disk_snapshots(
            vm, device_target, "trans_snapshot", snapshot_take)
        logging.debug("external snapshots:%s\n", external_snapshot_disks)
        external_snapshot_disks.pop()
        for sub_option in ["--shallow --pivot", "--pivot"]:
            tmp_copy_path = os.path.join(
                data_dir.get_data_dir(),
                "%s_%s.img" % (vm_name, sub_option[2:5]))
            tmp_blkcopy_path.append(tmp_copy_path)
            if os.path.exists(tmp_copy_path):
                libvirt.delete_local_disk('file', tmp_copy_path)
            virsh.blockcopy(vm_name,
                            device_target,
                            tmp_copy_path,
                            options=sub_option,
                            ignore_status=False,
                            debug=True)
            back_chain_files = libvirt_disk.get_chain_backing_files(
                tmp_copy_path)
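            # The first element is the copy itself; keep only its backing files.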
            back_chain_files = back_chain_files[1:]
            logging.debug("debug blockcopy xml restore:%s and %s\n",
                          external_snapshot_disks, back_chain_files)
            if back_chain_files != external_snapshot_disks:
                test.fail("can not get identical backing chain")
            utils_misc.wait_for(
                lambda: libvirt.check_blockjob(vm_name, device_target), 5)
            # After pivot, no backing chain exists
            external_snapshot_disks = []

    def check_info_in_libvirtd_log_file(matchedMsg=None):
        """
        Check if information can be found in libvirtd log.

        :params matchedMsg: expected matched messages
        """
        # Check libvirtd log file.
        libvirtd_log_file = log_config_path
        if not os.path.exists(libvirtd_log_file):
            test.fail("Expected VM log file: %s not exists" %
                      libvirtd_log_file)
        cmd = ("grep -nr '%s' %s" % (matchedMsg, libvirtd_log_file))
        return process.run(cmd, ignore_status=True,
                           shell=True).exit_status == 0

    def check_bandwidth_progress(bandwidth_value):
        """
        Check bandwidth

        :param bandwidth_value: expected bandwidth value
        """
        ret = utils_misc.wait_for(
            lambda: libvirt.check_blockjob(vm_name, device_target, "bandwidth",
                                           bandwidth_value), 30)
        if not ret:
            test.fail("Failed to get bandwidth limit output")

    def _extend_blkcpy_execution(sub_option,
                                 sub_status_error,
                                 pre_created=False):
        """
        Wrap up blockcopy execution combining with various options

        :params sub_option: option
        :params sub_status_error: expected error or not
        :params pre_created: whether pre-created
        """
        tmp_copy_path = os.path.join(data_dir.get_data_dir(),
                                     "%s_%s.img" % (vm_name, sub_option))
        if os.path.exists(tmp_copy_path):
            libvirt.delete_local_disk('file', tmp_copy_path)
        if pre_created:
            libvirt.create_local_disk('file', tmp_copy_path, '10M', 'qcow2')
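        # "options" is a template such as "--granularity %s --wait --verbose";
        # the concrete template comes from the test configuration.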
        tmp_option = params.get("options") % sub_option
        if "default" in tmp_option:
            tmp_option = " --wait --verbose"
        result = virsh.blockcopy(vm_name,
                                 device_target,
                                 tmp_copy_path,
                                 options=tmp_option,
                                 ignore_status=True,
                                 debug=True)
        logging.debug("Expect failure: %s", sub_status_error)
        libvirt.check_exit_status(result, expect_error=sub_status_error)

    def start_granularity_blkcpy_on_transient_vm():
        """Start blockcopy with granularity operations """
        granularity_value = params.get('granularity_value').split()
        option_status_error = [
            value == 'yes'
            for value in params.get('option_status_error').split()
        ]
        for sub_option, sub_status_error in zip(granularity_value,
                                                option_status_error):
            _extend_blkcpy_execution(sub_option, sub_status_error)
            if not sub_status_error:
                # Check whether the granularity keyword appears in the log
                result = utils_misc.wait_for(
                    lambda: check_info_in_libvirtd_log_file('"granularity":%s' %
                                                            sub_option),
                    timeout=20)
                if not result:
                    test.fail(
                        "Failed to get expected messages from log file: %s." %
                        log_config_path)

            virsh.blockjob(vm_name,
                           device_target,
                           '--abort',
                           ignore_status=True)

    def start_bandwidth_blkcpy_on_transient_vm():
        """Start blockcopy with bandwidth operations """
        bandwidth_value = params.get('bandwidth_value').split()
        option_status_error = [
            value == 'yes'
            for value in params.get('option_status_error').split()
        ]
        for sub_option, sub_status_error in zip(bandwidth_value,
                                                option_status_error):
            _extend_blkcpy_execution(sub_option, sub_status_error)
            if not sub_status_error and 'default' not in sub_option:
                check_bandwidth_progress(sub_option)
            virsh.blockjob(vm_name,
                           device_target,
                           '--abort',
                           ignore_status=True)

    def start_timeout_blkcpy_on_transient_vm():
        """Start blockcopy with timeout operations """
        timeout_value = params.get('timeout_value').split()
        option_status_error = [
            value == 'yes'
            for value in params.get('option_status_error').split()
        ]
        for sub_option, sub_status_error in zip(timeout_value,
                                                option_status_error):
            _extend_blkcpy_execution(sub_option, sub_status_error)
            virsh.blockjob(vm_name,
                           device_target,
                           '--abort',
                           ignore_status=True)

    def start_bufsize_blkcpy_on_transient_vm():
        """Start blockcopy with buffer size operations """
        bufsize_value = params.get('bufsize_value').split()
        option_status_error = [
            value == 'yes'
            for value in params.get('option_status_error').split()
        ]
        for sub_option, sub_status_error in zip(bufsize_value,
                                                option_status_error):
            _extend_blkcpy_execution(sub_option, sub_status_error)
            if not sub_status_error:
                # Check whether the buf-size keyword appears in the log
                result = utils_misc.wait_for(
                    lambda: check_info_in_libvirtd_log_file('buf-size'),
                    timeout=20)
                if not result:
                    test.fail(
                        "Failed to get expected messages from log file: %s." %
                        log_config_path)
            virsh.blockjob(vm_name,
                           device_target,
                           '--abort',
                           ignore_status=True)

    def start_reuse_external_blkcpy_on_transient_vm():
        """Start reuse external blockcopy operations """
        reuse_external_value = params.get('reuse-external_value').split()
        option_status_error = [
            value == 'yes'
            for value in params.get('option_status_error').split()
        ]
        for sub_option, sub_status_error in zip(reuse_external_value,
                                                option_status_error):
            _extend_blkcpy_execution(sub_option,
                                     sub_status_error,
                                     pre_created=True)
            if sub_status_error:
                # Check that no block copy job is left behind
                job_result = virsh.blockjob(
                    vm_name, device_target, '--info',
                    ignore_status=True).stdout_text.strip()
                if 'No current block job for' not in job_result:
                    test.fail(
                        "Found an unexpected active block copy job")
            virsh.blockjob(vm_name,
                           device_target,
                           '--abort',
                           ignore_status=True)

    # Disk specific attributes.
    device_target = params.get("virt_disk_device_target", "vdd")
    blockcopy_option = params.get("blockcopy_option")
    backend_storage_type = params.get("backend_storage_type")
    device_type = params.get("virt_disk_device_type")

    status_error = "yes" == params.get("status_error")
    define_error = "yes" == params.get("define_error")
    snapshot_take = int(params.get("snapshot_take", "4"))

    # Configure libvirtd log path
    log_config_path = params.get("libvirtd_debug_file",
                                 "/var/log/libvirt/libvird.log")

    # Additional disk images.
    tmp_blkcopy_path = []
    external_snapshot_disks = []
    attach_disk_xml = None

    # Start VM and get all partitions in VM.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = utils_disk.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    try:
        # Setup backend storage
        if backend_storage_type == "file":
            attach_disk_xml = setup_file_backend_env(params)

        # Add disk xml.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug("disk xml is:\n%s" % attach_disk_xml)
        # Sync VM xml.
        if attach_disk_xml is None:
            test.fail("Fail to create attached disk xml")
        else:
            vmxml.add_device(attach_disk_xml)
            vmxml.sync()
        try:
            # Create a transient VM
            transient_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            virsh.undefine(vm_name, debug=True, ignore_status=False)
            virsh.create(transient_vmxml.xml, ignore_status=False, debug=True)
            vm.wait_for_login().close()
            debug_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            logging.debug("guest xml after undefined and recreated:%s\n",
                          debug_xml)
        except xcepts.LibvirtXMLError as xml_error:
            if not define_error:
                test.fail("Failed to define VM:\n%s" % str(xml_error))
        except virt_vm.VMStartError as details:
            # VM cannot be started
            if status_error:
                logging.info("VM failed to start as expected: %s",
                             str(details))
            else:
                test.fail("VM should start but failed: %s" % str(details))

        if blockcopy_option in ['pivot_shadow']:
            start_pivot_blkcpy_on_transient_vm()

        if blockcopy_option in ['granularity']:
            start_granularity_blkcpy_on_transient_vm()

        if blockcopy_option in ['bandwidth']:
            start_bandwidth_blkcpy_on_transient_vm()

        if blockcopy_option in ['timeout']:
            start_timeout_blkcpy_on_transient_vm()

        if blockcopy_option in ['buf_size']:
            start_bufsize_blkcpy_on_transient_vm()

        if blockcopy_option in ['reuse_external']:
            start_reuse_external_blkcpy_on_transient_vm()
    finally:
        if virsh.domain_exists(vm_name):
            # Clean up snapshots and restore the VM
            try:
                libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)
            finally:
                if vm.is_alive():
                    vm.destroy(gracefully=False)
                virsh.define(vmxml_backup.xml, debug=True)
        vmxml_backup.sync()
        # Clean up backend storage
        for tmp_path in tmp_blkcopy_path:
            if os.path.exists(tmp_path):
                libvirt.delete_local_disk('file', tmp_path)
        if backend_storage_type == "iscsi":
            libvirt.setup_or_cleanup_iscsi(is_setup=False)
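
# Minimal illustration of the copy-and-pivot flow exercised above (hypothetical
# device and path; virsh.* are the avocado-vt wrappers used throughout):
#
#     virsh.blockcopy(vm_name, "vdb", "/tmp/copy.img", options="--wait --verbose")
#     virsh.blockjob(vm_name, "vdb", "--pivot")   # switch the domain to the copy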
Exemplo n.º 29
def run(test, params, env):
    """
    Test virsh blockpull with various option on VM.

    1.Prepare backend storage (iscsi,nbd,file,block)
    2.Start VM
    3.Execute virsh blockpull target command
    4.Check status after operation accomplished
    5.Clean up test environment
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    def setup_iscsi_block_env(params):
        """
        Setup iscsi as block test environment

        :param params: one dict to wrap up parameters
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        libvirt.setup_or_cleanup_iscsi(is_setup=False)
        emulated_size = params.get("emulated_size", "10G")
        chap_user = params.get("iscsi_user")
        chap_passwd = params.get("iscsi_password")
        auth_sec_usage_type = params.get("secret_usage_type")
        encoding = locale.getpreferredencoding()
        secret_string = base64.b64encode(
            chap_passwd.encode(encoding)).decode(encoding)
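        # The CHAP password is stored base64-encoded, which is the form
        # virsh secret-set-value expects.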

        device_source = libvirt.setup_or_cleanup_iscsi(
            is_setup=True,
            is_login=True,
            image_size=emulated_size,
            chap_user=chap_user,
            chap_passwd=chap_passwd,
            portal_ip="127.0.0.1")

        auth_sec_uuid = libvirt_ceph_utils._create_secret(
            auth_sec_usage_type, secret_string)
        disk_auth_dict = {
            "auth_user": chap_user,
            "secret_type": auth_sec_usage_type,
            "secret_uuid": auth_sec_uuid
        }

        disk_src_dict = {'attrs': {'dev': device_source}}
        iscsi_disk = libvirt_disk.create_primitive_disk_xml(
            type_name, disk_device, device_target, device_bus, device_format,
            disk_src_dict, disk_auth_dict)
        # Add disk xml.
        logging.debug("disk xml is:\n%s" % iscsi_disk)
        # Sync VM xml.
        vmxml.add_device(iscsi_disk)
        vmxml.sync()

    def setup_file_env(params):
        """
        Setup file test environment

        :param params: one dict to wrap up parameters
        """
        # Skip creating the additional disk when additional_disk is False
        if additional_disk is False:
            return
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        backstore_image_target_path = params.get("backstore_image_name")
        tmp_blkpull_path.append(backstore_image_target_path)
        libvirt.create_local_disk("file", backstore_image_target_path, "1",
                                  "qcow2")
        backing_chain_list.append(backstore_image_target_path)

        disk_src_dict = {"attrs": {"file": backstore_image_target_path}}

        file_disk = libvirt_disk.create_primitive_disk_xml(
            type_name, disk_device, device_target, device_bus, device_format,
            disk_src_dict, None)
        logging.debug("disk xml is:\n%s" % file_disk)
        # Sync VM xml.
        vmxml.add_device(file_disk)
        vmxml.sync()
        _generate_backstore_attribute(params)

    def _generate_backstore_attribute(params):
        """
        Create one disk with backingStore attribute by creating snapshot

        :param params: one dict to wrap up parameters
        """
        device_target = params.get("virt_disk_device_target")
        top_file_image_name = params.get("top_file_image_name")
        second_file_image_name = params.get("second_file_image_name")
        tmp_blkpull_path.append(top_file_image_name)
        tmp_blkpull_path.append(second_file_image_name)
        backing_chain_list.append(top_file_image_name)
        if vm.is_dead():
            vm.start()
        snapshot_tmp_name = "blockpull_tmp_snap"
        options = " %s --disk-only --diskspec %s,file=%s" % (
            snapshot_tmp_name, 'vda', second_file_image_name)
        options += " --diskspec %s,file=%s" % (device_target,
                                               top_file_image_name)
        virsh.snapshot_create_as(vm_name,
                                 options,
                                 ignore_status=False,
                                 debug=True)
        vm.destroy()
        virsh.snapshot_delete(vm_name,
                              snapshot_tmp_name,
                              "--metadata",
                              ignore_status=False,
                              debug=True)
        vmxml_dir = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug("backstore prepare readiness :\n%s", vmxml_dir)

    def setup_block_env(params):
        """
        Setup block test environment

        :param params: one dict to wrap up parameters
        """
        if additional_disk is False:
            return
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)

        device_source = libvirt.setup_or_cleanup_iscsi(is_setup=True)
        disk_src_dict = {'attrs': {'dev': device_source}}
        backing_chain_list.append(device_source)

        file_disk = libvirt_disk.create_primitive_disk_xml(
            type_name, disk_device, device_target, device_bus, device_format,
            disk_src_dict, None)
        logging.debug("disk xml is:\n%s" % file_disk)
        # Sync VM xml.
        vmxml.add_device(file_disk)
        vmxml.sync()
        _generate_backstore_attribute(params)

    def setup_nbd_env(params):
        """
        Setup nbd test environment

        :param params: one dict to wrap up parameters
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        # Get server hostname.
        hostname = process.run('hostname',
                               ignore_status=False,
                               shell=True,
                               verbose=True).stdout_text.strip()
        # Setup backend storage
        nbd_server_host = hostname
        nbd_server_port = params.get("nbd_server_port", "10001")
        image_path = params.get("emulated_image",
                                "/var/lib/libvirt/images/nbdtest.img")
        enable_ga_agent = "yes" == params.get("enable_ga_agent", "no")

        # Create NbdExport object; bind to run()'s nbd so the final
        # cleanup can stop the server
        nonlocal nbd
        nbd = NbdExport(image_path,
                        image_format=device_format,
                        port=nbd_server_port)
        nbd.start_nbd_server()

        # Prepare disk source xml
        source_attrs_dict = {"protocol": "nbd", "tls": "%s" % "no"}

        disk_src_dict = {}
        disk_src_dict.update({"attrs": source_attrs_dict})
        disk_src_dict.update(
            {"hosts": [{
                "name": nbd_server_host,
                "port": nbd_server_port
            }]})
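        # This should render a disk <source> along the lines of (illustrative):
        #   <source protocol='nbd' tls='no'><host name='HOST' port='PORT'/></source>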

        network_disk = libvirt_disk.create_primitive_disk_xml(
            type_name, disk_device, device_target, device_bus, device_format,
            disk_src_dict, None)

        logging.debug("disk xml is:\n%s" % network_disk)
        # Sync VM xml.
        vmxml.add_device(network_disk)
        vmxml.sync()
        if enable_ga_agent:
            vm.prepare_guest_agent()
            vm.destroy(gracefully=False)

    def check_chain_backing_files(chain_list, disk_target):
        """
        Check chain backing files

        :param chain_list: list, expected backing chain list
        :param disk_target: disk target
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disks = vmxml.devices.by_device_tag('disk')
        disk_xml = None
        for disk in disks:
            if disk.target['dev'] == disk_target:
                disk_xml = disk
                break
        logging.debug("disk xml in check_blockcommit_with_bandwidth: %s\n",
                      disk_xml.xmltreefile)
        backingstore_list = disk_xml.get_backingstore_list()
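        # The last entry is the deepest element of the parsed chain; it is not
        # part of the comparison below, hence the pop.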
        backingstore_list.pop()
        parse_source_file_list = [
            elem.find('source').get('file') or elem.find('source').get('name')
            or elem.find('source').get('dev') for elem in backingstore_list
        ]
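        # The backingStore elements are parsed from the active layer downward,
        # while chain_list was built base-first, so truncate to the compared
        # depth and reverse before comparing.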
        logging.debug("before expected backing chain list is %s", chain_list)
        chain_list = chain_list[0:3]
        if backend_storage_type == "nbd":
            chain_list = chain_list[0:1]
        chain_list = chain_list[::-1]
        logging.debug("expected backing chain list is %s", chain_list)
        logging.debug("parse source list is %s", parse_source_file_list)
        # Check whether the two lists are equal
        if blockpull_option in ['keep_relative']:
            if parse_source_file_list[-1] != chain_list[-1]:
                test.fail(
                    "the last element of the checked backing chain is not "
                    "equal to the expected one")
        elif parse_source_file_list != chain_list:
            test.fail("the checked backing chain is not equal to the "
                      "expected one")

    def _extend_blkpull_execution(base=None,
                                  status_error=False,
                                  err_msg=None,
                                  expected_msg=None):
        """
        Wrap up blockpull execution combining with various options

        :params base: specific base
        :params status_error: expected error or not
        :params err_msg: error message if blockpull command fail
        :params expected_msg: jobinfo expected message if checked
        """
        blockpull_options = params.get("options")
        if '--base' in blockpull_options:
            if base:
                blockpull_options = params.get("options") % base
            else:
                blockpull_options = params.get(
                    "options") % external_snapshot_disks[0]
        result = virsh.blockpull(vm_name,
                                 device_target,
                                 blockpull_options,
                                 ignore_status=True,
                                 debug=True)
        libvirt.check_exit_status(result, expect_error=status_error)
        if status_error:
            if err_msg not in result.stdout_text and err_msg not in result.stderr_text:
                test.fail(
                    "Cannot find the expected failure message in stdout: %s "
                    "or stderr: %s" %
                    (result.stdout_text, result.stderr_text))
        res = virsh.blockjob(vm_name, device_target, "--info").stdout.strip()
        logging.debug("virsh jobinfo is :%s\n", res)
        if expected_msg:
            job_msg = expected_msg
        else:
            job_msg = "No current block job for %s" % device_target
        if res and job_msg not in res:
            test.fail("Find unexpected block job information in %s" % res)

    def start_async_blkpull_on_vm():
        """Start blockpull with async"""
        context_msg = "Pull aborted"
        _extend_blkpull_execution(None, True, context_msg)

    def start_bandwidth_blkpull_on_vm():
        """Start blockpull with bandwidth option """
        _extend_blkpull_execution()

    def start_timeout_blkpull_on_vm():
        """Start blockpull with timeout option """
        _extend_blkpull_execution()

    def start_middle_to_top_to_base_on_vm():
        """Start blockpull from middle to top """
        _extend_blkpull_execution()
        virsh.blockjob(vm_name, device_target, '--abort', ignore_status=True)
        params.update({"options": params.get("top_options")})
        _extend_blkpull_execution()

    def start_reuse_external_blkpull_on_vm():
        """Start blockpull with reuse_external """
        _extend_blkpull_execution(base=backing_chain_list[1])
        check_chain_backing_files(backing_chain_list,
                                  params.get("virt_disk_device_target"))
        virsh.blockjob(vm_name, device_target, '--abort', ignore_status=True)
        params.update({"options": params.get("top_options")})
        _extend_blkpull_execution()

    def start_top_as_base_blkpull_on_vm():
        """Start blockpull with top as base """
        error_msg = "error: invalid argument"
        _extend_blkpull_execution(base=backing_chain_list[-1],
                                  status_error=True,
                                  err_msg=error_msg)

    def start_base_to_top_blkpull_on_vm():
        """Start blockpull with base as top """
        _extend_blkpull_execution()

    def start_middletotop_blkpull_on_vm():
        """start middletotop blockpull on vm """
        _extend_blkpull_execution()
        check_chain_backing_files(backing_chain_list,
                                  params.get("virt_disk_device_target"))

    # Disk specific attributes.
    type_name = params.get("virt_disk_device_type")
    disk_device = params.get("virt_disk_device")
    device_target = params.get("virt_disk_device_target")
    device_bus = params.get("virt_disk_device_bus")
    device_format = params.get("virt_disk_device_format")

    blockpull_option = params.get("blockpull_option")
    options_value = params.get("options_value")
    backend_storage_type = params.get("backend_storage_type")
    backend_path = params.get("backend_path")
    additional_disk = "yes" == params.get("additional_disk", "yes")

    status_error = "yes" == params.get("status_error")
    define_error = "yes" == params.get("define_error")
    snapshot_take = int(params.get("snapshot_take", "4"))
    fill_in_vm = "yes" == params.get("fill_in_vm", "no")

    first_src_file = libvirt_disk.get_first_disk_source(vm)
    pre_set_root_dir = os.path.dirname(first_src_file)
    replace_disk_image = None

    # Additional disk images.
    tmp_dir = data_dir.get_data_dir()
    tmp_blkpull_path = []
    disks_img = []
    external_snapshot_disks = []
    attach_disk_xml = None
    backing_chain_list = []

    # Initialize one NbdExport object
    nbd = None

    # Start VM and get all partitions in VM.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = utils_disk.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    try:
        utils_secret.clean_up_secrets()

        # Setup backend storage
        if backend_storage_type == "iscsi":
            setup_iscsi_block_env(params)
        elif backend_storage_type == "file":
            setup_file_env(params)
        elif backend_storage_type == "block":
            setup_block_env(params)
        elif backend_storage_type == "nbd":
            setup_nbd_env(params)
        try:
            vm.start()
            vm.wait_for_login().close()
        except virt_vm.VMStartError as details:
            # VM cannot be started
            if status_error:
                logging.info("VM failed to start as expected: %s",
                             str(details))
            else:
                test.fail("VM should start but failed: %s" % str(details))

        if fill_in_vm:
            libvirt_disk.fill_null_in_vm(vm, device_target)

        if backend_path in ['native_path']:
            external_snapshot_disks = libvirt_disk.make_external_disk_snapshots(
                vm, device_target, "blockpull_snapshot", snapshot_take)
            backing_chain_list.extend(external_snapshot_disks)
        elif backend_path in ['reuse_external']:
            replace_disk_image, backing_chain_list = libvirt_disk.make_relative_path_backing_files(
                vm, pre_set_root_dir, first_src_file, device_format)
            params.update({
                'disk_source_name': replace_disk_image,
                'disk_type': 'file',
                'disk_format': 'qcow2',
                'disk_source_protocol': 'file'
            })
            libvirt.set_vm_disk(vm, params, tmp_dir)

        if blockpull_option in ['middle_to_top']:
            start_middletotop_blkpull_on_vm()

        if blockpull_option in ['async']:
            start_async_blkpull_on_vm()

        if blockpull_option in ['bandwidth']:
            start_bandwidth_blkpull_on_vm()

        if blockpull_option in ['timeout']:
            start_timeout_blkpull_on_vm()

        if blockpull_option in ['middle_to_top_to_base']:
            start_middle_to_top_to_base_on_vm()

        if blockpull_option in ['keep_relative']:
            start_reuse_external_blkpull_on_vm()

        if blockpull_option in ['top_as_base']:
            start_top_as_base_blkpull_on_vm()

        if blockpull_option in ['base_to_top']:
            start_base_to_top_blkpull_on_vm()
    finally:
        # Recover VM.
        libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)
        utils_secret.clean_up_secrets()
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()
        # Delete reuse external disk if exists
        for disk in external_snapshot_disks:
            if os.path.exists(disk):
                os.remove(disk)
        # Clean up backend storage
        for tmp_path in tmp_blkpull_path:
            if os.path.exists(tmp_path):
                libvirt.delete_local_disk('file', tmp_path)
        if backend_storage_type == "iscsi":
            libvirt.setup_or_cleanup_iscsi(is_setup=False)
        if nbd:
            nbd.cleanup()
        # Clean up created folders
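        # (assumption: make_relative_path_backing_files lays the relative
        # backing files out in folders "a" through "d" under pre_set_root_dir)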
        for folder in [
                chr(letter) for letter in range(ord('a'),
                                                ord('a') + 4)
        ]:
            rm_cmd = "rm -rf %s" % os.path.join(pre_set_root_dir, folder)
            process.run(rm_cmd, shell=True)
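
# Minimal illustration of the pull flows exercised above (hypothetical device
# and base path):
#
#     virsh.blockpull(vm_name, "vdb", "--wait --verbose")            # flatten the whole chain
#     virsh.blockpull(vm_name, "vdb", "--base /path/to/base.qcow2")  # pull down to a given base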
Exemplo n.º 30
def run(test, params, env):
    """
    Test virsh migrate command.
    """
    def set_feature(vmxml, feature, value):
        """
        Set guest features for PPC

        :param vmxml: guest xml
        :param feature: the feature to set, 'hpt' or 'htm'
        :param value: the value to set for the feature
        """
        features_xml = vm_xml.VMFeaturesXML()
        if feature == 'hpt':
            features_xml.hpt_resizing = value
        elif feature == 'htm':
            features_xml.htm = value
        vmxml.features = features_xml
        vmxml.sync()

    def trigger_hpt_resize(session):
        """
        Check the HPT order file and dmesg

        :param session: the session to guest

        :raise: test.fail if required message is not found
        """
        hpt_order_path = "/sys/kernel/debug/powerpc/hpt_order"
        hpt_order = session.cmd_output('cat %s' % hpt_order_path).strip()
        hpt_order = int(hpt_order)
        logging.info('Current hpt_order is %d', hpt_order)
        hpt_order += 1
        cmd = 'echo %d > %s' % (hpt_order, hpt_order_path)
        cmd_result = session.cmd_status_output(cmd)
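        # cmd_status_output returns (status, combined output); the single
        # output string is fed to both stdout and stderr below.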
        result = process.CmdResult(stderr=cmd_result[1],
                                   stdout=cmd_result[1],
                                   exit_status=cmd_result[0])
        libvirt.check_exit_status(result)
        dmesg = session.cmd('dmesg')
        dmesg_content = params.get('dmesg_content').split('|')
        for content in dmesg_content:
            if content % hpt_order not in dmesg:
                test.fail("'%s' is missing in dmesg" % (content % hpt_order))
            else:
                logging.info("'%s' is found in dmesg", content % hpt_order)

    def check_qemu_cmd_line(content, err_ignore=False):
        """
        Check the specified content in the qemu command line

        :param content: the desired string to search
        :param err_ignore: True to return False when fail
                           False to raise exception when fail

        :return: True if exist, False otherwise
        """
        cmd = 'ps -ef|grep qemu|grep -v grep'
        qemu_line = results_stdout_52lts(process.run(cmd, shell=True))
        if content not in qemu_line:
            if err_ignore:
                return False
            else:
                test.fail("Expected '%s' was not found in "
                          "qemu command line" % content)
        return True

    def check_vm_network_accessed(session=None):
        """
        The operations to the VM need to be done before or after
        migration happens

        :param session: The session object to the host

        :raise: test.error when ping fails
        """
        # Confirm local/remote VM can be accessed through network.
        logging.info("Check VM network connectivity")
        s_ping, _ = utils_test.ping(vm.get_address(),
                                    count=10,
                                    timeout=20,
                                    output_func=logging.debug,
                                    session=session)
        if s_ping != 0:
            if session:
                session.close()
            test.fail("%s did not respond after %d sec." % (vm.name, 20))

    def check_virsh_command_and_option(command, option=None):
        """
        Check if virsh command exists

        :param command: the command to be checked
        :param option: the command option to be checked
        """
        msg = "This version of libvirt does not support "
        if not virsh.has_help_command(command):
            test.cancel(msg + "virsh command '%s'" % command)

        if option and not virsh.has_command_help_match(command, option):
            test.cancel(msg + "virsh command '%s' with option '%s'" %
                        (command, option))

    def add_ctrls(vm_xml, dev_type="pci", dev_index="0", dev_model="pci-root"):
        """
        Add multiple devices

        :param dev_type: the type of the device to be added
        :param dev_index: the maximum index of the device to be added
        :param dev_model: the model of the device to be added
        """
        for inx in range(0, int(dev_index) + 1):
            newcontroller = Controller("controller")
            newcontroller.type = dev_type
            newcontroller.index = inx
            newcontroller.model = dev_model
            logging.debug("New device is added:\n%s", newcontroller)
            vm_xml.add_device(newcontroller)
        vm_xml.sync()
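
    # For example, add_ctrls(new_xml, dev_index="3") defines pci-root
    # controllers with indexes 0 through 3.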

    def do_migration(vm, dest_uri, options, extra):
        """
        Execute the migration with given parameters
        :param vm: the guest to be migrated
        :param dest_uri: the destination uri for migration
        :param options: options next to 'migrate' command
        :param extra: options in the end of the migrate command line

        :return: CmdResult object
        """
        logging.info("Sleeping 10 seconds before migration")
        time.sleep(10)
        # Migrate the guest.
        migration_res = vm.migrate(dest_uri, options, extra, **virsh_args)
        logging.info("Migration out: %s",
                     results_stdout_52lts(migration_res).strip())
        logging.info("Migration error: %s",
                     results_stderr_52lts(migration_res).strip())
        if int(migration_res.exit_status) != 0:
            logging.error("Migration failed for %s.", vm_name)
            return migration_res

        if vm.is_alive():  # vm.connect_uri was updated
            logging.info("VM is alive on destination %s.", dest_uri)
        else:
            test.fail("VM is not alive on destination %s" % dest_uri)

        # Throws exception if console shows panic message
        vm.verify_kernel_crash()
        return migration_res

    def cleanup_libvirtd_log(log_file):
        """
        Remove existing libvirtd log file on source and target host.

        :param log_file: log file with absolute path
        """
        if os.path.exists(log_file):
            logging.debug("Delete local libvirt log file '%s'", log_file)
            os.remove(log_file)
        cmd = "rm -f %s" % log_file
        logging.debug("Delete remote libvirt log file '%s'", log_file)
        run_remote_cmd(cmd)

    def cleanup_dest(vm):
        """
        Clean up the destination host environment
        when doing the uni-direction migration.

        :param vm: the guest to be cleaned up
        """
        logging.info("Cleaning up VMs on %s", vm.connect_uri)
        try:
            if virsh.domain_exists(vm.name, uri=vm.connect_uri):
                vm_state = vm.state()
                if vm_state == "paused":
                    vm.resume()
                elif vm_state == "shut off":
                    vm.start()
                vm.destroy(gracefully=False)

                if vm.is_persistent():
                    vm.undefine()

        except Exception as detail:
            logging.error("Cleaning up destination failed.\n%s", detail)

    def run_remote_cmd(cmd):
        """
        A function to run a command on remote host.

        :param cmd: the command to be executed

        :return: CmdResult object
        """
        remote_runner = remote.RemoteRunner(host=server_ip,
                                            username=server_user,
                                            password=server_pwd)
        cmdResult = remote_runner.run(cmd, ignore_status=True)
        if cmdResult.exit_status:
            test.fail("Failed to run '%s' on remote: %s" %
                      (cmd, results_stderr_52lts(cmdResult).strip()))
        return cmdResult

    def run_stress_in_vm():
        """
        The function to load stress in VM
        """
        stress_args = params.get(
            "stress_args", "--cpu 8 --io 4 "
            "--vm 2 --vm-bytes 128M "
            "--timeout 20s")
        try:
            vm_session.cmd('stress %s' % stress_args)
        except Exception as detail:
            logging.debug(detail)

    def check_timeout_postcopy(params):
        """
        Check the vm state on target host after timeout
        when --postcopy and --timeout-postcopy are used.
        The vm state is expected as running.

        :param params: the parameters used
        """
        timeout = int(params.get("timeout_postcopy", 10))
        time.sleep(timeout + 1)
        remote_virsh_session = virsh.VirshPersistent(**remote_virsh_dargs)
        vm_state = results_stdout_52lts(
            remote_virsh_session.domstate(vm_name)).strip()
        if vm_state != "running":
            remote_virsh_session.close_session()
            test.fail(
                "After timeout '%s' seconds, "
                "the vm state on target host should "
                "be 'running', but '%s' found", timeout, vm_state)
        remote_virsh_session.close_session()

    def get_usable_compress_cache(pagesize):
        """
        Get a number which is bigger than pagesize and is power of two.

        :param pagesize: the given integer
        :return: an integer satisfying the criteria
        """
        def is_power_of_two(num):
            return (num & (num - 1)) == 0

        item = pagesize
        found = False
        while not found:
            item += 1
            found = is_power_of_two(item)
        logging.debug(
            "%d is the smallest power of 2 bigger than %s", item, pagesize)
        return item
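
    # Worked example: for a 64 KiB page size (65536, itself a power of two),
    # get_usable_compress_cache(65536) returns 131072, the smallest power of
    # two strictly greater than the page size.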

    check_parameters(test, params)

    # Params for NFS shared storage
    shared_storage = params.get("migrate_shared_storage", "")
    if shared_storage == "":
        default_guest_asset = defaults.get_default_guest_os_info()['asset']
        default_guest_asset = "%s.qcow2" % default_guest_asset
        shared_storage = os.path.join(params.get("nfs_mount_dir"),
                                      default_guest_asset)
        logging.debug("shared_storage:%s", shared_storage)

    # params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    # Params to update disk using shared storage
    params["disk_type"] = "file"
    params["disk_source_protocol"] = "netfs"
    params["mnt_path_name"] = params.get("nfs_mount_dir")

    # Local variables
    virsh_args = {"ignore_status": True, "debug": True}
    server_ip = params.get("server_ip")
    server_user = params.get("server_user", "root")
    server_pwd = params.get("server_pwd")
    extra = params.get("virsh_migrate_extra")
    options = params.get("virsh_migrate_options")
    src_uri = params.get("virsh_migrate_connect_uri")
    dest_uri = params.get("virsh_migrate_desturi")
    log_file = params.get("libvirt_log", "/var/log/libvirt/libvirtd.log")
    check_complete_job = "yes" == params.get("check_complete_job", "no")
    config_libvirtd = "yes" == params.get("config_libvirtd", "no")
    contrl_index = params.get("new_contrl_index", None)
    grep_str_remote_log = params.get("grep_str_remote_log", "")
    grep_str_local_log = params.get("grep_str_local_log", "")
    stress_in_vm = "yes" == params.get("stress_in_vm", "no")
    remote_virsh_dargs = {
        'remote_ip': server_ip,
        'remote_user': server_user,
        'remote_pwd': server_pwd,
        'unprivileged_user': None,
        'ssh_remote_auth': True
    }

    hpt_resize = params.get("hpt_resize", None)
    htm_state = params.get("htm_state", None)
    qemu_check = params.get("qemu_check", None)
    xml_check_after_mig = params.get("guest_xml_check_after_mig", None)

    arch = platform.machine()
    if any([hpt_resize, contrl_index, htm_state]) and 'ppc64' not in arch:
        test.cancel("The case is PPC only.")

    # For TLS
    tls_recovery = params.get("tls_auto_recovery", "yes")
    # qemu config
    qemu_conf_dict = None
    # libvirtd config
    libvirtd_conf_dict = None

    remote_virsh_session = None
    vm = None
    vm_session = None
    libvirtd_conf = None
    qemu_conf = None
    mig_result = None
    test_exception = None
    is_TestError = False
    is_TestFail = False
    is_TestSkip = False
    asynch_migration = False

    # Objects to be cleaned up in the end
    objs_list = []
    tls_obj = None

    # Local variables
    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()

    # For safety reasons, we'd better back up the XML file.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()
    if not orig_config_xml:
        test.error("Backing up xmlfile failed.")

    try:
        # Change VM xml in below part
        if contrl_index:
            new_xml.remove_all_device_by_type('controller')
            logging.debug("After removing controllers, current XML:\n%s\n",
                          new_xml)
            add_ctrls(new_xml, dev_index=contrl_index)

        if extra.count("--tls"):
            qemu_conf_dict = {"migrate_tls_x509_verify": "1"}
            # Setup TLS
            tls_obj = TLSConnection(params)
            if tls_recovery == "yes":
                objs_list.append(tls_obj)
                tls_obj.auto_recover = True
                tls_obj.conn_setup()
            # Setup qemu configure
            logging.debug("Configure the qemu")
            cleanup_libvirtd_log(log_file)
            qemu_conf = libvirt.customize_libvirt_config(qemu_conf_dict,
                                                         config_type="qemu",
                                                         remote_host=True,
                                                         extra_params=params)
        # Setup libvirtd
        if config_libvirtd:
            logging.debug("Configure the libvirtd")
            cleanup_libvirtd_log(log_file)
            libvirtd_conf_dict = setup_libvirtd_conf_dict(params)
            libvirtd_conf = libvirt.customize_libvirt_config(
                libvirtd_conf_dict, remote_host=True, extra_params=params)
        if hpt_resize:
            set_feature(new_xml, 'hpt', hpt_resize)

        if htm_state:
            set_feature(new_xml, 'htm', htm_state)
        # Change the disk of the vm to shared disk and then start VM
        libvirt.set_vm_disk(vm, params)
        if not vm.is_alive():
            vm.start()

        logging.debug("Guest xml after starting:\n%s",
                      vm_xml.VMXML.new_from_dumpxml(vm_name))

        if qemu_check:
            check_content = qemu_check
            if hpt_resize:
                check_content = "%s%s" % (qemu_check, hpt_resize)
            if htm_state:
                check_content = "%s%s" % (qemu_check, htm_state)
            check_qemu_cmd_line(check_content)

        vm_session = vm.wait_for_login()
        check_vm_network_accessed()

        if hpt_resize and hpt_resize != 'disabled':
            trigger_hpt_resize(vm_session)

        if stress_in_vm:
            pkg_name = 'stress'
            logging.debug("Check if stress tool is installed")
            pkg_mgr = utils_package.package_manager(vm_session, pkg_name)
            if not pkg_mgr.is_installed(pkg_name):
                logging.debug("Stress tool will be installed")
                if not pkg_mgr.install():
                    test.error("Package '%s' installation fails" % pkg_name)

            stress_thread = threading.Thread(target=run_stress_in_vm, args=())
            stress_thread.start()

        if extra.count("timeout-postcopy"):
            asynch_migration = True
            func_name = check_timeout_postcopy
        if extra.count("comp-xbzrle-cache"):
            cache = get_usable_compress_cache(memory.get_page_size())
            extra = "%s %s" % (extra, cache)

        # For --postcopy enable
        postcopy_options = params.get("postcopy_options")
        if postcopy_options and not extra.count(postcopy_options):
            extra = "%s %s" % (extra, postcopy_options)

        if not asynch_migration:
            mig_result = do_migration(vm, dest_uri, options, extra)
        else:
            migration_test = libvirt.MigrationTest()

            logging.debug("vm.connect_uri=%s", vm.connect_uri)
            vms = [vm]
            try:
                migration_test.do_migration(vms,
                                            None,
                                            dest_uri,
                                            'orderly',
                                            options,
                                            thread_timeout=900,
                                            ignore_status=True,
                                            func=func_name,
                                            extra_opts=extra,
                                            func_params=params)
                mig_result = migration_test.ret
            except exceptions.TestFail as fail_detail:
                test.fail(fail_detail)
            except exceptions.TestSkipError as skip_detail:
                test.cancel(skip_detail)
            except exceptions.TestError as error_detail:
                test.error(error_detail)
            except Exception as details:
                mig_result = migration_test.ret
                logging.error(details)

        if int(mig_result.exit_status) != 0:
            test.fail(results_stderr_52lts(mig_result).strip())

        if check_complete_job:
            search_str_domjobinfo = params.get("search_str_domjobinfo", None)
            opts = "--completed"
            args = vm_name + " " + opts
            check_virsh_command_and_option("domjobinfo", opts)
            jobinfo = results_stdout_52lts(
                virsh.domjobinfo(args, debug=True,
                                 ignore_status=True)).strip()
            logging.debug("Local job info on completion:\n%s", jobinfo)
            if extra.count("comp-xbzrle-cache") and search_str_domjobinfo:
                search_str_domjobinfo = "%s %s" % (search_str_domjobinfo,
                                                   cache // 1024)
            if search_str_domjobinfo:
                if not re.search(search_str_domjobinfo, jobinfo):
                    test.fail("Fail to search '%s' on local:\n%s" %
                              (search_str_domjobinfo, jobinfo))
            # Check remote host
            if not remote_virsh_session:
                remote_virsh_session = virsh.VirshPersistent(
                    **remote_virsh_dargs)
            jobinfo = results_stdout_52lts(
                remote_virsh_session.domjobinfo(args,
                                                debug=True,
                                                ignore_status=True)).strip()
            logging.debug("Remote job info on completion:\n%s", jobinfo)
            if search_str_domjobinfo:
                if not re.search(search_str_domjobinfo, jobinfo):
                    remote_virsh_session.close_session()
                    test.fail("Fail to search '%s' on remote:\n%s" %
                              (search_str_domjobinfo, jobinfo))
            remote_virsh_session.close_session()

        if grep_str_local_log:
            cmd = "grep -E '%s' %s" % (grep_str_local_log, log_file)
            cmdRes = process.run(cmd, shell=True, ignore_status=True)
            if cmdRes.exit_status:
                test.fail(results_stderr_52lts(cmdRes).strip())
        if grep_str_remote_log:
            cmd = "grep -E '%s' %s" % (grep_str_remote_log, log_file)
            run_remote_cmd(cmd)

        if xml_check_after_mig:
            if not remote_virsh_session:
                remote_virsh_session = virsh.VirshPersistent(
                    **remote_virsh_dargs)
            target_guest_dumpxml = results_stdout_52lts(
                remote_virsh_session.dumpxml(vm_name,
                                             debug=True,
                                             ignore_status=True)).strip()
            if hpt_resize:
                check_str = hpt_resize
            elif htm_state:
                check_str = htm_state
            if hpt_resize or htm_state:
                xml_check_after_mig = "%s'%s'" % (xml_check_after_mig,
                                                  check_str)
                if not re.search(xml_check_after_mig, target_guest_dumpxml):
                    remote_virsh_session.close_session()
                    test.fail("Fail to search '%s' in target guest XML:\n%s" %
                              (xml_check_after_mig, target_guest_dumpxml))

            if contrl_index:
                all_ctrls = re.findall(xml_check_after_mig,
                                       target_guest_dumpxml)
                if len(all_ctrls) != int(contrl_index) + 1:
                    remote_virsh_session.close_session()
                    test.fail(
                        "%s pci-root controllers are expected in guest XML, "
                        "but found %s" %
                        (int(contrl_index) + 1, len(all_ctrls)))
            remote_virsh_session.close_session()

        server_session = remote.wait_for_login('ssh', server_ip, '22',
                                               server_user, server_pwd,
                                               r"[\#\$]\s*$")
        check_vm_network_accessed(server_session)
        server_session.close()
    except exceptions.TestFail as details:
        is_TestFail = True
        test_exception = details
    except exceptions.TestSkipError as details:
        is_TestSkip = True
        test_exception = details
    except exceptions.TestError as details:
        is_TestError = True
        test_exception = details
    except Exception as details:
        test_exception = details
    finally:
        logging.debug("Recover test environment")
        try:
            # Clean VM on destination
            vm.connect_uri = dest_uri
            cleanup_dest(vm)
            vm.connect_uri = src_uri

            logging.info("Recovery VM XML configration")
            orig_config_xml.sync()
            logging.debug("The current VM XML:\n%s",
                          orig_config_xml.xmltreefile)

            if remote_virsh_session:
                remote_virsh_session.close_session()

            if extra.count("--tls"):
                logging.debug("Recover the qemu configuration")
                libvirt.customize_libvirt_config(None,
                                                 config_type="qemu",
                                                 remote_host=True,
                                                 extra_params=params,
                                                 is_recover=True,
                                                 config_object=qemu_conf)

            if config_libvirtd:
                logging.debug("Recover the libvirtd configuration")
                libvirt.customize_libvirt_config(None,
                                                 remote_host=True,
                                                 extra_params=params,
                                                 is_recover=True,
                                                 config_object=libvirtd_conf)

            logging.info("Remove local NFS image")
            source_file = params.get("source_file")
            libvirt.delete_local_disk("file", path=source_file)

            if objs_list:
                for obj in objs_list:
                    logging.debug("Clean up local objs")
                    del obj

        except Exception as exception_detail:
            if (not test_exception and not is_TestError and not is_TestFail
                    and not is_TestSkip):
                raise exception_detail
            else:
                # if any of above exceptions has been raised, only print
                # error log here to avoid of hiding the original issue
                logging.error(exception_detail)
    # Check result
    if is_TestFail:
        test.fail(test_exception)
    if is_TestSkip:
        test.cancel(test_exception)
    if is_TestError:
        test.error(test_exception)
    if not test_exception:
        logging.info("Case execution is done.")
    else:
        test.error(test_exception)
Example 31
    def vm_stress_events(self, event, vm):
        """
        Stress events

        :param event: event name
        :param vm: vm object
        """
        dargs = {'ignore_status': True, 'debug': True}
        for itr in range(self.iterations):
            if "vcpupin" in event:
                for vcpu in range(int(self.current_vcpu)):
                    result = virsh.vcpupin(vm.name, vcpu,
                                           random.choice(self.host_cpu_list),
                                           **dargs)
                    if not self.ignore_status:
                        libvirt.check_exit_status(result)
            elif "emulatorpin" in event:
                for vcpu in range(int(self.current_vcpu)):
                    result = virsh.emulatorpin(vm.name,
                                               random.choice(
                                                   self.host_cpu_list),
                                               **dargs)
                    if not self.ignore_status:
                        libvirt.check_exit_status(result)
            elif "suspend" in event:
                result = virsh.suspend(vm.name, **dargs)
                if not self.ignore_status:
                    libvirt.check_exit_status(result)
                time.sleep(self.event_sleep_time)
                result = virsh.resume(vm.name, **dargs)
                if not self.ignore_status:
                    libvirt.check_exit_status(result)
            elif "cpuhotplug" in event:
                result = virsh.setvcpus(vm.name, self.max_vcpu, "--live",
                                        **dargs)
                if not self.ignore_status:
                    libvirt.check_exit_status(result)
                    exp_vcpu = {'max_config': self.max_vcpu,
                                'max_live': self.max_vcpu,
                                'cur_config': self.current_vcpu,
                                'cur_live': self.max_vcpu,
                                'guest_live': self.max_vcpu}
                    utils_hotplug.check_vcpu_value(
                        vm, exp_vcpu, option="--live")
                time.sleep(self.event_sleep_time)
                result = virsh.setvcpus(vm.name, self.current_vcpu, "--live",
                                        **dargs)
                if not self.ignore_status:
                    libvirt.check_exit_status(result)
                    exp_vcpu = {'max_config': self.max_vcpu,
                                'max_live': self.max_vcpu,
                                'cur_config': self.current_vcpu,
                                'cur_live': self.current_vcpu,
                                'guest_live': self.current_vcpu}
                    utils_hotplug.check_vcpu_value(
                        vm, exp_vcpu, option="--live")
            elif "reboot" in event:
                vm.reboot()
            elif "nethotplug" in event:
                for iface_num in range(int(self.iface_num)):
                    logging.debug("Try to attach interface %d" % iface_num)
                    mac = utils_net.generate_mac_address_simple()
                    options = ("%s %s --model %s --mac %s %s" %
                               (self.iface_type, self.iface_source['network'],
                                self.iface_model, mac, self.attach_option))
                    logging.debug("VM name: %s , Options for Network attach: %s", vm.name, options)
                    ret = virsh.attach_interface(vm.name, options,
                                                 ignore_status=True)
                    time.sleep(self.event_sleep_time)
                    if not self.ignore_status:
                        libvirt.check_exit_status(ret)
                    if self.detach_option:
                        options = ("--type %s --mac %s %s" %
                                   (self.iface_type, mac, self.detach_option))
                        logging.debug("VM name: %s , Options for Network detach: %s", vm.name, options)
                        ret = virsh.detach_interface(vm.name, options,
                                                     ignore_status=True)
                        if not self.ignore_status:
                            libvirt.check_exit_status(ret)
            elif "diskhotplug" in event:
                for disk_num in range(len(self.device_source_names)):
                    disk = {}
                    disk_attach_error = False
                    disk_name = os.path.join(self.path, vm.name, self.device_source_names[disk_num])
                    device_source = libvirt.create_local_disk(
                        self.disk_type, disk_name, self.disk_size, disk_format=self.disk_format)
                    disk.update({"format": self.disk_format,
                                 "source": device_source})
                    disk_xml = Disk(self.disk_type)
                    disk_xml.device = self.disk_device
                    disk_xml.driver = {"name": self.disk_driver, "type": self.disk_format}
                    ret = virsh.attach_disk(vm.name, disk["source"], self.device_target[disk_num], self.attach_option, debug=True)
                    if not self.ignore_status:
                        libvirt.check_exit_status(ret, disk_attach_error)
                    if self.detach_option:
                        ret = virsh.detach_disk(vm.name, self.device_target[disk_num], extra=self.detach_option)
                        if not self.ignore_status:
                            libvirt.check_exit_status(ret)
                        libvirt.delete_local_disk(self.disk_type, disk_name)
            else:
                raise NotImplementedError
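
A minimal driver sketch for the method above (not from the original source;
the helper name and the assumption that stress_obj exposes
vm_stress_events(event, vm) are illustrative only):

import threading

def run_stress_events(stress_obj, vm, events):
    """Run each requested stress event in its own thread and wait for all."""
    threads = [threading.Thread(target=stress_obj.vm_stress_events,
                                args=(event, vm))
               for event in events]
    for t in threads:
        t.start()
    for t in threads:
        t.join()

# e.g. run_stress_events(stress_obj, vm, ["vcpupin", "suspend", "reboot"])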
Example 32
def run(test, params, env):
    """
    Test virsh migrate command.
    """

    def check_vm_network_accessed(session=None, ping_dest="www.baidu.com"):
        """
        Check that the VM is reachable over the network, before or after
        migration

        :param session: The session object to the host
        :param ping_dest: The destination to ping

        :raise: test.fail when ping fails
        """
        # Confirm local/remote VM can be accessed through network.
        logging.info("Check VM network connectivity")
        status, output = utils_test.ping(ping_dest,
                                         count=10,
                                         timeout=20,
                                         output_func=logging.debug,
                                         session=session)
        if status != 0:
            test.fail("Ping failed, status: %s,"
                      " output: %s" % (status, output))

    def get_vm_ifaces(session=None):
        """
        Get interfaces of vm

        :param session: The session object to the host
        :return: interfaces
        """
        p_iface, v_iface = utils_net.get_remote_host_net_ifs(session)

        return p_iface

    def check_vm_iface_num(iface_list, exp_num=3):
        """
        Check the number of interfaces

        :param iface_list: The interface list
        :param exp_num: The expected number
        :raise: test.fail when the number of interfaces does not equal exp_num
        """
        if len(iface_list) != exp_num:
            test.fail("%d interfaces should be found on the vm, "
                      "but found %s." % (exp_num, iface_list))

    def create_or_del_networks(pf_name, params, remote_virsh_session=None,
                               is_del=False):
        """
        Create or delete networks on the local or remote host

        :param pf_name: The name of the PF
        :param params: Dictionary with the test parameters
        :param remote_virsh_session: The virsh session object to the remote host
        :param is_del: Whether the networks should be deleted
        :raise: test.fail when fails to define/start network
        """
        net_hostdev_name = params.get("net_hostdev_name", "hostdev-net")
        net_hostdev_fwd = params.get("net_hostdev_fwd",
                                     '{"mode": "hostdev", "managed": "yes"}')
        net_bridge_name = params.get("net_bridge_name", "host-bridge")
        net_bridge_fwd = params.get("net_bridge_fwd", '{"mode": "bridge"}')
        bridge_name = params.get("bridge_name", "br0")

        net_dict = {"net_name": net_hostdev_name,
                    "net_forward": net_hostdev_fwd,
                    "net_forward_pf": '{"dev": "%s"}' % pf_name}
        bridge_dict = {"net_name": net_bridge_name,
                       "net_forward": net_bridge_fwd,
                       "net_bridge": '{"name": "%s"}' % bridge_name}

        if not is_del:
            for net_params in (net_dict, bridge_dict):
                net_dev = libvirt.create_net_xml(net_params.get("net_name"),
                                                 net_params)
                if not remote_virsh_session:
                    if net_dev.get_active():
                        net_dev.undefine()
                    net_dev.define()
                    net_dev.start()
                else:
                    remote.scp_to_remote(server_ip, '22', server_user, server_pwd,
                                         net_dev.xml, net_dev.xml, limit="",
                                         log_filename=None, timeout=600,
                                         interface=None)
                    remote_virsh_session.net_define(net_dev.xml, **virsh_args)
                    remote_virsh_session.net_start(net_params.get("net_name"),
                                                   **virsh_args)

        else:
            virsh_session = virsh
            if remote_virsh_session:
                virsh_session = remote_virsh_session
            for nname in (net_hostdev_name, net_bridge_name):
                if nname not in virsh_session.net_state_dict():
                    continue
                virsh_session.net_destroy(nname, debug=True, ignore_status=True)
                virsh_session.net_undefine(nname, debug=True, ignore_status=True)

    def check_vm_network_connection(net_name, expected_conn=0):
        """
        Check network connections in network xml

        :param net_name: The network to be checked
        :param expected_conn: The expected value
        :raise: test.fail when fails
        """
        output = virsh.net_dumpxml(net_name, debug=True).stdout_text
        if expected_conn == 0:
            reg_pattern = r"<network>"
        else:
            reg_pattern = r"<network connections='(\d)'>"
        res = re.findall(reg_pattern, output, re.I)
        if not res:
            test.fail("Unable to find expected connection in %s." % net_name)
        if expected_conn != 0:
            if expected_conn != int(res[0]):
                test.fail("Unable to get expected connection number."
                          "Expected: %s, Actual %s" % (expected_conn, int(res[0])))

    def get_hostdev_addr_from_xml():
        """
        Get the PCI ID for the VM's hostdev interface address

        :return: The PCI ID derived from the hostdev address
        """
        address_dict = {}
        for ifac in vm_xml.VMXML.new_from_dumpxml(vm_name).devices.by_device_tag("interface"):
            if ifac.type_name == "hostdev":
                address_dict = ifac.hostdev_address.attrs

        return libvirt.pci_info_from_address(address_dict, 16, "id")

    def check_vfio_pci(pci_path, status_error=False):
        """
        Check whether the VF driver is vfio-pci

        :param pci_path: The absolute path of the PCI device
        :param status_error: True if the driver is expected not to be vfio-pci
        """
        cmd = "readlink %s/driver | awk -F '/' '{print $NF}'" % pci_path
        output = process.run(cmd, shell=True, verbose=True).stdout_text.strip()
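        # XOR-style check: fail if the observed driver contradicts the
        # expectation encoded in status_error.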
        if (output == "vfio-pci") == status_error:
            test.fail("Get incorrect dirver %s, it should%s be vfio-pci."
                      % (output, ' not' if status_error else ''))

    def update_iface_xml(vmxml):
        """
        Update interfaces for guest

        :param vmxml: vm_xml.VMXML object
        """
        vmxml.remove_all_device_by_type('interface')
        vmxml.sync()

        iface_dict = {"type": "network", "source": "{'network': 'host-bridge'}",
                      "mac": mac_addr, "model": "virtio",
                      "teaming": '{"type":"persistent"}',
                      "alias": '{"name": "ua-backup0"}',
                      "inbound": '{"average":"5"}',
                      "outbound": '{"average":"5"}'}

        iface_dict2 = {"type": "network", "source": "{'network': 'hostdev-net'}",
                       "mac": mac_addr, "model": "virtio",
                       "teaming": '{"type":"transient", "persistent": "ua-backup0"}'}

        iface = interface.Interface('network')
        for ifc in (iface_dict, iface_dict2):
            iface.xml = libvirt.modify_vm_iface(vm.name, "get_xml", ifc)
            vmxml.add_device(iface)
        vmxml.sync()

    migration_test = migration.MigrationTest()
    migration_test.check_parameters(params)

    # Params for NFS shared storage
    shared_storage = params.get("migrate_shared_storage", "")
    if shared_storage == "":
        default_guest_asset = defaults.get_default_guest_os_info()['asset']
        default_guest_asset = "%s.qcow2" % default_guest_asset
        shared_storage = os.path.join(params.get("nfs_mount_dir"),
                                      default_guest_asset)
        logging.debug("shared_storage:%s", shared_storage)

    # Params to update disk using shared storage
    params["disk_type"] = "file"
    params["disk_source_protocol"] = "netfs"
    params["mnt_path_name"] = params.get("nfs_mount_dir")

    # Local variables
    virsh_args = {"debug": True}
    virsh_options = params.get("virsh_options", "")

    server_ip = params.get("server_ip")
    server_user = params.get("server_user", "root")
    server_pwd = params.get("server_pwd")
    client_ip = params.get("client_ip")
    client_pwd = params.get("client_pwd")
    extra = params.get("virsh_migrate_extra")
    options = params.get("virsh_migrate_options")

    bridge_name = params.get("bridge_name", "br0")
    net_hostdev_name = params.get("net_hostdev_name", "hostdev-net")
    net_bridge_name = params.get("net_bridge_name", "host-bridge")
    driver = params.get("driver", "ixgbe")
    vm_tmp_file = params.get("vm_tmp_file", "/tmp/test.txt")
    cmd_during_mig = params.get("cmd_during_mig")
    net_failover_test = "yes" == params.get("net_failover_test", "no")
    cancel_migration = "yes" == params.get("cancel_migration", "no")
    try:
        vf_no = int(params.get("vf_no", "4"))
    except ValueError as e:
        test.error(e)

    migr_vm_back = "yes" == params.get("migrate_vm_back", "no")
    err_msg = params.get("err_msg")
    status_error = "yes" == params.get("status_error", "no")
    cmd_parms = {'server_ip': server_ip, 'server_user': server_user,
                 'server_pwd': server_pwd}
    remote_virsh_dargs = {'remote_ip': server_ip, 'remote_user': server_user,
                          'remote_pwd': server_pwd, 'unprivileged_user': None,
                          'ssh_remote_auth': True}
    destparams_dict = copy.deepcopy(params)

    remote_virsh_session = None
    vm_session = None
    vm = None
    mig_result = None
    func_name = None
    extra_args = {}
    default_src_vf = 0
    default_dest_vf = 0
    default_src_rp_filter = 1
    default_dest_rp_filer = 1

    if not libvirt_version.version_compare(6, 0, 0):
        test.cancel("This libvirt version doesn't support migration with "
                    "net failover devices.")

    # params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
                                       params.get("migrate_dest_host"))
    params["virsh_migrate_connect_uri"] = libvirt_vm.complete_uri(
                                       params.get("migrate_source_host"))
    src_uri = params.get("virsh_migrate_connect_uri")
    dest_uri = params.get("virsh_migrate_desturi")

    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()

    # For safety reasons, back up the guest XML file.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()

    try:
        # Create a remote runner for later use
        runner_on_target = remote.RemoteRunner(host=server_ip,
                                               username=server_user,
                                               password=server_pwd)

        server_session = remote.wait_for_login('ssh', server_ip, '22',
                                               server_user, server_pwd,
                                               r"[\#\$]\s*$")
        if net_failover_test:
            src_pf, src_pf_pci = utils_sriov.find_pf(driver)
            logging.debug("src_pf is %s. src_pf_pci: %s", src_pf, src_pf_pci)
            params['pf_name'] = src_pf
            dest_pf, dest_pf_pci = utils_sriov.find_pf(driver, server_session)
            logging.debug("dest_pf is %s. dest_pf_pci: %s", dest_pf, dest_pf_pci)
            destparams_dict['pf_name'] = dest_pf

            src_pf_pci_path = utils_misc.get_pci_path(src_pf_pci)
            dest_pf_pci_path = utils_misc.get_pci_path(dest_pf_pci, server_session)

            cmd = "cat %s/sriov_numvfs" % (src_pf_pci_path)
            default_src_vf = process.run(cmd, shell=True,
                                         verbose=True).stdout_text

            cmd = "cat %s/sriov_numvfs" % (dest_pf_pci_path)
            status, default_dest_vf = utils_misc.cmd_status_output(cmd,
                                                                   shell=True,
                                                                   session=server_session)
            if status:
                test.error("Unable to get default sriov_numvfs on target!"
                           "status: %s, output: %s" % (status, default_dest_vf))

            if not utils_sriov.set_vf(src_pf_pci_path, vf_no):
                test.error("Failed to set vf on source.")

            if not utils_sriov.set_vf(dest_pf_pci_path, vf_no, session=server_session):
                test.error("Failed to set vf on target.")

            # Create PF and bridge connection on source and target host
            cmd = 'cat /proc/sys/net/ipv4/conf/all/rp_filter'
            default_src_rp_filter = process.run(cmd, shell=True,
                                                verbose=True).stdout_text
            status, default_dest_rp_filter = utils_misc.cmd_status_output(cmd,
                                                                          shell=True,
                                                                          session=server_session)
            if status:
                test.error("Unable to get default rp_filter on target!"
                           "status: %s, output: %s" % (status, default_dest_rp_filter))
            cmd = 'echo 0 >/proc/sys/net/ipv4/conf/all/rp_filter'
            process.run(cmd, shell=True, verbose=True)
            utils_misc.cmd_status_output(cmd, shell=True, session=server_session)
            utils_sriov.add_or_del_connection(params, is_del=False)
            utils_sriov.add_or_del_connection(destparams_dict, is_del=False,
                                              session=server_session)

            if not remote_virsh_session:
                remote_virsh_session = virsh.VirshPersistent(**remote_virsh_dargs)
            create_or_del_networks(dest_pf, params,
                                   remote_virsh_session=remote_virsh_session)
            remote_virsh_session.close_session()
            create_or_del_networks(src_pf, params)
            # Change network interface xml
            mac_addr = utils_net.generate_mac_address_simple()
            update_iface_xml(new_xml)

        # Change the disk of the vm
        libvirt.set_vm_disk(vm, params)

        if not vm.is_alive():
            vm.start()

        # Check local guest network connection before migration
        if vm.serial_console is not None:
            vm.cleanup_serial_console()
        vm.create_serial_console()
        vm_session = vm.wait_for_serial_login(timeout=240)

        if net_failover_test:
            utils_net.restart_guest_network(vm_session)
        iface_list = get_vm_ifaces(vm_session)

        vm_ipv4, vm_ipv6 = utils_net.get_linux_ipaddr(vm_session, iface_list[0])
        check_vm_network_accessed(ping_dest=vm_ipv4)

        if net_failover_test:
            check_vm_iface_num(iface_list)
            check_vm_network_connection(net_hostdev_name, 1)
            check_vm_network_connection(net_bridge_name, 1)

            hostdev_pci_id = get_hostdev_addr_from_xml()
            vf_path = utils_misc.get_pci_path(hostdev_pci_id)
            check_vfio_pci(vf_path)
            if cmd_during_mig:
                s, o = utils_misc.cmd_status_output(cmd_during_mig, shell=True,
                                                    session=vm_session)
                if s:
                    test.fail("Failed to run %s in vm." % cmd_during_mig)

        if extra.count("--postcopy"):
            func_name = virsh.migrate_postcopy
            extra_args.update({'func_params': params})
        if cancel_migration:
            func_name = migration_test.do_cancel

        # Execute migration process
        vms = [vm]

        migration_test.do_migration(vms, None, dest_uri, 'orderly',
                                    options, thread_timeout=900,
                                    ignore_status=True, virsh_opt=virsh_options,
                                    func=func_name, extra_opts=extra,
                                    **extra_args)
        mig_result = migration_test.ret

        migration_test.check_result(mig_result, params)

        if int(mig_result.exit_status) == 0:
            server_session = remote.wait_for_login('ssh', server_ip, '22',
                                                   server_user, server_pwd,
                                                   r"[\#\$]\s*$")
            check_vm_network_accessed(server_session, vm_ipv4)
            server_session.close()
            if net_failover_test:
                # Check network connection
                check_vm_network_connection(net_hostdev_name)
                check_vm_network_connection(net_bridge_name)
                # VF driver should not be vfio-pci
                check_vfio_pci(vf_path, True)

                cmd_parms.update({'vm_ip': vm_ipv4,
                                  'vm_pwd': params.get("password")})
                vm_after_mig = remote.VMManager(cmd_parms)
                vm_after_mig.setup_ssh_auth()
                cmd = "ip link"
                cmd_result = vm_after_mig.run_command(cmd)
                libvirt.check_result(cmd_result)
                p_iface = re.findall(r"\d+:\s+(\w+):\s+.*", cmd_result.stdout_text)
                p_iface = [x for x in p_iface if x != 'lo']
                check_vm_iface_num(p_iface)

                # Check the output of ping command
                cmd = 'cat %s' % vm_tmp_file
                cmd_result = vm_after_mig.run_command(cmd)
                libvirt.check_result(cmd_result)

                if re.findall('Destination Host Unreachable', cmd_result.stdout_text, re.M):
                    test.fail("The network does not work well during "
                              "the migration peirod. ping output: %s"
                              % cmd_result.stdout_text)

            # Execute migration from remote
            if migr_vm_back:
                ssh_connection = utils_conn.SSHConnection(server_ip=client_ip,
                                                          server_pwd=client_pwd,
                                                          client_ip=server_ip,
                                                          client_pwd=server_pwd)
                try:
                    ssh_connection.conn_check()
                except utils_conn.ConnectionError:
                    ssh_connection.conn_setup()
                    ssh_connection.conn_check()

                # Pre migration setup for local machine
                migration_test.migrate_pre_setup(src_uri, params)

                cmd = "virsh migrate %s %s %s" % (vm_name,
                                                  virsh_options, src_uri)
                logging.debug("Start migration: %s", cmd)
                cmd_result = remote.run_remote_cmd(cmd, params, runner_on_target)
                logging.info(cmd_result)
                if cmd_result.exit_status:
                    test.fail("Failed to run '%s' on remote: %s"
                              % (cmd, cmd_result))
                logging.debug("migration back done")
                check_vm_network_accessed(ping_dest=vm_ipv4)
                if net_failover_test:
                    if vm_session:
                        vm_session.close()
                    vm_session = vm.wait_for_login()
                    iface_list = get_vm_ifaces(vm_session)
                    check_vm_iface_num(iface_list)

        else:
            check_vm_network_accessed(ping_dest=vm_ipv4)
            if net_failover_test:
                iface_list = get_vm_ifaces(vm_session)
                check_vm_iface_num(iface_list)

    finally:
        logging.debug("Recover test environment")
        # Clean VM on destination
        migration_test.cleanup_dest_vm(vm, vm.connect_uri, dest_uri)

        if vm.is_alive():
            vm.destroy(gracefully=False)

        logging.info("Recovery VM XML configration")
        orig_config_xml.sync()
        logging.debug("The current VM XML:\n%s", orig_config_xml.xmltreefile)

        server_session = remote.wait_for_login('ssh', server_ip, '22',
                                               server_user, server_pwd,
                                               r"[\#\$]\s*$")
        if 'src_pf' in locals():
            cmd = 'echo %s  >/proc/sys/net/ipv4/conf/all/rp_filter' % default_src_rp_filter
            process.run(cmd, shell=True, verbose=True)
            utils_sriov.add_or_del_connection(params, is_del=True)
            create_or_del_networks(src_pf, params, is_del=True)

        if 'dest_pf' in locals():
            cmd = 'echo %s  >/proc/sys/net/ipv4/conf/all/rp_filter' % default_dest_rp_filter
            utils_misc.cmd_status_output(cmd, shell=True, session=server_session)
            utils_sriov.add_or_del_connection(destparams_dict, session=server_session,
                                              is_del=True)
            remote_virsh_session = virsh.VirshPersistent(**remote_virsh_dargs)
            create_or_del_networks(dest_pf, params,
                                   remote_virsh_session,
                                   is_del=True)
            remote_virsh_session.close_session()

        if 'dest_pf_pci_path' in locals() and default_dest_vf != vf_no:
            utils_sriov.set_vf(dest_pf_pci_path, default_dest_vf, server_session)
        if 'src_pf_pci_path' in locals() and default_src_vf != vf_no:
            utils_sriov.set_vf(src_pf_pci_path, default_src_vf)

        # Clean up of pre migration setup for local machine
        if migr_vm_back:
            if 'ssh_connection' in locals():
                ssh_connection.auto_recover = True
            migration_test.migrate_pre_setup(src_uri, params,
                                             cleanup=True)

        server_session.close()
        if remote_virsh_session:
            remote_virsh_session.close_session()

        logging.info("Remove local NFS image")
        source_file = params.get("source_file")
        if source_file:
            libvirt.delete_local_disk("file", path=source_file)
def run(test, params, env):
    """
    Test migration with option --copy-storage-all or --copy-storage-inc.
    """
    vm = env.get_vm(params.get("migrate_main_vm"))
    disk_type = params.get("copy_storage_type", "file")
    if disk_type == "file":
        params['added_disk_type'] = "file"
    else:
        params['added_disk_type'] = "lvm"
    cp_mig = None
    primary_target = vm.get_first_disk_devices()["target"]
    file_path, file_size = vm.get_device_size(primary_target)
    # Convert to GiB
    file_size = int(file_size) // 1073741824

    # Set the pool target using the source of the first disk
    params["precreation_pool_target"] = os.path.dirname(file_path)

    remote_host = params.get("migrate_dest_host", "REMOTE.EXAMPLE")
    local_host = params.get("migrate_source_host", "LOCAL.EXAMPLE")
    remote_user = params.get("migrate_dest_user", "root")
    remote_passwd = params.get("migrate_dest_pwd")
    if remote_host.count("EXAMPLE") or local_host.count("EXAMPLE"):
        test.cancel("Config remote or local host first.")
    # Config ssh autologin for it
    ssh_key.setup_ssh_key(remote_host, remote_user, remote_passwd, port=22)

    # Attach additional disks to the VM if the disk count is greater than 1
    disks_count = int(params.get("added_disks_count", 1)) - 1
    if disks_count:
        new_vm_name = "%s_smtest" % vm.name
        if vm.is_alive():
            vm.destroy()
        utlv.define_new_vm(vm.name, new_vm_name)
        vm = libvirt_vm.VM(new_vm_name, vm.params, vm.root_dir,
                           vm.address_cache)
    vms = [vm]
    vms_ip = {}
    for vm in vms:
        if vm.is_dead():
            vm.start()
        vm.wait_for_login().close()
        vms_ip[vm.name] = vm.get_address()
    # Check if image pre-creation is supported.
    support_precreation = False
    try:
        if qemu_test("drive-mirror") and qemu_test("nbd-server"):
            support_precreation = True
    except exceptions.TestError as e:
        logging.debug(e)
    params["support_precreation"] = support_precreation
    # Abnormal parameters
    migrate_again = "yes" == params.get("migrate_again", "no")
    abnormal_type = params.get("abnormal_type")
    added_disks_list = []
    rdm = None
    src_libvirt_file = None
    try:
        rdm = utils_test.RemoteDiskManager(params)
        vgname = params.get("sm_vg_name", "SMTEST")
        pool_created = False

        if disk_type == "lvm":
            target1 = target2 = ""  # For cleanup
            # Create volume group with iscsi
            # For local, target is a device name
            target1 = utlv.setup_or_cleanup_iscsi(is_setup=True, is_login=True,
                                                  emulated_image="emulated-iscsi1")
            lv_utils.vg_create(vgname, target1)
            logging.debug("Created VG %s", vgname)
            # For remote, target is real target name
            target2, _ = utlv.setup_or_cleanup_iscsi(is_setup=True, is_login=False,
                                                     emulated_image="emulated-iscsi2")
            logging.debug("Created target: %s", target2)
            # Login on remote host
            remote_device = rdm.iscsi_login_setup(local_host, target2)
            if not rdm.create_vg(vgname, remote_device):
                test.error("Create VG %s on %s failed."
                           % (vgname, remote_host))

        all_disks = utlv.attach_disks(vm, file_path, vgname, params)
        # Reserve for cleanup
        added_disks_list = list(all_disks.keys())
        all_disks[file_path] = file_size
        logging.debug("All disks need to be migrated:%s", all_disks)

        if abnormal_type == "occupied_disk":
            occupied_path = rdm.occupy_space(disk_type, file_size,
                                             file_path, vgname, timeout=600)
        if abnormal_type != "not_exist_file":
            for disk, size in list(all_disks.items()):
                if disk == file_path:
                    if support_precreation:
                        pool_created = create_destroy_pool_on_remote(test, "create",
                                                                     params)
                        if not pool_created:
                            test.error("Create pool on remote " +
                                       "host '%s' failed."
                                       % remote_host)
                    else:
                        rdm.create_image("file", disk, size, None,
                                         None, img_frmt='qcow2')
                else:
                    sparse = False if disk_type == 'lvm' else True
                    rdm.create_image(disk_type, disk, size, vgname,
                                     os.path.basename(disk),
                                     sparse=sparse, timeout=120)

        fail_flag = False
        remove_dict = {
            "do_search": '{"%s": "ssh:/"}' % params.get("migrate_dest_uri")}
        src_libvirt_file = libvirt_config.remove_key_for_modular_daemon(
            remove_dict)
        try:
            logging.debug("Start migration...")
            cp_mig = copied_migration(test, vms, vms_ip, params)
            # Check the new disk can be working well with I/O after migration
            utils_disk.check_remote_vm_disks({'server_ip': remote_host,
                                              'server_user': remote_user,
                                              'server_pwd': remote_passwd,
                                              'vm_ip': vms_ip[vm.name],
                                              'vm_pwd': params.get('password')})

            if migrate_again:
                fail_flag = True
                test.fail("Migration succeed, but not expected!")
            else:
                return
        except exceptions.TestFail:
            if not migrate_again:
                raise

            if abnormal_type == "occupied_disk":
                rdm.remove_path(disk_type, occupied_path)
            elif abnormal_type == "not_exist_file":
                for disk, size in list(all_disks.items()):
                    if disk == file_path:
                        rdm.create_image("file", disk, size, None,
                                         None, img_frmt='qcow2')
                    else:
                        rdm.create_image(disk_type, disk, size, vgname,
                                         os.path.basename(disk))
            elif abnormal_type == "migration_interupted":
                params["thread_timeout"] = 120
            # Raise after cleanup
            if fail_flag:
                raise

            # Migrate it again to confirm failed reason
            params["status_error"] = "no"
            cp_mig = copied_migration(test, vms, vms_ip, params)
    finally:
        # Recover created vm
        if cp_mig:
            cp_mig.cleanup_dest_vm(vm, None, params.get("migrate_dest_uri"))
        if vm.is_alive():
            vm.destroy()

        if src_libvirt_file:
            src_libvirt_file.restore()

        if disks_count and vm.name == new_vm_name:
            vm.undefine()
        for disk in added_disks_list:
            if disk_type == 'file':
                utlv.delete_local_disk(disk_type, disk)
            else:
                lvname = os.path.basename(disk)
                utlv.delete_local_disk(disk_type, disk, vgname, lvname)
            rdm.remove_path(disk_type, disk)
        rdm.remove_path("file", file_path)
        if pool_created:
            pool_destroyed = create_destroy_pool_on_remote(test, "destroy", params)
            if not pool_destroyed:
                test.error("Destroy pool on remote host '%s' failed."
                           % remote_host)

        if disk_type == "lvm":
            rdm.remove_vg(vgname)
            rdm.iscsi_login_setup(local_host, target2, is_login=False)
            try:
                lv_utils.vg_remove(vgname)
            except Exception:
                pass    # let it go to confirm cleanup iscsi device
            utlv.setup_or_cleanup_iscsi(is_setup=False,
                                        emulated_image="emulated-iscsi1")
            utlv.setup_or_cleanup_iscsi(is_setup=False,
                                        emulated_image="emulated-iscsi2")
Example 34
def run(test, params, env):
    """
    Test migration with option --copy-storage-all or --copy-storage-inc.
    """
    vm = env.get_vm(params.get("main_vm"))
    disk_type = params.get("copy_storage_type", "file")
    if disk_type == "file":
        params['added_disk_type'] = "file"
    else:
        params['added_disk_type'] = "block"
    primary_target = vm.get_first_disk_devices()["target"]
    file_path, file_size = vm.get_device_size(primary_target)
    # Convert to GiB
    file_size = int(file_size) / 1073741824

    remote_host = params.get("remote_ip", "REMOTE.EXAMPLE")
    local_host = params.get("local_ip", "LOCAL.EXAMPLE")
    remote_user = params.get("remote_user", "root")
    remote_passwd = params.get("remote_pwd", "PASSWORD.EXAMPLE")
    if remote_host.count("EXAMPLE") or local_host.count("EXAMPLE"):
        raise error.TestNAError("Config remote or local host first.")
    # Config ssh autologin for it
    ssh_key.setup_ssh_key(remote_host, remote_user, remote_passwd, port=22)

    # Attach additional disks to the VM if the disk count is greater than 1
    disks_count = int(params.get("added_disks_count", 1)) - 1
    if disks_count:
        new_vm_name = "%s_smtest" % vm.name
        if vm.is_alive():
            vm.destroy()
        utlv.define_new_vm(vm.name, new_vm_name)
        vm = libvirt_vm.VM(new_vm_name, vm.params, vm.root_dir,
                           vm.address_cache)
    vms = [vm]
    if vm.is_dead():
        vm.start()

    # Abnormal parameters
    migrate_again = "yes" == params.get("migrate_again", "no")
    abnormal_type = params.get("abnormal_type")

    try:
        rdm = utils_test.RemoteDiskManager(params)
        vgname = params.get("sm_vg_name", "SMTEST")
        added_disks_list = []
        if disk_type == "lvm":
            target1 = target2 = ""  # For cleanup
            # Create volume group with iscsi
            # For local, target is a device name
            target1 = utlv.setup_or_cleanup_iscsi(is_setup=True, is_login=True,
                                                  emulated_image="emulated_iscsi1")
            lv_utils.vg_create(vgname, target1)
            logging.debug("Created VG %s", vgname)
            # For remote, target is real target name
            target2 = utlv.setup_or_cleanup_iscsi(is_setup=True, is_login=False,
                                                  emulated_image="emulated_iscsi2")
            logging.debug("Created target: %s", target2)
            # Login on remote host
            remote_device = rdm.iscsi_login_setup(local_host, target2)
            if not rdm.create_vg(vgname, remote_device):
                raise error.TestError("Create VG %s on %s failed."
                                      % (vgname, remote_host))

        all_disks = utlv.attach_disks(vm, file_path, vgname, params)
        # Reserve for cleanup
        added_disks_list = all_disks.keys()
        all_disks[file_path] = file_size
        logging.debug("All disks need to be migrated:%s", all_disks)

        if abnormal_type == "occupied_disk":
            occupied_path = rdm.occupy_space(disk_type, file_size,
                                             file_path, vgname, timeout=600)
        if not abnormal_type == "not_exist_file":
            for disk, size in all_disks.items():
                if disk == file_path:
                    rdm.create_image("file", disk, size, None, None)
                else:
                    rdm.create_image(disk_type, disk, size, vgname,
                                     os.path.basename(disk))

        fail_flag = False
        try:
            logging.debug("Start migration...")
            copied_migration(vms, params)
            if migrate_again:
                fail_flag = True
                raise error.TestFail("Migration succeed, but not expected!")
            else:
                return
        except error.TestFail:
            if not migrate_again:
                raise

            if abnormal_type == "occupied_disk":
                rdm.remove_path(disk_type, occupied_path)
            elif abnormal_type == "not_exist_file":
                for disk, size in all_disks.items():
                    if disk == file_path:
                        rdm.create_image("file", disk, size, None, None)
                    else:
                        rdm.create_image(disk_type, disk, size, vgname,
                                         os.path.basename(disk))
            elif abnormal_type == "migration_interupted":
                params["thread_timeout"] = 120
            # Raise after cleanup
            if fail_flag:
                raise

            # Migrate it again to confirm failed reason
            copied_migration(vms, params)
    finally:
        # Recover created vm
        if vm.is_alive():
            vm.destroy()
        if disks_count and vm.name == new_vm_name:
            vm.undefine()
        for disk in added_disks_list:
            utlv.delete_local_disk(disk_type, disk)
            rdm.remove_path(disk_type, disk)
        rdm.remove_path("file", file_path)
        if disk_type == "lvm":
            rdm.remove_vg(vgname)
            rdm.iscsi_login_setup(local_host, target2, is_login=False)
            try:
                lv_utils.vg_remove(vgname)
            except Exception:
                pass    # let it go to confirm cleanup iscsi device
            utlv.setup_or_cleanup_iscsi(is_setup=False,
                                        emulated_image="emulated_iscsi1")
            utlv.setup_or_cleanup_iscsi(is_setup=False,
                                        emulated_image="emulated_iscsi2")
            if disk_count_after_shutdown == disk_count_before_cmd:
                check_count_after_shutdown = False
        elif test_cmd == "detach-disk":
            if disk_count_after_shutdown < disk_count_before_cmd:
                check_count_after_shutdown = False

    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
        if os.path.exists(save_file):
            os.remove(save_file)
        if test_block_dev:
            if test_logcial_dev:
                libvirt.delete_local_disk("lvm", vgname=vg_name, lvname=lv_name)
                lv_utils.vg_remove(vg_name)
                utils.system("pvremove %s" % device_source, ignore_status=True)
            libvirt.setup_or_cleanup_iscsi(False)
        else:
            libvirt.delete_local_disk("file", device_source)

    # Check results.
    if status_error:
        if not status:
            raise error.TestFail("virsh %s exit with unexpected value."
                                 % test_cmd)
    else:
        if status:
            raise error.TestFail("virsh %s failed." % test_cmd)
        if test_cmd == "attach-disk":
Example 36
def run(test, params, env):
    """
    Test storage migration
    1) Do storage migration(copy-storage-all/copy-storage-inc) with
    TLS encryption - NBD transport
    2) Cancel storage migration with TLS encryption
    3) Copy only the top image for storage migration with backing chain

    :param test: test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """

    def prepare_nfs_backingfile(vm, params):
        """
        Create an image using an NFS-based backing file

        :param vm: The guest
        :param params: the parameters used
        """
        mnt_path_name = params.get("nfs_mount_dir", "nfs-mount")
        exp_opt = params.get("export_options", "rw,no_root_squash,fsid=0")
        exp_dir = params.get("export_dir", "nfs-export")
        backingfile_img = params.get("source_dist_img", "nfs-img")
        disk_format = params.get("disk_format", "qcow2")
        img_name = params.get("img_name", "test.img")
        precreation = "yes" == params.get("precreation", "yes")
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
        disk_xml = vmxml.devices.by_device_tag('disk')[0]
        src_disk_format = disk_xml.xmltreefile.find('driver').get('type')
        first_disk = vm.get_first_disk_devices()
        blk_source = first_disk['source']
        disk_img = os.path.join(os.path.dirname(blk_source), img_name)

        res = libvirt.setup_or_cleanup_nfs(True, mnt_path_name, is_mount=True,
                                           export_options=exp_opt,
                                           export_dir=exp_dir)
        mnt_path = res["mount_dir"]
        params["selinux_status_bak"] = res["selinux_status_bak"]

        if vm.is_alive():
            vm.destroy(gracefully=False)

        disk_cmd = ("qemu-img convert -f %s -O %s %s %s/%s" %
                    (src_disk_format, disk_format,
                     blk_source, mnt_path, backingfile_img))
        process.run(disk_cmd, ignore_status=False, verbose=True)
        local_image_list.append("%s/%s" % (mnt_path, backingfile_img))
        logging.debug("Create a local image backing on NFS.")
        disk_cmd = ("qemu-img create -f %s -b %s/%s %s" %
                    (disk_format, mnt_path, backingfile_img, disk_img))
        process.run(disk_cmd, ignore_status=False, verbose=True)
        local_image_list.append(disk_img)
        if precreation:
            logging.debug("Create an image backing on NFS on remote host.")
            remote_session = remote.remote_login("ssh", server_ip, "22",
                                                 server_user, server_pwd,
                                                 r'[$#%]')
            utils_misc.make_dirs(os.path.dirname(blk_source), remote_session)
            status, stdout = utils_misc.cmd_status_output(
                disk_cmd, session=remote_session)
            logging.debug("status: {}, stdout: {}".format(status, stdout))
            remote_image_list.append("%s/%s" % (mnt_path, backingfile_img))
            remote_image_list.append(disk_img)
            remote_session.close()

        params.update({'disk_source_name': disk_img,
                       'disk_type': 'file',
                       'disk_source_protocol': 'file'})
        libvirt.set_vm_disk(vm, params)

    migration_test = migration.MigrationTest()
    migration_test.check_parameters(params)

    # Local variables
    server_ip = params["server_ip"] = params.get("remote_ip")
    server_user = params["server_user"] = params.get("remote_user", "root")
    server_pwd = params["server_pwd"] = params.get("remote_pwd")
    client_ip = params["client_ip"] = params.get("local_ip")
    client_pwd = params["client_pwd"] = params.get("local_pwd")
    virsh_options = params.get("virsh_options", "")
    copy_storage_option = params.get("copy_storage_option")
    extra = params.get("virsh_migrate_extra", "")
    options = params.get("virsh_migrate_options", "--live --verbose")
    backingfile_type = params.get("backingfile_type")
    check_str_local_log = params.get("check_str_local_log", "")
    disk_format = params.get("disk_format", "qcow2")
    log_file = params.get("log_outputs", "/var/log/libvirt/libvirtd.log")
    daemon_conf_dict = eval(params.get("daemon_conf_dict", '{}'))
    cancel_migration = "yes" == params.get("cancel_migration", "no")
    migrate_again = "yes" == params.get("migrate_again", "no")
    precreation = "yes" == params.get("precreation", "yes")
    tls_recovery = "yes" == params.get("tls_auto_recovery", "yes")
    func_params_exists = "yes" == params.get("func_params_exists", "no")
    status_error = "yes" == params.get("status_error", "no")

    local_image_list = []
    remote_image_list = []
    tls_obj = None

    func_name = None
    daemon_conf = None
    mig_result = None
    remote_session = None
    vm_session = None

    # params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
                                       params.get("migrate_dest_host"))
    dest_uri = params.get("virsh_migrate_desturi")
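    # complete_uri() turns the bare destination hostname into a full driver
    # URI, typically something like "qemu+ssh://<host>/system" (illustrative;
    # the exact form depends on the configured transport).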

    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()

    extra = "{} {}".format(extra, copy_storage_option)

    extra_args = {}
    if func_params_exists:
        extra_args.update({'func_params': params})
    if cancel_migration:
        func_name = migration_test.do_cancel

    # For safety, back up the guest XML before making any changes.
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = vmxml.copy()

    try:
        if backingfile_type:
            if backingfile_type == "nfs":
                prepare_nfs_backingfile(vm, params)

        if extra.count("copy-storage-all") and precreation:
            blk_source = vm.get_first_disk_devices()['source']
            vsize = utils_misc.get_image_info(blk_source).get("vsize")
            remote_session = remote.remote_login("ssh", server_ip, "22",
                                                 server_user, server_pwd,
                                                 r'[$#%]')
            utils_misc.make_dirs(os.path.dirname(blk_source), remote_session)
            disk_cmd = ("qemu-img create -f %s %s %s" %
                        (disk_format, blk_source, vsize))
            status, stdout = utils_misc.cmd_status_output(
                disk_cmd, session=remote_session)
            logging.debug("status: {}, stdout: {}".format(status, stdout))
            remote_image_list.append(blk_source)
            remote_session.close()

        # Update libvirtd configuration
        if daemon_conf_dict:
            if os.path.exists(log_file):
                os.remove(log_file)
            daemon_conf = libvirt.customize_libvirt_config(daemon_conf_dict)

        if extra.count("--tls"):
            tls_obj = TLSConnection(params)
            if tls_recovery:
                tls_obj.auto_recover = True
                tls_obj.conn_setup()
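                # conn_setup() generates and deploys the CA/server/client
                # certificates on both hosts so that the --tls migration
                # stream can be established.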

        if not vm.is_alive():
            vm.start()

        logging.debug("Guest xml after starting:\n%s",
                      vm_xml.VMXML.new_from_dumpxml(vm_name))
        # Check local guest network connection before migration
        vm_session = vm.wait_for_login(restart_network=True)
        migration_test.ping_vm(vm, params)

        # Execute migration process
        vms = [vm]

        migration_test.do_migration(vms, None, dest_uri, 'orderly',
                                    options, thread_timeout=900,
                                    ignore_status=True, virsh_opt=virsh_options,
                                    extra_opts=extra, func=func_name,
                                    **extra_args)

        mig_result = migration_test.ret
        migration_test.check_result(mig_result, params)

        if migrate_again and status_error:
            logging.debug("Sleeping 10 seconds before rerun migration")
            time.sleep(10)
            if cancel_migration:
                func_name = None
            params["status_error"] = "no"
            migration_test.do_migration(vms, None, dest_uri, 'orderly',
                                        options, thread_timeout=900,
                                        ignore_status=True,
                                        virsh_opt=virsh_options,
                                        extra_opts=extra, func=func_name,
                                        **extra_args)

            mig_result = migration_test.ret
            migration_test.check_result(mig_result, params)
        if int(mig_result.exit_status) == 0:
            migration_test.ping_vm(vm, params, uri=dest_uri)

        if check_str_local_log:
            libvirt.check_logfile(check_str_local_log, log_file)

    finally:
        logging.debug("Recover test environment")
        # Clean VM on destination and source
        try:
            migration_test.cleanup_dest_vm(vm, vm.connect_uri, dest_uri)
            if vm.is_alive():
                vm.destroy(gracefully=False)
        except Exception as err:
            logging.error(err)

        logging.info("Recovery VM XML configration")
        orig_config_xml.sync()

        if daemon_conf:
            logging.debug("Recover the configurations")
            libvirt.customize_libvirt_config(None, is_recover=True,
                                             config_object=daemon_conf)
        if tls_obj:
            logging.debug("Clean up local objs")
            del tls_obj
        for source_file in local_image_list:
            libvirt.delete_local_disk("file", path=source_file)
        for img in remote_image_list:
            remote.run_remote_cmd("rm -rf %s" % img, params)

        if remote_session:
            remote_session.close()
def run(test, params, env):
    """
    Test blkdevio tuning

    Positive tests cover the following combinations.
    -------------------------
    | total | read  | write |
    -------------------------
    |   0   |   0   |   0   |
    | non-0 |   0   |   0   |
    |   0   | non-0 | non-0 |
    |   0   | non-0 |  0    |
    |   0   |   0   | non-0 |
    -------------------------

    Negative tests cover unsupported combinations and
    invalid command arguments.

    NB: on RHEL >= 6.5 only qemu-kvm-rhev supports block I/O throttling;
    on RHEL >= 7.0 plain qemu-kvm supports it as well.
    """

    # Run test case
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    start_vm = params.get("start_vm", "yes")
    change_parameters = params.get("change_parameters", "no")
    attach_disk = "yes" == params.get("attach_disk", "no")
    attach_before_start = "yes" == params.get("attach_before_start", "yes")
    disk_type = params.get("disk_type", 'file')
    disk_format = params.get("disk_format", 'qcow2')
    disk_bus = params.get("disk_bus", 'virtio')
    disk_alias = params.get("disk_alias")
    attach_options = params.get("attach_options")
    slice_test = "yes" == params.get("disk_slice_enabled", "yes")
    test_size = params.get("test_size", "1")

    original_vm_xml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Used for default device of blkdeviotune
    device = params.get("device_name", "vmblk")
    sys_image_target = vm.get_first_disk_devices()["target"]

    # Make sure vm is down if start not requested
    if (start_vm == "no" or attach_before_start) and vm and vm.is_alive():
        vm.destroy()

    disk_source = tempfile.mktemp(dir=data_dir.get_tmp_dir())
    params["input_source_file"] = disk_source
    params["disk_slice"] = {"slice_test": "yes"}
    if attach_disk and not slice_test:
        libvirt.create_local_disk(disk_type,
                                  path=disk_source,
                                  size='1',
                                  disk_format=disk_format)
        attach_extra = ""
        if disk_alias:
            attach_extra += " --alias %s" % disk_alias
        if disk_bus:
            attach_extra += " --targetbus %s" % disk_bus
        if disk_format:
            attach_extra += " --subdriver %s" % disk_format
        if attach_options:
            attach_extra += " %s" % attach_options
    test_dict = dict(params)
    test_dict['vm'] = vm
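    # A disk "slice" exposes only a sub-range of the image file to the guest;
    # in the generated disk XML it appears roughly as (illustrative snippet):
    #   <source file='...'>
    #     <slices>
    #       <slice type='storage' offset='...' size='...'/>
    #     </slices>
    #   </source>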
    # Coldplug disk with slice image
    if attach_disk and slice_test and attach_before_start:
        libvirt.create_local_disk(disk_type="file",
                                  extra=" -o preallocation=full",
                                  path=disk_source,
                                  disk_format="qcow2",
                                  size=test_size)
        disk_xml = libvirt.create_disk_xml(params)
        ret = virsh.attach_device(vm_name, disk_xml, flagstr="--config")
        libvirt.check_exit_status(ret)
    # Coldplug disk without slice image
    if attach_disk and attach_before_start and not slice_test:
        ret = virsh.attach_disk(vm_name,
                                disk_source,
                                device,
                                extra=attach_extra,
                                debug=True)
        libvirt.check_exit_status(ret)
    # Recover previous running guest
    if vm and not vm.is_alive() and start_vm == "yes":
        try:
            vm.start()
            vm.wait_for_login().close()
        except (virt_vm.VMError, remote.LoginError) as detail:
            vm.destroy()
            test.fail(str(detail))

    # Hotplug disk with slice image
    if attach_disk and slice_test and not attach_before_start:
        libvirt.create_local_disk(disk_type="file",
                                  extra=" -o preallocation=full",
                                  path=disk_source,
                                  disk_format="qcow2",
                                  size=test_size)
        disk_xml = libvirt.create_disk_xml(params)
        ret = virsh.attach_device(vm_name, disk_xml, flagstr="")
        libvirt.check_exit_status(ret)

    # Hotplug disk without slice image
    if attach_disk and not attach_before_start and not slice_test:
        ret = virsh.attach_disk(vm_name,
                                disk_source,
                                device,
                                extra=attach_extra,
                                debug=True)
        libvirt.check_exit_status(ret)

    if device == "vmblk":
        test_dict['device_name'] = sys_image_target

    # Make sure libvirtd service is running
    if not utils_libvirtd.libvirtd_is_running():
        test.cancel("libvirt service is not running!")

    # Positive and negative testing
    try:
        if change_parameters == "no":
            get_blkdevio_parameter(test_dict, test)
        else:
            set_blkdevio_parameter(test_dict, test)
    finally:
        # Restore guest
        original_vm_xml.sync()
        libvirt.delete_local_disk('file', path=disk_source)
def run(test, params, env):
    """
    Test virsh {at|de}tach-disk command.

    The command can attach a new disk or detach an existing one.
    1. Prepare the test environment; destroy or suspend a VM.
    2. Perform the virsh attach-disk/detach-disk operation.
    3. Recover the test environment.
    4. Confirm the test result.
    """
    def check_vm_partition(vm, device, os_type, target_name, old_parts):
        """
        Check VM disk's partition.

        :param vm: VM guest.
        :param device: device type ("disk" or "cdrom").
        :param os_type: VM's operating system type.
        :param target_name: device target name.
        :param old_parts: partitions present before the operation.
        :return: True if the check succeeds.
        """
        logging.info("Checking VM partittion...")
        if vm.is_dead():
            vm.start()
        try:
            attached = False
            if os_type == "linux":
                session = vm.wait_for_login()
                new_parts = utils_disk.get_parts_list(session)
                added_parts = list(set(new_parts).difference(set(old_parts)))
                logging.debug("Added parts: %s" % added_parts)
                for i in range(len(added_parts)):
                    if device == "disk":
                        if target_name.startswith("vd"):
                            if added_parts[i].startswith("vd"):
                                attached = True
                        elif target_name.startswith(
                                "hd") or target_name.startswith("sd"):
                            if added_parts[i].startswith("sd"):
                                attached = True
                    elif device == "cdrom":
                        if added_parts[i].startswith("sr"):
                            attached = True
                session.close()
            return attached
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    def acpiphp_module_modprobe(vm, os_type):
        """
        Add the acpiphp module if the VM's OS is RHEL 5.*

        :param vm: VM guest.
        :param os_type: VM's operating system type.
        :return: True if the operation succeeds.
        """
        if vm.is_dead():
            vm.start()
        try:
            if os_type == "linux":
                session = vm.wait_for_login()
                s_rpm, _ = session.cmd_status_output("rpm --version")
                # If status is different from 0, this
                # guest OS doesn't support the rpm package
                # manager
                if s_rpm:
                    session.close()
                    return True
                _, o_vd = session.cmd_status_output(
                    "rpm -qa | grep redhat-release")
                if o_vd.find("5Server") != -1:
                    s_mod, o_mod = session.cmd_status_output(
                        "modprobe acpiphp")
                    del o_mod
                    if s_mod != 0:
                        session.close()
                        return False
                session.close()
            return True
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    def check_shareable(at_with_shareable, test_twice):
        """
        Check whether the current libvirt version supports the shareable option

        :param at_with_shareable: True/False, whether to attach the disk with the shareable option.
        :param test_twice: True/False, whether to perform the operation twice.
        :return: True if supported; otherwise the test is cancelled.
        """
        if at_with_shareable or test_twice:
            if libvirt_version.version_compare(3, 9, 0):
                return True
            else:
                test.cancel(
                    "Current libvirt version doesn't support shareable feature"
                )

    # Get test command.
    test_cmd = params.get("at_dt_disk_test_cmd", "attach-disk")

    vm_ref = params.get("at_dt_disk_vm_ref", "name")
    at_options = params.get("at_dt_disk_at_options", "")
    dt_options = params.get("at_dt_disk_dt_options", "")
    at_with_shareable = "yes" == params.get("at_with_shareable", 'no')
    pre_vm_state = params.get("at_dt_disk_pre_vm_state", "running")
    status_error = "yes" == params.get("status_error", 'no')
    no_attach = params.get("at_dt_disk_no_attach", 'no')
    os_type = params.get("os_type", "linux")
    qemu_file_lock = params.get("qemu_file_lock", "")
    if qemu_file_lock:
        if utils_misc.compare_qemu_version(2, 9, 0):
            logging.info('From qemu-kvm-rhev 2.9.0, QEMU image locking '
                         'prevents multiple runs of QEMU or qemu-img '
                         'against an image in use by a running VM.')
            if test_cmd == "detach-disk" or pre_vm_state == "shut off":
                test.cancel('This case is not supported.')
            else:
                logging.info(
                    'The expected result is failure rather than success')
                status_error = True

    # Disk specific attributes.
    device = params.get("at_dt_disk_device", "disk")
    device_source_name = params.get("at_dt_disk_device_source", "attach.img")
    device_source_format = params.get("at_dt_disk_device_source_format", "raw")
    device_target = params.get("at_dt_disk_device_target", "vdd")
    device_disk_bus = params.get("at_dt_disk_bus_type", "virtio")
    source_path = "yes" == params.get("at_dt_disk_device_source_path", "yes")
    create_img = "yes" == params.get("at_dt_disk_create_image", "yes")
    test_twice = "yes" == params.get("at_dt_disk_test_twice", "no")
    test_type = "yes" == params.get("at_dt_disk_check_type", "no")
    test_audit = "yes" == params.get("at_dt_disk_check_audit", "no")
    test_block_dev = "yes" == params.get("at_dt_disk_iscsi_device", "no")
    test_logical_dev = "yes" == params.get("at_dt_disk_logical_device", "no")
    restart_libvirtd = "yes" == params.get("at_dt_disk_restart_libvirtd", "no")
    detach_disk_with_print_xml = "yes" == params.get(
        "detach_disk_with_print_xml", "no")
    vg_name = params.get("at_dt_disk_vg", "vg_test_0")
    lv_name = params.get("at_dt_disk_lv", "lv_test_0")
    serial = params.get("at_dt_disk_serial", "")
    address = params.get("at_dt_disk_address", "")
    address2 = params.get("at_dt_disk_address2", "")
    cache_options = params.get("cache_options", "")
    time_sleep = params.get("time_sleep", 3)
    if check_shareable(at_with_shareable, test_twice):
        at_options += " --mode shareable"
    if serial:
        at_options += (" --serial %s" % serial)
    if address2:
        at_options_twice = at_options + (" --address %s" % address2)
    if address:
        at_options += (" --address %s" % address)
    if cache_options:
        if cache_options.count("directsync"):
            if not libvirt_version.version_compare(1, 0, 0):
                test.cancel("'directsync' cache option doesn't "
                            "support in current libvirt version.")
        at_options += (" --cache %s" % cache_options)

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    # Start vm and get all partions in vm.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = utils_disk.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file.
    backup_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Create virtual device file.
    device_source_path = os.path.join(data_dir.get_tmp_dir(),
                                      device_source_name)
    if test_block_dev:
        device_source = libvirt.setup_or_cleanup_iscsi(True)
        if not device_source:
            # We should skip this case
            test.cancel("Can not get iscsi device name in host")
        if test_logical_dev:
            lv_utils.vg_create(vg_name, device_source)
            device_source = libvirt.create_local_disk("lvm",
                                                      size="10M",
                                                      vgname=vg_name,
                                                      lvname=lv_name)
            logging.debug("New created volume: %s", lv_name)
    else:
        if source_path and create_img:
            device_source = libvirt.create_local_disk(
                "file",
                path=device_source_path,
                size="1G",
                disk_format=device_source_format)
        else:
            device_source = device_source_name

    # If we are testing audit, we need to start the auditd service first.
    if test_audit:
        auditd_service = Factory.create_service("auditd")
        if not auditd_service.status():
            auditd_service.start()
        logging.info("Auditd service status: %s" % auditd_service.status())

    # If we are testing cdrom device, we need to detach hdc in VM first.
    if device == "cdrom":
        if vm.is_alive():
            vm.destroy(gracefully=False)
        s_detach = virsh.detach_disk(vm_name, device_target, "--config")
        if s_detach.exit_status != 0:
            logging.error("Detach hdc failed before test.")

    # If we are testing detach-disk, we need to attach certain device first.
    if test_cmd == "detach-disk" and no_attach != "yes":
        s_at_options = "--driver qemu --config"
        # Since the lock feature was introduced in libvirt 3.9.0, the shareable
        # option must be set if the disk is to be attached multiple times.
        if check_shareable(at_with_shareable, test_twice):
            s_at_options += " --mode shareable"

        s_attach = virsh.attach_disk(vm_name,
                                     device_source,
                                     device_target,
                                     s_at_options,
                                     debug=True).exit_status
        if s_attach != 0:
            logging.error("Attaching device failed before testing detach-disk")
        else:
            logging.debug(
                "Attaching device succeeded before testing detach-disk")
        if test_twice:
            device_target2 = params.get("at_dt_disk_device_target2",
                                        device_target)
            device_source = libvirt.create_local_disk(
                "file",
                path=device_source_path,
                size="1",
                disk_format=device_source_format)
            s_attach = virsh.attach_disk(vm_name, device_source,
                                         device_target2,
                                         s_at_options).exit_status
            if s_attach != 0:
                logging.error("Attaching device failed before testing "
                              "detach-disk test_twice")

    vm.start()
    vm.wait_for_login()

    # Add acpiphp module before testing if VM's os type is rhel5.*
    if not acpiphp_module_modprobe(vm, os_type):
        test.error("Add acpiphp module failed before test.")

    # Turn VM into certain state.
    if pre_vm_state == "paused":
        logging.info("Suspending %s..." % vm_name)
        if vm.is_alive():
            vm.pause()
    elif pre_vm_state == "shut off":
        logging.info("Shuting down %s..." % vm_name)
        if vm.is_alive():
            vm.destroy(gracefully=False)

    # Get disk count before test.
    disk_count_before_cmd = vm_xml.VMXML.get_disk_count(vm_name)

    # Test.
    domid = vm.get_id()
    domuuid = vm.get_uuid()

    # Confirm how to reference a VM.
    if vm_ref == "name":
        vm_ref = vm_name
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref == "uuid":
        vm_ref = domuuid
    else:
        vm_ref = ""

    if test_cmd == "attach-disk":
        status = virsh.attach_disk(vm_ref,
                                   device_source,
                                   device_target,
                                   at_options,
                                   debug=True).exit_status
    elif test_cmd == "detach-disk":
        # With the --print-xml option, detach-disk only prints the device XML
        # and does not actually detach the disk.
        if detach_disk_with_print_xml and libvirt_version.version_compare(
                4, 5, 0):
            ret = virsh.detach_disk(vm_ref, device_target, at_options)
            libvirt.check_exit_status(ret)
            cmd = ("echo \"%s\" | grep -A 16 %s" %
                   (ret.stdout.strip(), device_source_name))
            if process.system(cmd, ignore_status=True, shell=True):
                test.error("Check disk with source image name failed")
        status = virsh.detach_disk(vm_ref,
                                   device_target,
                                   dt_options,
                                   debug=True).exit_status

    if restart_libvirtd:
        libvirtd_serv = utils_libvirtd.Libvirtd()
        libvirtd_serv.restart()

    if test_twice:
        device_target2 = params.get("at_dt_disk_device_target2", device_target)
        device_source = libvirt.create_local_disk(
            "file",
            path=device_source_path,
            size="1G",
            disk_format=device_source_format)
        if test_cmd == "attach-disk":
            if address2:
                at_options = at_options_twice
            status = virsh.attach_disk(vm_ref,
                                       device_source,
                                       device_target2,
                                       at_options,
                                       debug=True).exit_status
        elif test_cmd == "detach-disk":
            status = virsh.detach_disk(vm_ref,
                                       device_target2,
                                       dt_options,
                                       debug=True).exit_status

    # Resume the guest after the command. On newer libvirt this is fixed (it
    # used to be a bug): the XML change happens only after the guest resumes.
    if pre_vm_state == "paused":
        vm.resume()
        time.sleep(5)

    # Check audit log
    check_audit_after_cmd = True
    if test_audit:
        grep_audit = ('grep "%s" /var/log/audit/audit.log' %
                      test_cmd.split("-")[0])
        cmd = (grep_audit + ' | ' +
               'grep "%s" | tail -n1 | grep "res=success"' % device_source)
        if process.run(cmd, shell=True).exit_status:
            logging.error("Audit check failed")
            check_audit_after_cmd = False
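    # A matching audit record looks roughly like (abridged, illustrative):
    #   type=VIRT_RESOURCE ... virt=kvm resrc=disk reason=attach
    #   new-disk="/tmp/attach.img" ... res=success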

    # Wait a while for the XML to sync
    time.sleep(float(time_sleep))
    # Check disk count after command.
    check_count_after_cmd = True
    disk_count_after_cmd = vm_xml.VMXML.get_disk_count(vm_name)
    if test_cmd == "attach-disk":
        if disk_count_after_cmd == disk_count_before_cmd:
            check_count_after_cmd = False
    elif test_cmd == "detach-disk":
        if disk_count_after_cmd < disk_count_before_cmd:
            check_count_after_cmd = False

    # Recover VM state.
    if pre_vm_state == "shut off":
        vm.start()

    # Check in VM after command.
    check_vm_after_cmd = True
    check_vm_after_cmd = check_vm_partition(vm, device, os_type, device_target,
                                            old_parts)

    # Check disk type after attach.
    check_disk_type = True
    if test_type:
        if test_block_dev:
            check_disk_type = vm_xml.VMXML.check_disk_type(
                vm_name, device_source, "block")
        else:
            check_disk_type = vm_xml.VMXML.check_disk_type(
                vm_name, device_source, "file")
    # Check disk serial after attach.
    check_disk_serial = True
    if serial:
        disk_serial = vm_xml.VMXML.get_disk_serial(vm_name, device_target)
        if serial != disk_serial:
            check_disk_serial = False

    # Check disk address after attach.
    check_disk_address = True
    if address:
        disk_address = vm_xml.VMXML.get_disk_address(vm_name, device_target)
        if address != disk_address:
            check_disk_address = False

    # Check multifunction address after attach.
    check_disk_address2 = True
    if address2:
        disk_address2 = vm_xml.VMXML.get_disk_address(vm_name, device_target2)
        if address2 != disk_address2:
            check_disk_address2 = False

    # Check disk cache option after attach.
    check_cache_after_cmd = True
    if cache_options:
        disk_cache = vm_xml.VMXML.get_disk_attr(vm_name, device_target,
                                                "driver", "cache")
        if cache_options == "default":
            if disk_cache is not None:
                check_cache_after_cmd = False
        elif disk_cache != cache_options:
            check_cache_after_cmd = False

    # Eject cdrom test
    eject_cdrom = "yes" == params.get("at_dt_disk_eject_cdrom", "no")
    save_vm = "yes" == params.get("at_dt_disk_save_vm", "no")
    save_file = os.path.join(data_dir.get_tmp_dir(), "vm.save")
    try:
        if eject_cdrom:
            eject_params = {
                'type_name': "file",
                'device_type': "cdrom",
                'target_dev': device_target,
                'target_bus': device_disk_bus
            }
            eject_xml = libvirt.create_disk_xml(eject_params)
            with open(eject_xml) as eject_file:
                logging.debug("Eject CDROM by XML: %s", eject_file.read())
            # Run the command twice to make sure the cdrom tray opens first (BZ892289)
            # Open tray
            virsh.attach_device(domainarg=vm_name,
                                filearg=eject_xml,
                                debug=True)
            # Add time sleep between two attach commands.
            if time_sleep:
                time.sleep(float(time_sleep))
            # Eject cdrom
            result = virsh.attach_device(domainarg=vm_name,
                                         filearg=eject_xml,
                                         debug=True)
            if result.exit_status != 0:
                test.fail("Eject CDROM failed")
            if vm_xml.VMXML.check_disk_exist(vm_name, device_source):
                test.fail("Find %s after do eject" % device_source)
        # Save and restore VM
        if save_vm:
            result = virsh.save(vm_name, save_file, debug=True)
            libvirt.check_exit_status(result)
            result = virsh.restore(save_file, debug=True)
            libvirt.check_exit_status(result)
            if vm_xml.VMXML.check_disk_exist(vm_name, device_source):
                test.fail("Find %s after do restore" % device_source)

        # Destroy VM.
        vm.destroy(gracefully=False)

        # Check disk count after VM shutdown (with --config).
        check_count_after_shutdown = True
        inactive_vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        disk_count_after_shutdown = len(inactive_vmxml.get_disk_all())
        if test_cmd == "attach-disk":
            if disk_count_after_shutdown == disk_count_before_cmd:
                check_count_after_shutdown = False
        elif test_cmd == "detach-disk":
            if disk_count_after_shutdown < disk_count_before_cmd:
                check_count_after_shutdown = False

    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.debug("Restore the VM XML")
        backup_xml.sync()
        if os.path.exists(save_file):
            os.remove(save_file)
        if test_block_dev:
            if test_logical_dev:
                libvirt.delete_local_disk("lvm",
                                          vgname=vg_name,
                                          lvname=lv_name)
                lv_utils.vg_remove(vg_name)
                process.run("pvremove %s" % device_source,
                            shell=True,
                            ignore_status=True)
            libvirt.setup_or_cleanup_iscsi(False)
        else:
            libvirt.delete_local_disk("file", device_source)

    # Check results.
    if status_error:
        if not status:
            test.fail("virsh %s exit with unexpected value." % test_cmd)
    else:
        if status:
            test.fail("virsh %s failed." % test_cmd)
        if test_cmd == "attach-disk":
            if at_options.count("config"):
                if not check_count_after_shutdown:
                    test.fail("Cannot see config attached device "
                              "in xml file after VM shutdown.")
                if not check_disk_serial:
                    test.fail("Serial set failed after attach")
                if not check_disk_address:
                    test.fail("Address set failed after attach")
                if not check_disk_address2:
                    test.fail("Address(multifunction) set failed"
                              " after attach")
            else:
                if not check_count_after_cmd:
                    test.fail("Cannot see device in xml file after attach.")
                if not check_vm_after_cmd:
                    test.fail("Cannot see device in VM after attach.")
                if not check_disk_type:
                    test.fail("Check disk type failed after attach.")
                if not check_audit_after_cmd:
                    test.fail("Audit hotplug failure after attach")
                if not check_cache_after_cmd:
                    test.fail("Check cache failure after attach")
                if at_options.count("persistent"):
                    if not check_count_after_shutdown:
                        test.fail("Cannot see device attached "
                                  "with persistent after "
                                  "VM shutdown.")
                else:
                    if check_count_after_shutdown:
                        test.fail("See non-config attached device "
                                  "in xml file after VM shutdown.")
        elif test_cmd == "detach-disk":
            if dt_options.count("config"):
                if check_count_after_shutdown:
                    test.fail("See config detached device in "
                              "xml file after VM shutdown.")
            else:
                if check_count_after_cmd:
                    test.fail("See device in xml file " "after detach.")
                if check_vm_after_cmd:
                    test.fail("See device in VM after detach.")
                if not check_audit_after_cmd:
                    test.fail("Audit hotunplug failure " "after detach")

                if dt_options.count("persistent"):
                    if check_count_after_shutdown:
                        test.fail("See device deattached "
                                  "with persistent after "
                                  "VM shutdown.")
                else:
                    if not check_count_after_shutdown:
                        test.fail("See non-config detached "
                                  "device in xml file after "
                                  "VM shutdown.")

        else:
            test.error("Unknown command %s." % test_cmd)