Example #1
    def add_disk(vm_name, init_source, target_device, extra_param, format=''):
        """
        Add disk/cdrom for test vm

        :param vm_name: guest name
        :param init_source: source file
        :param target_device: target of disk device
        :param extra_param: additional arguments to command
        :param format: init_source format (qcow2 or raw)
        """
        if not os.path.exists(new_disk):
            if format == "qcow2":
                process.run(
                    'qemu-img create -f qcow2 %s %s -o preallocation=full' %
                    (new_disk, '1G'),
                    shell=True,
                    verbose=True)
            elif format == "raw":
                process.run('qemu-img create -f raw %s %s' % (new_disk, '1G'),
                            shell=True,
                            verbose=True)
            else:
                open(new_disk, 'a').close()
        if virsh.is_alive(vm_name) and 'cdrom' in extra_param:
            virsh.destroy(vm_name)
        virsh.attach_disk(vm_name, init_source, target_device, extra_param,
                          **virsh_dargs)
Example #2
    def add_disk(vm_name, init_source, target_device, extra_param, format=''):
        """
        Add disk/cdrom for test vm

        :param vm_name: guest name
        :param init_source: source file
        :param target_device: target of disk device
        :param extra_param: additional arguments to command
        :param format: init_source format (qcow2 or raw)
        """
        if not os.path.exists(init_source):
            disk_size = params.get("disk_size", "1G")
            if format == "qcow2":
                create_option = "" if not disk_prealloc else "-o preallocation=full"
                process.run('qemu-img create -f qcow2 %s %s %s' % (init_source, disk_size, create_option),
                            shell=True, verbose=True)
            elif format == "raw":
                process.run('qemu-img create -f raw %s %s' % (init_source, disk_size),
                            shell=True, verbose=True)
            else:
                open(init_source, 'a').close()
        if virsh.is_alive(vm_name) and 'cdrom' in extra_param:
            virsh.destroy(vm_name)
        if 'cdrom' in extra_param:
            init_source = "''"
        virsh.attach_disk(vm_name, init_source, target_device,
                          extra_param, **virsh_dargs)
        vmxml_disk = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug("Current vmxml after adding disk is %s\n" % vmxml_disk)
Example #3
 def clean_clone_vm():
     """
     Clean up cloned domain.
     """
     try:
         if virsh.domain_exists(vm_clone_name):
             if virsh.is_alive(vm_clone_name):
                 virsh.destroy(vm_clone_name, ignore_status=False)
             virsh.undefine(vm_clone_name, ignore_status=False)
         if os.path.exists(clone_image):
             os.remove(clone_image)
     except error.CmdError as detail:
         raise error.TestFail("Clean clone guest failed!:%s" % detail)
Example #4
def check_numatune_xml(params):
    """
    Compare mode and nodeset value with guest XML configuration
    :param params: the parameter dictionary
    """
    #vm_name = params.get("vms")
    vm_name = params.get("main_vm")
    mode = params.get("numa_mode", "")
    nodeset = params.get("numa_nodeset", "")
    options = params.get("options", "")
    # --config option will act after vm shutdown.
    if options == "config":
        virsh.shutdown(vm_name)
    # The verification of the numa params should
    # be done when vm is running.
    if not virsh.is_alive(vm_name):
        virsh.start(vm_name)

    try:
        numa_params = libvirt_xml.VMXML.get_numa_memory_params(vm_name)
    # The VM XML omits the numa entry when placement is auto and mode is
    # strict, so set numa_params manually when that exception happens.
    except LibvirtXMLAccessorError:
        numa_params = {'placement': 'auto', 'mode': 'strict'}

    if not numa_params:
        logging.error("Could not get numa parameters for %s", vm_name)
        return False

    mode_from_xml = numa_params['mode']
    # if the placement is auto, there is no nodeset in numa param.
    try:
        nodeset_from_xml = numa_params['nodeset']
    except KeyError:
        nodeset_from_xml = ""

    if mode and mode != mode_from_xml:
        logging.error("To expect %s: %s", mode, mode_from_xml)
        return False

    # The nodeset in the guest XML may use a different notation than the
    # value we set, so normalize both sides before comparing; e.g. a nodeset
    # of '0,1,2' appears as '0-2' in the guest XML.
    nodeset = cpus_parser(nodeset)
    nodeset_from_xml = cpus_parser(nodeset_from_xml)

    if nodeset and nodeset != nodeset_from_xml:
        logging.error("To expect %s: %s", nodeset, nodeset_from_xml)
        return False

    return True
Example #5
def check_numatune_xml(params):
    """
    Compare mode and nodeset value with guest XML configuration
    :param params: the parameter dictionary
    """
    # vm_name = params.get("vms")
    vm_name = params.get("main_vm")
    mode = params.get("numa_mode", "")
    nodeset = params.get("numa_nodeset", "")
    options = params.get("options", "")
    # --config option will act after vm shutdown.
    if options == "config":
        virsh.shutdown(vm_name)
    # The verification of the numa params should
    # be done when vm is running.
    if not virsh.is_alive(vm_name):
        virsh.start(vm_name)

    try:
        numa_params = libvirt_xml.VMXML.get_numa_memory_params(vm_name)
    # The VM XML omits the numa entry when placement is auto and mode is
    # strict, so set numa_params manually when that exception happens.
    except LibvirtXMLAccessorError:
        numa_params = {"placement": "auto", "mode": "strict"}

    if not numa_params:
        logging.error("Could not get numa parameters for %s", vm_name)
        return False

    mode_from_xml = numa_params["mode"]
    # if the placement is auto, there is no nodeset in numa param.
    try:
        nodeset_from_xml = numa_params["nodeset"]
    except KeyError:
        nodeset_from_xml = ""

    if mode and mode != mode_from_xml:
        logging.error("To expect %s: %s", mode, mode_from_xml)
        return False

    # The nodeset in the guest XML may use a different notation than the
    # value we set, so normalize both sides before comparing; e.g. a nodeset
    # of '0,1,2' appears as '0-2' in the guest XML.
    nodeset = cpus_parser(nodeset)
    nodeset_from_xml = cpus_parser(nodeset_from_xml)

    if nodeset and nodeset != nodeset_from_xml:
        logging.error("To expect %s: %s", nodeset, nodeset_from_xml)
        return False

    return True
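
A hypothetical call with illustrative values (a plain dict stands in for the
avocado params object, which exposes the same get() interface):

    params = {"main_vm": "avocado-vt-vm1", "numa_mode": "strict",
              "numa_nodeset": "0-1", "options": ""}
    if not check_numatune_xml(params):
        logging.error("numatune XML does not match the requested values")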
Example #6
    def add_disk(vm_name, init_source, target_device, extra_param):
        """
        Add disk/cdrom for test vm

        :param vm_name: guest name
        :param init_source: source file
        :param target_device: target of disk device
        :param extra_param: additional arguments to command
        """
        if not os.path.exists(new_disk):
            open(new_disk, 'a').close()
        if virsh.is_alive(vm_name) and 'cdrom' in extra_param:
            virsh.destroy(vm_name)
        virsh.attach_disk(vm_name, init_source, target_device, extra_param,
                          **virsh_dargs)
Example #7
def check_numatune_xml(params):
    """
    Compare mode and nodeset value with guest XML configuration
    @param params: the parameter dictionary
    """
    vm_name = params.get("vms")
    mode = params.get("numa_mode", "")
    nodeset = params.get("numa_nodeset", "")
    options = params.get("options", "")
    #--config option will act after vm shutdown.
    if options == "config":
        virsh.shutdown(vm_name)
    #The verification of the numa params should
    #be done when vm is running.
    if not virsh.is_alive(vm_name):
        virsh.start(vm_name)

    numa_params = libvirt_xml.VMXML.get_numa_params(vm_name)
    if not numa_params:
        logging.error("Could not get numa parameters for %s", vm_name)
        return False

    mode_from_xml = numa_params['mode']
    #if the placement is auto, there is no nodeset in numa param.
    try:
        nodeset_from_xml = numa_params['nodeset']
    except KeyError:
        nodeset_from_xml = ""

    if mode and mode != mode_from_xml:
        logging.error("To expect %s: %s", mode, mode_from_xml)
        return False

    # The nodeset in the guest XML may use a different notation than the
    # value we set, so normalize both sides before comparing; e.g. a nodeset
    # of '0,1,2' appears as '0-2' in the guest XML.
    nodeset = nodeset_parser(nodeset)
    nodeset_from_xml = nodeset_parser(nodeset_from_xml)

    if nodeset and nodeset != nodeset_from_xml:
        logging.error("To expect %s: %s", nodeset, nodeset_from_xml)
        return False

    return True
Example #8
def get_cpu_info_from_virsh(params):
    """
    Try to get cpu model and features from virsh with 'host-model'
    """
    vm_arch = params["vm_arch_name"]
    if vm_arch not in ['x86_64', 'i686']:
        raise NotImplementedError("Arch '%s' is not supported" % vm_arch)
    machine = params.get('machine_type')
    name = uuid.uuid4().hex
    try:
        path.find_command("virsh")
    except path.CmdNotFoundError:
        logging.warning("Virsh executable not set or found on path")
        return

    xml = """
            <domain type='kvm'>
                <name>%s</name>
                <memory>1</memory>
                <os>
                    <type arch='%s' machine='%s'>hvm</type>
                </os>
                <cpu mode='host-model'/>
            </domain>
          """ % (name, vm_arch, machine)
    xml_file = os.path.join(data_dir.get_tmp_dir(), "temp_xml_for_cpu")
    with open(xml_file, "w") as f:
        f.write(xml)
    try:
        logging.info("Get cpu model and features from virsh")
        virsh.define(xml_file)
        virsh.start(name)
    except Exception as err:
        logging.error(err)
        return
    else:
        cpu_info = get_cpu_info_from_virsh_qemu_cli(name)
        return cpu_info
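    # The finally clause below still runs after the return above, so the
    # transient test domain is always destroyed and undefined.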
    finally:
        if virsh.is_alive(name):
            virsh.destroy(name, ignore_status=True)
        virsh.undefine(name, ignore_status=True)
Example #9
def run(test, params, env):
    """
    Test command: virsh net-destroy.

    The command can forcefully stop a given network.
    1.Make sure the network exists.
    2.Prepare network status.
    3.Perform virsh net-destroy operation.
    4.Check if the network has been destroyed.
    5.Recover network environment.
    6.Confirm the test result.
    """

    net_ref = params.get("net_destroy_net_ref")
    extra = params.get("net_destroy_extra", "")
    network_name = params.get("net_destroy_network", "default")
    network_status = params.get("net_destroy_status", "active")
    status_error = params.get("status_error", "no")
    net_persistent = "yes" == params.get("net_persistent", "yes")
    net_cfg_file = params.get("net_cfg_file",
                              "/usr/share/libvirt/networks/default.xml")
    check_libvirtd = "yes" == params.get("check_libvirtd")
    vm_defined = "yes" == params.get("vm_defined")
    check_vm = "yes" == params.get("check_vm")

    # libvirt acl polkit related params
    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    output_all = virsh.net_list("--all").stdout.strip()
    # prepare the network status: active, persistent
    if not re.search(network_name, output_all):
        if net_persistent:
            virsh.net_define(net_cfg_file, ignore_status=False)
            virsh.net_start(network_name, ignore_status=False)
        else:
            virsh.create(net_cfg_file, ignore_status=False)
    # Backup the current network xml
    net_xml_bk = os.path.join(data_dir.get_tmp_dir(), "%s.xml" % network_name)
    virsh.net_dumpxml(network_name, to_file=net_xml_bk)
    if net_persistent:
        if not virsh.net_state_dict()[network_name]['persistent']:
            logging.debug("make the network persistent...")
            virsh.net_define(net_xml_bk)
    else:
        if virsh.net_state_dict()[network_name]['persistent']:
            virsh.net_undefine(network_name, ignore_status=False)
    if not virsh.net_state_dict()[network_name]['active']:
        if network_status == "active":
            virsh.net_start(network_name, ignore_status=False)
    else:
        if network_status == "inactive":
            logging.debug(
                "destroy network as we need to test inactive network...")
            virsh.net_destroy(network_name, ignore_status=False)
    logging.debug("After prepare: %s" % virsh.net_state_dict())

    # Run test case
    if net_ref == "uuid":
        net_ref = virsh.net_uuid(network_name).stdout.strip()
    elif net_ref == "name":
        net_ref = network_name

    if check_libvirtd or check_vm:
        vm_name = params.get("main_vm")
        if virsh.is_alive(vm_name):
            virsh.destroy(vm_name)
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        vmxml_backup = vmxml
        # make sure there is interface with source network as default
        iface_devices = vmxml.get_devices(device_type="interface")
        has_default_net = False
        for iface in iface_devices:
            source = iface.get_source()
            if 'network' in source.keys() and source['network'] == 'default':
                has_default_net = True
                break
            elif 'bridge' in source.keys() and source['bridge'] == 'virbr0':
                has_default_net = True
                break
        if not has_default_net:
            options = "network default --current"
            virsh.attach_interface(vm_name, options, ignore_status=False)
        try:
            if vm_defined:
                ret = virsh.start(vm_name)
            else:
                logging.debug("undefine the vm, then create the vm...")
                vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
                virsh.undefine(vm_name)
                ret = virsh.create(vmxml.xml)
                logging.debug(ret.stdout)
            # check the create or start cmd status
            utils_test.libvirt.check_exit_status(
                ret, expect_error=(network_status != 'active'))
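            # Start from a failing status; the checks below reset it to 0 on success.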
            status = 1

            if status_error != 'yes':
                libvirtd = utils_libvirtd.Libvirtd("virtqemud")
                daemon_name = libvirtd.service_name
                pid_before_run = utils_misc.get_pid(daemon_name)
                ret = virsh.net_destroy(net_ref,
                                        extra,
                                        uri=uri,
                                        debug=True,
                                        unprivileged_user=unprivileged_user,
                                        ignore_status=True)
                utils_test.libvirt.check_exit_status(ret, expect_error=False)
                # check_libvirtd pid no change
                pid_after_run = utils_misc.get_pid(daemon_name)
                if pid_after_run != pid_before_run:
                    test.fail("libvirtd crash after destroy network!")
                    status = 1
                else:
                    logging.debug(
                        "libvirtd do not crash after destroy network!")
                    status = 0
                if check_libvirtd:
                    # destroy vm, check libvirtd pid no change
                    ret = virsh.destroy(vm_name)
                    utils_test.libvirt.check_exit_status(ret,
                                                         expect_error=False)
                    pid_after_run2 = utils_misc.get_pid(daemon_name)
                    if pid_after_run2 != pid_before_run:
                        test.fail("libvirtd crash after destroy vm!")
                        status = 1
                    else:
                        logging.debug(
                            "libvirtd do not crash after destroy vm!")
                        status = 0
                elif check_vm:
                    # restart libvirtd and check vm is running
                    libvirtd = utils_libvirtd.Libvirtd()
                    libvirtd.restart()
                    if not virsh.is_alive(vm_name):
                        test.fail(
                            "vm shutdown when transient network destroyed then libvirtd restart"
                        )
                    else:
                        status = 0

        finally:
            if not vm_defined:
                vmxml_backup.define()
            vmxml_backup.sync()

    else:
        readonly = (params.get("net_destroy_readonly", "no") == "yes")
        status = virsh.net_destroy(net_ref,
                                   extra,
                                   uri=uri,
                                   readonly=readonly,
                                   debug=True,
                                   unprivileged_user=unprivileged_user,
                                   ignore_status=True).exit_status
        # Confirm the network has been destroyed.
        if net_persistent:
            if virsh.net_state_dict()[network_name]['active']:
                status = 1
        else:
            output_all = virsh.net_list("--all").stdout.strip()
            if re.search(network_name, output_all):
                status = 1
                logging.debug(
                    "transient network should not exists after destroy")

    # Recover network status to system default status
    try:
        if network_name not in virsh.net_state_dict():
            virsh.net_define(net_xml_bk, ignore_status=False)
        if not virsh.net_state_dict()[network_name]['active']:
            virsh.net_start(network_name, ignore_status=False)
        if not virsh.net_state_dict()[network_name]['persistent']:
            virsh.net_define(net_xml_bk, ignore_status=False)
        if not virsh.net_state_dict()[network_name]['autostart']:
            virsh.net_autostart(network_name, ignore_status=False)
    except process.CmdError:
        test.error("Recover network status failed!")

    # Clean up the backup network xml file
    if os.path.isfile(net_xml_bk):
        data_dir.clean_tmp_files()
        logging.debug("Cleaning up the network backup xml")

    # Check status_error
    if status_error == "yes":
        if status == 0:
            test.fail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0:
            test.fail("Run failed with right command")
    else:
        test.error("The status_error must be 'yes' or 'no'!")
Example #10
def run(test, params, env):
    """
    Test command: virsh net-destroy.

    The command can forcefully stop a given network.
    1.Make sure the network exists.
    2.Prepare network status.
    3.Perform virsh net-destroy operation.
    4.Check if the network has been destroyed.
    5.Recover network environment.
    6.Confirm the test result.
    """

    net_ref = params.get("net_destroy_net_ref")
    extra = params.get("net_destroy_extra", "")
    network_name = params.get("net_destroy_network", "default")
    network_status = params.get("net_destroy_status", "active")
    status_error = params.get("status_error", "no")
    net_persistent = "yes" == params.get("net_persistent", "yes")
    net_cfg_file = params.get("net_cfg_file", "/usr/share/libvirt/networks/default.xml")
    check_libvirtd = "yes" == params.get("check_libvirtd")
    vm_defined = "yes" == params.get("vm_defined")

    # libvirt acl polkit related params
    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    output_all = virsh.net_list("--all").stdout.strip()
    # prepare the network status: active, persistent
    if not re.search(network_name, output_all):
        if net_persistent:
            virsh.net_define(net_cfg_file, ignore_status=False)
            virsh.net_start(network_name, ignore_status=False)
        else:
            virsh.create(net_cfg_file, ignore_status=False)
    if net_persistent:
        if not virsh.net_state_dict()[network_name]['persistent']:
            logging.debug("make the network persistent...")
            make_net_persistent(network_name)
    else:
        if virsh.net_state_dict()[network_name]['persistent']:
            virsh.net_undefine(network_name, ignore_status=False)
    if not virsh.net_state_dict()[network_name]['active']:
        if network_status == "active":
            virsh.net_start(network_name, ignore_status=False)
    else:
        if network_status == "inactive":
            logging.debug("destroy network as we need to test inactive network...")
            virsh.net_destroy(network_name, ignore_status=False)
    logging.debug("After prepare: %s" % virsh.net_state_dict())

    # Run test case
    if net_ref == "uuid":
        net_ref = virsh.net_uuid(network_name).stdout.strip()
    elif net_ref == "name":
        net_ref = network_name

    if check_libvirtd:
        vm_name = params.get("main_vm")
        if virsh.is_alive(vm_name):
            virsh.destroy(vm_name)
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        vmxml_backup = vmxml
        # make sure there is interface with source network as default
        iface_devices = vmxml.get_devices(device_type="interface")
        has_default_net = False
        for iface in iface_devices:
            source = iface.get_source()
            if 'network' in source.keys() and source['network'] == 'default':
                has_default_net = True
                break
            elif 'bridge' in source.keys() and source['bridge'] == 'virbr0':
                has_default_net = True
                break
        if not has_default_net:
            options = "network default --current"
            virsh.attach_interface(vm_name, options, ignore_status=False)
        try:
            if vm_defined:
                ret = virsh.start(vm_name)
            else:
                logging.debug("undefine the vm, then create the vm...")
                vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
                virsh.undefine(vm_name)
                ret = virsh.create(vmxml.xml)
                logging.debug(ret.stdout)
            # check the create or start cmd status
            utils_test.libvirt.check_exit_status(ret, expect_error=(network_status != 'active'))
            status = 1

            if status_error != 'yes':
                cmd = "ps -ef | grep /usr/sbin/libvirtd | grep -v grep"
                # record the libvirt pid then destroy network
                libvirtd_pid = process.run(cmd, shell=True).stdout_text.strip().split()[1]
                ret = virsh.net_destroy(net_ref, extra, uri=uri, debug=True,
                                        unprivileged_user=unprivileged_user,
                                        ignore_status=True)
                utils_test.libvirt.check_exit_status(ret, expect_error=False)
                # check_libvirtd pid no change
                result = check_libvirtd_restart(libvirtd_pid, cmd)
                if result:
                    test.fail("libvirtd crash after destroy network!")
                    status = 1
                else:
                    logging.debug("libvirtd do not crash after destroy network!")
                    status = 0
                # destroy vm, check libvirtd pid no change
                ret = virsh.destroy(vm_name)
                utils_test.libvirt.check_exit_status(ret, expect_error=False)
                result = check_libvirtd_restart(libvirtd_pid, cmd)
                if result:
                    test.fail("libvirtd crash after destroy vm!")
                    status = 1
                else:
                    logging.debug("libvirtd do not crash after destroy vm!")
                    status = 0
        finally:
            if not vm_defined:
                vmxml_backup.define()
            vmxml_backup.sync()

    else:
        readonly = (params.get("net_destroy_readonly", "no") == "yes")
        status = virsh.net_destroy(net_ref, extra, uri=uri, readonly=readonly,
                                   debug=True, unprivileged_user=unprivileged_user,
                                   ignore_status=True).exit_status
        # Confirm the network has been destroyed.
        if net_persistent:
            if virsh.net_state_dict()[network_name]['active']:
                status = 1
        else:
            output_all = virsh.net_list("--all").stdout.strip()
            if re.search(network_name, output_all):
                status = 1
                logging.debug("transient network should not exists after destroy")

    # Recover network status to system default status
    try:
        if network_name not in virsh.net_state_dict():
            virsh.net_define(net_cfg_file, ignore_status=False)
        if not virsh.net_state_dict()[network_name]['active']:
            virsh.net_start(network_name, ignore_status=False)
        if not virsh.net_state_dict()[network_name]['persistent']:
            make_net_persistent(network_name)
        if not virsh.net_state_dict()[network_name]['autostart']:
            virsh.net_autostart(network_name, ignore_status=False)
    except process.CmdError:
        test.error("Recover network status failed!")
    # Check status_error
    if status_error == "yes":
        if status == 0:
            test.fail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0:
            test.fail("Run failed with right command")
    else:
        test.error("The status_error must be 'yes' or 'no'!")
Example #11
def run(test, params, env):
    """
    Test command: virsh domcontrol.

    The command can show the state of a control interface to the domain.
    1.Prepare test environment, destroy or suspend a VM.
    2.Do an action to get a subprocess (dump, save, restore, managedsave) if
      domcontrol_job is set as yes.
    3.Perform virsh domcontrol to check state of a control interface to the
      domain.
    4.Recover the VM's status and wait for the subprocess over.
    5.Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    start_vm = params.get("start_vm")
    pre_vm_state = params.get("pre_vm_state", "running")
    options = params.get("domcontrol_options", "")
    action = params.get("domcontrol_action", "dump")
    tmp_file = os.path.join(data_dir.get_tmp_dir(), "domcontrol.tmp")
    vm_ref = params.get("domcontrol_vm_ref")
    job = params.get("domcontrol_job", "yes")
    readonly = "yes" == params.get("readonly", "no")
    status_error = params.get("status_error", "no")
    remote_uri = params.get("remote_uri")
    remote_ip = params.get("remote_ip")
    remote_pwd = params.get("remote_pwd")
    remote_user = params.get("remote_user", "root")
    if start_vm == "no" and vm.is_alive():
        vm.destroy()

    if remote_uri:
        if remote_ip.count("EXAMPLE"):
            test.cancel("The remote ip is Sample one, pls configure it first")
        ssh_key.setup_ssh_key(remote_ip, remote_user, remote_pwd)

    # Instead of "paused_after_start_vm", use "pre_vm_state".
    # After start the VM, wait for some time to make sure the job
    # can be created on this domain.
    if start_vm == "yes":
        vm.wait_for_login()
        if params.get("pre_vm_state") == "suspend":
            vm.pause()

    domid = vm.get_id()
    domuuid = vm.get_uuid()

    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref == "uuid":
        vm_ref = domuuid
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = vm_name

    if action == "managedsave":
        tmp_file = '/var/lib/libvirt/qemu/save/%s.save' % vm.name

    if action == "restore":
        virsh.save(vm_name, tmp_file, ignore_status=True)

    process = None
    if job == "yes" and start_vm == "yes" and status_error == "no":
        # Check domain control interface state with job on domain.
        process = get_subprocess(action, vm_name, tmp_file)
        while process.poll() is None:
            if vm.is_alive():
                ret = virsh.domcontrol(vm_ref,
                                       options,
                                       ignore_status=True,
                                       debug=True)
                status = ret.exit_status
                # check status_error
                if status != 0:
                    # Do not raise error if domain is not running, as save,
                    # managedsave and restore will change the domain state
                    # from running to shutoff or reverse, and the timing of
                    # the state change is not predictable, so skip the error
                    # of domain state change and focus on domcontrol command
                    # status while domain is running.
                    if vm.is_alive():
                        test.fail("Run failed with right command")
    else:
        if remote_uri:
            # check remote domain status
            if not virsh.is_alive(vm_name, uri=remote_uri):
                # If remote domain is not running, start remote domain
                virsh.start(vm_name, uri=remote_uri)

        # Check domain control interface state without job on domain.
        ret = virsh.domcontrol(vm_ref,
                               options,
                               readonly=readonly,
                               ignore_status=True,
                               debug=True,
                               uri=remote_uri)
        status = ret.exit_status

        # check status_error
        if status_error == "yes":
            if status == 0:
                test.fail("Run successfully with wrong command!")
        elif status_error == "no":
            if status != 0:
                test.fail("Run failed with right command")

    # Recover the environment.
    if action == "managedsave":
        virsh.managedsave_remove(vm_name, ignore_status=True)
    if os.path.exists(tmp_file):
        os.unlink(tmp_file)
    if remote_uri:
        if virsh.is_alive(vm_name, uri=remote_uri):
            # Destroy remote domain
            virsh.destroy(vm_name, uri=remote_uri)
    if pre_vm_state == "suspend":
        vm.resume()
    if process:
        if process.poll() is None:
            process.kill()
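
get_subprocess() is defined elsewhere in this test module; a minimal sketch of
what it might look like (an assumption, using the plain subprocess module
rather than whatever helper the test actually uses):

    import subprocess

    def get_subprocess(action, vm_name, tmp_file):
        """Start a long-running virsh job so domcontrol can be checked against it."""
        cmd_map = {
            "dump": "virsh dump %s %s" % (vm_name, tmp_file),
            "save": "virsh save %s %s" % (vm_name, tmp_file),
            "restore": "virsh restore %s" % tmp_file,
            "managedsave": "virsh managedsave %s" % vm_name,
        }
        return subprocess.Popen(cmd_map[action], shell=True)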
Example #12
def compare_cpu_info(test, params):
    """
    Compare flags between qemu cli and libvirt dumpxml with 'host-model'
    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :return: True or False
    """
    vm_arch = params["vm_arch_name"]
    machine = params["machine_type"]
    name = uuid.uuid4().hex
    try:
        path.find_command("virsh")
    except path.CmdNotFoundError:
        test.cancel("Virsh executable not set or found on path")

    xml = """
            <domain type='kvm'>
                <name>%s</name>
                <memory>1</memory>
                <os>
                    <type arch='%s' machine='%s'>hvm</type>
                </os>
                <cpu mode='host-model'/>
            </domain>
          """ % (name, vm_arch, machine)
    xml_file = os.path.join(data_dir.get_tmp_dir(), "temp_xml_for_cpu")
    with open(xml_file, "w") as f:
        f.write(xml)
    try:
        test.log.info("Get cpu model and features from virsh")
        virsh.define(xml_file)
        virsh.start(name)
    except Exception as err:
        test.cancel(err)
    else:
        global qemu_cpu_info, libvirt_cpu_info, qemu_proc_cpu_flags
        qemu_cpu_info = cpu.get_cpu_info_from_virsh_qemu_cli(name)
        libvirt_cpu_info = get_cpu_info_from_dumpxml(name)

        cpu_model_qemu = qemu_cpu_info["model"]
        cpu_model_libvirt = libvirt_cpu_info["model"]
        qemu_proc_cpu_flags = qemu_cpu_info["flags"]
        if cpu_model_qemu != cpu_model_libvirt:
            test.log.error("mismatch cpu model bwteen qemu %s and libvirt %s",
                           cpu_model_qemu, cpu_model_libvirt)
            return False
        params["cpu_model"] = cpu_model_qemu
        qemu_cpu_flags = cpu.parse_qemu_cpu_flags(qemu_cpu_info['flags'])
        libvirt_cpu_flags = libvirt_cpu_info['features']
        qemu_cpu_flags = extend_flags_patterns(qemu_cpu_flags)
        exclude_map = eval(params.get('exclude_map', '{}'))
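        # exclude_map is expected to map a CPU model name to a list of flags
        # that may legitimately differ, e.g. {'Skylake-Client': ['hle', 'rtm']}
        # (illustrative values).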
        check_exclude = False
        if cpu_model_qemu in exclude_map.keys():
            exclude_map_flags = exclude_map[cpu_model_qemu]
            check_exclude = True
        miss_flags = []
        mismatch_flags = []
        result_bool = True
        for flag in libvirt_cpu_flags.keys():
            if flag not in qemu_cpu_flags.keys():
                if libvirt_cpu_flags[flag] == 'on':
                    miss_flags.append(flag)
            elif libvirt_cpu_flags[flag] != qemu_cpu_flags[flag]:
                mismatch_flags.append(flag)
        if miss_flags:
            test.log.error("\nmiss flags %s from qemu cli\n", miss_flags)
            if not check_exclude:
                result_bool = False
            else:
                for miss_flag in miss_flags:
                    if miss_flag not in exclude_map_flags:
                        result_bool = False
                        break
        if mismatch_flags:
            test.log.error("\nmismatch flags %s between libvirt and qemu\n",
                           mismatch_flags)
            if not check_exclude:
                result_bool = False
            else:
                for mismatch_flag in mismatch_flags:
                    if mismatch_flag not in exclude_map_flags:
                        result_bool = False
                        break
        return result_bool
    finally:
        if virsh.is_alive(name):
            virsh.destroy(name, ignore_status=True)
        virsh.undefine(name, ignore_status=True)
Example #13
def run(test, params, env):
    """
    Test migration over unix socket.
    1) Migrate vm over unix socket
    2) Migrate vm over unix socket - libvirt tunnelled(--tunnelled)
    3) Migrate vm over unix socket - enable multifd(--parallel)
    4) Migrate vm with copy storage over unix socket - one disk
    5) Migrate vm with copy storage over unix socket - multiple disks
    6) Abort migration over unix socket and migrate again
    7) Abort migration with copy storage over unix socket, and migrate again
    8) Migrate vm with copy storage over unix socket - multiple disks
        - enable multifd(--parallel)

    :param test: test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def update_disk(vm, params):
        """
        Update disk for testing.

        :param vm: vm object.
        :param params: the parameters used.
        :return: updated images.
        """
        local_image_list = []
        remote_image_list = []
        vm_did_list = []
        # Change the disk of the vm
        if storage_type == "nfs":
            libvirt.set_vm_disk(vm, params)
        else:
            disk_format = params.get("disk_format", "qcow2")
            disk_num = eval(params.get("disk_num", "1"))
            blk_source = vm.get_first_disk_devices()['source']
            vsize = utils_misc.get_image_info(blk_source).get("vsize")
            remote_session = remote.remote_login("ssh", server_ip, "22",
                                                 server_user, server_pwd,
                                                 r'[$#%]')
            # Create disk on remote host
            utils_misc.make_dirs(os.path.dirname(blk_source), remote_session)
            libvirt_disk.create_disk("file",
                                     disk_format=disk_format,
                                     path=blk_source,
                                     size=vsize,
                                     session=remote_session)
            remote_image_list.append(blk_source)

            for idx in range(2, disk_num + 1):
                disk_path = os.path.join(os.path.dirname(blk_source),
                                         "test%s.img" % str(idx))
                # Create disk on local
                libvirt_disk.create_disk("file",
                                         disk_format=disk_format,
                                         path=disk_path)
                local_image_list.append(disk_path)

                target_dev = 'vd' + chr(idx + ord('a') - 1)
                new_disk_dict = {"driver_type": disk_format}
                vm_was_running = vm.is_alive()
                libvirt_pcicontr.reset_pci_num(vm_name)
                if vm_was_running and not vm.is_alive():
                    vm.start()
                    vm.wait_for_login().close()
                result = libvirt.attach_additional_device(
                    vm_name, target_dev, disk_path, new_disk_dict, False)
                libvirt.check_exit_status(result)

                libvirt_disk.create_disk("file",
                                         disk_format=disk_format,
                                         path=disk_path,
                                         session=remote_session)

                remote_image_list.append(disk_path)
                vm_did_list.append(target_dev)

            remote_session.close()
        return local_image_list, remote_image_list, vm_did_list

    def check_socket(params):
        """
        Check sockets' number

        :param params: the parameters used
        :raise: test.fail when command fails
        """
        postcopy_options = params.get("postcopy_options")
        vm_name = params.get("migrate_main_vm")
        exp_num = params.get("expected_socket_num", "2")
        if postcopy_options:
            migration_test.set_migratepostcopy(vm_name)
        cmd = ("netstat -xanp|grep -E \"CONNECTED"
               ".*(desturi-socket|migrateuri-socket)\" | wc -l")
        res = process.run(cmd, shell=True).stdout_text.strip()
        if res != exp_num:
            test.fail("There should be {} connected unix sockets, "
                      "but found {} sockets.".format(exp_num, res))

    migration_test = migration.MigrationTest()
    migration_test.check_parameters(params)

    # Local variables
    virsh_args = {"debug": True}
    server_ip = params["server_ip"] = params.get("remote_ip")
    server_user = params["server_user"] = params.get("remote_user", "root")
    server_pwd = params["server_pwd"] = params.get("remote_pwd")
    client_ip = params["client_ip"] = params.get("local_ip")
    client_pwd = params["client_pwd"] = params.get("local_pwd")
    virsh_options = params.get("virsh_options", "")
    extra = params.get("virsh_migrate_extra")
    options = params.get("virsh_migrate_options", "--live --p2p --verbose")
    storage_type = params.get("storage_type")
    disk_num = params.get("disk_num")
    desturi_port = params.get("desturi_port", "22222")
    migrateuri_port = params.get("migrateuri_port", "33333")
    disks_uri_port = params.get("disks_uri_port", "44444")
    migrate_again = "yes" == params.get("migrate_again", "no")
    action_during_mig = params.get("action_during_mig")
    if action_during_mig:
        action_during_mig = eval(action_during_mig)

    extra_args = migration_test.update_virsh_migrate_extra_args(params)

    mig_result = None
    local_image_list = []
    remote_image_list = []
    vm_did_list = []

    if not libvirt_version.version_compare(6, 6, 0):
        test.cancel("This libvirt version doesn't support "
                    "migration over unix.")

    if storage_type == "nfs":
        # Params for NFS shared storage
        shared_storage = params.get("migrate_shared_storage", "")
        if shared_storage == "":
            default_guest_asset = defaults.get_default_guest_os_info()['asset']
            default_guest_asset = "%s.qcow2" % default_guest_asset
            shared_storage = os.path.join(params.get("nfs_mount_dir"),
                                          default_guest_asset)
            logging.debug("shared_storage:%s", shared_storage)

        # Params to update disk using shared storage
        params["disk_type"] = "file"
        params["disk_source_protocol"] = "netfs"
        params["mnt_path_name"] = params.get("nfs_mount_dir")

    # params for migration connection
    params["virsh_migrate_connect_uri"] = libvirt_vm.complete_uri(
        params.get("migrate_source_host"))
    src_uri = params.get("virsh_migrate_connect_uri")
    dest_uri = params.get("virsh_migrate_desturi")
    dest_uri_ssh = libvirt_vm.complete_uri(params.get("migrate_dest_host"))

    unix_obj = None

    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()
    bk_uri = vm.connect_uri

    postcopy_options = params.get("postcopy_options")
    if postcopy_options:
        extra = "%s %s" % (extra, postcopy_options)

    # For safety reasons, we'd better back up the guest XML file first.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()

    try:
        unix_obj = utils_conn.UNIXSocketConnection(params)
        unix_obj.conn_setup()
        unix_obj.auto_recover = True

        local_image_list, remote_image_list, vm_did_list = update_disk(
            vm, params)

        if not vm.is_alive():
            vm.start()

        logging.debug("Guest xml after starting:\n%s",
                      vm_xml.VMXML.new_from_dumpxml(vm_name))

        vm_session = vm.wait_for_login()

        for did in vm_did_list:
            utils_disk.linux_disk_check(vm_session, did)

        # Execute migration process
        vms = [vm]

        migration_test.do_migration(vms,
                                    None,
                                    dest_uri,
                                    'orderly',
                                    options,
                                    thread_timeout=600,
                                    ignore_status=True,
                                    virsh_opt=virsh_options,
                                    func=action_during_mig,
                                    extra_opts=extra,
                                    **extra_args)

        mig_result = migration_test.ret

        if migrate_again:
            logging.debug(
                "Sleeping 10 seconds before rerunning the migration.")
            time.sleep(10)
            if params.get("migrate_again_clear_func", "yes") == "yes":
                action_during_mig = None
            extra_args["status_error"] = params.get(
                "migrate_again_status_error", "no")
            migration_test.do_migration(vms,
                                        None,
                                        dest_uri,
                                        'orderly',
                                        options,
                                        thread_timeout=900,
                                        ignore_status=True,
                                        virsh_opt=virsh_options,
                                        extra_opts=extra,
                                        func=action_during_mig,
                                        **extra_args)

            mig_result = migration_test.ret

        if int(mig_result.exit_status) == 0:
            vm.connect_uri = dest_uri_ssh
            if not utils_misc.wait_for(
                    lambda: virsh.is_alive(
                        vm_name, uri=dest_uri_ssh, debug=True), 60):
                test.fail("The migrated VM should be alive!")
            if vm_did_list:
                vm_session_after_mig = vm.wait_for_serial_login(timeout=240)
                for did in vm_did_list:
                    vm_session_after_mig.cmd("echo mytest >> /mnt/%s1/mytest" %
                                             did)
    finally:
        logging.info("Recover test environment")
        vm.connect_uri = bk_uri
        # Clean VM on destination and source
        migration_test.cleanup_vm(vm, dest_uri)

        orig_config_xml.sync()

        # Remove image files
        for source_file in local_image_list:
            libvirt.delete_local_disk("file", path=source_file)
        for img in remote_image_list:
            remote.run_remote_cmd("rm -rf %s" % img, params)
Example #14
def run(test, params, env):
    """
    Test command: virsh domcontrol.

    The command can show the state of a control interface to the domain.
    1.Prepare test environment, destroy or suspend a VM.
    2.Do an action to get a subprocess (dump, save, restore, managedsave) if
      domcontrol_job is set as yes.
    3.Perform virsh domcontrol to check state of a control interface to the
      domain.
    4.Recover the VM's status and wait for the subprocess over.
    5.Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    start_vm = params.get("start_vm")
    pre_vm_state = params.get("pre_vm_state", "running")
    options = params.get("domcontrol_options", "")
    action = params.get("domcontrol_action", "dump")
    tmp_file = os.path.join(data_dir.get_tmp_dir(), "domcontrol.tmp")
    vm_ref = params.get("domcontrol_vm_ref")
    job = params.get("domcontrol_job", "yes")
    readonly = "yes" == params.get("readonly", "no")
    status_error = params.get("status_error", "no")
    remote_uri = params.get("remote_uri")
    remote_ip = params.get("remote_ip")
    remote_pwd = params.get("remote_pwd")
    remote_user = params.get("remote_user", "root")
    if start_vm == "no" and vm.is_alive():
        vm.destroy()

    if remote_uri:
        if remote_ip.count("EXAMPLE"):
            test.cancel("The remote ip is Sample one, pls configure it first")
        ssh_key.setup_ssh_key(remote_ip, remote_user, remote_pwd)

    # Instead of "paused_after_start_vm", use "pre_vm_state".
    # After start the VM, wait for some time to make sure the job
    # can be created on this domain.
    if start_vm == "yes":
        vm.wait_for_login()
        if params.get("pre_vm_state") == "suspend":
            vm.pause()

    domid = vm.get_id()
    domuuid = vm.get_uuid()

    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref == "uuid":
        vm_ref = domuuid
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = vm_name

    if action == "managedsave":
        tmp_file = '/var/lib/libvirt/qemu/save/%s.save' % vm.name

    if action == "restore":
        virsh.save(vm_name, tmp_file, ignore_status=True)

    process = None
    if job == "yes" and start_vm == "yes" and status_error == "no":
        # Check domain control interface state with job on domain.
        process = get_subprocess(action, vm_name, tmp_file)
        while process.poll() is None:
            if vm.is_alive():
                ret = virsh.domcontrol(vm_ref, options, ignore_status=True,
                                       debug=True)
                status = ret.exit_status
                # check status_error
                if status != 0:
                    # Do not raise error if domain is not running, as save,
                    # managedsave and restore will change the domain state
                    # from running to shutoff or reverse, and the timing of
                    # the state change is not predictable, so skip the error
                    # of domain state change and focus on domcontrol command
                    # status while domain is running.
                    if vm.is_alive():
                        test.fail("Run failed with right command")
    else:
        if remote_uri:
            # check remote domain status
            if not virsh.is_alive(vm_name, uri=remote_uri):
                # If remote domain is not running, start remote domain
                virsh.start(vm_name, uri=remote_uri)

        # Check domain control interface state without job on domain.
        ret = virsh.domcontrol(vm_ref, options, readonly=readonly,
                               ignore_status=True, debug=True, uri=remote_uri)
        status = ret.exit_status

        # check status_error
        if status_error == "yes":
            if status == 0:
                test.fail("Run successfully with wrong command!")
        elif status_error == "no":
            if status != 0:
                test.fail("Run failed with right command")

    # Recover the environment.
    if action == "managedsave":
        virsh.managedsave_remove(vm_name, ignore_status=True)
    if os.path.exists(tmp_file):
        os.unlink(tmp_file)
    if remote_uri:
        if virsh.is_alive(vm_name, uri=remote_uri):
            # Destroy remote domain
            virsh.destroy(vm_name, uri=remote_uri)
    if pre_vm_state == "suspend":
        vm.resume()
    if process:
        if process.poll() is None:
            process.kill()
Example #15
 def _vm_alive():
     return virsh.is_alive(vm_name)
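
A predicate like this is typically handed to a polling helper; for example (an
illustrative pairing, using the utils_misc.wait_for() call that also appears in
the migration example above):

     if not utils_misc.wait_for(_vm_alive, timeout=60):
         test.fail("VM %s did not come up in time" % vm_name)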