def run(test, params, env):
    """
    Test command: virsh qemu-agent-command.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vm_ref = params.get("vm_ref", "domname")
    vm_state = params.get("vm_state", "running")
    cmd = params.get("agent_cmd", "")
    options = params.get("options", "")
    needs_agent = "yes" == params.get("needs_agent", "yes")
    start_vm = "yes" == params.get("start_vm")
    status_error = "yes" == params.get("status_error", "no")
    domuuid = vm.get_uuid()
    domid = ""
    xml_file = os.path.join(test.tmpdir, "vm.xml")
    virsh.dumpxml(vm_name, extra="--inactive", to_file=xml_file)
    libvirtd_inst = utils_libvirtd.Libvirtd()

    # Prepare domain
    try:
        reset_domain(vm, vm_state, needs_agent)
    except Exception as details:
        reset_env(vm_name, xml_file)
        raise error.TestFail(details)
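reset_domain() and reset_env() are defined elsewhere in the test module and are not shown on this page. A minimal, hypothetical sketch of what they are assumed to do (the helper bodies and prepare_guest_agent() are assumptions, not the suite's actual code):

def reset_domain(vm, vm_state, needs_agent=False):
    """Hypothetical sketch: bring the VM to vm_state, with agent if needed."""
    if vm.is_alive():
        vm.destroy()
    if vm_state == "running":
        if needs_agent:
            # Assumed helper on the VM object: start the guest with the
            # qemu-guest-agent channel configured and the agent running.
            vm.prepare_guest_agent()
        else:
            vm.start()
            vm.wait_for_login().close()


def reset_env(vm_name, xml_file):
    """Hypothetical sketch: restore the domain from the backed-up XML."""
    virsh.destroy(vm_name, ignore_status=True)
    virsh.undefine(vm_name, ignore_status=True)
    virsh.define(xml_file)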
def run(test, params, env):
    """
    Test command: virsh qemu-agent-command.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vm_ref = params.get("vm_ref", "domname")
    vm_state = params.get("vm_state", "running")
    cmd = params.get("agent_cmd", "")
    options = params.get("options", "")
    needs_agent = "yes" == params.get("needs_agent", "yes")
    status_error = "yes" == params.get("status_error", "no")
    if not status_error and options:
        option = options.split()[0]
        test_cmd = "qemu-agent-command"
        if virsh.has_command_help_match(test_cmd, option) is None:
            raise error.TestNAError("The current libvirt doesn't support"
                                    " %s option for %s" % (option, test_cmd))
    guest_cpu_busy = "yes" == params.get("guest_cpu_busy", "no")
    password = params.get("password", None)
    domuuid = vm.get_uuid()
    domid = ""
    xml_file = os.path.join(test.tmpdir, "vm.xml")
    virsh.dumpxml(vm_name, extra="--inactive", to_file=xml_file)
    libvirtd_inst = utils_libvirtd.Libvirtd()

    # Prepare domain
    try:
        reset_domain(vm, vm_state, needs_agent, guest_cpu_busy, password)
    except error.TestNAError as details:
        reset_env(vm_name, xml_file)
        raise error.TestNAError(details)
Example #4
def test_blockcommit_operation(vm_name, device_target, blockcommit_option,
                               test):
    """
    Test blockcommit on disk with metadata_cache size attribute

    :param vm_name: domain name
    :param device_target: the target of disk
    :param blockcommit_option: blockcommit option
    :param test: test case itself
    """
    virsh.blockcommit(vm_name,
                      device_target,
                      blockcommit_option,
                      ignore_status=False,
                      debug=True)
    # Check max size value in mirror part
    mirror_xml = virsh.dumpxml(vm_name, debug=False).stdout_text.strip()
    mirror_part_xml = re.findall(r'(<mirror)(.+)((?:\n.+)+)(mirror>)',
                                 mirror_xml)
    mirror_filter_str = ''.join(mirror_part_xml[0])
    mirror_byte_match = "<max_size unit='bytes'>1000</max_size>"
    if mirror_byte_match not in mirror_filter_str:
        test.fail(
            "Failed to generate metadata_cache in blockcommit operation %s" %
            mirror_filter_str)
    virsh.blockjob(vm_name,
                   device_target,
                   " --pivot",
                   ignore_status=True,
                   debug=True)
    pivot_xml = virsh.dumpxml(vm_name, debug=False).stdout_text.strip()
    if mirror_byte_match not in pivot_xml:
        test.fail("Failed to generate metadata_cache in  blockcommit %s" %
                  pivot_xml)
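A hedged invocation sketch for the helper above; the VM name, disk target and blockcommit options are placeholder values, and test is assumed to be the avocado test object:

# Hypothetical usage: commit the active layer of 'vda' and wait for the
# job, so the mirror XML check can run right after the command returns.
test_blockcommit_operation("avocado-vt-vm1", "vda",
                           "--active --wait --verbose", test)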
def run(test, params, env):
    """
    Test for virt-xml-validate
    """
    # Get the full path of virt-xml-validate command.
    VIRT_XML_VALIDATE = os_dep.command("virt-xml-validate")

    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm = env.get_vm(vm_name)
    schema = params.get("schema", "domain")
    output = params.get("output_file", "output")
    output_path = os.path.join(data_dir.get_tmp_dir(), output)

    if schema == "domain":
        virsh.dumpxml(vm_name, to_file=output_path)
    # TODO Add more case for other schema.

    cmd = "%s %s %s" % (VIRT_XML_VALIDATE, output_path, schema)
    cmd_result = utils.run(cmd, ignore_status=True)
    if cmd_result.exit_status:
        raise error.TestFail("virt-xml-validate command failed.\n"
                             "Detail: %s." % cmd_result)

    if cmd_result.stdout.count("fail"):
        raise error.TestFail("xml fails to validate\n"
                             "Detail: %s." % cmd_result)
Example #7
def run(test, params, env):
    """
    Test for virt-xml-validate
    """
    # Get the full path of virt-xml-validate command.
    try:
        VIRT_XML_VALIDATE = os_dep.command("virt-xml-validate")
    except ValueError:
        raise error.TestNAError("Not find virt-xml-validate command on host.")

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    net_name = params.get("net_dumpxml_name", "default")
    pool_name = params.get("pool_dumpxml_name", "default")
    schema = params.get("schema", "domain")
    output = params.get("output_file", "output")
    output_path = os.path.join(data_dir.get_tmp_dir(), output)

    valid_schemas = [
        'domain', 'domainsnapshot', 'network', 'storagepool', 'storagevol',
        'nodedev', 'capability', 'nwfilter', 'secret', 'interface'
    ]
    if schema not in valid_schemas:
        raise error.TestFail("invalid %s specified" % schema)

    virsh_dargs = {'ignore_status': True, 'debug': True}
    if schema == "domainsnapshot":
        domainsnapshot_validate(vm_name, file=output_path, **virsh_dargs)
    elif schema == "network":
        network_validate(net_name, file=output_path, **virsh_dargs)
    elif schema == "storagepool":
        storagepool_validate(pool_name, file=output_path, **virsh_dargs)
    elif schema == "storagevol":
        storagevol_validate(pool_name, file=output_path, **virsh_dargs)
    elif schema == "nodedev":
        nodedev_validate(file=output_path, **virsh_dargs)
    elif schema == "capability":
        capability_validate(file=output_path, **virsh_dargs)
    elif schema == "nwfilter":
        nwfilter_validate(file=output_path, **virsh_dargs)
    elif schema == "secret":
        secret_validate(file=output_path, **virsh_dargs)
    elif schema == "interface":
        interface_validate(file=output_path, **virsh_dargs)
    else:
        # domain
        virsh.dumpxml(vm_name, to_file=output_path)

    cmd = "%s %s %s" % (VIRT_XML_VALIDATE, output_path, schema)
    cmd_result = utils.run(cmd, ignore_status=True)
    if cmd_result.exit_status:
        raise error.TestFail("virt-xml-validate command failed.\n"
                             "Detail: %s." % cmd_result)

    if cmd_result.stdout.count("fail"):
        raise error.TestFail("xml fails to validate\n"
                             "Detail: %s." % cmd_result)
def run(test, params, env):
    """
    Test for virt-xml-validate
    """
    # Get the full path of virt-xml-validate command.
    try:
        VIRT_XML_VALIDATE = os_dep.command("virt-xml-validate")
    except ValueError:
        raise error.TestNAError("Not find virt-xml-validate command on host.")

    vm_name = params.get("main_vm", "virt-tests-vm1")
    net_name = params.get("net_dumpxml_name", "default")
    pool_name = params.get("pool_dumpxml_name", "default")
    schema = params.get("schema", "domain")
    output = params.get("output_file", "output")
    output_path = os.path.join(data_dir.get_tmp_dir(), output)

    valid_schemas = ['domain', 'domainsnapshot', 'network', 'storagepool',
                     'storagevol', 'nodedev', 'capability',
                     'nwfilter', 'secret', 'interface']
    if schema not in valid_schemas:
        raise error.TestFail("invalid %s specified" % schema)

    virsh_dargs = {'ignore_status': True, 'debug': True}
    if schema == "domainsnapshot":
        domainsnapshot_validate(vm_name, file=output_path, **virsh_dargs)
    elif schema == "network":
        network_validate(net_name, file=output_path, **virsh_dargs)
    elif schema == "storagepool":
        storagepool_validate(pool_name, file=output_path, **virsh_dargs)
    elif schema == "storagevol":
        storagevol_validate(pool_name, file=output_path, **virsh_dargs)
    elif schema == "nodedev":
        nodedev_validate(file=output_path, **virsh_dargs)
    elif schema == "capability":
        capability_validate(file=output_path, **virsh_dargs)
    elif schema == "nwfilter":
        nwfilter_validate(file=output_path, **virsh_dargs)
    elif schema == "secret":
        secret_validate(file=output_path, **virsh_dargs)
    elif schema == "interface":
        interface_validate(file=output_path, **virsh_dargs)
    else:
        # domain
        virsh.dumpxml(vm_name, to_file=output_path)

    cmd = "%s %s %s" % (VIRT_XML_VALIDATE, output_path, schema)
    cmd_result = utils.run(cmd, ignore_status=True)
    if cmd_result.exit_status:
        raise error.TestFail("virt-xml-validate command failed.\n"
                             "Detail: %s." % cmd_result)

    if cmd_result.stdout.count("fail"):
        raise error.TestFail("xml fails to validate\n"
                             "Detail: %s." % cmd_result)
def run(test, params, env):
    """
    Test the command virsh vcpucount

    (1) Iterate perform setvcpus operation with four valid options.
    (2) Iterate call virsh vcpucount with given options.
    (3) Check whether the virsh vcpucount works as expected.
    (4) Recover test environment.

    The test works for domain state "shut off" or "running"; it checks the
    vcpucount result after vcpu hotplug using setvcpus.

    For setvcpus, include four valid options:
      --config
      --config --maximum
      --live
      --guest

    For vcpucount options, restrict up to 2 options together. Upstream libvirt
    supports more option combinations now (e.g. 3 options together or a single
    --maximum option); for backward compatibility, only the following options
    are checked:
      None
      --config --active
      --config --maximum
      --live --active
      --live --maximum
      --current --active
      --current --maximum
      --guest
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    xml_file = params.get("vcpucount_xml_file", "vm.xml")
    virsh.dumpxml(vm_name, extra="--inactive", to_file=xml_file)
    pre_vm_state = params.get("vcpucount_pre_vm_state")
    options = params.get("vcpucount_options")
    status_error = params.get("status_error")
    set_option = ["--config", "--config --maximum", "--live", "--guest"]

    # At most 2 options together are supported
    if len(options.split()) > 2:
        raise error.TestNAError("More than 2 options are not supported")

    # Prepare domain
    try:
        reset_domain(vm, pre_vm_state, (options == "--guest"))
    except Exception as details:
        reset_env(vm_name, xml_file)
        raise error.TestFail(details)
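A minimal sketch of the iteration the docstring describes; the vcpu count of 2 is a placeholder, and the virsh.setvcpus/virsh.vcpucount call signatures are assumed from their use elsewhere in the suite:

# Hypothetical sketch: apply each setvcpus variant, then query vcpucount
# with the options under test and log the result for later comparison.
for set_opt in set_option:
    virsh.setvcpus(vm_name, 2, set_opt, ignore_status=True, debug=True)
    result = virsh.vcpucount(vm_name, options, ignore_status=True, debug=True)
    logging.debug("vcpucount %s -> %s", options, result.stdout.strip())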
    def check_blockcopy(target):
        """
        Block copy operation test.
        """
        blk_file = os.path.join(data_dir.get_tmp_dir(), "blk.rbd")
        if os.path.exists(blk_file):
            os.remove(blk_file)
        blk_mirror = ("mirror type='file' file='%s' "
                      "format='raw' job='copy'" % blk_file)

        # Do blockcopy
        ret = virsh.blockcopy(vm_name, target, blk_file)
        libvirt.check_result(ret, skip_if=unsupported_err)

        dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
        if not dom_xml.count(blk_mirror):
            test.fail("Can't see block job in domain xml")

        # Abort
        ret = virsh.blockjob(vm_name, target, "--abort")
        libvirt.check_exit_status(ret)
        dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
        if dom_xml.count(blk_mirror):
            test.fail("Failed to abort block job")
        if os.path.exists(blk_file):
            os.remove(blk_file)

        # Sleep for a while after abort operation.
        time.sleep(5)
        # Do blockcopy again
        ret = virsh.blockcopy(vm_name, target, blk_file)
        libvirt.check_exit_status(ret)

        # Wait for complete
        def wait_func():
            ret = virsh.blockjob(vm_name, target, "--info")
            return ret.stderr.count("Block Copy: [100 %]")

        timeout = params.get("blockjob_timeout", 600)
        utils_misc.wait_for(wait_func, int(timeout))

        # Pivot
        ret = virsh.blockjob(vm_name, target, "--pivot")
        libvirt.check_exit_status(ret)
        dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
        if not dom_xml.count("source file='%s'" % blk_file):
            test.fail("Failed to pivot block job")
        # Remove the disk file.
        if os.path.exists(blk_file):
            os.remove(blk_file)
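A hedged invocation; 'vda' is a placeholder target, and the enclosing test is assumed to provide vm_name and unsupported_err:

# Hypothetical usage: run the blockcopy -> abort -> blockcopy -> pivot cycle.
check_blockcopy("vda")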
Example #11
 def get_current_vcpus():
     """
     Get the current vcpu number.
     """
     vcpus_set = ""
     virsh.dumpxml(vm_name, extra="", to_file=tmp_file)
     dom = parse(tmp_file)
     root = dom.documentElement
     vcpus_2 = root.getElementsByTagName("vcpu")
     for n in vcpus_2:
         vcpus_set += n.getAttribute("current")
     # Convert once, outside the loop; converting inside the loop would
     # raise on a second <vcpu> element (int += str).
     vcpus_set = int(vcpus_set)
     dom.unlink()
     return vcpus_set
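A usage sketch assuming vm_name and tmp_file are defined in the enclosing test; the file name is a placeholder:

# Hypothetical setup for the closure variables used by get_current_vcpus().
tmp_file = os.path.join(data_dir.get_tmp_dir(), "vm_dump.xml")
current = get_current_vcpus()
logging.debug("Current vcpus: %d", current)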
Example #14
    def set_interface(mtu_size='',
                      source_network='default',
                      iface_type='network',
                      iface_model='virtio',
                      iface_target=None):
        """
        Set mtu size to a certain interface
        """
        interface_type = 'bridge' if iface_type in (
            'bridge', 'openvswitch') else iface_type
        iface_dict = {'type': interface_type, 'model': iface_model}
        if source_network:
            iface_dict.update(
                {'source': "{'%s': '%s'}" % (interface_type, source_network)})

        if iface_type == 'openvswitch':
            iface_dict.update({'virtualport_type': 'openvswitch'})

        if mtu_size:
            iface_dict.update({'mtu': "{'size': %s}" % mtu_size})

        if iface_target:
            iface_dict.update({'target': iface_target})

        libvirt.modify_vm_iface(vm_name, 'update_iface', iface_dict)
        logging.debug(virsh.dumpxml(vm_name).stdout)
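A hedged usage example for the helper above; the MTU size is a placeholder value:

# Hypothetical usage: set an MTU of 9000 on the default network interface.
set_interface(mtu_size='9000', source_network='default')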
 def check_graphics(self, param):
     """
     Check whether the graphics attribute values in the vm xml match the given param.
     """
     logging.info('Check graphics parameters')
     if self.target == 'ovirt':
         xml = virsh.dumpxml(
             self.vm_name,
             extra='--security-info',
             session_id=self.virsh_session_id).stdout
         vmxml = xml_utils.XMLTreeFile(xml)
         graphic = vmxml.find('devices').find('graphics')
     else:
         vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(
             self.vm_name, options='--security-info',
             virsh_instance=self.virsh_session)
         graphic = vmxml.xmltreefile.find('devices').find('graphics')
     status = True
     for key in param:
         logging.debug('%s = %s' % (key, graphic.get(key)))
         if graphic.get(key) != param[key]:
             logging.error('Attribute "%s" match failed' % key)
             status = False
     if not status:
         self.log_err('Graphic parameter check failed')
Example #16
    def setup_test_xml_check(case):
        """
        Set up xml check related cases

        :param case: test case
        """
        if case == 'smbios':
            # Edit guest XML with smbios /sysinfo /idmap /metadata and memory device
            vmxml_attrs = {k.replace('vmxml_', ''): int(v) if v.isdigit() else v
                           for k, v in params.items() if k.startswith('vmxml_')}

            vmxml_attrs.update({
                'sysinfo': eval(params.get('sysinfo_attrs', '{}')),
                'os': eval(params.get('os_attrs', '{}')),
                'idmap': eval(params.get('idmap_attrs', '{}')),
                'cpu': eval(params.get('cpu_attrs', '{}'))
            })
            vmxml.setup_attrs(**vmxml_attrs)

            # Setup mem device
            memxml_attrs = eval(params.get('memxml_attrs', '{}'))
            memxml = Memory()
            memxml.setup_attrs(**memxml_attrs)
            vmxml.add_device(memxml)

            # Finish setting up vmxml
            vmxml.sync()
            logging.debug(virsh.dumpxml(vm_name).stdout_text)
Example #17
def check_snap_in_image(vm_name, snap_name):
    """
    check the snapshot info in image

    :param vm_name: VM name
    :param snap_name: Snapshot name
    """

    domxml = virsh.dumpxml(vm_name).stdout.strip()
    xtf_dom = xml_utils.XMLTreeFile(domxml)
    # Check whether qemu-img needs the -U suboption, since the locking
    # feature was added after qemu-2.10
    qemu_img_locking_feature_support = \
        libvirt_storage.check_qemu_image_lock_support()

    cmd = "qemu-img info " + xtf_dom.find("devices/disk/source").get("file")
    if qemu_img_locking_feature_support:
        cmd = "qemu-img info -U " + xtf_dom.find("devices/disk/source").get(
            "file")
    img_info = process.getoutput(cmd).strip()

    if re.search(snap_name, img_info):
        logging.info("Find snapshot info in image")
        return True
    else:
        return False
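A hedged usage sketch; the snapshot name is a placeholder:

# Hypothetical usage: fail the test if the snapshot never reached the image.
if not check_snap_in_image(vm_name, "snap1"):
    test.fail("Snapshot snap1 not found in image")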
 def __init__(self, test, params, env):
     self.errors = []
     self.params = params
     self.vm_name = params.get('main_vm')
     self.original_vm_name = params.get('original_vm_name')
     # The expected boottype of guest, default 0 is 'i440fx+bios'
     # Other values are 1 for q35+bios, 2 for q35+uefi, 3 for
     # q35+secure_uefi
     self.boottype = int(params.get("boottype", 0))
     self.hypervisor = params.get("hypervisor")
     self.target = params.get('target')
     self.os_type = params.get('os_type')
     self.os_version = params.get('os_version', 'OS_VERSION_V2V_EXAMPLE')
     self.original_vmxml = params.get('original_vmxml')
     self.vmx_nfs_src = params.get('vmx_nfs_src')
     self.virsh_session = None
     self.virsh_session_id = None
     self.setup_session()
     self.checker = utils_v2v.VMCheck(test, params, env)
     self.checker.virsh_session_id = self.virsh_session_id
     self.virsh_instance = virsh.VirshPersistent(
         session_id=self.virsh_session_id)
     self.vmxml = virsh.dumpxml(
         self.vm_name, session_id=self.virsh_session_id).stdout.strip()
     # Save NFS mount records like {0:(src, dst, fstype)}
     self.mount_records = {}
    def init_vmxml(self, raise_exception=True):
        """
        Initialize the self.vmxml.

        The self.vmxml could be empty until VMChecker.run begins.
        It's not necessary to get the xml if you don't need to check it or the
        env is not ready to get the xml.

        e.g. When the VM is in a rhv host, the dumpxml will not succeed unless
        the VM is started. But the VM may fail to start for an unexpected
        reason, so we should not assume the dumpxml always returns success in
        the vmchecker.__init__ function.

        But the self.vmxml must not be empty when vmchecker.run begins.

        :param raise_exception: True to raise exception, False to ignore it.
        """
        if self.vmxml:
            return

        try:
            res = virsh.dumpxml(
                self.vm_name,
                session_id=self.virsh_session_id,
                debug=True)
            if res.exit_status == 0:
                self.vmxml = res.stdout_text.strip()
                self.xmltree = xml_utils.XMLTreeFile(self.vmxml)
        except Exception as e:
            if raise_exception:
                raise
            logging.debug('Failed to dumpxml: %s', str(e))
Example #20
    def run_test_alias(case):
        """
        Test memballoon alias

        :param case: test case
        """
        model = params.get('model')
        alias_name = params.get('alias_name')
        has_alias = 'yes' == params.get('has_alias', 'no')

        # Update memballoon device
        balloon_dict = {'membal_model': model, 'membal_alias_name': alias_name}

        libvirt.update_memballoon_xml(vmxml, balloon_dict)
        logging.debug(virsh.dumpxml(vm_name).stdout_text)

        # Get memballoon device after vm define and check
        balloons = vmxml.get_devices('memballoon')
        if len(balloons) == 0:
            test.error('Memballoon device was not added to vm.')
        new_balloon = balloons[0]
        if has_alias:
            logging.debug('Expected alias: %s\nActual alias: %s', alias_name,
                          new_balloon.alias_name)
            if new_balloon.alias_name == alias_name:
                logging.info('Memballoon alias check PASS.')
            else:
                test.fail('Memballoon alias check FAIL.')

        # Check vm start
        cmd_result = virsh.start(vm_name)
        libvirt.check_result(cmd_result, status_error)
Example #21
 def __init__(self, test, params, env):
     self.errors = []
     self.params = params
     self.vm_name = params.get('main_vm')
     self.v2v_cmd = params.get('v2v_command', '')
     self.original_vm_name = params.get('original_vm_name')
     # The expected boottype of guest, default 0 is 'i440fx+bios'
     # Other values are 1 for q35+bios, 2 for q35+uefi, 3 for
     # q35+secure_uefi
     self.boottype = int(params.get("boottype", 0))
     self.hypervisor = params.get("hypervisor")
     self.target = params.get('target')
     self.os_type = params.get('os_type')
     self.os_version = params.get('os_version', 'OS_VERSION_V2V_EXAMPLE')
     self.original_vmxml = params.get('original_vmxml')
     self.vmx_nfs_src = params.get('vmx_nfs_src')
     self.virsh_session = params.get('virsh_session')
     self.virsh_session_id = self.virsh_session.get_id(
     ) if self.virsh_session else params.get('virsh_session_id')
     self.checker = utils_v2v.VMCheck(test, params, env)
     self.setup_session()
     if not self.checker.virsh_session_id:
         self.checker.virsh_session_id = self.virsh_session_id
     if self.v2v_cmd and '-o rhv-upload' in self.v2v_cmd and '--no-copy' in self.v2v_cmd:
         self.vmxml = ''
     else:
         self.vmxml = virsh.dumpxml(
             self.vm_name, session_id=self.virsh_session_id).stdout.strip()
     self.xmltree = None
     if self.vmxml:
         self.xmltree = xml_utils.XMLTreeFile(self.vmxml)
     # Save NFS mount records like {0:(src, dst, fstype)}
     self.mount_records = {}
Example #22
def config_xml_multiqueue(vm_name, vcpu=1, multiqueue=4):
    """
    Configure vCPU and interface for multiqueue test.
    """
    vm_xml.VMXML.set_vm_vcpus(vm_name, int(vcpu), int(vcpu))
    vm_xml.VMXML.set_multiqueues(vm_name, multiqueue)
    logging.debug("XML:%s", virsh.dumpxml(vm_name))
def check_vcpu_after_plug_unplug(test,
                                 vm_name,
                                 config_vcpus,
                                 option='--inactive'):
    re_dump_xml = virsh.dumpxml(vm_name, option).stdout.strip()
    # Check <vcpu current='number'> xx </vcpu>
    crt_vcpus = re.findall(r"vcpu.*current=.%s.*" % config_vcpus, re_dump_xml)
    logging.info("dumpxml %s xml: \n %s", option, crt_vcpus)
    if len(crt_vcpus) != 1:
        test.fail("Dumpxml with {},"
                  "the vcpu current is not correct.".format(option))
    # Check <vcpu id='x' enabled='yes' .../> number should be correct
    vcpu_enabled_list = re.findall(r"vcpu.*enabled='yes'", re_dump_xml)
    if vcpu_enabled_list:
        if len(vcpu_enabled_list) != int(config_vcpus):
            test.fail("The enabled vcpu number is expected to be {}, "
                      "but found {}".format(config_vcpus,
                                            len(vcpu_enabled_list)))
    else:
        test.error("No vcpu is enabled")
    # Check <vcpu id='x' enabled='xx' ... order='x'/> should disappear
    vcpu_order_list = re.findall(r"vcpu.*order=.*", re_dump_xml)
    if vcpu_order_list:
        test.fail("vcpu order info should be cleared, "
                  "but found {}".format(vcpu_order_list))
Example #25
    def check_transient_disk_keyword(vm_names):
        """
        Check VM disks with the TRANSIENT keyword.

        :param vm_names: VM names list.
        """
        logging.info("Checking disk with transient keyword...")

        output0 = ""
        output1 = ""
        for i in range(2):
            ret = virsh.dumpxml(vm_names[i], ignore_status=False)

            cmd = ("echo \"%s\" | grep '<source file=.*TRANSIENT.*/>'" %
                   ret.stdout_text)
            # Use ignore_status=True so a failed grep returns non-zero
            # instead of raising, letting the check below report it.
            if process.system(cmd, ignore_status=True, shell=True):
                test.fail("Check transient disk on %s failed" % vm_names[i])
            if i == 0:
                output0 = astring.to_text(
                    process.system_output(cmd, ignore_status=False,
                                          shell=True))
            else:
                output1 = astring.to_text(
                    process.system_output(cmd, ignore_status=False,
                                          shell=True))
        if output0 == output1:
            test.fail("Two vms have the same transient source disk %s" % output0)
Example #26
 def get_info(self, name):
     infos = {}
     for line in virsh.dominfo(name).stdout.strip().splitlines():
         key, value = line.split(':', 1)
         infos[key.lower()] = value.strip()
     infos['inactive xml'] = virsh.dumpxml(
         name, extra='--inactive').stdout.splitlines()
     return infos
Example #27
def test_create_destroyed_vm(guest_xml, params, test):
    """
    Test scenario:
     - Destroy the vm
     - Create the vm and expect failure with an invalid nvram file size
     - Create the vm successfully with --reset-nvram
     - Check that the nvram file is recreated as expected

    :param guest_xml: guest xml
    :param params: dict, test parameters
    :param test: test object
    """
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    setup_reset_nvram(guest_xml, params, virsh.destroy, test, vm_name)
    vm_file = os.path.join(data_dir.get_data_dir(), params.get('output_file'))
    virsh.dumpxml(vm_name, to_file=vm_file)
    common_test_steps(virsh.create, vm_file, params, test)
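setup_reset_nvram() and common_test_steps() are defined elsewhere in the module. A hypothetical sketch of common_test_steps(), matching the scenario above; the options keyword and the error_msg parameter name are assumptions:

def common_test_steps(virsh_func, vm_file, params, test):
    """Hypothetical sketch: fail without --reset-nvram, succeed with it."""
    ret = virsh_func(vm_file, debug=True)
    libvirt.check_result(ret, expected_fails=[params.get('error_msg', '')])
    ret = virsh_func(vm_file, options="--reset-nvram", debug=True)
    libvirt.check_exit_status(ret)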
 def check_device_exist(check, virsh_session_id):
     """
     Check if the device exists after conversion
     """
     xml = virsh.dumpxml(vm_name, session_id=virsh_session_id).stdout
     if check == 'cdrom':
         if "device='cdrom'" not in xml:
             log_fail('CDROM no longer exists')
Example #30
    def check_snapshot(bgjob=None):
        """
        Do snapshot operation and check the results
        """
        snapshot_name1 = "snap.s1"
        snapshot_name2 = "snap.s2"
        if not snapshot_vm_running:
            vm.destroy(gracefully=False)
        ret = virsh.snapshot_create_as(vm_name, snapshot_name1)
        libvirt.check_exit_status(ret)
        snap_lists = virsh.snapshot_list(vm_name)
        if snapshot_name1 not in snap_lists:
            test.fail("Snapshot %s doesn't exist"
                      % snapshot_name1)

        if snapshot_vm_running:
            options = "--force"
        else:
            options = ""
        ret = virsh.snapshot_revert(
            vm_name, ("%s %s" % (snapshot_name, options)))
        libvirt.check_exit_status(ret)
        ret = virsh.dumpxml(vm_name)
        if ret.stdout.count("<rng model="):
            test.fail("Found rng device in xml")

        if snapshot_with_rng:
            if vm.is_alive():
                vm.destroy(gracefully=False)
            if bgjob:
                bgjob.kill_func()
            modify_rng_xml(params, False)

        # Start the domain before disk-only snapshot
        if vm.is_dead():
            # Add random server
            if params.get("backend_type") == "tcp":
                cmd = "cat /dev/random | nc -4 -l localhost 1024"
                bgjob = utils.AsyncJob(cmd)
            vm.start()
            vm.wait_for_login().close()
        err_msgs = ("live disk snapshot not supported"
                    " with this QEMU binary")
        ret = virsh.snapshot_create_as(vm_name,
                                       "%s --disk-only"
                                       % snapshot_name2)
        if ret.exit_status:
            if ret.stderr.count(err_msgs):
                test.skip(err_msgs)
            else:
                test.fail("Failed to create external snapshot")
        snap_lists = virsh.snapshot_list(vm_name)
        if snapshot_name2 not in snap_lists:
            test.fail("Failed to check snapshot list")

        ret = virsh.domblklist(vm_name)
        if not ret.stdout.count(snapshot_name2):
            test.fail("Failed to find snapshot disk")
Example #31
 def check_iothread_in_xml(vm_name):
     """
     Check that the iothread value has been set in the XML file.
     """
     dom_xml = virsh.dumpxml(vm_name, debug=False).stdout_text.strip()
     iothread_str = params.get("xml_iothread_block")
     if iothread_str not in dom_xml:
         test.fail("IOThread value was not set to 2 in %s" % dom_xml)
Example #32
def test_blockcopy_operation(vm_name, disk_path, disk_format, disk_device,
                             device_target, device_bus, max_blockcopy_size,
                             blockcopy_option, test):
    """
    Test virsh blockcopy operation on disk with metadatacache attribute.

    :param vm_name: domain name
    :param disk_path: the path of disk
    :param disk_format: the format of the disk image
    :param disk_device: the disk device type
    :param device_target: the target of disk
    :param device_bus: the bus of the disk target
    :param max_blockcopy_size: max blockcopy metadatacache size
    :param blockcopy_option: blockcopy option
    :param test: test case itself
    """
    blockcopy_disk = libvirt_disk.create_custom_metadata_disk(
        disk_path, disk_format, disk_device, device_target, device_bus,
        max_blockcopy_size)
    virsh.blockcopy(vm_name,
                    device_target,
                    "--xml %s" % blockcopy_disk.xml,
                    options=blockcopy_option,
                    debug=True,
                    ignore_status=False)
    # Check job finished
    if not utils_misc.wait_for(
            lambda: libvirt.check_blockjob(vm_name, device_target, "progress",
                                           "100"), 300):
        test.fail("Blockjob timeout in 300 sec.")
    # Check max size value in mirror part
    blk_mirror = ("mirror type='file' file='%s' "
                  "format='%s' job='copy'" % (disk_path, disk_format))
    dom_xml = virsh.dumpxml(vm_name, debug=False).stdout_text.strip()
    if not dom_xml.count(blk_mirror):
        test.fail("Can't see block job in domain xml")
    virsh.blockjob(vm_name,
                   device_target,
                   " --pivot",
                   ignore_status=True,
                   debug=False)
    pivot_xml = virsh.dumpxml(vm_name, debug=True).stdout_text.strip()
    pivot_byte_str = "<max_size unit='bytes'>1000</max_size>"
    if pivot_byte_str not in pivot_xml:
        test.fail("Failed to generate metadata_cache in %s" % pivot_xml)
Example #33
def get_vcpu_line(vm_name, cmd, extra=''):
    """
    Get the <vcpu xxx> line in the dumpxml

    :param vm_name: the vm name
    :param cmd: the command to get vcpu line
    :param extra: the extra option for virsh command
    :return: str, the line in dumpxml with <vcpu xxx>
    """
    dumpxml_path = os.path.join(data_dir.get_tmp_dir(),
                                '{}.dumpxml'.format(vm_name))
    virsh.dumpxml(vm_name,
                  extra=extra,
                  to_file=dumpxml_path,
                  ignore_status=False)
    _, output = utils_misc.cmd_status_output(cmd + dumpxml_path,
                                             ignore_status=False)
    os.remove(dumpxml_path)
    return output
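A usage sketch; the grep command is a placeholder and must end with a space so the dumpxml path can be appended to it:

# Hypothetical usage: pull the <vcpu ...> line out of the inactive XML.
vcpu_line = get_vcpu_line("avocado-vt-vm1", "grep '<vcpu' ",
                          extra="--inactive")
logging.debug("vcpu line: %s", vcpu_line)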
Example #34
 def _check_slice_in_xml():
     """
     Check slice attribute in disk xml.
     """
     debug_vmxml = virsh.dumpxml(vm_name, "",
                                 debug=True).stdout.strip()
     if 'slices' in debug_vmxml:
         return True
     else:
         return False
 def check_sound_card(vmcheck, check):
     """
     Check sound status of vm from xml
     """
     xml = virsh.dumpxml(vm_name, session_id=vmcheck.virsh_session_id).stdout
     logging.debug(xml)
     if check == 'sound' and '<sound model' in xml:
         log_fail('Sound card should be removed')
     if check == 'pcspk' and "<sound model='pcspk'" not in xml:
         log_fail('Sound card should be "pcspk"')
Example #37
    def check_detached_xml_noexist():
        """
        Check detached xml does not exist in the guest dumpxml

        :return: True if it does not exist, False if still exists
        """
        domxml_dt = virsh.dumpxml(vm_name, dump_option).stdout_text.strip()
        if detach_check_xml not in domxml_dt:
            return True
        else:
            return False
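A hedged sketch pairing the predicate with utils_misc.wait_for; the 60-second timeout is a placeholder:

# Hypothetical usage: poll until the detached device leaves the dumpxml.
if not utils_misc.wait_for(check_detached_xml_noexist, timeout=60):
    test.fail("Detached device is still present in the dumpxml")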
Example #38
    def check_snapshot(snap_option, target_dev='vda'):
        """
        Test snapshot operation.
        """
        snap_name = "s1"
        snap_mem = os.path.join(TMP_DATA_DIR, "rbd.mem")
        snap_disk = os.path.join(TMP_DATA_DIR, "rbd.disk")
        xml_snap_exp = [
            "disk name='%s' snapshot='external' type='file'" % target_dev
        ]
        xml_dom_exp = [
            "source file='%s'" % snap_disk,
            "backingStore type='network' index='1'",
            "source protocol='rbd' name='%s'" % disk_src_name
        ]
        if snap_option.count("disk-only"):
            options = ("%s --diskspec %s,file=%s --disk-only" %
                       (snap_name, target_dev, snap_disk))
        elif snap_option.count("disk-mem"):
            options = ("%s --memspec file=%s --diskspec %s,file="
                       "%s" % (snap_name, snap_mem, target_dev, snap_disk))
            xml_snap_exp.append("memory snapshot='external' file='%s'" %
                                snap_mem)
        else:
            options = snap_name

        ret = virsh.snapshot_create_as(vm_name, options)
        if test_disk_internal_snapshot:
            libvirt.check_result(ret, expected_fails=unsupported_err)
        elif test_disk_readonly:
            if libvirt_version.version_compare(6, 0, 0):
                libvirt.check_result(ret)
            else:
                libvirt.check_result(ret, expected_fails=unsupported_err)
        else:
            libvirt.check_result(ret, skip_if=unsupported_err)

        # check xml file.
        if not ret.exit_status:
            snap_xml = virsh.snapshot_dumpxml(vm_name, snap_name,
                                              debug=True).stdout.strip()
            dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
            # Delete snapshots.
            libvirt.clean_up_snapshots(vm_name)
            if os.path.exists(snap_mem):
                os.remove(snap_mem)
            if os.path.exists(snap_disk):
                os.remove(snap_disk)

            if not all([x in snap_xml for x in xml_snap_exp]):
                test.fail("Failed to check snapshot xml")
            if not all([x in dom_xml for x in xml_dom_exp]):
                test.fail("Failed to check domain xml")
Example #39
    def check_bootorder_snapshot(disk_name):
        """
        Check VM disk's bootorder option with snapshot.

        :param disk_name: The target disk to be checked.
        """
        logging.info("Checking bootorder option with snapshot...")
        snapshot1 = "s1"
        snapshot2 = "s2"
        snapshot2_file = os.path.join(test.tmpdir, "s2")
        ret = virsh.snapshot_create(vm_name, "", **virsh_dargs)
        libvirt.check_exit_status(ret)

        ret = virsh.snapshot_create_as(vm_name, "%s --disk-only" % snapshot1,
                                       **virsh_dargs)
        libvirt.check_exit_status(ret)

        ret = virsh.snapshot_dumpxml(vm_name, snapshot1)
        libvirt.check_exit_status(ret)

        cmd = "echo \"%s\" | grep %s.%s" % (ret.stdout, disk_name, snapshot1)
        if process.system(cmd, ignore_status=True, shell=True):
            raise exceptions.TestError("Check snapshot disk failed")

        ret = virsh.snapshot_create_as(vm_name,
                                       "%s --memspec file=%s,snapshot=external"
                                       % (snapshot2, snapshot2_file),
                                       **virsh_dargs)
        libvirt.check_exit_status(ret)

        ret = virsh.dumpxml(vm_name)
        libvirt.check_exit_status(ret)

        cmd = ("echo \"%s\" | grep -A 16 %s.%s | grep \"boot order='%s'\""
               % (ret.stdout, disk_name, snapshot2, bootorder))
        if process.system(cmd, ignore_status=True, shell=True):
            raise exceptions.TestError("Check snapshot disk with bootorder failed")

        snap_lists = virsh.snapshot_list(vm_name)
        if snapshot1 not in snap_lists or snapshot2 not in snap_lists:
            raise exceptions.TestError("Check snapshot list failed")

        # Check virsh save command after snapshot.
        save_file = "/tmp/%s.save" % vm_name
        ret = virsh.save(vm_name, save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)

        # Check virsh restore command after snapshot.
        ret = virsh.restore(save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)

        # Passed all tests.
        os.remove(save_file)
    def check_bootorder_snapshot(disk_name):
        """
        Check VM disk's bootorder option with snapshot.

        :param disk_name: The target disk to be checked.
        """
        logging.info("Checking bootorder option with snapshot...")
        snapshot1 = "s1"
        snapshot2 = "s2"
        snapshot2_file = os.path.join(test.tmpdir, "s2")
        ret = virsh.snapshot_create(vm_name, "", **virsh_dargs)
        libvirt.check_exit_status(ret)

        ret = virsh.snapshot_create_as(vm_name, "%s --disk-only" % snapshot1,
                                       **virsh_dargs)
        libvirt.check_exit_status(ret)

        ret = virsh.snapshot_dumpxml(vm_name, snapshot1)
        libvirt.check_exit_status(ret)

        cmd = "echo \"%s\" | grep %s.%s" % (ret.stdout, disk_name, snapshot1)
        if utils.run(cmd, ignore_status=True).exit_status:
            raise error.TestError("Check snapshot disk failed")

        ret = virsh.snapshot_create_as(vm_name,
                                       "%s --memspec file=%s,snapshot=external"
                                       % (snapshot2, snapshot2_file),
                                       **virsh_dargs)
        libvirt.check_exit_status(ret)

        ret = virsh.dumpxml(vm_name)
        libvirt.check_exit_status(ret)

        cmd = ("echo \"%s\" | grep -A 16 %s.%s | grep \"boot order='%s'\""
               % (ret.stdout, disk_name, snapshot2, bootorder))
        if utils.run(cmd, ignore_status=True).exit_status:
            raise error.TestError("Check snapshot disk with bootorder failed")

        snap_lists = virsh.snapshot_list(vm_name)
        if snapshot1 not in snap_lists or snapshot2 not in snap_lists:
            raise error.TestError("Check snapshot list failed")

        # Check virsh save command after snapshot.
        save_file = "/tmp/%s.save" % vm_name
        ret = virsh.save(vm_name, save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)

        # Check virsh restore command after snapshot.
        ret = virsh.restore(save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)

        # Passed all tests.
        os.remove(save_file)
def get_xmldata(vm_name, xml_file, options):
    """
    Get some values out of the guests xml
    Returns:
        count => Number of vCPUs set for the guest
        current => If there is a 'current' value set
                   in the xml indicating the ability
                   to add vCPUs. If 'current' is not
                   set, then return 0 for this value.
        os_machine => Name of the <os> <type machine=''>
                      to be used to determine if we can
                      support hotplug
    """
    # Grab a dump of the guest - if we're using the --config,
    # then get an --inactive dump.
    extra_opts = ""
    if "--config" in options:
        extra_opts = "--inactive"
    vcpus_current = ""
    virsh.dumpxml(vm_name, extra=extra_opts, to_file=xml_file)
    dom = parse(xml_file)
    root = dom.documentElement
    # get the vcpu value
    vcpus_parent = root.getElementsByTagName("vcpu")
    vcpus_count = int(vcpus_parent[0].firstChild.data)
    for n in vcpus_parent:
        vcpus_current += n.getAttribute("current")
        if vcpus_current != "":
            vcpus_current = int(vcpus_current)
        else:
            vcpus_current = 0
    # get the machine type
    os_parent = root.getElementsByTagName("os")
    os_machine = ""
    for os_elem in os_parent:
        for node in os_elem.childNodes:
            if node.nodeName == "type":
                os_machine = node.getAttribute("machine")
    dom.unlink()
    return vcpus_count, vcpus_current, os_machine
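A usage sketch mirroring the Returns description above; the file name and options are placeholders:

# Hypothetical usage: read the vcpu count, 'current' value and machine type.
vcpus_count, vcpus_current, os_machine = get_xmldata(
    "avocado-vt-vm1", "/tmp/vm.xml", "--config")
logging.debug("count=%s current=%s machine=%s",
              vcpus_count, vcpus_current, os_machine)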
    def vm_state_check():
        cmd_result = virsh.dumpxml(vm_name, debug=True)
        if cmd_result.exit_status:
            test.fail("Failed to dump xml of domain %s" % vm_name)

        # The xml should contain the match_string
        xml = cmd_result.stdout.strip()
        match_string = "<boot dev='cdrom'/>"
        if not re.search(match_string, xml):
            test.fail("After domain restore the xml is not expected")

        domstate = virsh.domstate(vm_name, debug=True).stdout.strip()
        if restore_state != domstate:
            test.fail("The domain state is not expected")
    def vm_state_check():
        cmd_result = virsh.dumpxml(vm_name, debug=True)
        libvirt.check_exit_status(cmd_result)

        # The xml should contain the match_string
        xml = cmd_result.stdout.strip()
        match_string = "<boot dev='cdrom'/>"
        if not re.search(match_string, xml):
            raise exceptions.TestFail("After domain restore, "
                                      "the xml is not expected")

        domstate = virsh.domstate(vm_name, debug=True).stdout.strip()
        if restore_state != domstate:
            raise exceptions.TestFail("The domain state is not expected")
 def __init__(self, test, params, env):
     self.errors = []
     self.params = params
     self.vm_name = params.get('main_vm')
     self.hypervisor = params.get("hypervisor")
     self.target = params.get('target')
     self.os_type = params.get('os_type')
     self.os_version = params.get('os_version', 'OS_VERSION_V2V_EXAMPLE')
     self.original_vmxml = params.get('original_vmxml')
     self.virsh_session = None
     self.virsh_session_id = None
     self.setup_session()
     self.checker = utils_v2v.VMCheck(test, params, env)
     self.checker.virsh_session_id = self.virsh_session_id
     self.vmxml = virsh.dumpxml(self.vm_name,
                                session_id=self.virsh_session_id).stdout.strip()
    def attach_channel_xml():
        """
        Create channel xml and attach it to guest configuration
        """
        # Check if pty channel exists already
        for elem in new_xml.devices.by_device_tag('channel'):
            if elem.type_name == channel_type_name:
                logging.debug("{0} channel already exists in guest. "
                              "No need to add new one".format(channel_type_name))
                return

        params = {'channel_type_name': channel_type_name,
                  'target_type': target_type,
                  'target_name': target_name}
        channel_xml = libvirt.create_channel_xml(params)
        virsh.attach_device(domain_opt=vm_name, file_opt=channel_xml.xml,
                            flagstr="--config", ignore_status=False)
        logging.debug("New VMXML with channel:\n%s", virsh.dumpxml(vm_name))
Example #46
def test_nic_group(vm, params):
    """
    Try to attach device in iommu group to vm.

    1.Get original available interfaces before attaching.
    2.Attaching hostdev in iommu group to vm.
    3.Start vm and check it.
    4.Check added interface in vm.
    """
    pci_id = params.get("nic_pci_id", "ETH:PCI.EXAMPLE")
    if pci_id.count("EXAMPLE"):
        raise error.TestNAError("Invalid pci device id.")

    device_type = "Ethernet"
    nic_ip = params.get("nic_pci_ip")
    nic_mask = params.get("nic_pci_mask", "255.255.0.0")
    nic_gateway = params.get("nic_pci_gateway")

    # Login vm to get interfaces before attaching pci device.
    if vm.is_dead():
        vm.start()
    before_pci_nics = vm.get_pci_devices("Ethernet")
    before_interfaces = vm.get_interfaces()
    logging.debug("Ethernet PCI devices before:%s",
                  before_pci_nics)
    logging.debug("Ethernet interfaces before:%s",
                  before_interfaces)
    vm.destroy()

    boot_order = int(params.get("boot_order", 0))
    prepare_devices(pci_id, device_type)
    try:
        if boot_order:
            utlv.alter_boot_order(vm.name, pci_id, boot_order)
        else:
            xmlfile = utlv.create_hostdev_xml(pci_id)
            virsh.attach_device(domain_opt=vm.name, file_opt=xmlfile,
                                flagstr="--config", debug=True,
                                ignore_status=False)
        logging.debug("VMXML with disk boot:\n%s", virsh.dumpxml(vm.name))
        vm.start()
    except (error.CmdError, virt_vm.VMStartError) as detail:
        cleanup_devices(pci_id, device_type)
        raise error.TestFail("New device does not work well: %s" % detail)
    def check_snapshot(snap_option, target_dev='vda'):
        """
        Test snapshot operation.
        """
        snap_name = "s1"
        snap_mem = os.path.join(data_dir.get_tmp_dir(), "rbd.mem")
        snap_disk = os.path.join(data_dir.get_tmp_dir(), "rbd.disk")
        xml_snap_exp = ["disk name='%s' snapshot='external' type='file'" % target_dev]
        xml_dom_exp = ["source file='%s'" % snap_disk,
                       "backingStore type='network' index='1'",
                       "source protocol='rbd' name='%s'" % disk_src_name]
        if snap_option.count("disk-only"):
            options = ("%s --diskspec %s,file=%s --disk-only" %
                       (snap_name, target_dev, snap_disk))
        elif snap_option.count("disk-mem"):
            options = ("%s --memspec file=%s --diskspec %s,file="
                       "%s" % (snap_name, snap_mem, target_dev, snap_disk))
            xml_snap_exp.append("memory snapshot='external' file='%s'"
                                % snap_mem)
        else:
            options = snap_name

        ret = virsh.snapshot_create_as(vm_name, options)
        if test_disk_internal_snapshot or test_disk_readonly:
            libvirt.check_result(ret, expected_fails=unsupported_err)
        else:
            libvirt.check_result(ret, skip_if=unsupported_err)

        # check xml file.
        if not ret.exit_status:
            snap_xml = virsh.snapshot_dumpxml(vm_name, snap_name,
                                              debug=True).stdout.strip()
            dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
            # Delete snapshots.
            libvirt.clean_up_snapshots(vm_name)
            if os.path.exists(snap_mem):
                os.remove(snap_mem)
            if os.path.exists(snap_disk):
                os.remove(snap_disk)

            if not all([x in snap_xml for x in xml_snap_exp]):
                test.fail("Failed to check snapshot xml")
            if not all([x in dom_xml for x in xml_dom_exp]):
                test.fail("Failed to check domain xml")
def check_snap_in_image(vm_name, snap_name):
    """
    check the snapshot info in image

    :params: vm_name: VM name
    :params: snap_name: Snapshot name
    """

    domxml = virsh.dumpxml(vm_name).stdout.strip()
    xtf_dom = xml_utils.XMLTreeFile(domxml)

    cmd = "qemu-img info " + xtf_dom.find("devices/disk/source").get("file")
    img_info = commands.getoutput(cmd).strip()

    if re.search(snap_name, img_info):
        logging.info("Find snapshot info in image")
        return True
    else:
        return False
Example #49
    def set_interface(mtu_size='', source_network='default',
                      iface_type='network', iface_model='virtio'):
        """
        Set mtu size to a certain interface
        """
        interface_type = 'bridge' if iface_type in ('bridge', 'openvswitch') else iface_type
        iface_dict = {
            'type': interface_type,
            'source': "{'%s': '%s'}" % (interface_type, source_network),
            'model': iface_model
        }

        if iface_type == 'openvswitch':
            iface_dict.update({'virtualport_type': 'openvswitch'})

        if mtu_size:
            iface_dict.update({'mtu': "{'size': %s}" % mtu_size})

        libvirt.modify_vm_iface(vm_name, 'update_iface', iface_dict)
        logging.debug(virsh.dumpxml(vm_name).stdout)
Example #50
def test_fibre_group(vm, params):
    """
    Try to attach device in iommu group to vm.

    1.Get original available disks before attaching.
    2.Attaching hostdev in iommu group to vm.
    3.Start vm and check it.
    4.Check added disk in vm.
    """
    pci_id = params.get("fibre_pci_id", "FIBRE:PCI.EXAMPLE")
    device_type = "Fibre"
    if pci_id.count("EXAMPLE"):
        raise error.TestNAError("Invalid pci device id.")
    disk_check = "yes" == params.get("fibre_pci_disk_check", "no")

    # Login vm to get disks before attaching pci device.
    if vm.is_dead():
        vm.start()
    before_pci_fibres = vm.get_pci_devices("Fibre")
    before_disks = vm.get_disks()
    logging.debug("Fibre PCI devices before:%s",
                  before_pci_fibres)
    logging.debug("Disks before:%s",
                  before_disks)
    vm.destroy()

    boot_order = int(params.get("boot_order", 0))
    prepare_devices(pci_id, device_type)
    try:
        if boot_order:
            utlv.alter_boot_order(vm.name, pci_id, boot_order)
        else:
            xmlfile = utlv.create_hostdev_xml(pci_id)
            virsh.attach_device(domain_opt=vm.name, file_opt=xmlfile,
                                flagstr="--config", debug=True,
                                ignore_status=False)
        logging.debug("VMXML with disk boot:\n%s", virsh.dumpxml(vm.name))
        vm.start()
    except (process.CmdError, virt_vm.VMStartError) as detail:
        cleanup_devices(pci_id, device_type)
        raise error.TestFail("New device does not work well: %s" % detail)
def check_snap_in_image(vm_name, snap_name):
    """
    check the snapshot info in image

    :params: vm_name: VM name
    :params: snap_name: Snapshot name
    """

    domxml = virsh.dumpxml(vm_name).stdout.strip()
    xtf_dom = xml_utils.XMLTreeFile(domxml)
    # Check whether qemu-img need add -U suboption since locking feature was added afterwards qemu-2.10
    qemu_img_locking_feature_support = libvirt_storage.check_qemu_image_lock_support()

    cmd = "qemu-img info " + xtf_dom.find("devices/disk/source").get("file")
    if qemu_img_locking_feature_support:
        cmd = "qemu-img info -U " + xtf_dom.find("devices/disk/source").get("file")
    img_info = process.getoutput(cmd).strip()

    if re.search(snap_name, img_info):
        logging.info("Find snapshot info in image")
        return True
    else:
        return False
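# A minimal usage sketch (hedged: assumes the virttest imports used above and
# that an internal snapshot named 'snap1' is created first):
#
#     virsh.snapshot_create_as(vm_name, "snap1", ignore_status=False)
#     if not check_snap_in_image(vm_name, "snap1"):
#         test.fail("snapshot 'snap1' not recorded in the disk image")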
Example #52
def run_virsh_undefine(test, params, env):
    """
    Test virsh undefine command.

    Undefine an inactive domain, or convert persistent to transient.
    1.Prepare test environment.
    2.Backup the VM's information to an xml file.
    3.When the libvirtd == "off", stop the libvirtd service.
    4.Perform the virsh undefine operation.
    5.Recover the test environment (libvirtd service, VM).
    6.Confirm the test result.
    """

    vm_ref = params.get("undefine_vm_ref", "vm_name")
    extra = params.get("undefine_extra", "")
    libvirtd_state = params.get("libvirtd", "on")
    status_error = params.get("status_error")
    undefine_twice = params.get("undefine_twice", 'no')
    local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    xml_file = os.path.join(test.tmpdir, 'tmp.xml')
    remote_user = params.get("remote_user", "user")
    remote_password = params.get("remote_password", "password")
    remote_prompt = params.get("remote_prompt", "#")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vm_id = vm.get_id()
    vm_uuid = vm.get_uuid()

    # Back up the xml file. A Xen host has no guest xml file with which
    # to define a guest.
    virsh.dumpxml(vm_name, extra="", to_file=xml_file)

    # Confirm how to reference a VM.
    if vm_ref == "vm_name":
        vm_ref = vm_name
    elif vm_ref == "id":
        vm_ref = vm_id
    elif vm_ref == "hex_vm_id":
        vm_ref = hex(int(vm_id))
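        # e.g. a vm_id of "12" becomes "0xc"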
    elif vm_ref == "uuid":
        vm_ref = vm_uuid
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)

    # Turn libvirtd into certain state.
    if libvirtd_state == "off":
        utils_libvirtd.libvirtd_stop()

    # Test virsh undefine command.
    status = 0
    try:
        uri = libvirt_vm.complete_uri(local_ip)
    except error.CmdError:
        status = 1
        uri = None
    if vm_ref != "remote":
        vm_ref = "%s %s" % (vm_ref, extra)
        cmdresult = virsh.undefine(vm_ref, uri=uri,
                                   ignore_status=True, debug=True)
        status = cmdresult.exit_status
        if status:
            logging.debug("Error status, command output: %s", cmdresult.stdout)
        if undefine_twice == "yes":
            status2 = virsh.undefine(vm_ref, uri=uri,
                                     ignore_status=True).exit_status
    else:
        if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
            raise error.TestNAError("remote_ip and/or local_ip parameters not"
                                    " changed from default values")
        session = remote.remote_login("ssh", remote_ip, "22", remote_user,
                                      remote_password, remote_prompt)
        cmd_undefine = "virsh -c %s undefine %s" % (uri, vm_name)
        status, output = session.cmd_status_output(cmd_undefine)
        logging.info("Undefine output: %s", output)

    # Recover libvirtd state.
    if libvirtd_state == "off":
        utils_libvirtd.libvirtd_start()

    # Shutdown VM.
    if virsh.domain_exists(vm.name, uri=uri):
        try:
            if vm.is_alive():
                vm.destroy()
        except error.CmdError, detail:
            logging.error("Detail: %s", detail)
Example #53
def run(test, params, env):
    """
    Different cpu compat mode scenario tests

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """

    def check_feature(vm, feature="", vcpu=0):
        """
        Check that the given feature is present in the guest.

        :param vm: VM object
        :param feature: feature to be verified
        :param vcpu: vcpu number on which to pin the guest test
        :return: True on success; fails the test on failure
        """
        session = vm.wait_for_login()
        if 'power8' in feature:
            cmd = 'lscpu|grep -i "Model name:.*power8"'
        elif 'xive' in feature:
            # Remove the -v once guest xive support is available;
            # right now a power9 guest supports only xics.
            cmd = "grep -v xive /sys/firmware/devicetree/base/interrupt-*/compatible"
        elif 'xics' in feature:
            cmd = "grep -v xive /sys/firmware/devicetree/base/interrupt-*/compatible"
        elif 'power9' in feature:
            cmd = 'lscpu|grep -i "Model name:.*power9"'
        elif 'hpt' in feature:
            cmd = 'grep "MMU.*: Hash" /proc/cpuinfo'
        elif 'rpt' in feature:
            cmd = 'grep "MMU.*: Radix" /proc/cpuinfo'
        elif 'isa' in feature:
            utils_package.package_install('gcc', session)
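            # ".long 0x7c0005e6" embeds an ISA 3.0-only instruction; running
            # it on an isa2.7 guest is expected to fail (checked below).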
            cmd = "echo 'int main(){asm volatile (\".long 0x7c0005e6\");"
            cmd += "return 0;}' > ~/a.c;cc ~/a.c;taskset -c %s ./a.out" % vcpu
        status, output = session.cmd_status_output(cmd)
        logging.debug(output)
        session.close()
        if feature != "isa2.7":
            if status != 0:
                test.fail("Feature: %s check failed inside "
                          "%s guest on %s host" % (feature,
                                                   guest_version,
                                                   host_version))
        else:
            if status == 0:
                test.fail("isa3.0 instruction succeeds in "
                          "%s guest on %s host" % (guest_version,
                                                   host_version))
        return True

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    pin_vcpu = 0
    host_version = params.get("host_version")
    guest_version = params.get("guest_version")
    max_vcpu = params.get("cpucompat_vcpu_max", "")
    cur_vcpu = int(params.get("cpucompat_vcpu_cur", "1"))
    cores = int(params.get("topology_cores", '1'))
    sockets = int(params.get("topology_sockets", '1'))
    threads = int(params.get("topology_threads", '1'))
    status_error = "yes" == params.get("status_error", "no")
    condn = params.get("condn", "")
    guest_features = params.get("guest_features", "")
    if guest_features:
        guest_features = guest_features.split(',')
        if guest_version:
            guest_features.append(guest_version)
    if host_version not in cpu.get_cpu_arch():
        test.cancel("Unsupported Host cpu version")

    vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    org_xml = vmxml.copy()
    # Destroy the vm
    vm.destroy()
    try:
        # Set cpu model
        if max_vcpu:
            pin_vcpu = int(max_vcpu) - 1
            libvirt_xml.VMXML.set_vm_vcpus(vm_name, int(max_vcpu), cur_vcpu,
                                           sockets=sockets, cores=cores,
                                           threads=threads, add_topology=True)
        libvirt_xml.VMXML.set_cpu_mode(vm_name, model=guest_version)
        logging.debug(virsh.dumpxml(vm_name))
        try:
            vm.start()
        except virt_vm.VMStartError as detail:
            if not status_error:
                test.fail("%s" % detail)
        if max_vcpu:
            virsh.setvcpus(vm_name, int(max_vcpu), "--live",
                           ignore_status=False, debug=True)
            if not utils_misc.check_if_vm_vcpu_match(int(max_vcpu), vm):
                test.fail("Vcpu hotplug failed")
        if not status_error:
            for feature in guest_features:
                check_feature(vm, feature, vcpu=pin_vcpu)
        if condn == "filetrans":
            utils_test.run_file_transfer(test, params, env)
        elif condn == "stress":
            bt = utils_test.run_avocado_bg(vm, params, test)
            if not bt:
                test.cancel("guest stress failed to start")
        elif condn == "save":
            save_file = os.path.join(data_dir.get_tmp_dir(), vm_name + ".save")
            result = virsh.save(vm_name, save_file, ignore_status=True,
                                debug=True)
            utils_test.libvirt.check_exit_status(result)
            # Just sleep few secs before guest recovery
            time.sleep(2)
            if os.path.exists(save_file):
                result = virsh.restore(save_file, ignore_status=True,
                                       debug=True)
                utils_test.libvirt.check_exit_status(result)
                os.remove(save_file)
        elif condn == "suspend":
            result = virsh.suspend(vm_name, ignore_status=True, debug=True)
            utils_test.libvirt.check_exit_status(result)
            # Just sleep few secs before guest recovery
            time.sleep(2)
            result = virsh.resume(vm_name, ignore_status=True, debug=True)
            utils_test.libvirt.check_exit_status(result)
        else:
            pass
    finally:
        org_xml.sync()
Example #54
def run_virsh_edit(test, params, env):
    """
    Test command: virsh edit.

    The command can edit the XML configuration of a domain.
    1.Prepare the test environment; destroy or suspend a VM.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Perform virsh edit operation.
    4.Recover test environment.
    5.Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    domid = vm.get_id()
    domuuid = vm.get_uuid()
    vcpucount_result = virsh.vcpucount(vm_name, options="--config")
    if vcpucount_result.exit_status:
        raise error.TestError("Failed to get vcpucount. Detail:\n%s"
                              % vcpucount_result)
    original_vcpu = vcpucount_result.stdout.strip()
    expected_vcpu = str(int(original_vcpu)+1)

    libvirtd = params.get("libvirtd", "on")
    vm_ref = params.get("edit_vm_ref")
    status_error = params.get("status_error")

    def modify_vcpu(source, edit_cmd):
        """
        Modify vm's cpu information.

        @param source: virsh edit's option.
        @param edit_cmd: an edit command line.
        @return: True if edit succeeded, False if edit failed.
        """
        session = aexpect.ShellSession("sudo -s")
        try:
            session.sendline("virsh edit %s" % source)
            session.sendline(edit_cmd)
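            # ESC leaves insert mode; 'ZZ' saves the file and exits the editor.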
            session.send('\x1b')
            session.send('ZZ')
            # Use sleep(1) to make sure the modification has completed.
            time.sleep(1)
            session.close()
            return True
        except:
            return False

    def edit_vcpu(source, guest_name):
        """
        Modify vm's cpu information using the virsh edit command.

        @param source: virsh edit's option.
        @param guest_name: vm's name.
        @return: True if edit succeeded, False if edit failed.
        """
        dic_mode = {"edit": ":%s /[0-9]*<\/vcpu>/"+expected_vcpu+"<\/vcpu>",
                    "recover": ":%s /[0-9]*<\/vcpu>/"+original_vcpu+"<\/vcpu>"}
        status = modify_vcpu(source, dic_mode["edit"])
        if not status:
            return status
        if params.get("paused_after_start_vm") == "yes":
            virsh.resume(guest_name, ignore_status=True)
            virsh.destroy(guest_name)
        elif params.get("start_vm") == "yes":
            virsh.destroy(guest_name)
        vcpus = vm.dominfo()["CPU(s)"]
        # Recover cpu info
        status = modify_vcpu(source, dic_mode["recover"])
        if status and vcpus != expected_vcpu:
            return False
        return status

    # Run test case
    xml_file = os.path.join(test.tmpdir, 'tmp.xml')
    virsh.dumpxml(vm_name, extra="", to_file=xml_file)

    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    try:
        if vm_ref == "id":
            status = edit_vcpu(domid, vm_name)
        elif vm_ref == "uuid":
            status = edit_vcpu(domuuid, vm_name)
        elif vm_ref == "name" and status_error == "no":
            status = edit_vcpu(vm_name, vm_name)
        else:
            status = False
            if vm_ref.find("invalid") != -1:
                vm_ref = params.get(vm_ref)
            elif vm_ref == "name":
                vm_ref = "%s %s" % (vm_name, params.get("edit_extra_param"))
            edit_status = virsh.edit(vm_ref).exit_status
            if edit_status == 0:
                status = True
    except:
        status = False

    # Recover libvirtd service
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # Recover VM
    if vm.is_alive():
        vm.destroy()
    virsh.undefine(vm_name)
    virsh.define(xml_file)

    # Check status_error
    if status_error == "yes":
        if status:
            raise error.TestFail("Run successfully with wrong command!")
    elif status_error == "no":
        if not status:
            raise error.TestFail("Run failed with right command")
Example #55
def run(test, params, env):
    """
    Test command: virsh domxml-to-native.

    Convert domain XML config to a native guest configuration format.
    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Perform virsh domxml-from-native operation.
    4.Recover test environment.
    5.Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()

    def buildcmd(arglist):
        """
        Return a list of arguments of the qemu command.

        Return a list based on the input string where each list element
        is put together with care to pair up options with their argument
        rather than being on separate lines.  Thus rather than having
        "-option" "argument" in separate list elements, they will be in
        one element "-option argument". Take care to note the argument to
        an option may not be required. This will make it easier to determine
        what is causing the failure when printing error messages.
        """
        # First separate everything by the first space into a list
        elems = arglist.split("\x20")

        # Peruse the list to build up a formatted output retlist
        retlist = []
        i = 0
        skip = False
        for e in elems:
            # If 'skip' is True, then we've appended an option and argument
            if skip:
                skip = False
                i = i + 1
                continue

            # Peek at the next element; it may not exist for the last one
            enext = elems[i + 1] if i + 1 < len(elems) else ""

            # If the current and the next element both start with "-", the
            # next one is not an argument to the current, so just append.
            # Same for anything we find that doesn't start with a "-"
            if e[0] != "-" or not enext or enext[0] == "-":
                retlist.append(e)
            else:
                # Append this and the next and set our skip flag
                retlist.append(e + " " + enext)
                skip = True
            i = i + 1

        return retlist

    def filtlist(arglist):
        """
        Return a filtered list of arguments.

        Walk through the supplied list to filter out things that will be
        known to be different depending on the running environment.
        """
        retlist = []
        for arg in arglist:
            if re.search("mode=readline", arg):
                continue
            elif re.search("mac=", arg):
                continue
            elif re.search("127.0.0.1:", arg):
                continue
            elif re.search("tap", arg):
                continue
            # Upstream libvirt commit id 'e8400564':
            # XMLToNative: Don't show -S
            elif re.search("-S", arg):
                continue
            retlist.append(arg)

        return retlist

    def compare(conv_arg):
        """
        Compare converted information with vm's information.

        :param conv_arg : Converted information.
        :return: True if converted information has no different from
                 vm's information.
        """
        pid = vm.get_pid()
        cmdline_tmp = utils.system_output("cat -v /proc/%d/cmdline" % pid)

        # Output has a trailing '^@' which gets converted into an empty
        # element when splitting by '\x20', so strip it off the end.
        cmdline = re.sub(r"\^@", " ", cmdline_tmp).strip(" ")

        # Fedora 19 replaces the /usr/bin/qemu-kvm with the string
        # "/usr/bin/qemu-system-x86_64 -machine accel=kvm", so let's
        # do the same if we find "/usr/bin/qemu-kvm" in the incoming
        # argument list and we find "qemu-system-x86_64 -machine accel=kvm"
        # in the running guest's cmdline
        if (
            conv_arg.find("/usr/bin/qemu-kvm") != 1
            and cmdline.find("/usr/bin/qemu-system-x86_64 -machine accel=kvm") != -1
        ):
            cmdline = re.sub(r"/usr/bin/qemu-system-x86_64 -machine accel=kvm", "/usr/bin/qemu-kvm", cmdline)

        # Now prepend the various environment variables that will be in
        # the conv_arg, but not in the actual command
        tmp = (
            re.search("LC_ALL.[^\s]\s", conv_arg).group(0)
            + re.search("PATH.[^\s]+\s", conv_arg).group(0)
            + re.search("QEMU_AUDIO_DRV.[^\s]+\s", conv_arg).group(0)
        )
        qemu_arg = tmp + cmdline

        conv_arg_lines = buildcmd(conv_arg)
        qemu_arg_lines = buildcmd(qemu_arg)

        diff1 = filtlist(tuple(x for x in conv_arg_lines if x not in set(qemu_arg_lines)))
        if diff1:
            logging.debug("Found the following in conv_arg not in qemu_arg:")
            for elem in diff1:
                logging.debug("\t%s", elem)

        diff2 = filtlist(tuple(x for x in qemu_arg_lines if x not in set(conv_arg_lines)))
        if diff2:
            logging.debug("Found the following in qemu_arg not in conv_arg:")
            for elem in diff2:
                logging.debug("\t%s", elem)

        if diff1 or diff2:
            return False

        return True

    # run test case
    dtn_format = params.get("dtn_format")
    file_xml = params.get("dtn_file_xml")
    extra_param = params.get("dtn_extra_param")
    libvirtd = params.get("libvirtd")
    status_error = params.get("status_error")
    virsh.dumpxml(vm_name, extra="", to_file=file_xml)
    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()
    ret = virsh.domxml_to_native(dtn_format, file_xml, extra_param, ignore_status=True)
    status = ret.exit_status
    conv_arg = ret.stdout.strip()

    # recover libvirtd service start
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # clean up
    if os.path.exists(file_xml):
        os.remove(file_xml)

    # check status_error
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0:
            raise error.TestFail("Run failed with right command")
        if compare(conv_arg) is not True:
            raise error.TestFail("Test failed!")
Example #56
def run_virsh_setvcpus(test, params, env):
    """
    Test command: virsh setvcpus.

    The command can change the number of virtual CPUs in the guest domain.
    1.Prepare the test environment; destroy or suspend a VM.
    2.Perform virsh setvcpus operation.
    3.Recover test environment.
    4.Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    xml_file = params.get("setvcpus_xml_file", "vm.xml")
    virsh.dumpxml(vm_name, extra="--inactive", to_file=xml_file)
    tmp_file = params.get("setvcpus_tmp_file", "tmp.xml")
    pre_vm_state = params.get("setvcpus_pre_vm_state")
    command = params.get("setvcpus_command", "setvcpus")
    options = params.get("setvcpus_options")
    domain = params.get("setvcpus_domain")
    count = params.get("setvcpus_count")
    extra_param = params.get("setvcpus_extra_param")
    count_option = "%s %s" % (count, extra_param)
    status_error = params.get("status_error")

    def get_current_vcpus():
        """
        Get the current vcpu number from the dumped domain XML.
        """
        virsh.dumpxml(vm_name, extra="", to_file=tmp_file)
        dom = parse(tmp_file)
        root = dom.documentElement
        # The first <vcpu> element is the top-level one carrying 'current'
        vcpu_node = root.getElementsByTagName("vcpu")[0]
        vcpus_set = int(vcpu_node.getAttribute("current"))
        dom.unlink()
        return vcpus_set

    if vm.is_alive():
        vm.destroy()
    vm_xml = libvirt_xml.VMXML()
    vm_xml.set_vm_vcpus(vm_name, 2)
    vm.start()
    vm.wait_for_login()

    if status_error == "no":
        vcpus_new = len(vm.vcpuinfo())
    domid = vm.get_id()
    domuuid = vm.get_uuid()
    if pre_vm_state == "paused":
        vm.pause()
    elif pre_vm_state == "shut off":
        vm.destroy()

    if domain == "remote_name":
        remote_ssh_addr = params.get("remote_ip", None)
        remote_addr = params.get("local_ip", None)
        remote_password = params.get("remote_password", None)
        host_type = virsh.driver()
        if host_type == "qemu":
            remote_string = "qemu+ssh://%s/system" % remote_addr
        elif host_type == "xen":
            remote_string = "xen+ssh://%s" % remote_addr
        command = "virsh -c %s setvcpus %s 1 --live" % (remote_string, vm_name)
        if virsh.has_command_help_match(command, "--live") is None:
            status_error = "yes"
        session = remote.remote_login(
            "ssh", remote_ssh_addr, "22", "root", remote_password, "#")
        session.cmd_output('LANG=C')
        status, output = session.cmd_status_output(command, internal_timeout=5)
        session.close()
        vcpus_current = len(vm.vcpuinfo())
    else:
        if domain == "name":
            dom_option = vm_name
        elif domain == "id":
            dom_option = domid
            if params.get("setvcpus_hex_id") is not None:
                dom_option = hex(int(domid))
            elif params.get("setvcpus_invalid_id") is not None:
                dom_option = params.get("setvcpus_invalid_id")
        elif domain == "uuid":
            dom_option = domuuid
            if params.get("setvcpus_invalid_uuid") is not None:
                dom_option = params.get("setvcpus_invalid_uuid")
        else:
            dom_option = domain
        option_list = options.split(" ")
        for item in option_list:
            if virsh.has_command_help_match(command, item) is None:
                status_error = "yes"
                break
        status = virsh.setvcpus(
            dom_option, count_option, options, ignore_status=True).exit_status
        if pre_vm_state == "paused":
            virsh.resume(vm_name, ignore_status=True)
        if status_error == "no":
            if status == 0:
                if pre_vm_state == "shut off":
                    if options == "--config":
                        vcpus_set = len(vm.vcpuinfo())
                    elif options == "--current":
                        vcpus_set = get_current_vcpus()
                    elif options == "--maximum --config":
                        vcpus_set = ""
                        dom = parse("/etc/libvirt/qemu/%s.xml" % vm_name)
                        vcpus_set = dom.getElementsByTagName(
                            "vcpu")[0].firstChild.data
                        vcpus_set = int(vcpus_set)
                        dom.unlink()
                else:
                    vcpus_set = len(vm.vcpuinfo())
                if domain == "id":
                    cmd_chk = "cat /etc/libvirt/qemu/%s.xml" % vm_name
                    output1 = commands.getoutput(cmd_chk)
                    logging.info("guest-info:\n%s" % output1)

    virsh.destroy(vm_name)
    virsh.undefine(vm_name)
    virsh.define(xml_file)
    if os.path.exists(xml_file):
        os.remove(xml_file)
    if os.path.exists(tmp_file):
        os.remove(tmp_file)

    # check status_error
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Run successfully with wrong command!")
    else:
        if status != 0:
            raise error.TestFail("Run failed with right command")
        else:
            if options == "--maximum --config":
                if vcpus_set != 4:
                    raise error.TestFail("Run failed with right command1")
            elif domain == "id":
                if options == "--config":
                    if vcpus_set != vcpus_new or not re.search('<vcpu current=\'1\'>%s</vcpu>' % vcpus_new, output1):
                        raise error.TestFail("Run failed with right command2")
                elif options == "--config --live":
                    if vcpus_set != 1 or not re.search('<vcpu current=\'1\'>%s</vcpu>' % vcpus_new, output1):
                        raise error.TestFail("Run failed with right command3")
                else:
                    if vcpus_set != 1 or re.search('<vcpu current=\'1\'>%s</vcpu>' % vcpus_new, output1):
                        raise error.TestFail("Run failed with right command4")
            else:
                if vcpus_set != 1:
                    raise error.TestFail("Run failed with right command5")
Example #57
def run(test, params, env):
    """
    Test vcpu hotpluggable item in xml

    1. Set the libvirtd log filter/level/file
    2. Restart libvirtd
    3. Start vm by xml with vcpu hotpluggable
    4. Check the qemu command line
    5. Check the libvirtd log
    6. Restart libvirtd
    7. Check the vm xml
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    vcpus_placement = params.get("vcpus_placement", "static")
    vcpus_crt = int(params.get("vcpus_current", "4"))
    vcpus_max = int(params.get("vcpus_max", "8"))
    vcpus_enabled = params.get("vcpus_enabled", "")
    vcpus_hotplug = params.get("vcpus_hotpluggable", "")
    vcpus_order = params.get("vcpus_order")
    err_msg = params.get("err_msg", "")
    config_libvirtd = params.get("config_libvirtd", "yes") == "yes"
    log_file = params.get("log_file", "libvirtd.log")
    live_vcpus = params.get("set_live_vcpus", "")
    config_vcpus = params.get("set_config_vcpus", "")
    enable_vcpu = params.get("set_enable_vcpu", "")
    disable_vcpu = params.get("set_disable_vcpu", "")
    # Install cgroup utils
    cgutils = "libcgroup-tools"
    if "ubuntu" in platform.dist()[0].lower():
        cgutils = "cgroup-tools"
    sm = SoftwareManager()
    if not sm.check_installed(cgutils) and not sm.install(cgutils):
        test.cancel("cgroup utils package install failed")
    # Backup domain XML
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()
    libvirtd = utils_libvirtd.Libvirtd()

    try:
        # Configure libvirtd log
        if config_libvirtd:
            config_path = os.path.join(data_dir.get_tmp_dir(), log_file)
            # Make sure the log file exists
            with open(config_path, 'a'):
                pass
            config = utils_config.LibvirtdConfig()
            log_outputs = "1:file:%s" % config_path
            config.log_outputs = log_outputs
            config.log_level = 1
            config.log_filters = "1:json 1:libvirt 1:qemu 1:monitor 3:remote 4:event"

            # Restart libvirtd to make the changes take effect in libvirt
            libvirtd.restart()

        # Set vcpu: placement,current,max vcpu
        vmxml.placement = vcpus_placement
        vmxml.vcpu = vcpus_max
        vmxml.current_vcpu = vcpus_crt
        del vmxml.cpuset

        # Create vcpu xml with vcpu hotpluggable and order
        vcpu_list = []
        vcpu = {}
        en_list = vcpus_enabled.split(",")
        hotplug_list = vcpus_hotplug.split(",")
        order_dict = ast.literal_eval(vcpus_order)
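        # vcpus_order is a dict literal keyed by vcpu id, e.g. "{'4': '1'}"
        # (hypothetical value), mapping a hotpluggable vcpu to its plug order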

        for vcpu_id in range(vcpus_max):
            vcpu['id'] = str(vcpu_id)
            if str(vcpu_id) in en_list:
                vcpu['enabled'] = 'yes'
                if str(vcpu_id) in order_dict:
                    vcpu['order'] = order_dict[str(vcpu_id)]
            else:
                vcpu['enabled'] = 'no'
            if str(vcpu_id) in hotplug_list:
                vcpu['hotpluggable'] = 'yes'
            else:
                vcpu['hotpluggable'] = 'no'
            vcpu_list.append(copy.copy(vcpu))
            vcpu = {}

        vcpus_xml = vm_xml.VMVCPUSXML()
        vcpus_xml.vcpu = vcpu_list

        vmxml.vcpus = vcpus_xml

        # Remove influence from topology setting
        try:
            logging.info('Remove influence from topology setting')
            cpuxml = vmxml.cpu
            del cpuxml.topology
            vmxml.cpu = cpuxml
        except Exception:
            # The cpu element may be absent or carry no topology; ignore
            pass

        vmxml.sync()

        # Start VM
        logging.info("Start VM with vcpu hotpluggable and order...")
        ret = virsh.start(vm_name, ignore_status=True)

        if err_msg:
            libvirt.check_result(ret, err_msg)
        else:
            # Wait for domain
            vm.wait_for_login()

            if enable_vcpu:
                ret = virsh.setvcpu(vm_name, enable_vcpu, "--enable",
                                    ignore_status=False, debug=True)
                vcpus_crt += 1
            if disable_vcpu:
                ret = virsh.setvcpu(vm_name, disable_vcpu, "--disable",
                                    ignore_status=False, debug=True)
                vcpus_crt -= 1
            if live_vcpus:
                ret = virsh.setvcpus(vm_name, live_vcpus, ignore_status=False,
                                     debug=True)
                vcpus_crt = int(live_vcpus)
            if config_vcpus:
                ret = virsh.setvcpus(vm_name, config_vcpus, "--config",
                                     ignore_status=False, debug=True)

            # Check QEMU command line
            cmd = ("ps -ef| grep %s| grep 'maxcpus=%s'" % (vm_name, vcpus_max))
            ret = process.run(cmd, ignore_status=False, shell=True)
            if ret.exit_status != 0:
                logging.error("Maxcpus in QEMU command line is wrong!")

            # Check libvirtd log
            if config_libvirtd:
                for vcpu in vcpu_list:
                    if vcpu['enabled'] == 'yes' and vcpu['hotpluggable'] == 'yes':
                        cmd = ("cat %s| grep device_add| grep qemuMonitorIOWrite"
                               "| grep 'vcpu%s'" % (config_path, vcpu['id']))
                        ret = process.run(cmd, ignore_status=False, shell=True)
                        if ret.exit_status != 0:
                            logging.error("Failed to find lines about enabled vcpu%s"
                                          "in libvirtd log.", vcpu['id'])

            # Dumpxml
            dump_xml = virsh.dumpxml(vm_name).stdout.strip()
            vcpu_items = re.findall(r"vcpu.*", dump_xml)

            # Check guest vcpu count
            ret = virsh.vcpucount(vm_name, ignore_status=True, debug=True)
            output = ret.stdout.strip()
            max_list = re.findall(r"maximum.*(?:config|live).*%s\n" % vcpus_max, output)
            if len(max_list) != 2:
                test.fail("vcpucount maximum info is not correct.")

            if live_vcpus:
                crt_live_list = re.findall(r"current.*live.*%s" % live_vcpus, output)
                logging.info("vcpucount crt_live_list: \n %s", crt_live_list)
                if len(crt_live_list) != 1:
                    test.fail("vcpucount: current live info is not correct.")
            elif config_vcpus:
                crt_cfg_list = re.findall(r"current.*config.*%s" % config_vcpus, output)
                logging.info("vcpucount crt_cfg_list: \n %s", crt_cfg_list)
                if len(crt_cfg_list) != 1:
                    test.fail("vcpucount: current config info is not correct.")
            else:
                crt_live_list = re.findall(r"current.*live.*%s" % vcpus_crt, output)
                logging.info("vcpucount crt_live_list: \n %s", crt_live_list)
                if len(crt_live_list) != 1:
                    test.fail("vcpucount: current info is not correct.")

            # Check guest vcpu info
            ret = virsh.vcpuinfo(vm_name, ignore_status=True, debug=True)
            output = ret.stdout.strip()
            vcpu_lines = re.findall(r"VCPU:.*\n", output)
            logging.info("vcpuinfo vcpu_lines: \n %s", vcpu_lines)
            if len(vcpu_lines) != vcpus_crt:
                test.fail("vcpuinfo is not correct.")

            # Check cpu in guest
            if not utils_misc.check_if_vm_vcpu_match(vcpus_crt, vm):
                test.fail("cpu number in VM is not correct, it should be %s cpus" % vcpus_crt)

            # Check VM xml change for cold-plug/cold-unplug
            if config_vcpus:
                inactive_xml = virsh.dumpxml(vm_name, "--inactive").stdout.strip()
                crt_vcpus_xml = re.findall(r"vcpu.*current=.%s.*" %
                                           config_vcpus, inactive_xml)
                logging.info("dumpxml --inactive xml: \n %s", crt_vcpus_xml)
                if len(crt_vcpus_xml) != 1:
                    test.fail("Dumpxml with --inactive,"
                              "the vcpu current is not correct.")

            # Restart libvirtd
            libvirtd.restart()

            # Recheck VM xml
            re_dump_xml = virsh.dumpxml(vm_name).stdout.strip()
            re_vcpu_items = re.findall(r"vcpu.*", re_dump_xml)
            if vcpu_items != re_vcpu_items:
                test.fail("After restarting libvirtd,"
                          "VM xml changed unexpectedly.")

            # Check cgroup info
            en_vcpu_list = re.findall(r"vcpu.*enabled=.yes.*", re_dump_xml)
            for vcpu_sn in range(len(en_vcpu_list)):
                vcpu_id = en_vcpu_list[vcpu_sn].split("=")[1].split()[0].strip('\'')
                cmd = ("lscgroup| grep cpuset| grep %s| grep vcpu%s" %
                       (vm_name[-3:], vcpu_id))
                ret = process.run(cmd, ignore_status=False, shell=True)
                if ret.exit_status != 0:
                    test.fail("Failed to find lines about enabled vcpu%s"
                              "in lscgroup info." % vcpu_id)
    finally:
        # Recover libvirtd configuration
        if config_libvirtd:
            config.restore()
            if os.path.exists(config_path):
                os.remove(config_path)

        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()