def define_and_check():
        """
        Predict the error message when defining and try to define the guest
        with testing XML.
        """
        fail_patts = []
        known_models = {
            'pci': ['pci-root', 'pcie-root', 'pci-bridge'],
            'virtio-serial': [],
            'usb': ['ehci', 'ich9-ehci1'],
        }

        if cntlr_type == 'pci' and model is None:
            fail_patts.append(r"Invalid PCI controller model")
        if model is not None and model not in known_models[cntlr_type]:
            fail_patts.append(r"Unknown model type")
        if os_machine == 'q35' and model in ['pci-root', 'pci-bridge']:
            fail_patts.append(r"Device requires a PCI Express slot")
        if os_machine == 'i440fx' and model == 'pcie-root':
            fail_patts.append(r"Device requires a standard PCI slot")
        # isdigit() returns False for a negative number, which is exactly what
        # this test requires.
        if index is not None and not index.isdigit():
            fail_patts.append(r"Cannot parse controller index")

        vm_xml.undefine()
        res = vm_xml.virsh.define(vm_xml.xml)
        libvirt.check_result(res, expected_fails=fail_patts)
        return not res.exit_status
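Note: the fail_patts pattern above works because check_result treats a non-empty
expected_fails list as "the command must fail with one of these messages" and an
empty list as "the command must succeed". A minimal, simplified sketch of that
logic (not the actual virttest implementation; it assumes result.stderr is plain
text) could look like:

import re


def check_result_sketch(result, expected_fails=None):
    """Simplified stand-in for the avocado-vt check_result helper."""
    expected_fails = expected_fails or []
    if expected_fails:
        # The command is expected to fail with one of the given messages.
        if not result.exit_status:
            raise AssertionError("Command succeeded but a failure matching "
                                 "%s was expected" % expected_fails)
        if not any(re.search(patt, result.stderr) for patt in expected_fails):
            raise AssertionError("Command failed with an unexpected error: %s"
                                 % result.stderr)
    elif result.exit_status:
        raise AssertionError("Command failed unexpectedly: %s" % result.stderr)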
Example #2
    def _define_and_check():
        """
        Predict the error message when defining and try to define the guest
        with testing XML.
        """
        fail_patts = []

        if target_type not in ['virtio', 'guestfwd', 'spicevmc']:
            fail_patts.append(
                r"target type must be specified for channel device")
            fail_patts.append(
                r"unknown target type '.*' specified for character device")
        if target_type == 'guestfwd':
            if target_address is None:
                fail_patts.append(
                    r"guestfwd channel does not define a target address")
            if target_port is None:
                fail_patts.append(
                    r"guestfwd channel does not define a target port")
            if channel_type == 'unix' and source_path is None:
                fail_patts.append(
                    r"Missing source path attribute for char device")
        vm_xml.undefine()
        res = vm_xml.virsh.define(vm_xml.xml)
        libvirt.check_result(res, expected_fails=fail_patts)
        return not res.exit_status
Example #3
    def define_and_check():
        """
        Predict the error message when defining and try to define the guest
        with testing serial device.
        """
        fail_patts = []
        if serial_type in ['dev', 'file', 'pipe', 'unix'] and not any(
                ['path' in s for s in serial_dev.sources]):
            fail_patts.append(r"Missing source path attribute for char device")
        if serial_type in ['tcp'] and not any(
                ['host' in s for s in serial_dev.sources]):
            fail_patts.append(r"Missing source host attribute for char device")
        if serial_type in ['tcp', 'udp'] and not any(
                ['service' in s for s in serial_dev.sources]):
            fail_patts.append(r"Missing source service attribute for char "
                              "device")
        if serial_type in ['spiceport'] and not any(
                ['channel' in s for s in serial_dev.sources]):
            fail_patts.append(r"Missing source channel attribute for char "
                              "device")
        if serial_type in ['nmdm'] and not any(
                ['master' in s for s in serial_dev.sources]):
            fail_patts.append(r"Missing master path attribute for nmdm device")
        if serial_type in ['nmdm'] and not any(
                ['slave' in s for s in serial_dev.sources]):
            fail_patts.append(r"Missing slave path attribute for nmdm device")
        if serial_type in ['spicevmc']:
            fail_patts.append(r"spicevmc device type only supports virtio")

        vm_xml.undefine()
        res = vm_xml.virsh.define(vm_xml.xml)
        libvirt.check_result(res, expected_fails=fail_patts)
        return not res.exit_status
Example #4
    def check_vol(vol_params):
        """
        Check volume information.
        """
        pv = libvirt_storage.PoolVolume(pool_name)
        # Supported operations
        if vol_name not in pv.list_volumes():
            test.fail("Volume %s doesn't exist" % vol_name)
        ret = virsh.vol_dumpxml(vol_name, pool_name)
        libvirt.check_exit_status(ret)
        # vol-info
        if not pv.volume_info(vol_name):
            test.fail("Can't see volume info")
        # vol-key
        ret = virsh.vol_key(vol_name, pool_name)
        libvirt.check_exit_status(ret)
        if "%s/%s" % (disk_src_pool, vol_name) not in ret.stdout.strip():
            test.fail("Volume key isn't correct")
        # vol-path
        ret = virsh.vol_path(vol_name, pool_name)
        libvirt.check_exit_status(ret)
        if "%s/%s" % (disk_src_pool, vol_name) not in ret.stdout.strip():
            test.fail("Volume path isn't correct")
        # vol-pool
        ret = virsh.vol_pool("%s/%s" % (disk_src_pool, vol_name))
        libvirt.check_exit_status(ret)
        if pool_name not in ret.stdout.strip():
            test.fail("Volume pool isn't correct")
        # vol-name
        ret = virsh.vol_name("%s/%s" % (disk_src_pool, vol_name))
        libvirt.check_exit_status(ret)
        if vol_name not in ret.stdout.strip():
            test.fail("Volume name isn't correct")
        # vol-resize
        ret = virsh.vol_resize(vol_name, "2G", pool_name)
        libvirt.check_exit_status(ret)

        # Unsupported operations
        # vol-clone
        ret = virsh.vol_clone(vol_name, cloned_vol_name, pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)
        # vol-create-from
        volxml = vol_xml.VolXML()
        vol_params.update({"name": "%s" % create_from_cloned_volume})
        v_xml = volxml.new_vol(**vol_params)
        v_xml.xmltreefile.write()
        ret = virsh.vol_create_from(pool_name, v_xml.xml, vol_name, pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)

        # vol-wipe
        ret = virsh.vol_wipe(vol_name, pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)
        # vol-upload
        ret = virsh.vol_upload(vol_name, vm.get_first_disk_devices()['source'],
                               "--pool %s" % pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)
        # vol-download
        ret = virsh.vol_download(vol_name, cloned_vol_name, "--pool %s" % pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)
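Note: the unsupported-operation checks above rely on check_result's skip_if
argument, which turns a failure matching an "unsupported" message into a test
cancel rather than a test failure. A rough, simplified sketch of that behaviour
(assuming an avocado-style skip exception; not the actual helper):

import re

from avocado.core import exceptions


def skip_if_unsupported(result, skip_patterns):
    """Cancel the test when a command fails with an 'unsupported' message."""
    if result.exit_status and any(re.search(patt, result.stderr)
                                  for patt in skip_patterns):
        raise exceptions.TestSkipError("Operation not supported: %s"
                                       % result.stderr)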
 def start_and_check():
     """
     Predict the error message when starting and try to start the guest.
     """
     fail_patts = []
     if model == 'pci-bridge' and (index is None or int(index) == 0):
         fail_patts.append(r"PCI bridge index should be > 0")
     res = virsh.start(vm_name)
     libvirt.check_result(res, expected_fails=fail_patts)
     return not res.exit_status
Example #7
    def update_iothread_xml(define_error=False):
        """
        Update xml for test
        """
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        del vmxml.cputune
        del vmxml.iothreadids
        del vmxml.iothreads

        vm_is_active = vm.is_alive()

        # Set iothreads first
        if iothread_ids:
            ids_xml = vm_xml.VMIothreadidsXML()
            ids_xml.iothread = iothread_ids.split()
            vmxml.iothreadids = ids_xml
        # Set cputune
        if any([iothreadpins, iothreadscheds, iothread_quota, iothread_period]):
            cputune_xml = vm_xml.VMCPUTuneXML()
            if iothreadpins:
                io_pins = []
                for pins in iothreadpins.split():
                    thread, cpuset = pins.split(':')
                    io_pins.append({"iothread": thread,
                                    "cpuset": cpuset})
                cputune_xml.iothreadpins = io_pins
            if iothreadscheds:
                io_scheds = []
                for sched in iothreadscheds.split():
                    thread, scheduler = sched.split(":")
                    io_scheds.append({"iothreads": thread,
                                      "scheduler": scheduler})
                cputune_xml.iothreadscheds = io_scheds
            if iothread_period:
                cputune_xml.iothread_period = int(iothread_period)
            if iothread_quota:
                cputune_xml.iothread_quota = int(iothread_quota)

            vmxml.cputune = cputune_xml

        # Set iothread
        if iothread_num:
            vmxml.iothreads = int(iothread_num)

        logging.debug("Pre-test xml is %s", vmxml)
        if not define_error:
            vmxml.sync()
            if vm_is_active:
                vm.start()
                vm.wait_for_login().close()
        else:
            result = virsh.define(vmxml.xml, debug=True)
            libvirt.check_exit_status(result, True)
            if err_msg:
                libvirt.check_result(result, err_msg)
Example #8
 def start_and_check():
     """
     Predict the error message when starting and try to start the guest.
     """
     fail_patts = []
     if expect_err_msg:
         fail_patts.append(expect_err_msg)
     res = virsh.start(vm_name)
     logging.debug("Expect failures: %s", fail_patts)
     libvirt.check_result(res, expected_fails=fail_patts)
     return not res.exit_status
Example #9
    def check_result(cmd_result, status_error, error_msg=None):
        """
        Check command result including exit status and error message

        :param cmd_result: The result object to check
        :param status_error: The expected exit status, True to be failed
        :param error_msg: Expected error message
        """
        libvirt.check_exit_status(cmd_result, status_error)
        if error_msg:
            libvirt.check_result(cmd_result, error_msg)
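Note: a brief usage sketch for this wrapper; the domain name and error message
below are placeholders, not values from the original test:

from virttest import virsh

# Expect virsh start to fail with a specific message (hypothetical values).
res = virsh.start('avocado-vt-vm1', debug=True)
check_result(res, status_error=True, error_msg='unsupported configuration')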
Example #10
 def delete_busy_iothread(vm_name):
     """
     Attempt to delete an iothread that is still in use by a disk.
     """
     thread_id = params.get("virt_disk_thread_id", "--id 2")
     ret = virsh.iothreaddel(vm_name, thread_id)
     iothread_error = True
     disk_errors = \
         params.get("virt_disk_iothread_in_use_error")
     libvirt.check_exit_status(ret, iothread_error)
     libvirt.check_result(ret, expected_fails=disk_errors)
Example #11
    def _start_and_check():
        """
        Predict the error message when starting and try to start the guest.
        """
        fail_patts = []

        if channel_type == 'unix' and source_path and source_mode != 'bind':
            fail_patts.append(r"No such file or directory")

        res = virsh.start(vm_name)
        libvirt.check_result(res, expected_fails=fail_patts)
        return not res.exit_status
Example #12
    def _start_and_check():
        """
        Predict the error message when starting and try to start the guest.
        """
        fail_patts = []

        if channel_type == 'unix' and source_path and source_mode != 'bind':
            fail_patts.append(r"No such file or directory")

        res = virsh.start(vm_name)
        libvirt.check_result(res, expected_fails=fail_patts)
        return not res.exit_status
Example #13
    def test_at_iface_and_memory(dev_type):
        """
        hotplug an interface and memory devices

        1) Start vm and check the default locked memory
        2) Hotplug an interface and check the locked memory
        3) Hotplug 2 memory devices and check the locked memory
        4) Hot-unplug a memory device and check the locked memory

        :param dev_type: interface type
        """
        vm.start()
        vm.wait_for_serial_login(timeout=240).close()
        expr_memlock = 67108864
        if not check_soft_memlock(expr_memlock):
            test.fail("Unalbe to get correct default!")

        interface_base.attach_iface_device(vm_name, dev_type, params)

        new_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        # MEMLOCK value is guest memory + 1G (for the passthrough device)
        expr_memlock = normalize_mem_size(
            new_vmxml.get_current_mem(),
            new_vmxml.get_current_mem_unit()) + 1073741824
        if not check_soft_memlock(expr_memlock):
            test.fail("Unalbe to get correct MEMLOCK after VM startup!")

        logging.info("Hotplug memory devices.")
        for mem_attrs in ['mem_dict1', 'mem_dict2']:
            mem_dict = eval(params.get(mem_attrs, '{}'))
            memxml = Memory()
            memxml.setup_attrs(**mem_dict)
            virsh.attach_device(vm_name, memxml.xml, **VIRSH_ARGS)
            expr_memlock += normalize_mem_size(
                mem_dict['target']['size'], mem_dict['target']['size_unit'])
            if not check_soft_memlock(expr_memlock):
                test.fail("Unalbe to get correct MEMLOCK after attaching a "
                          "memory device!")

        logging.info("Detach a memory device and check memlock.")
        memxml = vm_xml.VMXML.new_from_dumpxml(vm_name).get_devices('memory')[-1]
        cmd_result = virsh.detach_device(vm_name, memxml.xml,
                                         wait_for_event=True,
                                         debug=True)
        if cmd_result.exit_status:
            libvirt.check_result(cmd_result, 'unplug of device was rejected')
            if not check_soft_memlock(expr_memlock):
                test.fail("Detaching mem failed, MEMLOCK should not change!")
        else:
            if not check_soft_memlock(expr_memlock):
                test.fail("Unalbe to get correct MEMLOCK after detaching a "
                          "memory device!")
Example #14
 def define_and_check():
     """
     Predict the error message when defining and try to define the guest
     with testing XML.
     """
     fail_patts = []
     if expect_err_msg:
         fail_patts.append(expect_err_msg)
     vm_xml.undefine()
     res = vm_xml.virsh.define(vm_xml.xml)
     logging.debug("Expect failures: %s", fail_patts)
     libvirt.check_result(res, expected_fails=fail_patts)
     return not res.exit_status
def config_feature_pv_eoi(test, vmxml, **kwargs):
    """
    Config libvirt VM XML to enable/disable PV EOI feature.

    :param vmxml: VMXML instance
    :param kwargs: Function keywords
    :return: Corresponding feature flag in qemu cmdline
    """
    # This attribute supported since 0.10.2 (QEMU only)
    if not libvirt_version.version_compare(0, 10, 2):
        test.cancel("PV eoi is not supported in current"
                    " libvirt version")
    qemu_flags = []
    eoi_enable = kwargs.get('eoi_enable', 'on')
    hostos_release = astring.to_text(process.run("cat /etc/redhat-release", shell=True).stdout)
    version_match = re.search(r'(\d+(\.\d+)?)', hostos_release)
    if version_match is not None:
        hostos_version = float(version_match.group(0))
        if hostos_version < 8.1:
            if eoi_enable == 'on':
                qemu_flags.append('+kvm_pv_eoi')
            elif eoi_enable == 'off':
                qemu_flags.append('-kvm_pv_eoi')
            else:
                logging.error("Invalid value %s, eoi_enable must be 'on' or 'off'", eoi_enable)
        elif hostos_version > 8.0:
            if eoi_enable == 'on':
                qemu_flags.append('kvm-pv-eoi=on')
            elif eoi_enable == 'off':
                qemu_flags.append('kvm-pv-eoi=off')
            else:
                logging.error("Invalid value %s, eoi_enable must be 'on' or 'off'", eoi_enable)
        else:
            test.fail("Cannot determine the expected qemu command line because "
                      "the host OS version is not supported")

    # Create the features tag if it does not exist
    if not vmxml.xmltreefile.find('features'):
        vmxml.features = vm_xml.VMFeaturesXML()
    vmxml_feature = vmxml.features
    if vmxml_feature.has_feature('apic'):
        vmxml_feature.remove_feature('apic')
    vmxml_feature.add_feature('apic', 'eoi', eoi_enable)
    vmxml.features = vmxml_feature
    logging.debug("Update VM XML:\n%s", vmxml)
    expect_fail = False if 'expect_define_vm_fail' not in kwargs \
        else kwargs['expect_define_vm_fail']
    result = virsh.define(vmxml.xml, debug=True)
    libvirt.check_exit_status(result, expect_fail)
    if expect_fail:
        libvirt.check_result(result, kwargs.get('expected_msg'))
        return
    return qemu_flags
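Note: one plausible way to consume the returned flags, sketched here as an
assumption rather than part of the original test, is to look for each flag in
the running guest's qemu command line (this assumes a VM object `vm` for the
freshly defined domain):

# Hypothetical follow-up check for the flags returned above.
qemu_flags = config_feature_pv_eoi(test, vmxml, eoi_enable='on')
if qemu_flags:
    vm.start()
    with open('/proc/%s/cmdline' % vm.get_pid()) as cmdline_file:
        qemu_cmdline = cmdline_file.read().replace('\x00', ' ')
    for flag in qemu_flags:
        if flag not in qemu_cmdline:
            test.fail("Flag %s not found in qemu command line" % flag)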
Example #16
    def define_and_check(guest_xml):
        """
        Define the guest and check the result.

        :param guest_xml: The guest VMXML instance
        """
        fail_patts = []
        if expect_err_msg:
            fail_patts.append(expect_err_msg)
        guest_xml.undefine()
        res = vm_xml.virsh.define(guest_xml.xml)
        logging.debug("Expect failures: %s", fail_patts)
        libvirt.check_result(res, expected_fails=fail_patts)
        return not res.exit_status
Example #17
def run(test, params, env):
    """
    Run tests for virsh hypervisor-cpu-baseline with parameters
    """
    baseline_option = substitute_param(params)
    domcap_path = params.get("domcap_path")
    domcap_path = os.path.join(os.path.dirname(__file__), domcap_path)
    err_msg = params.get("err_msg")

    ret = virsh.hypervisor_cpu_baseline(domcap_path,
                                        options=baseline_option,
                                        ignore_status=True,
                                        debug=True)
    libvirt.check_result(ret, expected_fails=err_msg)
Example #18
    def check_blockcopy(target):
        """
        Block copy operation test.
        """
        blk_file = os.path.join(data_dir.get_tmp_dir(), "blk.rbd")
        if os.path.exists(blk_file):
            os.remove(blk_file)
        blk_mirror = ("mirror type='file' file='%s' "
                      "format='raw' job='copy'" % blk_file)

        # Do blockcopy
        ret = virsh.blockcopy(vm_name, target, blk_file)
        libvirt.check_result(ret, skip_if=unsupported_err)

        dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
        if not dom_xml.count(blk_mirror):
            test.fail("Can't see block job in domain xml")

        # Abort
        ret = virsh.blockjob(vm_name, target, "--abort")
        libvirt.check_exit_status(ret)
        dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
        if dom_xml.count(blk_mirror):
            test.fail("Failed to abort block job")
        if os.path.exists(blk_file):
            os.remove(blk_file)

        # Sleep for a while after abort operation.
        time.sleep(5)
        # Do blockcopy again
        ret = virsh.blockcopy(vm_name, target, blk_file)
        libvirt.check_exit_status(ret)

        # Wait for complete
        def wait_func():
            ret = virsh.blockjob(vm_name, target, "--info")
            return ret.stderr.count("Block Copy: [100 %]")

        timeout = params.get("blockjob_timeout", 600)
        utils_misc.wait_for(wait_func, int(timeout))

        # Pivot
        ret = virsh.blockjob(vm_name, target, "--pivot")
        libvirt.check_exit_status(ret)
        dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
        if not dom_xml.count("source file='%s'" % blk_file):
            test.fail("Failed to pivot block job")
        # Remove the disk file.
        if os.path.exists(blk_file):
            os.remove(blk_file)
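Note: utils_misc.wait_for polls the given callable until it returns a truthy
value or the timeout expires. For readers unfamiliar with that helper, a
simplified equivalent (not the real implementation):

import time


def wait_for_sketch(func, timeout, step=1.0):
    """Poll func until it returns something truthy or timeout seconds elapse."""
    end_time = time.time() + float(timeout)
    while time.time() < end_time:
        output = func()
        if output:
            return output
        time.sleep(step)
    return None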
Example #19
 def attach_disk_iothread_two(vm_name, device_source, device_target):
     """
     Attach a disk with an iothread value of 2.
     """
     disk_attach_success = params.get("virt_disk_attach_success")
     attach_option = params.get("disk_attach_option_io_2")
     ret = virsh.attach_disk(vm_name,
                             device_source,
                             device_target,
                             attach_option,
                             debug=True)
     disk_attach_error = False
     libvirt.check_exit_status(ret, disk_attach_error)
     libvirt.check_result(ret, expected_match=disk_attach_success)
Example #20
    def check_blockcopy(target):
        """
        Block copy operation test.
        """
        blk_file = os.path.join(data_dir.get_tmp_dir(), "blk.rbd")
        if os.path.exists(blk_file):
            os.remove(blk_file)
        blk_mirror = ("mirror type='file' file='%s' "
                      "format='raw' job='copy'" % blk_file)

        # Do blockcopy
        ret = virsh.blockcopy(vm_name, target, blk_file)
        libvirt.check_result(ret, skip_if=unsupported_err)

        dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
        if not dom_xml.count(blk_mirror):
            test.fail("Can't see block job in domain xml")

        # Abort
        ret = virsh.blockjob(vm_name, target, "--abort")
        libvirt.check_exit_status(ret)
        dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
        if dom_xml.count(blk_mirror):
            test.fail("Failed to abort block job")
        if os.path.exists(blk_file):
            os.remove(blk_file)

        # Sleep for a while after abort operation.
        time.sleep(5)
        # Do blockcopy again
        ret = virsh.blockcopy(vm_name, target, blk_file)
        libvirt.check_exit_status(ret)

        # Wait for complete
        def wait_func():
            ret = virsh.blockjob(vm_name, target, "--info")
            return ret.stderr.count("Block Copy: [100 %]")
        timeout = params.get("blockjob_timeout", 600)
        utils_misc.wait_for(wait_func, int(timeout))

        # Pivot
        ret = virsh.blockjob(vm_name, target, "--pivot")
        libvirt.check_exit_status(ret)
        dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
        if not dom_xml.count("source file='%s'" % blk_file):
            test.fail("Failed to pivot block job")
        # Remove the disk file.
        if os.path.exists(blk_file):
            os.remove(blk_file)
Example #21
def run(test, params, env):
    """
    Test memballoon device
    """
    vm_name = params.get('main_vm')
    vm = env.get_vm(vm_name)

    case = params.get('case', '')
    status_error = "yes" == params.get('status_error', 'no')

    bkxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        if case == 'alias':
            model = params.get('model')
            alias_name = params.get('alias_name')
            has_alias = 'yes' == params.get('has_alias', 'no')

            # Update memballoon device
            balloon_dict = {
                'membal_model': model,
                'membal_alias_name': alias_name
            }

            libvirt.update_memballoon_xml(vmxml, balloon_dict)
            logging.debug(virsh.dumpxml(vm_name).stdout_text)

            # Get memballoon device after vm define and check
            balloons = vmxml.get_devices('memballoon')
            if len(balloons) == 0:
                test.error('Memballoon device was not added to vm.')
            new_balloon = balloons[0]
            if has_alias:
                logging.debug('Expected alias: %s\nActual alias: %s',
                              alias_name, new_balloon.alias_name)
                if new_balloon.alias_name == alias_name:
                    logging.info('Memballoon alias check PASS.')
                else:
                    test.fail('Memballoon alias check FAIL.')

            # Check vm start
            cmd_result = virsh.start(vm_name)
            libvirt.check_result(cmd_result, status_error)

    finally:
        if params.get('destroy_after') == 'yes':
            vm.destroy()
        bkxml.sync()
Example #22
 def test_at_dt():
     """
     Test attach-detach interfaces
     """
     options = '' if vm.is_alive() else '--config'
     iface_dict = eval(params.get('iface_dict')
                       % utils_sriov.pci_to_addr(vf_pci))
     iface = interface_base.create_iface('hostdev', iface_dict)
     result = virsh.attach_device(vm_name, iface.xml,
                                  flagstr=options, debug=True)
     if not start_vm:
         result = virsh.start(vm.name, debug=True)
     libvirt.check_exit_status(result, status_error)
     if error_msg:
         libvirt.check_result(result, error_msg)
Example #23
        def check_dominfo_and_ausearch(dominfo_check, ausearch_check):
            """
            Check output of virsh dominfo and ausearch command

            :param dominfo_check: patterns to search in dominfo output
            :param ausearch_check: patterns to search in ausearch output
            """
            if dominfo_check:
                dominfo = virsh.dominfo(vm_name, **VIRSH_ARGS)
                libvirt.check_result(dominfo, expected_match=dominfo_check)
            if ausearch_check:
                ausearch_result = process.run(audit_cmd,
                                              verbose=True, shell=True)
                libvirt.check_result(ausearch_result,
                                     expected_match=ausearch_check)
Example #24
 def attach_disk_iothread_zero(vm_name, device_source, device_target):
     """
     Attempt to attach a disk with an iothread value of 0.
     """
     disk_errors = \
         params.get("virt_disk_iothread_0_errors")
     attach_option = params.get("disk_attach_option_io_0", "--iothread 0")
     ret = virsh.attach_disk(vm_name,
                             device_source,
                             device_target,
                             attach_option,
                             debug=True)
     disk_attach_error = True
     libvirt.check_exit_status(ret, disk_attach_error)
     libvirt.check_result(ret, expected_fails=disk_errors)
Example #25
def setup_test_default(guest_xml, params):
    """
    Setup steps by default

    :param guest_xml: guest xml
    :param params: dict, test parameters
    :return: the updated guest xml
    """
    # Create nvdimm file on the host
    nvdimm_file_size = params.get('nvdimm_file_size')
    nvdimm_file = params.get('nvdimm_file')
    process.run('truncate -s %s %s' % (nvdimm_file_size, nvdimm_file), verbose=True)

    if params.get('vm_attrs'):
        guest_xml = setup_test_pmem_alignsize(guest_xml, params)
        return guest_xml
    # Set cpu according to params
    cpu_xml = create_cpuxml(params)
    guest_xml.cpu = cpu_xml

    # Update other vcpu, memory info according to params
    update_vm_args = {k: params[k] for k in params
                      if k.startswith('setvm_')}
    logging.debug(update_vm_args)
    for key, value in list(update_vm_args.items()):
        attr = key.replace('setvm_', '')
        logging.debug('Set %s = %s', attr, value)
        setattr(guest_xml, attr, int(value) if value.isdigit() else value)

    # Add an nvdimm mem device to vm xml
    nvdimm_params = {k.replace('nvdimmxml_', ''): v
                     for k, v in params.items() if k.startswith('nvdimmxml_')}
    nvdimm_xml = create_nvdimm_xml(**nvdimm_params)
    guest_xml.add_device(nvdimm_xml)
    error_msg = params.get('error_msg')
    check = params.get('check')
    check_define_list = ['ppc_no_label', 'discard']
    if libvirt_version.version_compare(7, 0, 0):
        check_define_list.append('less_than_256')

    if check in check_define_list:
        result = virsh.define(guest_xml.xml, debug=True)
        libvirt.check_result(result, expected_fails=[error_msg])
        return None

    guest_xml.sync()
    logging.debug(virsh.dumpxml(params.get('main_vm')).stdout_text)
    return guest_xml
Example #26
    def check_snapshot(snap_option, target_dev='vda'):
        """
        Test snapshot operation.
        """
        snap_name = "s1"
        snap_mem = os.path.join(data_dir.get_tmp_dir(), "rbd.mem")
        snap_disk = os.path.join(data_dir.get_tmp_dir(), "rbd.disk")
        xml_snap_exp = [
            "disk name='%s' snapshot='external' type='file'" % target_dev
        ]
        xml_dom_exp = [
            "source file='%s'" % snap_disk,
            "backingStore type='network' index='1'",
            "source protocol='rbd' name='%s'" % disk_src_name
        ]
        if snap_option.count("disk-only"):
            options = ("%s --diskspec %s,file=%s --disk-only" %
                       (snap_name, target_dev, snap_disk))
        elif snap_option.count("disk-mem"):
            options = ("%s --memspec file=%s --diskspec %s,file="
                       "%s" % (snap_name, snap_mem, target_dev, snap_disk))
            xml_snap_exp.append("memory snapshot='external' file='%s'" %
                                snap_mem)
        else:
            options = snap_name

        ret = virsh.snapshot_create_as(vm_name, options)
        if test_disk_internal_snapshot or test_disk_readonly:
            libvirt.check_result(ret, expected_fails=unsupported_err)
        else:
            libvirt.check_result(ret, skip_if=unsupported_err)

        # check xml file.
        if not ret.exit_status:
            snap_xml = virsh.snapshot_dumpxml(vm_name, snap_name,
                                              debug=True).stdout.strip()
            dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
            # Delete snapshots.
            libvirt.clean_up_snapshots(vm_name)
            if os.path.exists(snap_mem):
                os.remove(snap_mem)
            if os.path.exists(snap_disk):
                os.remove(snap_disk)

            if not all([x in snap_xml for x in xml_snap_exp]):
                test.fail("Failed to check snapshot xml")
            if not all([x in dom_xml for x in xml_dom_exp]):
                test.fail("Failed to check domain xml")
def connect_to_remote(server_info, err_msg=None):
    """
    Try connection to the remote server with TLS

    :param server_info: dictionary with the server information
    :param err_msg: expected error messages (if any)
    :returns: None
    """
    expected_fails = [err_msg] if err_msg else []
    result = process.run('virsh -c qemu+tls://{}/system'.format(
        server_info['ip']),
                         shell=True,
                         ignore_status=True)
    libvirt.check_result(result,
                         expected_fails=expected_fails,
                         check_both_on_error=True)
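Note: a short usage sketch with placeholder data; the address and error message
are illustrative only, not values from the original test:

server_info = {'ip': '192.168.122.10'}    # hypothetical remote host
# Expect the TLS connection to succeed once certificates are deployed.
connect_to_remote(server_info)
# Expect a failure, e.g. after removing the client certificate on purpose.
connect_to_remote(server_info, err_msg='Certificate failed validation')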
Example #28
    def exec_test(vm,
                  test_scenario,
                  iface_type,
                  iface_args,
                  iface_type2,
                  iface_args2,
                  params=params):
        """
        Execute test

        :param vm: VM object
        :param test_scenario: Test scenario
        :param iface_type: Interface type for the first device
        :param iface_args: Interface attrs for the first device
        :param iface_type2: Interface type for the second device
        :param iface_args2: Interface attrs for the second device
        :param params: Test parameters
        """
        status_error = "yes" == params.get("status_error", "no")
        error_msg = params.get("error_msg")

        opts = '--config'
        test.log.info("TEST_STEP1: Attach a %s device", iface_type)
        iface = interface_base.create_iface(iface_type, iface_args)
        virsh.attach_device(vm.name, iface.xml, flagstr=opts, **VIRSH_ARGS)

        iface2 = interface_base.create_iface(iface_type2, iface_args2)
        if test_scenario.startswith("hotplug"):
            opts = ''
            libvirt_pcicontr.reset_pci_num(vm.name)
            test.log.info("TEST_STEP2: Start VM")
            vm.start()
            vm.wait_for_serial_login(timeout=240).close()
            test.log.info("TEST_STEP3: Attach a %s device.", iface_type2)
            result = virsh.attach_device(vm.name, iface2.xml, debug=True)
        else:
            test.log.info("TEST_STEP2: Attach a %s device.", iface_type2)
            virsh.attach_device(vm.name,
                                iface2.xml,
                                flagstr=opts,
                                **VIRSH_ARGS)
            test.log.info("TEST_STEP3: Start VM")
            result = virsh.start(vm.name, debug=True)
        libvirt.check_exit_status(result, status_error)
        if error_msg:
            libvirt.check_result(result, error_msg)
Example #29
def check_virsh_connect_alive(test, params):
    virsh_instance = virsh.VirshPersistent()
    try:
        virsh_instance.dom_list(debug=True)
        ret = virsh_instance.connect("test", debug=True, ignore_status=True)
        # Return value should be an error
        if ret.exit_status:
            # But there is no output in stderr; all output is in stdout
            if not ret.stderr:
                # Therefore copy stdout to stderr so check_result can check it
                ret.stderr = ret.stdout
        utlv.check_result(ret, expected_fails=[params['expected_result_patt']])
        virsh_instance.dom_list(debug=True)
        virsh_instance.connect(debug=True)
        virsh_instance.dom_list(debug=True)
    except Exception as e:
        test.fail('Unexpected failure: {}'.format(e))
Example #30
def test_invalid_cpuNum(test, host_cpus_list, params):
    """
    Test nodecpustats command with different invalid cpu

    :param test: test object
    :param host_cpus_list: list, host cpu list
    :param params: dict, test parameters
    :return: None
    """

    for offset_value in [0, 5, 3200000000000000000000]:
        option = "--cpu %s" % (len(host_cpus_list) + offset_value)
        status, output = run_nodecpustats(option)
        err_msg = ''
        if (libvirt_version.version_compare(6, 2, 0)):
            err_msg = params.get('err_msg')
        libvirt.check_result(output, expected_fails=[err_msg])
Example #31
def check_ga_function(vm_name, status_error, hotunplug_ga):
    """
    Check whether guest agent function can work as expected

    :param vm_name: The vm's name
    :param status_error: Expect status error or not
    :param hotunplug_ga: hotunplug guest agent device or not
    """
    error_msg = []
    if status_error:
        error_msg.append("QEMU guest agent is not connected")
    if hotunplug_ga:
        error_msg.append("QEMU guest agent is not configured")
    result = virsh.domtime(vm_name, ignore_status=True, debug=True)
    libvirt.check_result(result,
                         expected_fails=error_msg,
                         any_error=status_error)
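Note: a hedged invocation example (the domain name is a placeholder). With the
guest-agent channel hot-unplugged and status_error set, any of the collected
error messages is acceptable because any_error is passed through:

# Hypothetical call: agent device removed, so either listed error may match.
check_ga_function('avocado-vt-vm1', status_error=True, hotunplug_ga=True)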
Example #32
def enable_normal_boot(vmxml, check_points, define_error, test):
    """
    Undefine/Define VM and check the result

    :param vmxml: The instance of VMXML class
    :param  check_points: The list of check points of result
    :param define_error: The define error status
    :param test: Avocado test object
    """
    logging.debug("Boot guest in normal mode:\n%s", open(vmxml.xml).read())
    vmxml.undefine(options="--nvram")
    ret = virsh.define(vmxml.xml)
    if ret.exit_status:
        if define_error:
            utlv.check_result(ret, expected_fails=check_points)
        else:
            test.fail("Failed to define VM from %s" % vmxml.xml)
Example #33
 def check_hpt_order(session, resizing=''):
     """
     Return the hpt order from the hpt_order file by default.
     If 'resizing' is disabled, test updating hpt_order.
     """
     if not hpt_order_path:
         test.cancel('No hpt order path provided.')
     hpt_order = session.cmd_output('cat %s' % hpt_order_path).strip()
     hpt_order = int(hpt_order)
     logging.info('Current hpt_order is %d', hpt_order)
     if resizing == 'disabled':
         cmd_result = session.cmd_status_output(
             'echo %d > %s' % (hpt_order + 1, hpt_order_path))
         result = process.CmdResult(stderr=cmd_result[1],
                                    exit_status=cmd_result[0])
         libvirt.check_exit_status(result, True)
         libvirt.check_result(result, [error_msg])
     return hpt_order
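Note: the CmdResult wrapping above lets an in-guest command failure be validated
with the same libvirt helpers used for virsh commands. The pattern can be reused
for other in-guest commands; a hedged sketch (the command is illustrative):

# Hypothetical reuse of the wrapping pattern for another in-guest command.
status, stderr = session.cmd_status_output('swapon /dev/does-not-exist')
result = process.CmdResult(stderr=stderr, exit_status=status)
libvirt.check_exit_status(result, True)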
Example #34
def enable_normal_boot(vmxml, check_points, define_error, test):
    """
    Undefine/Define VM and check the result

    :param vmxml: The instance of VMXML class
    :param  check_points: The list of check points of result
    :param define_error: The define error status
    :param test: Avocado test object
    """
    logging.debug("Boot guest in normal mode:\n%s",
                  open(vmxml.xml).read())
    vmxml.undefine(options="--nvram")
    ret = virsh.define(vmxml.xml)
    if ret.exit_status:
        if define_error:
            utlv.check_result(ret, expected_fails=check_points)
        else:
            test.fail("Failed to define VM from %s" % vmxml.xml)
Example #35
 def check_hpt_order(session, resizing=''):
     """
     Return the hpt order from the hpt_order file by default.
     If 'resizing' is disabled, test updating hpt_order.
     """
     if not hpt_order_path:
         test.cancel('No hpt order path provided.')
     hpt_order = session.cmd_output('cat %s' % hpt_order_path).strip()
     hpt_order = int(hpt_order)
     logging.info('Current hpt_order is %d', hpt_order)
     if resizing == 'disabled':
         cmd_result = session.cmd_status_output(
             'echo %d > %s' % (hpt_order + 1, hpt_order_path))
         result = process.CmdResult(stderr=cmd_result[1],
                                    exit_status=cmd_result[0])
         libvirt.check_exit_status(result, True)
         libvirt.check_result(result, error_msg)
     return hpt_order
Example #36
    def check_snapshot(snap_option, target_dev='vda'):
        """
        Test snapshot operation.
        """
        snap_name = "s1"
        snap_mem = os.path.join(data_dir.get_tmp_dir(), "rbd.mem")
        snap_disk = os.path.join(data_dir.get_tmp_dir(), "rbd.disk")
        xml_snap_exp = ["disk name='%s' snapshot='external' type='file'" % target_dev]
        xml_dom_exp = ["source file='%s'" % snap_disk,
                       "backingStore type='network' index='1'",
                       "source protocol='rbd' name='%s'" % disk_src_name]
        if snap_option.count("disk-only"):
            options = ("%s --diskspec %s,file=%s --disk-only" %
                       (snap_name, target_dev, snap_disk))
        elif snap_option.count("disk-mem"):
            options = ("%s --memspec file=%s --diskspec %s,file="
                       "%s" % (snap_name, snap_mem, target_dev, snap_disk))
            xml_snap_exp.append("memory snapshot='external' file='%s'"
                                % snap_mem)
        else:
            options = snap_name

        ret = virsh.snapshot_create_as(vm_name, options)
        if test_disk_internal_snapshot or test_disk_readonly:
            libvirt.check_result(ret, expected_fails=unsupported_err)
        else:
            libvirt.check_result(ret, skip_if=unsupported_err)

        # check xml file.
        if not ret.exit_status:
            snap_xml = virsh.snapshot_dumpxml(vm_name, snap_name,
                                              debug=True).stdout.strip()
            dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
            # Delete snapshots.
            libvirt.clean_up_snapshots(vm_name)
            if os.path.exists(snap_mem):
                os.remove(snap_mem)
            if os.path.exists(snap_disk):
                os.remove(snap_disk)

            if not all([x in snap_xml for x in xml_snap_exp]):
                test.fail("Failed to check snapshot xml")
            if not all([x in dom_xml for x in xml_dom_exp]):
                test.fail("Failed to check domain xml")
Example #37
def set_emulatorpin_parameter(params, test):
    """
    Set the emulatorpin parameters

    :param params: the parameter dictionary
    :param test: the test object
    :raises: test.fail if command fails
    """
    vm_name = params.get("main_vm")
    vm = params.get("vm")
    cpulist = params.get("emulatorpin_cpulist")
    options = params.get("emulatorpin_options")
    start_vm = params.get("start_vm", "yes")

    if start_vm == "no" and vm and vm.is_alive():
        vm.destroy()

    result = virsh.emulatorpin(vm_name, cpulist, options, debug=True)
    status = result.exit_status

    # Check status_error
    status_error = params.get("status_error")
    err_msg = params.get("err_msg")

    if status_error == "yes":
        if err_msg:
            libvirt.check_result(result, expected_fails=[err_msg])
        elif status or not check_emulatorpin(params, test):
            logging.info("It's an expected : %s", result.stderr)
        else:
            test.fail("%d not a expected command " "return value" % status)
    elif status_error == "no":
        if status:
            test.fail(result.stderr)
        else:
            if check_emulatorpin(params, test):
                logging.info(result.stdout.strip())
            else:
                test.fail("The 'cpulist' is inconsistent with"
                          " 'cpulist' emulatorpin XML or/and is"
                          " different from emulator/cpuset.cpus"
                          " value from cgroup cpuset controller")
Example #38
def update_iface_device(vm, params):
    """
    Update an interface by virsh update-device

    :param vm: VM object
    :param params: Dictionary with the test parameters
    """
    status_error = "yes" == params.get("status_error", "no")
    error_msg = params.get("error_msg")
    iface_dict = eval(params.get('iface_dict', '{}'))

    iface = get_vm_iface_dev(vm, iface_dict)
    virsh_opt = parse_virsh_opts(params)
    result = virsh.update_device(vm.name,
                                 iface.xml,
                                 flagstr=virsh_opt,
                                 debug=True)
    libvirt.check_exit_status(result, status_error)
    if error_msg:
        libvirt.check_result(result, error_msg)
Example #39
    def exec_iothreadset():
        """
        Run "virsh iothreadset" and check if iothread pool values are updated
        or not

        :raise: test.fail if the result of virsh command is not as expected
        """
        # The command "virsh iothreadset" needs vm in running stats
        if not vm.is_alive():
            vm.start()
            vm.wait_for_login().close()

        # Check domstats before run virsh iothreadset
        global ORG_IOTHREAD_POOL
        ORG_IOTHREAD_POOL = get_iothread_pool(vm_name, iothreadset_id)
        result = virsh.iothreadset(vm_name,
                                   iothreadset_id,
                                   iothreadset_val,
                                   debug=True,
                                   ignore_status=True)

        libvirt.check_exit_status(result, status_error)
        if err_msg:
            libvirt.check_result(result, expected_fails=err_msg)

        # Check domstats again
        global UPDATE_IOTHREAD_POOL
        UPDATE_IOTHREAD_POOL = get_iothread_pool(vm_name, iothreadset_id)
        check_iothread_pool(ORG_IOTHREAD_POOL, UPDATE_IOTHREAD_POOL,
                            status_error)

        # Check if the values are updated as expected
        if not status_error:
            lst = iothreadset_val.split()
            exp_pool = {
                re.sub('--', "iothread." + iothreadset_id + ".", lst[i]):
                lst[i + 1]
                for i in range(0, len(lst), 2)
            }
            check_iothread_pool(UPDATE_IOTHREAD_POOL, exp_pool, True)
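Note: to make the dictionary comprehension above concrete, here is a worked
example with assumed inputs (an iothread id of "2" and a typical iothreadset
value string; both are illustrative only):

import re

# Assumed inputs, for illustration only.
iothreadset_id = "2"
iothreadset_val = "--poll-max-ns 100 --poll-grow 2 --poll-shrink 1"

lst = iothreadset_val.split()
exp_pool = {
    re.sub('--', "iothread." + iothreadset_id + ".", lst[i]): lst[i + 1]
    for i in range(0, len(lst), 2)
}
# exp_pool == {'iothread.2.poll-max-ns': '100',
#              'iothread.2.poll-grow': '2',
#              'iothread.2.poll-shrink': '1'}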
Example #40
    def define_and_check():
        """
        Predict the error message when defining and try to define the guest
        with testing XML.
        """
        fail_patts = []
        known_models = {
            'pci': ['pci-root', 'pci-bridge'],
            'virtio-serial': [],
            'usb': ['qemu-xhci'],
            'scsi': ['virtio-scsi'],
        }
        if status_error:
            if cntlr_type == 'pci' and cntlr_model:
                fail_patts.append(r"Invalid PCI controller model")
            if cntlr_type and cntlr_model not in known_models[cntlr_type]:
                fail_patts.append(r"Unknown model type")
            if cntlr_model == 'pcie-root':
                fail_patts.append(r"Device requires a standard PCI slot")
            if addr_str and '02:00.0' in addr_str:
                fail_patts.append(r"slot must be >= 1")
            elif addr_str and '02:32.0' in addr_str:
                fail_patts.append(r"must be <= 0x1F")
            if cntlr_num > 31 and cntlr_type == 'pci':
                fail_patts.append(r"out of range - must be 0-30")
            if cntlr_index and target_index:
                if (int(cntlr_index) != 0) ^ (int(target_index) != 0):
                    fail_patts.append(
                        r"Only the PCI controller with index 0 can have target index 0"
                    )

            # isdigit() returns False for a negative number, which is exactly
            # what this test requires.
            if cntlr_index is not None and not cntlr_index.isdigit():
                fail_patts.append(r"Cannot parse controller index")

        vm_xml.undefine()
        res = vm_xml.virsh.define(vm_xml.xml)
        libvirt.check_result(res, expected_fails=fail_patts)
        return not res.exit_status
Example #41
def run(test, params, env):
    """
    Test vcpu
    """
    vm_name = params.get('main_vm')
    check = params.get('check', '')
    status_error = 'yes' == params.get('status_error', 'no')
    err_msg = params.get('err_msg', '')
    guest_vcpu = params.get('guest_vcpu')
    boot_timeout = int(params.get('boot_timeout', 240))
    start_fail = 'yes' == params.get('start_fail', 'no')

    vm = env.get_vm(vm_name)
    vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    bkxml = vmxml.copy()

    def check_onlinevcpus(vm, cpu_num):
        """

        Check whether all vcpus are online as expected.

        :param vm: the exact VM need to check
        :param cpu_num: the num of online vcpus need to match
        """
        if not utils_misc.wait_for(
                lambda: utils_misc.check_if_vm_vcpu_match(cpu_num, vm),
                timeout=120, step=5, text="wait for vcpu online"):
            test.fail('Not all vcpus are online as expected.')

    def set_iommu(vmxml, **dargs):
        """

        Add iommu device to vm.

        :param vmxml: xml of vm to be add iommu device
        :param dargs: args of the iommu device
        :return:
        """
        logging.info('Add iommu device to vm.')
        iommu_device = Iommu()
        iommu_device.model = dargs.get('model', 'intel')
        iommu_device.driver = dargs.get('driver', {'intremap': 'on', 'eim': 'on'})
        vmxml.add_device(iommu_device)

    try:
        # Check the output of "virsh maxvcpus" for both i440fx and q35 VM
        if check == 'virsh_maxvcpus':
            report_num = params.get('report_num', '')
            logging.info('Check the output of virsh maxvcpus')
            cmd_result = virsh.maxvcpus(debug=True)
            if cmd_result.exit_status == 0 and cmd_result.stdout.strip() == report_num:
                logging.debug('Test passed as the reported max vcpu num is %s', report_num)
            else:
                test.fail('Test failed as the reported max vcpu num is not as expected.')

        # Check the output of "virsh capabilities" for both i440fx and q35 VM
        if check == "virsh_capabilities":
            report_num_pc_7 = params.get('report_num_pc_7', '')
            report_num_q35_73 = params.get('report_num_q35_73', '')
            report_num_q35_7_8 = params.get('report_num_q35_7_8', '')
            logging.info('Check the output of virsh capabilities')
            xmltreefile = capability_xml.CapabilityXML().xmltreefile
            machtype_vcpunum_dict = {}
            for guest in xmltreefile.findall('guest'):
                for arch in guest.findall('arch'):
                    if arch.get('name') == "x86_64":
                        for machine in arch.findall('machine'):
                            machine_text = machine.text
                            vcpunum = machine.get('maxCpus')
                            machtype_vcpunum_dict[machine_text] = vcpunum
            for key in machtype_vcpunum_dict:
                logging.info("%s : %s", key, machtype_vcpunum_dict[key])
                if key.startswith('pc-i440fx') or key.startswith('rhel') or key == 'pc':
                    if machtype_vcpunum_dict[key] != report_num_pc_7:
                        test.fail('Test failed as i440fx_max_vcpus_num in virsh_capa is wrong.')
                if key.startswith('pc-q35') or key == 'q35':
                    if key == "pc-q35-rhel7.3.0":
                        if machtype_vcpunum_dict[key] != report_num_q35_73:
                            test.fail('Test failed as q35_rhel73_max_vcpus_num in virsh_capa is wrong.')
                    else:
                        if machtype_vcpunum_dict[key] != report_num_q35_7_8:
                            test.fail('Test failed as the q35_max_vcpus_num in virsh_capa is wrong.')

        # Test i440fx VM starts with 240(positive)/241(negative) vcpus and hot-plugs vcpus to 240
        if check.startswith('i440fx_test'):
            current_vcpu = params.get('current_vcpu')
            if 'hotplug' not in check:
                vmxml.vcpu = int(guest_vcpu)
                vmxml.sync()
                if status_error:
                    if start_fail:
                        result_need_check = virsh.start(vm_name, debug=True)
                else:
                    vm.start()
                    logging.info(libvirt_xml.VMXML.new_from_dumpxml(vm_name))
                    vm.wait_for_login(timeout=boot_timeout).close()
                    check_onlinevcpus(vm, int(guest_vcpu))
            else:
                vmxml.vcpu = int(guest_vcpu)
                vmxml.current_vcpu = int(current_vcpu)
                vmxml.sync()
                vm.start()
                logging.info(libvirt_xml.VMXML.new_from_dumpxml(vm_name))
                vm.wait_for_login(timeout=boot_timeout).close()
                check_onlinevcpus(vm, int(current_vcpu))
                res = virsh.setvcpus(vm_name, guest_vcpu, debug=True)
                libvirt.check_exit_status(res)
                check_onlinevcpus(vm, int(guest_vcpu))

        # Configure a guest vcpu > 255 without iommu device for q35 VM
        if check == 'no_iommu':
            logging.info('Set vcpu to %s', guest_vcpu)
            vmxml.vcpu = int(guest_vcpu)
            result_need_check = virsh.define(vmxml.xml, debug=True)

        # Set iommu device but not set ioapci in features for q35 VM
        if check == 'with_iommu':
            logging.info('Set vcpu to %s', guest_vcpu)
            vmxml.vcpu = int(guest_vcpu)
            set_iommu(vmxml)
            result_need_check = virsh.define(vmxml.xml, debug=True)

        # Add ioapic and iommu device in xml for q35 VM
        if check.startswith('ioapic_iommu'):
            logging.info('Modify features')
            vm_features = vmxml.features
            vm_features.add_feature('apic')
            vm_features.add_feature('ioapic', 'driver', 'qemu')
            vmxml.features = vm_features
            logging.debug(vmxml.features.get_feature_list())

            logging.info('Set vcpu to %s', guest_vcpu)
            set_iommu(vmxml)

            ori_vcpu = vmxml.vcpu
            vmxml.vcpu = int(guest_vcpu)
            vmxml.current_vcpu = ori_vcpu

            if 'hotplug' not in check:
                vmxml.current_vcpu = int(guest_vcpu)

            vmxml.sync()
            logging.debug(virsh.dumpxml(vm_name))

            if status_error:
                if start_fail:
                    result_need_check = virsh.start(vm_name, debug=True)

            else:
                # Login guest and check guest cpu number
                vm.start()
                session = vm.wait_for_login(timeout=boot_timeout)
                logging.debug(session.cmd('lscpu -e'))

                # Hotplug vcpu to $guest_vcpu
                if 'hotplug' in check:
                    res = virsh.setvcpus(vm_name, guest_vcpu, debug=True)
                    libvirt.check_exit_status(res)

                # Check if vcpu(s) are online
                check_onlinevcpus(vm, int(guest_vcpu))

        # Check result if there's result to check
        if 'result_need_check' in locals():
            libvirt.check_result(result_need_check, err_msg)

    finally:
        bkxml.sync()
Example #42
def run(test, params, env):
    """
    Test the command virsh memtune

    1) Get the current memtune parameters
    2) Change the parameter values
    3) Check the memtune query updated with the values
    4) Check whether the mounted cgroup path gets the updated value
    5) Check the output of virsh dumpxml
    6) Check vm is alive
    """

    # Check that the memtune command is available in the libvirt version under test
    if not virsh.has_help_command("memtune"):
        test.cancel(
            "Memtune not available in this libvirt version")

    # Check if memtune options are supported
    for option in memtune_types:
        if not virsh.has_command_help_match("memtune", option):
            test.cancel("%s option not available in memtune "
                        "cmd in this libvirt version" % option)
    # Get common parameters
    acceptable_minus = int(params.get("acceptable_minus", 8))
    step_mem = params.get("mt_step_mem", "no") == "yes"
    expect_error = params.get("expect_error", "no") == "yes"
    restart_libvirtd = params.get("restart_libvirtd", "no") == "yes"
    set_one_line = params.get("set_in_one_command", "no") == "yes"
    mt_hard_limit = params.get("mt_hard_limit", None)
    mt_soft_limit = params.get("mt_soft_limit", None)
    mt_swap_hard_limit = params.get("mt_swap_hard_limit", None)
    # If restart_libvirtd is True, force set_one_line to True
    set_one_line = True if restart_libvirtd else set_one_line

    # Get the vm name, pid of vm and check for alive
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    pid = vm.get_pid()

    # Resolve the memory cgroup path for a domain
    path = utils_cgroup.resolve_task_cgroup_path(int(pid), "memory")

    # step_mem is used to do step increment limit testing
    if step_mem:
        mem_step(params, path, vm, test, acceptable_minus)
        return

    if not set_one_line:
        # Set one type of memtune limit per command
        if mt_hard_limit:
            index = 0
            mt_limit = mt_hard_limit
        elif mt_soft_limit:
            index = 1
            mt_limit = mt_soft_limit
        elif mt_swap_hard_limit:
            index = 2
            mt_limit = mt_swap_hard_limit
        mt_type = memtune_types[index]
        mt_cgname = memtune_cgnames[index]
        options = " --%s %s --live" % (mt_type, mt_limit)
        result = virsh.memtune_set(vm.name, options, debug=True)

        if expect_error:
            fail_patts = [params.get("error_info")]
            libvirt.check_result(result, fail_patts, [])
        else:
            # If the limit value is negative, it means there is no memtune limit
            mt_expected = mt_limit if int(mt_limit) > 0 else -1
            check_limit(path, mt_expected, mt_type, mt_cgname, vm, test,
                        acceptable_minus)
    else:
        # Set 3 limits in one command line
        mt_limits = [mt_hard_limit, mt_soft_limit, mt_swap_hard_limit]
        options = " %s --live" % ' '.join(mt_limits)
        result = virsh.memtune_set(vm.name, options, debug=True)

        if expect_error:
            fail_patts = [params.get("error_info")]
            libvirt.check_result(result, fail_patts, [])
        else:
            check_limits(path, mt_limits, vm, test, acceptable_minus)

        if restart_libvirtd:
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()

        if not expect_error:
            # After libvirtd is restarted, check the memtune values again
            check_limits(path, mt_limits, vm, test, acceptable_minus)
Example #43
    def create_pool():
        """
        Define and start a pool.
        """
        sp = libvirt_storage.StoragePool()
        if create_by_xml:
            p_xml = pool_xml.PoolXML(pool_type=pool_type)
            p_xml.name = pool_name
            s_xml = pool_xml.SourceXML()
            s_xml.vg_name = disk_src_pool
            source_host = []
            for (host_name, host_port) in zip(
                    disk_src_host.split(), disk_src_port.split()):
                source_host.append({'name': host_name,
                                    'port': host_port})

            s_xml.hosts = source_host
            if auth_type:
                s_xml.auth_type = auth_type
            if auth_user:
                s_xml.auth_username = auth_user
            if auth_usage:
                s_xml.secret_usage = auth_usage
            p_xml.source = s_xml
            logging.debug("Pool xml: %s", p_xml)
            p_xml.xmltreefile.write()
            ret = virsh.pool_define(p_xml.xml, **virsh_dargs)
            libvirt.check_exit_status(ret)
            ret = virsh.pool_build(pool_name, **virsh_dargs)
            libvirt.check_exit_status(ret)
            ret = virsh.pool_start(pool_name, **virsh_dargs)
            libvirt.check_exit_status(ret)
        else:
            auth_opt = ""
            if client_name and client_key:
                auth_opt = ("--auth-type %s --auth-username %s --secret-usage '%s'"
                            % (auth_type, auth_user, auth_usage))
            if not sp.define_rbd_pool(pool_name, mon_host,
                                      disk_src_pool, extra=auth_opt):
                test.fail("Failed to define storage pool")
            if not sp.build_pool(pool_name):
                test.fail("Failed to build storage pool")
            if not sp.start_pool(pool_name):
                test.fail("Failed to start storage pool")

        # Check pool operation
        ret = virsh.pool_refresh(pool_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.pool_uuid(pool_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        # pool-info
        pool_info = sp.pool_info(pool_name)
        if pool_info["Autostart"] != 'no':
            test.fail("Failed to check pool information")
        # pool-autostart
        if not sp.set_pool_autostart(pool_name):
            test.fail("Failed to set pool autostart")
        pool_info = sp.pool_info(pool_name)
        if pool_info["Autostart"] != 'yes':
            test.fail("Failed to check pool information")
        # pool-autostart --disable
        if not sp.set_pool_autostart(pool_name, "--disable"):
            test.fail("Failed to set pool autostart")
        # If port is not pre-configured, port value should not be hardcoded in pool information.
        if "yes" == params.get("rbd_port", "no"):
            if 'port' in virsh.pool_dumpxml(pool_name):
                test.fail("port attribute should not be in pool information")
        # find-storage-pool-sources-as
        if "yes" == params.get("find_storage_pool_sources_as", "no"):
            ret = virsh.find_storage_pool_sources_as("rbd", mon_host)
            libvirt.check_result(ret, skip_if=unsupported_err)
Example #44
def run(test, params, env):
    """
    Test command: virsh guestvcpus

    The command queries or modifies the state of vCPUs in the VM
    1. Prepare test environment, start vm with guest agent
    2. Perform virsh guestvcpus query/enable/disable operation
    3. Check the cpus in the vm
    4. Recover test environment
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vcpus_num = int(params.get("vcpus_num", "4"))
    vcpus_placement = params.get("vcpus_placement", "static")
    option = params.get("option", "")
    combine = params.get("combine", "")
    invalid_domain = params.get("invalid_domain", "")
    domain_name = params.get("domain_name", "")
    invalid_cpulist = params.get("invalid_cpulist", "")
    status_error = params.get("status_error", "no")
    error_msg = params.get("error_msg", "no")
    vcpus_list = ""

    # Back up domain XML
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_bakup = vmxml.copy()

    try:
        # Modify vm with static vcpus
        if vm.is_alive():
            vm.destroy()
        vmxml.placement = vcpus_placement
        vmxml.set_vm_vcpus(vm_name, vcpus_num, vcpus_num)
        logging.debug("Define guest with '%s' vcpus" % str(vcpus_num))

        # Start guest agent in vm
        vm.prepare_guest_agent(prepare_xml=False)

        # Normal test: disable/ enable guest vcpus
        if option and status_error == "no":
            for cpu in range(1, vcpus_num):
                virsh.guestvcpus(vm_name, str(cpu), option, debug=True)

        # Normal test: combine: --disable 1-max then --enable 1
        if combine == "yes" and status_error == "no":
            vcpus_list = '1' + '-' + str(vcpus_num - 1)
            option = "--disable"
            virsh.guestvcpus(vm_name, vcpus_list, option, debug=True)
            vcpus_list = '1'
            option = "--enable"
            virsh.guestvcpus(vm_name, vcpus_list, option, debug=True)

        # Error test: invalid_domain
        if invalid_domain == "yes":
            vm_name = domain_name
        # Error test: invalid_cpulist
        if invalid_cpulist == "yes":
            if option == "--enable":
                vcpus_list = str(vcpus_num)
            else:
                vcpus_list = '0' + '-' + str(vcpus_num - 1)
            ret = virsh.guestvcpus(vm_name, vcpus_list, option)
        else:
            # Query guest vcpus
            ret = virsh.guestvcpus(vm_name)
            output = ret.stdout.strip()

        # Check test results
        if status_error == "yes":
            libvirt.check_result(ret, error_msg)
        else:
            # Check the test result of query
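            # virsh guestvcpus output typically looks like:
            #   vcpus     : 0-3
            #   online    : 0-3
            #   offlinable: 1-3
            # which the dict comprehension below turns into
            # {'vcpus': '0-3', 'online': '0-3', 'offlinable': '1-3'}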
            ret_output = dict([item.strip() for item in line.split(":")]
                              for line in output.split("\n"))
            if combine == "yes":
                online_vcpus = '0-1'
            elif option == "--disable":
                online_vcpus = '0'
            else:
                online_vcpus = '0' + '-' + str(vcpus_num - 1)

            if ret_output["online"] != online_vcpus:
                test.fail("Query result is different from"
                          " the '%s' command." % option)

            # Check the cpu in guest
            session = vm.wait_for_login()
            vm_cpu_info = utils_misc.get_cpu_info(session)
            session.close()

            if combine == "yes":
                online_vcpus = '0,1'
                offline_vcpus = '2' + ',' + str(vcpus_num - 1)
            elif option == "--disable":
                online_vcpus = '0'
                offline_vcpus = '1' + '-' + str(vcpus_num - 1)
            else:
                online_vcpus = '0' + '-' + str(vcpus_num - 1)
                offline_vcpus = ""

            if offline_vcpus:
                if (vm_cpu_info["Off-line CPU(s) list"] != offline_vcpus or
                   vm_cpu_info["On-line CPU(s) list"] != online_vcpus):
                    test.fail("CPUs in vm is different from"
                              " the %s command." % option)
            elif vm_cpu_info["On-line CPU(s) list"] != online_vcpus:
                test.fail("On-line CPUs in vm is different"
                          " from the %s command." % option)
            else:
                logging.debug("lscpu in vm '%s' is: \n '%s'" %
                              (vm_name, vm_cpu_info))

    finally:
        # Recover VM
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.info("Restoring vm...")
        vmxml_bakup.sync()
Example #45
def run(test, params, env):
    """
    This test virsh domtime command and its options.

    1) Start a guest with/without guest agent configured;
    2) Record guest times;
    3) Do some operation to stop VM;
    4) Run virsh domtime command with different options;
    5) Check the command result;
    6) Check the guest times against expectation;
    7) Cleanup test environment.
    """
    epoch = datetime.datetime(1970, 1, 1, 0, 0, 0)
    # Max time can be set with domtime successfully in newer qemu-ga
    time_max_1 = 3155731199
    # Max time can be set with domtime successfully in older qemu-ga
    time_max_2 = 3155759999
    # Max time can be set with domtime bug failed to set RTC in older qemu-ga
    time_max_3 = 9223372035

    def init_time(session):
        """
        Initialize guest RTC time to epoch + 1234567890 and system time
        one day later.

        :param session: Session from which to access guest
        """
        res = virsh.domtime(vm_name, time=1234567890)
        if res.exit_status:
            logging.debug("Failed to init time to 1234567890:\n%s", res)
        status, output = session.cmd_status_output('date -s "1 day"')
        if status:
            test.error("Failed to set guest time:\n%s" % output)

    def get_host_utc_time():
        """
        Get host UTC time from date command.
        """
        res = process.run("date -u", shell=True)
        # Strip timezone info from output
        # e.g. 'Sun Feb 15 07:31:40 CST 2009' -> 'Sun Feb 15 07:31:40 2009'
        time_str = re.sub(r'\S+ (?=\S+$)', '', res.stdout_text.strip())
        return datetime.datetime.strptime(time_str,
                                          r"%a %b %d %H:%M:%S %Y")

    def run_cmd(session, cmd):
        """
        Run a command in a session and record duration of call.
        """
        start = time.time()
        output = session.cmd_output(cmd)
        duration = time.time() - start
        logging.info('Result of command "%s". Duration: %s. Output:%s',
                     cmd, duration, output.strip())
        return output, duration

    def get_guest_times(session):
        """
        Retrieve different guest time as a dict for checking.
        Keys:
            local_hw: Guest RTC time in local timezone
            local_sys: Guest system time in local timezone
            utc_sys: Guest system time in UTC
            domtime: Guest system time in UTC got from virsh domtime command

        :param session: Session from which to access guest
        """
        times = {}
        get_begin = time.time()
        # Guest RTC local timezone time
        output, _ = run_cmd(session, 'hwclock')
        try:
            time_str, _ = re.search(r"(.+)  (\S+ seconds)", output).groups()

            try:
                # output format 1: Tue 01 Mar 2016 01:53:46 PM CST
                # Remove timezone info from output
                new_str = re.sub(r'\s+\S+$', '', time_str)
                times['local_hw'] = datetime.datetime.strptime(
                    new_str, r"%a %d %b %Y %I:%M:%S %p")
            except ValueError:
                # There are three known output formats for `hwclock`
                # output format 2: Sat Feb 14 07:31:33 2009
                times['local_hw'] = datetime.datetime.strptime(
                    time_str, r"%a %b %d %H:%M:%S %Y")
        except AttributeError:
            try:  # output format 3: 2019-03-22 05:16:18.224511-04:00
                time_str = output.split(".")[0]
                times['local_hw'] = datetime.datetime.strptime(
                    time_str, r"%Y-%m-%d %H:%M:%S")
            except ValueError:
                test.fail("Unknown hwclock output format in guest: %s", output)
        delta = time.time() - get_begin
        times['local_hw'] -= datetime.timedelta(seconds=delta)

        # Guest system local timezone time
        output, _ = run_cmd(session, 'date')
        # Strip timezone info from output
        # e.g. 'Sun Feb 15 07:31:40 CST 2009' -> 'Sun Feb 15 07:31:40 2009'
        time_str = re.sub(r'\S+ (?=\S+$)', '', output.strip())
        times['local_sys'] = datetime.datetime.strptime(
            time_str, r"%a %b %d %H:%M:%S %Y")
        delta = time.time() - get_begin
        times['local_sys'] -= datetime.timedelta(seconds=delta)

        # Guest system UTC timezone time
        output, _ = run_cmd(session, 'date -u')
        # Strip timezone info from output
        # e.g. 'Sun Feb 15 07:31:40 CST 2009' -> 'Sun Feb 15 07:31:40 2009'
        time_str = re.sub(r'\S+ (?=\S+$)', '', output.strip())
        times['utc_sys'] = datetime.datetime.strptime(
            time_str, r"%a %b %d %H:%M:%S %Y")
        delta = time.time() - get_begin
        times['utc_sys'] -= datetime.timedelta(seconds=delta)

        # Guest UTC time from virsh domtime
        res = virsh.domtime(vm_name, pretty=True, ignore_status=True)
        if not res.exit_status:
            logging.info('Result of "domtime". Duration: %s. Output:%s',
                         res.duration, res.stdout.strip())
            _, time_str = res.stdout.split(" ", 1)
            times['domtime'] = datetime.datetime.strptime(
                time_str.strip(), r"%Y-%m-%d %H:%M:%S")
            delta = time.time() - get_begin
            times['domtime'] -= datetime.timedelta(seconds=delta)
        else:
            logging.debug("Unable to get domain time:\n%s", res)
            times['domtime'] = None

        return times, time.time() - get_begin

    def check_get_success(expected_times):
        """
        Check virsh command get result against expected times

        :param expected_times: Expected time for checking
        """
        _, time_str = res.stdout.split(" ", 1)
        if pretty:
            # Time: 2015-01-13 06:29:18
            domtime = datetime.datetime.strptime(time_str.strip(),
                                                 r"%Y-%m-%d %H:%M:%S")
        else:
            # Time: 1421130740
            domtime = epoch + datetime.timedelta(seconds=int(time_str))
        time_shift = time.time() - start
        logging.debug("Time shift is %s", time_shift)
        result_diff = (domtime - expected_times['domtime']).total_seconds()
        if abs(result_diff) > 2.0:
            test.fail("Expect get time %s, but got %s, time "
                      "diff: %s" % (org_times['domtime'],
                                    domtime, result_diff))

    def check_guest_times(expected_times, cur_times):
        """
        Check guest times after test against expected times

        :param expected_times: Expected time for checking
        """
        time_shift = time.time() - start
        logging.debug("Time shift is %s", time_shift)

        error_msgs = []
        for key in cur_times:
            if cur_times[key] is not None:
                cur = cur_times[key]
                expect = expected_times[key]

                diff = (cur - expect).total_seconds()
                msg = "For %s, expect get time %s, got %s, time diff: %s" % (
                    key, expect, cur, diff)
                logging.debug(msg)
                if abs(diff) > 2.0:
                    error_msgs.append(msg)
        if error_msgs:
            test.fail('\n'.join(error_msgs))

    def check_time(result, org_times, cur_times):
        """
        Check whether domain time has been change accordingly.

        :param result: virsh domtime CmdResult instance
        :param org_times: Original guest times
        """
        action = "get"
        if now or sync or (set_time is not None):
            action = "set"

        host_tz_diff = org_host_loc_time - org_host_time
        logging.debug("Timezone diff on host is %d hours.",
                      (host_tz_diff.total_seconds() // 3600))

        # Hardware time will never stop
        logging.info('Add %ss to expected guest time', interval)
        if action == 'get':
            expected_times = org_times
        elif action == 'set':
            if result.exit_status:
                # Time does not change if domtime fails
                expected_times = org_times
            else:
                # Time changes accordingly if it succeeds.
                if now:
                    utc_time = org_host_time
                    local_time = utc_time + guest_tz_diff
                elif sync:
                    local_time = org_times["local_hw"]
                    utc_time = local_time - guest_tz_diff
                elif set_time is not None:
                    utc_time = epoch + datetime.timedelta(
                        seconds=(int(set_time) - guest_duration))
                    local_time = utc_time + guest_tz_diff
                expected_times = {}
                expected_times['local_hw'] = local_time
                expected_times['local_sys'] = local_time
                expected_times["utc_sys"] = utc_time
                expected_times["domtime"] = utc_time

        # Add interval between two checks of guest time
        for key in expected_times:
            if expected_times[key] is not None:
                expected_times[key] += interval

        # Hardware time will never stop
        # Software time will stop if suspended or managed-saved
        if suspend or managedsave:
            logging.info('Remove %ss from expected guest software time',
                         stop_time)
            expected_times["domtime"] -= stop_time
            expected_times["local_sys"] -= stop_time
            expected_times["utc_sys"] -= stop_time

        # Check guest time if domtime succeeded
        check_guest_times(expected_times, cur_times)

        # Check if output of domtime is correct
        if action == 'get' and not result.exit_status:
            check_get_success(expected_times)

    def prepare_fail_patts():
        """
        Predict fail pattern from test parameters.
        """
        fail_patts = []
        if not channel:
            fail_patts.append(r"QEMU guest agent is not configured")
        if not agent:
            # For older version
            fail_patts.append(r"Guest agent not available for now")
            # For newer version
            fail_patts.append(r"Guest agent is not responding")
        if int(now) + int(sync) + int(bool(set_time)) > 1:
            fail_patts.append(r"Options \S+ and \S+ are mutually exclusive")
        if shutdown:
            fail_patts.append(r"domain is not running")

        if set_time is not None:
            if int(set_time) < 0:
                fail_patts.append(r"Invalid argument")
            elif time_max_1 < int(set_time) <= time_max_2:
                fail_patts.append(r"Invalid time")
            elif time_max_2 < int(set_time) <= time_max_3:
                fail_patts.append(r"Invalid time")
            elif time_max_3 < int(set_time):
                fail_patts.append(r"too big for guest agent")

        if readonly:
            fail_patts.append(r"operation forbidden")
        return fail_patts

    def stop_vm():
        """
        Suspend, managedsave, pmsuspend or shutdown a VM for a period of time
        """
        stop_start = time.time()
        vmlogin_dur = 0.0
        if suspend:
            vm.pause()
            time.sleep(vm_stop_duration)
            vm.resume()
        elif managedsave:
            vm.managedsave()
            time.sleep(vm_stop_duration)
            vm.start()
            start_dur = time.time()
            vm.wait_for_login()
            vmlogin_dur = time.time() - start_dur
        elif pmsuspend:
            vm.pmsuspend()
            time.sleep(vm_stop_duration)
            vm.pmwakeup()
            start_dur = time.time()
            vm.wait_for_login()
            vmlogin_dur = time.time() - start_dur
        elif shutdown:
            vm.destroy()

        # Check real guest stop time
        stop_seconds = time.time() - stop_start - vmlogin_dur
        stop_time = datetime.timedelta(seconds=stop_seconds)
        logging.debug("Guest stopped: %s", stop_time)
        return stop_time

    # Check availability of virsh command domtime
    if not virsh.has_help_command('domtime'):
        test.cancel("This version of libvirt does not support "
                    "the domtime test")

    channel = (params.get("prepare_channel", "yes") == 'yes')
    agent = (params.get("start_agent", "yes") == 'yes')
    pretty = (params.get("domtime_pretty", "no") == 'yes')
    now = (params.get("domtime_now", "no") == 'yes')
    sync = (params.get("domtime_sync", "no") == 'yes')
    set_time = params.get("domtime_time", None)

    shutdown = (params.get("shutdown_vm", "no") == 'yes')
    suspend = (params.get("suspend_vm", "no") == 'yes')
    managedsave = (params.get("managedsave_vm", "no") == 'yes')
    pmsuspend = (params.get("pmsuspend_vm", "no") == 'yes')
    vm_stop_duration = int(params.get("vm_stop_duration", "10"))

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    readonly = (params.get("readonly_test", "no") == "yes")

    # Backup domain XML
    xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    try:
        if pmsuspend:
            vm_xml.VMXML.set_pm_suspend(vm_name)
        # Add or remove qemu-agent from guest before test
        vm.prepare_guest_agent(channel=channel, start=agent)
        session = vm.wait_for_login()
        # Set the guest timezone to a region without daylight saving
        # time, so DST shifts do not affect the time checks
        session.cmd("timedatectl set-timezone Asia/Kolkata")
        try:
            init_guest_times, _ = get_guest_times(session)
            guest_tz_diff = init_guest_times['local_sys'] - init_guest_times['utc_sys']
            logging.debug("Timezone diff on guest is %d hours.",
                          (guest_tz_diff.total_seconds() // 3600))

            if channel and agent:
                init_time(session)

            # Expected fail message patterns
            fail_patts = prepare_fail_patts()

            # Message patterns test should skip when met
            skip_patts = [
                r'The command \S+ has not been found',
            ]

            # Record start time
            start = time.time()

            # Record host utc time before testing
            org_host_time = get_host_utc_time()

            # Record host local time before testing
            outp = process.run('date', shell=True)
            time_st = re.sub(r'\S+ (?=\S+$)', '', outp.stdout_text.strip())
            org_host_loc_time = datetime.datetime.strptime(time_st, r"%a %b %d %H:%M:%S %Y")

            # Get original guest times
            org_times, guest_duration = get_guest_times(session)

            # Run some operations to stop guest system
            stop_time = stop_vm()

            # Run command with specified options.
            res = virsh.domtime(vm_name, now=now, pretty=pretty, sync=sync,
                                time=set_time, readonly=readonly, debug=True)
            libvirt.check_result(res, fail_patts, skip_patts)

            # Interval between the two checks of guest time
            interval = datetime.timedelta(
                seconds=(time.time() - start))
            logging.debug("Interval between guest checking: %s", interval)

            if not shutdown:
                # Get current guest times
                cur_times, _ = get_guest_times(session)

                check_time(res, org_times, cur_times)
        finally:
            if shutdown:
                vm.start()
            # sync guest timezone
            utils_time.sync_timezone_linux(vm)
            # Sync guest time with host
            if channel and agent and not shutdown:
                res = virsh.domtime(vm_name, now=True)
                if res.exit_status:
                    session.close()
                    test.error("Failed to recover guest time:\n%s"
                               % res)
            session.close()
    finally:
        # Restore VM XML
        xml_backup.sync()
Example #46
def run(test, params, env):
    """
    Test command: virsh shutdown.

    The command gracefully shuts down a domain.

    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Perform virsh shutdown operation.
    4.Recover test environment.
    5.Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vm_ref = params.get("shutdown_vm_ref")
    status_error = ("yes" == params.get("status_error"))
    agent = ("yes" == params.get("shutdown_agent", "no"))
    mode = params.get("shutdown_mode", "")
    pre_domain_status = params.get("reboot_pre_domian_status", "running")
    libvirtd = params.get("libvirtd", "on")
    xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    timeout = eval(params.get("shutdown_timeout", "60"))
    readonly = "yes" == params.get("shutdown_readonly", "no")
    expect_msg = params.get("shutdown_err_msg")

    # Libvirt acl test related params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in"
                        " current libvirt version.")

    try:
        # Add or remove qemu-agent from guest before test
        vm.prepare_guest_agent(channel=agent, start=agent)
        if pre_domain_status == "shutoff":
            virsh.destroy(vm_name)
        domid = vm.get_id()
        domuuid = vm.get_uuid()
        # run test case
        if vm_ref == "id":
            vm_ref = domid
        elif vm_ref == "hex_id":
            vm_ref = hex(int(domid))
        elif vm_ref.find("invalid") != -1:
            vm_ref = params.get(vm_ref)
        elif vm_ref == "name":
            vm_ref = "%s %s" % (vm_name, params.get("shutdown_extra"))
        elif vm_ref == "uuid":
            vm_ref = domuuid

        if libvirtd == "off":
            utils_libvirtd.libvirtd_stop()

        if vm_ref != "remote":
            result = virsh.shutdown(vm_ref, mode,
                                    unprivileged_user=unprivileged_user,
                                    uri=uri, debug=True,
                                    ignore_status=True,
                                    readonly=readonly)
            status = result.exit_status
        else:
            remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
            remote_pwd = params.get("remote_pwd", None)
            remote_user = params.get("remote_user", "root")
            local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
            local_pwd = params.get("local_pwd", "password")
            local_user = params.get("username", "root")
            if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
                test.cancel("Remote test parameters"
                            " unchanged from default")
            status = 0
            try:
                remote_uri = libvirt_vm.complete_uri(local_ip)
                # set up auto ssh login from remote machine to
                # execute commands
                config_opt = ["StrictHostKeyChecking=no"]
                ssh_key.setup_remote_ssh_key(remote_ip, remote_user,
                                             remote_pwd, hostname2=local_ip,
                                             user2=local_user,
                                             password2=local_pwd,
                                             config_options=config_opt)
                session = remote.remote_login("ssh", remote_ip, "22", "root",
                                              remote_pwd, "#")
                session.cmd_output('LANG=C')
                command = ("virsh -c %s shutdown %s %s"
                           % (remote_uri, vm_name, mode))
                status = session.cmd_status(command, internal_timeout=5)
                session.close()
            except process.CmdError:
                status = 1

        # recover libvirtd service start
        if libvirtd == "off":
            utils_libvirtd.libvirtd_start()

        # check status_error
        if status_error:
            if not status:
                test.fail("Run successfully with wrong command!")
            if expect_msg:
                libvirt.check_result(result, expect_msg.split(';'))
        else:
            if status:
                test.fail("Run failed with right command")
            if not vm.wait_for_shutdown(timeout):
                test.fail("Failed to shutdown in timeout %s" % timeout)
    finally:
        if utils_misc.wait_for(utils_libvirtd.libvirtd_is_running, 60):
            xml_backup.sync()
Example #47
def run(test, params, env):
    """
    Test command: virsh setmem.

    1) Prepare vm environment.
    2) Handle params
    3) Prepare libvirtd status.
    4) Run test command and wait for current memory to stabilize.
    5) Recover environment.
    6) Check result.
    """

    def get_vm_usable_mem(session):
        """
        Get total usable RAM from /proc/meminfo
        """
        cmd = "cat /proc/meminfo"
        proc_mem = session.cmd_output(cmd)
        total_usable_mem = re.search(r'MemTotal:\s+(\d+)\s+[kK]B',
                                     proc_mem).group(1)
        return int(total_usable_mem)

    def vm_unusable_mem(session):
        """
        Get the unusable RAM of the VM.
        """
        # Get total physical memory from dmidecode
        cmd = "dmidecode -t 17"
        dmi_mem = session.cmd_output(cmd)
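        # dmidecode -t 17 lists the memory devices; the "Size: <n> MB" lines
        # of the populated DIMMs are summed to get the physical memory
        # presented to the guest.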
        total_physical_mem = reduce(lambda x, y: int(x) + int(y),
                                    re.findall(r'Size:\s(\d+)\sMB', dmi_mem))
        return int(total_physical_mem) * 1024 - get_vm_usable_mem(session)

    def make_domref(domarg, vm_ref, domid, vm_name, domuuid):
        """
        Create domain options of command
        """
        # Specify domain as argument or parameter
        if domarg == "yes":
            dom_darg_key = "domainarg"
        else:
            dom_darg_key = "domain"

        # How to reference domain
        if vm_ref == "domid":
            dom_darg_value = domid
        elif vm_ref == "domname":
            dom_darg_value = vm_name
        elif vm_ref == "domuuid":
            dom_darg_value = domuuid
        elif vm_ref == "none":
            dom_darg_value = None
        elif vm_ref == "emptystring":
            dom_darg_value = '""'
        else:  # stick in value directly
            dom_darg_value = vm_ref

        return {dom_darg_key: dom_darg_value}
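        # e.g. make_domref("yes", "domname", "3", "vm1", "<uuid>")
        #      returns {'domainarg': 'vm1'}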

    def make_sizeref(sizearg, mem_ref, original_mem):
        """
        Create size options of command
        """
        if sizearg == "yes":
            size_darg_key = "sizearg"
        else:
            size_darg_key = "size"

        if mem_ref == "halfless":
            size_darg_value = "%d" % (original_mem // 2)
        elif mem_ref == "halfmore":
            size_darg_value = "%d" % int(original_mem * 1.5)  # no fraction
        elif mem_ref == "same":
            size_darg_value = "%d" % original_mem
        elif mem_ref == "emptystring":
            size_darg_value = '""'
        elif mem_ref == "zero":
            size_darg_value = "0"
        elif mem_ref == "toosmall":
            size_darg_value = "1024"
        elif mem_ref == "toobig":
            size_darg_value = "1099511627776"  # (KiB) One Petabyte
        elif mem_ref == "none":
            size_darg_value = None
        else:  # stick in value directly
            size_darg_value = mem_ref

        return {size_darg_key: size_darg_value}
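        # e.g. make_sizeref("no", "halfless", 1048576) returns {'size': '524288'}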

    def cal_deviation(actual, expected):
        """
        Calculate deviation of actual result and expected result
        """
        numerator = float(actual)
        denominator = float(expected)
        if numerator > denominator:
            numerator = denominator
            denominator = float(actual)
        return 100 - (100 * (numerator / denominator))
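        # e.g. cal_deviation(900, 1000) and cal_deviation(1000, 900)
        # both give a deviation of roughly 10.0 (percent)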

    def is_old_libvirt():
        """
        Check if libvirt is old version
        """
        regex = r'\s+\[--size\]\s+'
        return bool(not virsh.has_command_help_match('setmem', regex))

    def print_debug_stats(original_inside_mem, original_outside_mem,
                          test_inside_mem, test_outside_mem,
                          expected_outside_mem, expected_inside_mem,
                          delta_percentage, unusable_mem):
        """
        Print debug message for test
        """
        # Calculate deviation
        inside_deviation = cal_deviation(test_inside_mem, expected_inside_mem)
        outside_deviation = cal_deviation(test_outside_mem, expected_outside_mem)
        dbgmsg = ("Unusable memory of VM   : %d KiB\n"
                  "Original inside memory  : %d KiB\n"
                  "Expected inside memory  : %d KiB\n"
                  "Actual inside memory    : %d KiB\n"
                  "Inside memory deviation : %0.2f%%\n"
                  "Original outside memory : %d KiB\n"
                  "Expected outside memory : %d KiB\n"
                  "Actual outside memory   : %d KiB\n"
                  "Outside memory deviation: %0.2f%%\n"
                  "Acceptable deviation    : %0.2f%%" % (
                      unusable_mem,
                      original_inside_mem,
                      expected_inside_mem,
                      test_inside_mem,
                      inside_deviation,
                      original_outside_mem,
                      expected_outside_mem,
                      test_outside_mem,
                      outside_deviation,
                      delta_percentage))
        for dbgline in dbgmsg.splitlines():
            logging.debug(dbgline)

    # MAIN TEST CODE ###
    # Process cartesian parameters
    vm_ref = params.get("setmem_vm_ref", "")
    mem_ref = params.get("setmem_mem_ref", "")
    flags = params.get("setmem_flags", "")
    status_error = "yes" == params.get("status_error", "no")
    old_libvirt_fail = "yes" == params.get("setmem_old_libvirt_fail", "no")
    quiesce_delay = int(params.get("setmem_quiesce_delay", "1"))
    domarg = params.get("setmem_domarg", "no")
    sizearg = params.get("setmem_sizearg", "no")
    libvirt_status = params.get("libvirt", "on")
    delta_percentage = float(params.get("setmem_delta_per", "10"))
    start_vm = "yes" == params.get("start_vm", "yes")
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    paused_after_start_vm = "yes" == params.get("paused_after_start_vm", "no")
    manipulate_dom_before_setmem = "yes" == params.get(
        "manipulate_dom_before_setmem", "no")
    manipulate_dom_after_setmem = "yes" == params.get(
        "manipulate_dom_after_setmem", "no")
    manipulate_action = params.get("manipulate_action", "")
    readonly = "yes" == params.get("setmem_readonly", "no")
    expect_msg = params.get("setmem_err_msg")

    vm = env.get_vm(vm_name)
    # Back up domain XML
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    vmosxml = vmxml.os
    need_mkswap = False
    if manipulate_action in ['s3', 's4']:
        vm.destroy()
        BIOS_BIN = "/usr/share/seabios/bios.bin"
        if os.path.isfile(BIOS_BIN):
            vmosxml.loader = BIOS_BIN
            vmxml.os = vmosxml
            vmxml.sync()
        else:
            logging.error("Not find %s on host", BIOS_BIN)
        vmxml.set_pm_suspend(vm_name, "yes", "yes")
        vm.prepare_guest_agent()
        if manipulate_action == "s4":
            need_mkswap = not vm.has_swap()
        if need_mkswap:
            logging.debug("Creating swap partition")
            vm.create_swap_partition()

    memballoon_model = params.get("memballoon_model", "")
    if memballoon_model:
        vm.destroy()
        vmxml.del_device('memballoon', by_tag=True)
        memballoon_xml = vmxml.get_device_class('memballoon')()
        memballoon_xml.model = memballoon_model
        vmxml.add_device(memballoon_xml)
        logging.info(memballoon_xml)
        vmxml.sync()
        vm.start()

    remove_balloon_driver = "yes" == params.get("remove_balloon_driver", "no")
    if remove_balloon_driver:
        if not vm.is_alive():
            logging.error("Can't remove module as guest not running")
        else:
            session = vm.wait_for_login()
            cmd = "rmmod virtio_balloon"
            s_rmmod, o_rmmod = session.cmd_status_output(cmd)
            if s_rmmod != 0:
                logging.error("Fail to remove module virtio_balloon in guest:\n%s",
                              o_rmmod)
            session.close()
    # Get original data
    domid = vm.get_id()
    domuuid = vm.get_uuid()
    uri = vm.connect_uri
    if not vm.is_alive():
        vm.start()
    session = vm.wait_for_login()
    if session.cmd_status('dmidecode'):
        # The physical memory size is in the vm xml; use it when dmidecode
        # is not supported
        unusable_mem = int(vmxml.max_mem) - get_vm_usable_mem(session)
    else:
        unusable_mem = vm_unusable_mem(session)
    original_outside_mem = vm.get_used_mem()
    original_inside_mem = get_vm_usable_mem(session)
    session.close()
    # Prepare VM state
    if not start_vm:
        vm.destroy()
    else:
        if paused_after_start_vm:
            vm.pause()
    old_libvirt = is_old_libvirt()
    if old_libvirt:
        logging.info("Running test on older libvirt")
        use_kilobytes = True
    else:
        logging.info("Running test on newer libvirt")
        use_kilobytes = False

    # Argument pattern is complex, build with dargs
    dargs = {'flagstr': flags,
             'use_kilobytes': use_kilobytes,
             'uri': uri, 'ignore_status': True, "debug": True,
             'readonly': readonly}
    dargs.update(make_domref(domarg, vm_ref, domid, vm_name, domuuid))
    dargs.update(make_sizeref(sizearg, mem_ref, original_outside_mem))

    # Prepare libvirtd status
    libvirtd = utils_libvirtd.Libvirtd()
    if libvirt_status == "off":
        libvirtd.stop()
    else:
        if not libvirtd.is_running():
            libvirtd.start()

    if status_error or (old_libvirt_fail and old_libvirt):
        logging.info("Error Test: Expecting an error to occur!")

    try:
        memory_change = True
        if manipulate_dom_before_setmem:
            manipulate_domain(test, vm_name, manipulate_action)
            if manipulate_action in ['save', 'managedsave', 's4']:
                memory_change = False

        result = virsh.setmem(**dargs)
        status = result.exit_status

        if status == 0:
            logging.info(
                "Waiting %d seconds for VM memory to settle", quiesce_delay)
            # It takes time for the kernel to settle on the new memory and
            # the amount of clean pages is not predictable, so quiescence is
            # hard to determine; sleeping one second per error percent is a
            # reasonable option.
            time.sleep(quiesce_delay)

        if manipulate_dom_before_setmem:
            manipulate_domain(test, vm_name, manipulate_action, True)
        if manipulate_dom_after_setmem:
            manipulate_domain(test, vm_name, manipulate_action)
            manipulate_domain(test, vm_name, manipulate_action, True)

        # Recover libvirtd status
        if libvirt_status == "off":
            libvirtd.start()

        # Gather stats if not running error test
        if not status_error and not old_libvirt_fail:
            # Expected results for both inside and outside
            if remove_balloon_driver:
                expected_mem = original_outside_mem
            else:
                if not memory_change:
                    expected_mem = original_inside_mem
                elif sizearg == "yes":
                    expected_mem = int(dargs["sizearg"])
                else:
                    expected_mem = int(dargs["size"])
            if memory_change:
                # Should minus unusable memory for inside memory check
                expected_inside_mem = expected_mem - unusable_mem
                expected_outside_mem = expected_mem
            else:
                expected_inside_mem = expected_mem
                expected_outside_mem = original_outside_mem

            def get_vm_mem():
                """
                Get the actual memory for both the inside and outside checks.
                :return: tuple of (test_inside_mem, test_outside_mem)
                """
                if not memory_change:
                    test_inside_mem = original_inside_mem
                    test_outside_mem = original_outside_mem
                else:
                    if vm.state() == "shut off":
                        vm.start()
                    elif vm.state() == "paused":
                        # Make sure it's never paused
                        vm.resume()
                    session = vm.wait_for_login()

                    # Actual results
                    test_inside_mem = get_vm_usable_mem(session)
                    session.close()
                    test_outside_mem = vm.get_used_mem()
                return (test_inside_mem, test_outside_mem)

            # Don't care about memory comparison on error test
            def verify_outside_result():
                _, test_outside_mem = get_vm_mem()
                return(cal_deviation(test_outside_mem, expected_outside_mem) <= delta_percentage)

            def verify_inside_result():
                test_inside_mem, _ = get_vm_mem()
                return(cal_deviation(test_inside_mem, expected_inside_mem) <= delta_percentage)

            msg = "test conditions not met: "
            error_flag = 0
            if status != 0:
                error_flag = 1
                msg += "Non-zero virsh setmem exit code. "
            if not utils_misc.wait_for(verify_outside_result, timeout=240):
                error_flag = 1
                msg += "Outside memory deviated. "
            if not utils_misc.wait_for(verify_inside_result, timeout=240):
                error_flag = 1
                msg += "Inside memory deviated. "

            test_inside_mem, test_outside_mem = get_vm_mem()
            print_debug_stats(original_inside_mem, original_outside_mem,
                              test_inside_mem, test_outside_mem,
                              expected_outside_mem, expected_inside_mem,
                              delta_percentage, unusable_mem)
            if error_flag:
                test.fail(msg)
        elif not status_error and old_libvirt_fail:
            if status == 0:
                if old_libvirt:
                    test.fail("Error test did not result in an error")
            else:
                if not old_libvirt:
                    test.fail("Newer libvirt failed when it should not")
        else:  # Verify an error test resulted in error
            if status == 0:
                test.fail("Error test did not result in an error")
            if expect_msg:
                libvirt.check_result(result, expect_msg.split(';'))
    finally:
        if need_mkswap:
            vm.cleanup_swap()
        vm.destroy()
        backup_xml.sync()
Example #48
def run(test, params, env):
    """
    This test virsh domfsfreeze and domfsthaw commands and their options.

    1) Start a guest with/without guest agent configured;
    2) Freeze the guest file systems with domfsfreeze;
    3) Create a file on guest to see command hang;
    4) Thaw the guest file systems with domfsthaw;
    5) Check the file is already created;
    6) Retouch the file to ensure guest file systems are not frozen;
    7) Cleanup test environment.
    """
    def check_freeze(session):
        """
        Check whether the file system has been frozen by touching a test
        file and seeing whether the command hangs.

        :param session: Guest session to be tested.
        """
        try:
            output = session.cmd_output('touch freeze_test',
                                        timeout=10)
            test.fail("Failed to freeze file system. "
                      "Create file succeeded:\n%s" % output)
        except aexpect.ShellTimeoutError:
            pass

    def check_thaw(session):
        """
        Check whether the file system has been thawed by verifying that the
        test file whose creation was blocked while frozen now exists, and by
        successfully touching the file again.

        :param session: Guest session to be tested.
        """
        status, output = session.cmd_status_output('ls freeze_test')
        if status:
            test.fail("Failed to thaw file system. "
                      "Find created file failed:\n%s" % output)

        try:
            output = session.cmd_output('touch freeze_test', timeout=10)
        except aexpect.ShellTimeoutError:
            test.fail("Failed to freeze file system. "
                      "Touch file timeout:\n%s" % output)

    def cleanup(session):
        """
        Clean up the test file used for freeze/thaw test.

        :param session: Guest session to be cleaned up.
        """
        status, output = session.cmd_status_output('rm -f freeze_test')
        if status:
            test.error("Failed to cleanup test file"
                       "Find created file failed:\n%s" % output)

    if not virsh.has_help_command('domfsfreeze'):
        test.cancel("This version of libvirt does not support "
                    "the domfsfreeze/domfsthaw test")

    channel = ("yes" == params.get("prepare_channel", "yes"))
    agent = ("yes" == params.get("start_agent", "yes"))
    mountpoint = params.get("mountpoint", None)
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    try:
        # Add or remove qemu-agent from guest before test
        vm.prepare_guest_agent(channel=channel, start=agent)
        session = vm.wait_for_login()
        try:
            # Expected fail message patterns
            fail_patts = []
            if not channel:
                fail_patts.append(r"QEMU guest agent is not configured")
            if not agent:
                # For older version
                fail_patts.append(r"Guest agent not available for now")
                # For newer version
                fail_patts.append(r"Guest agent is not responding")
            # Message patterns test should skip when met
            skip_patts = [
                r'The command \S+ has not been found',
                r'specifying mountpoints is not supported',
            ]

            res = virsh.domfsfreeze(vm_name, mountpoint=mountpoint)
            libvirt.check_result(res, fail_patts, skip_patts)
            if not res.exit_status:
                check_freeze(session)

            res = virsh.domfsthaw(vm_name, mountpoint=mountpoint)
            libvirt.check_result(res, fail_patts, skip_patts)
            if not res.exit_status:
                check_thaw(session)

            cleanup(session)
        finally:
            session.close()
    finally:
        xml_backup.sync()
Example #49
def run(test, params, env):
    """
    Test command: virsh set-user-password

    The command sets the user password inside the domain
    1. Prepare test environment, start vm with guest agent
    2. Perform virsh set-user-password operation(encrypted/ non-encrypted)
    3. Login the vm with new/old password
    4. Recover test environment
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    encrypted = params.get("encrypted", "no") == "yes"
    option = params.get("option", "no") == "yes"
    add_user = params.get("add_user", "no") == "yes"
    set_user_name = params.get("set_user_name", "root")
    status_error = params.get("status_error", "no")
    err_domain = params.get("err_domain", "")
    err_msg = params.get("err_msg", "")
    start_ga = params.get("start_ga", "yes") == "yes"
    ori_passwd = vm.params.get("password")
    new_passwd = "a" + ori_passwd
    passwd = new_passwd

    # Back up domain XML
    vmxml_bak = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        vmxml = vmxml_bak.copy()

        if start_ga:
            # Start guest agent in vm
            vm.prepare_guest_agent(prepare_xml=False, channel=False, start=True)

        # Error test
        if status_error == "yes":
            if err_domain:
                vm_name = err_domain
            ret = virsh.set_user_password(vm_name, set_user_name, new_passwd,
                                          encrypted=encrypted, option=option, debug=True)
            libvirt.check_result(ret, err_msg)

        # Normal test
        else:
            # Get guest ip address
            session = vm.wait_for_login(timeout=30, username="******", password=ori_passwd)
            vm_mac = vm.get_virsh_mac_address()
            vm_ip = utils_net.get_guest_ip_addr(session, vm_mac)

            # Add user
            if add_user:
                cmd = " rm -f /etc/gshadow.lock & useradd %s" % set_user_name
                status, output = session.cmd_status_output(cmd)
                if status:
                    test.error("Adding user '%s' got failed: '%s'" %
                               (set_user_name, output))
                session.close()

            # Set the user password in vm
            if encrypted:
                cmd = "openssl passwd -crypt %s" % new_passwd
                ret = process.run(cmd, shell=True)
                libvirt.check_exit_status(ret)
                en_passwd = str(ret.stdout_text.strip())
                passwd = en_passwd

            ret = virsh.set_user_password(vm_name, set_user_name, passwd,
                                          encrypted=encrypted, option=option, debug=True)
            libvirt.check_exit_status(ret)

            # Login with new password
            try:
                session = remote.wait_for_login("ssh", vm_ip, "22", set_user_name, new_passwd,
                                                r"[\#\$]\s*$", timeout=30)
                session.close()
            except remote.LoginAuthenticationError as e:
                logging.debug(e)

            # Login with old password
            try:
                session = remote.wait_for_login("ssh", vm_ip, "22", set_user_name, ori_passwd,
                                                r"[\#\$]\s*$", timeout=10)
                session.close()
            except remote.LoginAuthenticationError:
                logging.debug("Login with old password failed as expected.")

            # Change the password back in VM
            ret = virsh.set_user_password(vm_name, set_user_name, ori_passwd, False,
                                          option=option, debug=True)
            libvirt.check_exit_status(ret)

            # Login with the original password
            try:
                session = remote.wait_for_login("ssh", vm_ip, "22", set_user_name, ori_passwd,
                                                r"[\#\$]\s*$", timeout=30)
                session.close()
            except remote.LoginAuthenticationError as e:
                logging.debug(e)

            if start_ga:
                # Stop guest agent in vm
                vm.prepare_guest_agent(prepare_xml=False, channel=False, start=False)

            # Del user
            if add_user:
                session = vm.wait_for_login(timeout=30, username="******", password=ori_passwd)
                cmd = "userdel -r %s" % set_user_name
                status, output = session.cmd_status_output(cmd)
                if status:
                    test.error("Deleting user '%s' got failed: '%s'" %
                               (set_user_name, output))
                session.close()
Example #50
def run(test, params, env):
    """
    Test command: virsh managedsave-xxx
    including virsh managedsave-edit
              virsh managedsave-dumpxml
              virsh managedsave-define
              ...
    """
    vm_name = params.get('main_vm')
    checkpoint = params.get('checkpoint', '')
    error_msg = params.get('error_msg', '')
    ms_extra_options = params.get('ms_extra_options', '')
    pre_state = params.get('pre_state', '')
    status_error = 'yes' == params.get('status_error', 'no')

    vm = env.get_vm(vm_name)
    bkxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': False}

    def start_and_login_vm():
        """
        Start vm and login, after which vm is accessible
        """
        vm.start()
        vm.wait_for_login().close()

    try:
        if checkpoint == 'dumpxml':
            # Check managedsave-dumpxml
            start_and_login_vm()
            virsh.managedsave(vm_name, **virsh_dargs)
            virsh.managedsave_dumpxml(vm_name, **virsh_dargs)
            tmp_dir = data_dir.get_tmp_dir()
            save_img_xml = os.path.join(tmp_dir, 'save_img.xml')
            managed_save_xml = os.path.join(tmp_dir, 'managed_save.xml')
            virsh.save_image_dumpxml(MANAGEDSAVE_FILE % vm_name, ' > %s'
                                     % save_img_xml, **virsh_dargs)
            virsh.managedsave_dumpxml(vm_name, ' > %s' % managed_save_xml,
                                      **virsh_dargs)
            result_need_check = process.run('diff %s %s' %
                                            (save_img_xml, managed_save_xml),
                                            shell=True, verbose=True)
        if checkpoint == 'secure_info':
            # Check managedsave-dumpxml with option --security-info
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            vm_xml.VMXML.set_graphics_attr(vm_name, {'passwd': '123456'})
            start_and_login_vm()
            virsh.managedsave(vm_name, **virsh_dargs)
            default_xml = virsh.managedsave_dumpxml(vm_name, **virsh_dargs).stdout_text
            if 'passwd' in default_xml:
                test.fail('Found "passwd" in dumped vm xml. '
                          'Secure info like "passwd" should not be dumped.')
            secure_xml = virsh.managedsave_dumpxml(vm_name, '--security-info',
                                                   **virsh_dargs).stdout_text
            if 'passwd' not in secure_xml:
                test.fail('Not found "passwd" in dumped vm xml.'
                          'Secure info like "passwd" should be dumped '
                          'with option "--security-info"')
        if checkpoint == 'define':
            # Make a change to a managedsave-dumped xml, redefine the vm
            # and check whether the change takes effect
            start_option = '--paused' if pre_state == 'paused' else ''
            virsh.start(vm_name, start_option, **virsh_dargs)
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            logging.debug(vmxml.devices)
            disk = vmxml.get_devices('disk')[0]
            img_path = disk.source.attrs['file']
            logging.info('Original image path: %s', img_path)
            # Copy old image to new image
            new_img_path = os.path.join(data_dir.get_tmp_dir(), 'test.img')
            shutil.copyfile(img_path, new_img_path)
            virsh.managedsave(vm_name, **virsh_dargs)
            xmlfile = os.path.join(data_dir.get_tmp_dir(), 'managedsave.xml')
            virsh.managedsave_dumpxml(vm_name, '>%s' % xmlfile, **virsh_dargs)
            # Make change to xmlfile and managedsave-define with it
            with open(xmlfile) as file_xml:
                updated_xml = file_xml.read().replace(img_path, new_img_path)
            with open(xmlfile, 'w') as file_xml:
                file_xml.write(updated_xml)
            virsh.managedsave_define(vm_name, xmlfile, ms_extra_options, **virsh_dargs)
            virsh.start(vm_name, **virsh_dargs)
            xml_after_define = virsh.dumpxml(vm_name, **virsh_dargs).stdout_text
            if 'test.img' not in xml_after_define:
                test.fail('Not found "test.img" in vm xml after managedsave-define.'
                          'Modification to xml did not take effect.')
        if checkpoint == 'no_save':
            # Start a guest but do not managedsave it
            start_and_login_vm()
            virsh.dom_list('--all --managed-save', **virsh_dargs)
        if checkpoint == 'rm_after_save':
            # Remove saved file after managedsave a vm
            start_and_login_vm()
            virsh.managedsave(vm_name, **virsh_dargs)
            os.remove(MANAGEDSAVE_FILE % vm_name)
        if checkpoint == 'not_saved_corrupt':
            # Do not managedsave a vm, but create a fake managedsaved file by
            # 'touch' a file
            start_and_login_vm()
            virsh.dom_list('--all --managed-save', **virsh_dargs)
            process.run('touch %s' % MANAGEDSAVE_FILE % vm_name, verbose=True)
            params['clean_managed_save'] = True
        if checkpoint == 'exclusive_option':
            virsh.managedsave(vm_name, **virsh_dargs)

        # Test managedsave-edit, managedsave_dumpxml, managedsave-define
        if params.get('check_cmd_error', '') == 'yes':
            ms_command = params.get('ms_command', '')
            if ms_command == 'edit':
                result_need_check = virsh.managedsave_edit(vm_name,
                                                           ms_extra_options,
                                                           debug=True)
            if ms_command == 'dumpxml':
                result_need_check = virsh.managedsave_dumpxml(vm_name,
                                                              ms_extra_options,
                                                              debug=True)
            if ms_command == 'define':
                result_need_check = virsh.managedsave_define(vm_name,
                                                             bkxml.xml,
                                                             ms_extra_options,
                                                             debug=True)
        # Check the command result if one needs to be checked
        if 'result_need_check' in locals():
            logging.info('Check command result.')
            libvirt.check_exit_status(result_need_check, status_error)
            if error_msg:
                libvirt.check_result(result_need_check, [error_msg])

    finally:
        if params.get('clean_managed_save'):
            os.remove(MANAGEDSAVE_FILE % vm_name)
        utils_libvirtd.libvirtd_restart()
        virsh.managedsave_remove(vm_name, debug=True)
        bkxml.sync()
Example #51
def run(test, params, env):
    """
    Test vcpu hotpluggable item in xml

    1. Set the libvirtd log filter/level/file
    2. Restart libvirtd
    3. Start vm by xml with vcpu hotpluggable
    4. Check the qemu command line
    5. Check the libvirtd log
    6. Restart libvirtd
    7. Check the vm xml
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    vcpus_placement = params.get("vcpus_placement", "static")
    vcpus_crt = int(params.get("vcpus_current", "4"))
    vcpus_max = int(params.get("vcpus_max", "8"))
    vcpus_enabled = params.get("vcpus_enabled", "")
    vcpus_hotplug = params.get("vcpus_hotpluggable", "")
    vcpus_order = params.get("vcpus_order")
    err_msg = params.get("err_msg", "")
    config_libvirtd = params.get("config_libvirtd", "yes") == "yes"
    log_file = params.get("log_file", "libvirtd.log")
    live_vcpus = params.get("set_live_vcpus", "")
    config_vcpus = params.get("set_config_vcpus", "")
    enable_vcpu = params.get("set_enable_vcpu", "")
    disable_vcpu = params.get("set_disable_vcpu", "")
    # Install cgroup utils
    cgutils = "libcgroup-tools"
    if "ubuntu" in platform.dist()[0].lower():
        cgutils = "cgroup-tools"
    sm = SoftwareManager()
    if not sm.check_installed(cgutils) and not sm.install(cgutils):
        test.cancel("cgroup utils package install failed")
    # Backup domain XML
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()
    libvirtd = utils_libvirtd.Libvirtd()

    try:
        # Configure libvirtd log
        if config_libvirtd:
            config_path = os.path.join(data_dir.get_tmp_dir(), log_file)
            with open(config_path, 'a') as f:
                pass
            config = utils_config.LibvirtdConfig()
            log_outputs = "1:file:%s" % config_path
            config.log_outputs = log_outputs
            config.log_level = 1
            config.log_filters = "1:json 1:libvirt 1:qemu 1:monitor 3:remote 4:event"

            # Restart libvirtd to make the changes take effect in libvirt
            libvirtd.restart()

        # Set vcpu: placement,current,max vcpu
        vmxml.placement = vcpus_placement
        vmxml.vcpu = vcpus_max
        vmxml.current_vcpu = vcpus_crt
        del vmxml.cpuset

        # Create vcpu xml with vcpu hotpluggable and order
        vcpu_list = []
        vcpu = {}
        en_list = vcpus_enabled.split(",")
        hotplug_list = vcpus_hotplug.split(",")
        order_dict = ast.literal_eval(vcpus_order)

        for vcpu_id in range(vcpus_max):
            vcpu['id'] = str(vcpu_id)
            if str(vcpu_id) in en_list:
                vcpu['enabled'] = 'yes'
                if str(vcpu_id) in order_dict:
                    vcpu['order'] = order_dict[str(vcpu_id)]
            else:
                vcpu['enabled'] = 'no'
            if str(vcpu_id) in hotplug_list:
                vcpu['hotpluggable'] = 'yes'
            else:
                vcpu['hotpluggable'] = 'no'
            vcpu_list.append(copy.copy(vcpu))
            vcpu = {}

        vcpus_xml = vm_xml.VMVCPUSXML()
        vcpus_xml.vcpu = vcpu_list

        vmxml.vcpus = vcpus_xml

        # Remove influence from topology setting
        try:
            logging.info('Remove influence from topology setting')
            cpuxml = vmxml.cpu
            del cpuxml.topology
            vmxml.cpu = cpuxml
        except Exception as e:
            pass

        vmxml.sync()

        # Start VM
        logging.info("Start VM with vcpu hotpluggable and order...")
        ret = virsh.start(vm_name, ignore_status=True)

        if err_msg:
            libvirt.check_result(ret, err_msg)
        else:
            # Wait for domain
            vm.wait_for_login()

            if enable_vcpu:
                ret = virsh.setvcpu(vm_name, enable_vcpu, "--enable",
                                    ignore_status=False, debug=True)
                vcpus_crt += 1
            if disable_vcpu:
                ret = virsh.setvcpu(vm_name, disable_vcpu, "--disable",
                                    ignore_status=False, debug=True)
                vcpus_crt -= 1
            if live_vcpus:
                ret = virsh.setvcpus(vm_name, live_vcpus, ignore_status=False,
                                     debug=True)
                vcpus_crt = int(live_vcpus)
            if config_vcpus:
                ret = virsh.setvcpus(vm_name, config_vcpus, "--config",
                                     ignore_status=False, debug=True)

            # Check QEMU command line
            cmd = ("ps -ef| grep %s| grep 'maxcpus=%s'" % (vm_name, vcpus_max))
            ret = process.run(cmd, ignore_status=False, shell=True)
            if ret.exit_status != 0:
                logging.error("Maxcpus in QEMU command line is wrong!")

            # Check libvirtd log
            if config_libvirtd:
                for vcpu in vcpu_list:
                    if vcpu['enabled'] == 'yes' and vcpu['hotpluggable'] == 'yes':
                        cmd = ("cat %s| grep device_add| grep qemuMonitorIOWrite"
                               "| grep 'vcpu%s'" % (config_path, vcpu['id']))
                        ret = process.run(cmd, ignore_status=False, shell=True)
                        if ret.exit_status != 0:
                            logging.error("Failed to find lines about enabled vcpu%s"
                                          "in libvirtd log.", vcpu['id'])

            # Dumpxml
            dump_xml = virsh.dumpxml(vm_name).stdout.strip()
            vcpu_items = re.findall(r"vcpu.*", dump_xml)

            # Check guest vcpu count
            ret = virsh.vcpucount(vm_name, ignore_status=True, debug=True)
            output = ret.stdout.strip()
            max_list = re.findall(r"maximum.*(?:config|live).*%s\n" % vcpus_max, output)
            if len(max_list) != 2:
                test.fail("vcpucount maximum info is not correct.")

            if live_vcpus:
                crt_live_list = re.findall(r"current.*live.*%s" % live_vcpus, output)
                logging.info("vcpucount crt_live_list: \n %s", crt_live_list)
                if len(crt_live_list) != 1:
                    test.fail("vcpucount: current live info is not correct.")
            elif config_vcpus:
                crt_cfg_list = re.findall(r"current.*config.*%s" % config_vcpus, output)
                logging.info("vcpucount crt_cfg_list: \n %s", crt_cfg_list)
                if len(crt_cfg_list) != 1:
                    test.fail("vcpucount: current config info is not correct.")
            else:
                crt_live_list = re.findall(r"current.*live.*%s" % vcpus_crt, output)
                logging.info("vcpucount crt_live_list: \n %s", crt_live_list)
                if len(crt_live_list) != 1:
                    test.fail("vcpucount: current info is not correct.")

            # Check guest vcpu info
            ret = virsh.vcpuinfo(vm_name, ignore_status=True, debug=True)
            output = ret.stdout.strip()
            vcpu_lines = re.findall(r"VCPU:.*\n", output)
            logging.info("vcpuinfo vcpu_lines: \n %s", vcpu_lines)
            if len(vcpu_lines) != vcpus_crt:
                test.fail("vcpuinfo is not correct.")

            # Check cpu in guest
            if not utils_misc.check_if_vm_vcpu_match(vcpus_crt, vm):
                test.fail("cpu number in VM is not correct, it should be %s cpus" % vcpus_crt)

            # Check VM xml change for cold-plug/cold-unplug
            if config_vcpus:
                inactive_xml = virsh.dumpxml(vm_name, "--inactive").stdout.strip()
                crt_vcpus_xml = re.findall(r"vcpu.*current=.%s.*" %
                                           config_vcpus, inactive_xml)
                logging.info("dumpxml --inactive xml: \n %s", crt_vcpus_xml)
                if len(crt_vcpus_xml) != 1:
                    test.fail("Dumpxml with --inactive,"
                              "the vcpu current is not correct.")

            # Restart libvirtd
            libvirtd.restart()

            # Recheck VM xml
            re_dump_xml = virsh.dumpxml(vm_name).stdout.strip()
            re_vcpu_items = re.findall(r"vcpu.*", re_dump_xml)
            if vcpu_items != re_vcpu_items:
                test.fail("After restarting libvirtd,"
                          "VM xml changed unexpectedly.")

            # Check cgroup info
            en_vcpu_list = re.findall(r"vcpu.*enabled=.yes.*", re_dump_xml)
            for vcpu_sn in range(len(en_vcpu_list)):
                vcpu_id = en_vcpu_list[vcpu_sn].split("=")[1].split()[0].strip('\'')
                cmd = ("lscgroup| grep cpuset| grep %s| grep vcpu%s" %
                       (vm_name[-3:], vcpu_id))
                ret = process.run(cmd, ignore_status=False, shell=True)
                if ret.exit_status != 0:
                    test.fail("Failed to find lines about enabled vcpu%s"
                              "in lscgroup info." % vcpu_id)
    finally:
        # Recover libvirtd configuration
        if config_libvirtd:
            config.restore()
            if os.path.exists(config_path):
                os.remove(config_path)

        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()
Example #52
def run(test, params, env):
    """
    Test the command virsh vcpucount

    (1) Iterate perform setvcpus operation with four valid options.
    (2) Iterate call virsh vcpucount with given options.
    (3) Check whether the virsh vcpucount works as expected.
    (4) Recover test environment.

    The test works for domain state "shut off" or "running"; it checks the
    vcpucount result after vcpu hotplug using setvcpus.

    For setvcpus, include four valid options:
      --config
      --config --maximum
      --live
      --guest

    For vcpucount options, at most 2 options are combined together. Upstream
    libvirt now supports more option combinations (e.g. 3 options together or
    a single --maximum option), but for backward compatibility only the
    following options are checked:
      None
      --config --active
      --config --maximum
      --live --active
      --live --maximum
      --current --active
      --current --maximum
      --guest
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    xml_file = params.get("vcpucount_xml_file", "vm.xml")
    virsh.dumpxml(vm_name, extra="--inactive", to_file=xml_file)
    pre_vm_state = params.get("vcpucount_pre_vm_state")
    options = params.get("vcpucount_options")
    status_error = "yes" == params.get("status_error", "no")
    maxvcpu = int(params.get("vcpucount_maxvcpu", "4"))
    curvcpu = int(params.get("vcpucount_current", "1"))
    sockets = int(params.get("sockets", "1"))
    cores = int(params.get("cores", "4"))
    threads = int(params.get("threads", "1"))
    expect_msg = params.get("vcpucount_err_msg")
    livevcpu = curvcpu + threads
    set_option = ["--config", "--config --maximum", "--live", "--guest"]

    # Early death
    # 1.1 More than two options not supported
    if len(options.split()) > 2:
        test.cancel("Options exceeds 2 is not supported")

    # 1.2 Check for all options
    option_list = options.split(" ")
    if not status_error:
        for item in option_list:
            if virsh.has_command_help_match("vcpucount", item) is None:
                test.cancel("The current libvirt version doesn't support "
                            "'%s' option" % item)
    # 1.3 Check for vcpu values
    if (sockets and cores and threads):
        if int(maxvcpu) != int(sockets) * int(cores) * int(threads):
            test.cancel("Invalid topology definition, VM will not start")

    try:
        # Prepare domain
        reset_domain(vm, pre_vm_state, maxvcpu, curvcpu,
                     sockets, cores, threads, ("--guest" in options))

        # Perform guest vcpu hotplug
        for idx in range(len(set_option)):
            # Remove topology for maximum config
            # https://bugzilla.redhat.com/show_bug.cgi?id=1426220
            if idx == 1:
                del_topology(vm, pre_vm_state)
            # Hotplug domain vcpu
            result = virsh.setvcpus(vm_name, livevcpu, set_option[idx],
                                    ignore_status=True, debug=True)
            setvcpus_status = result.exit_status

            # Call virsh vcpucount with option
            result = virsh.vcpucount(vm_name, options, ignore_status=True,
                                     debug=True)
            output = result.stdout.strip()
            vcpucount_status = result.exit_status

            if "--guest" in options:
                if result.stderr.count("doesn't support option") or \
                   result.stderr.count("command guest-get-vcpus has not been found"):
                    test.fail("Option %s is not supported" % options)

            # Reset domain
            reset_domain(vm, pre_vm_state, maxvcpu, curvcpu,
                         sockets, cores, threads, ("--guest" in options))

            # Check result
            if status_error:
                if vcpucount_status == 0:
                    test.fail("Run successfully with wrong command!")
                else:
                    logging.info("Run failed as expected")
                    if expect_msg:
                        libvirt.check_result(result, expect_msg.split(';'))
            else:
                if vcpucount_status != 0:
                    test.fail("Run command failed with options %s" %
                              options)
                elif setvcpus_status == 0:
                    if pre_vm_state == "shut off":
                        if idx == 0:
                            expect_out = [maxvcpu, livevcpu]
                            chk_output_shutoff(output, expect_out, options, test)
                        elif idx == 1:
                            expect_out = [livevcpu, curvcpu]
                            chk_output_shutoff(output, expect_out, options, test)
                        else:
                            test.fail("setvcpus should failed")
                    else:
                        if idx == 0:
                            expect_out = [maxvcpu, maxvcpu, livevcpu,
                                          curvcpu, curvcpu]
                            chk_output_running(output, expect_out, options, test)
                        elif idx == 1:
                            expect_out = [livevcpu, maxvcpu, curvcpu,
                                          curvcpu, curvcpu]
                            chk_output_running(output, expect_out, options, test)
                        elif idx == 2:
                            expect_out = [maxvcpu, maxvcpu, curvcpu,
                                          livevcpu, livevcpu]
                            chk_output_running(output, expect_out, options, test)
                        else:
                            expect_out = [maxvcpu, maxvcpu, curvcpu,
                                          curvcpu, livevcpu]
                            chk_output_running(output, expect_out, options, test)
                else:
                    if pre_vm_state == "shut off":
                        expect_out = [maxvcpu, curvcpu]
                        chk_output_shutoff(output, expect_out, options, test)
                    else:
                        expect_out = [
                            maxvcpu, maxvcpu, curvcpu, curvcpu, curvcpu]
                        chk_output_running(output, expect_out, options, test)
    finally:
        # Recover env
        reset_env(vm_name, xml_file)
Example #53
def run(test, params, env):
    """
    Test extended TSEG on Q35 machine types
    <smm state='on'>
        <tseg unit='MiB'>48</tseg>
    </smm>

    Steps:
    1) Edit VM xml for smm or tseg sub element
    2) Verify if Guest can boot as expected
    3) On i440fx machine types, the property is not supported.
       On Q35 machine types, both Seabios and OVMF guests can boot up
    """

    vm_name = params.get("main_vm", "")
    vm = env.get_vm(vm_name)
    smm_state = params.get("smm_state", "off")
    unit = params.get("tseg_unit")
    size = params.get("tseg_size")
    boot_type = params.get("boot_type", "")
    loader_type = params.get("loader_type")
    loader = params.get("loader")
    err_msg = params.get("error_msg", "")
    vm_arch_name = params.get("vm_arch_name", "x86_64")
    status_error = ("yes" == params.get("status_error", "no"))

    if not libvirt_version.version_compare(4, 5, 0):
        test.cancel("TSEG does not support in "
                    "current libvirt version")

    if (boot_type == "seabios" and
            not utils_package.package_install('seabios-bin')):
        test.cancel("Failed to install Seabios")

    if (boot_type == 'ovmf' and
            not utils_package.package_install('OVMF')):
        test.cancel("Failed to install OVMF")

    # Back up VM XML
    v_xml_backup = vm_xml.VMXML.new_from_dumpxml(vm_name)
    v_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)

    try:
        # Specify boot loader for OVMF
        if boot_type == 'ovmf':
            os_xml = v_xml.os
            os_xml.loader_type = loader_type
            os_xml.loader = loader
            os_xml.loader_readonly = "yes"
            v_xml.os = os_xml

        try:
            features_xml = v_xml.features
        except xcepts.LibvirtXMLNotFoundError:
            if vm_arch_name == 'x86_64':
                # ACPI is required for UEFI on x86_64
                v_xml.xmltreefile.create_by_xpath("/features/acpi")
                features_xml = v_xml.features
            else:
                features_xml = vm_xml.VMFeaturesXML()

        features_xml.smm = smm_state
        if unit and size:
            features_xml.smm_tseg_unit = unit
            features_xml.smm_tseg = size
        v_xml.features = features_xml

        logging.debug("New VM XML is:\n%s", v_xml)
        ret = virsh.define(v_xml.xml)
        utlv.check_result(ret, expected_fails=err_msg)

        # Check result
        if not status_error:
            vm.start()
            if unit and size:
                # If tseg unit is KiB, convert it to MiB
                # as vm dumpxml converts it automatically
                if unit == 'KiB':
                    unit, size = unify_to_MiB(unit, size)
                expect_line = "<tseg unit=\"%s\">%s</tseg>" % (unit, size)
                utlv.check_dumpxml(vm, expect_line)
                # Qemu cmdline uses mbytes
                unit, tseg_mbytes = unify_to_MiB(unit, size)
                expect_line = '-global mch.extended-tseg-mbytes=%s' % tseg_mbytes
                utlv.check_qemu_cmd_line(expect_line)
    finally:
        logging.debug("Restore the VM XML")
        if vm.is_alive():
            vm.destroy()
        # OVMF enables nvram by default
        v_xml_backup.sync(options="--nvram")
Example #54
def run(test, params, env):
    """
    Test pci/pcie-to-pci bridge

    Hotplug interface to pci/pcie-to-pci bridge, then check xml and
    inside vm.
    Hotunplug interface, then check xml and inside vm.
    Other test scenarios of pci/pcie-to-pci bridge.
    """

    def create_pci_device(pci_model, pci_model_name, **kwargs):
        """
        Create a pci/pcie bridge

        :param pci_model: model of pci controller device
        :param pci_model_name: model name of pci controller device
        :param kwargs: other k-w args that needed to create device
        :return: the newly created device object
        """
        pci_bridge = Controller('pci')
        pci_bridge.model = pci_model
        pci_bridge.model_name = {'name': pci_model_name}
        if 'index' in kwargs:
            pci_bridge.index = kwargs['index']
        if 'address' in kwargs:
            pci_bridge.address = pci_bridge.new_controller_address(
                attrs=eval(kwargs['address']))

        logging.debug('pci_bridge: %s', pci_bridge)
        return pci_bridge

    def create_iface(iface_model, iface_source, **kwargs):
        """
        Create an interface to be attached to vm

        :param iface_model: model of the interface device
        :param iface_source: source of the interface device
        :param kwargs: other k-w args that needed to create device
        :return: the newly created interface object
        """
        iface = Interface('network')
        iface.model = iface_model
        iface.source = eval(iface_source)

        if 'mac' in kwargs:
            iface.mac_address = kwargs['mac']
        else:
            mac = utils_net.generate_mac_address_simple()
            iface.mac_address = mac

        if 'address' in kwargs:
            iface.address = iface.new_iface_address(attrs=eval(kwargs['address']))

        logging.debug('iface: %s', iface)
        return iface

    vm_name = params.get('main_vm')
    status_error = 'yes' == params.get('status_error', 'no')
    err_msg = params.get('err_msg', '').split(';')
    case = params.get('case', '')
    hotplug = 'yes' == params.get('hotplug', 'no')

    need_pci_br = 'yes' == params.get('need_pci_br', 'no')
    pci_model = params.get('pci_model', 'pci')
    pci_model_name = params.get('pci_model_name')
    pci_br_kwargs = eval(params.get('pci_br_kwargs', '{}'))

    pci_br_has_device = 'yes' == params.get('pci_br_has_device', 'no')
    sound_dev_model_type = params.get('sound_dev_model_type', '')
    sound_dev_address = params.get('sound_dev_address', '')

    iface_model = params.get('iface_model', '')
    iface_source = params.get('iface_source', '')
    iface_kwargs = eval(params.get('iface_kwargs', '{}'))

    max_slots = int(params.get('max_slots', 31))
    pcie_br_count = int(params.get('pcie_br_count', 3))

    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    bkxml = vmxml.copy()
    vm = env.get_vm(vm_name)

    try:

        # Check if there is a pci/pcie-to-pci bridge, if so,
        # just use the existing pci/pcie-to-pci-bridge to test
        ori_pci_br = [dev for dev in vmxml.get_devices('controller')
                      if dev.type == 'pci' and dev.model == pci_model]

        if need_pci_br:
            # If there is not a pci/pcie-to-pci bridge to test,
            # create one and add to vm
            if not ori_pci_br:
                logging.info('No %s on vm, create one', pci_model)
                pci_bridge = create_pci_device(pci_model, pci_model_name)
                vmxml.add_device(pci_bridge)
                vmxml.sync()
                logging.debug(virsh.dumpxml(vm_name))

            # Check if pci/pcie-to-pci bridge is successfully added
            vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
            cur_pci_br = [dev for dev in vmxml.get_devices('controller')
                          if dev.type == 'pci' and dev.model == pci_model]
            if not cur_pci_br:
                test.error('Failed to add %s controller to vm xml' % pci_model)

            pci_br = cur_pci_br[0]
            logging.debug('pci_br: %s', pci_br)
            pci_br_index = pci_br.index

        # If test scenario requires another pci device on pci/pcie-to-pci
        # bridge before hotplug, add a sound device and make sure
        # the 'bus' is same with pci bridge index
        if need_pci_br and pci_br_has_device:
            sound_dev = Sound()
            sound_dev.model_type = sound_dev_model_type
            sound_dev.address = eval(sound_dev_address % pci_br_index)
            logging.debug('sound_dev.address: %s', sound_dev.address)
            vmxml.add_device(sound_dev)
            if case != 'vm_with_pcie_br_1_br':
                vmxml.sync()

        # Test hotplug scenario
        if hotplug:
            vm.start()
            vm.wait_for_login().close()

            # Create interface to be hotplugged
            logging.info('Create interface to be hotplugged')
            iface = create_iface(iface_model, iface_source)
            mac = iface.mac_address

            result = virsh.attach_device(vm_name, iface.xml, debug=True)
            libvirt.check_exit_status(result)

            xml_after_attach = VMXML.new_from_dumpxml(vm_name)
            logging.debug(virsh.dumpxml(vm_name))

            # Check if the iface with given mac address is successfully
            # attached with address bus equal to pcie/pci bridge's index
            iface_list = [
                iface for iface in xml_after_attach.get_devices('interface')
                if iface.mac_address == mac and
                int(iface.address['attrs']['bus'], 16) == int(pci_br_index, 16)
            ]

            logging.debug('iface list after attach: %s', iface_list)
            if not iface_list:
                test.error('Failed to attach interface %s' % iface)

            # Check inside vm
            def check_inside_vm(session, expect=True):
                ip_output = session.cmd('ip a')
                logging.debug('output of "ip a": %s', ip_output)

                return expect if mac in ip_output else not expect

            session = vm.wait_for_serial_login()
            if not utils_misc.wait_for(lambda: check_inside_vm(session, True),
                                       timeout=60, step=5):
                test.fail('Check interface inside vm failed, '
                          'interface not successfully attached: '
                          'mac address %s not found' % mac)
            session.close()

            # Test hotunplug
            result = virsh.detach_device(vm_name, iface.xml, debug=True)
            libvirt.check_exit_status(result)

            logging.debug(virsh.dumpxml(vm_name))

            # Check if the iface with given mac address has been
            # successfully detached
            xml_after_detach = VMXML.new_from_dumpxml(vm_name)
            iface_list_after_detach = [
                iface for iface in xml_after_detach.get_devices('interface')
                if iface.mac_address == mac
            ]

            logging.debug('iface list after detach: %s', iface_list_after_detach)
            if iface_list_after_detach:
                test.fail('Failed to detach device: %s' % iface)

            # Check again inside vm
            session = vm.wait_for_serial_login()
            if not utils_misc.wait_for(lambda: check_inside_vm(session, False),
                                       timeout=60, step=5):
                test.fail('Check interface inside vm failed, '
                          'interface not successfully detached: '
                          'found mac address %s' % mac)
            session.close()

        # Other test scenarios of pci/pcie
        if case:
            logging.debug('iface_kwargs: %s', iface_kwargs)

            # Set pcie-to-pci-bridge model name != pcie-pci-bridge,
            # or an invalid controller index for pcie-to-pci-bridge.
            if case in ('wrong_model_name', 'invalid_index'):
                pci_bridge = create_pci_device(pci_model, pci_model_name,
                                               **pci_br_kwargs)
                vmxml.add_device(pci_bridge)
                result_to_check = virsh.define(vmxml.xml, debug=True)

            # Attach device with invalid slot to pcie-to-pci-bridge
            if case == 'attach_with_invalid_slot':
                iface = create_iface(iface_model, iface_source, **iface_kwargs)
                vmxml.add_device(iface)
                result_to_check = virsh.define(vmxml.xml, debug=True)

            # Test that pcie-to-pci-bridge has 31 available slots
            if case == 'max_slots':
                target_bus = cur_pci_br[0].index
                target_bus = hex(int(target_bus))
                logging.debug('target_bus: %s', target_bus)

                # Attach 32 interfaces
                for i in range(max_slots + 1):
                    logging.debug('address: %s', iface_kwargs['address'])
                    new_iface_kwargs = {'address': iface_kwargs['address']
                                        % (target_bus, hex(i + 1))}
                    iface = create_iface(iface_model, iface_source,
                                         **new_iface_kwargs)
                    logging.info('Attaching the %d th interface', i + 1)
                    result_in_loop = virsh.attach_device(
                        vm_name, iface.xml, flagstr='--config', debug=True)

                    # Attaching the 32nd interface will fail
                    if i == max_slots:
                        status_error = True
                    libvirt.check_exit_status(result_in_loop,
                                              expect_error=status_error)
                logging.debug(virsh.dumpxml(vm_name))

                # Get all devices on pcie-to-pci-bridge from new xml
                # Test if it matches with value of max_slots
                new_xml = VMXML.new_from_dumpxml(vm_name)
                device_on_pci_br = [
                    dev for dev in new_xml.get_devices('interface')
                    if dev.address['type_name'] == 'pci' and
                    int(dev.address['attrs']['bus'], 16) == int(target_bus, 16)
                ]

                logging.info('All slots of pcie-to-pci-bridge is %d',
                             len(device_on_pci_br))
                if len(device_on_pci_br) != max_slots:
                    test.fail('Max slots is %d instead of %d' %
                              (len(device_on_pci_br), max_slots))

            # Define a guest with pcie-to-pci-bridge controller's index <=bus
            if case.startswith('index_v_bus'):
                last_pci_index = max([
                    int(dev.index) for dev in vmxml.get_devices('controller')
                    if dev.type == 'pci'])

                # New index of new pcie-bridge should be +1
                new_index = last_pci_index + 1
                if case.endswith('less_than'):
                    new_bus = new_index + 1
                elif case.endswith('equal_to'):
                    new_bus = new_index
                else:
                    test.error('Invalid test: %s' % case)

                pci_br_kwargs['index'] = new_index
                pci_br_kwargs['address'] = pci_br_kwargs['address'] % (new_bus)
                logging.debug('pci_br_kwargs: %s', pci_br_kwargs)

                pcie_br = create_pci_device(pci_model, pci_model_name, **pci_br_kwargs)
                vmxml.add_device(pcie_br)
                result_to_check = virsh.define(vmxml.xml, debug=True)

            # Test define & start an VM with/without pcie-to-pci-bridge
            if case.startswith('vm_with_pcie_br'):
                if case.endswith('1_br'):
                    pass
                elif case.endswith('multi_br'):
                    # Add $pcie_br_count pcie-root-port to vm
                    for i in range(pcie_br_count):
                        temp_xml = VMXML.new_from_inactive_dumpxml(vm_name)
                        port = create_pci_device('pcie-root-port', 'pcie-root-port')
                        temp_xml.add_device(port)
                        temp_xml.sync()

                    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
                    pci_root_ports = [dev for dev in vmxml.get_devices('controller')
                                      if dev.type == 'pci' and
                                      dev.model == 'pcie-root-port']
                    indexs = sorted([hex(int(dev.index)) for dev in pci_root_ports])
                    logging.debug('indexs: %s', indexs)

                    # Add $pcie_br_count pcie-to-pci-bridge to vm,
                    # on separated pcie-root-port
                    for i in range(pcie_br_count):
                        new_kwargs = {
                            'address': pci_br_kwargs['address'] % indexs[-i - 1]}
                        pcie_br = create_pci_device(pci_model, pci_model_name,
                                                    **new_kwargs)
                        vmxml.add_device(pcie_br)

                elif case.endswith('no_br'):
                    # Delete all pcie-to-pci-bridge
                    for dev in ori_pci_br:
                        vmxml.del_device(dev)
                    vmxml.sync()

                    # Add a pci device (rtl8139 nic)
                    iface = create_iface(iface_model, iface_source)
                    vmxml.add_device(iface)
                else:
                    test.error('Invalid test: %s' % case)

                # Test define and start vm with new xml settings
                result_define_vm = virsh.define(vmxml.xml, debug=True)
                libvirt.check_exit_status(result_define_vm)
                result_start_vm = virsh.start(vm_name, debug=True)
                libvirt.check_exit_status(result_start_vm)

                # Login to make sure vm is actually started
                vm.create_serial_console()
                vm.wait_for_serial_login().close()

                logging.debug(virsh.dumpxml(vm_name))

                # Get all pcie-to-pci-bridge after test operations
                new_vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
                cur_pci_br = [dev for dev in new_vmxml.get_devices('controller')
                              if dev.type == 'pci' and dev.model == pci_model]
                logging.debug('cur_pci_br: %s', cur_pci_br)

                if case.endswith('multi_br'):
                    # Check whether all new pcie-to-pci-br are successfully added
                    if len(cur_pci_br) - len(ori_pci_br) != pcie_br_count:
                        test.fail('Not all pcie-to-pci-br successfully added. '
                                  'Should be %d, actually is %d' %
                                  (pcie_br_count, len(cur_pci_br) - len(ori_pci_br)))

                elif case.endswith('no_br'):
                    # Check if a pcie-to-pci-br will be automatically created
                    if len(cur_pci_br) == 0:
                        test.fail('No new pcie-to-pci-bridge automatically created')

            # Check command result if there is a result to be checked
            if 'result_to_check' in locals():
                libvirt.check_exit_status(result_to_check, expect_error=status_error)
                libvirt.check_result(result_to_check, expected_fails=err_msg)

    finally:
        bkxml.sync()
Example #55
def run(test, params, env):
    """
    Test Boot OVMF Guest and Seabios Guest with options

    Steps:
    1) Edit VM xml with specified options
    2) For secure boot mode, boot OVMF Guest from
       cdrom first, enroll the key, then switch
       boot from hd
    3) For normal boot mode, directly boot Guest from given device
    4) Verify if Guest can boot as expected
    """
    vm_name = params.get("main_vm", "")
    vm = env.get_vm(vm_name)
    username = params.get("username", "root")
    password = params.get("password", "redhat")
    test_cmd = params.get("test_cmd", "")
    check_point = params.get("checkpoint", "")
    status_error = "yes" == params.get("status_error", "no")
    boot_iso_file = os.path.join(data_dir.get_tmp_dir(), "boot.iso")
    secure_boot_mode = (params.get("secure_boot_mode", "no") == "yes")
    non_release_os_url = params.get("non_release_os_url", "")
    download_file_path = os.path.join(data_dir.get_tmp_dir(), "non_released_os.qcow2")
    uefi_iso = params.get("uefi_iso", "")
    custom_codes = params.get("uefi_custom_codes", "")
    uefi_target_dev = params.get("uefi_target_dev", "")
    uefi_device_bus = params.get("uefi_device_bus", "")
    with_boot = (params.get("with_boot", "no") == "yes")
    boot_ref = params.get("boot_ref", "dev")
    boot_order = params.get("boot_order", "1")
    boot_dev = params.get("boot_dev", "hd")
    target_dev = params.get("target_dev", "vdb")
    vol_name = params.get("vol_name")
    brick_path = os.path.join(test.virtdir, "gluster-pool")

    # Prepare result checkpoint list
    check_points = []
    if check_point:
        check_points.append(check_point)

    # Back up VM XML
    vmxml_backup = vm_xml.VMXML.new_from_dumpxml(vm_name)
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    try:
        # Create /etc/ceph/ceph.conf file to suppress false warning error message.
        process.run("mkdir -p /etc/ceph", ignore_status=True, shell=True)
        cmd = ("echo 'mon_host = {0}' >/etc/ceph/ceph.conf"
               .format(params.get("mon_host")))
        process.run(cmd, ignore_status=True, shell=True)

        setup_test_env(params, test)
        apply_boot_options(vmxml, params)
        blk_source = vm.get_first_disk_devices()['source']
        set_domain_disk(vmxml, blk_source, params, test)
        vmxml.remove_all_boots()
        if with_boot and not secure_boot_mode:
            boot_kwargs = {"boot_ref": boot_ref,
                           "boot_dev": boot_dev,
                           "boot_order": boot_order,
                           "target_dev": target_dev}
            if "yes" == params.get("two_same_boot_dev", "no"):
                boot_kwargs.update({"two_same_boot_dev": True})
            set_boot_dev_or_boot_order(vmxml, **boot_kwargs)
        if secure_boot_mode:
            secure_boot_kwargs = {"uefi_iso": uefi_iso,
                                  "uefi_target_dev": uefi_target_dev,
                                  "uefi_device_bus": uefi_device_bus,
                                  "uefi_custom_codes": custom_codes}
            enable_secure_boot(vm, vmxml, test, **secure_boot_kwargs)
        if not secure_boot_mode:
            define_error = ("yes" == params.get("define_error", "no"))
            enable_normal_boot(vmxml, check_points, define_error, test)
            # Some negative cases failed at virsh.define
            if define_error:
                return

        # Start VM and check result
        # For boot from cdrom or non_released_os, just verify key words from serial console output
        # For boot from disk image, run 'test cmd' to verify if OS boot well
        if boot_dev == "cdrom" or non_release_os_url:
            if not vm.is_alive():
                vm.start()
                check_prompt = params.get("check_prompt", "")
                while True:
                    match, text = read_until_any_line_matches(
                            vm.serial_console,
                            [check_prompt],
                            timeout=30.0, internal_timeout=0.5,
                            custom_codes=custom_codes)
                    logging.debug("matches %s", check_prompt)
                    if match == -1:
                        logging.debug("Got check point as expected")
                        break
        elif boot_dev == "hd":
            ret = virsh.start(vm_name, timeout=60)
            utlv.check_result(ret, expected_fails=check_points)
            # For no boot options, further check if boot dev can be automatically added
            if not with_boot:
                if re.search(r"<boot dev='hd'/>", virsh.dumpxml(vm_name).stdout.strip()):
                    logging.debug("OS boot dev added automatically")
                else:
                    test.fail("OS boot dev not added as expected")
            if not status_error:
                vm_ip = vm.wait_for_get_address(0, timeout=240)
                remote_session = remote.wait_for_login("ssh", vm_ip, "22", username, password,
                                                       "[\#\$]\s*$")
                if test_cmd:
                    status, output = remote_session.cmd_status_output(test_cmd)
                    logging.debug("CMD '%s' running result is:\n%s", test_cmd, output)
                    if status:
                        test.fail("Failed to boot %s from %s" % (vm_name, vmxml.xml))
                remote_session.close()
        logging.debug("Succeed to boot %s" % vm_name)
    finally:
        # Remove /etc/ceph/ceph.conf file if exists.
        if os.path.exists('/etc/ceph/ceph.conf'):
            os.remove('/etc/ceph/ceph.conf')
        logging.debug("Start to cleanup")
        if vm.is_alive():
            vm.destroy()
        logging.debug("Restore the VM XML")
        vmxml_backup.sync(options="--nvram")
        if cleanup_gluster:
            process.run("umount /mnt", ignore_status=True, shell=True)
            utlv.setup_or_cleanup_gluster(False, vol_name, brick_path)
        if cleanup_iscsi:
            utlv.setup_or_cleanup_iscsi(False)
        if cleanup_iso_file:
            process.run("rm -rf %s" % boot_iso_file, shell=True, ignore_status=True)
        if cleanup_image_file:
            process.run("rm -rf %s" % download_file_path, shell=True, ignore_status=True)
Example #56
def run(test, params, env):
    """
    Test vcpu affinity feature as follows:
    positive test:
        1. use vcpu cpuset in xml to define vcpu affinity
        2. use cputune cpuset in xml to define vcpu affinity
        3. use offline-to-online host cpu as cpuset to run virsh vcpupin
        4. set vcpu placement in xml to auto and check xml result
        5. set vcpu cpuset in xml without placement defined and check xml result
    negative test:
        1. use an out-of-range cpuset as vcpu cpuset in xml to define vcpu affinity
        2. use an out-of-range cpuset as cputune cpuset in xml to define vcpu affinity
        3. use an invalid cpuset as cputune cpuset in xml to define vcpu affinity
        4. use a duplicate vcpu in xml to define vcpu affinity
        5. use an offline host cpu as cputune cpuset to run virsh vcpupin
        6. set vcpu affinity for a non-existent vcpu and check xml result
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    cpuset_mask = params.get("cpuset_mask", "")
    vcpu = params.get("vcpu", "0")
    maxvcpu = params.get("maxvcpu", "8")
    current_vcpu = params.get("current_vcpu", "3")
    check = params.get("check", "")
    config_xml = params.get("config_xml", "")

    status_error = "yes" == params.get("status_error", "no")
    define_fail = "yes" == params.get("define_fail", "no")
    start_fail = "yes" == params.get("start_fail", "no")
    runtime_fail = "yes" == params.get("runtime_fail", "no")
    vm_down = "yes" == params.get("vm_down", "no")

    vcpu_cpuset = params.get("vcpu_cpuset", "")
    cputune_cpuset = params.get("cputune_cpuset", "")
    vcpu_placement = params.get("vcpu_placement", "static")
    err_msg = params.get("err_msg", "")
    start_timeout = int(params.get("start_timeout", "60"))
    offline_hostcpus = params.get("offline_hostcpus", "")
    machine_cpuset_path = params.get("machine_cpuset_path", "")

    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    def check_vcpu_affinity():
        """
        check vcpu affinity defined by vcpu cpuset or cputune cpuset
        """
        affinity = vcpu_cpuset if not cputune_cpuset else cputune_cpuset
        affinity = {vcpu: affinity}
        virsh.vcpuinfo(vm_name, debug=True)

        vmxml_live = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug(vmxml_live)

        # if vcpu >= maxvcpu, the cputune should not exist in xml
        if int(vcpu) >= int(maxvcpu):
            try:
                if hasattr(vmxml_live, 'cputune'):
                    test.fail("cputune tag is set when vcpu >= maxvcpu")
                # check the expected vcpu affinity with the one got from running vm
                elif not utils_hotplug.check_affinity(vm, affinity):
                    test.fail("vcpu affinity check fail")
            except xcepts.LibvirtXMLError:
                pass

    try:
        hostcpu_num = int(cpu.total_cpus_count())

        # online all host cpus
        for x in range(hostcpu_num):
            if cpu.online(x):
                test.fail("fail to online cpu{}".format(x))

        # use vcpu cpuset or/and cputune cpuset to define xml
        del vmxml.cputune
        del vmxml.vcpus
        del vmxml.placement
        vmxml.vcpu = int(maxvcpu)
        vmxml.current_vcpu = current_vcpu

        # config vcpu cpuset for cpuset range test
        num = 1 if not status_error else 0
        cpuset_new = "0-{},^{}".format(hostcpu_num-num, cpuset_mask)
        if (config_xml == "vcpu" and check.endswith("range_cpuset")):
            vcpu_cpuset = cpuset_new
        vmxml.cpuset = vcpu_cpuset

        if vcpu_placement:
            vmxml.placement = vcpu_placement

        if config_xml == "cputune":
            cputune = vm_xml.VMCPUTuneXML()
            if check.endswith("range_cpuset"):
                cputune_cpuset = cpuset_new
            if check.endswith("duplicate_vcpu"):
                cputune.vcpupins = [{'vcpu': vcpu, 'cpuset': "2"}, {'vcpu': vcpu, 'cpuset': "3"}]
            else:
                cputune.vcpupins = [{'vcpu': vcpu, 'cpuset': cputune_cpuset}]
            vmxml.cputune = cputune

        logging.debug(vmxml)
        if status_error and define_fail:
            result_to_check = virsh.define(vmxml.xml, debug=True)
        else:
            vmxml.sync()

        # start the vm
        if status_error and start_fail:
            result_to_check = virsh.start(vm_name, debug=True)

        if (not status_error) or runtime_fail:
            result_to_check = virsh.start(vm_name, debug=True, ignore_status=False)
            vm.wait_for_login(timeout=start_timeout).close()

            # test vcpu cpuset in offline/online  host cpu scenario
            if check.endswith("offline_hostcpu"):
                if vm_down:
                    vm.shutdown()
                for x in offline_hostcpus.split(','):
                    if cpu.offline(x):
                        test.fail("fail to offline cpu{}".format(x))
                    logging.debug("offline host cpu {}".format(x))
                if vm_down:
                    vm.start()
                    vm.wait_for_login(timeout=start_timeout).close()
                if not status_error:
                    # online host cpu
                    if cpu.online(cputune_cpuset):
                        test.fail("fail to online cpu{}".format(cputune_cpuset))

            # run virsh vcpupin to config vcpu affinity
            if check.startswith("cputune") and (not config_xml):
                result_to_check = virsh.vcpupin(vm_name, vcpu, cputune_cpuset, debug=True)

            if check == "vcpu_placement":
                check_vcpu_placement(test, params)
            elif not status_error:
                check_vcpu_affinity()

        if 'result_to_check' in locals():
            if err_msg:
                err_msg = err_msg.split(";")
            libvirt.check_result(result_to_check, err_msg)

    finally:
        vmxml_backup.sync()

        # recovery the host cpu env
        for x in range(hostcpu_num):
            cpu.online(x)
        cmd = "echo '0-{}' > {}".format(hostcpu_num-1, machine_cpuset_path)
        process.run(cmd, shell=True)
Example #57
def run(test, params, env):
    """
    Test for basic serial character device function.

    1) Define the VM with the specified serial device and check the define
       result meets expectation.
    2) Test whether defined XML meets expectation
    3) Start the guest and check if start result meets expectation
    4) Test the function of started serial device
    5) Shutdown the VM and check whether cleaned up properly
    6) Clean up environment
    """

    def set_targets(serial):
        """
        Set the serial target model and type according to the machine type
        """
        machine = platform.machine()
        if "ppc" in machine:
            serial.target_model = 'spapr-vty'
            serial.target_type = 'spapr-vio-serial'
        elif "aarch" in machine:
            serial.target_model = 'pl011'
            serial.target_type = 'system-serial'
        else:
            serial.target_model = target_type
            serial.target_type = target_type

    def prepare_spice_graphics_device():
        """
        Prepare a spice graphics device XML according to parameters
        """
        graphic = Graphics(type_name='spice')
        graphic.autoport = "yes"
        graphic.port = "-1"
        graphic.tlsPort = "-1"
        return graphic

    def prepare_serial_device():
        """
        Prepare a serial device XML according to parameters
        """
        local_serial_type = serial_type
        if serial_type == "tls":
            local_serial_type = "tcp"
        serial = librarian.get('serial')(local_serial_type)

        serial.target_port = "0"

        set_targets(serial)

        sources = []
        logging.debug(sources_str)
        for source_str in sources_str.split():
            source_dict = {}
            for att in source_str.split(','):
                key, val = att.split(':')
                source_dict[key] = val
            sources.append(source_dict)
        serial.sources = sources
        return serial

    def prepare_console_device():
        """
        Prepare a console device XML according to parameters
        """
        local_serial_type = serial_type
        if serial_type == "tls":
            local_serial_type = "tcp"
        console = librarian.get('console')(local_serial_type)
        console.target_type = console_target_type
        console.target_port = console_target_port

        sources = []
        logging.debug(sources_str)
        for source_str in sources_str.split():
            source_dict = {}
            for att in source_str.split(','):
                key, val = att.split(':')
                source_dict[key] = val
            sources.append(source_dict)
        console.sources = sources
        return console

    def define_and_check():
        """
        Predict the error message when defining and try to define the guest
        with testing serial device.
        """
        fail_patts = []
        if serial_type in ['dev', 'file', 'pipe', 'unix'] and not any(
                ['path' in s for s in serial_dev.sources]):
            fail_patts.append(r"Missing source path attribute for char device")
        if serial_type in ['tcp'] and not any(
                ['host' in s for s in serial_dev.sources]):
            fail_patts.append(r"Missing source host attribute for char device")
        if serial_type in ['tcp', 'udp'] and not any(
                ['service' in s for s in serial_dev.sources]):
            fail_patts.append(r"Missing source service attribute for char "
                              "device")
        if serial_type in ['spiceport'] and not any(
                ['channel' in s for s in serial_dev.sources]):
            fail_patts.append(r"Missing source channel attribute for char "
                              "device")
        if serial_type in ['nmdm'] and not any(
                ['master' in s for s in serial_dev.sources]):
            fail_patts.append(r"Missing master path attribute for nmdm device")
        if serial_type in ['nmdm'] and not any(
                ['slave' in s for s in serial_dev.sources]):
            fail_patts.append(r"Missing slave path attribute for nmdm device")
        if serial_type in ['spicevmc']:
            fail_patts.append(r"spicevmc device type only supports virtio")

        vm_xml.undefine()
        res = vm_xml.virsh.define(vm_xml.xml)
        libvirt.check_result(res, expected_fails=fail_patts)
        return not res.exit_status

    def check_xml():
        """
        Predict the result serial device and generated console device
        and check the result domain XML against expectation
        """
        console_cls = librarian.get('console')

        local_serial_type = serial_type

        if serial_type == 'tls':
            local_serial_type = 'tcp'
        # Predict expected serial and console XML
        expected_console = console_cls(local_serial_type)

        if local_serial_type == 'udp':
            sources = []
            for source in serial_dev.sources:
                if 'service' in source and 'mode' not in source:
                    source['mode'] = 'connect'
                sources.append(source)
        else:
            sources = serial_dev.sources

        expected_console.sources = sources

        if local_serial_type == 'tcp':
            if 'protocol_type' in serial_dev:
                expected_console.protocol_type = serial_dev.protocol_type
            else:
                expected_console.protocol_type = "raw"

        expected_console.target_port = serial_dev.target_port
        if 'target_type' in serial_dev:
            expected_console.target_type = serial_dev.target_type
        expected_console.target_type = console_target_type
        logging.debug("Expected console XML is:\n%s", expected_console)

        # Get current serial and console XML
        current_xml = VMXML.new_from_dumpxml(vm_name)
        serial_elem = current_xml.xmltreefile.find('devices/serial')
        console_elem = current_xml.xmltreefile.find('devices/console')
        if console_elem is None:
            test.fail("Expect generate console automatically, "
                      "but found none.")
        if serial_elem and console_target_type != 'serial':
            test.fail("Don't Expect exist serial device, "
                      "but found:\n%s" % serial_elem)

        cur_console = console_cls.new_from_element(console_elem)
        logging.debug("Current console XML is:\n%s", cur_console)
        # Compare current serial and console with oracle.
        if not expected_console == cur_console:
            # "==" has been override
            test.fail("Expect generate console:\n%s\nBut got:\n%s" %
                      (expected_console, cur_console))

        if console_target_type == 'serial':
            serial_cls = librarian.get('serial')
            expected_serial = serial_cls(local_serial_type)
            expected_serial.sources = sources

            set_targets(expected_serial)

            if local_serial_type == 'tcp':
                if 'protocol_type' in serial_dev:
                    expected_serial.protocol_type = serial_dev.protocol_type
                else:
                    expected_serial.protocol_type = "raw"
            expected_serial.target_port = serial_dev.target_port
            if serial_elem is None:
                test.fail("Expect exist serial device, "
                          "but found none.")
            cur_serial = serial_cls.new_from_element(serial_elem)
            if target_type == 'pci-serial':
                if cur_serial.address is None:
                    test.fail("Expect serial device address is not assigned")
                else:
                    logging.debug("Serial address is: %s", cur_serial.address)

            logging.debug("Expected serial XML is:\n%s", expected_serial)
            logging.debug("Current serial XML is:\n%s", cur_serial)
            # Compare current serial and console with oracle.
            if target_type != 'pci-serial' and not expected_serial == cur_serial:
                # "==" has been override
                test.fail("Expect serial device:\n%s\nBut got:\n "
                          "%s" % (expected_serial, cur_serial))

    def prepare_serial_console():
        """
        Prepare serial console to connect with guest serial according to
        the serial type
        """
        is_server = console_type == 'server'
        if serial_type == 'unix':
            # Unix socket path should match SELinux label
            socket_path = '/var/lib/libvirt/qemu/virt-test'
            console = Console('unix', socket_path, is_server)
        elif serial_type == 'tls':
            host = '127.0.0.1'
            service = 5556
            console = Console('tls', (host, service), is_server,
                              custom_pki_path)
        elif serial_type == 'tcp':
            host = '127.0.0.1'
            service = 2445
            console = Console('tcp', (host, service), is_server)
        elif serial_type == 'udp':
            host = '127.0.0.1'
            service = 2445
            console = Console('udp', (host, service), is_server)
        elif serial_type == 'file':
            socket_path = '/var/log/libvirt/virt-test'
            console = Console('file', socket_path, is_server)
        elif serial_type == 'pipe':
            socket_path = '/tmp/virt-test'
            console = Console('pipe', socket_path, is_server)
        else:
            logging.debug("Serial type %s don't support console test yet.",
                          serial_type)
            console = None
        return console

    def check_qemu_cmdline():
        """
        Check if VM's qemu command line meets expectation.
        """
        with open('/proc/%s/cmdline' % vm.get_pid()) as cmdline_file:
            cmdline = cmdline_file.read()
        logging.debug('Qemu command line: %s', cmdline)
        options = cmdline.split('\x00')
        ser_opt = None
        con_opt = None
        ser_dev = None
        con_dev = None
        exp_ser_opt = None
        exp_ser_dev = None
        exp_con_opt = None
        exp_con_dev = None
        # Get serial and console options from qemu command line
        for idx, opt in enumerate(options):
            if opt == '-chardev':
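                # The flattened command line is expected to look like
                # ['-chardev', <chardev opts>, '-device', <device opts>],
                # so the matching device options sit at idx + 3.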
                if 'id=charserial' in options[idx + 1]:
                    ser_opt = options[idx + 1]
                    ser_dev = options[idx + 3]
                    logging.debug('Serial option is: %s', ser_opt)
                    logging.debug('Serial device option is: %s', ser_dev)
                if 'id=charconsole' in options[idx + 1]:
                    con_opt = options[idx + 1]
                    con_dev = options[idx + 3]
                    logging.debug('Console option is: %s', con_opt)
                    logging.debug('Console device option is: %s', con_dev)

        # Get expected serial and console options from params
        if serial_type == 'dev':
            ser_type = 'tty'
        elif serial_type in ['unix', 'tcp', 'tls']:
            ser_type = 'socket'
        else:
            ser_type = serial_type

        exp_ser_opts = [ser_type, 'id=charserial0']
        if serial_type in ['dev', 'file', 'pipe', 'unix']:
            for source in serial_dev.sources:
                if 'path' in source:
                    path = source['path']
            if serial_type == 'file':
                # Use re to make this fdset number flexible
                exp_ser_opts.append(r'path=/dev/fdset/\d+')
                exp_ser_opts.append('append=on')
            elif serial_type == 'unix':
                # Use re to make this fd number flexible
                exp_ser_opts.append(r'fd=\d+')
            else:
                exp_ser_opts.append('path=%s' % path)
        elif serial_type in ['tcp', 'tls']:
            for source in serial_dev.sources:
                if 'host' in source:
                    host = source['host']
                if 'service' in source:
                    port = source['service']
            exp_ser_opts.append('host=%s' % host)
            exp_ser_opts.append('port=%s' % port)
        elif serial_type in ['udp']:
            localaddr = ''
            localport = '0'
            for source in serial_dev.sources:
                if source['mode'] == 'connect':
                    if 'host' in source:
                        host = source['host']
                    if 'service' in source:
                        port = source['service']
                else:
                    if 'host' in source:
                        localaddr = source['host']
                    if 'service' in source:
                        localport = source['service']
            exp_ser_opts.append('host=%s' % host)
            exp_ser_opts.append('port=%s' % port)
            exp_ser_opts.append('localaddr=%s' % localaddr)
            exp_ser_opts.append('localport=%s' % localport)

        if serial_type in ['unix', 'tcp', 'udp', 'tls']:
            for source in serial_dev.sources:
                if 'mode' in source:
                    mode = source['mode']
            if mode == 'bind':
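                # A bind-mode source means QEMU listens on the socket, which
                # shows up as the 'server' and 'nowait' chardev flags.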
                exp_ser_opts.append('server')
                exp_ser_opts.append('nowait')

        if serial_type == 'tls':
            exp_ser_opts.append('tls-creds=objcharserial0_tls0')

        exp_ser_opt = ','.join(exp_ser_opts)

        if "ppc" in platform.machine():
            exp_ser_devs = ['spapr-vty', 'chardev=charserial0',
                            'reg=0x30000000']
            if libvirt_version.version_compare(3, 9, 0):
                exp_ser_devs.insert(2, 'id=serial0')
        else:
            logging.debug('target_type: %s', target_type)
            if target_type == 'pci-serial':
                exp_ser_devs = ['pci-serial', 'chardev=charserial0',
                                'id=serial0', r'bus=pci.\d+', r'addr=0x\d+']
            else:
                exp_ser_devs = ['isa-serial', 'chardev=charserial0',
                                'id=serial0']
        exp_ser_dev = ','.join(exp_ser_devs)

        if console_target_type != 'serial' or serial_type == 'spicevmc':
            exp_ser_opt = None
            exp_ser_dev = None
        logging.debug("exp_ser_opt: %s", exp_ser_opt)
        logging.debug("ser_opt: %s", ser_opt)

        # Check options against expectation
        if exp_ser_opt is not None and re.match(exp_ser_opt, ser_opt) is None:
            test.fail('Expected qemu command serial option "%s", '
                      'but got "%s"' % (exp_ser_opt, ser_opt))
        if exp_ser_dev is not None and ser_dev is not None \
           and re.match(exp_ser_dev, ser_dev) is None:
            test.fail(
                'Expected qemu command serial device option "%s", '
                'but got "%s"' % (exp_ser_dev, ser_dev))

        if console_target_type == 'virtio':
            exp_con_opts = [serial_type, 'id=charconsole1']
            exp_con_opt = ','.join(exp_con_opts)

            exp_con_devs = []
            if console_target_type == 'virtio':
                exp_con_devs.append('virtconsole')
            exp_con_devs += ['chardev=charconsole1', 'id=console1']
            exp_con_dev = ','.join(exp_con_devs)
            if con_opt != exp_con_opt:
                test.fail(
                    'Expected qemu command console option "%s", '
                    'but got "%s"' % (exp_con_opt, con_opt))
            if con_dev != exp_con_dev:
                test.fail(
                    'Expected qemu command console device option "%s", '
                    'but got "%s"' % (exp_con_dev, con_dev))

    def check_serial_console(console, username, password):
        """
        Check whether the console works properly: read the output for a
        read-only console, or log in and run a command for a read-write
        console.
        """
        if serial_type in ['file', 'tls']:
            _, output = console.read_until_output_matches(['.*[Ll]ogin:'])
        else:
            console.wait_for_login(username, password)
            status, output = console.cmd_status_output('pwd')
            logging.debug("Command status: %s", status)
            logging.debug("Command output: %s", output)

    def cleanup(objs_list):
        """
        Clean up test environment
        """
        if serial_type == 'file':
            if os.path.exists('/var/log/libvirt/virt-test'):
                os.remove('/var/log/libvirt/virt-test')

        # Recover the test environment
        for obj in objs_list:
            obj.auto_recover = True
            del obj

    def get_console_type():
        """
        Check whether console should be started before or after guest starting.
        """
        if serial_type in ['tcp', 'unix', 'udp', 'tls']:
            for source in serial_dev.sources:
                if 'mode' in source and source['mode'] == 'connect':
                    return 'server'
            return 'client'
        elif serial_type in ['file']:
            return 'client'
        elif serial_type in ['pipe']:
            return 'server'

    serial_type = params.get('serial_type', 'pty')
    sources_str = params.get('serial_sources', '')
    username = params.get('username')
    password = params.get('password')
    console_target_type = params.get('console_target_type', 'serial')
    target_type = params.get('target_type', 'isa-serial')
    console_target_port = params.get('console_target_port', '0')
    second_serial_console = params.get('second_serial_console', 'no') == 'yes'
    custom_pki_path = params.get('custom_pki_path', '/etc/pki/libvirt-chardev')
    auto_recover = params.get('auto_recover', 'no')
    client_pwd = params.get('client_pwd', None)
    server_pwd = params.get('server_pwd', None)

    args_list = [client_pwd, server_pwd]

    for arg in args_list:
        if arg and arg.count("ENTER.YOUR."):
            test.fail("Please assign a value for %s!" % arg)

    vm_name = params.get('main_vm', 'virt-tests-vm1')
    vm = env.get_vm(vm_name)

    # it's used to clean up TLS objs later
    objs_list = []

    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()
    try:
        if console_target_type != 'virtio':
            vm_xml.remove_all_device_by_type('serial')
            vm_xml.remove_all_device_by_type('console')
        if serial_type == "tls":
            test_dict = dict(params)
            tls_obj = TLSConnection(test_dict)
            if auto_recover == "yes":
                objs_list.append(tls_obj)
            tls_obj.conn_setup(False, True, True)

        serial_dev = prepare_serial_device()
        if console_target_type == 'serial' or second_serial_console:
            logging.debug('Serial device:\n%s', serial_dev)
            vm_xml.add_device(serial_dev)
            if serial_type == "spiceport":
                spice_graphics_dev = prepare_spice_graphics_device()
                logging.debug('Spice graphics device:\n%s', spice_graphics_dev)
                vm_xml.add_device(spice_graphics_dev)

        console_dev = prepare_console_device()
        logging.debug('Console device:\n%s', console_dev)
        vm_xml.add_device(console_dev)
        if second_serial_console:
            console_target_type = 'serial'
            console_target_port = '1'
            console_dev = prepare_console_device()
            logging.debug('Console device:\n%s', console_dev)
            vm_xml.add_device(console_dev)
            vm_xml.undefine()
            res = virsh.define(vm_xml.xml)
            if res.stderr.find('Only the first console can be a serial port') != -1:
                logging.debug("Test that only the first console can be a "
                              "serial port succeeded")
                return
            test.fail("Test that only the first console can be a serial "
                      "port failed.")

        console_type = get_console_type()

        if not define_and_check():
            logging.debug("Can't define the VM, exiting.")
            return
        if console_target_type != 'virtio':
            check_xml()

        expected_fails = []
        if serial_type == 'nmdm' and platform.system() != 'FreeBSD':
            expected_fails.append("unsupported chardev 'nmdm'")

        # Prepare console before start when console is server
        if console_type == 'server':
            console = prepare_serial_console()

        res = virsh.start(vm_name)
        libvirt.check_result(res, expected_fails, [])
        if res.exit_status:
            logging.debug("Can't start the VM, exiting.")
            return
        check_qemu_cmdline()
        # Prepare console after start when console is client
        if console_type == 'client':
            console = prepare_serial_console()

        if (console_type is not None and serial_type != 'tls' and
           console_type != 'server'):
            check_serial_console(console, username, password)

        vm.destroy()

    finally:
        cleanup(objs_list)
        vm_xml_backup.sync()

def run(test, params, env):
    """
    Test command: virsh net-define/net-undefine.

    1) Collect parameters&environment info before test
    2) Prepare options for command
    3) Execute command for test
    4) Check state of defined network
    5) Recover environment
    6) Check result
    """
    uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri",
                                                      "default"))
    net_name = params.get("net_define_undefine_net_name", "default")
    net_uuid = params.get("net_define_undefine_net_uuid", "")
    options_ref = params.get("net_define_undefine_options_ref", "default")
    trans_ref = params.get("net_define_undefine_trans_ref", "trans")
    extra_args = params.get("net_define_undefine_extra", "")
    remove_existing = "yes" == params.get("net_define_undefine_remove_existing",
                                          "yes")
    status_error = "yes" == params.get("status_error", "no")
    check_states = "yes" == params.get("check_states", "no")
    net_persistent = "yes" == params.get("net_persistent")
    net_active = "yes" == params.get("net_active")
    expect_msg = params.get("net_define_undefine_err_msg")

    # define multi ip/dhcp sections in network
    multi_ip = "yes" == params.get("multi_ip", "no")
    netmask = params.get("netmask")
    prefix_v6 = params.get("prefix_v6")
    single_v6_range = "yes" == params.get("single_v6_range", "no")
    # Get 2nd ipv4 dhcp range
    dhcp_ranges_start = params.get("dhcp_ranges_start", None)
    dhcp_ranges_end = params.get("dhcp_ranges_end", None)

    # Get 2 groups of ipv6 ip address and dhcp section
    address_v6_1 = params.get("address_v6_1")
    dhcp_ranges_v6_start_1 = params.get("dhcp_ranges_v6_start_1", None)
    dhcp_ranges_v6_end_1 = params.get("dhcp_ranges_v6_end_1", None)

    address_v6_2 = params.get("address_v6_2")
    dhcp_ranges_v6_start_2 = params.get("dhcp_ranges_v6_start_2", None)
    dhcp_ranges_v6_end_2 = params.get("dhcp_ranges_v6_end_2", None)

    # Edit net xml forward/ip part then define/start to check invalid setting
    edit_xml = "yes" == params.get("edit_xml", "no")
    address_v4 = params.get("address_v4")
    nat_port_start = params.get("nat_port_start")
    nat_port_end = params.get("nat_port_end")
    test_port = "yes" == params.get("test_port", "no")

    virsh_dargs = {'uri': uri, 'debug': False, 'ignore_status': True}
    virsh_instance = virsh.VirshPersistent(**virsh_dargs)

    # libvirt acl polkit related params
    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    virsh_uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    # Prepare environment and record current net_state_dict
    backup = network_xml.NetworkXML.new_all_networks_dict(virsh_instance)
    backup_state = virsh_instance.net_state_dict()
    logging.debug("Backed up network(s): %s", backup_state)

    # Make some XML to use for testing, for now we just copy 'default'
    test_xml = xml_utils.TempXMLFile()  # temporary file
    try:
        # LibvirtXMLBase.__str__ returns XML content
        test_xml.write(str(backup['default']))
        test_xml.flush()
    except (KeyError, AttributeError):
        test.cancel("Test requires default network to exist")

    testnet_xml = get_network_xml_instance(virsh_dargs, test_xml, net_name,
                                           net_uuid, bridge=None)

    if remove_existing:
        for netxml in list(backup.values()):
            netxml.orbital_nuclear_strike()

    # Test both define and undefine, so collect info
    # on both of them for the result check.
    # When something is wrong with the network, fail_flag is set to 1.
    fail_flag = 0
    result_info = []

    if options_ref == "correct_arg":
        define_options = testnet_xml.xml
        undefine_options = net_name
    elif options_ref == "no_option":
        define_options = ""
        undefine_options = ""
    elif options_ref == "not_exist_option":
        define_options = "/not/exist/file"
        undefine_options = "NOT_EXIST_NETWORK"

    define_extra = undefine_extra = extra_args
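    # Extra (possibly invalid) arguments only reach 'net-define' when the
    # define step itself is the transaction under test; otherwise they are
    # kept for 'net-undefine' only.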
    if trans_ref != "define":
        define_extra = ""

    if params.get('setup_libvirt_polkit') == 'yes':
        virsh_dargs = {'uri': virsh_uri, 'unprivileged_user': unprivileged_user,
                       'debug': False, 'ignore_status': True}
        cmd = "chmod 666 %s" % testnet_xml.xml
        process.run(cmd, shell=True)

    if params.get('net_define_undefine_readonly', 'no') == 'yes':
        virsh_dargs = {'uri': uri, 'debug': False, 'ignore_status': True,
                       'readonly': True}
    try:
        if edit_xml:
            ipxml_v4 = network_xml.IPXML()
            ipxml_v4.address = address_v4
            ipxml_v4.netmask = netmask
            ipxml_v4.dhcp_ranges = {"start": dhcp_ranges_start, "end": dhcp_ranges_end}
            testnet_xml.del_ip()
            testnet_xml.set_ip(ipxml_v4)
            if test_port:
                nat_port = {"start": nat_port_start, "end": nat_port_end}
                testnet_xml.nat_port = nat_port
            testnet_xml.debug_xml()
        if multi_ip:
            # Enabling IPv6 forwarding with RA routes without accept_ra set to 2
            # is likely to cause route loss
            sysctl_cmd = 'sysctl net.ipv6.conf.all.accept_ra'
            original_accept_ra = to_text(
                process.system_output(sysctl_cmd + ' -n'))
            if original_accept_ra != '2':
                process.system(sysctl_cmd + '=2')
            # add another ipv4 address and dhcp range
            set_ip_section(testnet_xml, address_v4, ipv6=False,
                           netmask=netmask,
                           dhcp_ranges_start=dhcp_ranges_start,
                           dhcp_ranges_end=dhcp_ranges_end)
            # add ipv6 address and dhcp range
            set_ip_section(testnet_xml, address_v6_1, ipv6=True,
                           prefix_v6=prefix_v6,
                           dhcp_ranges_start=dhcp_ranges_v6_start_1,
                           dhcp_ranges_end=dhcp_ranges_v6_end_1)
            # 2nd ipv6 address and dhcp range
            set_ip_section(testnet_xml, address_v6_2, ipv6=True,
                           prefix_v6=prefix_v6,
                           dhcp_ranges_start=dhcp_ranges_v6_start_2,
                           dhcp_ranges_end=dhcp_ranges_v6_end_2)
        testnet_xml.debug_xml()
        # Run test case
        define_result = virsh.net_define(define_options, define_extra,
                                         **virsh_dargs)
        logging.debug(define_result)
        define_status = define_result.exit_status

        # Check network states
        if check_states and not define_status:
            net_state = virsh_instance.net_state_dict()
            if (net_state[net_name]['active'] or
                    net_state[net_name]['autostart'] or
                    not net_state[net_name]['persistent']):
                fail_flag = 1
                result_info.append("Found wrong network states for "
                                   "defined netowrk: %s" % str(net_state))

        if define_status == 1 and status_error and expect_msg:
            libvirt.check_result(define_result, expect_msg.split(';'))

        # If defining the network succeeded, then try to start it.
        if define_status == 0:
            start_result = virsh.net_start(net_name, extra="", **virsh_dargs)
            logging.debug(start_result)
            start_status = start_result.exit_status

        if trans_ref == "trans":
            if define_status:
                fail_flag = 1
                result_info.append("Define network with right command failed.")
            else:
                if start_status:
                    fail_flag = 1
                    result_info.append("Network is defined as expected, "
                                       "but failed to start it.")

        # Check network states for normal test
        if check_states and not status_error:
            net_state = virsh_instance.net_state_dict()
            if (not net_state[net_name]['active'] or
                    net_state[net_name]['autostart'] or
                    not net_state[net_name]['persistent']):
                fail_flag = 1
                result_info.append("Found wrong network states for "
                                   "started netowrk: %s" % str(net_state))
            # Try to set autostart
            virsh.net_autostart(net_name, **virsh_dargs)
            net_state = virsh_instance.net_state_dict()
            if not net_state[net_name]['autostart']:
                fail_flag = 1
                result_info.append("Failed to set autostart for network %s"
                                   % net_name)
            # Restart libvirtd and check state
            # Close down persistent virsh session before libvirtd restart
            if hasattr(virsh_instance, 'close_session'):
                virsh_instance.close_session()
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
            # Need to redefine virsh_instance after libvirtd restart
            virsh_instance = virsh.VirshPersistent(**virsh_dargs)
            net_state = virsh_instance.net_state_dict()
            if (not net_state[net_name]['active'] or
                    not net_state[net_name]['autostart']):
                fail_flag = 1
                result_info.append("Found wrong network state after restarting"
                                   " libvirtd: %s" % str(net_state))
            logging.debug("undefine network:")
            # prepare the network status
            if not net_persistent:
                virsh.net_undefine(net_name, ignore_status=False)
            if not net_active:
                virsh.net_destroy(net_name, ignore_status=False)
            undefine_status = virsh.net_undefine(undefine_options, undefine_extra,
                                                 **virsh_dargs).exit_status

            net_state = virsh_instance.net_state_dict()
            if net_persistent:
                if undefine_status:
                    fail_flag = 1
                    result_info.append("undefine should succeed but failed")
                if net_active:
                    if (not net_state[net_name]['active'] or
                            net_state[net_name]['autostart'] or
                            net_state[net_name]['persistent']):
                        fail_flag = 1
                        result_info.append("Found wrong network states for "
                                           "undefined netowrk: %s" % str(net_state))
                else:
                    if net_name in net_state:
                        fail_flag = 1
                        result_info.append("Transient network should not exists "
                                           "after undefine : %s" % str(net_state))
            else:
                if not undefine_status:
                    fail_flag = 1
                    result_info.append("undefine transient network should fail "
                                       "but succeed: %s" % str(net_state))
        # Stop network for undefine test anyway
        destroy_result = virsh.net_destroy(net_name, extra="", **virsh_dargs)
        logging.debug(destroy_result)

        # Undefine network
        if not check_states:
            undefine_result = virsh.net_undefine(undefine_options, undefine_extra,
                                                 **virsh_dargs)
            if trans_ref != "define":
                logging.debug(undefine_result)
            undefine_status = undefine_result.exit_status

    finally:
        # Recover environment
        leftovers = network_xml.NetworkXML.new_all_networks_dict(
            virsh_instance)
        for netxml in list(leftovers.values()):
            netxml.orbital_nuclear_strike()

        # Recover from backup
        for netxml in list(backup.values()):
            netxml.sync(backup_state[netxml.name])

        # Close down persistent virsh session (including for all netxml copies)
        if hasattr(virsh_instance, 'close_session'):
            virsh_instance.close_session()

        # Done with file, cleanup
        del test_xml
        del testnet_xml

    # Check status_error
    # If fail_flag is set, it must be transaction test.
    if fail_flag:
        test.fail("Define network for transaction test "
                  "failed:%s" % result_info)

    # The logic to check result:
    # status_error&only undefine:it is negative undefine test only
    # status_error&(no undefine):it is negative define test only
    # (not status_error)&(only undefine):it is positive transaction test.
    # (not status_error)&(no undefine):it is positive define test only
    if status_error:
        if trans_ref == "undefine":
            if undefine_status == 0:
                test.fail("Run successfully with wrong command.")
        else:
            if define_status == 0:
                if start_status == 0:
                    test.fail("Defined an unexpected network "
                              "and started it successfully.")
                else:
                    test.fail("Defined an unexpected network, "
                              "but starting it failed.")
    else:
        if trans_ref == "undefine":
            if undefine_status:
                test.fail("Define network for transaction "
                          "successfully, but undefine failed.")
        else:
            if define_status != 0:
                test.fail("Run failed with the right command")
            else:
                if start_status != 0:
                    test.fail("Network is defined as expected, "
                              "but starting it failed.")
Example #59
def run(test, params, env):
    """
    Test command: virsh setmaxmem.

    1) Prepare vm environment.
    2) Handle params
    3) Run test command and get vm started then get maxmem.
    4) Recover environment.
    5) Check result.
    TODO: support more options: --live, --config, --current.
    """

    def vmxml_max_mem(vm_name):
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        return int(vmxml.max_mem)

    def make_domref(domarg, vm_ref, domid, vm_name, domuuid):
        # Specify domain as argument or parameter
        if domarg == "yes":
            dom_darg_key = "domainarg"
        else:
            dom_darg_key = "domain"

        # How to reference domain
        if vm_ref == "domid":
            dom_darg_value = domid
        elif vm_ref == "domname":
            dom_darg_value = vm_name
        elif vm_ref == "domuuid":
            dom_darg_value = domuuid
        elif vm_ref == "none":
            dom_darg_value = None
        elif vm_ref == "emptystring":
            dom_darg_value = '""'
        else:  # stick in value directly
            dom_darg_value = vm_ref

        return {dom_darg_key: dom_darg_value}

    def make_sizeref(sizearg, mem_ref, original_mem):
        if sizearg == "yes":
            size_darg_key = "sizearg"
        else:
            size_darg_key = "size"

        if mem_ref == "halfless":
            size_darg_value = "%d" % (original_mem // 2)
        elif mem_ref == "halfmore":
            size_darg_value = "%d" % int(original_mem * 1.5)
        elif mem_ref == "same":
            size_darg_value = "%d" % original_mem
        elif mem_ref == "emptystring":
            size_darg_value = '""'
        elif mem_ref == "zero":
            size_darg_value = "0"
        elif mem_ref == "toosmall":
            size_darg_value = "1024"
        elif mem_ref == "toobig":
            size_darg_value = "1099511627776"  # (KiB) One Petabyte
        elif mem_ref == "none":
            size_darg_value = None
        else:  # stick in value directly
            size_darg_value = mem_ref

        return {size_darg_key: size_darg_value}

    def is_old_libvirt():
        regex = r'\s+\[--size\]\s+'
        return not virsh.has_command_help_match('setmaxmem', regex)

    def is_xen_host():
        check_cmd = "ls /dev/kvm"
        return process.run(check_cmd, ignore_status=True, shell=True).exit_status

    def is_in_range(actual, expected, error_percent):
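        # deviation is the percentage by which 'actual' falls short of
        # 'expected'; an 'actual' above 'expected' yields a negative
        # deviation and therefore always passes this check.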
        deviation = 100 - (100 * (float(actual) / float(expected)))
        logging.debug("Deviation: %0.2f%%", float(deviation))
        return float(deviation) <= float(error_percent)

    def print_debug_stats(original_vmxml_mem, original_dominfo_mem,
                          expected_mem, test_vmxml_mem, test_dominfo_mem):
        dbgmsg = ("Original vmxml mem : %d KiB\n"
                  "Original dominfo mem : %d KiB\n"
                  "Expected max mem : %d KiB\n"
                  "Actual vmxml mem   : %d KiB\n"
                  "Actual dominfo mem   : %d KiB\n" % (
                      original_vmxml_mem,
                      original_dominfo_mem,
                      expected_mem,
                      test_vmxml_mem,
                      test_dominfo_mem))
        for dbgline in dbgmsg.splitlines():
            logging.debug(dbgline)

    # MAIN TEST CODE ###
    # Process cartesian parameters
    vm_ref = params.get("setmaxmem_vm_ref", "")
    mem_ref = params.get("setmaxmem_mem_ref", "")
    status_error = "yes" == params.get("status_error", "no")
    flags = params.get("setmaxmem_flags", "")
    domarg = params.get("setmaxmem_domarg", "no")
    sizearg = params.get("setmaxmem_sizearg", "no")
    delta_per = params.get("setmaxmem_delta_per", "10")
    readonly = "yes" == params.get("setmaxmem_readonly", "no")
    expect_msg = params.get("setmaxmem_err_msg")
    vm_name = params.get("main_vm")
    start_vm = "yes" == params.get("start_vm")

    # Gather environment parameters
    vm = env.get_vm(vm_name)

    # FIXME: KVM does not support --live currently.
    if (flags.count('live') or (flags.count('current') and vm.is_alive())):
        test.cancel("KVM does not support --live.")

    # Backup original XML
    original_vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    original_vmxml_mem = vmxml_max_mem(vm_name)

    original_dominfo_mem = vm.get_max_mem()
    domid = vm.get_id()
    domuuid = vm.get_uuid()
    uri = vm.connect_uri

    old_libvirt = is_old_libvirt()
    if old_libvirt:
        logging.info("Running test on older libvirt")
        use_kilobytes = True
    else:
        logging.info("Running test on newer libvirt")
        use_kilobytes = False

    xen_host = is_xen_host()
    if xen_host:
        logging.info("Running on xen host, %s offset is allowed.", delta_per)

    # Argument pattern is complex, build with dargs
    dargs = {'flagstr': flags,
             'use_kilobytes': use_kilobytes,
             'uri': uri, 'ignore_status': True, "debug": True, "readonly": readonly}
    dargs.update(make_domref(domarg, vm_ref, domid, vm_name, domuuid))
    dargs.update(make_sizeref(sizearg, mem_ref, original_dominfo_mem))

    if status_error:
        logging.info("Error Test: Expecting an error to occur!")

    try:
        # Delete numa node configuration before setmaxmem operation
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        try:
            del vmxml.max_mem_rt
            cpu_xml = vmxml.cpu
            cpu_xml.remove_numa_cells()
            vmxml.cpu = cpu_xml
        except Exception:
            # The item doesn't exist
            pass
        vmxml.sync()
        # sync will destroy the vm so need to start again
        if start_vm:
            vm.start()

        result = virsh.setmaxmem(**dargs)
        status = result.exit_status

        # Gather status if not running error test
        # Records whether the guest can be started after maxmem is modified.
        start_status = 0
        if not status_error:
            if flags.count("config"):
                vm.destroy()
            if vm.state() == "shut off":
                try:
                    vm.start()
                except virt_vm.VMStartError as detail:
                    start_status = 1
                    logging.error("Start after VM's maxmem modified failed:%s",
                                  detail)

            # Actual results
            test_vmxml_mem = vmxml_max_mem(vm_name)
            test_dominfo_mem = vm.get_max_mem()

            # Expected results for both vmxml and dominfo
            if sizearg == "yes":
                expected_mem = int(dargs["sizearg"])
            else:
                expected_mem = int(dargs["size"])

            print_debug_stats(original_vmxml_mem, original_dominfo_mem,
                              expected_mem, test_vmxml_mem, test_dominfo_mem)

        else:
            if expect_msg:
                libvirt.check_result(result, expect_msg.split(';'))

            if vm.state() == "paused":
                vm.resume()
    finally:
        original_vmxml.sync()

    # Don't care about memory comparison on error test
    if status_error:
        if status == 0:
            test.fail("Error test did not result in an error.")
    else:
        vmxml_match = (test_vmxml_mem == expected_mem)
        if xen_host:
            dominfo_match = is_in_range(test_dominfo_mem, expected_mem,
                                        delta_per)
        else:
            dominfo_match = (test_dominfo_mem == expected_mem)
        if (status or start_status or not vmxml_match or not dominfo_match):
            msg = "test conditions not met: "
            if status:
                msg += "Non-zero virsh setmaxmem exit code. "
            if not vmxml_match:
                msg += "Max memory in VM's xml is not matched. "
            if not dominfo_match:
                msg += "Max memory in dominfo's output is not matched. "
            if start_status:
                msg += "Start after VM's max mem is modified failed."
            test.fail(msg)

    logging.info("Test end normally.")
Example #60
def run(test, params, env):
    """
    Test mtu feature from virtual network
    """
    vm_name = params.get('main_vm')
    vm = env.get_vm(vm_name)
    mtu_type = params.get('mtu_type')
    mtu_size = params.get('mtu_size', '')
    net = params.get('net', DEFAULT_NET)
    net_type = params.get('net_type', '')
    with_iface = 'yes' == params.get('with_iface', 'no')
    with_net = 'yes' == params.get('with_net', 'no')
    status_error = 'yes' == params.get('status_error', 'no')
    check = params.get('check', '')
    error_msg = params.get('error_msg', '')
    bridge_name = 'br_mtu' + utils_misc.generate_random_string(3)
    add_pkg = params.get('add_pkg', '')
    model = params.get('model', 'virtio')

    def set_network(size, net='default'):
        """
        Set mtu size to a certain network
        """
        logging.info('Set mtu size of network "%s" to %s', net, size)
        default_xml = NetworkXML.new_from_net_dumpxml(net)
        default_xml.mtu = size
        default_xml.sync()
        logging.debug(virsh.net_dumpxml(net))

    def set_interface(mtu_size='', source_network='default',
                      iface_type='network', iface_model='virtio'):
        """
        Set mtu size to a certain interface
        """
        interface_type = 'bridge' if iface_type in ('bridge', 'openvswitch') else iface_type
        iface_dict = {
            'type': interface_type,
            'source': "{'%s': '%s'}" % (interface_type, source_network),
            'model': iface_model
        }

        if iface_type == 'openvswitch':
            iface_dict.update({'virtualport_type': 'openvswitch'})

        if mtu_size:
            iface_dict.update({'mtu': "{'size': %s}" % mtu_size})

        libvirt.modify_vm_iface(vm_name, 'update_iface', iface_dict)
        logging.debug(virsh.dumpxml(vm_name).stdout)

    def get_default_if():
        """
        Get the default host interface that is used by the vm
        """
        ifaces = utils_net.get_sorted_net_if()
        logging.debug('Interfaces on host: %s', ifaces)
        for iface in ifaces[0]:
            if 'Link detected: yes' in process.run('ethtool %s' % iface).stdout_text:
                logging.debug('Found host interface "%s"', iface)
                return iface

    def create_bridge():
        """
        Create a bridge on host for test
        """
        cmd_create_br = 'nmcli con add type bridge con-name %s ifname %s'
        con_name = 'con_' + utils_misc.generate_random_string(3)
        bridge_name = 'br_' + utils_misc.generate_random_string(3)
        process.run(cmd_create_br % (con_name, bridge_name), verbose=True)
        return con_name, bridge_name

    def create_network_xml(name, network_type, base_if='', **kwargs):
        """
        Create a network xml to be defined
        """
        m_net = NetworkXML(name)
        m_net.forward = {'mode': 'bridge'}
        if network_type in ('bridge', 'openvswitch'):
            m_net.bridge = {'name': kwargs['bridge_name']}
        elif network_type == 'macvtap':
            if base_if:
                m_net.forward_interface = [{'dev': base_if}]
        if network_type == 'openvswitch':
            m_net.virtualport_type = 'openvswitch'
        if 'mtu' in kwargs:
            m_net.mtu = kwargs['mtu']
        logging.debug(m_net)
        return m_net.xml

    def create_iface(iface_type, **kwargs):
        """
        Create an interface to be attached to the vm
        """
        m_iface = Interface(iface_type)
        m_iface.mac_address = utils_net.generate_mac_address_simple()
        if 'base_if' in kwargs:
            m_iface.source = {'dev': kwargs['base_if'],
                              'mode': 'vepa'}
        if 'source_net' in kwargs:
            m_iface.source = {'network': kwargs['source_net']}
        if 'mtu' in kwargs:
            m_iface.mtu = {'size': kwargs['mtu']}
        if 'model_net' in kwargs:
            m_iface.model = kwargs['model_net']
        logging.debug(m_iface.get_xml())
        logging.debug(m_iface)
        return m_iface

    def check_mtu(mtu_size, qemu=False):
        """
        Check if mtu meets expectation on host
        """
        error = ''
        live_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        iface_xml = live_vmxml.get_devices('interface')[0]
        logging.debug(iface_xml.target)
        dev = iface_xml.target['dev']
        ifconfig_info = process.run('ifconfig|grep mtu|grep %s' % dev,
                                    shell=True, verbose=True).stdout_text
        if 'mtu %s' % mtu_size in ifconfig_info:
            logging.info('PASS on ifconfig check for vnet.')
        else:
            error += 'Fail on ifconfig check for vnet.'
        if qemu:
            qemu_mtu_info = process.run('ps aux|grep qemu-kvm',
                                        shell=True, verbose=True).stdout_text
            if 'host_mtu=%s' % mtu_size in qemu_mtu_info:
                logging.info('PASS on qemu cmd line check.')
            else:
                error += 'Fail on qemu cmd line check.'
        if error:
            test.fail(error)

    def check_mtu_in_vm(fn_login, mtu_size):
        """
        Check if mtu meets expectations in vm
        """
        session = fn_login()
        check_cmd = 'ifconfig'
        output = session.cmd(check_cmd)
        session.close()
        logging.debug(output)
        if 'mtu %s' % mtu_size not in output:
            test.fail('MTU check inside vm failed.')
        else:
            logging.debug("MTU check inside vm passed.")

    try:
        bk_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        bk_netxml = NetworkXML.new_from_net_dumpxml(DEFAULT_NET)
        if add_pkg:
            add_pkg = add_pkg.split()
            utils_package.package_install(add_pkg)
        if 'openvswitch' in add_pkg:
            br = 'ovsbr0' + utils_misc.generate_random_string(3)
            process.run('systemctl start openvswitch.service', shell=True, verbose=True)
            process.run('ovs-vsctl add-br %s' % br, shell=True, verbose=True)
            process.run('ovs-vsctl show', shell=True, verbose=True)

        if not check or check in ['save', 'managedsave', 'hotplug_save']:
            # Create bridge or network and set mtu
            iface_type = 'network'
            if net_type in ('bridge', 'openvswitch'):
                if net_type == 'bridge':
                    params['con_name'], br = create_bridge()
                if mtu_type == 'network':
                    test_net = create_network_xml(
                        bridge_name, net_type,
                        bridge_name=br
                    )
                    virsh.net_create(test_net, debug=True)
                    virsh.net_dumpxml(bridge_name, debug=True)
                if mtu_type == 'interface':
                    iface_type = net_type
                    bridge_name = br
            elif net_type == 'network':
                if mtu_type == 'network':
                    set_network(mtu_size)

            iface_mtu = 0
            if mtu_type == 'interface':
                iface_mtu = mtu_size
            if mtu_type == 'network' and with_iface:
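                # Give the interface half of the network MTU so the check can
                # confirm that the interface setting, not the network one, is
                # the value actually applied.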
                mtu_size = str(int(mtu_size)//2)
                iface_mtu = mtu_size

            source_net = bridge_name if net_type in ('bridge', 'openvswitch') else 'default'

            # set mtu in vm interface
            set_interface(iface_mtu, source_network=source_net, iface_type=iface_type, iface_model=model)
            vm.start()
            vm_login = vm.wait_for_serial_login if net_type in ('bridge', 'openvswitch') else vm.wait_for_login
            vm_login().close()
            check_qemu = mtu_type == 'interface'

            # Test mtu after save vm
            if check in ('save', 'hotplug_save'):
                if check == 'hotplug_save':
                    iface = create_iface('network', source_net='default',
                                         mtu=mtu_size, model_net=model)
                    params['mac'] = iface.mac_address
                    virsh.attach_device(vm_name, iface.xml, debug=True)
                    virsh.dumpxml(vm_name, debug=True)
                    dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
                    if params['mac'] not in str(dom_xml):
                        test.fail('Failed to attach interface with mtu')
                save_path = os.path.join(data_dir.get_tmp_dir(), vm_name + '.save')
                virsh.save(vm_name, save_path, debug=True)
                virsh.restore(save_path, debug=True)
            if check == 'managedsave':
                virsh.managedsave(vm_name, debug=True)
                virsh.start(vm_name, debug=True)

            # Check in both host and vm
            check_mtu(mtu_size, check_qemu)
            check_mtu_in_vm(vm_login, mtu_size)
            vm_login(timeout=60).close()

            if check == 'hotplug_save':
                virsh.detach_interface(vm_name, 'network %s' % params['mac'], debug=True)
                time.sleep(5)
                dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
                if params['mac'] in str(dom_xml):
                    test.fail('Failed to detach interface with mtu after save-restore')

        else:
            hotplug = 'yes' == params.get('hotplug', 'False')
            if check == 'net_update':
                result = virsh.net_update(
                    DEFAULT_NET, 'modify', 'mtu',
                    '''"<mtu size='%s'/>"''' % mtu_size,
                    debug=True
                )
            if check in ('macvtap', 'bridge_net', 'ovswitch_net'):
                base_if = get_default_if()
                macv_name = 'direct-macvtap' + utils_misc.generate_random_string(3)

                # Test mtu in different types of network
                if mtu_type == 'network':
                    if check == 'macvtap':
                        test_net = create_network_xml(macv_name, 'macvtap',
                                                      base_if, mtu=mtu_size)
                    if check == 'bridge_net':
                        params['con_name'], br = create_bridge()
                        test_net = create_network_xml(
                            bridge_name, 'bridge', mtu=mtu_size,
                            bridge_name=br
                        )
                    if check == 'ovswitch_net':
                        test_net = create_network_xml(
                            bridge_name, 'openvswitch', mtu=mtu_size,
                            bridge_name=br
                        )
                    if 'net_create' in params['id']:
                        result = virsh.net_create(test_net, debug=True)
                    if 'net_define' in params['id']:
                        result = virsh.net_define(test_net, debug=True)

                # Test mtu with or without a binding network
                elif mtu_type == 'interface':
                    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
                    if with_net:
                        test_net = create_network_xml(macv_name, 'macvtap', base_if)
                        virsh.net_create(test_net, debug=True)
                        iface = create_iface('network', source_net=macv_name, mtu=mtu_size)
                        if hotplug:
                            result = virsh.attach_device(vm_name, iface.xml, debug=True)
                        else:
                            vmxml.add_device(iface)
                            vmxml.sync()
                            result = virsh.start(vm_name)
                    else:
                        iface = create_iface('direct', base_if=base_if, mtu=mtu_size)
                        if hotplug:
                            result = virsh.attach_device(vm_name, iface.xml, debug=True)
                        else:
                            vmxml.add_device(iface)
                            result = virsh.define(vmxml.xml, debug=True)
            if check == 'invalid_val':
                iface = create_iface('network', source_net='default', mtu=mtu_size)
                result = virsh.attach_device(vm_name, iface.xml, debug=True)

            # Check result
            libvirt.check_exit_status(result, status_error)
            libvirt.check_result(result, [error_msg])

    finally:
        bk_xml.sync()
        bk_netxml.sync()
        if 'test_net' in locals():
            virsh.net_destroy(bridge_name, debug=True)
        if params.get('con_name'):
            process.run('nmcli con del %s' % params['con_name'], verbose=True)
        if add_pkg:
            if 'openvswitch' in add_pkg:
                process.run("ovs-vsctl del-br %s" % br, verbose=True)
            utils_package.package_remove(add_pkg)