# Example 1
def run(test, params, env):
    """
    Test misc tests of virtual cpu features

    1) check dumpxml after snapshot-create/revert
    2) check vendor_id
    3) check maximum vcpus with topology settings

    :param test: test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def update_cpu_xml():
        """
        Update the inactive guest XML for the test: apply the requested
        cpu mode/vendor_id and, optionally, the maximum vcpu count with
        or without an explicit topology.
        """
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

        # Reuse the existing <cpu> element when present, otherwise create one
        if vmxml.xmltreefile.find('cpu'):
            cpu_xml = vmxml.cpu
        else:
            cpu_xml = vm_xml.VMCPUXML()
        if cpu_mode:
            cpu_xml.mode = cpu_mode
        if cpu_vendor_id:
            cpu_xml.vendor_id = cpu_vendor_id

        # Update vm's cpu
        vmxml.cpu = cpu_xml
        vmxml.sync()

        if vcpu_max:
            if with_topology:
                # Put all vcpus on a single socket so the topology
                # product (sockets*cores*threads) equals vcpu_max
                vm_xml.VMXML.set_vm_vcpus(vm_name,
                                          int(vcpu_max),
                                          cores=int(vcpu_max),
                                          sockets=1,
                                          threads=1,
                                          add_topology=with_topology,
                                          topology_correction=with_topology)
            else:
                vm_xml.VMXML.set_vm_vcpus(vm_name, int(vcpu_max))

    def do_snapshot(vm_name, expected_str):
        """
        Run snapshot related commands: snapshot-create-as, snapshot-list
        snapshot-dumpxml, snapshot-revert

        :param vm_name: vm name
        :param expected_str: expected string in snapshot-dumpxml
        :raise: test.fail if virsh command failed
        """
        snapshot_name = vm_name + "-snap"
        virsh_dargs = {'debug': True}

        cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name,
                                              **virsh_dargs)
        libvirt.check_exit_status(cmd_result)

        try:
            snapshots = virsh.snapshot_list(vm_name, **virsh_dargs)
        except process.CmdError:
            test.fail("Failed to get snapshots list for %s" % vm_name)
        if snapshot_name not in snapshots:
            test.fail("The snapshot '%s' was not in snapshot-list." %
                      snapshot_name)
        # The cpu settings must survive the snapshot round-trip
        cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name,
                                            **virsh_dargs)
        libvirt.check_result(cmd_result, expected_match=expected_str)

        cmd_result = virsh.snapshot_revert(vm_name, "", "--current",
                                           **virsh_dargs)
        libvirt.check_exit_status(cmd_result)

    # Cancel early if the host libvirt lacks a required feature
    libvirt_version.is_libvirt_feature_supported(params)
    vm_name = params.get('main_vm')
    vm = env.get_vm(vm_name)

    cpu_mode = params.get('cpu_mode')
    vcpu_max = params.get('vcpu_max')
    expected_str_before_startup = params.get("expected_str_before_startup")
    expected_str_after_startup = params.get("expected_str_after_startup")

    test_operations = params.get("test_operations")
    check_vendor_id = "yes" == params.get("check_vendor_id", "no")
    virsh_edit_cmd = params.get("virsh_edit_cmd")
    with_topology = "yes" == params.get("with_topology", "no")

    status_error = "yes" == params.get("status_error", "no")

    cpu_vendor_id = None
    expected_qemuline = None
    cmd_in_guest = params.get("cmd_in_guest")

    # Back up the inactive guest XML so cleanup can restore it
    bkxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        if check_vendor_id:
            # Pick a vendor_id that differs from what the guest would
            # normally see, based on the host CPU vendor
            output = virsh.capabilities(debug=True)
            host_vendor = re.findall(r'<vendor>(\w+)<', output)[0]

            cpu_vendor_id = 'GenuineIntel'
            if host_vendor != "Intel":
                cpu_vendor_id = 'AuthenticAMD'
            logging.debug("Set cpu vendor_id to %s on this host.",
                          cpu_vendor_id)

            expected_qemuline = "vendor=" + cpu_vendor_id
            cmd_in_guest = ("cat /proc/cpuinfo | grep vendor_id | grep {}".
                            format(cpu_vendor_id))

        # Update xml for test
        update_cpu_xml()

        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        logging.debug("Pre-test xml is %s", vmxml.xmltreefile)

        if expected_str_before_startup:
            libvirt.check_dumpxml(vm, expected_str_before_startup)

        if test_operations:
            for action in test_operations.split(","):
                if action == "do_snapshot":
                    do_snapshot(vm_name, expected_str_before_startup)

        if virsh_edit_cmd:
            # exec_virsh_edit returns True on success; matching
            # status_error means the outcome was the opposite of expected
            status = libvirt.exec_virsh_edit(vm_name,
                                             virsh_edit_cmd.split(","))
            if status == status_error:
                test.fail("Virsh edit got unexpected result.")

        # Check if vm could start successfully
        if not status_error:
            result = virsh.start(vm_name, debug=True)
            libvirt.check_exit_status(result)

            if expected_str_after_startup:
                libvirt.check_dumpxml(vm, expected_str_after_startup)

            if expected_qemuline:
                libvirt.check_qemu_cmd_line(expected_qemuline)

            if cmd_in_guest:
                vm_session = vm.wait_for_login()
                status, output = vm_session.cmd_status_output(cmd_in_guest)
                if status:
                    vm_session.close()
                    test.fail("Failed to run '{}' in vm with "
                              "messages:\n{}".format(cmd_in_guest, output))
                vm_session.close()
                if cpu_mode == 'maximum':
                    check_vm_cpu_model(output.strip(), cmd_in_guest, test)
    finally:
        logging.debug("Recover test environment")
        if vm.is_alive():
            vm.destroy()

        # Remove leftover snapshots before syncing back the original XML
        libvirt.clean_up_snapshots(vm_name, domxml=bkxml)
        bkxml.sync()
# Example 2
def run(test, params, env):
    """
    Test misc tests of virtual cpu features

    1) check dumpxml after snapshot-create/revert

    :param test: test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """

    def do_snapshot(vm_name, expected_str):
        """
        Run snapshot related commands: snapshot-create-as, snapshot-list
        snapshot-dumpxml, snapshot-revert

        :param vm_name: vm name
        :param expected_str: expected string in snapshot-dumpxml
        :raise: test.fail if virsh command failed
        """
        snapshot_name = vm_name + "-snap"
        virsh_dargs = {'debug': True}

        cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name,
                                              **virsh_dargs)
        libvirt.check_exit_status(cmd_result)

        try:
            snapshots = virsh.snapshot_list(vm_name, **virsh_dargs)
        except process.CmdError:
            test.fail("Failed to get snapshots list for %s" % vm_name)
        if snapshot_name not in snapshots:
            test.fail("The snapshot '%s' was not in snapshot-list."
                      % snapshot_name)
        # The cpu settings must survive the snapshot round-trip
        cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name,
                                            **virsh_dargs)
        libvirt.check_result(cmd_result, expected_match=expected_str)

        cmd_result = virsh.snapshot_revert(vm_name, "", "--current",
                                           **virsh_dargs)
        libvirt.check_exit_status(cmd_result)

    vm_name = params.get('main_vm')
    vm = env.get_vm(vm_name)

    cpu_mode = params.get('cpu_mode')
    expected_str_before_startup = params.get("expected_str_before_startup")
    expected_str_after_startup = params.get("expected_str_after_startup")
    test_operations = params.get("test_operations")

    # Back up the inactive guest XML so cleanup can restore it
    bkxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

        # Create cpu xml for test; reuse the existing <cpu> element if any
        if vmxml.xmltreefile.find('cpu'):
            cpu_xml = vmxml.cpu
        else:
            cpu_xml = vm_xml.VMCPUXML()
        if cpu_mode:
            cpu_xml.mode = cpu_mode

        # Update vm's cpu
        vmxml.cpu = cpu_xml

        vmxml.sync()

        if expected_str_before_startup:
            libvirt.check_dumpxml(vm, expected_str_before_startup)

        if test_operations:
            for action in test_operations.split(","):
                if action == "do_snapshot":
                    do_snapshot(vm_name, expected_str_before_startup)

        # Check if vm could start successfully
        result = virsh.start(vm_name, debug=True)
        libvirt.check_exit_status(result)

        if expected_str_after_startup:
            libvirt.check_dumpxml(vm, expected_str_after_startup)

    finally:
        logging.debug("Recover test environment")
        if vm.is_alive():
            vm.destroy()

        # Remove leftover snapshots before syncing back the original XML
        libvirt.clean_up_snapshots(vm_name, domxml=bkxml)
        bkxml.sync()
# Example 3
def run(test, params, env):
    """
    Attach/Detach an iscsi network/volume disk to domain

    1. For secret usage testing:
        1.1. Setup an iscsi target with CHAP authentication.
        1.2. Define a secret for iscsi target usage
        1.3. Set secret value
    2. Setup an iscsi target and (for volume disks) an iscsi pool
    3. Create an iscsi network/volume/block disk XML
    4. Attach disk with the XML file and check the disk inside the VM
    5. Run the configured domain operation (save/snapshot/packed check)
    6. Detach the disk
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    disk_device = params.get("disk_device", "disk")
    disk_type = params.get("disk_type", "network")
    disk_src_protocol = params.get("disk_source_protocol", "iscsi")
    disk_src_host = params.get("disk_source_host", "127.0.0.1")
    disk_src_port = params.get("disk_source_port", "3260")
    disk_src_pool = params.get("disk_source_pool")
    disk_src_mode = params.get("disk_source_mode", "host")
    pool_type = params.get("pool_type", "iscsi")
    pool_src_host = params.get("pool_source_host", "127.0.0.1")
    pool_target = params.get("pool_target", "/dev/disk/by-path")
    disk_target = params.get("disk_target", "vdb")
    disk_target_bus = params.get("disk_target_bus", "virtio")
    disk_readonly = params.get("disk_readonly", "no")
    chap_auth = "yes" == params.get("chap_auth", "no")
    chap_user = params.get("chap_username", "")
    chap_passwd = params.get("chap_password", "")
    secret_usage_target = params.get("secret_usage_target")
    secret_ephemeral = params.get("secret_ephemeral", "no")
    secret_private = params.get("secret_private", "yes")
    status_error = "yes" == params.get("status_error", "no")
    vg_name = params.get("virt_disk_vg_name", "vg_test_0")
    lv_name = params.get("virt_disk_lv_name", "lv_test_0")
    driver_packed = params.get("driver_packed", "on")
    disk_packed = "yes" == params.get("disk_packed", "no")
    scsi_packed = "yes" == params.get("scsi_packed", "no")

    # Indicate the PPC platform
    on_ppc = False
    if platform.platform().count('ppc64'):
        on_ppc = True

    if disk_src_protocol == 'iscsi':
        if not libvirt_version.version_compare(1, 0, 4):
            test.cancel("'iscsi' disk doesn't support in"
                        " current libvirt version.")
    if disk_type == "volume":
        if not libvirt_version.version_compare(1, 0, 5):
            test.cancel("'volume' type disk doesn't support in"
                        " current libvirt version.")
    if pool_type == "iscsi-direct":
        if not libvirt_version.version_compare(4, 7, 0):
            test.cancel("iscsi-direct pool is not supported in"
                        " current libvirt version.")
    if ((disk_packed or scsi_packed)
            and not libvirt_version.version_compare(6, 3, 0)):
        test.cancel("The virtio packed attribute is not supported in"
                    " current libvirt version.")
    # Back VM XML
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Fix no more PCI slots issue in certain cases.
    vm_dump_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    machine_type = params.get("machine_type", "pc")
    if machine_type == 'q35':
        # Rebuild the controller layout with a pcie-root plus a set of
        # pcie-root-ports so hotplug has free slots
        vm_dump_xml.remove_all_device_by_type('controller')
        machine_list = vm_dump_xml.os.machine.split("-")
        vm_dump_xml.set_os_attrs(
            **{"machine": machine_list[0] + "-q35-" + machine_list[2]})
        q35_pcie_dict0 = {
            'controller_model': 'pcie-root',
            'controller_type': 'pci',
            'controller_index': 0
        }
        q35_pcie_dict1 = {
            'controller_model': 'pcie-root-port',
            'controller_type': 'pci'
        }
        vm_dump_xml.add_device(libvirt.create_controller_xml(q35_pcie_dict0))
        # Add enough controllers to match multiple times disk attaching requirements
        for i in list(range(1, 12)):
            q35_pcie_dict1.update({'controller_index': "%d" % i})
            vm_dump_xml.add_device(
                libvirt.create_controller_xml(q35_pcie_dict1))
        vm_dump_xml.sync()

    virsh_dargs = {'debug': True, 'ignore_status': True}
    try:
        start_vm = "yes" == params.get("start_vm", "yes")
        if start_vm:
            if vm.is_dead():
                vm.start()
            vm.wait_for_login()
        else:
            if not vm.is_dead():
                vm.destroy()

        if chap_auth:
            # Create a secret xml to define it
            secret_xml = SecretXML(secret_ephemeral, secret_private)
            secret_xml.auth_type = "chap"
            secret_xml.auth_username = chap_user
            secret_xml.usage = disk_src_protocol
            secret_xml.target = secret_usage_target
            with open(secret_xml.xml) as f:
                logging.debug("Define secret by XML: %s", f.read())
            # Define secret
            cmd_result = virsh.secret_define(secret_xml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            # Get secret uuid
            try:
                secret_uuid = cmd_result.stdout.strip().split()[1]
            except IndexError:
                test.error("Fail to get new created secret uuid")

            # Set secret value
            encoding = locale.getpreferredencoding()
            secret_string = base64.b64encode(
                chap_passwd.encode(encoding)).decode(encoding)
            cmd_result = virsh.secret_set_value(secret_uuid, secret_string,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
        else:
            # Set chap_user and chap_passwd to empty to avoid setup
            # CHAP authentication when export iscsi target
            chap_user = ""
            chap_passwd = ""

        # Setup iscsi target
        if disk_type == "block":
            iscsi_target = libvirt.setup_or_cleanup_iscsi(
                is_setup=True,
                is_login=True,
                image_size="1G",
                chap_user=chap_user,
                chap_passwd=chap_passwd,
                portal_ip=disk_src_host)
        else:
            iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
                is_setup=True,
                is_login=False,
                image_size='1G',
                chap_user=chap_user,
                chap_passwd=chap_passwd,
                portal_ip=disk_src_host)
        # Create iscsi pool
        if disk_type == "volume":
            # Create an iscsi pool xml to create it
            pool_src_xml = pool_xml.SourceXML()
            pool_src_xml.host_name = pool_src_host
            pool_src_xml.device_path = iscsi_target
            poolxml = pool_xml.PoolXML(pool_type=pool_type)
            poolxml.name = disk_src_pool
            poolxml.set_source(pool_src_xml)
            poolxml.target_path = pool_target
            if chap_auth:
                pool_src_xml.auth_type = "chap"
                pool_src_xml.auth_username = chap_user
                pool_src_xml.secret_usage = secret_usage_target
                poolxml.set_source(pool_src_xml)
            if pool_type == "iscsi-direct":
                iscsi_initiator = params.get('iscsi_initiator')
                pool_src_xml.iqn_name = iscsi_initiator
                poolxml.set_source(pool_src_xml)
            # Create iscsi/iscsi-direct pool
            cmd_result = virsh.pool_create(poolxml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            xml = virsh.pool_dumpxml(disk_src_pool)
            logging.debug("Pool '%s' XML:\n%s", disk_src_pool, xml)

            def get_vol():
                """Get the volume info"""
                # Refresh the pool
                cmd_result = virsh.pool_refresh(disk_src_pool)
                libvirt.check_exit_status(cmd_result)
                # Get volume name
                cmd_result = virsh.vol_list(disk_src_pool, **virsh_dargs)
                libvirt.check_exit_status(cmd_result)
                vol_list = re.findall(r"(\S+)\ +(\S+)",
                                      str(cmd_result.stdout.strip()))
                # Index 0 holds the table header; the first real volume
                # is at index 1
                if len(vol_list) > 1:
                    return vol_list[1]
                else:
                    return None

            # Wait for a while so that we can get the volume info
            vol_info = utils_misc.wait_for(get_vol, 10)
            if vol_info:
                vol_name, vol_path = vol_info
            else:
                test.error("Failed to get volume info")
            # Snapshot doesn't support raw disk format, create a qcow2 volume
            # disk for snapshot operation.
            if pool_type == "iscsi":
                process.run('qemu-img create -f qcow2 %s %s' %
                            (vol_path, '100M'),
                            shell=True,
                            verbose=True)
            else:
                # Get iscsi URL to create a qcow2 volume disk
                disk_path = ("iscsi://[%s]/%s/%s" %
                             (disk_src_host, iscsi_target, lun_num))
                blk_source = "/mnt/test.qcow2"
                process.run('qemu-img create -f qcow2 %s %s' %
                            (blk_source, '100M'),
                            shell=True,
                            verbose=True)
                process.run('qemu-img convert -O qcow2 %s %s' %
                            (blk_source, disk_path),
                            shell=True,
                            verbose=True)

        # Create block device
        if disk_type == "block":
            logging.debug("iscsi dev name: %s", iscsi_target)
            lv_utils.vg_create(vg_name, iscsi_target)
            device_source = libvirt.create_local_disk("lvm",
                                                      size="10M",
                                                      vgname=vg_name,
                                                      lvname=lv_name)
            logging.debug("New created volume: %s", lv_name)

        # Create iscsi network disk XML
        disk_params = {
            'device_type': disk_device,
            'type_name': disk_type,
            'target_dev': disk_target,
            'target_bus': disk_target_bus,
            'readonly': disk_readonly
        }
        disk_params_src = {}
        if disk_type == "network":
            disk_params_src = {
                'source_protocol': disk_src_protocol,
                'source_name': iscsi_target + "/%s" % lun_num,
                'source_host_name': disk_src_host,
                'source_host_port': disk_src_port
            }
        elif disk_type == "volume":
            if pool_type == "iscsi":
                disk_params_src = {
                    'source_pool': disk_src_pool,
                    'source_volume': vol_name,
                    'driver_type': 'qcow2',
                    'source_mode': disk_src_mode
                }
            # iscsi-direct pool don't include source_mode option
            else:
                disk_params_src = {
                    'source_pool': disk_src_pool,
                    'source_volume': vol_name,
                    'driver_type': 'qcow2'
                }
        elif disk_type == "block":
            disk_params_src = {
                'source_file': device_source,
                'driver_type': 'raw'
            }
            # Start guest with packed attribute in disk
            if disk_packed:
                disk_params_src['driver_packed'] = driver_packed
            # Start guest with packed attribute in scsi controller
            if scsi_packed:
                scsi_controller = Controller("controller")
                scsi_controller.type = "scsi"
                scsi_controller.model = "virtio-scsi"
                scsi_controller.driver = {'packed': driver_packed}
                vm_dump_xml.add_device(scsi_controller)
                vm_dump_xml.sync()
        else:
            test.cancel("Unsupported disk type in this test")
        disk_params.update(disk_params_src)
        if chap_auth and disk_type != "volume":
            disk_params_auth = {
                'auth_user': chap_user,
                'secret_type': disk_src_protocol,
                'secret_usage': secret_xml.target
            }
            disk_params.update(disk_params_auth)
        disk_xml = libvirt.create_disk_xml(disk_params)
        attach_option = params.get("attach_option", "")
        cmd_result = virsh.attach_device(domainarg=vm_name,
                                         filearg=disk_xml,
                                         flagstr=attach_option,
                                         dargs=virsh_dargs)
        libvirt.check_exit_status(cmd_result, status_error)

        if vm.is_dead():
            cmd_result = virsh.start(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

        # Wait for domain is stable
        vm.wait_for_login().close()
        domain_operation = params.get("domain_operation", "")
        if domain_operation == "save":
            save_file = os.path.join(data_dir.get_tmp_dir(), "vm.save")
            cmd_result = virsh.save(vm_name, save_file, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.restore(save_file)
            libvirt.check_exit_status(cmd_result)
            if os.path.exists(save_file):
                os.remove(save_file)
        elif domain_operation == "snapshot":
            # Run snapshot related commands: snapshot-create-as, snapshot-list
            # snapshot-info, snapshot-dumpxml, snapshot-create
            # virsh snapshot-revert is not supported on combined internal and external snapshots
            # see more details from,https://bugzilla.redhat.com/show_bug.cgi?id=1733173
            snapshot_name1 = "snap1"
            snapshot_name2 = "snap2"
            cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name1,
                                                  **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            try:
                virsh.snapshot_list(vm_name, **virsh_dargs)
            except process.CmdError:
                test.fail("Failed getting snapshots list for %s" % vm_name)

            try:
                virsh.snapshot_info(vm_name, snapshot_name1, **virsh_dargs)
            except process.CmdError:
                test.fail("Failed getting snapshots info for %s" % vm_name)

            cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name1,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_create(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_current(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            virsh.snapshot_create_as(vm_name,
                                     snapshot_name2,
                                     ignore_status=False,
                                     debug=True)

            # Best-effort revert: see the bugzilla note above, so the
            # result status is deliberately not checked
            cmd_result = virsh.snapshot_revert(vm_name, snapshot_name1,
                                               **virsh_dargs)

            cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs)
            if snapshot_name2 not in cmd_result:
                test.error("Snapshot %s not found" % snapshot_name2)
        elif domain_operation == "start_with_packed":
            expect_xml_line = "packed=\"%s\"" % driver_packed
            libvirt.check_dumpxml(vm, expect_xml_line)
            expect_qemu_line = "packed=%s" % driver_packed
            libvirt.check_qemu_cmd_line(expect_qemu_line)
        elif domain_operation == "":
            logging.debug("No domain operation provided, so skip it")
        else:
            logging.error("Unsupported operation %s in this case, so skip it",
                          domain_operation)

        def find_attach_disk(expect=True):
            """
            Find attached disk inside the VM
            """
            found_disk = False
            if vm.is_dead():
                test.error("Domain %s is not running" % vm_name)
            else:
                try:
                    session = vm.wait_for_login()
                    # Here the script needs wait for a while for the guest to
                    # recognize the hotplugged disk on PPC
                    if on_ppc:
                        time.sleep(10)
                    cmd = "grep %s /proc/partitions" % disk_target
                    s, o = session.cmd_status_output(cmd)
                    logging.info("%s output: %s", cmd, o)
                    session.close()
                    if s == 0:
                        found_disk = True
                except (LoginError, VMError, ShellError) as e:
                    logging.error(str(e))
            if found_disk == expect:
                logging.debug("Check disk inside the VM PASS as expected")
            else:
                test.error("Check disk inside the VM FAIL")

        # Check disk inside the VM, expect is False if status_error=True
        find_attach_disk(not status_error)

        # Detach disk
        cmd_result = virsh.detach_disk(vm_name,
                                       disk_target,
                                       wait_remove_event=True)
        libvirt.check_exit_status(cmd_result, status_error)

        # Check disk inside the VM
        find_attach_disk(False)

    finally:
        # Clean up snapshot
        # Shut down before cleaning up snapshots
        if vm.is_alive():
            vm.destroy()
        libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)
        # Restore vm
        vmxml_backup.sync("--snapshots-metadata")
        # Destroy pool and undefine secret, which may not exist
        try:
            if disk_type == "volume":
                virsh.pool_destroy(disk_src_pool)
            if disk_type == "block":
                clean_up_lvm(iscsi_target, vg_name, lv_name)
            if chap_auth:
                virsh.secret_undefine(secret_uuid)
        except Exception:
            pass
        libvirt.setup_or_cleanup_iscsi(is_setup=False)
def run(test, params, env):
    """
    Test extended TSEG on Q35 machine types
    <smm state='on'>
        <tseg unit='MiB'>48</tseg>
    </smm>

    Steps:
    1) Edit VM xml for smm or tseg sub element
    2) Verify if Guest can boot as expected
    3) On i440 machine types, the property does not support.
       On Q35 machine types, both Seabios and OVMF Guest can bootup
    """

    vm_name = params.get("main_vm", "")
    vm = env.get_vm(vm_name)
    smm_state = params.get("smm_state", "off")
    unit = params.get("tseg_unit")
    size = params.get("tseg_size")
    boot_type = params.get("boot_type", "")
    loader_type = params.get("loader_type")
    loader = params.get("loader")
    err_msg = params.get("error_msg", "")
    vm_arch_name = params.get("vm_arch_name", "x86_64")
    status_error = ("yes" == params.get("status_error", "no"))

    if not libvirt_version.version_compare(4, 5, 0):
        test.cancel("TSEG does not support in "
                    "current libvirt version")

    if (boot_type == "seabios" and
            not utils_package.package_install('seabios-bin')):
        test.cancel("Failed to install Seabios")

    if (boot_type == 'ovmf' and
            not utils_package.package_install('OVMF')):
        test.cancel("Failed to install OVMF")

    # Back VM XML
    v_xml_backup = vm_xml.VMXML.new_from_dumpxml(vm_name)
    v_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)

    try:
        # Specify boot loader for OVMF
        if boot_type == 'ovmf':
            os_xml = v_xml.os
            os_xml.loader_type = loader_type
            os_xml.loader = loader
            os_xml.loader_readonly = "yes"
            v_xml.os = os_xml

        try:
            features_xml = v_xml.features
        except xcepts.LibvirtXMLNotFoundError:
            if vm_arch_name == 'x86_64':
                # ACPI is required for UEFI on x86_64
                v_xml.xmltreefile.create_by_xpath("/features/acpi")
                features_xml = v_xml.features
            else:
                features_xml = vm_xml.VMFeaturesXML()

        features_xml.smm = smm_state
        if unit and size:
            features_xml.smm_tseg_unit = unit
            features_xml.smm_tseg = size
        v_xml.features = features_xml

        logging.debug("New VM XML is:\n%s", v_xml)
        ret = virsh.define(v_xml.xml)
        utlv.check_result(ret, expected_fails=err_msg)

        # Check result
        if not status_error:
            vm.start()
            if unit and size:
                # If tseg unit is KiB, convert it to MiB
                # as vm dumpxml convert it automatically
                if unit == 'KiB':
                    unit, size = unify_to_MiB(unit, size)
                expect_line = "<tseg unit=\"%s\">%s</tseg>" % (unit, size)
                utlv.check_dumpxml(vm, expect_line)
                # Qemu cmdline use mbytes; use the converted MiB value
                unit, tseg_mbytes = unify_to_MiB(unit, size)
                expect_line = '-global mch.extended-tseg-mbytes=%s' % tseg_mbytes
                utlv.check_qemu_cmd_line(expect_line)
    finally:
        logging.debug("Restore the VM XML")
        if vm.is_alive():
            vm.destroy()
        # OVMF enable nvram by default
        v_xml_backup.sync(options="--nvram")
# Example 5
def run(test, params, env):
    """
    Test command: virsh setmem.

    1) Prepare vm environment.
    2) Handle params
    3) Prepare libvirtd status.
    4) Run test command and wait for current memory's stable.
    5) Recover environment.
    4) Check result.
    """
    def get_vm_usable_mem(session):
        """
        Return the guest's total usable RAM in KiB.

        Reads /proc/meminfo inside the guest and extracts the MemTotal
        value.

        :param session: logged-in guest session
        :return: MemTotal as an integer (KiB)
        """
        meminfo = session.cmd_output("cat /proc/meminfo")
        match = re.search(r'MemTotal:\s+(\d+)\s+[kK]B', meminfo)
        return int(match.group(1))

    def vm_unusable_mem(session):
        """
        Return the amount of RAM (KiB) the guest cannot use.

        Computed as the total physical memory reported by dmidecode
        minus the usable memory reported by /proc/meminfo.

        :param session: logged-in guest session
        :return: unusable memory in KiB
        :raise: test.fail if dmidecode reports no memory devices
        """
        # Get total physical memory from dmidecode
        cmd = "dmidecode -t 17"
        dmi_mem = session.cmd_output(cmd)
        # NOTE: the original pattern used [K|M|G]B, which also matches a
        # literal '|'; [KMG]B is the intended character class.
        dmi_mem_size = re.findall(r'Size:\s(\d+\s+[KMG]B)', dmi_mem)
        if not dmi_mem_size:
            test.fail("Cannot get memory size info inside VM.")
        # Normalize every reported memory bank to KiB and sum them up
        multiplier = {'kb': 1, 'mb': 1024, 'gb': 1048576}
        total_physical_mem = 0
        for size_info in dmi_mem_size:
            mem_size = int(size_info.split()[0].strip())
            mem_unit = size_info.split()[1].strip()
            total_physical_mem += mem_size * multiplier[mem_unit.lower()]
        return total_physical_mem - get_vm_usable_mem(session)

    def make_domref(domarg, vm_ref, domid, vm_name, domuuid):
        """
        Build the domain option for the virsh setmem call.

        :param domarg: "yes" to pass the domain as a positional argument
        :param vm_ref: how the domain is referenced (domid, domname,
                       domuuid, none, emptystring, or a literal value)
        :param domid: the domain's numeric id
        :param vm_name: the domain's name
        :param domuuid: the domain's uuid
        :return: single-entry dict mapping the darg key to its value
        """
        # Specify domain as argument or parameter
        dom_darg_key = "domainarg" if domarg == "yes" else "domain"

        # Map symbolic references to concrete values; anything else is
        # passed through verbatim
        ref_map = {
            "domid": domid,
            "domname": vm_name,
            "domuuid": domuuid,
            "none": None,
            "emptystring": '""',
        }
        dom_darg_value = ref_map.get(vm_ref, vm_ref)

        return {dom_darg_key: dom_darg_value}

    def make_sizeref(sizearg, mem_ref, original_mem):
        """
        Build the size option for the virsh setmem call.

        :param sizearg: "yes" to pass the size as a positional argument
        :param mem_ref: symbolic size (halfless, halfmore, same, zero,
                        toosmall, toobig, none, emptystring) or a literal
        :param original_mem: the VM's current memory in KiB
        :return: single-entry dict mapping the darg key to its value
        """
        size_darg_key = "sizearg" if sizearg == "yes" else "size"

        # Map symbolic size references to concrete values; anything else
        # is used verbatim
        size_map = {
            "halfless": "%d" % (original_mem // 2),
            "halfmore": "%d" % int(original_mem * 1.5),  # no fraction
            "same": "%d" % original_mem,
            "emptystring": '""',
            "zero": "0",
            "toosmall": "1024",
            "toobig": "1099511627776",  # (KiB) One Petabyte
            "none": None,
        }
        size_darg_value = size_map.get(mem_ref, mem_ref)

        return {size_darg_key: size_darg_value}

    def cal_deviation(actual, expected):
        """
        Return the percentage deviation between actual and expected.

        The smaller of the two values is divided by the larger one, so
        the result is symmetric and always lies in [0, 100].

        :param actual: measured value
        :param expected: reference value
        :return: deviation as a float percentage
        """
        low, high = sorted((float(actual), float(expected)))
        return 100 - (100 * (low / high))

    def is_old_libvirt():
        """
        Return True when the installed libvirt is an old version, i.e.
        its 'virsh setmem' help text does not document the optional
        --size flag.
        """
        # Newer libvirt documents "[--size]" in the setmem help output
        return not virsh.has_command_help_match('setmem', r'\s+\[--size\]\s+')

    def print_debug_stats(original_inside_mem, original_outside_mem,
                          test_inside_mem, test_outside_mem,
                          expected_outside_mem, expected_inside_mem,
                          delta_percentage, unusable_mem):
        """
        Log a one-line-per-figure summary of the memory values gathered
        during the test, including inside/outside deviations and the
        acceptable deviation threshold.
        """
        # Calculate deviation
        inside_deviation = cal_deviation(test_inside_mem, expected_inside_mem)
        outside_deviation = cal_deviation(test_outside_mem,
                                          expected_outside_mem)
        stats = [
            "Unusable memory of VM   : %d KiB" % unusable_mem,
            "Original inside memory  : %d KiB" % original_inside_mem,
            "Expected inside memory  : %d KiB" % expected_inside_mem,
            "Actual inside memory    : %d KiB" % test_inside_mem,
            "Inside memory deviation : %0.2f%%" % inside_deviation,
            "Original outside memory : %d KiB" % original_outside_mem,
            "Expected outside memory : %d KiB" % expected_outside_mem,
            "Actual outside memory   : %d KiB" % test_outside_mem,
            "Outside memory deviation: %0.2f%%" % outside_deviation,
            "Acceptable deviation    : %0.2f%%" % delta_percentage,
        ]
        for dbgline in stats:
            logging.debug(dbgline)

    # MAIN TEST CODE ###
    # Process cartesian parameters
    vm_ref = params.get("setmem_vm_ref", "")
    mem_ref = params.get("setmem_mem_ref", "")
    flags = params.get("setmem_flags", "")
    status_error = "yes" == params.get("status_error", "no")
    old_libvirt_fail = "yes" == params.get("setmem_old_libvirt_fail", "no")
    quiesce_delay = int(params.get("setmem_quiesce_delay", "1"))
    domarg = params.get("setmem_domarg", "no")
    sizearg = params.get("setmem_sizearg", "no")
    libvirt_status = params.get("libvirt", "on")
    delta_percentage = float(params.get("setmem_delta_per", "10"))
    start_vm = "yes" == params.get("start_vm", "yes")
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    paused_after_start_vm = "yes" == params.get("paused_after_start_vm", "no")
    manipulate_dom_before_setmem = "yes" == params.get(
        "manipulate_dom_before_setmem", "no")
    manipulate_dom_after_setmem = "yes" == params.get(
        "manipulate_dom_after_setmem", "no")
    manipulate_action = params.get("manipulate_action", "")
    readonly = "yes" == params.get("setmem_readonly", "no")
    expect_msg = params.get("setmem_err_msg")
    driver_packed = params.get("driver_packed", "on")
    with_packed = "yes" == params.get("with_packed", "no")
    expect_xml_line = params.get("expect_xml_line")
    expect_qemu_line = params.get("expect_qemu_line")

    vm = env.get_vm(vm_name)
    # Back up domain XML so the original config can be restored on exit
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    if with_packed and not libvirt_version.version_compare(6, 3, 0):
        test.cancel("The virtio packed attribute is not supported in"
                    " current libvirt version.")

    vmosxml = vmxml.os
    need_mkswap = False
    # S3/S4 tests need the seabios loader, pm-suspend support and,
    # for S4, a swap partition inside the guest
    if manipulate_action in ['s3', 's4']:
        vm.destroy()
        BIOS_BIN = "/usr/share/seabios/bios.bin"
        if os.path.isfile(BIOS_BIN):
            vmosxml.loader = BIOS_BIN
            vmxml.os = vmosxml
            vmxml.sync()
        else:
            logging.error("Not find %s on host", BIOS_BIN)
        vmxml.set_pm_suspend(vm_name, "yes", "yes")
        vm.prepare_guest_agent()
        if manipulate_action == "s4":
            need_mkswap = not vm.has_swap()
        if need_mkswap:
            logging.debug("Creating swap partition")
            vm.create_swap_partition()

    # Optionally replace the memballoon device with the requested model
    memballoon_model = params.get("memballoon_model", "")
    if memballoon_model:
        vm.destroy()
        vmxml.del_device('memballoon', by_tag=True)
        memballoon_xml = vmxml.get_device_class('memballoon')()
        memballoon_xml.model = memballoon_model
        if with_packed:
            memballoon_xml.driver_packed = driver_packed
        vmxml.add_device(memballoon_xml)
        logging.info(memballoon_xml)
        vmxml.sync()
        vm.start()
    # Check guest dumpxml and qemu command in with_packed attribute
    if with_packed:
        expect_xml_line = expect_xml_line % driver_packed
        # qemu >= 6.2 reports the packed property as "true" instead of "on"
        if utils_misc.compare_qemu_version(6, 2, 0, is_rhev=False):
            expect_qemu_line = expect_qemu_line % "true"
        else:
            expect_qemu_line = expect_qemu_line % driver_packed
        libvirt.check_dumpxml(vm, expect_xml_line)
        libvirt.check_qemu_cmd_line(expect_qemu_line)

    remove_balloon_driver = "yes" == params.get("remove_balloon_driver", "no")
    if remove_balloon_driver:
        if not vm.is_alive():
            logging.error("Can't remove module as guest not running")
        else:
            session = vm.wait_for_login()
            cmd = "rmmod virtio_balloon"
            s_rmmod, o_rmmod = session.cmd_status_output(cmd)
            if s_rmmod != 0:
                logging.error(
                    "Fail to remove module virtio_balloon in guest:\n%s",
                    o_rmmod)
            session.close()
    # Get original data
    domid = vm.get_id()
    domuuid = vm.get_uuid()
    uri = vm.connect_uri
    if not vm.is_alive():
        vm.start()
    session = vm.wait_for_login()
    if session.cmd_status('dmidecode'):
        # The physical memory size is in vm xml, use it when dmidecode not
        # supported
        unusable_mem = int(vmxml.max_mem) - get_vm_usable_mem(session)
    else:
        unusable_mem = vm_unusable_mem(session)
    original_outside_mem = vm.get_used_mem()
    original_inside_mem = get_vm_usable_mem(session)
    session.close()
    # Prepare VM state
    if not start_vm:
        vm.destroy()
    else:
        if paused_after_start_vm:
            vm.pause()
    old_libvirt = is_old_libvirt()
    if old_libvirt:
        logging.info("Running test on older libvirt")
        use_kilobytes = True
    else:
        logging.info("Running test on newer libvirt")
        use_kilobytes = False

    # Argument pattern is complex, build with dargs
    dargs = {
        'flagstr': flags,
        'use_kilobytes': use_kilobytes,
        'uri': uri,
        'ignore_status': True,
        "debug": True,
        'readonly': readonly
    }
    dargs.update(make_domref(domarg, vm_ref, domid, vm_name, domuuid))
    dargs.update(make_sizeref(sizearg, mem_ref, original_outside_mem))

    # Prepare libvirtd status
    libvirtd = utils_libvirtd.Libvirtd()
    if libvirt_status == "off":
        libvirtd.stop()
    else:
        if not libvirtd.is_running():
            libvirtd.start()

    # Use logical 'and' (the original bitwise '&' happened to work on
    # bools but is not the intended operator)
    if status_error or (old_libvirt_fail and old_libvirt):
        logging.info("Error Test: Expecting an error to occur!")

    try:
        memory_change = True
        if manipulate_dom_before_setmem:
            manipulate_domain(test, vm_name, manipulate_action)
            if manipulate_action in ['save', 'managedsave', 's4']:
                memory_change = False

        result = virsh.setmem(**dargs)
        status = result.exit_status

        # Compare with '==', not 'is': identity checks against int
        # literals are implementation-dependent (SyntaxWarning on
        # modern CPython)
        if status == 0:
            logging.info("Waiting %d seconds for VM memory to settle",
                         quiesce_delay)
            # It takes time for kernel to settle on new memory
            # and current clean pages is not predictable. Therefore,
            # extremely difficult to determine quiescence, so
            # sleep one second per error percent is reasonable option.
            time.sleep(quiesce_delay)

        if manipulate_dom_before_setmem:
            manipulate_domain(test, vm_name, manipulate_action, True)
        if manipulate_dom_after_setmem:
            manipulate_domain(test, vm_name, manipulate_action)
            manipulate_domain(test, vm_name, manipulate_action, True)

        # Recover libvirtd status
        if libvirt_status == "off":
            libvirtd.start()

        # Gather stats if not running error test
        if not status_error and not old_libvirt_fail:
            # Expected results for both inside and outside
            if remove_balloon_driver:
                expected_mem = original_outside_mem
            else:
                if not memory_change:
                    expected_mem = original_inside_mem
                elif sizearg == "yes":
                    expected_mem = int(dargs["sizearg"])
                else:
                    expected_mem = int(dargs["size"])
            if memory_change:
                # Should minus unusable memory for inside memory check
                expected_inside_mem = expected_mem - unusable_mem
                expected_outside_mem = expected_mem
            else:
                expected_inside_mem = expected_mem
                expected_outside_mem = original_outside_mem

            def get_vm_mem():
                """
                Test results for both inside and outside
                :return: Get vm memory for both inside and outside
                """
                if not memory_change:
                    test_inside_mem = original_inside_mem
                    test_outside_mem = original_outside_mem
                else:
                    if vm.state() == "shut off":
                        vm.start()
                    elif vm.state() == "paused":
                        # Make sure it's never paused
                        vm.resume()
                    session = vm.wait_for_login()

                    # Actual results
                    test_inside_mem = get_vm_usable_mem(session)
                    session.close()
                    test_outside_mem = vm.get_used_mem()
                return (test_inside_mem, test_outside_mem)

            # Don't care about memory comparison on error test
            def verify_outside_result():
                _, test_outside_mem = get_vm_mem()
                return (cal_deviation(test_outside_mem, expected_outside_mem)
                        <= delta_percentage)

            def verify_inside_result():
                test_inside_mem, _ = get_vm_mem()
                return (cal_deviation(test_inside_mem, expected_inside_mem) <=
                        delta_percentage)

            msg = "test conditions not met: "
            error_flag = 0
            if status != 0:
                error_flag = 1
                msg += "Non-zero virsh setmem exit code. "
            if not utils_misc.wait_for(verify_outside_result, timeout=240):
                error_flag = 1
                msg += "Outside memory deviated. "
            if not utils_misc.wait_for(verify_inside_result, timeout=240):
                error_flag = 1
                msg += "Inside memory deviated. "

            test_inside_mem, test_outside_mem = get_vm_mem()
            print_debug_stats(original_inside_mem, original_outside_mem,
                              test_inside_mem, test_outside_mem,
                              expected_outside_mem, expected_inside_mem,
                              delta_percentage, unusable_mem)
            if error_flag:
                test.fail(msg)
        elif not status_error and old_libvirt_fail:
            if status == 0:
                if old_libvirt:
                    test.fail("Error test did not result in an error")
            else:
                if not old_libvirt:
                    test.fail("Newer libvirt failed when it should not")
        else:  # Verify an error test resulted in error
            if status == 0:
                test.fail("Error test did not result in an error")
            if expect_msg:
                libvirt.check_result(result, expect_msg.split(';'))
    finally:
        # Always clean up: swap partition, running VM, and the original XML
        if need_mkswap:
            vm.cleanup_swap()
        vm.destroy()
        backup_xml.sync()
示例#6
0
def run(test, params, env):
    """
    Test misc tests of virtual cpu features

    1) check dumpxml after snapshot-create/revert

    :param test: test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """

    def do_snapshot(vm_name, expected_str):
        """
        Exercise the snapshot life cycle for the VM: snapshot-create-as,
        snapshot-list, snapshot-dumpxml and snapshot-revert.

        :param vm_name: vm name
        :param expected_str: string expected in the snapshot dumpxml
        :raise: test.fail if any virsh command failed
        """
        snapshot_name = vm_name + "-snap"
        virsh_dargs = {'debug': True}

        # Create the snapshot and verify the command succeeded
        libvirt.check_exit_status(
            virsh.snapshot_create_as(vm_name, snapshot_name, **virsh_dargs))

        # The new snapshot must appear in snapshot-list
        try:
            snapshots = virsh.snapshot_list(vm_name, **virsh_dargs)
        except process.CmdError:
            test.fail("Failed to get snapshots list for %s" % vm_name)
        if snapshot_name not in snapshots:
            test.fail("The snapshot '%s' was not in snapshot-list."
                      % snapshot_name)

        # The snapshot XML must carry the expected cpu configuration
        dump_result = virsh.snapshot_dumpxml(vm_name, snapshot_name,
                                             **virsh_dargs)
        libvirt.check_result(dump_result, expected_match=expected_str)

        # Reverting to the current snapshot must also succeed
        libvirt.check_exit_status(
            virsh.snapshot_revert(vm_name, "", "--current", **virsh_dargs))

    # Process cartesian parameters
    vm_name = params.get('main_vm')
    vm = env.get_vm(vm_name)

    cpu_mode = params.get('cpu_mode')
    cpu_vendor_id = params.get("vendor_id")
    expected_str_before_startup = params.get("expected_str_before_startup")
    expected_str_after_startup = params.get("expected_str_after_startup")
    expected_qemuline = params.get("expected_qemuline")
    cmd_in_guest = params.get("cmd_in_guest")
    test_operations = params.get("test_operations")
    status_error = "yes" == params.get("status_error", "no")
    err_msg = params.get("err_msg")

    # Back up the inactive domain XML so it can be restored afterwards
    bkxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        # A configured vendor_id is only valid when it matches the host
        # CPU vendor reported by virsh capabilities; cancel otherwise
        if cpu_vendor_id:
            output = virsh.capabilities(debug=True)
            host_vendor = re.findall(r'<vendor>(\w+)<', output)[0]
            if not re.search(host_vendor, cpu_vendor_id, re.IGNORECASE):
                test.cancel("Not supported cpu vendor_id {} on this host."
                            .format(cpu_vendor_id))

        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

        # Create cpu xml for test; reuse an existing <cpu> element if the
        # domain already has one
        if vmxml.xmltreefile.find('cpu'):
            cpu_xml = vmxml.cpu
        else:
            cpu_xml = vm_xml.VMCPUXML()
        if cpu_mode:
            cpu_xml.mode = cpu_mode
        if cpu_vendor_id:
            cpu_xml.vendor_id = cpu_vendor_id

        # Update vm's cpu
        vmxml.cpu = cpu_xml

        vmxml.sync()

        # The configured cpu element must already show in the inactive XML
        if expected_str_before_startup:
            libvirt.check_dumpxml(vm, expected_str_before_startup)

        # Optional extra operations (currently only snapshot testing)
        if test_operations:
            for action in test_operations.split(","):
                if action == "do_snapshot":
                    do_snapshot(vm_name, expected_str_before_startup)

        # Check if vm could start successfully
        result = virsh.start(vm_name, debug=True)
        libvirt.check_exit_status(result)

        # The live XML and the qemu command line must reflect the config
        if expected_str_after_startup:
            libvirt.check_dumpxml(vm, expected_str_after_startup)

        if expected_qemuline:
            libvirt.check_qemu_cmd_line(expected_qemuline)

        # Optionally verify the cpu feature from inside the guest
        if cmd_in_guest:
            vm_session = vm.wait_for_login()
            if vm_session.cmd_status(cmd_in_guest):
                vm_session.close()
                test.fail("Failed to run '%s' in vm." % cmd_in_guest)
            vm_session.close()

    finally:
        logging.debug("Recover test environment")
        if vm.is_alive():
            vm.destroy()

        # Remove any snapshots created above, then restore the backup XML
        libvirt.clean_up_snapshots(vm_name, domxml=bkxml)
        bkxml.sync()
示例#7
0
def run(test, params, env):
    """
    Test misc tests of virtual cpu features

    1) check dumpxml after snapshot-create/revert
    2) check vendor_id
    3) check maximum vcpus with topology settings

    :param test: test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def update_cpu_xml():
        """
        Prepare the VM's cpu element for the test (mode, vendor_id and,
        optionally, the feature list rebuilt from domcapabilities), then
        apply the requested maximum-vcpu/topology settings.
        """
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

        # Reuse the existing <cpu> element when present, else start fresh
        cpu_xml = (vmxml.cpu if vmxml.xmltreefile.find('cpu')
                   else vm_xml.VMCPUXML())

        if customize_cpu_features:
            # Drop every existing feature (iterate backwards so removals
            # do not shift pending indexes), then re-add the additional
            # host-model features reported by domcapabilities
            for idx in range(len(cpu_xml.get_feature_list()) - 1, -1, -1):
                cpu_xml.remove_feature(idx)
            domcapa_xml = domcapability_xml.DomCapabilityXML()
            extra_features = domcapa_xml.get_additional_feature_list(
                'host-model', ignore_features=None)
            for feature in extra_features:
                for feature_name, feature_policy in feature.items():
                    # For host-passthrough mode, adding "invtsc" requires
                    # more settings, so it will be ignored.
                    if feature_name != "invtsc":
                        cpu_xml.add_feature(feature_name, feature_policy)

        if cpu_mode:
            cpu_xml.mode = cpu_mode
        if cpu_vendor_id:
            cpu_xml.vendor_id = cpu_vendor_id

        # Write the updated cpu element back to the domain
        vmxml.cpu = cpu_xml
        vmxml.sync()

        if vcpu_max:
            vcpus = int(vcpu_max)
            if with_topology:
                # Pin a 1-socket/1-thread topology with one core per vcpu
                vm_xml.VMXML.set_vm_vcpus(vm_name, vcpus, cores=vcpus,
                                          sockets=1, threads=1,
                                          add_topology=True,
                                          topology_correction=True)
            else:
                vm_xml.VMXML.set_vm_vcpus(vm_name, vcpus)

    def do_snapshot(vm_name, expected_str):
        """
        Run snapshot related commands: snapshot-create-as, snapshot-list
        snapshot-dumpxml, snapshot-revert

        :param vm_name: vm name
        :param expected_str: expected string in snapshot-dumpxml
        :raise: test.fail if virsh command failed
        """
        snapshot_name = vm_name + "-snap"
        virsh_dargs = {'debug': True}

        # Create the snapshot and verify the command succeeded
        cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name,
                                              **virsh_dargs)
        libvirt.check_exit_status(cmd_result)

        # The new snapshot must show up in snapshot-list
        try:
            snapshots = virsh.snapshot_list(vm_name, **virsh_dargs)
        except process.CmdError:
            test.fail("Failed to get snapshots list for %s" % vm_name)
        if snapshot_name not in snapshots:
            test.fail("The snapshot '%s' was not in snapshot-list." %
                      snapshot_name)
        # The snapshot XML must contain the expected cpu configuration
        cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name,
                                            **virsh_dargs)
        libvirt.check_result(cmd_result, expected_match=expected_str)

        # Reverting to the current snapshot must succeed as well
        cmd_result = virsh.snapshot_revert(vm_name, "", "--current",
                                           **virsh_dargs)
        libvirt.check_exit_status(cmd_result)

    def check_feature_list(vm, original_dict):
        """
        Verify the VM's current cpu feature list matches the original.

        :param vm: VM object
        :param original_dict: cpu feature dict,
                              {"name1": "policy1", "name2": "policy2"}
        :raise: test.fail when the two feature lists differ
        """
        live_cpu_xml = vm_xml.VMXML.new_from_dumpxml(vm.name).cpu
        current_features = live_cpu_xml.get_dict_type_feature()
        if current_features != original_dict:
            test.fail('CPU feature lists are different, original is :%s,'
                      ' new is %s:' % (original_dict, current_features))

    # Cancel early if the required libvirt feature set is missing
    libvirt_version.is_libvirt_feature_supported(params)
    vm_name = params.get('main_vm')
    vm = env.get_vm(vm_name)

    # Process cartesian parameters
    cpu_mode = params.get('cpu_mode')
    vcpu_max = params.get('vcpu_max')
    expected_str_before_startup = params.get("expected_str_before_startup")
    expected_str_after_startup = params.get("expected_str_after_startup")

    test_operations = params.get("test_operations")
    check_vendor_id = "yes" == params.get("check_vendor_id", "no")
    virsh_edit_cmd = params.get("virsh_edit_cmd")
    with_topology = "yes" == params.get("with_topology", "no")

    status_error = "yes" == params.get("status_error", "no")
    err_msg = params.get("err_msg")

    cpu_vendor_id = None
    expected_qemuline = None
    cmd_in_guest = params.get("cmd_in_guest")
    customize_cpu_features = "yes" == params.get("customize_cpu_features",
                                                 "no")
    # Back up the inactive XML; managed save files live under this path
    bkxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    managed_save_file = "/var/lib/libvirt/qemu/save/%s.save" % vm_name

    try:
        # Pick a vendor_id matching the host CPU vendor and derive the
        # qemu-cmdline pattern plus the in-guest verification command
        if check_vendor_id:
            output = virsh.capabilities(debug=True)
            host_vendor = re.findall(r'<vendor>(\w+)<', output)[0]

            cpu_vendor_id = 'GenuineIntel'
            if host_vendor != "Intel":
                cpu_vendor_id = 'AuthenticAMD'
            logging.debug("Set cpu vendor_id to %s on this host.",
                          cpu_vendor_id)

            expected_qemuline = "vendor=" + cpu_vendor_id
            cmd_in_guest = ("cat /proc/cpuinfo | grep vendor_id | grep {}".
                            format(cpu_vendor_id))

        # Update xml for test
        update_cpu_xml()

        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        logging.debug("Pre-test xml is %s", vmxml.xmltreefile)
        cpu_xml = vmxml.cpu
        # Remember the configured feature list for later comparison
        feature_dict = cpu_xml.get_dict_type_feature()

        # The cpu settings must already show in the inactive XML
        if expected_str_before_startup:
            libvirt.check_dumpxml(vm, expected_str_before_startup)

        if test_operations:
            for action in test_operations.split(","):
                if action == "do_snapshot":
                    do_snapshot(vm_name, expected_str_before_startup)

        # Optionally run virsh edit and compare outcome with expectation
        if virsh_edit_cmd:
            status = libvirt.exec_virsh_edit(vm_name,
                                             virsh_edit_cmd.split(","))
            if status == status_error:
                test.fail("Virsh edit got unexpected result.")

        # Check if vm could start successfully
        if not status_error:
            result = virsh.start(vm_name, debug=True)
            libvirt.check_exit_status(result)

            if expected_str_after_startup:
                libvirt.check_dumpxml(vm, expected_str_after_startup)

            if expected_qemuline:
                libvirt.check_qemu_cmd_line(expected_qemuline)

            # Verify the cpu settings from inside the guest
            if cmd_in_guest:
                vm_session = vm.wait_for_login()
                status, output = vm_session.cmd_status_output(cmd_in_guest)
                if status:
                    vm_session.close()
                    test.fail("Failed to run '{}' in vm with "
                              "messages:\n{}".format(cmd_in_guest, output))
                vm_session.close()
                if cpu_mode == 'maximum':
                    check_vm_cpu_model(output.strip(), cmd_in_guest, test)

            # Add case: Check cpu xml after domain Managedsaved and restored
            if test_operations:
                for item in test_operations.split(','):
                    if item == "managedsave_restore":
                        # (1)Domain Manage saved
                        virsh.managedsave(vm_name,
                                          ignore_status=False,
                                          debug=True)
                        check_feature_list(vm, feature_dict)
                        # (2)Domain Restore
                        virsh.restore(managed_save_file,
                                      ignore_status=False,
                                      debug=True)
                        # (5)Check mode and feature list here
                        libvirt.check_dumpxml(vm, cpu_mode)
                        check_feature_list(vm, feature_dict)

    finally:
        logging.debug("Recover test environment")
        # Drop any leftover managed-save image before restoring the XML
        if os.path.exists(managed_save_file):
            virsh.managedsave_remove(vm_name, debug=True)
        if vm.is_alive():
            vm.destroy()
        libvirt.clean_up_snapshots(vm_name, domxml=bkxml)
        bkxml.sync()
示例#8
0
def run(test, params, env):
    """
    Test sysinfo in guest
    <sysinfo type='fwcfg'>...</sysinfo>
    <sysinfo type='smbios'>...</sysinfo>

    Steps:
    1) Edit VM XML for sysinfo element
    2) Verify if guest can boot as expected
    3) Check if the sysinfo is correct

    :param test: test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """

    # Process cartesian parameters
    vm_name = params.get("main_vm", "")
    vm = env.get_vm(vm_name)
    boot_type = params.get("boot_type", "")
    loader_type = params.get("loader_type")
    loader = params.get("loader")
    sysinfo_type = params.get("sysinfo_type", "")
    entry_name = params.get("entry_name")
    value_string = params.get("value_string")
    with_file = ("yes" == params.get("with_file", "no"))
    with_value = ("yes" == params.get("with_value", "no"))
    entry_file = os.path.join(data_dir.get_tmp_dir(), "provision.ign")
    err_msg = params.get("error_msg", "")
    status_error = ("yes" == params.get("status_error", "no"))
    without_name = ("yes" == params.get("without_name", "no"))

    if not libvirt_version.version_compare(6, 5, 0):
        test.cancel("FWCFG sysinfo is not supported in "
                    "current libvirt version")

    if (boot_type == "seabios"
            and not utils_package.package_install('seabios-bin')):
        test.cancel("Failed to install Seabios")

    if (boot_type == "ovmf" and not utils_package.package_install('OVMF')):
        test.cancel("Failed to install OVMF")

    # Back VM XML
    vmxml_backup = vm_xml.VMXML.new_from_dumpxml(vm_name)
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)

    try:
        # Specify boot loader for OVMF
        if boot_type == "ovmf":
            os_xml = vmxml.os
            os_xml.loader_type = loader_type
            os_xml.loader = loader
            os_xml.loader_readonly = "yes"
            vmxml.os = os_xml

        # Set attributes of fwcfg sysinfo in VMSysinfoXML
        if sysinfo_type == "fwcfg":
            sysinfo_xml = vm_xml.VMSysinfoXML()
            sysinfo_xml.type = sysinfo_type

            # Test with entry value in text
            if with_value:
                sysinfo_xml.entry_name = entry_name
                sysinfo_xml.entry = value_string

            # Test with entry file
            if with_file:
                with open('%s' % entry_file, 'w+') as f:
                    f.write('%s' % value_string)
                sysinfo_xml.entry_name = entry_name
                sysinfo_xml.entry_file = entry_file
            # Negative test without entry name
            elif without_name:
                sysinfo_xml.entry = value_string
            # Negative test without file in entry
            else:
                sysinfo_xml.entry_name = entry_name

            # BUGFIX: attach the element only when it was built; the
            # original unconditional assignment raised NameError for any
            # non-fwcfg sysinfo_type
            vmxml.sysinfo = sysinfo_xml

        # Lazy %-style args avoid formatting when debug logging is off
        logging.debug("New VM XML is:\n%s", vmxml)
        ret = virsh.define(vmxml.xml)
        libvirt.check_result(ret, expected_fails=err_msg)
        result = virsh.start(vm_name, debug=True)
        libvirt.check_exit_status(result)

        if not status_error:
            # Check result in dumpxml and qemu cmdline
            if with_file:
                expect_xml_line = "<entry file=\"%s\" name=\"%s\" />" % (
                    entry_file, entry_name)
                expect_qemu_line = "-fw_cfg name=%s,file=%s" % (entry_name,
                                                                entry_file)
            if with_value:
                expect_xml_line = "<entry name=\"%s\">%s</entry>" % (
                    entry_name, value_string)
                expect_qemu_line = "-fw_cfg name=%s,string=%s" % (entry_name,
                                                                  value_string)
            libvirt.check_dumpxml(vm, expect_xml_line)
            libvirt.check_qemu_cmd_line(expect_qemu_line)

            # Check result in guest
            kwargs = {
                "sysinfo_type": sysinfo_type,
                "value_string": value_string,
                "entry_name": entry_name
            }
            check_in_vm(test, vm, **kwargs)

    finally:
        logging.debug("Start to cleanup")
        if vm.is_alive():
            vm.destroy()
        logging.debug("Restore the VM XML")
        vmxml_backup.sync(options="--nvram")
        # Remove tmp file
        if os.path.exists(entry_file):
            os.remove(entry_file)
示例#9
0
def run(test, params, env):
    """
    Test extended TSEG on Q35 machine types

    <smm state='on'>
        <tseg unit='MiB'>48</tseg>
    </smm>

    Steps:
    1) Edit VM xml for smm or tseg sub element
    2) Verify if Guest can boot as expected
    3) On i440 machine types, the property does not support.
       On Q35 machine types, both Seabios and OVMF Guest can bootup

    :param test: test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """

    vm_name = params.get("main_vm", "")
    vm = env.get_vm(vm_name)
    smm_state = params.get("smm_state", "off")
    unit = params.get("tseg_unit")
    size = params.get("tseg_size")
    boot_type = params.get("boot_type", "")
    loader_type = params.get("loader_type")
    loader = params.get("loader")
    err_msg = params.get("error_msg", "")
    vm_arch_name = params.get("vm_arch_name", "x86_64")
    status_error = ("yes" == params.get("status_error", "no"))

    # The <tseg> element was introduced in libvirt 4.5.0
    if not libvirt_version.version_compare(4, 5, 0):
        test.cancel("TSEG does not support in current libvirt version")

    if (boot_type == "seabios"
            and not utils_package.package_install('seabios-bin')):
        test.cancel("Failed to install Seabios")

    if (boot_type == 'ovmf' and not utils_package.package_install('OVMF')):
        test.cancel("Failed to install OVMF")

    # Back up VM XML so it can be restored in the finally clause;
    # keep a second copy for modification
    v_xml_backup = vm_xml.VMXML.new_from_dumpxml(vm_name)
    v_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)

    try:
        # Specify boot loader for OVMF
        if boot_type == 'ovmf':
            os_xml = v_xml.os
            os_xml.loader_type = loader_type
            os_xml.loader = loader
            os_xml.loader_readonly = "yes"
            v_xml.os = os_xml

        try:
            features_xml = v_xml.features
        except xcepts.LibvirtXMLNotFoundError:
            if vm_arch_name == 'x86_64':
                # ACPI is required for UEFI on x86_64
                v_xml.xmltreefile.create_by_xpath("/features/acpi")
                features_xml = v_xml.features
            else:
                features_xml = vm_xml.VMFeaturesXML()

        features_xml.smm = smm_state
        if unit and size:
            features_xml.smm_tseg_unit = unit
            features_xml.smm_tseg = size
        v_xml.features = features_xml

        logging.debug("New VM XML is:\n%s", v_xml)
        ret = virsh.define(v_xml.xml)
        utlv.check_result(ret, expected_fails=err_msg)

        # Check result
        if not status_error:
            vm.start()
            if unit and size:
                # If tseg unit is KiB, convert it to MiB
                # as vm dumpxml convert it automatically
                if unit == 'KiB':
                    unit, size = unify_to_MiB(unit, size)
                expect_line = "<tseg unit=\"%s\">%s</tseg>" % (unit, size)
                utlv.check_dumpxml(vm, expect_line)
                # Qemu cmdline always uses MiB, so check against the
                # converted value, not the possibly non-MiB 'size'
                unit, tseg_mbytes = unify_to_MiB(unit, size)
                expect_line = '-global mch.extended-tseg-mbytes=%s' % tseg_mbytes
                utlv.check_qemu_cmd_line(expect_line)
    finally:
        logging.debug("Restore the VM XML")
        if vm.is_alive():
            vm.destroy()
        # OVMF enable nvram by default
        v_xml_backup.sync(options="--nvram")