def reset_env(vm_name, xml_file):
    """
    Reset env

    :param vm_name: the vm name
    :xml_file: domain xml file
    """
    virsh.destroy(vm_name)
    virsh.undefine(vm_name)
    virsh.define(xml_file)
    if os.path.exists(xml_file):
        os.remove(xml_file)
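
# A minimal usage sketch, assuming a backup taken earlier via virsh.dumpxml
# (xml_backup is a hypothetical path):
#
#   xml_backup = os.path.join(data_dir.get_tmp_dir(), "%s.xml" % vm_name)
#   virsh.dumpxml(vm_name, extra="--inactive", to_file=xml_backup)
#   ... destructive test steps ...
#   reset_env(vm_name, xml_backup)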
Example #2
def test_add_domain(vm, params):
    """
    Test command add_domain:
    """
    add_ref = params.get("gf_add_ref", "disk")
    readonly = "yes" == params.get("gf_add_readonly")

    gf = utils_test.libguestfs.GuestfishTools(params)

    image_path = params.get("image_path")
    pv_name = params.get("pv_name")
    test_domain_name = "libguestfs_test_domain"
    test_dir = params.get("img_dir", data_dir.get_tmp_dir())
    test_xml = os.path.join(test_dir, 'test_domain.xml')

    xml_content = """<domain type='kvm'>
      <memory>500000</memory>
      <name>%s</name>
      <vcpu>1</vcpu>
      <os>
        <type>hvm</type>
        <boot dev='hd'/>
      </os>
      <devices>
        <disk type='file' device='disk'>
          <source file='%s'/>
          <target dev='hda' bus='ide'/>
        </disk>
      </devices>
    </domain>
    """ % (test_domain_name, image_path)
    with open(test_xml, "w") as f:
        f.write(xml_content)

    virsh.define(test_xml)
    gf.add_domain(test_domain_name)
    gf.run()
    gf_result = gf.list_devices()

    if '/dev/sd' not in gf_result.stdout:
        gf.close_session()
        logging.error(gf_result)
        virsh.undefine(test_domain_name)
        os.remove(test_xml)
        raise error.TestFail("test_add_domain failed")
    gf.close_session()
    virsh.undefine(test_domain_name)
    os.remove(test_xml)
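
# A possible refactor sketch: the same check and cleanup expressed once with
# try/finally, so the cleanup runs on both the failure and the success path.
def _check_devices_and_cleanup(gf, test_domain_name, test_xml):
    try:
        gf_result = gf.list_devices()
        if '/dev/sd' not in gf_result.stdout:
            logging.error(gf_result)
            raise error.TestFail("test_add_domain failed")
    finally:
        gf.close_session()
        virsh.undefine(test_domain_name)
        if os.path.exists(test_xml):
            os.remove(test_xml)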
Example #3
    def restore(self, dom):
        name = dom['name']
        doms = self.current_state
        if name in doms:
            self.remove(doms[name])

        domfile = tempfile.NamedTemporaryFile(delete=False)
        fname = domfile.name
        domfile.writelines(dom['inactive xml'])
        domfile.close()

        try:
            if dom['persistent'] == 'yes':
                res = virsh.define(fname)
                if res.exit_status:
                    raise Exception(str(res))
                if dom['state'] != 'shut off':
                    res = virsh.start(name)
                    if res.exit_status:
                        raise Exception(str(res))
            else:
                res = virsh.create(fname)
                if res.exit_status:
                    raise Exception(str(res))
        finally:
            os.remove(fname)

        if dom['autostart'] == 'enable':
            res = virsh.autostart(name, '')
            if res.exit_status:
                raise Exception(str(res))
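
        # Background note: 'virsh define' registers a persistent domain that
        # stays shut off until started, while 'virsh create' boots a transient
        # domain whose definition disappears once it is destroyed -- hence the
        # branch on dom['persistent'] above.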
Example #4
def enable_normal_boot(vmxml, check_points, define_error, test):
    """
    Undefine/Define VM and check the result

    :param vmxml: The instance of VMXML class
    :param check_points: The list of check points of result
    :param define_error: The define error status
    :param test: Avocado test object
    """
    logging.debug("Boot guest in normal mode:\n%s",
                  open(vmxml.xml).read())
    vmxml.undefine(options="--nvram")
    ret = virsh.define(vmxml.xml)
    if ret.exit_status:
        if define_error:
            utlv.check_result(ret, expected_fails=check_points)
        else:
            test.fail("Failed to define VM from %s" % vmxml.xml)
def run(test, params, env):
    """
    Test interface with unprivileged user
    """
    def create_bridge(br_name, iface_name):
        """
        Create bridge attached to physical interface
        """
        # Make sure the bridge does not already exist
        if libvirt.check_iface(br_name, "exists", "--all"):
            test.cancel("The bridge %s already exists" % br_name)

        # Create bridge
        utils_package.package_install('tmux')
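        # tmux keeps the command sequence alive even if the calling shell
        # loses connectivity while the interface is re-addressed (see the
        # pkill/dhclient steps below).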
        cmd = 'tmux -c "ip link add name {0} type bridge; ip link set {1} up;' \
              ' ip link set {1} master {0}; ip link set {0} up; pkill dhclient; ' \
              'sleep 6; dhclient {0}; ifconfig {1} 0"'.format(br_name, iface_name)
        process.run(cmd, shell=True, verbose=True)

    def check_ping(dest_ip,
                   ping_count,
                   timeout,
                   src_ip=None,
                   session=None,
                   expect_success=True):
        """
        Check if ping result meets expectation
        """
        status, output = utils_net.ping(dest=dest_ip,
                                        count=ping_count,
                                        interface=src_ip,
                                        timeout=timeout,
                                        session=session,
                                        force_ipv4=True)
        success = status == 0

        if success != expect_success:
            test.fail('Ping result did not meet expectation, '
                      'actual result is {}'.format(success))
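
    # Minimal usage sketch, assuming remote_ip and an established console
    # session (both prepared later in this test):
    #
    #   check_ping(remote_ip, ping_count=5, timeout=10)
    #   check_ping(remote_ip, ping_count=5, timeout=10, session=session)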

    if not libvirt_version.version_compare(5, 6, 0):
        test.cancel('Libvirt version is too low for this test.')

    vm_name = params.get('main_vm')
    rand_id = '_' + utils_misc.generate_random_string(3)

    upu_vm_name = 'upu_vm' + rand_id
    user_vm_name = params.get('user_vm_name', 'non_root_vm')
    bridge_name = params.get('bridge_name', 'test_br0') + rand_id
    device_type = params.get('device_type', '')
    iface_name = utils_net.get_net_if(state="UP")[0]
    tap_name = params.get('tap_name', 'mytap0') + rand_id
    macvtap_name = params.get('macvtap_name', 'mymacvtap0') + rand_id
    remote_ip = params.get('remote_ip')
    up_user = params.get('up_user', 'test_upu') + rand_id
    case = params.get('case', '')

    # Create unprivileged user
    logging.info('Create unprivileged user %s', up_user)
    process.run('useradd %s' % up_user, shell=True, verbose=True)
    root_vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    upu_args = {
        'unprivileged_user': up_user,
        'ignore_status': False,
        'debug': True,
    }

    try:
        # Create vm as unprivileged user
        logging.info('Create vm as unprivileged user')
        upu_vmxml = root_vmxml.copy()

        # Prepare vm for unprivileged user
        xml_devices = upu_vmxml.devices
        disks = xml_devices.by_device_tag("disk")
        for disk in disks:
            ori_path = disk.source['attrs'].get('file')
            if not ori_path:
                continue

            file_name = ori_path.split('/')[-1]
            new_disk_path = '/home/{}/{}'.format(up_user, file_name)
            logging.debug('New disk path:{}'.format(new_disk_path))

            # Copy disk image file and chown to make sure that
            # unprivileged user has access
            shutil.copyfile(ori_path, new_disk_path)
            shutil.chown(new_disk_path, up_user, up_user)

            # Modify xml to set new path of disk
            disk_index = xml_devices.index(disk)
            source = xml_devices[disk_index].source
            new_attrs = source.attrs
            new_attrs['file'] = new_disk_path
            source.attrs = new_attrs
            xml_devices[disk_index].source = source
            logging.debug(xml_devices[disk_index].source)

        upu_vmxml.devices = xml_devices

        new_xml_path = '/home/{}/upu.xml'.format(up_user)
        shutil.copyfile(upu_vmxml.xml, new_xml_path)

        # Define vm for unprivileged user
        virsh.define(new_xml_path, **upu_args)
        virsh.domrename(vm_name, upu_vm_name, **upu_args)
        logging.debug(virsh.dumpxml(upu_vm_name, **upu_args))
        upu_vmxml = vm_xml.VMXML()
        upu_vmxml.xml = virsh.dumpxml(upu_vm_name, **upu_args).stdout_text

        if case == 'precreated':
            if device_type == 'tap':
                # Create bridge
                create_bridge(bridge_name, iface_name)

                # Create tap device
                tap_cmd = 'ip tuntap add mode tap user {user} group {user} ' \
                          'name {tap};ip link set {tap} up;ip link set {tap} ' \
                          'master {br}'.format(tap=tap_name, user=up_user,
                                               br=bridge_name)

                # Execute command as root
                process.run(tap_cmd, shell=True, verbose=True)

            if device_type == 'macvtap':
                # Create macvtap device
                mac_addr = utils_net.generate_mac_address_simple()
                macvtap_cmd = 'ip link add link {iface} name {macvtap} address' \
                              ' {mac} type macvtap mode bridge;' \
                              'ip link set {macvtap} up'.format(
                               iface=iface_name,
                               macvtap=macvtap_name,
                               mac=mac_addr)
                process.run(macvtap_cmd, shell=True, verbose=True)
                cmd_get_tap = 'ip link show {} | head -1 | cut -d: -f1'.format(
                    macvtap_name)
                tap_index = process.run(cmd_get_tap, shell=True,
                                        verbose=True).stdout_text.strip()
                device_path = '/dev/tap{}'.format(tap_index)
                logging.debug('device_path: {}'.format(device_path))
                # Change owner and group for device
                process.run('chown {user} {path};chgrp {user} {path}'.format(
                    user=up_user, path=device_path),
                            shell=True,
                            verbose=True)
                # Check if device owner is changed to unprivileged user
                process.run('ls -l %s' % device_path, shell=True, verbose=True)

            # Modify interface
            all_devices = upu_vmxml.devices
            iface_list = all_devices.by_device_tag('interface')
            if not iface_list:
                test.error('No iface to modify')
            iface = iface_list[0]

            # Remove other interfaces
            for ifc in iface_list[1:]:
                all_devices.remove(ifc)

            if device_type == 'tap':
                dev_name = tap_name
            elif device_type == 'macvtap':
                dev_name = macvtap_name
            else:
                test.error('Invalid device type: {}'.format(device_type))

            if_index = all_devices.index(iface)
            iface = all_devices[if_index]
            iface.type_name = 'ethernet'
            iface.target = {'dev': dev_name, 'managed': 'no'}

            if device_type == 'macvtap':
                iface.mac_address = mac_addr
            logging.debug(iface)

            upu_vmxml.devices = all_devices
            logging.debug(upu_vmxml)

            # Define updated xml
            shutil.copyfile(upu_vmxml.xml, new_xml_path)
            upu_vmxml.xml = new_xml_path
            virsh.define(new_xml_path, **upu_args)

            # Switch to unprivileged user and modify vm's interface
            # Start vm as unprivileged user and test network
            virsh.start(upu_vm_name,
                        debug=True,
                        ignore_status=False,
                        unprivileged_user=up_user)
            cmd = ("su - %s -c 'virsh console %s'" % (up_user, upu_vm_name))
            session = aexpect.ShellSession(cmd)
            session.sendline()
            remote.handle_prompts(session, params.get("username"),
                                  params.get("password"), r"[\#\$]\s*$", 30)
            logging.debug(session.cmd_output('ifconfig'))
            check_ping(remote_ip, 5, 10, session=session)
            session.close()

    finally:
        if 'upu_vmxml' in locals():
            virsh.destroy(upu_vm_name, unprivileged_user=up_user)
            virsh.undefine(upu_vm_name, unprivileged_user=up_user)
        if case == 'precreated':
            try:
                if device_type == 'tap':
                    process.run('ip tuntap del mode tap {}'.format(tap_name),
                                shell=True,
                                verbose=True)
                elif device_type == 'macvtap':
                    process.run('ip l del {}'.format(macvtap_name),
                                shell=True,
                                verbose=True)
            except Exception:
                pass
            finally:
                cmd = 'tmux -c "ip link set {1} nomaster;  ip link delete {0};' \
                      'pkill dhclient; sleep 6; dhclient {1}"'.format(bridge_name, iface_name)
                process.run(cmd, shell=True, verbose=True, ignore_status=True)
        process.run('pkill -u {0};userdel -f -r {0}'.format(up_user),
                    shell=True,
                    verbose=True,
                    ignore_status=True)
    # Check disk count after VM shutdown (with --config).
    check_count_after_shutdown = True
    disk_count_after_shutdown = vm_xml.VMXML.get_disk_count(vm_name)
    if test_cmd == "attach-disk":
        if disk_count_after_shutdown == disk_count_before_cmd:
            check_count_after_shutdown = False
    elif test_cmd == "detach-disk":
        if disk_count_after_shutdown < disk_count_before_cmd:
            check_count_after_shutdown = False

    # Recover VM.
    if vm.is_alive():
        vm.destroy(gracefully=False)
    virsh.undefine(vm_name)
    virsh.define(vm_xml_file)
    if os.path.exists(device_source):
        os.remove(device_source)

    # Check results.
    if status_error == 'yes':
        if status == 0:
            raise error.TestFail("virsh %s exit with unexpected value."
                                 % test_cmd)
    else:
        if status != 0:
            raise error.TestFail("virsh %s failed." % test_cmd)
        if test_cmd == "attach-disk":
            if not check_count_after_cmd:
                raise error.TestFail("Cannot see deivce in xml file"
                                      " after attach.")
Example #7
        save_exist = os.path.exists(save_file)

        # Check if the volume exists when storage-related options are used
        volume_exist = volume and os.path.exists(volume)

        # Test define with acl control and recover domain.
        if params.get('setup_libvirt_polkit') == 'yes':
            if virsh.domain_exists(vm.name):
                virsh.undefine(vm_ref, ignore_status=True)
            cmd = "chmod 666 %s" % backup_xml.xml
            utils.run(cmd, ignore_status=False)
            s_define = virsh.define(backup_xml.xml,
                                    unprivileged_user=unprivileged_user,
                                    uri=uri, ignore_status=True, debug=True)
            status3 = s_define.exit_status

    finally:
        # Recover main VM.
        backup_xml.sync()

        # Clean up pool
        if pvtest:
            pvtest.cleanup_pool(pool_name, pool_type,
                                pool_target, emulated_img)
        # Recover VM snapshots.
        if option.count("snapshot") and (not vm.is_lxc()):
            logging.debug("Recover snapshots for domain!")
            for file_item in snp_file_list:
Example #8
def run(test, params, env):
    """
    Test command: virsh edit.

    The command can edit the XML configuration of a domain.
    1. Prepare the test environment: destroy or suspend a VM.
    2. When libvirtd == "off", stop the libvirtd service.
    3. Perform the virsh edit operation.
    4. Recover the test environment.
    5. Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    domid = vm.get_id()
    domuuid = vm.get_uuid()
    vcpucount_result = virsh.vcpucount(vm_name, options="--config --maximum")
    if vcpucount_result.exit_status:
        # Fail back to libvirt_xml way to test vcpucount.
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        original_vcpu = str(vmxml.vcpu)
    else:
        original_vcpu = vcpucount_result.stdout.strip()

    expected_vcpu = str(int(original_vcpu) + 1)

    libvirtd = params.get("libvirtd", "on")
    vm_ref = params.get("edit_vm_ref")
    status_error = params.get("status_error")

    def modify_vcpu(source, edit_cmd):
        """
        Modify vm's cpu information.

        :param source: virsh edit's option.
        :param edit_cmd: an edit command line.
        :return: True if edit succeeded, False if it failed.
        """
        session = aexpect.ShellSession("sudo -s")
        try:
            session.sendline("virsh -c %s edit %s" % (vm.connect_uri, source))
            session.sendline(edit_cmd)
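            # edit_cmd is a vi ':%s' substitution typed into the editor spawned
            # by 'virsh edit'; ESC ('\x1b') leaves insert mode and 'ZZ' writes
            # the file and quits.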
            session.send('\x1b')
            session.send('ZZ')
            remote.handle_prompts(session, None, None, r"[\#\$]\s*$")
            session.close()
            return True
        except Exception:
            return False

    def edit_vcpu(source, guest_name):
        """
        Modify vm's cpu information by virsh edit command.

        :param source: virsh edit's option.
        :param guest_name: vm's name.
        :return: True if edit succeeded, False if it failed.
        """
        dic_mode = {
            "edit": r":%s /[0-9]*<\/vcpu>/" + expected_vcpu + r"<\/vcpu>",
            "recover": r":%s /[0-9]*<\/vcpu>/" + original_vcpu + r"<\/vcpu>"}
        status = modify_vcpu(source, dic_mode["edit"])
        if not status:
            return status
        if params.get("paused_after_start_vm") == "yes":
            virsh.resume(guest_name, ignore_status=True)
            virsh.destroy(guest_name)
        elif params.get("start_vm") == "yes":
            virsh.destroy(guest_name)
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        vcpus = str(vmxml.vcpu)
        # Recover cpuinfo
        # Use name rather than source, since source could be domid
        status = modify_vcpu(guest_name, dic_mode["recover"])
        if status and vcpus != expected_vcpu:
            return False
        return status

    # run test case
    xml_file = os.path.join(test.tmpdir, 'tmp.xml')
    virsh.dumpxml(vm_name, extra="--inactive", to_file=xml_file)

    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    try:
        if vm_ref == "id":
            status = edit_vcpu(domid, vm_name)
        elif vm_ref == "uuid":
            status = edit_vcpu(domuuid, vm_name)
        elif vm_ref == "name" and status_error == "no":
            status = edit_vcpu(vm_name, vm_name)
        else:
            status = False
            if vm_ref.find("invalid") != -1:
                vm_ref = params.get(vm_ref)
            elif vm_ref == "name":
                vm_ref = "%s %s" % (vm_name, params.get("edit_extra_param"))
            edit_status = virsh.edit(vm_ref).exit_status
            if edit_status == 0:
                status = True
    except Exception:
        status = False

    # recover libvirtd service start
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # Recover VM
    if vm.is_alive():
        vm.destroy()
    virsh.undefine(vm_name)
    virsh.define(xml_file)

    # check status_error
    if status_error == "yes":
        if status:
            raise error.TestFail("Run successfully with wrong command!")
    elif status_error == "no":
        if not status:
            raise error.TestFail("Run failed with right command")
Example #9
def run(test, params, env):
    """
    Test vcpu
    """
    vm_name = params.get('main_vm')
    check = params.get('check', '')
    status_error = 'yes' == params.get('status_error', 'no')
    err_msg = params.get('err_msg', '')
    guest_vcpu = params.get('guest_vcpu')
    boot_timeout = int(params.get('boot_timeout', 240))
    start_fail = 'yes' == params.get('start_fail', 'no')

    vm = env.get_vm(vm_name)
    vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    bkxml = vmxml.copy()

    def check_onlinevcpus(vm, cpu_num):
        """

        Check whether all vcpus are online as expected.

        :param vm: the exact VM need to check
        :param cpu_num: the num of online vcpus need to match
        """
        if not utils_misc.wait_for(
                lambda: utils_misc.check_if_vm_vcpu_match(cpu_num, vm),
                timeout=120, step=5, text="wait for vcpu online"):
            test.fail('Not all vcpus are online as expected.')

    def set_iommu(vmxml, **dargs):
        """

        Add iommu device to vm.

        :param vmxml: xml of vm to be add iommu device
        :param dargs: args or the iommu device
        :return:
        """
        logging.info('Add iommu device to vm.')
        iommu_device = Iommu()
        iommu_device.model = dargs.get('model', 'intel')
        iommu_device.driver = dargs.get('driver', {'intremap': 'on', 'eim': 'on'})
        vmxml.add_device(iommu_device)

    try:
        # Check the output of "virsh maxvcpus" for both i440fx and q35 VM
        if check == 'virsh_maxvcpus':
            report_num = params.get('report_num', '')
            logging.info('Check the output of virsh maxvcpus')
            cmd_result = virsh.maxvcpus(debug=True)
            if cmd_result.exit_status == 0 and cmd_result.stdout.strip() == report_num:
                logging.debug('Test passed as the reported max vcpu num is %s', report_num)
            else:
                test.fail('Test failed as the reported max vcpu num is not as expected.')

        # Check the output of "virsh capabilities" for both i440fx and q35 VM
        if check == "virsh_capabilities":
            report_num_pc_7 = params.get('report_num_pc_7', '')
            report_num_q35_73 = params.get('report_num_q35_73', '')
            report_num_q35_7_8 = params.get('report_num_q35_7_8', '')
            logging.info('Check the output of virsh capabilities')
            xmltreefile = capability_xml.CapabilityXML().xmltreefile
            machtype_vcpunum_dict = {}
            for guest in xmltreefile.findall('guest'):
                for arch in guest.findall('arch'):
                    if arch.get('name') == "x86_64":
                        for machine in arch.findall('machine'):
                            machine_text = machine.text
                            vcpunum = machine.get('maxCpus')
                            machtype_vcpunum_dict[machine_text] = vcpunum
            for key in machtype_vcpunum_dict:
                logging.info("%s : %s", key, machtype_vcpunum_dict[key])
                if key.startswith('pc-i440fx') or key.startswith('rhel') or key == 'pc':
                    if machtype_vcpunum_dict[key] != report_num_pc_7:
                        test.fail('Test failed as i440fx_max_vcpus_num in virsh_capa is wrong.')
                if key.startswith('pc-q35') or key == 'q35':
                    if key == "pc-q35-rhel7.3.0":
                        if machtype_vcpunum_dict[key] != report_num_q35_73:
                            test.fail('Test failed as q35_rhel73_max_vcpus_num in virsh_capa is wrong.')
                    else:
                        if machtype_vcpunum_dict[key] != report_num_q35_7_8:
                            test.fail('Test failed as the q35_max_vcpus_num in virsh_capa is wrong.')

        # Test i440fx VM starts with 240(positive)/241(negative) vcpus and hot-plugs vcpus to 240
        if check.startswith('i440fx_test'):
            current_vcpu = params.get('current_vcpu')
            if 'hotplug' not in check:
                vmxml.vcpu = int(guest_vcpu)
                vmxml.sync()
                if status_error:
                    if start_fail:
                        result_need_check = virsh.start(vm_name, debug=True)
                else:
                    vm.start()
                    logging.info(libvirt_xml.VMXML.new_from_dumpxml(vm_name))
                    vm.wait_for_login(timeout=boot_timeout).close()
                    check_onlinevcpus(vm, int(guest_vcpu))
            else:
                vmxml.vcpu = int(guest_vcpu)
                vmxml.current_vcpu = int(current_vcpu)
                vmxml.sync()
                vm.start()
                logging.info(libvirt_xml.VMXML.new_from_dumpxml(vm_name))
                vm.wait_for_login(timeout=boot_timeout).close()
                check_onlinevcpus(vm, int(current_vcpu))
                res = virsh.setvcpus(vm_name, guest_vcpu, debug=True)
                libvirt.check_exit_status(res)
                check_onlinevcpus(vm, int(guest_vcpu))

        # Configure a guest vcpu > 255 without iommu device for q35 VM
        if check == 'no_iommu':
            logging.info('Set vcpu to %s', guest_vcpu)
            vmxml.vcpu = int(guest_vcpu)
            result_need_check = virsh.define(vmxml.xml, debug=True)

        # Set iommu device but not set ioapci in features for q35 VM
        if check == 'with_iommu':
            logging.info('Set vcpu to %s', guest_vcpu)
            vmxml.vcpu = int(guest_vcpu)
            set_iommu(vmxml)
            result_need_check = virsh.define(vmxml.xml, debug=True)

        # Add ioapic and iommu device in xml for q35 VM
        if check.startswith('ioapic_iommu'):
            logging.info('Modify features')
            vm_features = vmxml.features
            vm_features.add_feature('apic')
            vm_features.add_feature('ioapic', 'driver', 'qemu')
            vmxml.features = vm_features
            logging.debug(vmxml.features.get_feature_list())

            logging.info('Set vcpu to %s', guest_vcpu)
            set_iommu(vmxml)

            ori_vcpu = vmxml.vcpu
            vmxml.vcpu = int(guest_vcpu)
            vmxml.current_vcpu = ori_vcpu

            if 'hotplug' not in check:
                vmxml.current_vcpu = int(guest_vcpu)

            vmxml.sync()
            logging.debug(virsh.dumpxml(vm_name))

            if status_error:
                if start_fail:
                    result_need_check = virsh.start(vm_name, debug=True)

            else:
                # Login guest and check guest cpu number
                vm.start()
                session = vm.wait_for_login(timeout=boot_timeout)
                logging.debug(session.cmd('lscpu -e'))

                # Hotplug vcpu to $guest_vcpu
                if 'hotplug' in check:
                    res = virsh.setvcpus(vm_name, guest_vcpu, debug=True)
                    libvirt.check_exit_status(res)

                # Check if vcpu(s) are online
                check_onlinevcpus(vm, int(guest_vcpu))

        # Check result if there's result to check
        if 'result_need_check' in locals():
            libvirt.check_result(result_need_check, err_msg)

    finally:
        bkxml.sync()
def run(test, params, env):
    """
    Test for basic serial character device function.

    1) Define the VM with the specified serial device and check that the define
       result meets expectation
    2) Test whether defined XML meets expectation
    3) Start the guest and check if start result meets expectation
    4) Test the function of started serial device
    5) Shutdown the VM and check whether it is cleaned up properly
    6) Clean up environment
    """
    def set_targets(serial):
        """
        Prepare a serial device XML according to parameters
        """
        machine = platform.machine()
        if "ppc" in machine:
            serial.target_model = 'spapr-vty'
            serial.target_type = 'spapr-vio-serial'
        elif "aarch" in machine:
            serial.target_model = 'pl011'
            serial.target_type = 'system-serial'
        else:
            serial.target_model = 'isa-serial'
            serial.target_type = 'isa-serial'

    def prepare_serial_device():
        """
        Prepare a serial device XML according to parameters
        """
        local_serial_type = serial_type
        if serial_type == "tls":
            local_serial_type = "tcp"
        serial = librarian.get('serial')(local_serial_type)

        serial.target_port = "0"

        set_targets(serial)

        sources = []
        logging.debug(sources_str)
        for source_str in sources_str.split():
            source_dict = {}
            for att in source_str.split(','):
                key, val = att.split(':')
                source_dict[key] = val
            sources.append(source_dict)
        serial.sources = sources
        return serial
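
    # Parsing illustration with a hypothetical serial_sources value:
    # 'mode:connect,host:127.0.0.1,service:2445' splits on whitespace into
    # sources and on ',' / ':' into attributes, yielding
    # [{'mode': 'connect', 'host': '127.0.0.1', 'service': '2445'}].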

    def prepare_console_device():
        """
        Prepare a console device XML according to parameters
        """
        local_serial_type = serial_type
        if serial_type == "tls":
            local_serial_type = "tcp"
        console = librarian.get('console')(local_serial_type)
        console.target_type = console_target_type
        console.target_port = console_target_port

        sources = []
        logging.debug(sources_str)
        for source_str in sources_str.split():
            source_dict = {}
            for att in source_str.split(','):
                key, val = att.split(':')
                source_dict[key] = val
            sources.append(source_dict)
        console.sources = sources
        return console

    def define_and_check():
        """
        Predict the error message when defining and try to define the guest
        with testing serial device.
        """
        fail_patts = []
        if serial_type in [
                'dev', 'file', 'pipe', 'unix'
        ] and not any(['path' in s for s in serial_dev.sources]):
            fail_patts.append(r"Missing source path attribute for char device")
        if serial_type in [
                'tcp'
        ] and not any(['host' in s for s in serial_dev.sources]):
            fail_patts.append(r"Missing source host attribute for char device")
        if serial_type in [
                'tcp', 'udp'
        ] and not any(['service' in s for s in serial_dev.sources]):
            fail_patts.append(r"Missing source service attribute for char "
                              "device")
        if serial_type in [
                'spiceport'
        ] and not any(['channel' in s for s in serial_dev.sources]):
            fail_patts.append(r"Missing source channel attribute for char "
                              "device")
        if serial_type in [
                'nmdm'
        ] and not any(['master' in s for s in serial_dev.sources]):
            fail_patts.append(r"Missing master path attribute for nmdm device")
        if serial_type in [
                'nmdm'
        ] and not any(['slave' in s for s in serial_dev.sources]):
            fail_patts.append(r"Missing slave path attribute for nmdm device")
        if serial_type in ['spicevmc']:
            fail_patts.append(r"spicevmc device type only supports virtio")

        vm_xml.undefine()
        res = vm_xml.virsh.define(vm_xml.xml)
        libvirt.check_result(res, expected_fails=fail_patts)
        return not res.exit_status

    def check_xml():
        """
        Predict the result serial device and generated console device
        and check the result domain XML against expectation
        """
        console_cls = librarian.get('console')

        local_serial_type = serial_type

        if serial_type == 'tls':
            local_serial_type = 'tcp'
        # Predict expected serial and console XML
        expected_console = console_cls(local_serial_type)

        if local_serial_type == 'udp':
            sources = []
            for source in serial_dev.sources:
                if 'service' in source and 'mode' not in source:
                    source['mode'] = 'connect'
                sources.append(source)
        else:
            sources = serial_dev.sources

        expected_console.sources = sources

        if local_serial_type == 'tcp':
            if 'protocol_type' in serial_dev:
                expected_console.protocol_type = serial_dev.protocol_type
            else:
                expected_console.protocol_type = "raw"

        expected_console.target_port = serial_dev.target_port
        if 'target_type' in serial_dev:
            expected_console.target_type = serial_dev.target_type
        expected_console.target_type = console_target_type
        logging.debug("Expected console XML is:\n%s", expected_console)

        # Get current serial and console XML
        current_xml = VMXML.new_from_dumpxml(vm_name)
        serial_elem = current_xml.xmltreefile.find('devices/serial')
        console_elem = current_xml.xmltreefile.find('devices/console')
        if console_elem is None:
            test.fail("Expect generate console automatically, "
                      "but found none.")
        if serial_elem and console_target_type != 'serial':
            test.fail("Don't Expect exist serial device, "
                      "but found:\n%s" % serial_elem)

        cur_console = console_cls.new_from_element(console_elem)
        logging.debug("Current console XML is:\n%s", cur_console)
        # Compare current serial and console with oracle.
        if not expected_console == cur_console:
            # "==" has been override
            test.fail("Expect generate console:\n%s\nBut got:\n%s" %
                      (expected_console, cur_console))

        if console_target_type == 'serial':
            serial_cls = librarian.get('serial')
            expected_serial = serial_cls(local_serial_type)
            expected_serial.sources = sources

            set_targets(expected_serial)

            if local_serial_type == 'tcp':
                if 'protocol_type' in serial_dev:
                    expected_serial.protocol_type = serial_dev.protocol_type
                else:
                    expected_serial.protocol_type = "raw"
            expected_serial.target_port = serial_dev.target_port
            if serial_elem is None:
                test.fail("Expected a serial device to exist, but found none.")
            cur_serial = serial_cls.new_from_element(serial_elem)
            logging.debug("Expected serial XML is:\n%s", expected_serial)
            logging.debug("Current serial XML is:\n%s", cur_serial)
            # Compare current serial and console with oracle.
            if not expected_serial == cur_serial:
                # "==" has been override
                test.fail("Expect serial device:\n%s\nBut got:\n "
                          "%s" % (expected_serial, cur_serial))

    def prepare_serial_console():
        """
        Prepare serial console to connect with guest serial according to
        the serial type
        """
        is_server = console_type == 'server'
        if serial_type == 'unix':
            # Unix socket path should match SELinux label
            socket_path = '/var/lib/libvirt/qemu/virt-test'
            console = Console('unix', socket_path, is_server)
        elif serial_type == 'tls':
            host = '127.0.0.1'
            service = 5556
            console = Console('tls', (host, service), is_server,
                              custom_pki_path)
        elif serial_type == 'tcp':
            host = '127.0.0.1'
            service = 2445
            console = Console('tcp', (host, service), is_server)
        elif serial_type == 'udp':
            host = '127.0.0.1'
            service = 2445
            console = Console('udp', (host, service), is_server)
        elif serial_type == 'file':
            socket_path = '/var/log/libvirt/virt-test'
            console = Console('file', socket_path, is_server)
        elif serial_type == 'pipe':
            socket_path = '/tmp/virt-test'
            console = Console('pipe', socket_path, is_server)
        else:
            logging.debug("Serial type %s don't support console test yet.",
                          serial_type)
            console = None
        return console

    def check_qemu_cmdline():
        """
        Check if VM's qemu command line meets expectation.
        """
        with open('/proc/%s/cmdline' % vm.get_pid()) as cmdline_file:
            cmdline = cmdline_file.read()
        logging.debug('Qemu command line: %s', cmdline)
        options = cmdline.split('\x00')
        ser_opt = None
        con_opt = None
        ser_dev = None
        con_dev = None
        exp_ser_opt = None
        exp_ser_dev = None
        exp_con_opt = None
        exp_con_dev = None
        # Get serial and console options from qemu command line
        for idx, opt in enumerate(options):
            if opt == '-chardev':
                if 'id=charserial' in options[idx + 1]:
                    ser_opt = options[idx + 1]
                    ser_dev = options[idx + 3]
                    logging.debug('Serial option is: %s', ser_opt)
                    logging.debug('Serial device option is: %s', ser_dev)
                if 'id=charconsole' in options[idx + 1]:
                    con_opt = options[idx + 1]
                    con_dev = options[idx + 3]
                    logging.debug('Console option is: %s', con_opt)
                    logging.debug('Console device option is: %s', con_dev)

        # Get expected serial and console options from params
        if serial_type == 'dev':
            ser_type = 'tty'
        elif serial_type in ['unix', 'tcp', 'tls']:
            ser_type = 'socket'
        else:
            ser_type = serial_type

        exp_ser_opts = [ser_type, 'id=charserial0']
        if serial_type in ['dev', 'file', 'pipe', 'unix']:
            for source in serial_dev.sources:
                if 'path' in source:
                    path = source['path']
            if serial_type == 'file':
                # This fdset number should be made flexible later
                exp_ser_opts.append('path=/dev/fdset/2')
                exp_ser_opts.append('append=on')
            else:
                exp_ser_opts.append('path=%s' % path)
        elif serial_type in ['tcp', 'tls']:
            for source in serial_dev.sources:
                if 'host' in source:
                    host = source['host']
                if 'service' in source:
                    port = source['service']
            exp_ser_opts.append('host=%s' % host)
            exp_ser_opts.append('port=%s' % port)
        elif serial_type in ['udp']:
            localaddr = ''
            localport = '0'
            for source in serial_dev.sources:
                if source['mode'] == 'connect':
                    if 'host' in source:
                        host = source['host']
                    if 'service' in source:
                        port = source['service']
                else:
                    if 'host' in source:
                        localaddr = source['host']
                    if 'service' in source:
                        localport = source['service']
            exp_ser_opts.append('host=%s' % host)
            exp_ser_opts.append('port=%s' % port)
            exp_ser_opts.append('localaddr=%s' % localaddr)
            exp_ser_opts.append('localport=%s' % localport)

        if serial_type in ['unix', 'tcp', 'udp', 'tls']:
            mode = ''
            for source in serial_dev.sources:
                if 'mode' in source:
                    mode = source['mode']
            if mode == 'bind':
                exp_ser_opts.append('server')
                exp_ser_opts.append('nowait')

        if serial_type == 'tls':
            exp_ser_opts.append('tls-creds=objcharserial0_tls0')

        exp_ser_opt = ','.join(exp_ser_opts)

        if "ppc" in platform.machine():
            exp_ser_devs = [
                'spapr-vty', 'chardev=charserial0', 'reg=0x30000000'
            ]
            if libvirt_version.version_compare(3, 9, 0):
                exp_ser_devs.insert(2, 'id=serial0')
        else:
            exp_ser_devs = ['isa-serial', 'chardev=charserial0', 'id=serial0']
        exp_ser_dev = ','.join(exp_ser_devs)

        if console_target_type != 'serial' or serial_type in ['spiceport']:
            exp_ser_opt = None
            exp_ser_dev = None

        # Check options against expectation
        if ser_opt != exp_ser_opt:
            test.fail('Expect get qemu command serial option "%s", '
                      'but got "%s"' % (exp_ser_opt, ser_opt))
        if ser_dev != exp_ser_dev:
            test.fail('Expect get qemu command serial device option "%s", '
                      'but got "%s"' % (exp_ser_dev, ser_dev))

        if console_target_type == 'virtio':
            exp_con_opts = [serial_type, 'id=charconsole0']
            exp_con_opt = ','.join(exp_con_opts)

            # The outer check already guarantees console_target_type is virtio
            exp_con_devs = ['virtconsole', 'chardev=charconsole0',
                            'id=console0']
            exp_con_dev = ','.join(exp_con_devs)
            if con_opt != exp_con_opt:
                test.fail('Expect get qemu command console option "%s", '
                          'but got "%s"' % (exp_con_opt, con_opt))
            if con_dev != exp_con_dev:
                test.fail(
                    'Expect get qemu command console device option "%s", '
                    'but got "%s"' % (exp_con_dev, con_dev))

    def check_serial_console(console, username, password):
        """
        Check whether console works properly by read the result for read only
        console and login and emit a command for read write console.
        """
        if serial_type in ['file', 'tls']:
            _, output = console.read_until_output_matches(['.*[Ll]ogin:'])
        else:
            console.wait_for_login(username, password)
            status, output = console.cmd_status_output('pwd')
            logging.debug("Command status: %s", status)
            logging.debug("Command output: %s", output)

    def cleanup(objs_list):
        """
        Clean up test environment
        """
        if serial_type == 'file':
            if os.path.exists('/var/log/libvirt/virt-test'):
                os.remove('/var/log/libvirt/virt-test')

        # recovery test environment
        for obj in objs_list:
            obj.auto_recover = True
            del obj

    def get_console_type():
        """
        Check whether the console should be started before or after the guest starts.
        """
        if serial_type in ['tcp', 'unix', 'udp', 'tls']:
            for source in serial_dev.sources:
                if 'mode' in source and source['mode'] == 'connect':
                    return 'server'
            return 'client'
        elif serial_type in ['file']:
            return 'client'
        elif serial_type in ['pipe']:
            return 'server'

    serial_type = params.get('serial_type', 'pty')
    sources_str = params.get('serial_sources', '')
    username = params.get('username')
    password = params.get('password')
    console_target_type = params.get('console_target_type', 'serial')
    console_target_port = params.get('console_target_port', '0')
    second_serial_console = params.get('second_serial_console', 'no') == 'yes'
    custom_pki_path = params.get('custom_pki_path', '/etc/pki/libvirt-chardev')
    auto_recover = params.get('auto_recover', 'no')
    client_pwd = params.get('client_pwd', None)
    server_pwd = params.get('server_pwd', None)

    args_list = [client_pwd, server_pwd]

    for arg in args_list:
        if arg and arg.count("ENTER.YOUR."):
            raise test.fail("Please assign a value for %s!" % arg)

    vm_name = params.get('main_vm', 'virt-tests-vm1')
    vm = env.get_vm(vm_name)

    # it's used to clean up TLS objs later
    objs_list = []

    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()
    try:
        vm_xml.remove_all_device_by_type('serial')
        vm_xml.remove_all_device_by_type('console')
        if serial_type == "tls":
            test_dict = dict(params)
            tls_obj = TLSConnection(test_dict)
            if auto_recover == "yes":
                objs_list.append(tls_obj)
            tls_obj.conn_setup(False, True, True)

        serial_dev = prepare_serial_device()
        if console_target_type == 'serial' or second_serial_console:
            logging.debug('Serial device:\n%s', serial_dev)
            vm_xml.add_device(serial_dev)

        console_dev = prepare_console_device()
        logging.debug('Console device:\n%s', console_dev)
        vm_xml.add_device(console_dev)
        if second_serial_console:
            console_target_type = 'serial'
            console_target_port = '1'
            console_dev = prepare_console_device()
            logging.debug('Console device:\n%s', console_dev)
            vm_xml.add_device(console_dev)
            vm_xml.undefine()
            res = virsh.define(vm_xml.xml)
            if res.stderr.find('Only the first console can be a serial port') != -1:
                logging.debug("Test 'only the first console can be a serial "
                              "port' succeeded")
                return
            test.fail("Test 'only the first console can be a serial port' failed.")

        console_type = get_console_type()

        if not define_and_check():
            logging.debug("Can't define the VM, exiting.")
            return
        check_xml()

        expected_fails = []
        if serial_type == 'nmdm' and platform.system() != 'FreeBSD':
            expected_fails.append("unsupported chardev 'nmdm'")

        # Prepare console before start when console is server
        if console_type == 'server':
            console = prepare_serial_console()

        res = virsh.start(vm_name)
        libvirt.check_result(res, expected_fails, [])
        if res.exit_status:
            logging.debug("Can't start the VM, exiting.")
            return

        check_qemu_cmdline()

        # Prepare console after start when console is client
        if console_type == 'client':
            console = prepare_serial_console()

        if (console_type is not None and serial_type != 'tls'
                and console_type != 'server'):
            check_serial_console(console, username, password)

        vm.destroy()

    finally:
        cleanup(objs_list)
        vm_xml_backup.sync()
def run(test, params, env):
    """
    Test svirt in undefining and defining a VM.

    (1).Init variables for test.
    (2).Label the VM and disk with proper label.
    (3).Undefine the VM and check the result.
    (4).Define the VM again and check the result.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get(
        "svirt_undefine_define_host_selinux", "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("svirt_undefine_define_vm_sec_type", "dynamic")
    sec_model = params.get("svirt_undefine_define_vm_sec_model", "selinux")
    sec_label = params.get("svirt_undefine_define_vm_sec_label", None)
    sec_relabel = params.get("svirt_undefine_define_vm_sec_relabel", "yes")
    sec_dict = {'type': sec_type, 'model': sec_model, 'label': sec_label,
                'relabel': sec_relabel}
    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Get variables about image.
    img_label = params.get('svirt_undefine_define_disk_label')
    # Label the disks of VM with img_label.
    disks = vm.get_disk_devices()
    backup_labels_of_disks = {}
    for disk in disks.values():
        disk_path = disk['source']
        backup_labels_of_disks[disk_path] = utils_selinux.get_context_of_file(
            filename=disk_path)
        utils_selinux.set_context_of_file(filename=disk_path,
                                          context=img_label)
    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    utils_selinux.set_status(host_sestatus)
    # Set the context of the VM.
    vmxml.set_seclabel([sec_dict])
    vmxml.sync()

    try:
        xml_file = os.path.join(data_dir.get_tmp_dir(), "vmxml")
        if vm.is_alive():
            vm.destroy()
        virsh.dumpxml(vm.name, to_file=xml_file)
        cmd_result = virsh.undefine(vm.name)
        if cmd_result.exit_status:
            raise error.TestFail("Failed to undefine vm."
                                 "Detail: %s" % cmd_result)
        cmd_result = virsh.define(xml_file)
        if cmd_result.exit_status:
            raise error.TestFail("Failed to define vm."
                                 "Detail: %s" % cmd_result)
    finally:
        # clean up
        for path, label in backup_labels_of_disks.items():
            utils_selinux.set_context_of_file(filename=path, context=label)
        backup_xml.sync()
        utils_selinux.set_status(backup_sestatus)
Example #12
            xml_exist = True

        # Check if the save file exists when '--managed-save' is used
        save_exist = os.path.exists(save_file)

        # Check if the volume exists when storage-related options are used
        volume_exist = volume and os.path.exists(volume)

        # Test define with acl control and recover domain.
        if params.get('setup_libvirt_polkit') == 'yes':
            if virsh.domain_exists(vm.name):
                virsh.undefine(vm_ref, ignore_status=True)
            cmd = "chmod 666 %s" % backup_xml.xml
            process.run(cmd, ignore_status=False, shell=True)
            s_define = virsh.define(backup_xml.xml,
                                    unprivileged_user=unprivileged_user,
                                    uri=uri, ignore_status=True, debug=True)
            status3 = s_define.exit_status

    finally:
        # Recover main VM.
        backup_xml.sync()

        # Recover existing guest images
        if existing_images and option.count("remove-all-storage"):
            for img in existing_images:
                backup_img = img + '.bak'
                logging.info('Recover image %s to %s', backup_img, img)
                shutil.move(backup_img, img)

        # Clean up pool
Example #13
def run_virsh_edit(test, params, env):
    """
    Test command: virsh edit.

    The command can edit the XML configuration of a domain.
    1. Prepare the test environment: destroy or suspend a VM.
    2. When libvirtd == "off", stop the libvirtd service.
    3. Perform the virsh edit operation.
    4. Recover the test environment.
    5. Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    domid = vm.get_id()
    domuuid = vm.get_uuid()
    vcpucount_result = virsh.vcpucount(vm_name, options="--config --maximum")
    if vcpucount_result.exit_status:
        raise error.TestError("Failed to get vcpucount. Detail:\n%s" %
                              vcpucount_result)
    original_vcpu = vcpucount_result.stdout.strip()
    expected_vcpu = str(int(original_vcpu) + 1)

    libvirtd = params.get("libvirtd", "on")
    vm_ref = params.get("edit_vm_ref")
    status_error = params.get("status_error")

    def modify_vcpu(source, edit_cmd):
        """
        Modify vm's cpu information.

        :param source: virsh edit's option.
        :param edit_cmd: an edit command line.
        :return: True if edit succeeded, False if it failed.
        """
        session = aexpect.ShellSession("sudo -s")
        try:
            session.sendline("virsh edit %s" % source)
            session.sendline(edit_cmd)
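            # edit_cmd is a vi ':%s' substitution typed into the editor spawned
            # by 'virsh edit'; ESC ('\x1b') leaves insert mode and 'ZZ' writes
            # the file and quits.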
            session.send('\x1b')
            session.send('ZZ')
            remote.handle_prompts(session, None, None, r"[\#\$]\s*$")
            session.close()
            return True
        except Exception:
            return False

    def edit_vcpu(source, guest_name):
        """
        Modify vm's cpu information by virsh edit command.

        :param source: virsh edit's option.
        :param guest_name: vm's name.
        :return: True if edit succeeded, False if it failed.
        """
        dic_mode = {
            "edit": r":%s /[0-9]*<\/vcpu>/" + expected_vcpu + r"<\/vcpu>",
            "recover": r":%s /[0-9]*<\/vcpu>/" + original_vcpu + r"<\/vcpu>"
        }
        status = modify_vcpu(source, dic_mode["edit"])
        if not status:
            return status
        if params.get("paused_after_start_vm") == "yes":
            virsh.resume(guest_name, ignore_status=True)
            virsh.destroy(guest_name)
        elif params.get("start_vm") == "yes":
            virsh.destroy(guest_name)
        vcpus = vm.dominfo()["CPU(s)"]
        # Recover cpuinfo
        # Use name rather than source, since source could be domid
        status = modify_vcpu(guest_name, dic_mode["recover"])
        if status and vcpus != expected_vcpu:
            return False
        return status

    # run test case
    xml_file = os.path.join(test.tmpdir, 'tmp.xml')
    virsh.dumpxml(vm_name, extra="--inactive", to_file=xml_file)

    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    try:
        if vm_ref == "id":
            status = edit_vcpu(domid, vm_name)
        elif vm_ref == "uuid":
            status = edit_vcpu(domuuid, vm_name)
        elif vm_ref == "name" and status_error == "no":
            status = edit_vcpu(vm_name, vm_name)
        else:
            status = False
            if vm_ref.find("invalid") != -1:
                vm_ref = params.get(vm_ref)
            elif vm_ref == "name":
                vm_ref = "%s %s" % (vm_name, params.get("edit_extra_param"))
            edit_status = virsh.edit(vm_ref).exit_status
            if edit_status == 0:
                status = True
    except Exception:
        status = False

    # recover libvirtd service start
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # Recover VM
    if vm.is_alive():
        vm.destroy()
    virsh.undefine(vm_name)
    virsh.define(xml_file)

    # check status_error
    if status_error == "yes":
        if status:
            raise error.TestFail("Run successfully with wrong command!")
    elif status_error == "no":
        if not status:
            raise error.TestFail("Run failed with right command")
    def trigger_events(dom, events_list=None):
        """
        Trigger various events in events_list

        :param dom: the vm object corresponding to the domain
        :param events_list: the list of events to trigger
        :return: the expected output that virsh event command prints out
        """
        events_list = events_list or []
        expected_events_list = []
        save_path = os.path.join(tmpdir, "%s_event.save" % dom.name)
        logging.debug("Triggering events for domain %s", dom.name)
        xmlfile = dom.backup_xml()
        new_disk = os.path.join(tmpdir, "%s_new_disk.img" % dom.name)
        dest_path = os.path.join(data_dir.get_data_dir(), "copy")

        try:
            for event in events_list:
                logging.debug("Current event is: %s", event)
                if event in ['start', 'restore', 'create', 'edit', 'define',
                             'undefine', 'crash', 'device-removal-failed',
                             'watchdog', 'io-error']:
                    if dom.is_alive():
                        dom.destroy()
                        if event in ['create', 'define']:
                            dom.undefine()
                else:
                    if not dom.is_alive():
                        dom.start()
                        dom.wait_for_login().close()
                        if event == "resume":
                            dom.pause()

                if event == "undefine":
                    virsh.undefine(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Undefined Removed")
                elif event == "create":
                    virsh.create(xmlfile, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Started Booted")
                elif event == "destroy":
                    virsh.destroy(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Destroyed")
                elif event == "define":
                    virsh.define(xmlfile, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Defined Added")
                elif event == "start":
                    virsh.start(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Started Booted")
                    dom.wait_for_login().close()
                elif event == "suspend":
                    virsh.suspend(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Suspended Paused")
                    if not libvirt_version.version_compare(5, 3, 0):
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Suspended Paused")
                elif event == "resume":
                    virsh.resume(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                elif event == "save":
                    virsh.save(dom.name, save_path, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Suspended Paused")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Saved")
                elif event == "restore":
                    if not os.path.exists(save_path):
                        logging.error("%s not exist", save_path)
                    else:
                        virsh.restore(save_path, **virsh_dargs)
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Started Restored")
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Resumed Snapshot")
                elif event == "edit":
                    #Check whether 'description' element exists.
                    domxml = virsh.dumpxml(dom.name).stdout.strip()
                    find_desc = parseString(domxml).getElementsByTagName("description")
                    if find_desc == []:
                        #If not exists, add one for it.
                        logging.info("Adding <description> to guest")
                        virsh.desc(dom.name, "--config", "Added desc for testvm", **virsh_dargs)
                    #The edit operation is to delete 'description' element.
                    edit_cmd = [r":g/<description.*<\/description>/d"]
                    utlv.exec_virsh_edit(dom.name, edit_cmd)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Defined Updated")
                elif event == "shutdown":
                    if signal_name is None:
                        virsh.shutdown(dom.name, **virsh_dargs)
                        # Wait a few seconds for shutdown to finish
                        time.sleep(3)
                        if utils_misc.compare_qemu_version(2, 9, 0):
                            # Shutdown reason is distinguished since qemu 2.9.0-9
                            expected_events_list.append("'lifecycle' for %s:"
                                                        " Shutdown Finished after guest request")
                    else:
                        os.kill(dom.get_pid(), getattr(signal, signal_name))
                        if utils_misc.compare_qemu_version(2, 9, 0):
                            expected_events_list.append("'lifecycle' for %s:"
                                                        " Shutdown Finished after host request")
                    if not utils_misc.compare_qemu_version(2, 9, 0):
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Shutdown Finished")
                    wait_for_shutoff(dom)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Shutdown")
                elif event == "crash":
                    if not vmxml.xmltreefile.find('devices').findall('panic'):
                        # Set panic device
                        panic_dev = Panic()
                        panic_dev.model = panic_model
                        panic_dev.addr_type = addr_type
                        panic_dev.addr_iobase = addr_iobase
                        vmxml.add_device(panic_dev)
                    vmxml.on_crash = "coredump-restart"
                    vmxml.sync()
                    logging.info("Guest xml now is: %s", vmxml)
                    dom.start()
                    session = dom.wait_for_login()
                    # Stop kdump in the guest
                    session.cmd("systemctl stop kdump", ignore_all_errors=True)
                    # Enable sysRq
                    session.cmd("echo 1 > /proc/sys/kernel/sysrq")
                    try:
                        # Crash the guest
                        session.cmd("echo c > /proc/sysrq-trigger", timeout=90)
                    except (ShellTimeoutError, ShellProcessTerminatedError) as details:
                        logging.info(details)
                    session.close()
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Crashed Panicked")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                elif event == "reset":
                    virsh.reset(dom.name, **virsh_dargs)
                    expected_events_list.append("'reboot' for %s")
                elif event == "vcpupin":
                    virsh.vcpupin(dom.name, '0', '0', **virsh_dargs)
                    expected_events_list.append("'tunable' for %s:"
                                                "\n\tcputune.vcpupin0: 0")
                elif event == "emulatorpin":
                    virsh.emulatorpin(dom.name, '0', **virsh_dargs)
                    expected_events_list.append("'tunable' for %s:"
                                                "\n\tcputune.emulatorpin: 0")
                elif event == "setmem":
                    mem_size = int(params.get("mem_size", 512000))
                    virsh.setmem(dom.name, mem_size, **virsh_dargs)
                    expected_events_list.append("'balloon-change' for %s:")
                elif event == "device-added-removed":
                    add_disk(dom.name, new_disk, 'vdb', '')
                    expected_events_list.append("'device-added' for %s:"
                                                " virtio-disk1")
                    virsh.detach_disk(dom.name, 'vdb', **virsh_dargs)
                    expected_events_list.append("'device-removed' for %s:"
                                                " virtio-disk1")
                    iface_xml_obj = create_iface_xml()
                    iface_xml_obj.xmltreefile.write()
                    virsh.detach_device(dom.name, iface_xml_obj.xml, **virsh_dargs)
                    expected_events_list.append("'device-removed' for %s:"
                                                " net0")
                    time.sleep(2)
                    virsh.attach_device(dom.name, iface_xml_obj.xml, **virsh_dargs)
                    expected_events_list.append("'device-added' for %s:"
                                                " net0")
                elif event == "block-threshold":
                    add_disk(dom.name, new_disk, 'vdb', '', format=disk_format)
                    logging.debug(process.run('qemu-img info %s -U' % new_disk))
                    virsh.domblkthreshold(vm_name, 'vdb', '100M')
                    session = dom.wait_for_login()
                    session.cmd("mkfs.ext4 /dev/vdb && mount /dev/vdb /mnt && ls /mnt && "
                                "dd if=/dev/urandom of=/mnt/bigfile bs=1M count=300 && sync")
                    time.sleep(5)
                    session.close()
                    expected_events_list.append("'block-threshold' for %s:"
                                                " dev: vdb(%s)  104857600 29368320")
                    virsh.detach_disk(dom.name, 'vdb', **virsh_dargs)
                elif event == "change-media":
                    target_device = "hdc"
                    device_target_bus = params.get("device_target_bus", "ide")
                    disk_blk = vm_xml.VMXML.get_disk_blk(dom.name)
                    logging.info("disk_blk %s", disk_blk)
                    if target_device not in disk_blk:
                        logging.info("Adding cdrom to guest")
                        if dom.is_alive():
                            dom.destroy()
                        add_disk(dom.name, new_disk, target_device,
                                 ("--type cdrom --sourcetype file --driver qemu " +
                                  "--config --targetbus %s" % device_target_bus))
                        dom.start()
                    all_options = new_disk + " --insert"
                    virsh.change_media(dom.name, target_device,
                                       all_options, **virsh_dargs)
                    expected_events_list.append("'tray-change' for %s disk" + " .*%s.*:" % device_target_bus +
                                                " opened")
                    expected_events_list.append("'tray-change' for %s disk" + " .*%s.*:" % device_target_bus +
                                                " closed")
                    all_options = new_disk + " --eject"
                    virsh.change_media(dom.name, target_device,
                                       all_options, **virsh_dargs)
                    expected_events_list.append("'tray-change' for %s disk" + " .*%s.*:" % device_target_bus +
                                                " opened")
                elif event == "hwclock":
                    session = dom.wait_for_login()
                    try:
                        session.cmd("hwclock --systohc", timeout=60)
                    except (ShellTimeoutError, ShellProcessTerminatedError) as details:
                        logging.info(details)
                    session.close()
                    expected_events_list.append("'rtc-change' for %s:")
                elif event == "metadata_set":
                    metadata_uri = params.get("metadata_uri")
                    metadata_key = params.get("metadata_key")
                    metadata_value = params.get("metadata_value")
                    virsh.metadata(dom.name,
                                   metadata_uri,
                                   options="",
                                   key=metadata_key,
                                   new_metadata=metadata_value,
                                   **virsh_dargs)
                    expected_events_list.append("'metadata-change' for %s: "
                                                "element http://app.org/")
                elif event == "metadata_edit":
                    metadata_uri = "http://herp.derp/"
                    metadata_key = "herp"
                    metadata_value = "<derp xmlns:foobar='http://foo.bar/'>foo<bar></bar></derp>"
                    virsh_cmd = r"virsh metadata %s --uri %s --key %s %s"
                    virsh_cmd = virsh_cmd % (dom.name, metadata_uri,
                                             metadata_key, "--edit")
                    session = aexpect.ShellSession("sudo -s")
                    logging.info("Running command: %s", virsh_cmd)
                    try:
                        session.sendline(virsh_cmd)
                        session.sendline(r":insert")
                        session.sendline(metadata_value)
                        session.sendline(".")
                        session.send('ZZ')
                        remote.handle_prompts(session, None, None, r"[\#\$]\s*$",
                                              debug=True, timeout=60)
                    except Exception as e:
                        test.error("Error occured: %s" % e)
                    session.close()
                    # Check metadata after edit
                    virsh.metadata(dom.name,
                                   metadata_uri,
                                   options="",
                                   key=metadata_key,
                                   **virsh_dargs)
                    expected_events_list.append("'metadata-change' for %s: "
                                                "element http://app.org/")
                elif event == "metadata_remove":
                    virsh.metadata(dom.name,
                                   metadata_uri,
                                   options="--remove",
                                   key=metadata_key,
                                   **virsh_dargs)
                    expected_events_list.append("'metadata-change' for %s: "
                                                "element http://app.org/")
                elif event == "blockcommit":
                    disk_path = dom.get_blk_devices()['vda']['source']
                    virsh.snapshot_create_as(dom.name, "s1 --disk-only --no-metadata", **virsh_dargs)
                    snapshot_path = dom.get_blk_devices()['vda']['source']
                    virsh.blockcommit(dom.name, "vda", "--active --pivot", **virsh_dargs)
                    expected_events_list.append("'block-job' for %s: "
                                                "Active Block Commit for " + "%s" % snapshot_path + " ready")
                    expected_events_list.append("'block-job-2' for %s: "
                                                "Active Block Commit for vda ready")
                    expected_events_list.append("'block-job' for %s: "
                                                "Active Block Commit for " + "%s" % disk_path + " completed")
                    expected_events_list.append("'block-job-2' for %s: "
                                                "Active Block Commit for vda completed")
                    os.unlink(snapshot_path)
                elif event == "blockcopy":
                    disk_path = dom.get_blk_devices()['vda']['source']
                    dom.undefine()
                    virsh.blockcopy(dom.name, "vda", dest_path, "--pivot", **virsh_dargs)
                    expected_events_list.append("'block-job' for %s: "
                                                "Block Copy for " + "%s" % disk_path + " ready")
                    expected_events_list.append("'block-job-2' for %s: "
                                                "Block Copy for vda ready")
                    expected_events_list.append("'block-job' for %s: "
                                                "Block Copy for " + "%s" % dest_path + " completed")
                    expected_events_list.append("'block-job-2' for %s: "
                                                "Block Copy for vda completed")
                elif event == "detach-dimm":
                    prepare_vmxml_mem(vmxml)
                    tg_size = params.get("dimm_size")
                    tg_sizeunit = params.get("dimm_unit")
                    dimm_xml = utils_hotplug.create_mem_xml(tg_size, None, None, tg_sizeunit)
                    virsh.attach_device(dom.name, dimm_xml.xml,
                                        flagstr="--config", **virsh_dargs)
                    vmxml_dimm = vm_xml.VMXML.new_from_dumpxml(dom.name)
                    logging.debug("Current vmxml with plugged dimm dev is %s\n" % vmxml_dimm)
                    virsh.start(dom.name, **virsh_dargs)
                    dom.wait_for_login().close()
                    result = virsh.detach_device(dom.name, dimm_xml.xml, debug=True, ignore_status=True)
                    expected_fails = params.get("expected_fails")
                    utlv.check_result(result, expected_fails)
                    vmxml_live = vm_xml.VMXML.new_from_dumpxml(dom.name)
                    logging.debug("Current vmxml after hot-unplug dimm is %s\n" % vmxml_live)
                    expected_events_list.append("'device-removal-failed' for %s: dimm0")
                elif event == "watchdog":
                    vmxml.remove_all_device_by_type('watchdog')
                    watchdog_dev = Watchdog()
                    watchdog_dev.model_type = params.get("watchdog_model")
                    action = params.get("action")
                    watchdog_dev.action = action
                    vmxml.add_device(watchdog_dev)
                    vmxml.sync()
                    logging.debug("Current vmxml with watchdog dev is %s\n" % vmxml)
                    virsh.start(dom.name, **virsh_dargs)
                    session = dom.wait_for_login()
                    try:
                        session.cmd("echo 0 > /dev/watchdog")
                    except (ShellTimeoutError, ShellProcessTerminatedError) as details:
                        test.fail("Failed to trigger watchdog: %s" % details)
                    session.close()
                    # The watchdog acts slowly; wait for it.
                    time.sleep(30)
                    expected_events_list.append("'watchdog' for %s: " + "%s" % action)
                    if action == 'pause':
                        expected_events_list.append("'lifecycle' for %s: Suspended Watchdog")
                        virsh.resume(dom.name, **virsh_dargs)
                    else:
                        # action == 'reset'
                        expected_events_list.append("'reboot' for %s")
                elif event == "io-error":
                    part_size = params.get("part_size")
                    resume_event = params.get("resume_event")
                    suspend_event = params.get("suspend_event")
                    process.run("truncate -s %s %s" % (part_size, small_part), shell=True)
                    utlv.mkfs(small_part, part_format)
                    utils_misc.mount(small_part, mount_point, None)
                    add_disk(dom.name, new_disk, 'vdb', '--subdriver qcow2 --config', 'qcow2')
                    dom.start()
                    session = dom.wait_for_login()
                    session.cmd("mkfs.ext4 /dev/vdb && mount /dev/vdb /mnt && ls /mnt && "
                                "dd if=/dev/zero of=/mnt/test.img bs=1M count=50", ignore_all_errors=True)
                    time.sleep(5)
                    session.close()
                    expected_events_list.append("'io-error' for %s: " + "%s" % new_disk + r" \(virtio-disk1\) pause")
                    expected_events_list.append("'io-error-reason' for %s: " + "%s" % new_disk + r" \(virtio-disk1\) pause due to enospc")
                    expected_events_list.append(suspend_event)
                    process.run("df -hT")
                    virsh.resume(dom.name, **virsh_dargs)
                    time.sleep(5)
                    expected_events_list.append(resume_event)
                    expected_events_list.append("'io-error' for %s: " + "%s" % new_disk + r" \(virtio-disk1\) pause")
                    expected_events_list.append("'io-error-reason' for %s: " + "%s" % new_disk + r" \(virtio-disk1\) pause due to enospc")
                    expected_events_list.append(suspend_event)
                    ret = virsh.domstate(dom.name, "--reason", **virsh_dargs)
                    if ret.stdout.strip() != "paused (I/O error)":
                        test.fail("Domain state should still be paused due to I/O error!")
                else:
                    test.error("Unsupported event: %s" % event)
                # The event may not be received immediately
                time.sleep(3)
        finally:
            if os.path.exists(save_path):
                os.unlink(save_path)
            if os.path.exists(new_disk):
                os.unlink(new_disk)
            if os.path.exists(dest_path):
                os.unlink(dest_path)
        return [(dom.name, event) for event in expected_events_list]
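
# Side note, not part of the original test: the (name, pattern) pairs
# returned above still carry a %s placeholder for the domain name, and the
# patterns are treated as regular expressions. A minimal matching sketch,
# assuming the caller has captured the text of a `virsh event --loop --all`
# session; match_expected_events() is a hypothetical helper, not
# avocado-vt API:
import re

def match_expected_events(event_output, name_event_pairs):
    """Return the expected event lines that were NOT found."""
    missing = []
    for dom_name, pattern in name_event_pairs:
        # Fill only the first %s so device/path placeholders that a few
        # patterns still carry are left untouched.
        expected = pattern.replace('%s', re.escape(dom_name), 1)
        if not re.search(expected, event_output):
            missing.append(expected)
    return missing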
Beispiel #15
def run(test, params, env):
    """
    Test vcpu
    """
    vm_name = params.get('main_vm')
    check = params.get('check', '')
    status_error = 'yes' == params.get('status_error', 'no')
    err_msg = params.get('err_msg', '')
    guest_vcpu = params.get('guest_vcpu')
    boot_timeout = int(params.get('boot_timeout', 240))
    start_fail = 'yes' == params.get('start_fail', 'no')

    vm = env.get_vm(vm_name)
    vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    bkxml = vmxml.copy()

    def check_onlinevcpus(vm, cpu_num):
        """

        Check whether all vcpus are online as expected.

        :param vm: the exact VM need to check
        :param cpu_num: the num of online vcpus need to match
        """
        if not utils_misc.wait_for(
                lambda: utils_misc.check_if_vm_vcpu_match(cpu_num, vm),
                timeout=120,
                step=5,
                text="wait for vcpu online"):
            test.fail('Not all vcpus are online as expected.')

    def set_iommu(vmxml, **dargs):
        """

        Add iommu device to vm.

        :param vmxml: xml of vm to be add iommu device
        :param dargs: args or the iommu device
        :return:
        """
        logging.info('Add iommu device to vm.')
        iommu_device = Iommu()
        iommu_device.model = dargs.get('model', 'intel')
        iommu_device.driver = dargs.get('driver', {
            'intremap': 'on',
            'eim': 'on'
        })
        vmxml.add_device(iommu_device)

    try:
        # Check the output of "virsh maxvcpus" for both i440fx and q35 VM
        if check == 'virsh_maxvcpus':
            report_num = params.get('report_num', '')
            logging.info('Check the output of virsh maxvcpus')
            cmd_result = virsh.maxvcpus(debug=True)
            if (cmd_result.exit_status == 0
                    and cmd_result.stdout.strip() == report_num):
                logging.debug('Test passed as the reported max vcpu num is %s',
                              report_num)
            else:
                test.fail('Test failed as the reported max vcpu num is not '
                          'as expected.')

        # Check the output of "virsh capabilities" for both i440fx and q35 VM
        if check == "virsh_capabilities":
            report_num_pc_7 = params.get('report_num_pc_7', '')
            report_num_q35_73 = params.get('report_num_q35_73', '')
            report_num_q35_7_8 = params.get('report_num_q35_7_8', '')
            logging.info('Check the output of virsh capabilities')
            xmltreefile = capability_xml.CapabilityXML().xmltreefile
            machtype_vcpunum_dict = {}
            for guest in xmltreefile.findall('guest'):
                for arch in guest.findall('arch'):
                    if arch.get('name') == "x86_64":
                        for machine in arch.findall('machine'):
                            machine_text = machine.text
                            vcpunum = machine.get('maxCpus')
                            machtype_vcpunum_dict[machine_text] = vcpunum
            for key in machtype_vcpunum_dict:
                logging.info("%s : %s", key, machtype_vcpunum_dict[key])
                if key.startswith('pc-i440fx') or key.startswith(
                        'rhel') or key == 'pc':
                    if machtype_vcpunum_dict[key] != report_num_pc_7:
                        test.fail(
                            'Test failed as i440fx_max_vcpus_num in virsh_capa is wrong.'
                        )
                if key.startswith('pc-q35') or key == 'q35':
                    if key == "pc-q35-rhel7.3.0":
                        if machtype_vcpunum_dict[key] != report_num_q35_73:
                            test.fail(
                                'Test failed as q35_rhel73_max_vcpus_num in virsh_capa is wrong.'
                            )
                    else:
                        if machtype_vcpunum_dict[key] != report_num_q35_7_8:
                            test.fail(
                                'Test failed as the q35_max_vcpus_num in virsh_capa is wrong.'
                            )

        # Test i440fx VM starts with 240(positive)/241(negative) vcpus and hot-plugs vcpus to 240
        if check.startswith('i440fx_test'):
            current_vcpu = params.get('current_vcpu')
            if 'hotplug' not in check:
                vmxml.vcpu = int(guest_vcpu)
                vmxml.sync()
                if status_error:
                    if start_fail:
                        result_need_check = virsh.start(vm_name, debug=True)
                else:
                    vm.start()
                    logging.info(libvirt_xml.VMXML.new_from_dumpxml(vm_name))
                    vm.wait_for_login(timeout=boot_timeout).close()
                    check_onlinevcpus(vm, int(guest_vcpu))
            else:
                vmxml.vcpu = int(guest_vcpu)
                vmxml.current_vcpu = int(current_vcpu)
                vmxml.sync()
                vm.start()
                logging.info(libvirt_xml.VMXML.new_from_dumpxml(vm_name))
                vm.wait_for_login(timeout=boot_timeout).close()
                check_onlinevcpus(vm, int(current_vcpu))
                res = virsh.setvcpus(vm_name, guest_vcpu, debug=True)
                libvirt.check_exit_status(res)
                check_onlinevcpus(vm, int(guest_vcpu))

        # Configure a guest vcpu > 255 without iommu device for q35 VM
        if check == 'no_iommu':
            logging.info('Set vcpu to %s', guest_vcpu)
            vmxml.vcpu = int(guest_vcpu)
            result_need_check = virsh.define(vmxml.xml, debug=True)

        # Set iommu device but do not set ioapic in features for q35 VM
        if check == 'with_iommu':
            logging.info('Set vcpu to %s', guest_vcpu)
            vmxml.vcpu = int(guest_vcpu)
            set_iommu(vmxml)
            result_need_check = virsh.define(vmxml.xml, debug=True)

        # Add ioapic and iommu device in xml for q35 VM
        if check.startswith('ioapic_iommu'):
            logging.info('Modify features')
            vm_features = vmxml.features
            vm_features.add_feature('apic')
            vm_features.add_feature('ioapic', 'driver', 'qemu')
            vmxml.features = vm_features
            logging.debug(vmxml.features.get_feature_list())

            logging.info('Set vcpu to %s', guest_vcpu)
            set_iommu(vmxml)

            ori_vcpu = vmxml.vcpu
            vmxml.vcpu = int(guest_vcpu)
            vmxml.current_vcpu = ori_vcpu

            if 'hotplug' not in check:
                vmxml.current_vcpu = int(guest_vcpu)

            if status_error:
                if start_fail:
                    if libvirt_version.version_compare(5, 6, 0):
                        result_need_check = virsh.define(vmxml.xml, debug=True)
                    else:
                        vmxml.sync()
                        result_need_check = virsh.start(vm_name, debug=True)

            else:
                # Login guest and check guest cpu number
                vmxml.sync()
                logging.debug(virsh.dumpxml(vm_name))
                vm.start()
                session = vm.wait_for_login(timeout=boot_timeout)
                logging.debug(session.cmd('lscpu -e'))

                # Hotplug vcpu to $guest_vcpu
                if 'hotplug' in check:
                    res = virsh.setvcpus(vm_name, guest_vcpu, debug=True)
                    libvirt.check_exit_status(res)

                # Check if vcpu(s) are online
                check_onlinevcpus(vm, int(guest_vcpu))

        # Check result if there's result to check
        if 'result_need_check' in locals():
            libvirt.check_result(result_need_check, err_msg)

    finally:
        bkxml.sync()
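
# Side note (illustrative, not avocado-vt API): check_onlinevcpus() above
# delegates to utils_misc. A self-contained way to count online vcpus
# inside the guest, assuming a session object such as the one returned by
# vm.wait_for_login(), is to read the kernel's online map:
def count_online_vcpus(session):
    # /sys/devices/system/cpu/online holds ranges such as "0,2-239"
    online = session.cmd_output(
        "cat /sys/devices/system/cpu/online").strip()
    total = 0
    for chunk in online.split(','):
        if '-' in chunk:
            start, end = chunk.split('-')
            total += int(end) - int(start) + 1
        else:
            total += 1
    return total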
Beispiel #16
def run(test, params, env):
    """
    Test command: virsh autostart

    Set(or disable) autostart for a domain
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    persistent_vm = "yes" == params.get("persistent_vm", "yes")
    readonly_mode = "yes" == params.get("readonly_mode", "no")
    autostart_vm = "yes" == params.get("autostart_vm", "no")
    autostart_extra = params.get("autostart_extra", "")
    status_error = "yes" == params.get("status_error", "no")

    # Prepare transient/persistent vm
    original_xml = vm.backup_xml()
    if not persistent_vm and vm.is_persistent():
        vm.undefine()
    elif persistent_vm and not vm.is_persistent():
        vm.define(original_xml)

    original_autost = vm.is_autostart()
    logging.debug("Original VM %s autostart: %s", vm_name, original_autost)
    options = " "
    if not autostart_vm:
        options = "--disable "
    if autostart_extra:
        options += autostart_extra
    # Readonly mode
    ro_flag = False
    if readonly_mode:
        ro_flag = True

    # Result check
    def autostart_check():
        """
        Check if the VM autostart
        """
        res = False
        if autostart_vm and vm.is_autostart() and vm.is_alive():
            logging.debug("VM autostart as expected")
            res = True
        if not autostart_vm and not vm.is_autostart() and vm.is_dead():
            logging.debug("VM not autostart as expected")
            res = True
        return res

    # Run test
    try:
        # Make sure the VM is inactive (except transient VM)
        if vm.is_persistent() and vm.is_alive():
            vm.destroy()
        cmd_result = virsh.autostart(vm_name,
                                     options,
                                     ignore_status=True,
                                     debug=True,
                                     readonly=ro_flag)
        err = cmd_result.stderr.strip()
        status = cmd_result.exit_status
        # Restart libvirtd and sleep 2
        utils_libvirtd.libvirtd_restart()
        if not status_error:
            if status:
                raise error.TestFail(err)
            elif not autostart_check():
                raise error.TestFail("Autostart check fail")
        elif status_error and status == 0:
            raise error.TestFail("Expect fail, but run successfully.")
    finally:
        # Recover env
        vm.destroy()
        if not vm.is_persistent():
            virsh.define(original_xml)
            os.remove(original_xml)
        if original_autost and not vm.is_autostart():
            virsh.autostart(vm_name, "")
        elif not original_autost and vm.is_autostart():
            virsh.autostart(vm_name, "--disable")
Beispiel #17
def run_virsh_vcpucount(test, params, env):
    """
    Test the command virsh vcpucount

    (1) Iterate perform setvcpus operation with four valid options.
    (2) Iterate call virsh vcpucount with given options.
    (3) Check whether the virsh vcpucount works as expected.
    (4) Recover test environment.

    The test works for domain state as "shut off" or "running", it check
    vcpucount result after vcpu hotplug using setvcpus.

    For setvcpus, include four valid options:
      --config
      --config --maximum
      --live
      --guest

    For vcpucount options, at most 2 options are combined; upstream libvirt
    supports more option combinations now (e.g. 3 options together or a
    single --maximum option), but for backward compatibility only the
    following options are checked:
      None
      --config --active
      --config --maximum
      --live --active
      --live --maximum
      --current --active
      --current --maximum
      --guest
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    xml_file = params.get("vcpucount_xml_file", "vm.xml")
    virsh.dumpxml(vm_name, extra="", to_file=xml_file)
    pre_vm_state = params.get("vcpucount_pre_vm_state")
    options = params.get("vcpucount_options")
    status_error = params.get("status_error")
    set_option = ["--config", "--config --maximum", "--live", "--guest"]

    # At most 2 options are supported together
    if len(options.split()) > 2:
        raise error.TestNAError("More than 2 options are not supported")

    # Prepare domain
    reset_domain(vm, pre_vm_state)

    # Perform guest vcpu hotplug
    for i in range(len(set_option)):
        # Hotplug domain vcpu
        result = virsh.setvcpus(vm_name, 2, set_option[i], ignore_status=True,
                                debug=True)
        setvcpus_status = result.exit_status

        # Call virsh vcpucount with option
        result = virsh.vcpucount(vm_name, options, ignore_status=True,
                                 debug=True)
        output = result.stdout.strip()
        vcpucount_status = result.exit_status

        if "--guest" in options:
            if result.stderr.count("doesn't support option"):
                raise error.TestNAError("Option %s is not supported" % options)

        # Reset domain
        reset_domain(vm, pre_vm_state)

        # Check result
        if status_error == "yes":
            if vcpucount_status == 0:
                raise error.TestFail("Run successfully with wrong command!")
            else:
                logging.info("Run failed as expected")
        else:
            if vcpucount_status != 0:
                raise error.TestFail("Run command failed with options %s" %
                                     options)
            elif setvcpus_status == 0:
                if pre_vm_state == "shut off":
                    if i == 0:
                        expect_out = [4, 2]
                        chk_output_shutoff(output, expect_out, options)
                    elif i == 1:
                        expect_out = [2, 1]
                        chk_output_shutoff(output, expect_out, options)
                    else:
                        raise error.TestFail("setvcpus should failed")
                else:
                    if i == 0:
                        expect_out = [4, 4, 2, 1, 1]
                        chk_output_running(output, expect_out, options)
                    elif i == 1:
                        expect_out = [2, 4, 1, 1, 1]
                        chk_output_running(output, expect_out, options)
                    elif i == 2:
                        expect_out = [4, 4, 1, 2, 2]
                        chk_output_running(output, expect_out, options)
                    else:
                        expect_out = [4, 4, 1, 1, 2]
                        chk_output_running(output, expect_out, options)
            else:
                if pre_vm_state == "shut off":
                    expect_out = [4, 1]
                    chk_output_shutoff(output, expect_out, options)
                else:
                    expect_out = [4, 4, 1, 1, 1]
                    chk_output_running(output, expect_out, options)

    # Recover env
    virsh.destroy(vm_name)
    virsh.undefine(vm_name)
    virsh.define(xml_file)
    if os.path.exists(xml_file):
        os.remove(xml_file)
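
# Side note (hypothetical helper; the row format is assumed from typical
# `virsh vcpucount <dom>` output): with no options virsh prints rows such
# as "maximum      config         4", which is what the expect_out lists
# above are compared against. A parser sketch:
def parse_vcpucount_output(output):
    counts = {}
    for line in output.splitlines():
        fields = line.split()
        # Keep only well-formed "<scope> <when> <count>" rows.
        if len(fields) == 3 and fields[2].isdigit():
            counts[(fields[0], fields[1])] = int(fields[2])
    return counts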
Beispiel #18
    # Check disk count after VM shutdown (with --config).
    check_count_after_shutdown = True
    disk_count_after_shutdown = vm_xml.VMXML.get_disk_count(vm_name)
    if test_cmd == "attach-disk":
        if disk_count_after_shutdown == disk_count_before_cmd:
            check_count_after_shutdown = False
    elif test_cmd == "detach-disk":
        if disk_count_after_shutdown < disk_count_before_cmd:
            check_count_after_shutdown = False

    # Recover VM.
    if vm.is_alive():
        vm.destroy(gracefully=False)
    virsh.undefine(vm_name)
    virsh.define(vm_xml_file)
    if os.path.exists(device_source):
        os.remove(device_source)

    # Check results.
    if status_error == 'yes':
        if status == 0:
            raise error.TestFail("virsh %s exit with unexpected value." %
                                 test_cmd)
    else:
        if status != 0:
            raise error.TestFail("virsh %s failed." % test_cmd)
        if test_cmd == "attach-disk":
            if at_options.count("config"):
                if not check_count_after_shutdown:
                    raise error.TestFail("Cannot see config attached device "
Beispiel #19
def run(test, params, env):
    """
    1) Test Define/undefine/start/destroy/save/restore an OVMF/Seabios domain
    with 'boot dev' element or 'boot order' element
    2) Test Create snapshot with 'boot dev' or 'boot order'

    Steps:
    1) Prepare a typical VM XML, e.g. for OVMF or Seabios Guest boot
    2) Setup boot sequence by element 'boot dev' or 'boot order'
    3) Define/undefine/start/destroy/save/restore VM and check result
    4) Create snapshot with 'boot dev' or 'boot order'
    """
    vm_name = params.get("main_vm", "")
    vm = env.get_vm(vm_name)
    boot_type = params.get("boot_type", "seabios")
    boot_ref = params.get("boot_ref", "dev")
    disk_target_dev = params.get("disk_target_dev", "")
    disk_target_bus = params.get("disk_target_bus", "")
    tmp_dir = data_dir.get_data_dir()
    save_file = os.path.join(tmp_dir, vm_name + ".save")
    nvram_file = params.get("nvram", "")
    expected_text = params.get("expected_text", None)
    boot_entry = params.get("boot_entry", None)
    with_snapshot = "yes" == params.get("with_snapshot", "no")
    snapshot_take = int(params.get("snapshot_take", "1"))
    postfix = params.get("postfix", "")

    # Back up the VM XML
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    if boot_type == "ovmf":
        if not libvirt_version.version_compare(2, 0, 0):
            test.error("OVMF is not supported in the current libvirt version.")

        if not utils_package.package_install('OVMF'):
            test.error("OVMF package install failed")

    if (boot_type == "seabios"
            and not utils_package.package_install('seabios-bin')):
        test.error("seabios package install failed")

    try:
        prepare_boot_xml(vmxml, params)

        # Update domain disk
        # Use sata for uefi, and ide for seabios
        domain_disk = vmxml.get_devices(device_type='disk')[0]
        domain_disk.target = {"dev": disk_target_dev, "bus": disk_target_bus}
        del domain_disk['address']
        vmxml.remove_all_disk()
        vmxml.add_device(domain_disk)

        # Setup boot start sequence
        vmxml.remove_all_boots()
        if boot_ref == "order":
            vmxml.set_boot_order_by_target_dev(disk_target_dev, "1")
        if boot_ref == "dev":
            vmxml.set_os_attrs(**{"boots": ["hd"]})

        logging.debug("The new VM XML is:\n%s", vmxml)
        vmxml.undefine()

        virsh_dargs = {"debug": True, "ignore_status": True}

        if boot_type == "s390_qemu":
            # Start test and check result
            ret = virsh.define(vmxml.xml, **virsh_dargs)
            ret = virsh.start(vm_name, "--paused", **virsh_dargs)
            time.sleep(1)
            vm.create_serial_console()
            time.sleep(1)
            vm.resume()
            if not boot_entry:
                check_boot = console_check(vm, expected_text)
                if not wait_for(check_boot, 60, 1):
                    test.fail("No boot menu found. Please check log.")
            else:
                vm.serial_console.send(boot_entry)
                time.sleep(0.5)
                vm.serial_console.sendcontrol('m')
                check_boot = console_check(vm, expected_text)
                if not wait_for(check_boot, 60, 1):
                    test.fail("Boot entry not selected. Please check log.")
                vm.wait_for_login()
        else:
            if with_snapshot:
                # Create snapshot for guest with boot dev or boot order
                virsh.define(vmxml.xml, **virsh_dargs)
                virsh.start(vm_name, **virsh_dargs)
                vm.wait_for_login()
                external_snapshot = libvirt_disk.make_external_disk_snapshots(
                    vm, disk_target_dev, postfix, snapshot_take)
                snapshot_list_check(vm_name, snapshot_take, postfix, test)
            else:
                # Test the lifecycle for the guest
                kwargs = {
                    "vm_name": vm_name,
                    "save_file": save_file,
                    "boot_type": boot_type,
                    "nvram_file": nvram_file
                }
                domain_lifecycle(vmxml, vm, test, virsh_dargs, **kwargs)
    finally:
        logging.debug("Start to cleanup")
        if vm.is_alive():
            vm.destroy()
        logging.debug("Restore the VM XML")
        if os.path.exists(save_file):
            os.remove(save_file)
        if with_snapshot:
            vmxml_recover_from_snap(vm_name, boot_type, vmxml_backup, test)
            # Remove the generated snapshot files
            if 'external_snapshot' in locals():
                for snap_file in external_snapshot:
                    if os.path.exists(snap_file):
                        os.remove(snap_file)
        else:
            vmxml_backup.sync()
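
# Side note (illustrative XML only; the paths and machine type are made
# up): the two boot-selection styles this test switches between.
# boot_ref == "dev" puts the order under <os>, while boot_ref == "order"
# attaches it to the disk device itself; libvirt rejects domains that mix
# both forms.
BOOT_DEV_OS_XML = """
<os>
  <type arch='x86_64' machine='q35'>hvm</type>
  <boot dev='hd'/>
</os>
"""
BOOT_ORDER_DISK_XML = """
<disk type='file' device='disk'>
  <source file='/var/lib/libvirt/images/demo.qcow2'/>
  <target dev='sda' bus='sata'/>
  <boot order='1'/>
</disk>
"""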
Beispiel #20
    def modify_iface_xml(update, status_error=False):
        """
        Modify interface xml options
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        xml_devices = vmxml.devices
        iface_index = xml_devices.index(
            xml_devices.by_device_tag("interface")[0])
        iface = xml_devices[iface_index]
        if iface_model:
            iface.model = iface_model
        else:
            del iface.model
        if iface_type:
            iface.type_name = iface_type
        del iface.source
        source = ast.literal_eval(iface_source)
        if source:
            net_ifs = utils_net.get_net_if(state="UP")
            # Check source device is valid or not,
            # if it's not in host interface list, try to set
            # source device to first active interface of host
            if (iface.type_name == "direct" and source.has_key('dev')
                    and source['dev'] not in net_ifs):
                logging.warn(
                    "Source device %s is not a interface"
                    " of host, reset to %s", source['dev'], net_ifs[0])
                source['dev'] = net_ifs[0]
            iface.source = source
        backend = ast.literal_eval(iface_backend)
        if backend:
            iface.backend = backend
        driver_dict = {}
        driver_host = {}
        driver_guest = {}
        if iface_driver:
            driver_dict = ast.literal_eval(iface_driver)
        if iface_driver_host:
            driver_host = ast.literal_eval(iface_driver_host)
        if iface_driver_guest:
            driver_guest = ast.literal_eval(iface_driver_guest)
        iface.driver = iface.new_driver(driver_attr=driver_dict,
                                        driver_host=driver_host,
                                        driver_guest=driver_guest)
        if iface.address:
            del iface.address

        logging.debug("New interface xml file: %s", iface)
        if unprivileged_user:
            # Create disk image for unprivileged user
            disk_index = xml_devices.index(
                xml_devices.by_device_tag("disk")[0])
            disk_xml = xml_devices[disk_index]
            logging.debug("source: %s", disk_xml.source)
            disk_source = disk_xml.source.attrs["file"]
            cmd = ("cp -fZ {0} {1} && chown {2}:{2} {1}"
                   "".format(disk_source, dst_disk, unprivileged_user))
            utils.run(cmd)
            disk_xml.source = disk_xml.new_disk_source(
                attrs={"file": dst_disk})
            vmxml.devices = xml_devices
            # Remove all channels to avoid of permission problem
            channels = vmxml.get_devices(device_type="channel")
            for channel in channels:
                vmxml.del_device(channel)

            vmxml.xmltreefile.write()
            logging.debug("New VM xml: %s", vmxml)
            utils.run("chmod a+rw %s" % vmxml.xml)
            virsh.define(vmxml.xml, **virsh_dargs)
        # Try to modify interface xml by update-device or edit xml
        elif update:
            iface.xmltreefile.write()
            ret = virsh.update_device(vm_name, iface.xml, ignore_status=True)
            libvirt.check_exit_status(ret, status_error)
        else:
            vmxml.devices = xml_devices
            vmxml.xmltreefile.write()
            vmxml.sync()
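
# Side note (illustrative values only): the shape of interface XML this
# helper rewrites and then feeds to `virsh update-device`; the MAC,
# network name and driver settings below are placeholders:
IFACE_XML_EXAMPLE = """
<interface type='network'>
  <mac address='52:54:00:12:34:56'/>
  <source network='default'/>
  <model type='virtio'/>
  <driver name='vhost' queues='4'/>
</interface>
"""
# CLI equivalent of the update path above:
#   virsh update-device <vm_name> iface.xml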
Beispiel #21
def run(test, params, env):
    """
    Test mtu feature from virtual network
    """
    vm_name = params.get('main_vm')
    vm = env.get_vm(vm_name)
    mtu_type = params.get('mtu_type')
    mtu_size = params.get('mtu_size', '')
    net = params.get('net', DEFAULT_NET)
    net_type = params.get('net_type', '')
    with_iface = 'yes' == params.get('with_iface', 'no')
    with_net = 'yes' == params.get('with_net', 'no')
    status_error = 'yes' == params.get('status_error', 'no')
    check = params.get('check', '')
    error_msg = params.get('error_msg', '')
    bridge_name = 'br_mtu' + utils_misc.generate_random_string(3)
    add_pkg = params.get('add_pkg', '')
    model = params.get('model', 'virtio')

    def set_network(size, net='default'):
        """
        Set mtu size to a certain network
        """
        logging.info('Set mtu size of network "%s" to %s', net, size)
        default_xml = NetworkXML.new_from_net_dumpxml(net)
        default_xml.mtu = size
        default_xml.sync()
        logging.debug(virsh.net_dumpxml(net))

    def set_interface(mtu_size='', source_network='default',
                      iface_type='network', iface_model='virtio'):
        """
        Set mtu size to a certain interface
        """
        interface_type = 'bridge' if iface_type in ('bridge', 'openvswitch') else iface_type
        iface_dict = {
            'type': interface_type,
            'source': "{'%s': '%s'}" % (interface_type, source_network),
            'model': iface_model
        }

        if iface_type == 'openvswitch':
            iface_dict.update({'virtualport_type': 'openvswitch'})

        if mtu_size:
            iface_dict.update({'mtu': "{'size': %s}" % mtu_size})

        libvirt.modify_vm_iface(vm_name, 'update_iface', iface_dict)
        logging.debug(virsh.dumpxml(vm_name).stdout)

    def get_default_if():
        """
        Get default interface that is using by vm
        """
        ifaces = utils_net.get_sorted_net_if()
        logging.debug('Interfaces on host: %s', ifaces)
        for iface in ifaces[0]:
            if 'Link detected: yes' in process.run('ethtool %s' % iface).stdout_text:
                logging.debug('Found host interface "%s"', iface)
                return iface

    def create_bridge():
        """
        Create a bridge on host for test
        """
        cmd_create_br = 'nmcli con add type bridge con-name %s ifname %s'
        con_name = 'con_' + utils_misc.generate_random_string(3)
        bridge_name = 'br_' + utils_misc.generate_random_string(3)
        process.run(cmd_create_br % (con_name, bridge_name), verbose=True)
        return con_name, bridge_name

    def create_network_xml(name, network_type, base_if='', **kwargs):
        """
        Create a network xml to be defined
        """
        m_net = NetworkXML(name)
        m_net.forward = {'mode': 'bridge'}
        if network_type in ('bridge', 'openvswitch'):
            m_net.bridge = {'name': kwargs['bridge_name']}
        elif network_type == 'macvtap':
            if base_if:
                m_net.forward_interface = [{'dev': base_if}]
        if network_type == 'openvswitch':
            m_net.virtualport_type = 'openvswitch'
        if 'mtu' in kwargs:
            m_net.mtu = kwargs['mtu']
        logging.debug(m_net)
        return m_net.xml
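
    # Side note (illustrative only; names are placeholders): for
    # network_type == 'openvswitch' the helper above produces roughly:
    #   <network>
    #     <name>br_mtuXYZ</name>
    #     <forward mode='bridge'/>
    #     <bridge name='ovsbr0XYZ'/>
    #     <virtualport type='openvswitch'/>
    #     <mtu size='9000'/>
    #   </network>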

    def create_iface(iface_type, **kwargs):
        """
        Create a interface to be attached to vm
        """
        m_iface = Interface(iface_type)
        m_iface.mac_address = utils_net.generate_mac_address_simple()
        if 'base_if' in kwargs:
            m_iface.source = {'dev': kwargs['base_if'],
                              'mode': 'vepa'}
        if 'source_net' in kwargs:
            m_iface.source = {'network': kwargs['source_net']}
        if 'mtu' in kwargs:
            m_iface.mtu = {'size': kwargs['mtu']}
        if 'model_net' in kwargs:
            m_iface.model = kwargs['model_net']
        logging.debug(m_iface.get_xml())
        logging.debug(m_iface)
        return m_iface

    def check_mtu(mtu_size, qemu=False):
        """
        Check if mtu meets expectation on host
        """
        error = ''
        live_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        iface_xml = live_vmxml.get_devices('interface')[0]
        logging.debug(iface_xml.target)
        dev = iface_xml.target['dev']
        ifconfig_info = process.run('ifconfig|grep mtu|grep %s' % dev,
                                    shell=True, verbose=True).stdout_text
        if 'mtu %s' % mtu_size in ifconfig_info:
            logging.info('PASS on ifconfig check for vnet.')
        else:
            error += 'Fail on ifconfig check for vnet.'
        if qemu:
            qemu_mtu_info = process.run('ps aux|grep qemu-kvm',
                                        shell=True, verbose=True).stdout_text
            if 'host_mtu=%s' % mtu_size in qemu_mtu_info:
                logging.info('PASS on qemu cmd line check.')
            else:
                error += 'Fail on qemu cmd line check.'
        if error:
            test.fail(error)

    def check_mtu_in_vm(fn_login, mtu_size):
        """
        Check if mtu meets expectations in vm
        """
        session = fn_login()
        check_cmd = 'ifconfig'
        output = session.cmd(check_cmd)
        session.close()
        logging.debug(output)
        if 'mtu %s' % mtu_size not in output:
            test.fail('MTU check inside vm failed.')
        else:
            logging.debug("MTU check inside vm passed.")

    try:
        bk_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        bk_netxml = NetworkXML.new_from_net_dumpxml(DEFAULT_NET)
        if add_pkg:
            add_pkg = add_pkg.split()
            utils_package.package_install(add_pkg)
        if 'openvswitch' in add_pkg:
            br = 'ovsbr0' + utils_misc.generate_random_string(3)
            process.run('systemctl start openvswitch.service', shell=True, verbose=True)
            process.run('ovs-vsctl add-br %s' % br, shell=True, verbose=True)
            process.run('ovs-vsctl show', shell=True, verbose=True)

        if not check or check in ['save', 'managedsave', 'hotplug_save']:
            # Create bridge or network and set mtu
            iface_type = 'network'
            if net_type in ('bridge', 'openvswitch'):
                if net_type == 'bridge':
                    params['con_name'], br = create_bridge()
                if mtu_type == 'network':
                    test_net = create_network_xml(
                        bridge_name, net_type,
                        bridge_name=br
                    )
                    virsh.net_create(test_net, debug=True)
                    virsh.net_dumpxml(bridge_name, debug=True)
                if mtu_type == 'interface':
                    iface_type = net_type
                    bridge_name = br
            elif net_type == 'network':
                if mtu_type == 'network':
                    set_network(mtu_size)

            iface_mtu = 0
            if mtu_type == 'interface':
                iface_mtu = mtu_size
            if mtu_type == 'network' and with_iface:
                mtu_size = str(int(mtu_size)//2)
                iface_mtu = mtu_size

            source_net = bridge_name if net_type in ('bridge', 'openvswitch') else 'default'

            # set mtu in vm interface
            set_interface(iface_mtu, source_network=source_net, iface_type=iface_type, iface_model=model)
            vm.start()
            vm_login = vm.wait_for_serial_login if net_type in ('bridge', 'openvswitch') else vm.wait_for_login
            vm_login().close()
            check_qemu = (mtu_type == 'interface')

            # Test mtu after save vm
            if check in ('save', 'hotplug_save'):
                if check == 'hotplug_save':
                    iface = create_iface('network', source_net='default',
                                         mtu=mtu_size, model_net=model)
                    params['mac'] = iface.mac_address
                    virsh.attach_device(vm_name, iface.xml, debug=True)
                    virsh.dumpxml(vm_name, debug=True)
                    dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
                    if params['mac'] not in str(dom_xml):
                        test.fail('Failed to attach interface with mtu')
                save_path = os.path.join(data_dir.get_tmp_dir(), vm_name + '.save')
                virsh.save(vm_name, save_path, debug=True)
                virsh.restore(save_path, debug=True)
            if check == 'managedsave':
                virsh.managedsave(vm_name, debug=True)
                virsh.start(vm_name, debug=True)

            # Check in both host and vm
            check_mtu(mtu_size, check_qemu)
            check_mtu_in_vm(vm_login, mtu_size)
            vm_login(timeout=60).close()

            if check == 'hotplug_save':
                virsh.detach_interface(vm_name, 'network %s' % params['mac'], debug=True)
                time.sleep(5)
                dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
                if params['mac'] in str(dom_xml):
                    test.fail('Failed to detach interface with mtu after save-restore')

        else:
            hotplug = 'yes' == params.get('hotplug', 'no')
            if check == 'net_update':
                result = virsh.net_update(
                    DEFAULT_NET, 'modify', 'mtu',
                    '''"<mtu size='%s'/>"''' % mtu_size,
                    debug=True
                )
            if check in ('macvtap', 'bridge_net', 'ovswitch_net'):
                base_if = get_default_if()
                macv_name = 'direct-macvtap' + utils_misc.generate_random_string(3)

                # Test mtu in different type of network
                if mtu_type == 'network':
                    if check == 'macvtap':
                        test_net = create_network_xml(macv_name, 'macvtap',
                                                      base_if, mtu=mtu_size)
                    if check == 'bridge_net':
                        params['con_name'], br = create_bridge()
                        test_net = create_network_xml(
                            bridge_name, 'bridge', mtu=mtu_size,
                            bridge_name=br
                        )
                    if check == 'ovswitch_net':
                        test_net = create_network_xml(
                            bridge_name, 'openvswitch', mtu=mtu_size,
                            bridge_name=br
                        )
                    if 'net_create' in params['id']:
                        result = virsh.net_create(test_net, debug=True)
                    if 'net_define' in params['id']:
                        result = virsh.net_define(test_net, debug=True)

                # Test mtu with or without a binding network
                elif mtu_type == 'interface':
                    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
                    if with_net:
                        test_net = create_network_xml(macv_name, 'macvtap', base_if)
                        virsh.net_create(test_net, debug=True)
                        iface = create_iface('network', source_net=macv_name, mtu=mtu_size)
                        if hotplug:
                            result = virsh.attach_device(vm_name, iface.xml, debug=True)
                        else:
                            vmxml.add_device(iface)
                            vmxml.sync()
                            result = virsh.start(vm_name)
                    else:
                        iface = create_iface('direct', base_if=base_if, mtu=mtu_size)
                        if hotplug:
                            result = virsh.attach_device(vm_name, iface.xml, debug=True)
                        else:
                            vmxml.add_device(iface)
                            result = virsh.define(vmxml.xml, debug=True)
            if check == 'invalid_val':
                iface = create_iface('network', source_net='default', mtu=mtu_size)
                result = virsh.attach_device(vm_name, iface.xml, debug=True)

            # Check result
            libvirt.check_exit_status(result, status_error)
            libvirt.check_result(result, [error_msg])

    finally:
        bk_xml.sync()
        bk_netxml.sync()
        if 'test_net' in locals():
            virsh.net_destroy(bridge_name, debug=True)
        if params.get('con_name'):
            process.run('nmcli con del %s' % params['con_name'], verbose=True)
        if add_pkg:
            process.run("ovs-vsctl del-br %s" % br, verbose=True)
            utils_package.package_remove(add_pkg)
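
# Side note (hypothetical helper): check_mtu() above greps ifconfig, which
# many recent distros no longer install by default; the same host-side
# lookup with iproute2 could be sketched as:
import re
import subprocess

def get_host_mtu(dev):
    # `ip -o link show vnet0` prints "3: vnet0: <...> mtu 1500 qdisc ..."
    out = subprocess.run(['ip', '-o', 'link', 'show', dev],
                         capture_output=True, text=True, check=True).stdout
    match = re.search(r'\bmtu (\d+)', out)
    return int(match.group(1)) if match else None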
Beispiel #22
0
def reset_env(vm_name, xml_file):
    """
    Destroy and undefine the vm, re-define it from xml_file, then remove
    the xml file.

    :param vm_name: name of the vm to reset
    :param xml_file: path of the saved domain xml
    """
    virsh.destroy(vm_name)
    virsh.undefine(vm_name)
    virsh.define(xml_file)
    if os.path.exists(xml_file):
        os.remove(xml_file)
Example #23
def run(test, params, env):
    """
    Test pci/pcie-to-pci bridge

    Hotplug an interface to the pci/pcie-to-pci bridge, then check the xml
    and inside the vm.
    Hot-unplug the interface, then check the xml and inside the vm.
    Also cover other test scenarios of the pci/pcie-to-pci bridge.
    """

    def create_pci_device(pci_model, pci_model_name, **kwargs):
        """
        Create a pci/pcie bridge

        :param pci_model: model of pci controller device
        :param pci_model_name: model name of pci controller device
        :param kwargs: other k-w args that needed to create device
        :return: the newly created device object
        """
        pci_bridge = Controller('pci')
        pci_bridge.model = pci_model
        pci_bridge.model_name = {'name': pci_model_name}
        if 'index' in kwargs:
            pci_bridge.index = kwargs['index']
        if 'address' in kwargs:
            pci_bridge.address = pci_bridge.new_controller_address(
                attrs=eval(kwargs['address']))

        logging.debug('pci_bridge: %s', pci_bridge)
        return pci_bridge
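    # A hypothetical call: the 'address' kwarg is eval'ed, so it is passed
    # as the string form of a dict (these values are assumptions, not taken
    # from the test params):
    #
    #     create_pci_device('pcie-to-pci-bridge', 'pcie-pci-bridge', index='5',
    #                       address="{'type': 'pci', 'bus': '0x00', 'slot': '0x03'}")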

    def create_iface(iface_model, iface_source, **kwargs):
        """
        Create an interface to be attached to vm

        :param iface_model: model of the interface device
        :param iface_source: source of the interface device
        :param kwargs: other k-w args that needed to create device
        :return: the newly created interface object
        """
        iface = Interface('network')
        iface.model = iface_model
        iface.source = eval(iface_source)

        if 'mac' in kwargs:
            iface.mac_address = kwargs['mac']
        else:
            mac = utils_net.generate_mac_address_simple()
            iface.mac_address = mac

        if 'address' in kwargs:
            iface.address = iface.new_iface_address(attrs=eval(kwargs['address']))

        logging.debug('iface: %s', iface)
        return iface
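    # A hypothetical call; iface_source is eval'ed, so it is likewise passed
    # as the string form of a dict (the bus value is an assumption):
    #
    #     create_iface('virtio', "{'network': 'default'}",
    #                  address="{'type': 'pci', 'bus': '0x05'}")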

    vm_name = params.get('main_vm')
    status_error = 'yes' == params.get('status_error', 'no')
    err_msg = params.get('err_msg', '').split(';')
    case = params.get('case', '')
    hotplug = 'yes' == params.get('hotplug', 'no')

    need_pci_br = 'yes' == params.get('need_pci_br', 'no')
    pci_model = params.get('pci_model', 'pci')
    pci_model_name = params.get('pci_model_name')
    pci_br_kwargs = eval(params.get('pci_br_kwargs', '{}'))

    pci_br_has_device = 'yes' == params.get('pci_br_has_device', 'no')
    sound_dev_model_type = params.get('sound_dev_model_type', '')
    sound_dev_address = params.get('sound_dev_address', '')

    iface_model = params.get('iface_model', '')
    iface_source = params.get('iface_source', '')
    iface_kwargs = eval(params.get('iface_kwargs', '{}'))

    max_slots = int(params.get('max_slots', 31))
    pcie_br_count = int(params.get('pcie_br_count', 3))

    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    bkxml = vmxml.copy()
    vm = env.get_vm(vm_name)

    try:

        # Check if there is a pci/pcie-to-pci bridge, if so,
        # just use the existing pci/pcie-to-pci-bridge to test
        ori_pci_br = [dev for dev in vmxml.get_devices('controller')
                      if dev.type == 'pci' and dev.model == pci_model]

        if need_pci_br:
            # If there is not a pci/pcie-to-pci bridge to test,
            # create one and add to vm
            if not ori_pci_br:
                logging.info('No %s on vm, create one', pci_model)
                pci_bridge = create_pci_device(pci_model, pci_model_name)
                vmxml.add_device(pci_bridge)
                vmxml.sync()
                logging.debug(virsh.dumpxml(vm_name))

            # Check if pci/pcie-to-pci bridge is successfully added
            vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
            cur_pci_br = [dev for dev in vmxml.get_devices('controller')
                          if dev.type == 'pci' and dev.model == pci_model]
            if not cur_pci_br:
                test.error('Failed to add %s controller to vm xml' % pci_model)

            pci_br = cur_pci_br[0]
            logging.debug('pci_br: %s', pci_br)
            pci_br_index = pci_br.index

        # If test scenario requires another pci device on pci/pcie-to-pci
        # bridge before hotplug, add a sound device and make sure
        # the 'bus' is same with pci bridge index
        if need_pci_br and pci_br_has_device:
            sound_dev = Sound()
            sound_dev.model_type = sound_dev_model_type
            sound_dev.address = eval(sound_dev_address % pci_br_index)
            logging.debug('sound_dev.address: %s', sound_dev.address)
            vmxml.add_device(sound_dev)
            if case != 'vm_with_pcie_br_1_br':
                vmxml.sync()

        # Test hotplug scenario
        if hotplug:
            vm.start()
            vm.wait_for_login().close()

            # Create interface to be hotplugged
            logging.info('Create interface to be hotplugged')
            target_bus = cur_pci_br[0].index
            target_bus = hex(int(target_bus))
            logging.debug('target_bus: %s', target_bus)

            new_iface_kwargs = {'address': iface_kwargs['address'] % target_bus}
            logging.debug('address: %s', new_iface_kwargs['address'])
            iface = create_iface(iface_model, iface_source, **new_iface_kwargs)
            mac = iface.mac_address

            result = virsh.attach_device(vm_name, iface.xml, debug=True)
            libvirt.check_exit_status(result)

            xml_after_attach = VMXML.new_from_dumpxml(vm_name)
            logging.debug(virsh.dumpxml(vm_name))

            # Check if the iface with given mac address is successfully
            # attached with address bus equal to pcie/pci bridge's index
            iface_list = [
                iface for iface in xml_after_attach.get_devices('interface')
                if iface.mac_address == mac and
                int(iface.address['attrs']['bus'], 16) == int(pci_br_index, 16)
            ]

            logging.debug('iface list after attach: %s', iface_list)
            if not iface_list:
                test.error('Failed to attach interface %s' % iface)

            # Check inside vm
            def check_inside_vm(session, expect=True):
                ip_output = session.cmd('ip a')
                logging.debug('output of "ip a": %s', ip_output)

                return expect if mac in ip_output else not expect

            session = vm.wait_for_serial_login()
            if not utils_misc.wait_for(lambda: check_inside_vm(session, True),
                                       timeout=60, step=5):
                test.fail('Check interface inside vm failed,'
                          'interface not successfully attached:'
                          'not found mac address %s' % mac)
            session.close()

            # Test hotunplug
            result = virsh.detach_device(vm_name, iface.xml, debug=True)
            libvirt.check_exit_status(result)

            # Check if the iface with given mac address has been
            # successfully detached
            def is_hotunplug_interface_ok():
                xml_after_detach = VMXML.new_from_dumpxml(vm_name)
                iface_list_after_detach = [
                    iface for iface in xml_after_detach.get_devices('interface')
                    if iface.mac_address == mac
                ]
                logging.debug('iface list after detach: %s', iface_list_after_detach)
                return iface_list_after_detach == []

            if not utils_misc.wait_for(is_hotunplug_interface_ok, timeout=20):
                test.fail('Failed to detach device: %s' % iface)
            logging.debug(virsh.dumpxml(vm_name))

            # Check again inside vm
            session = vm.wait_for_serial_login()
            if not utils_misc.wait_for(lambda: check_inside_vm(session, False),
                                       timeout=60, step=5):
                test.fail('Check interface inside vm failed,'
                          'interface not successfully detached:'
                          'found mac address %s' % mac)
            session.close()

        # Other test scenarios of pci/pcie
        if case:
            logging.debug('iface_kwargs: %s', iface_kwargs)

            # Set the pcie-to-pci-bridge model name to something other than
            # 'pcie-pci-bridge', or use an invalid controller index for it
            if case in ('wrong_model_name', 'invalid_index'):
                pci_bridge = create_pci_device(pci_model, pci_model_name,
                                               **pci_br_kwargs)
                vmxml.add_device(pci_bridge)
                result_to_check = virsh.define(vmxml.xml, debug=True)

            # Attach device with invalid slot to pcie-to-pci-bridge
            if case == 'attach_with_invalid_slot':
                iface = create_iface(iface_model, iface_source, **iface_kwargs)
                vmxml.add_device(iface)
                result_to_check = virsh.define(vmxml.xml, debug=True)

            # Test that pcie-to-pci-bridge has 31 available slots
            if case == 'max_slots':
                target_bus = cur_pci_br[0].index
                target_bus = hex(int(target_bus))
                logging.debug('target_bus: %s', target_bus)

                # Attach 32 interfaces
                for i in range(max_slots + 1):
                    logging.debug('address: %s', iface_kwargs['address'])
                    new_iface_kwargs = {'address': iface_kwargs['address']
                                        % (target_bus, hex(i + 1))}
                    iface = create_iface(iface_model, iface_source,
                                         **new_iface_kwargs)
                    logging.info('Attaching interface %d', i + 1)
                    result_in_loop = virsh.attach_device(
                        vm_name, iface.xml, flagstr='--config', debug=True)

                    # Attaching the 32nd interface will fail
                    if i == max_slots:
                        status_error = True
                    libvirt.check_exit_status(result_in_loop,
                                              expect_error=status_error)
                logging.debug(virsh.dumpxml(vm_name))

                # Get all devices on pcie-to-pci-bridge from new xml
                # Test if it matches with value of max_slots
                new_xml = VMXML.new_from_dumpxml(vm_name)
                device_on_pci_br = [
                    dev for dev in new_xml.get_devices('interface')
                    if dev.address['type_name'] == 'pci' and
                    int(dev.address['attrs']['bus'], 16) == int(target_bus, 16)
                ]

                logging.info('Devices found on the pcie-to-pci-bridge: %d',
                             len(device_on_pci_br))
                if len(device_on_pci_br) != max_slots:
                    test.fail('Max slots is %d instead of %d' %
                              (len(device_on_pci_br), max_slots))

            # Define a guest whose pcie-to-pci-bridge controller index is <= bus
            if case.startswith('index_v_bus'):
                last_pci_index = max([
                    int(dev.index) for dev in vmxml.get_devices('controller')
                    if dev.type == 'pci'])

                # Index of the new pcie-bridge should be the last index + 1
                new_index = last_pci_index + 1
                if case.endswith('less_than'):
                    new_bus = new_index + 1
                elif case.endswith('equal_to'):
                    new_bus = new_index
Example #24
    def modify_iface_xml(update, status_error=False):
        """
        Modify interface xml options
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        xml_devices = vmxml.devices
        iface_index = xml_devices.index(
            xml_devices.by_device_tag("interface")[0])
        iface = xml_devices[iface_index]
        if iface_model:
            iface.model = iface_model
        else:
            del iface.model
        if iface_type:
            iface.type_name = iface_type
        del iface.source
        source = ast.literal_eval(iface_source)
        if source:
            net_ifs = utils_net.get_net_if(state="UP")
            # Check source device is valid or not,
            # if it's not in host interface list, try to set
            # source device to first active interface of host
            if (iface.type_name == "direct" and 'dev' in source
                    and source['dev'] not in net_ifs):
                logging.warning(
                    "Source device %s is not an interface"
                    " of host, reset to %s", source['dev'], net_ifs[0])
                source['dev'] = net_ifs[0]
            iface.source = source
        backend = ast.literal_eval(iface_backend)
        if backend:
            iface.backend = backend
        driver_dict = {}
        driver_host = {}
        driver_guest = {}
        if iface_driver:
            driver_dict = ast.literal_eval(iface_driver)
        if iface_driver_host:
            driver_host = ast.literal_eval(iface_driver_host)
        if iface_driver_guest:
            driver_guest = ast.literal_eval(iface_driver_guest)
        iface.driver = iface.new_driver(driver_attr=driver_dict,
                                        driver_host=driver_host,
                                        driver_guest=driver_guest)
        if test_target:
            logging.debug("iface.target is %s" % target_dev)
            iface.target = {"dev": target_dev}
        if iface.address:
            del iface.address
        if set_ip:
            iface.ips = [ast.literal_eval(x) for x in set_ips]
        logging.debug("New interface xml file: %s", iface)
        if unprivileged_user:
            # Create disk image for unprivileged user
            disk_index = xml_devices.index(
                xml_devices.by_device_tag("disk")[0])
            disk_xml = xml_devices[disk_index]
            logging.debug("source: %s", disk_xml.source)
            disk_source = disk_xml.source.attrs["file"]
            cmd = ("cp -fZ {0} {1} && chown {2}:{2} {1}"
                   "".format(disk_source, dst_disk, unprivileged_user))
            process.run(cmd, shell=True)
            disk_xml.source = disk_xml.new_disk_source(
                attrs={"file": dst_disk})
            vmxml.devices = xml_devices
            # Remove all channels to avoid permission problems
            channels = vmxml.get_devices(device_type="channel")
            for channel in channels:
                vmxml.del_device(channel)
            logging.info("Unprivileged users can't use 'dac' security driver,"
                         " removing from domain xml if present...")
            vmxml.del_seclabel([('model', 'dac')])

            vmxml.xmltreefile.write()
            logging.debug("New VM xml: %s", vmxml)
            process.run("chmod a+rw %s" % vmxml.xml, shell=True)
            virsh.define(vmxml.xml, **virsh_dargs)
        # Try to modify interface xml by update-device or edit xml
        elif update:
            iface.xmltreefile.write()
            ret = virsh.update_device(vm_name, iface.xml, ignore_status=True)
            libvirt.check_exit_status(ret, status_error)
        else:
            vmxml.devices = xml_devices
            vmxml.xmltreefile.write()
            try:
                vmxml.sync()
                if define_error:
                    test.fail("Define VM succeed, but it should fail")
            except xcepts.LibvirtXMLError as e:
                if not define_error:
                    test.fail("Define VM fail: %s" % e)
    if not utils_libvirtd.libvirtd_is_running():
        raise error.TestFail("Libvirtd service is dead.")
    # Check result
    try:
        try:
            if not status_error:
                if status == 0:
                    check_xml(vm_name, target, dest_path, options)
                    if options.count("--bandwidth"):
                        utl.check_blockjob(vm_name, target, "bandwidth", bandwidth)
                    if options.count("--pivot") + options.count("--finish") == 0:
                        finish_job(vm_name, target, default_timeout)
                    if options.count("--raw"):
                        check_format(dest_path, "raw")
                else:
                    raise error.TestFail(cmd_result.stderr)
            else:
                if status:
                    logging.debug("Expect error: %s", cmd_result.stderr)
                else:
                    raise error.TestFail("Expect fail, but run successfully.")
        except JobTimeout as excpt:
            if not status_error:
                raise error.TestFail("Run command failed: %s" % excpt)
    finally:
        if vm.is_alive():
            vm.destroy()
            virsh.define(original_xml)
        if os.path.exists(dest_path):
            os.remove(dest_path)
Example #26
    def trigger_events(dom, events_list=[]):
        """
        Trigger various events in events_list

        :param dom: the vm objects corresponding to the domain
        :return: the expected output that virsh event command prints out
        """
        expected_events_list = []
        save_path = os.path.join(tmpdir, "%s_event.save" % dom.name)
        logging.debug(dom.name)
        xmlfile = dom.backup_xml()

        try:
            for event in events_list:
                if event in [
                        'start', 'restore', 'create', 'edit', 'define',
                        'undefine', 'crash'
                ]:
                    if dom.is_alive():
                        dom.destroy()
                        if event in ['create', 'define']:
                            dom.undefine()
                else:
                    if not dom.is_alive():
                        dom.start()
                        dom.wait_for_login().close()
                        if event == "resume":
                            dom.pause()

                if event == "undefine":
                    virsh.undefine(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Undefined Removed")
                elif event == "create":
                    virsh.create(xmlfile, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Started Booted")
                elif event == "destroy":
                    virsh.destroy(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Destroyed")
                elif event == "define":
                    virsh.define(xmlfile, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Defined Added")
                elif event == "start":
                    virsh.start(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Started Booted")
                    dom.wait_for_login().close()
                elif event == "suspend":
                    virsh.suspend(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Suspended Paused")
                    if not libvirt_version.version_compare(5, 3, 0):
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Suspended Paused")
                elif event == "resume":
                    virsh.resume(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                elif event == "save":
                    virsh.save(dom.name, save_path, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Suspended Paused")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Saved")
                elif event == "restore":
                    if not os.path.exists(save_path):
                        logging.error("%s not exist", save_path)
                    else:
                        virsh.restore(save_path, **virsh_dargs)
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Started Restored")
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Resumed Snapshot")
                elif event == "edit":
                    # Check whether the 'description' element exists.
                    domxml = virsh.dumpxml(dom.name).stdout.strip()
                    find_desc = parseString(domxml).getElementsByTagName(
                        "description")
                    if find_desc == []:
                        # If it does not exist, add one.
                        logging.info("Adding <description> to guest")
                        virsh.desc(dom.name, "--config",
                                   "Added desc for testvm", **virsh_dargs)
                    # The edit operation deletes the 'description' element.
                    edit_cmd = [r":g/<description.*<\/description>/d"]
                    utlv.exec_virsh_edit(dom.name, edit_cmd)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Defined Updated")
                elif event == "shutdown":
                    if signal_name is None:
                        virsh.shutdown(dom.name, **virsh_dargs)
                        # Wait a few seconds for shutdown finish
                        time.sleep(3)
                        if utils_misc.compare_qemu_version(2, 9, 0):
                            # Shutdown reason is distinguished since qemu 2.9.0-9
                            expected_events_list.append(
                                "'lifecycle' for %s:"
                                " Shutdown Finished after guest request")
                    else:
                        os.kill(dom.get_pid(), getattr(signal, signal_name))
                        if utils_misc.compare_qemu_version(2, 9, 0):
                            expected_events_list.append(
                                "'lifecycle' for %s:"
                                " Shutdown Finished after host request")
                    if not utils_misc.compare_qemu_version(2, 9, 0):
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Shutdown Finished")
                    wait_for_shutoff(dom)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Shutdown")
                elif event == "crash":
                    if not vmxml.xmltreefile.find('devices').findall('panic'):
                        # Set panic device
                        panic_dev = Panic()
                        panic_dev.model = panic_model
                        panic_dev.addr_type = addr_type
                        panic_dev.addr_iobase = addr_iobase
                        vmxml.add_device(panic_dev)
                    vmxml.on_crash = "coredump-restart"
                    vmxml.sync()
                    logging.info("Guest xml now is: %s", vmxml)
                    dom.start()
                    session = dom.wait_for_login()
                    # Stop kdump in the guest
                    session.cmd("systemctl stop kdump", ignore_all_errors=True)
                    # Enable sysRq
                    session.cmd("echo 1 > /proc/sys/kernel/sysrq")
                    try:
                        # Crash the guest
                        session.cmd("echo c > /proc/sysrq-trigger", timeout=60)
                    except (ShellTimeoutError,
                            ShellProcessTerminatedError) as details:
                        logging.info(details)
                    session.close()
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Crashed Panicked")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                elif event == "reset":
                    virsh.reset(dom.name, **virsh_dargs)
                    expected_events_list.append("'reboot' for %s")
                elif event == "vcpupin":
                    virsh.vcpupin(dom.name, '0', '0', **virsh_dargs)
                    expected_events_list.append("'tunable' for %s:"
                                                "\n\tcputune.vcpupin0: 0")
                elif event == "emulatorpin":
                    virsh.emulatorpin(dom.name, '0', **virsh_dargs)
                    expected_events_list.append("'tunable' for %s:"
                                                "\n\tcputune.emulatorpin: 0")
                elif event == "setmem":
                    mem_size = int(params.get("mem_size", 512000))
                    virsh.setmem(dom.name, mem_size, **virsh_dargs)
                    expected_events_list.append("'balloon-change' for %s:")
                elif event == "device-added-removed":
                    add_disk(dom.name, new_disk, 'vdb', '')
                    expected_events_list.append("'device-added' for %s:"
                                                " virtio-disk1")
                    virsh.detach_disk(dom.name, 'vdb', **virsh_dargs)
                    expected_events_list.append("'device-removed' for %s:"
                                                " virtio-disk1")
                    ifaces = vmxml.devices.by_device_tag('interface')
                    if ifaces:
                        iface_xml_obj = ifaces[0]
                        iface_xml_obj.del_address()
                        logging.debug(iface_xml_obj)
                    else:
                        test.error('No interface in vm to be detached.')

                    virsh.detach_device(dom.name,
                                        iface_xml_obj.xml,
                                        wait_remove_event=True,
                                        event_timeout=60,
                                        **virsh_dargs)
                    expected_events_list.append("'device-removed' for %s:"
                                                " net0")
                    virsh.attach_device(dom.name, iface_xml_obj.xml,
                                        **virsh_dargs)
                    expected_events_list.append("'device-added' for %s:"
                                                " net0")
                elif event == "change-media":
                    target_device = "hdc"
                    device_target_bus = params.get("device_target_bus", "ide")
                    disk_blk = vm_xml.VMXML.get_disk_blk(dom.name)
                    logging.info("disk_blk %s", disk_blk)
                    if target_device not in disk_blk:
                        logging.info("Adding cdrom to guest")
                        if dom.is_alive():
                            dom.destroy()
                        add_disk(
                            dom.name, "''", target_device,
                            ("--type cdrom --sourcetype file --driver qemu " +
                             "--config --targetbus %s" % device_target_bus))
                        dom.start()
                    all_options = new_disk + " --insert"
                    virsh.change_media(dom.name, target_device, all_options,
                                       **virsh_dargs)
                    expected_events_list.append("'tray-change' for %s disk" +
                                                " .*%s.*:" %
                                                device_target_bus + " opened")
                    expected_events_list.append("'tray-change' for %s disk" +
                                                " .*%s.*:" %
                                                device_target_bus + " closed")
                    all_options = new_disk + " --eject"
                    virsh.change_media(dom.name, target_device, all_options,
                                       **virsh_dargs)
                    expected_events_list.append("'tray-change' for %s disk" +
                                                " .*%s.*:" %
                                                device_target_bus + " opened")

                else:
                    test.error("Unsupported event: %s" % event)
                # Event may not received immediately
                time.sleep(3)
        finally:
            if os.path.exists(save_path):
                os.unlink(save_path)
            if os.path.exists(new_disk):
                os.unlink(new_disk)
        return [(dom.name, event) for event in expected_events_list]
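    # A minimal sketch of a hypothetical consumer: each returned pair is
    # (domain name, format string), and substituting the name yields the
    # line expected in `virsh event` output:
    #
    #     expected_lines = [fmt % name for name, fmt in
    #                       trigger_events(dom, events_list)]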
Example #27
def run_virsh_edit(test, params, env):
    """
    Test command: virsh edit.

    The command can edit XML configuration for a domain
    1.Prepare test environment,destroy or suspend a VM.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Perform virsh edit operation.
    4.Recover test environment.
    5.Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    domid = vm.get_id()
    domuuid = vm.get_uuid()
    vcpucount_result = virsh.vcpucount(vm_name, options="--config")
    if vcpucount_result.exit_status:
        raise error.TestError("Failed to get vcpucount. Detail:\n%s"
                              % vcpucount_result)
    original_vcpu = vcpucount_result.stdout.strip()
    expected_vcpu = str(int(original_vcpu)+1)

    libvirtd = params.get("libvirtd", "on")
    vm_ref = params.get("edit_vm_ref")
    status_error = params.get("status_error")

    def modify_vcpu(source, edit_cmd):
        """
        Modify vm's vcpu information.

        :param source: virsh edit's option.
        :param edit_cmd: an edit command line.
        :return: True if the edit succeeded, False otherwise.
        """
        session = aexpect.ShellSession("sudo -s")
        try:
            session.sendline("virsh edit %s" % source)
            session.sendline(edit_cmd)
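            # ESC leaves vi insert mode; 'ZZ' then saves the file and quits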
            session.send('\x1b')
            session.send('ZZ')
            # Sleep briefly to make sure the modification has completed.
            time.sleep(1)
            session.close()
            return True
        except Exception:
            return False

    def edit_vcpu(source, guest_name):
        """
        Modify vm's vcpu information by virsh edit command.

        :param source: virsh edit's option.
        :param guest_name: vm's name.
        :return: True if the edit succeeded, False otherwise.
        """
        dic_mode = {"edit": ":%s /[0-9]*<\/vcpu>/"+expected_vcpu+"<\/vcpu>",
                    "recover": ":%s /[0-9]*<\/vcpu>/"+original_vcpu+"<\/vcpu>"}
        status = modify_vcpu(source, dic_mode["edit"])
        if not status:
            return status
        if params.get("paused_after_start_vm") == "yes":
            virsh.resume(guest_name, ignore_status=True)
            virsh.destroy(guest_name)
        elif params.get("start_vm") == "yes":
            virsh.destroy(guest_name)
        vcpus = vm.dominfo()["CPU(s)"]
        # Recover the original vcpu count
        status = modify_vcpu(source, dic_mode["recover"])
        if status and vcpus != expected_vcpu:
            return False
        return status

    # Run test case
    xml_file = os.path.join(test.tmpdir, 'tmp.xml')
    virsh.dumpxml(vm_name, extra="", to_file=xml_file)

    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    try:
        if vm_ref == "id":
            status = edit_vcpu(domid, vm_name)
        elif vm_ref == "uuid":
            status = edit_vcpu(domuuid, vm_name)
        elif vm_ref == "name" and status_error == "no":
            status = edit_vcpu(vm_name, vm_name)
        else:
            status = False
            if vm_ref.find("invalid") != -1:
                vm_ref = params.get(vm_ref)
            elif vm_ref == "name":
                vm_ref = "%s %s" % (vm_name, params.get("edit_extra_param"))
            edit_status = virsh.edit(vm_ref).exit_status
            if edit_status == 0:
                status = True
    except Exception:
        status = False

    # Recover libvirtd service
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # Recover VM
    if vm.is_alive():
        vm.destroy()
    virsh.undefine(vm_name)
    virsh.define(xml_file)

    # Check status_error
    if status_error == "yes":
        if status:
            raise error.TestFail("Run successfully with wrong command!")
    elif status_error == "no":
        if not status:
            raise error.TestFail("Run failed with right command")
Example #28
def run(test, params, env):
    """
    Test mtu feature from virtual network
    """
    vm_name = params.get('main_vm')
    vm = env.get_vm(vm_name)
    mtu_type = params.get('mtu_type')
    mtu_size = params.get('mtu_size', '')
    net = params.get('net', DEFAULT_NET)
    net_type = params.get('net_type', '')
    with_iface = 'yes' == params.get('with_iface', 'no')
    with_net = 'yes' == params.get('with_net', 'no')
    status_error = 'yes' == params.get('status_error', 'no')
    check = params.get('check', '')
    error_msg = params.get('error_msg', '')
    bridge_name = 'br_mtu' + utils_misc.generate_random_string(3)
    add_pkg = params.get('add_pkg', '')
    model = params.get('model', 'virtio')
    timeout = int(params.get('timeout', 240))
    wait_for_up = int(params.get('wait_for_up', 0))
    create_tap = 'yes' == params.get('create_tap', 'no')
    tap_mtu_size = int(params.get('tap_mtu_size', 2000))

    if create_tap and 'managed_no' in params['name']:
        if not libvirt_version.version_compare(7, 0, 0):
            test.cancel('This test is not supported until libvirt-7.0.0')

    def set_network(size, net='default'):
        """
        Set mtu size to a certain network
        """
        logging.info('Set mtu size of network "%s" to %s', net, size)
        default_xml = NetworkXML.new_from_net_dumpxml(net)
        default_xml.mtu = size
        default_xml.sync()
        logging.debug(virsh.net_dumpxml(net))
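    # For reference, this ends up as an element of the form
    # <mtu size='9000'/> in the network xml (the size is only an example).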

    def set_interface(mtu_size='',
                      source_network='default',
                      iface_type='network',
                      iface_model='virtio',
                      iface_target=None):
        """
        Set mtu size to a certain interface
        """
        interface_type = 'bridge' if iface_type in (
            'bridge', 'openvswitch') else iface_type
        iface_dict = {'type': interface_type, 'model': iface_model}
        if source_network:
            iface_dict.update(
                {'source': "{'%s': '%s'}" % (interface_type, source_network)})

        if iface_type == 'openvswitch':
            iface_dict.update({'virtualport_type': 'openvswitch'})

        if mtu_size:
            iface_dict.update({'mtu': "{'size': %s}" % mtu_size})

        if iface_target:
            iface_dict.update({'target': iface_target})

        libvirt.modify_vm_iface(vm_name, 'update_iface', iface_dict)
        logging.debug(virsh.dumpxml(vm_name).stdout)
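    # A hypothetical call: point the first vm interface at an openvswitch
    # bridge with a 1400-byte mtu (values are examples only):
    #
    #     set_interface('1400', source_network=bridge_name,
    #                   iface_type='openvswitch', iface_model='virtio')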

    def get_default_if():
        """
        Get default interface that is using by vm
        """
        ifaces = utils_net.get_sorted_net_if()
        logging.debug('Interfaces on host: %s', ifaces)
        for iface in ifaces[0]:
            if 'Link detected: yes' in process.run('ethtool %s' %
                                                   iface).stdout_text:
                logging.debug('Found host interface "%s"', iface)
                return iface

    def create_bridge():
        """
        Create a bridge on host for test
        """
        cmd_create_br = 'nmcli con add type bridge con-name %s ifname %s'
        con_name = 'con_' + utils_misc.generate_random_string(3)
        bridge_name = 'br_' + utils_misc.generate_random_string(3)
        process.run(cmd_create_br % (con_name, bridge_name), verbose=True)
        return con_name, bridge_name

    def create_network_xml(name, network_type, base_if='', **kwargs):
        """
        Create a network xml to be defined
        """
        m_net = NetworkXML(name)
        m_net.forward = {'mode': 'bridge'}
        if network_type in ('bridge', 'openvswitch'):
            m_net.bridge = {'name': kwargs['bridge_name']}
        elif network_type == 'macvtap':
            if base_if:
                m_net.forward_interface = [{'dev': base_if}]
        if network_type == 'openvswitch':
            m_net.virtualport_type = 'openvswitch'
        if 'mtu' in kwargs:
            m_net.mtu = kwargs['mtu']
        logging.debug(m_net)
        libvirt.wait_for_file_over("</network>", m_net.xml)
        return m_net.xml

    def create_iface(iface_type, **kwargs):
        """
        Create an interface to be attached to vm
        """
        m_iface = Interface(iface_type)
        m_iface.mac_address = utils_net.generate_mac_address_simple()
        if 'base_if' in kwargs:
            m_iface.source = {'dev': kwargs['base_if'], 'mode': 'vepa'}
        if 'source_net' in kwargs:
            m_iface.source = {'network': kwargs['source_net']}
        if 'mtu' in kwargs:
            m_iface.mtu = {'size': kwargs['mtu']}
        if 'model_net' in kwargs:
            m_iface.model = kwargs['model_net']
        logging.debug(m_iface.get_xml())
        logging.debug(m_iface)
        return m_iface
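    # Hypothetical examples of the two flavors this helper builds
    # ('eno1' and the mtu values are assumptions, not test params):
    #
    #     create_iface('direct', base_if='eno1', mtu='9000')
    #     create_iface('network', source_net='default', mtu='9000',
    #                  model_net='virtio')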

    def check_mtu(mtu_size, qemu=False):
        """
        Check if mtu meets expectation on host
        """
        error = ''
        live_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        iface_xml = live_vmxml.get_devices('interface')[0]
        logging.debug(iface_xml.target)
        dev = iface_xml.target['dev']
        tap_info = process.run('ip link|grep mtu|grep %s' % dev,
                               shell=True,
                               verbose=True).stdout_text
        if 'mtu %s' % mtu_size in tap_info:
            logging.info('PASS on mtu check for vnet.')
        else:
            error += 'Fail on mtu check for vnet.'
        if qemu:
            qemu_mtu_info = process.run('ps aux|grep qemu-kvm',
                                        shell=True,
                                        verbose=True).stdout_text
            if 'host_mtu=%s' % mtu_size in qemu_mtu_info:
                logging.info('PASS on qemu cmd line check.')
            else:
                error += 'Fail on qemu cmd line check.'
        if error:
            test.fail(error)

    def check_mtu_in_vm(fn_login, mtu_size, timeout):
        """
        Check if mtu meets expectations in vm
        """
        session = fn_login(timeout=timeout)
        check_cmd = 'ip link'
        output = session.cmd(check_cmd)
        session.close()
        logging.debug(output)
        if 'mtu %s' % mtu_size not in output:
            test.fail('MTU check inside vm failed.')
        else:
            logging.debug("MTU check inside vm passed.")

    try:
        bk_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        bk_netxml = NetworkXML.new_from_net_dumpxml(DEFAULT_NET)
        if add_pkg:
            add_pkg = add_pkg.split()
            new_pkg = add_pkg.copy()
            if 'openvswitch' in add_pkg and shutil.which('ovs-vsctl'):
                new_pkg.remove('openvswitch')
            utils_package.package_install(new_pkg)
        if 'openvswitch' in add_pkg:
            br = 'ovsbr0' + utils_misc.generate_random_string(3)
            process.run('systemctl start openvswitch.service',
                        shell=True,
                        verbose=True)
            process.run('ovs-vsctl add-br %s' % br, shell=True, verbose=True)
            process.run('ovs-vsctl show', shell=True, verbose=True)

        if create_tap:
            tap_name = 'tap_' + utils_misc.generate_random_string(3)
            tap_cmd = params.get('tap_cmd')
            if tap_cmd is None:
                test.error('No tap creating command provided.')
            tap_cmd = tap_cmd.format(tap_name=tap_name)
            logging.debug('Tap creating command: \n %s', tap_cmd)
            # Create tap device
            process.run(tap_cmd, verbose=True, shell=True)
            # Check tap device's detail
            ip_link_cmd = 'ip link show %s'
            process.run(ip_link_cmd % tap_name, verbose=True)

            if 'managed_no' in params['name']:
                iface_target = params.get('iface_target') % tap_name
                set_interface(mtu_size,
                              source_network=None,
                              iface_type='ethernet',
                              iface_target=iface_target)
                vm.start()
                logging.debug(virsh.dumpxml(vm_name).stdout)

                # Check mtu of tap on host
                host_iface_info = process.run(ip_link_cmd % tap_name,
                                              verbose=True).stdout_text
                if 'mtu %s' % tap_mtu_size in host_iface_info:
                    logging.info('Host mtu size check PASS.')
                else:
                    test.fail('Host mtu size check FAIL.')

                # Get iface mac address
                vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
                iface = vmxml.devices.by_device_tag('interface')[0]
                iface_mac = iface.mac_address

                # Check mtu inside vm
                session = vm.wait_for_serial_login()
                iface_in_vm = utils_net.get_linux_ifname(session,
                                                         mac_address=iface_mac)
                vm_iface_info = session.cmd(ip_link_cmd % iface_in_vm)
                session.close()
                logging.debug(vm_iface_info)
                if 'mtu %s' % mtu_size in vm_iface_info:
                    logging.info('Inside-vm mtu size check PASS.')
                else:
                    test.fail('Inside-vm mtu size check FAIL.')

        elif not check or check in ['save', 'managedsave', 'hotplug_save']:
            # Create bridge or network and set mtu
            iface_type = 'network'
            if net_type in ('bridge', 'openvswitch'):
                if net_type == 'bridge':
                    params['con_name'], br = create_bridge()
                if mtu_type == 'network':
                    test_net = create_network_xml(bridge_name,
                                                  net_type,
                                                  bridge_name=br)
                    virsh.net_create(test_net, debug=True)
                    virsh.net_dumpxml(bridge_name, debug=True)
                if mtu_type == 'interface':
                    iface_type = net_type
                    bridge_name = br
            elif net_type == 'network':
                if mtu_type == 'network':
                    set_network(mtu_size)

            iface_mtu = 0
            if mtu_type == 'interface':
                iface_mtu = mtu_size
            if mtu_type == 'network' and with_iface:
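                # Give the interface a different (halved) value so the
                # interface-level mtu can be told apart from the network's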
                mtu_size = str(int(mtu_size) // 2)
                iface_mtu = mtu_size

            source_net = bridge_name if net_type in (
                'bridge', 'openvswitch') else 'default'

            # set mtu in vm interface
            set_interface(iface_mtu,
                          source_network=source_net,
                          iface_type=iface_type,
                          iface_model=model)
            vm.start()
            vm_login = vm.wait_for_serial_login if net_type in (
                'bridge', 'openvswitch') else vm.wait_for_login
            vm_login().close()
            check_qemu = (mtu_type == 'interface')

            # Test mtu after save vm
            if check in ('save', 'hotplug_save'):
                if check == 'hotplug_save':
                    iface = create_iface('network',
                                         source_net='default',
                                         mtu=mtu_size,
                                         model_net=model)
                    params['mac'] = iface.mac_address
                    virsh.attach_device(vm_name, iface.xml, debug=True)
                    virsh.dumpxml(vm_name, debug=True)
                    dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
                    if params['mac'] not in str(dom_xml):
                        test.fail('Failed to attach interface with mtu')
                save_path = os.path.join(data_dir.get_tmp_dir(),
                                         vm_name + '.save')
                time.sleep(wait_for_up)
                virsh.save(vm_name, save_path, debug=True)
                virsh.restore(save_path, debug=True)
            if check == 'managedsave':
                virsh.managedsave(vm_name, debug=True)
                virsh.start(vm_name, debug=True)

            # Check in both host and vm
            check_mtu(mtu_size, check_qemu)
            if mtu_type == 'interface' or with_iface:
                check_mtu_in_vm(vm_login, mtu_size, timeout=timeout)
                vm_login(timeout=timeout).close()

            if check == 'hotplug_save':
                virsh.detach_interface(vm_name,
                                       'network %s' % params['mac'],
                                       debug=True)
                time.sleep(5)
                dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
                if params['mac'] in str(dom_xml):
                    test.fail(
                        'Failed to detach interface with mtu after save-restore'
                    )

        else:
            hotplug = 'yes' == params.get('hotplug', 'no')
            if check == 'net_update':
                result = virsh.net_update(DEFAULT_NET,
                                          'modify',
                                          'mtu',
                                          '''"<mtu size='%s'/>"''' % mtu_size,
                                          debug=True)
            if check in ('macvtap', 'bridge_net', 'ovswitch_net'):
                base_if = get_default_if()
                macv_name = 'direct-macvtap' + utils_misc.generate_random_string(
                    3)

                # Test mtu in different types of networks
                if mtu_type == 'network':
                    if check == 'macvtap':
                        test_net = create_network_xml(macv_name,
                                                      'macvtap',
                                                      base_if,
                                                      mtu=mtu_size)
                    if check == 'bridge_net':
                        params['con_name'], br = create_bridge()
                        test_net = create_network_xml(bridge_name,
                                                      'bridge',
                                                      mtu=mtu_size,
                                                      bridge_name=br)
                    if check == 'ovswitch_net':
                        test_net = create_network_xml(bridge_name,
                                                      'openvswitch',
                                                      mtu=mtu_size,
                                                      bridge_name=br)
                    if 'net_create' in params['id']:
                        result = virsh.net_create(test_net, debug=True)
                    if 'net_define' in params['id']:
                        result = virsh.net_define(test_net, debug=True)

                # Test mtu with or without a binding network
                elif mtu_type == 'interface':
                    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
                    if with_net:
                        test_net = create_network_xml(macv_name, 'macvtap',
                                                      base_if)
                        virsh.net_create(test_net, debug=True)
                        iface = create_iface('network',
                                             source_net=macv_name,
                                             mtu=mtu_size,
                                             model_net=model)
                        if hotplug:
                            result = virsh.attach_device(vm_name,
                                                         iface.xml,
                                                         debug=True)
                        else:
                            vmxml.add_device(iface)
                            vmxml.sync()
                            result = virsh.start(vm_name)
                    else:
                        iface = create_iface('direct',
                                             base_if=base_if,
                                             mtu=mtu_size,
                                             model_net=model)
                        if hotplug:
                            result = virsh.attach_device(vm_name,
                                                         iface.xml,
                                                         debug=True)
                        else:
                            vmxml.add_device(iface)
                            result = virsh.define(vmxml.xml, debug=True)
            if check == 'invalid_val':
                iface = create_iface('network',
                                     source_net='default',
                                     mtu=mtu_size,
                                     model_net=model)
                result = virsh.attach_device(vm_name, iface.xml, debug=True)

            # Check result
            libvirt.check_exit_status(result, status_error)
            libvirt.check_result(result, [error_msg])

    finally:
        bk_xml.sync()
        bk_netxml.sync()
        if 'test_net' in locals():
            virsh.net_destroy(bridge_name, debug=True)
        if params.get('con_name'):
            process.run('nmcli con del %s' % params['con_name'], verbose=True)
        if add_pkg:
            process.run("ovs-vsctl del-br %s" % br, verbose=True)
            utils_package.package_remove(add_pkg)
        if create_tap:
            process.run('ip tuntap del mode tap {}'.format(tap_name),
                        verbose=True,
                        shell=True)
Example #29
def reset_env(vm_name, xml_file):
    """
    Reset the vm: destroy, undefine, re-define from xml_file, then remove
    the xml file.
    """
    virsh.destroy(vm_name)
    virsh.undefine(vm_name)
    virsh.define(xml_file)
    if os.path.exists(xml_file):
        os.remove(xml_file)
Example #30
def run_virsh_setvcpus(test, params, env):
    """
    Test command: virsh setvcpus.

    The command can change the number of virtual CPUs in the guest domain.
    1.Prepare test environment,destroy or suspend a VM.
    2.Perform virsh setvcpus operation.
    3.Recover test environment.
    4.Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    xml_file = params.get("setvcpus_xml_file", "vm.xml")
    virsh.dumpxml(vm_name, extra="--inactive", to_file=xml_file)
    tmp_file = params.get("setvcpus_tmp_file", "tmp.xml")
    pre_vm_state = params.get("setvcpus_pre_vm_state")
    command = params.get("setvcpus_command", "setvcpus")
    options = params.get("setvcpus_options")
    domain = params.get("setvcpus_domain")
    count = params.get("setvcpus_count")
    extra_param = params.get("setvcpus_extra_param")
    count_option = "%s %s" % (count, extra_param)
    status_error = params.get("status_error")

    def get_current_vcpus():
        """
        Get current vcpu number.
        """
        vcpus_set = ""
        virsh.dumpxml(vm_name, extra="", to_file=tmp_file)
        dom = parse(tmp_file)
        root = dom.documentElement
        vcpus_2 = root.getElementsByTagName("vcpu")
        for n in vcpus_2:
            vcpus_set += n.getAttribute("current")
            vcpus_set = int(vcpus_set)
        dom.unlink()
        return vcpus_set
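    # The element parsed above has the form <vcpu current='1'>4</vcpu>;
    # the helper returns the value of the 'current' attribute as an int.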

    if vm.is_alive():
        vm.destroy()
    vm_xml = libvirt_xml.VMXML()
    vm_xml.set_vm_vcpus(vm_name, 2)
    vm.start()
    vm.wait_for_login()

    if status_error == "no":
        vcpus_new = len(vm.vcpuinfo())
    domid = vm.get_id()
    domuuid = vm.get_uuid()
    if pre_vm_state == "paused":
        vm.pause()
    elif pre_vm_state == "shut off":
        vm.destroy()

    if domain == "remote_name":
        remote_ssh_addr = params.get("remote_ip", None)
        remote_addr = params.get("local_ip", None)
        remote_password = params.get("remote_password", None)
        host_type = virsh.driver()
        if host_type == "qemu":
            remote_string = "qemu+ssh://%s/system" % remote_addr
        elif host_type == "xen":
            remote_string = "xen+ssh://%s" % remote_addr
        command = "virsh -c %s setvcpus %s 1 --live" % (remote_string, vm_name)
        if virsh.has_command_help_match(command, "--live") is None:
            status_error = "yes"
        session = remote.remote_login("ssh", remote_ssh_addr, "22", "root",
                                      remote_password, "#")
        session.cmd_output('LANG=C')
        status, output = session.cmd_status_output(command, internal_timeout=5)
        session.close()
        vcpus_current = len(vm.vcpuinfo())
    else:
        if domain == "name":
            dom_option = vm_name
        elif domain == "id":
            dom_option = domid
            if params.get("setvcpus_hex_id") is not None:
                dom_option = hex(int(domid))
            elif params.get("setvcpus_invalid_id") is not None:
                dom_option = params.get("setvcpus_invalid_id")
        elif domain == "uuid":
            dom_option = domuuid
            if params.get("setvcpus_invalid_uuid") is not None:
                dom_option = params.get("setvcpus_invalid_uuid")
        else:
            dom_option = domain
        option_list = options.split(" ")
        for item in option_list:
            if virsh.has_command_help_match(command, item) is None:
                status_error = "yes"
                break
        status = virsh.setvcpus(dom_option,
                                count_option,
                                options,
                                ignore_status=True).exit_status
        if pre_vm_state == "paused":
            virsh.resume(vm_name, ignore_status=True)
        if status_error == "no":
            if status == 0:
                if pre_vm_state == "shut off":
                    if options == "--config":
                        vcpus_set = len(vm.vcpuinfo())
                    elif options == "--current":
                        vcpus_set = get_current_vcpus()
                    elif options == "--maximum --config":
                        vcpus_set = ""
                        dom = parse("/etc/libvirt/qemu/%s.xml" % vm_name)
                        vcpus_set = dom.getElementsByTagName(
                            "vcpu")[0].firstChild.data
                        vcpus_set = int(vcpus_set)
                        dom.unlink()
                else:
                    vcpus_set = len(vm.vcpuinfo())
                if domain == "id":
                    cmd_chk = "cat /etc/libvirt/qemu/%s.xml" % vm_name
                    output1 = process.run(cmd_chk, shell=True).stdout_text
                    logging.info("guest-info:\n%s", output1)

    virsh.destroy(vm_name)
    virsh.undefine(vm_name)
    virsh.define(xml_file)
    if os.path.exists(xml_file):
        os.remove(xml_file)
    if os.path.exists(tmp_file):
        os.remove(tmp_file)

    # check status_error
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Run successfully with wrong command!")
    else:
        if status != 0:
            raise error.TestFail("Run failed with right command")
        else:
            if options == "--maximum --config":
                if vcpus_set != 4:
                    raise error.TestFail("Run failed with right command1")
            elif domain == "id":
                if options == "--config":
                    if vcpus_set != vcpus_new or not re.search(
                            '<vcpu current=\'1\'>%s</vcpu>' % vcpus_new,
                            output1):
                        raise error.TestFail("Run failed with right command2")
                elif options == "--config --live":
                    if vcpus_set != 1 or not re.search(
                            '<vcpu current=\'1\'>%s</vcpu>' % vcpus_new,
                            output1):
                        raise error.TestFail("Run failed with right command3")
                else:
                    if vcpus_set != 1 or re.search(
                            '<vcpu current=\'1\'>%s</vcpu>' % vcpus_new,
                            output1):
                        raise error.TestFail("Run failed with right command4")
            else:
                if vcpus_set != 1:
                    raise error.TestFail("Run failed with right command5")
Beispiel #31
def run(test, params, env):
    """
    LXC container life cycle testing by virsh command
    """
    uri = params.get("connect_uri", "lxc:///")
    vm_name = params.get("main_vm")
    dom_type = params.get("lxc_domtype", "lxc")
    vcpu = int(params.get("lxc_vcpu", 1))
    max_mem = int(params.get("lxc_max_mem", 500000))
    current_mem = int(params.get("lxc_current_mem", 500000))
    os_type = params.get("lxc_ostype", "exe")
    os_arch = params.get("lxc_osarch", "x86_64")
    os_init = params.get("lxc_osinit", "/bin/sh")
    emulator_path = params.get("lxc_emulator",
                               "/usr/libexec/libvirt_lxc")
    interface_type = params.get("lxc_interface_type", "network")
    net_name = params.get("lxc_net_name", "default")
    full_os = ("yes" == params.get("lxc_full_os", "no"))
    install_root = params.get("lxc_install_root", "/")
    fs_target = params.get("lxc_fs_target", "/")
    fs_accessmode = params.get("lxc_fs_accessmode", "passthrough")
    passwd = params.get("lxc_fs_passwd", "redhat")

    def generate_container_xml():
        """
        Generate container xml
        """
        vmxml = vm_xml.VMXML(dom_type)
        vmxml.vm_name = vm_name
        vmxml.max_mem = max_mem
        vmxml.current_mem = current_mem
        vmxml.vcpu = vcpu
        # Generate os
        vm_os = vm_xml.VMOSXML()
        vm_os.type = os_type
        vm_os.arch = os_arch
        vm_os.init = os_init
        vmxml.os = vm_os
        # Generate emulator
        emulator = Emulator()
        emulator.path = emulator_path
        # Generate console
        console = Console()
        filesystem = Filesystem()
        filesystem.accessmode = fs_accessmode
        filesystem.source = {'dir': install_root}
        filesystem.target = {'dir': fs_target}
        # Add emulator and console in devices
        devices = vm_xml.VMXMLDevices()
        devices.append(emulator)
        devices.append(console)
        devices.append(filesystem)
        # Add network device
        network = Interface(type_name=interface_type)
        network.mac_address = utils_net.generate_mac_address_simple()
        network.source = {interface_type: net_name}
        devices.append(network)
        vmxml.set_devices(devices)
        return vmxml

    def check_state(expected_state):
        result = virsh.domstate(vm_name, uri=uri)
        utlv.check_exit_status(result)
        vm_state = result.stdout.strip()
        if vm_state == expected_state:
            logging.info("Get expected state: %s", vm_state)
        else:
            raise TestFail("Get unexpected state: %s", vm_state)

    virsh_args = {'uri': uri, 'debug': True}
    try:
        vmxml = generate_container_xml()
        with open(vmxml.xml, 'r') as f:
            logging.info("Container XML:\n%s", f.read())

        if full_os:
            if not os.path.exists(install_root):
                os.mkdir(install_root)
            # Install core os under installroot
            cmd = "yum --releasever=/ --installroot=%s" % install_root
            cmd += " --nogpgcheck -y groupinstall core"
            process.run(cmd, shell=True)
            # Fix root login on console
            process.run("echo 'pts/0' >> %s/etc/securetty" % install_root,
                        shell=True)
            for i in ["session    required     pam_selinux.so close",
                      "session    required     pam_selinux.so open",
                      "session    required     pam_loginuid.so"]:
                process.run('sed -i s/"%s\"/"#%s"/g %s/etc/pam.d/login' %
                            (i, i, install_root), shell=True)
                # Fix root login for sshd
                process.run('sed -i s/"%s\"/"#%s"/g %s/etc/pam.d/sshd' %
                            (i, i, install_root), shell=True)

            # Config basic network
            net_file = install_root + '/etc/sysconfig/network'
            with open(net_file, 'w') as f:
                f.write('NETWORKING=yes\nHOSTNAME=%s\n' % vm_name)
            net_script = install_root + '/etc/sysconfig/network-scripts/ifcfg-eth0'
            with open(net_script, 'w') as f:
                f.write('DEVICE=eth0\nBOOTPROTO=dhcp\nONBOOT=yes\n')

            # Set root password and enable sshd
            session = aexpect.ShellSession("chroot %s" % install_root)
            session.sendline('echo %s|passwd root --stdin' % passwd)
            session.sendline('chkconfig sshd on')
            session.close()

        # Create
        result = virsh.create(vmxml.xml, **virsh_args)
        utlv.check_exit_status(result)
        check_state('running')

        # Destroy
        result = virsh.destroy(vm_name, **virsh_args)
        utlv.check_exit_status(result)
        if not virsh.domain_exists(vm_name, **virsh_args):
            logging.info("Destroy transient LXC domain successfully")
        else:
            raise TestFail("Transient LXC domain still exist after destroy")

        # Define
        result = virsh.define(vmxml.xml, **virsh_args)
        utlv.check_exit_status(result)
        check_state('shut off')

        # List
        result = virsh.dom_list('--inactive', **virsh_args)
        utlv.check_exit_status(result)
        if re.findall("(%s)\s+shut off" % vm_name, result.stdout):
            logging.info("Find %s in virsh list output", vm_name)
        else:
            raise TestFail("Not find %s in virsh list output")

        # Dumpxml
        result = virsh.dumpxml(vm_name, uri=uri, debug=False)
        utlv.check_exit_status(result)

        # Edit
        edit_vcpu = '2'
        logging.info("Change vcpu of LXC container to %s", edit_vcpu)
        edit_cmd = [r":%s /[0-9]*<\/vcpu>/" + edit_vcpu + r"<\/vcpu>"]
        if not utlv.exec_virsh_edit(vm_name, edit_cmd, connect_uri=uri):
            raise TestFail("Run edit command fail")
        else:
            result = virsh.dumpxml(vm_name, **virsh_args)
            new_vcpu = re.search(r'(\d*)</vcpu>', result.stdout).group(1)
            if new_vcpu == edit_vcpu:
                logging.info("vcpu number is expected after do edit")
            else:
                raise TestFail("vcpu number is unexpected after do edit")

        # Start
        result = virsh.start(vm_name, **virsh_args)
        utlv.check_exit_status(result)
        check_state('running')

        # Suspend
        result = virsh.suspend(vm_name, **virsh_args)
        utlv.check_exit_status(result)
        check_state('paused')

        # Resume
        result = virsh.resume(vm_name, **virsh_args)
        utlv.check_exit_status(result)
        check_state('running')

        # Reboot(not supported on RHEL6)
        result = virsh.reboot(vm_name, **virsh_args)
        supported_err = 'not supported by the connection driver: virDomainReboot'
        if supported_err in result.stderr.strip():
            logging.info("Reboot is not supported")
        else:
            utlv.check_exit_status(result)

        # Destroy
        result = virsh.destroy(vm_name, **virsh_args)
        utlv.check_exit_status(result)
        check_state('shut off')

        # Undefine
        result = virsh.undefine(vm_name, **virsh_args)
        utlv.check_exit_status(result)
        if not virsh.domain_exists(vm_name, **virsh_args):
            logging.info("Undefine LXC domain successfully")
        else:
            raise TestFail("LXC domain still exist after undefine")

    finally:
        virsh.remove_domain(vm_name, **virsh_args)
        if full_os and os.path.exists(install_root):
            shutil.rmtree(install_root)
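
# A condensed sketch of the lifecycle sequence exercised above, assuming the
# avocado-vt virsh wrapper module; xml_path, vm_name and uri are placeholder
# arguments.
from virttest import virsh

def lxc_lifecycle_smoke(xml_path, vm_name, uri="lxc:///"):
    """Define, start, suspend, resume, destroy and undefine a container."""
    steps = [(virsh.define, xml_path), (virsh.start, vm_name),
             (virsh.suspend, vm_name), (virsh.resume, vm_name),
             (virsh.destroy, vm_name), (virsh.undefine, vm_name)]
    for func, arg in steps:
        result = func(arg, uri=uri, debug=True)
        if result.exit_status:
            raise RuntimeError("%s failed: %s" % (func.__name__,
                                                  result.stderr))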
Beispiel #32
def run(test, params, env):
    """
    Test the tpm virtual devices
    1. prepare a guest with different tpm devices
    2. check whether the guest can be started
    3. check the xml and qemu cmd line, as well as swtpm for vtpm
    4. check tpm usage in guest os
    """
    # Tpm passthrough supported since libvirt 1.0.5.
    if not libvirt_version.version_compare(1, 0, 5):
        test.cancel("Tpm device is not supported "
                    "on current libvirt version.")
    # Tpm passthrough needs qemu 2.9.0 or newer.
    if not utils_misc.compare_qemu_version(2, 9, 0, is_rhev=False):
        test.cancel("Tpm device is not supported on current qemu version.")

    tpm_model = params.get("tpm_model")
    backend_type = params.get("backend_type")
    backend_version = params.get("backend_version")
    device_path = params.get("device_path")
    tpm_num = int(params.get("tpm_num", 1))
    # After first start of vm with vtpm, do operations, check it still works
    vm_operate = params.get("vm_operate")
    # Sub-operation (e.g. domrename) under vm_operate (e.g. restart)
    vm_oprt = params.get("vm_oprt")
    secret_uuid = params.get("secret_uuid")
    secret_value = params.get("secret_value")
    # Change encryption state: from plain to encrypted, or reverse.
    encrypt_change = params.get("encrypt_change")
    prepare_secret = ("yes" == params.get("prepare_secret", "no"))
    remove_dev = ("yes" == params.get("remove_dev", "no"))
    multi_vms = ("yes" == params.get("multi_vms", "no"))
    # Remove swtpm state file
    rm_statefile = ("yes" == params.get("rm_statefile", "no"))
    test_suite = ("yes" == params.get("test_suite", "no"))
    restart_libvirtd = ("yes" == params.get("restart_libvirtd", "no"))
    no_backend = ("yes" == params.get("no_backend", "no"))
    status_error = ("yes" == params.get("status_error", "no"))
    err_msg = params.get("xml_errmsg", "")
    loader = params.get("loader", "")
    nvram = params.get("nvram", "")
    uefi_disk_url = params.get("uefi_disk_url", "")
    download_file_path = os.path.join(data_dir.get_data_dir(),
                                      "uefi_disk.qcow2")
    persistent_state = ("yes" == params.get("persistent_state", "no"))

    libvirt_version.is_libvirt_feature_supported(params)

    # Tpm emulator tpm-tis_model for aarch64 supported since libvirt 7.1.0
    if platform.machine() == 'aarch64' and tpm_model == 'tpm-tis' \
        and backend_type == 'emulator' \
            and not libvirt_version.version_compare(7, 1, 0):

        test.cancel("Tpm emulator tpm-tis_model for aarch64 "
                    "is not supported on current libvirt")

    # Check tpm chip on host for passthrough testing
    if backend_type == "passthrough":
        dmesg_info = process.getoutput("dmesg|grep tpm -wi", shell=True)
        logging.debug("dmesg info about tpm:\n %s", dmesg_info)
        dmesg_error = re.search("No TPM chip found|TPM is disabled",
                                dmesg_info)
        if dmesg_error:
            test.cancel(dmesg_error.group())
        else:
            # Try to check host tpm chip version
            tpm_v = None
            if re.search("2.0 TPM", dmesg_info):
                tpm_v = "2.0"
                if not utils_package.package_install("tpm2-tools"):
                    # package_install() return 'True' if succeed
                    test.error("Failed to install tpm2-tools on host")
            else:
                if re.search("1.2 TPM", dmesg_info):
                    tpm_v = "1.2"
                # If "1.2 TPM" or no version info in dmesg, try to test a tpm1.2 at first
                if not utils_package.package_install("tpm-tools"):
                    if tpm_v == "1.2":
                        test.error("Failed to install tpm-tools on host")
                    else:
                        logging.debug("Failed to install tpm-tools on host")
    # Check host env for vtpm testing
    elif backend_type == "emulator":
        if not utils_misc.compare_qemu_version(4, 0, 0, is_rhev=False):
            test.cancel("vtpm(emulator backend) is not supported "
                        "on current qemu version.")
        # Install swtpm pkgs on host for vtpm emulation
        if not utils_package.package_install(["swtpm", "swtpm-tools"]):
            test.error("Failed to install swtpm swtpm-tools on host")

    def replace_os_disk(vm_xml, vm_name, nvram):
        """
        Replace os(nvram) and disk(uefi) for x86 vtpm test

        :param vm_xml: current vm's xml
        :param vm_name: current vm name
        :param nvram: nvram file path of vm
        """
        # Add loader, nvram in <os>
        nvram = nvram.replace("<VM_NAME>", vm_name)
        dict_os_attrs = {
            "loader_readonly": "yes",
            "secure": "yes",
            "loader_type": "pflash",
            "loader": loader,
            "nvram": nvram
        }
        vm_xml.set_os_attrs(**dict_os_attrs)
        logging.debug("Set smm=on in VMFeaturesXML")
        # Add smm in <features>
        features_xml = vm_xml.features
        features_xml.smm = "on"
        vm_xml.features = features_xml
        vm_xml.sync()
        # Replace disk with an uefi image
        if not utils_package.package_install("wget"):
            test.error("Failed to install wget on host")
        if uefi_disk_url.count("EXAMPLE"):
            test.error("Please provide the URL %s" % uefi_disk_url)
        else:
            download_cmd = ("wget %s -O %s" %
                            (uefi_disk_url, download_file_path))
            process.system(download_cmd, verbose=False, shell=True)
        vm = env.get_vm(vm_name)
        uefi_disk = {'disk_source_name': download_file_path}
        libvirt.set_vm_disk(vm, uefi_disk)

    vm_names = params.get("vms").split()
    vm_name = vm_names[0]
    vm = env.get_vm(vm_name)
    domuuid = vm.get_uuid()
    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()
    os_xml = getattr(vm_xml, "os")
    host_arch = platform.machine()
    if backend_type == "emulator" and host_arch == 'x86_64':
        if not utils_package.package_install("OVMF"):
            test.error("Failed to install OVMF or edk2-ovmf pkgs on host")
        if os_xml.xmltreefile.find('nvram') is None:
            replace_os_disk(vm_xml, vm_name, nvram)
            vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    if vm.is_alive():
        vm.destroy()

    vm2 = None
    if multi_vms:
        if len(vm_names) > 1:
            vm2_name = vm_names[1]
            vm2 = env.get_vm(vm2_name)
            vm2_xml = VMXML.new_from_inactive_dumpxml(vm2_name)
            vm2_xml_backup = vm2_xml.copy()
        else:
            # Clone additional vms if needed
            try:
                utils_path.find_command("virt-clone")
            except utils_path.CmdNotFoundError:
                if not utils_package.package_install(["virt-install"]):
                    test.cancel("Failed to install virt-install on host")
            vm2_name = "vm2_" + utils_misc.generate_random_string(5)
            ret_clone = utils_libguestfs.virt_clone_cmd(vm_name,
                                                        vm2_name,
                                                        True,
                                                        timeout=360,
                                                        debug=True)
            if ret_clone.exit_status:
                test.error(
                    "Need more than one domain, but an error occurred "
                    "during virt-clone."
                )
            vm2 = vm.clone(vm2_name)
            vm2_xml = VMXML.new_from_inactive_dumpxml(vm2_name)
        if vm2.is_alive():
            vm2.destroy()

    service_mgr = service.ServiceManager()

    def check_dumpxml(vm_name):
        """
        Check whether the added devices are shown in the guest xml

        :param vm_name: current vm name
        """
        logging.info("------Checking guest dumpxml------")
        if tpm_model:
            pattern = '<tpm model="%s">' % tpm_model
        else:
            # The default tpm model is "tpm-tis"
            pattern = '<tpm model="tpm-tis">'
        # Check tpm model
        xml_after_adding_device = VMXML.new_from_dumpxml(vm_name)
        logging.debug("xml after add tpm dev is %s", xml_after_adding_device)
        if pattern not in astring.to_text(xml_after_adding_device):
            test.fail("Can not find the %s tpm device xml "
                      "in the guest xml file." % tpm_model)
        # Check backend type
        pattern = '<backend type="%s"' % backend_type
        if pattern not in astring.to_text(xml_after_adding_device):
            test.fail("Can not find the %s backend type xml for tpm dev "
                      "in the guest xml file." % backend_type)
        # Check backend version
        if backend_version:
            check_ver = backend_version if backend_version != 'none' else '2.0'
            pattern = '"emulator" version="%s"' % check_ver
            if pattern not in astring.to_text(xml_after_adding_device):
                test.fail(
                    "Can not find the %s backend version xml for tpm dev "
                    "in the guest xml file." % check_ver)
        # Check device path
        if backend_type == "passthrough":
            pattern = '<device path="/dev/tpm0"'
            if pattern not in astring.to_text(xml_after_adding_device):
                test.fail("Can not find the %s device path xml for tpm dev "
                          "in the guest xml file." % device_path)
        # Check encryption secret
        if prepare_secret:
            pattern = '<encryption secret="%s" />' % encryption_uuid
            if pattern not in astring.to_text(xml_after_adding_device):
                test.fail("Can not find the %s secret uuid xml for tpm dev "
                          "in the guest xml file." % encryption_uuid)
        logging.info('------PASS on guest dumpxml check------')

    def check_qemu_cmd_line(vm, vm_name, domid):
        """
        Check whether the added devices are shown in the qemu cmd line

        :param vm: current vm
        :param vm_name: current vm name
        :param domid: domain id for checking vtpm socket file
        """
        logging.info("------Checking qemu cmd line------")
        if not vm.get_pid():
            test.fail('VM pid file missing.')
        with open('/proc/%s/cmdline' % vm.get_pid()) as cmdline_file:
            cmdline = cmdline_file.read()
            logging.debug("Qemu cmd line info:\n %s", cmdline)
        # Check tpm model
        pattern_list = ["-device.%s" % tpm_model]
        # Check backend type
        if backend_type == "passthrough":
            dev_num = re.search(r"\d+", device_path).group()
            backend_segment = "id=tpm-tpm%s" % dev_num
        else:
            # emulator backend
            backend_segment = "id=tpm-tpm0,chardev=chrtpm"
        pattern_list.append("-tpmdev.%s,%s" % (backend_type, backend_segment))
        # Check chardev socket for vtpm
        if backend_type == "emulator":
            pattern_list.append(
                "-chardev.socket,id=chrtpm,"
                "path=.*/run/libvirt/qemu/swtpm/%s-%s-swtpm.sock" %
                (domid, vm_name))
        for pattern in pattern_list:
            if not re.search(pattern, cmdline):
                if not remove_dev:
                    test.fail("Can not find the %s for tpm device "
                              "in qemu cmd line." % pattern)
            elif remove_dev:
                test.fail("%s still exists after remove vtpm and restart" %
                          pattern)
        logging.info("------PASS on qemu cmd line check------")

    def check_swtpm(domid, domuuid, vm_name):
        """
        Check swtpm cmdline and files for vtpm.

        :param domid: domain id for checking vtpm files
        :param domuuid: domain uuid for checking vtpm state file
        :param vm_name: current vm name
        """
        logging.info("------Checking swtpm cmdline and files------")
        # Check swtpm cmdline
        swtpm_pid = utils_misc.get_pid("%s-swtpm.pid" % vm_name)
        if not swtpm_pid:
            if not remove_dev:
                test.fail('swtpm pid file missing.')
            else:
                return
        elif remove_dev:
            test.fail(
                'swtpm pid file still exists after remove vtpm and restart')
        with open('/proc/%s/cmdline' % swtpm_pid) as cmdline_file:
            cmdline = cmdline_file.read()
            logging.debug("Swtpm cmd line info:\n %s", cmdline)
        pattern_list = [
            "--daemon", "--ctrl", "--tpmstate", "--log", "--tpm2", "--pid"
        ]
        if prepare_secret:
            pattern_list.extend(["--key", "--migration-key"])
        for pattern in pattern_list:
            if not re.search(pattern, cmdline):
                test.fail("Can not find the %s for tpm device "
                          "in swtpm cmd line." % pattern)
        # Check swtpm files
        file_list = [
            "/var/run/libvirt/qemu/swtpm/%s-%s-swtpm.sock" % (domid, vm_name)
        ]
        file_list.append("/var/lib/libvirt/swtpm/%s/tpm2" % domuuid)
        file_list.append("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % vm_name)
        file_list.append("/var/run/libvirt/qemu/swtpm/%s-%s-swtpm.pid" %
                         (domid, vm_name))
        for swtpm_file in file_list:
            if not os.path.exists(swtpm_file):
                test.fail("Swtpm file: %s does not exist" % swtpm_file)
        logging.info("------PASS on Swtpm cmdline and files check------")

    def get_tpm2_tools_cmd(session=None):
        """
        Get tpm2-tools pkg version and return corresponding getrandom cmd

        :session: guest console session
        :return: tpm2_getrandom cmd usage
        """
        cmd = 'rpm -q tpm2-tools'
        get_v_tools = session.cmd(cmd) if session else process.run(
            cmd).stdout_text
        v_tools_list = get_v_tools.strip().split('-')
        if session:
            logging.debug("The tpm2-tools version is %s", v_tools_list[2])
        v_tools = int(v_tools_list[2].split('.')[0])
        return "tpm2_getrandom 8" if v_tools < 4 else "tpm2_getrandom -T device:/dev/tpm0 8 --hex"

    def get_host_tpm_bef(tpm_v):
        """
        Test host tpm function and identify its real version before passthrough
        Since the dmesg info sometimes doesn't include a tpm message, use
        tpm-tools or tpm2-tools to probe the function.

        :param tpm_v: host tpm version get from dmesg info
        :return: host tpm version
        """
        logging.info("------Checking host tpm device before passthrough------")
        # Try tcsd tool for suspected tpm1.2 chip on host
        tpm_real_v = tpm_v
        if tpm_v != "2.0":
            if not service_mgr.start('tcsd'):
                # service_mgr.start() return 'True' if succeed
                if tpm_v == "1.2":
                    test.fail("Host tcsd.serivce start failed")
                else:
                    # Means tpm_v got nothing from dmesg, log failure here and
                    # go to next 'if' to try tpm2.0 tools.
                    logging.info("Host tcsd.serivce start failed")
            else:
                tpm_real_v = "1.2"
                logging.info("Host tpm version info:")
                result = process.run("tpm_version", ignore_status=False)
                logging.debug("[host]# tpm_version\n %s", result.stdout)
                time.sleep(2)
                service_mgr.stop('tcsd')
        if tpm_v != "1.2":
            # Try tpm2.0 tools
            if not utils_package.package_install("tpm2-tools"):
                test.error("Failed to install tpm2-tools on host")
            tpm2_getrandom_cmd = get_tpm2_tools_cmd()
            if process.run(tpm2_getrandom_cmd, ignore_status=True).exit_status:
                test.cancel("Both tcsd and tpm2-tools can not work, "
                            "pls check your host tpm version and test env.")
            else:
                tpm_real_v = "2.0"
        logging.info("------PASS on host tpm device check------")
        return tpm_real_v

    def test_host_tpm_aft(tpm_real_v):
        """
        Test host tpm function after passthrough

        :param tpm_real_v: host tpm real version identified from testing
        """
        logging.info("------Checking host tpm device after passthrough------")
        if tpm_real_v == "1.2":
            if service_mgr.start('tcsd'):
                time.sleep(2)
                service_mgr.stop('tcsd')
                test.fail(
                    "Host tpm should not work after passthrough to guest.")
            else:
                logging.info("Expected failure: Tpm is being used by guest.")
        elif tpm_real_v == "2.0":
            tpm2_getrandom_cmd = get_tpm2_tools_cmd()
            if not process.run(tpm2_getrandom_cmd,
                               ignore_status=True).exit_status:
                test.fail(
                    "Host tpm should not work after passthrough to guest.")
            else:
                logging.info("Expected failure: Tpm is being used by guest.")
        logging.info("------PASS on host tpm device check------")

    def test_guest_tpm(expect_version, session, expect_fail):
        """
        Test tpm function in guest

        :param expect_version: guest tpm version, as host version, or emulator specified
        :param session: Guest session to be tested
        :param expect_fail: guest tpm is expectedly fail to work
        """
        logging.info("------Checking guest tpm device work------")
        if expect_version == "1.2":
            # Install tpm-tools and test by tcsd method
            if not utils_package.package_install(["tpm-tools"], session, 360):
                test.error("Failed to install tpm-tools package in guest")
            else:
                status, output = session.cmd_status_output(
                    "systemctl start tcsd")
                logging.debug("Command output: %s", output)
                if status:
                    if expect_fail:
                        test.cancel(
                            "tpm-crb passthrough only works with host tpm2.0, "
                            "but your host tpm version is 1.2")
                    else:
                        test.fail("Failed to start tcsd.service in guest")
                else:
                    dev_output = session.cmd_output("ls /dev/|grep tpm")
                    logging.debug("Command output: %s", dev_output)
                    status, output = session.cmd_status_output("tpm_version")
                    logging.debug("Command output: %s", output)
                    if status:
                        test.fail("Guest tpm can not work")
        else:
            # If expect_version is tpm2.0, install and test by tpm2-tools
            if not utils_package.package_install(["tpm2-tools"], session, 360):
                test.error("Failed to install tpm2-tools package in guest")
            else:
                tpm2_getrandom_cmd = get_tpm2_tools_cmd(session)
                status1, output1 = session.cmd_status_output(
                    "ls /dev/|grep tpm")
                logging.debug("Command output: %s", output1)
                status2, output2 = session.cmd_status_output(
                    tpm2_getrandom_cmd)
                logging.debug("Command output: %s", output2)
                if status1 or status2:
                    if not expect_fail:
                        test.fail("Guest tpm can not work")
                    else:
                        d_status, d_output = session.cmd_status_output("date")
                        if d_status:
                            test.fail("Guest OS doesn't work well")
                        logging.debug("Command output: %s", d_output)
                elif expect_fail:
                    test.fail("Expect fail but guest tpm still works")
        logging.info("------PASS on guest tpm device work check------")

    def run_test_suite_in_guest(session):
        """
        Run kernel test suite for guest tpm.

        :param session: Guest session to be tested
        """
        logging.info("------Checking kernel test suite for guest tpm------")
        boot_info = session.cmd('uname -r').strip().split('.')
        kernel_version = '.'.join(boot_info[:2])
        # Download test suite per current guest kernel version
        parent_path = "https://cdn.kernel.org/pub/linux/kernel"
        # Compare (major, minor) numerically; float() would misorder e.g. 5.10
        if (int(boot_info[0]), int(boot_info[1])) < (5, 3):
            major_version = "5"
            file_version = "5.3"
        else:
            major_version = boot_info[0]
            file_version = kernel_version
        src_url = "%s/v%s.x/linux-%s.tar.xz" % (parent_path, major_version,
                                                file_version)
        download_cmd = "wget %s -O %s" % (src_url, "/root/linux.tar.xz")
        output = session.cmd_output(download_cmd, timeout=480)
        logging.debug("Command output: %s", output)
        # Install necessary pkgs to build test suite
        if not utils_package.package_install(
            ["tar", "make", "gcc", "rsync", "python2"], session, 360):
            test.fail("Failed to install specified pkgs in guest OS.")
        # Unzip the downloaded test suite
        status, output = session.cmd_status_output(
            "tar xvJf /root/linux.tar.xz -C /root")
        if status:
            test.fail("Uzip failed: %s" % output)
        # Point the test suite at python2, which it requires to run
        test_path = "/root/linux-%s/tools/testing/selftests" % file_version
        sed_cmd = "sed -i 's/python -m unittest/python2 -m unittest/g' %s/tpm2/test_*.sh" % test_path
        output = session.cmd_output(sed_cmd)
        logging.debug("Command output: %s", output)
        # Build and run the .sh files of the test suite
        status, output = session.cmd_status_output(
            "make -C %s TARGETS=tpm2 run_tests" % test_path, timeout=360)
        logging.debug("Command output: %s", output)
        if status:
            test.fail("Failed to run test suite in guest OS.")
        for test_sh in ["test_smoke.sh", "test_space.sh"]:
            pattern = "ok .* selftests: tpm2: %s" % test_sh
            if not re.search(pattern, output) or ("not ok" in output):
                test.fail("test suite check failed.")
        logging.info("------PASS on kernel test suite check------")

    def persistent_test(vm, vm_xml):
        """
        Test for vtpm with persistent_state.
        """
        vm.undefine("--nvram")
        virsh.create(vm_xml.xml, **virsh_dargs)
        domuuid = vm.get_uuid()
        state_file = "/var/lib/libvirt/swtpm/%s/tpm2/tpm2-00.permall" % domuuid
        process.run("ls -Z %s" % state_file)
        session = vm.wait_for_login()
        test_guest_tpm("2.0", session, False)
        session.close()
        virsh.dom_list("--transient", debug=True)
        vm.destroy()
        if not os.path.exists(state_file):
            test.fail("Swtpm state file: %s does not exist after destroy vm'" %
                      state_file)
        process.run("ls -Z %s" % state_file)

    def reuse_by_vm2(tpm_dev):
        """
        Try to add same tpm to a second guest, when it's being used by one guest.

        :param tpm_dev: tpm device to be added into guest xml
        """
        logging.info("------Trying to add same tpm to a second domain------")
        vm2_xml.remove_all_device_by_type('tpm')
        vm2_xml.add_device(tpm_dev)
        vm2_xml.sync()
        ret = virsh.start(vm2_name, ignore_status=True, debug=True)
        if backend_type == "passthrough":
            if ret.exit_status:
                logging.info("Expected failure when try to passthrough a tpm"
                             " that being used by another guest")
                return
            test.fail("Reuse a passthroughed tpm should not succeed.")
        elif ret.exit_status:
            # emulator backend
            test.fail(
                "Vtpm for each guest should not interfere with each other")

    try:
        tpm_real_v = None
        sec_uuids = []
        new_name = ""
        virsh_dargs = {"debug": True, "ignore_status": False}
        vm_xml.remove_all_device_by_type('tpm')
        tpm_dev = Tpm()
        if tpm_model:
            tpm_dev.tpm_model = tpm_model
        if not no_backend:
            backend = tpm_dev.Backend()
            if backend_type != 'none':
                backend.backend_type = backend_type
                if backend_type == "passthrough":
                    tpm_real_v = get_host_tpm_bef(tpm_v)
                    logging.debug("The host tpm real version is %s",
                                  tpm_real_v)
                    if device_path:
                        backend.device_path = device_path
                if backend_type == "emulator":
                    if backend_version != 'none':
                        backend.backend_version = backend_version
                    if persistent_state:
                        backend.persistent_state = "yes"
                    if prepare_secret:
                        auth_sec_dict = {
                            "sec_ephemeral": "no",
                            "sec_private": "yes",
                            "sec_desc": "sample vTPM secret",
                            "sec_usage": "vtpm",
                            "sec_name": "VTPM_example"
                        }
                        encryption_uuid = libvirt.create_secret(auth_sec_dict)
                        if secret_value != 'none':
                            virsh.secret_set_value(encryption_uuid,
                                                   "open sesame",
                                                   encode=True,
                                                   debug=True)
                        sec_uuids.append(encryption_uuid)
                        if encrypt_change != 'encrpt':
                            # plain_to_encrypt will not add encryption on first start
                            if secret_uuid == 'invalid':
                                encryption_uuid = encryption_uuid[:-1]
                            backend.encryption_secret = encryption_uuid
                        if secret_uuid == "change":
                            auth_sec_dict["sec_desc"] = "sample2 vTPM secret"
                            auth_sec_dict["sec_name"] = "VTPM_example2"
                            new_encryption_uuid = libvirt.create_secret(
                                auth_sec_dict)
                            virsh.secret_set_value(new_encryption_uuid,
                                                   "open sesame",
                                                   encode=True,
                                                   debug=True)
                            sec_uuids.append(new_encryption_uuid)
                    if secret_uuid == 'nonexist':
                        backend.encryption_secret = "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee"
            tpm_dev.backend = backend
        logging.debug("tpm dev xml to add is:\n %s", tpm_dev)
        for num in range(tpm_num):
            vm_xml.add_device(tpm_dev, True)
        if persistent_state:
            persistent_test(vm, vm_xml)
            return
        else:
            ret = virsh.define(vm_xml.xml, ignore_status=True, debug=True)
        expected_match = ""
        if not err_msg:
            expected_match = "Domain .*%s.* defined from %s" % (vm_name,
                                                                vm_xml.xml)
        libvirt.check_result(ret, err_msg, "", False, expected_match)
        if err_msg:
            # Stop test when get expected failure
            return
        if vm_operate != "restart":
            check_dumpxml(vm_name)
        # For default model, no need start guest to test
        if tpm_model:
            expect_fail = False
            try:
                vm.start()
            except VMStartError as detail:
                if secret_value == 'none' or secret_uuid == 'nonexist':
                    logging.debug("Expected failure: %s", detail)
                    return
                else:
                    test.fail(detail)
            domuuid = vm.get_uuid()
            if vm_operate or restart_libvirtd:
                # Make sure OS works before vm operate or restart libvirtd
                session = vm.wait_for_login()
                test_guest_tpm("2.0", session, False)
                session.close()
                if restart_libvirtd:
                    utils_libvirtd.libvirtd_restart()
                swtpm_statedir = "/var/lib/libvirt/swtpm/%s" % domuuid
                if vm_operate == "resume":
                    virsh.suspend(vm_name, **virsh_dargs)
                    time.sleep(3)
                    virsh.resume(vm_name, **virsh_dargs)
                elif vm_operate == "snapshot":
                    virsh.snapshot_create_as(
                        vm_name, "sp1 --memspec file=/tmp/testvm_sp1",
                        **virsh_dargs)
                elif vm_operate in ["restart", "create"]:
                    vm.destroy()
                    if vm_operate == "create":
                        virsh.undefine(vm_name,
                                       options="--nvram",
                                       **virsh_dargs)
                        if os.path.exists(swtpm_statedir):
                            test.fail(
                                "Swtpm state dir: %s still exist after vm undefine"
                                % swtpm_statedir)
                        virsh.create(vm_xml.xml, **virsh_dargs)
                    else:
                        if vm_oprt == "domrename":
                            new_name = "vm_" + utils_misc.generate_random_string(
                                5)
                            virsh.domrename(vm_name, new_name, **virsh_dargs)
                            new_vm = libvirt_vm.VM(new_name, vm.params,
                                                   vm.root_dir,
                                                   vm.address_cache)
                            vm = new_vm
                            vm_name = new_name
                        elif secret_value == 'change':
                            logging.info("Changing secret value...")
                            virsh.secret_set_value(encryption_uuid,
                                                   "new sesame",
                                                   encode=True,
                                                   debug=True)
                        elif not restart_libvirtd:
                            # remove_dev or do other vm operations during restart
                            vm_xml.remove_all_device_by_type('tpm')
                            if secret_uuid == "change" or encrypt_change:
                                # Change secret uuid, or change the encryption state: from plain to encrypted, or the reverse
                                if encrypt_change == 'plain':
                                    # Change from encrypted to plain: redefine a tpm dev without encryption
                                    tpm_dev = Tpm()
                                    tpm_dev.tpm_model = tpm_model
                                    backend = tpm_dev.Backend()
                                    backend.backend_type = backend_type
                                    backend.backend_version = backend_version
                                else:
                                    # Use a new secret's uuid
                                    if secret_uuid == "change":
                                        encryption_uuid = new_encryption_uuid
                                    backend.encryption_secret = encryption_uuid
                                tpm_dev.backend = backend
                                logging.debug(
                                    "The new tpm dev xml to add for restart vm is:\n %s",
                                    tpm_dev)
                                vm_xml.add_device(tpm_dev, True)
                            if encrypt_change in ['encrpt', 'plain']:
                                # Avoid sync() undefine removing the state file
                                vm_xml.define()
                            else:
                                vm_xml.sync()
                        if rm_statefile:
                            swtpm_statefile = "%s/tpm2/tpm2-00.permall" % swtpm_statedir
                            logging.debug("Removing state file: %s",
                                          swtpm_statefile)
                            os.remove(swtpm_statefile)
                        ret = virsh.start(vm_name,
                                          ignore_status=True,
                                          debug=True)
                        libvirt.check_exit_status(ret, status_error)
                        if status_error and ret.exit_status != 0:
                            return
                    if not remove_dev:
                        check_dumpxml(vm_name)
                elif vm_operate == 'managedsave':
                    virsh.managedsave(vm_name, **virsh_dargs)
                    time.sleep(5)
                    if secret_value == 'change':
                        logging.info("Changing secret value...")
                        virsh.secret_set_value(encryption_uuid,
                                               "new sesame",
                                               encode=True,
                                               debug=True)
                        if rm_statefile:
                            swtpm_statefile = "%s/tpm2/tpm2-00.permall" % swtpm_statedir
                            logging.debug("Removing state file: %s",
                                          swtpm_statefile)
                            os.remove(swtpm_statefile)
                    ret = virsh.start(vm_name, ignore_status=True, debug=True)
                    libvirt.check_exit_status(ret, status_error)
                    if status_error and ret.exit_status != 0:
                        return
            domid = vm.get_id()
            check_qemu_cmd_line(vm, vm_name, domid)
            if backend_type == "passthrough":
                if tpm_real_v == "1.2" and tpm_model == "tpm-crb":
                    expect_fail = True
                expect_version = tpm_real_v
                test_host_tpm_aft(tpm_real_v)
            else:
                # emulator backend
                if remove_dev:
                    expect_fail = True
                expect_version = backend_version
                check_swtpm(domid, domuuid, vm_name)
            session = vm.wait_for_login()
            if test_suite:
                run_test_suite_in_guest(session)
            else:
                test_guest_tpm(expect_version, session, expect_fail)
            session.close()
            if multi_vms:
                reuse_by_vm2(tpm_dev)
                if backend_type != "passthrough":
                    #emulator backend
                    check_dumpxml(vm2_name)
                    domid = vm2.get_id()
                    domuuid = vm2.get_uuid()
                    check_qemu_cmd_line(vm2, vm2_name, domid)
                    check_swtpm(domid, domuuid, vm2_name)
                    session = vm2.wait_for_login()
                    test_guest_tpm(backend_version, session, expect_fail)
                    session.close()

    finally:
        # Remove renamed domain if it exists
        if new_name:
            virsh.remove_domain(new_name, "--nvram", debug=True)
        if os.path.exists("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" %
                          new_name):
            os.remove("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % new_name)
        # Remove snapshot if exists
        if vm_operate == "snapshot":
            snapshot_lists = virsh.snapshot_list(vm_name)
            if len(snapshot_lists) > 0:
                libvirt.clean_up_snapshots(vm_name, snapshot_lists)
                for snap in snapshot_lists:
                    virsh.snapshot_delete(vm_name, snap, "--metadata")
                if os.path.exists("/tmp/testvm_sp1"):
                    os.remove("/tmp/testvm_sp1")
        # Clear guest os
        if test_suite:
            session = vm.wait_for_login()
            logging.info("Removing dir /root/linux-*")
            output = session.cmd_output("rm -rf /root/linux-*")
            logging.debug("Command output:\n %s", output)
            session.close()
        if vm_operate == "create":
            vm.define(vm_xml.xml)
        vm_xml_backup.sync(options="--nvram --managed-save")
        # Remove swtpm log file in case of impact on later runs
        if os.path.exists("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" %
                          vm.name):
            os.remove("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % vm.name)
        if os.path.exists("/var/lib/libvirt/swtpm/%s" % domuuid):
            shutil.rmtree("/var/lib/libvirt/swtpm/%s" % domuuid)
        for sec_uuid in set(sec_uuids):
            virsh.secret_undefine(sec_uuid, ignore_status=True, debug=True)
        if vm2:
            if len(vm_names) > 1:
                vm2_xml_backup.sync(options="--nvram")
            else:
                virsh.remove_domain(vm2_name,
                                    "--nvram --remove-all-storage",
                                    debug=True)
            if os.path.exists("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" %
                              vm2.name):
                os.remove("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" %
                          vm2.name)
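
# A minimal sketch of the vTPM device XML construction used in the test above.
# The import path is an assumption (in avocado-vt the Tpm device class lives
# under virttest.libvirt_xml.devices); the attribute usage mirrors the test.
from virttest.libvirt_xml.devices.tpm import Tpm

def make_vtpm_device(model="tpm-crb", version="2.0"):
    """Build a <tpm> device with an emulator backend."""
    tpm_dev = Tpm()
    tpm_dev.tpm_model = model
    backend = tpm_dev.Backend()
    backend.backend_type = "emulator"
    backend.backend_version = version
    tpm_dev.backend = backend
    return tpm_dev
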
def run(test, params, env):
    """
    Test command: virsh domif-setlink and domif-getlink.

    The commands set and get the link state of a virtual interface
    1. Prepare test environment.
    2. Perform virsh domif-setlink and domif-getlink operation.
    3. Recover test environment.
    4. Confirm the test result.
    """

    def domif_setlink(vm, device, operation, options):
        """
        Set the domain link state

        :param vm : domain name
        :param device : domain virtual interface
        :param operation : domain virtual interface state
        :param options : some options like --config

        """

        return virsh.domif_setlink(vm, device, operation, options, debug=True)

    def domif_getlink(vm, device, options):
        """
        Get the domain link state

        :param vm : domain name
        :param device : domain virtual interface
        :param options : some options like --config

        """

        return virsh.domif_getlink(vm, device, options,
                                   ignore_status=True, debug=True)

    def guest_cmd_check(cmd, session, pattern):
        """
        Check cmd output with pattern in session
        """
        try:
            cmd_status, output = session.cmd_status_output(cmd, timeout=10)
            logging.info("exit: %s, output: %s",
                         cmd_status, output)
            return re.search(pattern, output)
        except (aexpect.ShellTimeoutError, aexpect.ShellStatusError) as e:
            logging.debug(e)
            return re.search(pattern, str(e))

    def guest_if_state(if_name, session):
        """
        Get the domain link state from the guest
        """
        # Get link state by ethtool
        cmd = "ethtool %s" % if_name
        pattern = "Link detected: ([a-zA-Z]+)"
        ret = guest_cmd_check(cmd, session, pattern)
        if ret:
            return ret.group(1) == "yes"
        else:
            return False

    def check_update_device(vm, if_name, session):
        """
        Change link state via the update-device command and check the results
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm.name)

        # Get interface xml object
        iface = vmxml.get_devices(device_type="interface")[0]
        if iface.address:
            del iface.address

        # Change link state to up
        iface.link_state = "up"
        iface.xmltreefile.write()
        ret = virsh.update_device(vm.name, iface.xml,
                                  ignore_status=True, debug=True)
        if ret.exit_status:
            logging.error("Failed to update device to up state")
            return False
        if not guest_if_state(if_name, session):
            logging.error("Guest link should be up now")
            return False

        # Change link state to down
        iface.link_state = "down"
        iface.xmltreefile.write()
        ret = virsh.update_device(vm.name, iface.xml,
                                  ignore_status=True, debug=True)
        if ret.exit_status:
            logging.error("Failed to update device to down state")
            return False
        if guest_if_state(if_name, session):
            logging.error("Guest link should be down now")
            return False

        # Passed all test
        return True

    vm_name = []
    # vm_name list: first element is the original name from the config
    vm_name.append(params.get("main_vm", "avocado-vt-vm1"))
    vm = env.get_vm(vm_name[0])
    options = params.get("if_options", "--config")
    start_vm = params.get("start_vm", "no")
    domain = params.get("domain", "name")
    if_device = params.get("if_device", "net")
    if_name = params.get("if_name", "vnet0")
    if_operation = params.get("if_operation", "up")
    status_error = params.get("status_error", "no")
    mac_address = vm.get_virsh_mac_address(0)
    check_link_state = "yes" == params.get("check_link_state", "no")
    check_link_by_update_device = "yes" == params.get(
        "excute_update_device", "no")
    device = "vnet0"
    username = params.get("username")
    password = params.get("password")

    # Back up xml file.
    vm_xml_file = os.path.join(data_dir.get_tmp_dir(), "vm.xml")
    virsh.dumpxml(vm_name[0], extra="--inactive", to_file=vm_xml_file)

    # Vm status
    if start_vm == "yes" and vm.is_dead():
        vm.start()

    elif start_vm == "no" and vm.is_alive():
        vm.destroy()

    # vm_name list: second element for 'domain' in virsh command
    if domain == "ID":
        # Get ID for the running domain
        vm_name.append(vm.get_id())
    elif domain == "UUID":
        # Get UUID for the domain
        vm_name.append(vm.get_uuid())
    elif domain == "no_match_UUID":
        # Generate a random UUID
        vm_name.append(uuid.uuid1())
    elif domain == "no_match_name":
        # Generate a random string as domain name
        vm_name.append(utils_misc.generate_random_string(6))
    elif domain == " ":
        # Set domain name empty
        vm_name.append("''")
    else:
        # Set domain name
        vm_name.append(vm_name[0])

    try:
        # Test device net or mac address
        if if_device == "net" and vm.is_alive():
            device = if_name
            # Get all vm's interface device
            device = vm_xml.VMXML.get_net_dev(vm_name[0])[0]

        elif if_device == "mac":
            device = mac_address

        # Test no exist device
        if if_device == "no_exist_net":
            device = "vnet-1"
        elif if_device == "no_exist_mac":
            # Generate random mac address for negative test
            device = utils_net.VirtIface.complete_mac_address("01:02")
        elif if_device == " ":
            device = "''"

        # Setlink operation
        result = domif_setlink(vm_name[1], device, if_operation, options)
        status = result.exit_status
        logging.info("Setlink done")

        # Getlink operation
        get_result = domif_getlink(vm_name[1], device, options)
        getlink_output = get_result.stdout.strip()

        # Check the getlink command output
        if status_error == "no":
            if not re.search(if_operation, getlink_output):
                test.fail("Getlink result should "
                          "equal with setlink operation")

        logging.info("Getlink done")
        # If --config or --persistent is given, restart the vm and then test the link status
        if any(options == option for option in ["--config", "--persistent"]) and vm.is_alive():
            vm.destroy()
            vm.start()
            logging.info("Restart VM")

        elif start_vm == "no":
            vm.start()

        error_msg = None
        if status_error == "no":
            # Log in to the vm via serial console and check the link status
            session = vm.wait_for_serial_login(username=username,
                                               password=password)
            guest_if_name = utils_net.get_linux_ifname(session, mac_address)

            # Check link state in guest
            if check_link_state:
                if (if_operation == "up" and
                        not guest_if_state(guest_if_name, session)):
                    error_msg = "Link state should be up in guest"
                if (if_operation == "down" and
                        guest_if_state(guest_if_name, session)):
                    error_msg = "Link state should be down in guest"

            # Test of setting link state by update_device command
            if check_link_by_update_device:
                if not check_update_device(vm, guest_if_name, session):
                    error_msg = "Check update_device failed"

            # Set the link up make host connect with vm
            domif_setlink(vm_name[0], device, "up", "")
            if not utils_misc.wait_for(
                    lambda: guest_if_state(guest_if_name, session), 5):
                error_msg = "Link state isn't up in guest"

            # Ignore status of this one
            cmd = 'ip link set down %s' % guest_if_name
            session.cmd_status_output(cmd, timeout=10)
            pattern = "%s:.*state DOWN.*" % guest_if_name
            pattern_cmd = 'ip addr show dev %s' % guest_if_name
            guest_cmd_check(pattern_cmd, session, pattern)

            cmd = 'ip link set up %s' % guest_if_name
            session.cmd_status_output(cmd, timeout=10)
            pattern = "%s:.*state UP.*" % guest_if_name
            if not guest_cmd_check(pattern_cmd, session, pattern):
                error_msg = ("Could not bring up interface %s inside guest"
                             % guest_if_name)
        else:  # negative test
            # stop guest, so state is always consistent on next start
            vm.destroy()

        if error_msg:
            test.fail(error_msg)

        # Check status_error
        if status_error == "yes":
            if status:
                logging.info("Expected error (negative testing). Output: %s",
                             result.stderr.strip())

            else:
                test.fail("Unexpected return code %d "
                          "(negative testing)" % status)
        elif status_error != "no":
            test.error("Invalid value for status_error '%s' "
                       "(must be 'yes' or 'no')" % status_error)
    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        virsh.undefine(vm_name[0])
        virsh.define(vm_xml_file)
        os.remove(vm_xml_file)
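
# A minimal usage sketch of the setlink/getlink round trip tested above,
# assuming the avocado-vt virsh wrappers; the domain and interface names are
# placeholders.
from virttest import virsh

def setlink_roundtrip(vm_name="vm1", device="vnet0", options="--config"):
    """Set a link state and read it back with virsh."""
    result = virsh.domif_setlink(vm_name, device, "down", options, debug=True)
    assert result.exit_status == 0, result.stderr
    state = virsh.domif_getlink(vm_name, device, options, debug=True)
    assert "down" in state.stdout.strip()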
Beispiel #34
def run(test, params, env):
    """
    Test the pull-mode backup function

    Steps:
    1. create a vm with extra disk vdb
    2. create some data on vdb
    3. start a pull mode full backup on vdb
    4. create some data on vdb
    5. start a pull mode incremental backup
    6. repeat steps 4 and 5
    7. check the full/incremental backup file data
    """

    # Cancel the test if libvirt version is too low
    if not libvirt_version.version_compare(6, 0, 0):
        test.cancel("Current libvirt version doesn't support "
                    "incremental backup.")

    hotplug_disk = "yes" == params.get("hotplug_disk", "no")
    original_disk_size = params.get("original_disk_size", "100M")
    original_disk_type = params.get("original_disk_type", "local")
    original_disk_target = params.get("original_disk_target", "vdb")
    scratch_type = params.get("scratch_type", "file")
    reuse_scratch_file = "yes" == params.get("reuse_scratch_file")
    prepare_scratch_file = "yes" == params.get("prepare_scratch_file")
    scratch_blkdev_path = params.get("scratch_blkdev_path")
    scratch_blkdev_size = params.get("scratch_blkdev_size", original_disk_size)
    prepare_scratch_blkdev = "yes" == params.get("prepare_scratch_blkdev")
    nbd_protocol = params.get("nbd_protocol", "unix")
    nbd_socket = params.get("nbd_socket", "/tmp/pull_backup.socket")
    nbd_tcp_port = params.get("nbd_tcp_port", "10809")
    set_exportname = "yes" == params.get("set_exportname")
    set_exportbitmap = "yes" == params.get("set_exportbitmap")
    backup_rounds = int(params.get("backup_rounds", 3))
    backup_error = "yes" == params.get("backup_error")
    tmp_dir = data_dir.get_tmp_dir()

    try:
        vm_name = params.get("main_vm")
        vm = env.get_vm(vm_name)

        # Backup vm xml
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        vmxml_backup = vmxml.copy()

        # Enable vm incremental backup capability. This is only a workaround
        # to make sure incremental backup can work for the vm. This code
        # needs to be removed as soon as the feature is enabled by default,
        # which is tracked by bz1799015
        tree = ET.parse(vmxml.xml)
        root = tree.getroot()
        for elem in root.iter('domain'):
            elem.set('xmlns:qemu',
                     'http://libvirt.org/schemas/domain/qemu/1.0')
            qemu_cap = ET.Element("qemu:capabilities")
            elem.insert(-1, qemu_cap)
            incbackup_cap = ET.Element("qemu:add")
            incbackup_cap.set('capability', 'incremental-backup')
            qemu_cap.insert(1, incbackup_cap)
        vmxml.undefine()
        tmp_vm_xml = os.path.join(tmp_dir, "tmp_vm.xml")
        tree.write(tmp_vm_xml)
        virsh.define(tmp_vm_xml)
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        logging.debug("Script insert xml elements to make sure vm can support "
                      "incremental backup. This should be removded when "
                      "bz 1799015 fixed.")

        # Prepare the disk to be backed up.
        disk_params = {}
        disk_path = ""
        if original_disk_type == "local":
            image_name = "{}_image.qcow2".format(original_disk_target)
            disk_path = os.path.join(tmp_dir, image_name)
            libvirt.create_local_disk("file", disk_path, original_disk_size,
                                      "qcow2")
            disk_params = {
                "device_type": "disk",
                "type_name": "file",
                "driver_type": "qcow2",
                "target_dev": original_disk_target,
                "source_file": disk_path
            }
        elif original_disk_type == "iscsi":
            iscsi_host = '127.0.0.1'
            iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
                is_setup=True,
                is_login=False,
                image_size=original_disk_size,
                portal_ip=iscsi_host)
            disk_path = ("iscsi://[%s]/%s/%s" %
                         (iscsi_host, iscsi_target, lun_num))
            process.run("qemu-img create -f qcow2 %s %s" %
                        (disk_path, original_disk_size),
                        shell=True,
                        verbose=True)
            disk_params = {
                'device_type': 'disk',
                'type_name': 'network',
                "driver_type": "qcow2",
                'target_dev': original_disk_target
            }
            disk_params_src = {
                'source_protocol': 'iscsi',
                'source_name': iscsi_target + "/%s" % lun_num,
                'source_host_name': iscsi_host,
                'source_host_port': '3260'
            }
            disk_params.update(disk_params_src)
        elif original_disk_type == "gluster":
            gluster_vol_name = "gluster_vol"
            gluster_pool_name = "gluster_pool"
            gluster_img_name = "gluster.qcow2"
            gluster_host_ip = gluster.setup_or_cleanup_gluster(
                is_setup=True,
                vol_name=gluster_vol_name,
                pool_name=gluster_pool_name,
                **params)
            disk_path = 'gluster://%s/%s/%s' % (
                gluster_host_ip, gluster_vol_name, gluster_img_name)
            process.run("qemu-img create -f qcow2 %s %s" %
                        (disk_path, original_disk_size),
                        shell=True,
                        verbose=True)
            disk_params = {
                'device_type': 'disk',
                'type_name': 'network',
                "driver_type": "qcow2",
                'target_dev': original_disk_target
            }
            disk_params_src = {
                'source_protocol': 'gluster',
                'source_name': gluster_vol_name + "/%s" % gluster_img_name,
                'source_host_name': gluster_host_ip,
                'source_host_port': '24007'
            }
            disk_params.update(disk_params_src)
        else:
            test.error("The disk type '%s' not supported in this script.",
                       original_disk_type)
        if hotplug_disk:
            vm.start()
            vm.wait_for_login().close()
            disk_xml = libvirt.create_disk_xml(disk_params)
            virsh.attach_device(vm_name, disk_xml, debug=True)
        else:
            disk_xml = libvirt.create_disk_xml(disk_params)
            virsh.attach_device(vm.name,
                                disk_xml,
                                flagstr="--config",
                                debug=True)
            vm.start()
        session = vm.wait_for_login()
        new_disks_in_vm = list(utils_disk.get_linux_disks(session).keys())
        session.close()
        if len(new_disks_in_vm) != 1:
            test.fail("Test disk not prepared in vm")

        # Use the newly added disk as the test disk
        test_disk_in_vm = "/dev/" + new_disks_in_vm[0]

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        vm_disks = list(vmxml.get_disk_all().keys())

        checkpoint_list = []
        is_incremental = False
        backup_file_list = []
        for backup_index in range(backup_rounds):
            # Prepare backup xml
            backup_params = {"backup_mode": "pull"}
            if backup_index > 0:
                is_incremental = True
                backup_params["backup_incremental"] = "checkpoint_" + str(
                    backup_index - 1)

            # Set libvirt default nbd export name and bitmap name
            nbd_export_name = original_disk_target
            nbd_bitmap_name = "backup-" + original_disk_target

            backup_server_dict = {}
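            # The NBD server the backup job exports data over: either a unix
            # socket or a TCP port on localhost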
            if nbd_protocol == "unix":
                backup_server_dict["transport"] = "unix"
                backup_server_dict["socket"] = nbd_socket
            else:
                backup_server_dict["name"] = "localhost"
                backup_server_dict["port"] = nbd_tcp_port
            backup_params["backup_server"] = backup_server_dict
            backup_disk_xmls = []
            for vm_disk in vm_disks:
                backup_disk_params = {"disk_name": vm_disk}
                if vm_disk != original_disk_target:
                    backup_disk_params["enable_backup"] = "no"
                else:
                    backup_disk_params["enable_backup"] = "yes"
                    backup_disk_params["disk_type"] = scratch_type

                    # Custom nbd export name and bitmap name if required
                    if set_exportname:
                        nbd_export_name = original_disk_target + "_custom_exp"
                        backup_disk_params["exportname"] = nbd_export_name
                    if set_exportbitmap:
                        nbd_bitmap_name = original_disk_target + "_custom_bitmap"
                        backup_disk_params["exportbitmap"] = nbd_bitmap_name

                    # Prepare nbd scratch file/dev params
                    scratch_params = {}
                    if scratch_type == "file":
                        scratch_file_name = "scratch_file_%s" % backup_index
                        scratch_file_path = os.path.join(
                            tmp_dir, scratch_file_name)
                        if prepare_scratch_file:
                            libvirt.create_local_disk("file",
                                                      scratch_file_path,
                                                      original_disk_size,
                                                      "qcow2")
                        scratch_params["file"] = scratch_file_path
                        logging.debug("scratch_params: %s", scratch_params)
                    elif scratch_type == "block":
                        if prepare_scratch_blkdev:
                            scratch_blkdev_path = libvirt.setup_or_cleanup_iscsi(
                                is_setup=True, image_size=scratch_blkdev_size)
                        logging.debug("abcd scratch_blkdev_path:%s",
                                      scratch_blkdev_path)
                        scratch_params["dev"] = scratch_blkdev_path
                    else:
                        test.fail(
                            "We do not support backup scratch type: '%s'" %
                            scratch_type)
                    backup_disk_params["backup_scratch"] = scratch_params

                backup_disk_xml = utils_backup.create_backup_disk_xml(
                    backup_disk_params)
                backup_disk_xmls.append(backup_disk_xml)
            logging.debug("disk list %s", backup_disk_xmls)
            backup_xml = utils_backup.create_backup_xml(
                backup_params, backup_disk_xmls)
            logging.debug("ROUND_%s Backup Xml: %s", backup_index, backup_xml)

            # Prepare checkpoint xml
            checkpoint_name = "checkpoint_%s" % backup_index
            checkpoint_list.append(checkpoint_name)
            cp_params = {"checkpoint_name": checkpoint_name}
            cp_params["checkpoint_desc"] = params.get(
                "checkpoint_desc", "desc of cp_%s" % backup_index)
            disk_param_list = []
            for vm_disk in vm_disks:
                cp_disk_param = {"name": vm_disk}
                if vm_disk != original_disk_target:
                    cp_disk_param["checkpoint"] = "no"
                else:
                    cp_disk_param["checkpoint"] = "bitmap"
                    cp_disk_bitmap = params.get("cp_disk_bitmap")
                    if cp_disk_bitmap:
                        cp_disk_param["bitmap"] = cp_disk_bitmap + str(
                            backup_index)
                disk_param_list.append(cp_disk_param)
            checkpoint_xml = utils_backup.create_checkpoint_xml(
                cp_params, disk_param_list)
            logging.debug("ROUND_%s Checkpoint Xml: %s", backup_index,
                          checkpoint_xml)

            # Start backup
            backup_options = backup_xml.xml + " " + checkpoint_xml.xml

            # Create some data in vdb
            dd_count = "1"
            dd_seek = str(backup_index * 10 + 10)
            dd_bs = "1M"
            utils_backup.dd_data_to_vm_disk(vm, test_disk_in_vm, dd_bs,
                                            dd_seek, dd_count)

            if reuse_scratch_file:
                backup_options += " --reuse-external"
            backup_result = virsh.backup_begin(vm_name,
                                               backup_options,
                                               debug=True)
            if backup_result.exit_status:
                raise utils_backup.BackupBeginError(
                    backup_result.stderr.strip())

            backup_file_path = os.path.join(
                tmp_dir, "backup_file_%s.qcow2" % str(backup_index))
            backup_file_list.append(backup_file_path)
            if not is_incremental:
                # Do full backup
                if nbd_protocol == "unix":
                    nbd_export = ("nbd+unix:///%s?socket=%s" %
                                  (nbd_export_name, nbd_socket))
                elif nbd_protocol == "tcp":
                    nbd_export = ("nbd://localhost:%s/%s" %
                                  (nbd_tcp_port, nbd_export_name))
                utils_backup.pull_full_backup_to_file(nbd_export,
                                                      backup_file_path)
                logging.debug("Full backup to: %s", backup_file_path)
            else:
                # Do incremental backup
                nbd_params = {
                    "nbd_protocol": nbd_protocol,
                    "nbd_export": nbd_export_name
                }
                if nbd_protocol == "tcp":
                    nbd_params["nbd_tcp_port"] = nbd_tcp_port
                elif nbd_protocol == "unix":
                    nbd_params["nbd_socket"] = nbd_socket
                utils_backup.pull_incremental_backup_to_file(
                    nbd_params, backup_file_path, nbd_bitmap_name,
                    original_disk_size)
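            # Pull-mode backup jobs stay active until aborted; aborting ends
            # the backup and tears down the NBD export before the next round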
            virsh.domjobabort(vm_name, debug=True)

        for checkpoint_name in checkpoint_list:
            virsh.checkpoint_delete(vm_name, checkpoint_name, debug=True)
        if vm.is_alive():
            vm.destroy(gracefully=False)

        # Compare the backup data and original data
        original_data_file = os.path.join(tmp_dir, "original_data.qcow2")
        cmd = "qemu-img convert -f qcow2 %s -O qcow2 %s" % (disk_path,
                                                            original_data_file)
        process.run(cmd, shell=True, verbose=True)
        for backup_file in backup_file_list:
            if not utils_backup.cmp_backup_data(original_data_file,
                                                backup_file):
                test.fail("Backup and original data are not identical for"
                          "'%s' and '%s'" % (disk_path, backup_file))
            else:
                logging.debug("'%s' contains correct backup data", backup_file)
    except utils_backup.BackupBeginError as details:
        if backup_error:
            logging.debug("Backup failed as expected.")
        else:
            test.fail(details)
    finally:
        # Remove checkpoints
        if "checkpoint_list" in locals() and checkpoint_list:
            for checkpoint_name in checkpoint_list:
                virsh.checkpoint_delete(vm_name, checkpoint_name)

        if vm.is_alive():
            vm.destroy(gracefully=False)

        # Restore the vm xml
        vmxml_backup.sync()

        # Remove iscsi devices
        if original_disk_type == "iscsi" or scratch_type == "block":
            libvirt.setup_or_cleanup_iscsi(False)

        # Remove gluster devices
        if original_disk_type == "gluster":
            gluster.setup_or_cleanup_gluster(is_setup=False,
                                             vol_name=gluster_vol_name,
                                             pool_name=gluster_pool_name,
                                             **params)
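
# A minimal, standalone sketch of the "pull" step of a full backup, assuming
# qemu-img is on PATH. utils_backup.pull_full_backup_to_file() presumably does
# something similar under the hood (assumption); the function name below is
# illustrative only.
import subprocess

def pull_full_backup(nbd_export_url, target_path):
    """Copy a backup NBD export (e.g. nbd+unix:///vdb?socket=...) to qcow2."""
    subprocess.run(['qemu-img', 'convert', '-f', 'raw', '-O', 'qcow2',
                    nbd_export_url, target_path], check=True)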
Beispiel #35
def run(test, params, env):
    """
    Test pci/pcie-to-pci bridge

    Hotplug interface to pci/pcie-to-pci bridge, then check xml and
    inside vm.
    Hotunplug interface, then check xml and inside vm
    Other test scenarios of pci/pcie-to-pci bridge
    """

    def create_pci_device(pci_model, pci_model_name, **kwargs):
        """
        Create a pci/pcie bridge

        :param pci_model: model of pci controller device
        :param pci_model_name: model name of pci controller device
        :param kwargs: other k-w args that needed to create device
        :return: the newly created device object
        """
        pci_bridge = Controller('pci')
        pci_bridge.model = pci_model
        pci_bridge.model_name = {'name': pci_model_name}
        if 'index' in kwargs:
            pci_bridge.index = kwargs['index']
        if 'address' in kwargs:
            pci_bridge.address = pci_bridge.new_controller_address(
                attrs=eval(kwargs['address']))

        logging.debug('pci_bridge: %s', pci_bridge)
        return pci_bridge

    def create_iface(iface_model, iface_source, **kwargs):
        """
        Create an interface to be attached to vm

        :param iface_model: model of the interface device
        :param iface_source: source of the interface device
        :param kwargs: other k-w args that needed to create device
        :return: the newly created interface object
        """
        iface = Interface('network')
        iface.model = iface_model
        iface.source = eval(iface_source)

        if 'mac' in kwargs:
            iface.mac_address = kwargs['mac']
        else:
            mac = utils_net.generate_mac_address_simple()
            iface.mac_address = mac

        if 'address' in kwargs:
            iface.address = iface.new_iface_address(attrs=eval(kwargs['address']))

        logging.debug('iface: %s', iface)
        return iface

    vm_name = params.get('main_vm')
    status_error = 'yes' == params.get('status_error', 'no')
    err_msg = params.get('err_msg', '').split(';')
    case = params.get('case', '')
    hotplug = 'yes' == params.get('hotplug', 'no')

    need_pci_br = 'yes' == params.get('need_pci_br', 'no')
    pci_model = params.get('pci_model', 'pci')
    pci_model_name = params.get('pci_model_name')
    pci_br_kwargs = eval(params.get('pci_br_kwargs', '{}'))
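    # The *_kwargs params arrive as python dict literals in the test cfg,
    # e.g. "{'index': '8'}" (illustrative value), hence the eval() calls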

    pci_br_has_device = 'yes' == params.get('pci_br_has_device', 'no')
    sound_dev_model_type = params.get('sound_dev_model_type', '')
    sound_dev_address = params.get('sound_dev_address', '')

    iface_model = params.get('iface_model', '')
    iface_source = params.get('iface_source', '')
    iface_kwargs = eval(params.get('iface_kwargs', '{}'))

    max_slots = int(params.get('max_slots', 31))
    pcie_br_count = int(params.get('pcie_br_count', 3))

    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    bkxml = vmxml.copy()
    vm = env.get_vm(vm_name)

    try:

        # Check if there is a pci/pcie-to-pci bridge, if so,
        # just use the existing pci/pcie-to-pci-bridge to test
        ori_pci_br = [dev for dev in vmxml.get_devices('controller')
                      if dev.type == 'pci' and dev.model == pci_model]

        if need_pci_br:
            # If there is no pci/pcie-to-pci bridge to test,
            # create one and add it to the vm
            if not ori_pci_br:
                logging.info('No %s on vm, create one', pci_model)
                pci_bridge = create_pci_device(pci_model, pci_model_name)
                vmxml.add_device(pci_bridge)
                vmxml.sync()
                logging.debug(virsh.dumpxml(vm_name))

            # Check if pci/pcie-to-pci bridge is successfully added
            vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
            cur_pci_br = [dev for dev in vmxml.get_devices('controller')
                          if dev.type == 'pci' and dev.model == pci_model]
            if not cur_pci_br:
                test.error('Failed to add %s controller to vm xml' % pci_model)

            pci_br = cur_pci_br[0]
            logging.debug('pci_br: %s', pci_br)
            pci_br_index = pci_br.index

        # If the test scenario requires another pci device on the
        # pci/pcie-to-pci bridge before hotplug, add a sound device and make
        # sure its 'bus' is the same as the pci bridge index
        if need_pci_br and pci_br_has_device:
            sound_dev = Sound()
            sound_dev.model_type = sound_dev_model_type
            sound_dev.address = eval(sound_dev_address % pci_br_index)
            logging.debug('sound_dev.address: %s', sound_dev.address)
            vmxml.add_device(sound_dev)
            if case != 'vm_with_pcie_br_1_br':
                vmxml.sync()

        # Test hotplug scenario
        if hotplug:
            vm.start()
            vm.wait_for_login().close()

            # Create interface to be hotplugged
            logging.info('Create interface to be hotplugged')
            iface = create_iface(iface_model, iface_source)
            mac = iface.mac_address

            result = virsh.attach_device(vm_name, iface.xml, debug=True)
            libvirt.check_exit_status(result)

            xml_after_attach = VMXML.new_from_dumpxml(vm_name)
            logging.debug(virsh.dumpxml(vm_name))

            # Check if the iface with given mac address is successfully
            # attached with address bus equal to pcie/pci bridge's index
            iface_list = [
                iface for iface in xml_after_attach.get_devices('interface')
                if iface.mac_address == mac and
                int(iface.address['attrs']['bus'], 16) == int(pci_br_index, 16)
            ]

            logging.debug('iface list after attach: %s', iface_list)
            if not iface_list:
                test.error('Failed to attach interface %s' % iface)

            # Check inside vm
            def check_inside_vm(session, expect=True):
                ip_output = session.cmd('ip a')
                logging.debug('output of "ip a": %s', ip_output)

                return expect if mac in ip_output else not expect

            session = vm.wait_for_serial_login()
            if not utils_misc.wait_for(lambda: check_inside_vm(session, True),
                                       timeout=60, step=5):
                test.fail('Check interface inside vm failed: '
                          'interface not successfully attached, '
                          'mac address %s not found' % mac)
            session.close()

            # Test hotunplug
            result = virsh.detach_device(vm_name, iface.xml, debug=True)
            libvirt.check_exit_status(result)

            logging.debug(virsh.dumpxml(vm_name))

            # Check if the iface with given mac address has been
            # successfully detached
            xml_after_detach = VMXML.new_from_dumpxml(vm_name)
            iface_list_after_detach = [
                iface for iface in xml_after_detach.get_devices('interface')
                if iface.mac_address == mac
            ]

            logging.debug('iface list after detach: %s', iface_list_after_detach)
            if iface_list_after_detach:
                test.fail('Failed to detach device: %s' % iface)

            # Check again inside vm
            session = vm.wait_for_serial_login()
            if not utils_misc.wait_for(lambda: check_inside_vm(session, False),
                                       timeout=60, step=5):
                test.fail('Check interface inside vm failed: '
                          'interface not successfully detached, '
                          'mac address %s still found' % mac)
            session.close()

        # Other test scenarios of pci/pcie
        if case:
            logging.debug('iface_kwargs: %s', iface_kwargs)

            # Set pcie-to-pci-bridge model name != pcie-pci-bridge,
            # or an invalid controller index for pcie-to-pci-bridge
            if case in ('wrong_model_name', 'invalid_index'):
                pci_bridge = create_pci_device(pci_model, pci_model_name,
                                               **pci_br_kwargs)
                vmxml.add_device(pci_bridge)
                result_to_check = virsh.define(vmxml.xml, debug=True)

            # Attach device with invalid slot to pcie-to-pci-bridge
            if case == 'attach_with_invalid_slot':
                iface = create_iface(iface_model, iface_source, **iface_kwargs)
                vmxml.add_device(iface)
                result_to_check = virsh.define(vmxml.xml, debug=True)

            # Test that pcie-to-pci-bridge has 31 available slots (see the
            # slot-arithmetic sketch after this example)
            if case == 'max_slots':
                target_bus = cur_pci_br[0].index
                target_bus = hex(int(target_bus))
                logging.debug('target_bus: %s', target_bus)

                # Attach 32 interfaces
                for i in range(max_slots + 1):
                    logging.debug('address: %s', iface_kwargs['address'])
                    new_iface_kwargs = {'address': iface_kwargs['address']
                                        % (target_bus, hex(i + 1))}
                    iface = create_iface(iface_model, iface_source,
                                         **new_iface_kwargs)
                    logging.info('Attaching interface %d', i + 1)
                    result_in_loop = virsh.attach_device(
                        vm_name, iface.xml, flagstr='--config', debug=True)

                    # Attaching the 32nd interface will fail
                    if i == max_slots:
                        status_error = True
                    libvirt.check_exit_status(result_in_loop,
                                              expect_error=status_error)
                logging.debug(virsh.dumpxml(vm_name))

                # Get all devices on pcie-to-pci-bridge from new xml
                # Test if it matches with value of max_slots
                new_xml = VMXML.new_from_dumpxml(vm_name)
                device_on_pci_br = [
                    dev for dev in new_xml.get_devices('interface')
                    if dev.address['type_name'] == 'pci' and
                    int(dev.address['attrs']['bus'], 16) == int(target_bus, 16)
                ]

                logging.info('Devices attached to pcie-to-pci-bridge: %d',
                             len(device_on_pci_br))
                if len(device_on_pci_br) != max_slots:
                    test.fail('Max slots is %d instead of %d' %
                              (len(device_on_pci_br), max_slots))

            # Define a guest with pcie-to-pci-bridge controller's index <= bus
            if case.startswith('index_v_bus'):
                last_pci_index = max([
                    int(dev.index) for dev in vmxml.get_devices('controller')
                    if dev.type == 'pci'])

                # The new pcie-to-pci-bridge's index should be the last pci
                # index + 1
                new_index = last_pci_index + 1
                if case.endswith('less_than'):
                    new_bus = new_index + 1
                elif case.endswith('equal_to'):
                    new_bus = new_index
                else:
                    test.error('Invalid test: %s' % case)

                pci_br_kwargs['index'] = new_index
                pci_br_kwargs['address'] = pci_br_kwargs['address'] % (new_bus)
                logging.debug('pci_br_kwargs: %s', pci_br_kwargs)

                pcie_br = create_pci_device(pci_model, pci_model_name, **pci_br_kwargs)
                vmxml.add_device(pcie_br)
                result_to_check = virsh.define(vmxml.xml, debug=True)

            # Test define & start an VM with/without pcie-to-pci-bridge
            if case.startswith('vm_with_pcie_br'):
                if case.endswith('1_br'):
                    pass
                elif case.endswith('multi_br'):
                    # Add $pcie_br_count pcie-root-ports to vm
                    for i in range(pcie_br_count):
                        temp_xml = VMXML.new_from_inactive_dumpxml(vm_name)
                        port = create_pci_device('pcie-root-port', 'pcie-root-port')
                        temp_xml.add_device(port)
                        temp_xml.sync()

                    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
                    pci_root_ports = [dev for dev in vmxml.get_devices('controller')
                                      if dev.type == 'pci' and
                                      dev.model == 'pcie-root-port']
                    # Sort numerically; sorting hex strings would misorder
                    # indexes >= 0x10
                    indexs = [hex(i) for i in sorted(
                        int(dev.index) for dev in pci_root_ports)]
                    logging.debug('indexs: %s', indexs)

                    # Add $pcie_br_count pcie-to-pci-bridges to vm,
                    # each on a separate pcie-root-port
                    for i in range(pcie_br_count):
                        new_kwargs = {
                            'address': pci_br_kwargs['address'] % indexs[-i - 1]}
                        pcie_br = create_pci_device(pci_model, pci_model_name,
                                                    **new_kwargs)
                        vmxml.add_device(pcie_br)

                elif case.endswith('no_br'):
                    # Delete all pcie-to-pci-bridge
                    for dev in ori_pci_br:
                        vmxml.del_device(dev)
                    vmxml.sync()

                    # Add a pci device (rtl8139 nic)
                    iface = create_iface(iface_model, iface_source)
                    vmxml.add_device(iface)
                else:
                    test.error('Invalid test: %s' % case)

                # Test define and start vm with new xml settings
                result_define_vm = virsh.define(vmxml.xml, debug=True)
                libvirt.check_exit_status(result_define_vm)
                result_start_vm = virsh.start(vm_name, debug=True)
                libvirt.check_exit_status(result_start_vm)

                # Login to make sure vm is actually started
                vm.create_serial_console()
                vm.wait_for_serial_login().close()

                logging.debug(virsh.dumpxml(vm_name))

                # Get all pcie-to-pci-bridge after test operations
                new_vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
                cur_pci_br = [dev for dev in new_vmxml.get_devices('controller')
                              if dev.type == 'pci' and dev.model == pci_model]
                logging.debug('cur_pci_br: %s', cur_pci_br)

                if case.endswith('multi_br'):
                    # Check whether all new pcie-to-pci-br are successfully added
                    if len(cur_pci_br) - len(ori_pci_br) != pcie_br_count:
                        test.fail('Not all pcie-to-pci-br successfully added. '
                                  'Should be %d, actually is %d' %
                                  (pcie_br_count, len(cur_pci_br) - len(ori_pci_br)))

                elif case.endswith('no_br'):
                    # Check if a pcie-to-pci-br will be automatically created
                    if len(cur_pci_br) == 0:
                        test.fail('No new pcie-to-pci-bridge automatically created')

            # Check command result if there is a result to be checked
            if 'result_to_check' in locals():
                libvirt.check_exit_status(result_to_check, expect_error=status_error)
                libvirt.check_result(result_to_check, expected_fails=err_msg)

    finally:
        bkxml.sync()
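
# A small, standalone sketch of the slot arithmetic behind the max_slots case
# above: a PCI bus has 32 slots (0x00-0x1f) and slot 0x00 is not usable for
# devices on a pcie-to-pci-bridge, leaving 31 addresses. Names here are
# illustrative, not part of the test library.
def usable_slot_addresses(bus_index):
    """Yield libvirt-style <address> attrs for each usable slot on a bus."""
    for slot in range(1, 32):  # slot 0x00 is reserved
        yield {'type': 'pci', 'domain': '0x0000', 'bus': hex(bus_index),
               'slot': hex(slot), 'function': '0x0'}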
Beispiel #36
    def trigger_events(dom, events_list=[]):
        """
        Trigger various events in events_list

        :param dom: the vm object corresponding to the domain
        :param events_list: the list of events to trigger
        :return: a list of (domain name, expected event string) tuples that
                 the virsh event command is expected to print out
        """
        expected_events_list = []
        save_path = os.path.join(tmpdir, "%s_event.save" % dom.name)
        logging.debug(dom.name)
        xmlfile = dom.backup_xml()

        try:
            for event in events_list:
                if event in [
                        'start', 'restore', 'create', 'define', 'undefine'
                ]:
                    if dom.is_alive():
                        dom.destroy()
                        if event in ['create', 'define']:
                            dom.undefine()
                else:
                    if not dom.is_alive():
                        dom.start()
                        dom.wait_for_login().close()
                        if event == "resume":
                            dom.pause()

                if event == "undefine":
                    virsh.undefine(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Undefined Removed")
                elif event == "create":
                    virsh.create(xmlfile, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Started Booted")
                elif event == "destroy":
                    virsh.destroy(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Destroyed")
                elif event == "define":
                    virsh.define(xmlfile, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Defined Added")
                elif event == "start":
                    virsh.start(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Started Booted")
                    dom.wait_for_login().close()
                elif event == "suspend":
                    virsh.suspend(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Suspended Paused")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Suspended Paused")
                elif event == "resume":
                    virsh.resume(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                elif event == "save":
                    virsh.save(dom.name, save_path, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Suspended Paused")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Saved")
                elif event == "restore":
                    if not os.path.exists(save_path):
                        logging.error("%s not exist", save_path)
                    else:
                        virsh.restore(save_path, **virsh_dargs)
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Started Restored")
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Resumed Snapshot")
                elif event == "edit":
                    # Check whether the 'description' element exists
                    domxml = virsh.dumpxml(dom.name).stdout.strip()
                    find_desc = parseString(domxml).getElementsByTagName(
                        "description")
                    if find_desc == []:
                        # If it does not exist, add one
                        logging.info("Adding <description> to guest")
                        virsh.desc(dom.name, "--config",
                                   "Added desc for testvm", **virsh_dargs)
                    # The edit operation deletes the 'description' element
                    edit_cmd = [r":g/<description.*<\/description>/d"]
                    utlv.exec_virsh_edit(dom.name, edit_cmd)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Defined Updated")
                elif event == "shutdown":
                    if signal_name is None:
                        virsh.shutdown(dom.name, **virsh_dargs)
                        # Wait a few seconds for shutdown finish
                        time.sleep(3)
                        if utils_misc.compare_qemu_version(2, 9, 0):
                            # Shutdown reason is distinguished since qemu 2.9.0-9
                            expected_events_list.append(
                                "'lifecycle' for %s:"
                                " Shutdown Finished after guest request")
                    else:
                        os.kill(dom.get_pid(), getattr(signal, signal_name))
                        if utils_misc.compare_qemu_version(2, 9, 0):
                            expected_events_list.append(
                                "'lifecycle' for %s:"
                                " Shutdown Finished after host request")
                    if not utils_misc.compare_qemu_version(2, 9, 0):
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Shutdown Finished")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Shutdown")
                elif event == "reset":
                    virsh.reset(dom.name, **virsh_dargs)
                    expected_events_list.append("'reboot' for %s")
                elif event == "vcpupin":
                    virsh.vcpupin(dom.name, '0', '0', **virsh_dargs)
                    expected_events_list.append("'tunable' for %s:"
                                                "\n\tcputune.vcpupin0: 0")
                elif event == "emulatorpin":
                    virsh.emulatorpin(dom.name, '0', **virsh_dargs)
                    expected_events_list.append("'tunable' for %s:"
                                                "\n\tcputune.emulatorpin: 0")
                elif event == "setmem":
                    mem_size = int(params.get("mem_size", 512000))
                    virsh.setmem(dom.name, mem_size, **virsh_dargs)
                    expected_events_list.append("'balloon-change' for %s:")
                elif event == "device-added-removed":
                    add_disk(dom.name, new_disk, 'vdb', '')
                    expected_events_list.append("'device-added' for %s:"
                                                " virtio-disk1")
                    virsh.detach_disk(dom.name, 'vdb', **virsh_dargs)
                    expected_events_list.append("'device-removed' for %s:"
                                                " virtio-disk1")
                    iface_xml_obj = create_iface_xml()
                    iface_xml_obj.xmltreefile.write()
                    virsh.detach_device(dom.name, iface_xml_obj.xml,
                                        **virsh_dargs)
                    expected_events_list.append("'device-removed' for %s:"
                                                " net0")
                    time.sleep(2)
                    virsh.attach_device(dom.name, iface_xml_obj.xml,
                                        **virsh_dargs)
                    expected_events_list.append("'device-added' for %s:"
                                                " net0")
                elif event == "change-media":
                    target_device = "hdc"
                    device_target_bus = params.get("device_target_bus", "ide")
                    disk_blk = vm_xml.VMXML.get_disk_blk(dom.name)
                    logging.info("disk_blk %s", disk_blk)
                    if target_device not in disk_blk:
                        logging.info("Adding cdrom to guest")
                        if dom.is_alive():
                            dom.destroy()
                        add_disk(
                            dom.name, "''", target_device,
                            ("--type cdrom --sourcetype file --driver qemu " +
                             "--config --targetbus %s" % device_target_bus))
                        dom.start()
                    all_options = new_disk + " --insert"
                    virsh.change_media(dom.name, target_device, all_options,
                                       **virsh_dargs)
                    expected_events_list.append("'tray-change' for %s disk" +
                                                " .*%s.*:" %
                                                device_target_bus + " opened")
                    expected_events_list.append("'tray-change' for %s disk" +
                                                " .*%s.*:" %
                                                device_target_bus + " closed")
                    all_options = new_disk + " --eject"
                    virsh.change_media(dom.name, target_device, all_options,
                                       **virsh_dargs)
                    expected_events_list.append("'tray-change' for %s disk" +
                                                " .*%s.*:" %
                                                device_target_bus + " opened")

                else:
                    test.error("Unsupported event: %s" % event)
                # Events may not be received immediately
                time.sleep(3)
        finally:
            if os.path.exists(save_path):
                os.unlink(save_path)
            if os.path.exists(new_disk):
                os.unlink(new_disk)
        return [(dom.name, event) for event in expected_events_list]
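
# A hedged sketch of how the (domain, expected-event) pairs returned above
# might be matched against `virsh event --loop` output; this fragment does not
# show the real matcher, so the function name and logic here are assumptions.
import re

def match_expected_events(virsh_output, expected_pairs):
    """Return the expected patterns that are missing from the output."""
    missing = []
    for dom_name, pattern in expected_pairs:
        # most patterns embed '%s' for the domain name; some are regex
        # fragments (e.g. the tray-change entries)
        regex = pattern % dom_name if '%s' in pattern else pattern
        if not re.search(regex, virsh_output):
            missing.append(regex)
    return missing
Beispiel #37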
def run(test, params, env):
    """
    Test for basic serial character device function.

    1) Define the VM with the specified serial device and check that the
       define result meets expectation
    2) Check whether the defined XML meets expectation
    3) Start the guest and check if the start result meets expectation
    4) Test the function of the started serial device
    5) Shutdown the VM and check whether it is cleaned up properly
    6) Clean up the environment
    """

    def set_targets(serial):
        """
        Set the serial device target model and type according to the platform
        """
        machine = platform.machine()
        if "ppc" in machine:
            serial.target_model = 'spapr-vty'
            serial.target_type = 'spapr-vio-serial'
        elif "aarch" in machine:
            serial.target_model = 'pl011'
            serial.target_type = 'system-serial'
        else:
            serial.target_model = target_type
            serial.target_type = target_type

    def prepare_spice_graphics_device():
        """
        Prepare a spice graphics device XML according to parameters
        """
        graphic = Graphics(type_name='spice')
        graphic.autoport = "yes"
        graphic.port = "-1"
        graphic.tlsPort = "-1"
        return graphic

    def prepare_serial_device():
        """
        Prepare a serial device XML according to parameters
        """
        local_serial_type = serial_type
        if serial_type == "tls":
            local_serial_type = "tcp"
        serial = librarian.get('serial')(local_serial_type)

        serial.target_port = "0"

        set_targets(serial)

        sources = []
        logging.debug(sources_str)
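        # sources_str is a whitespace-separated list of sources, each one a
        # comma-separated list of key:value attributes, e.g.
        # "host:127.0.0.1,service:2445,mode:connect" (illustrative value)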
        for source_str in sources_str.split():
            source_dict = {}
            for att in source_str.split(','):
                key, val = att.split(':')
                source_dict[key] = val
            sources.append(source_dict)
        serial.sources = sources
        return serial

    def prepare_console_device():
        """
        Prepare a console device XML according to parameters
        """
        local_serial_type = serial_type
        if serial_type == "tls":
            local_serial_type = "tcp"
        console = librarian.get('console')(local_serial_type)
        console.target_type = console_target_type
        console.target_port = console_target_port

        sources = []
        logging.debug(sources_str)
        for source_str in sources_str.split():
            source_dict = {}
            for att in source_str.split(','):
                key, val = att.split(':')
                source_dict[key] = val
            sources.append(source_dict)
        console.sources = sources
        return console

    def define_and_check():
        """
        Predict the error message when defining and try to define the guest
        with testing serial device.
        """
        fail_patts = []
        if serial_type in ['dev', 'file', 'pipe', 'unix'] and not any(
                ['path' in s for s in serial_dev.sources]):
            fail_patts.append(r"Missing source path attribute for char device")
        if serial_type in ['tcp'] and not any(
                ['host' in s for s in serial_dev.sources]):
            fail_patts.append(r"Missing source host attribute for char device")
        if serial_type in ['tcp', 'udp'] and not any(
                ['service' in s for s in serial_dev.sources]):
            fail_patts.append(r"Missing source service attribute for char "
                              "device")
        if serial_type in ['spiceport'] and not any(
                ['channel' in s for s in serial_dev.sources]):
            fail_patts.append(r"Missing source channel attribute for char "
                              "device")
        if serial_type in ['nmdm'] and not any(
                ['master' in s for s in serial_dev.sources]):
            fail_patts.append(r"Missing master path attribute for nmdm device")
        if serial_type in ['nmdm'] and not any(
                ['slave' in s for s in serial_dev.sources]):
            fail_patts.append(r"Missing slave path attribute for nmdm device")
        if serial_type in ['spicevmc']:
            fail_patts.append(r"spicevmc device type only supports virtio")

        vm_xml.undefine()
        res = vm_xml.virsh.define(vm_xml.xml)
        libvirt.check_result(res, expected_fails=fail_patts)
        return not res.exit_status

    def check_xml():
        """
        Predict the expected serial device and generated console device,
        and check the resulting domain XML against the expectation
        """
        console_cls = librarian.get('console')

        local_serial_type = serial_type

        if serial_type == 'tls':
            local_serial_type = 'tcp'
        # Predict expected serial and console XML
        expected_console = console_cls(local_serial_type)

        if local_serial_type == 'udp':
            sources = []
            for source in serial_dev.sources:
                if 'service' in source and 'mode' not in source:
                    source['mode'] = 'connect'
                sources.append(source)
        else:
            sources = serial_dev.sources

        expected_console.sources = sources

        if local_serial_type == 'tcp':
            if 'protocol_type' in serial_dev:
                expected_console.protocol_type = serial_dev.protocol_type
            else:
                expected_console.protocol_type = "raw"

        expected_console.target_port = serial_dev.target_port
        if 'target_type' in serial_dev:
            expected_console.target_type = serial_dev.target_type
        expected_console.target_type = console_target_type
        logging.debug("Expected console XML is:\n%s", expected_console)

        # Get current serial and console XML
        current_xml = VMXML.new_from_dumpxml(vm_name)
        serial_elem = current_xml.xmltreefile.find('devices/serial')
        console_elem = current_xml.xmltreefile.find('devices/console')
        if console_elem is None:
            test.fail("Expect generate console automatically, "
                      "but found none.")
        if serial_elem and console_target_type != 'serial':
            test.fail("Don't Expect exist serial device, "
                      "but found:\n%s" % serial_elem)

        cur_console = console_cls.new_from_element(console_elem)
        logging.debug("Current console XML is:\n%s", cur_console)
        # Compare current serial and console with oracle.
        if not expected_console == cur_console:
            # "==" has been override
            test.fail("Expect generate console:\n%s\nBut got:\n%s" %
                      (expected_console, cur_console))

        if console_target_type == 'serial':
            serial_cls = librarian.get('serial')
            expected_serial = serial_cls(local_serial_type)
            expected_serial.sources = sources

            set_targets(expected_serial)

            if local_serial_type == 'tcp':
                if 'protocol_type' in serial_dev:
                    expected_serial.protocol_type = serial_dev.protocol_type
                else:
                    expected_serial.protocol_type = "raw"
            expected_serial.target_port = serial_dev.target_port
            if serial_elem is None:
                test.fail("Expect exist serial device, "
                          "but found none.")
            cur_serial = serial_cls.new_from_element(serial_elem)
            if target_type == 'pci-serial':
                if cur_serial.address is None:
                    test.fail("Expect serial device address is not assigned")
                else:
                    logging.debug("Serial address is: %s", cur_serial.address)

            logging.debug("Expected serial XML is:\n%s", expected_serial)
            logging.debug("Current serial XML is:\n%s", cur_serial)
            # Compare current serial and console with oracle.
            if target_type != 'pci-serial' and not expected_serial == cur_serial:
                # "==" has been override
                test.fail("Expect serial device:\n%s\nBut got:\n "
                          "%s" % (expected_serial, cur_serial))

    def prepare_serial_console():
        """
        Prepare serial console to connect with guest serial according to
        the serial type
        """
        is_server = console_type == 'server'
        if serial_type == 'unix':
            # Unix socket path should match SELinux label
            socket_path = '/var/lib/libvirt/qemu/virt-test'
            console = Console('unix', socket_path, is_server)
        elif serial_type == 'tls':
            host = '127.0.0.1'
            service = 5556
            console = Console('tls', (host, service), is_server,
                              custom_pki_path)
        elif serial_type == 'tcp':
            host = '127.0.0.1'
            service = 2445
            console = Console('tcp', (host, service), is_server)
        elif serial_type == 'udp':
            host = '127.0.0.1'
            service = 2445
            console = Console('udp', (host, service), is_server)
        elif serial_type == 'file':
            socket_path = '/var/log/libvirt/virt-test'
            console = Console('file', socket_path, is_server)
        elif serial_type == 'pipe':
            socket_path = '/tmp/virt-test'
            console = Console('pipe', socket_path, is_server)
        else:
            logging.debug("Serial type %s don't support console test yet.",
                          serial_type)
            console = None
        return console

    def check_qemu_cmdline():
        """
        Check if VM's qemu command line meets expectation.
        """
        cmdline = open('/proc/%s/cmdline' % vm.get_pid()).read()
        logging.debug('Qemu command line: %s', cmdline)
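        # /proc/<pid>/cmdline separates arguments with NUL bytes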
        options = cmdline.split('\x00')
        ser_opt = None
        con_opt = None
        ser_dev = None
        con_dev = None
        exp_ser_opt = None
        exp_ser_dev = None
        exp_con_opt = None
        exp_con_dev = None
        # Get serial and console options from qemu command line
        for idx, opt in enumerate(options):
            if opt == '-chardev':
                if 'id=charserial' in options[idx + 1]:
                    ser_opt = options[idx + 1]
                    ser_dev = options[idx + 3]
                    logging.debug('Serial option is: %s', ser_opt)
                    logging.debug('Serial device option is: %s', ser_dev)
                if 'id=charconsole' in options[idx + 1]:
                    con_opt = options[idx + 1]
                    con_dev = options[idx + 3]
                    logging.debug('Console option is: %s', con_opt)
                    logging.debug('Console device option is: %s', con_dev)

        # Get expected serial and console options from params
        if serial_type == 'dev':
            ser_type = 'tty'
        elif serial_type in ['unix', 'tcp', 'tls']:
            ser_type = 'socket'
        else:
            ser_type = serial_type

        exp_ser_opts = [ser_type, 'id=charserial0']
        if serial_type in ['dev', 'file', 'pipe', 'unix']:
            for source in serial_dev.sources:
                if 'path' in source:
                    path = source['path']
            if serial_type == 'file':
                # Use a regex to keep the fdset number flexible
                exp_ser_opts.append(r'path=/dev/fdset/\d+')
                exp_ser_opts.append('append=on')
            elif serial_type == 'unix':
                # Use a regex to keep the fd number flexible
                exp_ser_opts.append(r'fd=\d+')
            else:
                exp_ser_opts.append('path=%s' % path)
        elif serial_type in ['tcp', 'tls']:
            for source in serial_dev.sources:
                if 'host' in source:
                    host = source['host']
                if 'service' in source:
                    port = source['service']
            exp_ser_opts.append('host=%s' % host)
            exp_ser_opts.append('port=%s' % port)
        elif serial_type in ['udp']:
            localaddr = ''
            localport = '0'
            for source in serial_dev.sources:
                if source['mode'] == 'connect':
                    if 'host' in source:
                        host = source['host']
                    if 'service' in source:
                        port = source['service']
                else:
                    if 'host' in source:
                        localaddr = source['host']
                    if 'service' in source:
                        localport = source['service']
            exp_ser_opts.append('host=%s' % host)
            exp_ser_opts.append('port=%s' % port)
            exp_ser_opts.append('localaddr=%s' % localaddr)
            exp_ser_opts.append('localport=%s' % localport)

        if serial_type in ['unix', 'tcp', 'udp', 'tls']:
            mode = ''
            for source in serial_dev.sources:
                if 'mode' in source:
                    mode = source['mode']
            if mode == 'bind':
                exp_ser_opts.append('server')
                exp_ser_opts.append('nowait')

        if serial_type == 'tls':
            exp_ser_opts.append('tls-creds=objcharserial0_tls0')

        exp_ser_opt = ','.join(exp_ser_opts)

        if "ppc" in platform.machine():
            exp_ser_devs = ['spapr-vty', 'chardev=charserial0',
                            'reg=0x30000000']
            if libvirt_version.version_compare(3, 9, 0):
                exp_ser_devs.insert(2, 'id=serial0')
        else:
            logging.debug('target_type: %s', target_type)
            if target_type == 'pci-serial':
                exp_ser_devs = ['pci-serial', 'chardev=charserial0',
                                'id=serial0', r'bus=pci.\d+', r'addr=0x\d+']
            else:
                exp_ser_devs = ['isa-serial', 'chardev=charserial0',
                                'id=serial0']
        exp_ser_dev = ','.join(exp_ser_devs)

        if console_target_type != 'serial' or serial_type == 'spicevmc':
            exp_ser_opt = None
            exp_ser_dev = None
        logging.debug("exp_ser_opt: %s", exp_ser_opt)
        logging.debug("ser_opt: %s", ser_opt)

        # Check options against expectation
        if exp_ser_opt is not None and re.match(exp_ser_opt, ser_opt) is None:
            test.fail('Expect get qemu command serial option "%s", '
                      'but got "%s"' % (exp_ser_opt, ser_opt))
        if exp_ser_dev is not None and ser_dev is not None \
           and re.match(exp_ser_dev, ser_dev) is None:
            test.fail(
                'Expect get qemu command serial device option "%s", '
                'but got "%s"' % (exp_ser_dev, ser_dev))

        if console_target_type == 'virtio':
            exp_con_opts = [serial_type, 'id=charconsole1']
            exp_con_opt = ','.join(exp_con_opts)

            exp_con_devs = []
            if console_target_type == 'virtio':
                exp_con_devs.append('virtconsole')
            exp_con_devs += ['chardev=charconsole1', 'id=console1']
            exp_con_dev = ','.join(exp_con_devs)
            if con_opt != exp_con_opt:
                test.fail(
                    'Expect get qemu command console option "%s", '
                    'but got "%s"' % (exp_con_opt, con_opt))
            if con_dev != exp_con_dev:
                test.fail(
                    'Expect get qemu command console device option "%s", '
                    'but got "%s"' % (exp_con_dev, con_dev))

    def check_serial_console(console, username, password):
        """
        Check whether console works properly by read the result for read only
        console and login and emit a command for read write console.
        """
        if serial_type in ['file', 'tls']:
            _, output = console.read_until_output_matches(['.*[Ll]ogin:'])
        else:
            console.wait_for_login(username, password)
            status, output = console.cmd_status_output('pwd')
            logging.debug("Command status: %s", status)
            logging.debug("Command output: %s", output)

    def cleanup(objs_list):
        """
        Clean up test environment
        """
        if serial_type == 'file':
            if os.path.exists('/var/log/libvirt/virt-test'):
                os.remove('/var/log/libvirt/virt-test')

        # Recover the test environment
        for obj in objs_list:
            obj.auto_recover = True
            del obj

    def get_console_type():
        """
        Determine whether the console should be started before or after the guest starts.
        """
        if serial_type in ['tcp', 'unix', 'udp', 'tls']:
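            # If any chardev source connects out, the test side must listen
            # first (act as the server); otherwise the chardev binds and the
            # test side connects as a client.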
            for source in serial_dev.sources:
                if 'mode' in source and source['mode'] == 'connect':
                    return 'server'
            return 'client'
        elif serial_type in ['file']:
            return 'client'
        elif serial_type in ['pipe']:
            return 'server'

    serial_type = params.get('serial_type', 'pty')
    sources_str = params.get('serial_sources', '')
    username = params.get('username')
    password = params.get('password')
    console_target_type = params.get('console_target_type', 'serial')
    target_type = params.get('target_type', 'isa-serial')
    console_target_port = params.get('console_target_port', '0')
    second_serial_console = params.get('second_serial_console', 'no') == 'yes'
    custom_pki_path = params.get('custom_pki_path', '/etc/pki/libvirt-chardev')
    auto_recover = params.get('auto_recover', 'no')
    client_pwd = params.get('client_pwd', None)
    server_pwd = params.get('server_pwd', None)

    args_list = [client_pwd, server_pwd]

    for arg in args_list:
        if arg and arg.count("ENTER.YOUR."):
            test.fail("Please assign a value for %s!" % arg)

    vm_name = params.get('main_vm', 'virt-tests-vm1')
    vm = env.get_vm(vm_name)

    # it's used to clean up TLS objs later
    objs_list = []

    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()
    try:
        if console_target_type != 'virtio':
            vm_xml.remove_all_device_by_type('serial')
            vm_xml.remove_all_device_by_type('console')
        if serial_type == "tls":
            test_dict = dict(params)
            tls_obj = TLSConnection(test_dict)
            if auto_recover == "yes":
                objs_list.append(tls_obj)
            tls_obj.conn_setup(False, True, True)

        serial_dev = prepare_serial_device()
        if console_target_type == 'serial' or second_serial_console:
            logging.debug('Serial device:\n%s', serial_dev)
            vm_xml.add_device(serial_dev)
            if serial_type == "spiceport":
                spice_graphics_dev = prepare_spice_graphics_device()
                logging.debug('Spice graphics device:\n%s', spice_graphics_dev)
                vm_xml.add_device(spice_graphics_dev)

        console_dev = prepare_console_device()
        logging.debug('Console device:\n%s', console_dev)
        vm_xml.add_device(console_dev)
        if second_serial_console:
            console_target_type = 'serial'
            console_target_port = '1'
            console_dev = prepare_console_device()
            logging.debug('Console device:\n%s', console_dev)
            vm_xml.add_device(console_dev)
            vm_xml.undefine()
            res = virsh.define(vm_xml.xml)
            if 'Only the first console can be a serial port' in res.stderr:
                logging.debug("Test 'only the first console can be a serial "
                              "port' succeeded")
                return
            test.fail("Test 'only the first console can be a serial port' failed.")

        console_type = get_console_type()

        if not define_and_check():
            logging.debug("Can't define the VM, exiting.")
            return
        if console_target_type != 'virtio':
            check_xml()

        expected_fails = []
        if serial_type == 'nmdm' and platform.system() != 'FreeBSD':
            expected_fails.append("unsupported chardev 'nmdm'")

        # Prepare console before start when console is server
        if console_type == 'server':
            console = prepare_serial_console()

        res = virsh.start(vm_name)
        libvirt.check_result(res, expected_fails, [])
        if res.exit_status:
            logging.debug("Can't start the VM, exiting.")
            return
        check_qemu_cmdline()
        # Prepare console after start when console is client
        if console_type == 'client':
            console = prepare_serial_console()

        if (console_type is not None and serial_type != 'tls' and
           console_type != 'server'):
            check_serial_console(console, username, password)

        vm.destroy()

    finally:
        cleanup(objs_list)
        vm_xml_backup.sync()
Beispiel #38
0
def run(test, params, env):
    """
    Test pure checkpoint commands
    """

    def prepare_checkpoints(disk="vdb", num=1, cp_prefix="test_checkpoint_"):
        """
        Create checkpoints for specific disk

        :param disk: The disk to create checkpoint.
        :param num: How many checkpoints to be created
        :param cp_prefix: The prefix to name the checkpoint.
        """
        option_pattern = ("{0} --diskspec vda,checkpoint=no "
                          "--diskspec {1},checkpoint=bitmap,bitmap={0}")
        for i in range(num):
            # create checkpoints
            checkpoint_name = cp_prefix + str(i)
            options = option_pattern.format(checkpoint_name, disk)
            virsh.checkpoint_create_as(vm_name, options, **virsh_dargs)
            current_checkpoints.append(checkpoint_name)

    # Cancel the test if libvirt version is too low
    if not libvirt_version.version_compare(6, 0, 0):
        test.cancel("Current libvirt version doesn't support "
                    "incremental backup.")

    checkpoint_cmd = params.get("checkpoint_cmd")
    cmd_flag = params.get("flag")
    required_checkpoints = int(params.get("required_checkpoints", 0))
    test_disk_size = params.get("test_disk_size", "100M")
    test_disk_target = params.get("test_disk_target", "vdb")
    status_error = "yes" == params.get("status_error")
    tmp_dir = data_dir.get_tmp_dir()
    current_checkpoints = []
    virsh_dargs = {'debug': True, 'ignore_status': False}

    try:
        vm_name = params.get("main_vm")
        vm = env.get_vm(vm_name)

        # Backup vm xml
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        vmxml_backup = vmxml.copy()

        # Enable vm incremental backup capability. This is only a workaround
        # to make sure incremental backup can work for the vm. The code needs
        # to be removed as soon as the feature is enabled by default, which
        # is tracked by bz1799015
        tree = ET.parse(vmxml.xml)
        root = tree.getroot()
        for elem in root.iter('domain'):
            elem.set('xmlns:qemu', 'http://libvirt.org/schemas/domain/qemu/1.0')
            qemu_cap = ET.Element("qemu:capabilities")
            elem.insert(-1, qemu_cap)
            incbackup_cap = ET.Element("qemu:add")
            incbackup_cap.set('capability', 'incremental-backup')
            qemu_cap.insert(1, incbackup_cap)
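            # This injects into the domain xml:
            #   <qemu:capabilities>
            #     <qemu:add capability='incremental-backup'/>
            #   </qemu:capabilities>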
        vmxml.undefine()
        tmp_vm_xml = os.path.join(tmp_dir, "tmp_vm.xml")
        tree.write(tmp_vm_xml)
        virsh.define(tmp_vm_xml)
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        logging.debug("Script insert xml elements to make sure vm can support "
                      "incremental backup. This should be removded when "
                      "bz 1799015 fixed.")
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Prepare the disk to be used.
        disk_params = {}
        disk_path = ""
        image_name = "{}_image.qcow2".format(test_disk_target)
        disk_path = os.path.join(tmp_dir, image_name)
        libvirt.create_local_disk("file", disk_path, test_disk_size,
                                  "qcow2")
        disk_params = {"device_type": "disk",
                       "type_name": "file",
                       "driver_type": "qcow2",
                       "target_dev": test_disk_target,
                       "source_file": disk_path}
        disk_xml = libvirt.create_disk_xml(disk_params)
        virsh.attach_device(vm.name, disk_xml,
                            flagstr="--config", **virsh_dargs)
        vm.start()
        vm.wait_for_login().close()
        if required_checkpoints > 0:
            prepare_checkpoints(test_disk_target, required_checkpoints)
        if checkpoint_cmd == "checkpoint-create":
            if not current_checkpoints:
                test.fail("No existing checkpoints prepared.")
            if "--redefine" in cmd_flag:
                no_domain = "yes" == params.get("no_domain")
                cp_dumpxml_options = ""
                if no_domain:
                    cp_dumpxml_options = "--no-domain"
                checkpoint_redef = current_checkpoints[0]
                cp_xml = checkpoint_xml.CheckpointXML.new_from_checkpoint_dumpxml(
                        vm_name, checkpoint_redef, cp_dumpxml_options)
                logging.debug("Checkpoint XML to be redefined is: %s", cp_xml)
                xml_file = cp_xml.xml
                virsh.checkpoint_delete(vm_name, checkpoint_redef,
                                        "--metadata", **virsh_dargs)
                cmd_options = xml_file + " " + cmd_flag
                result = virsh.checkpoint_create(vm_name, cmd_options, debug=True)
                libvirt.check_exit_status(result, status_error)
        elif checkpoint_cmd == "checkpoint-create-as":
            if "--print-xml" in cmd_flag:
                checkpoint_name = "test_checkpoint_0"
                options = ("{0} --diskspec vda,checkpoint=no --diskspec {1},"
                           "checkpoint=bitmap,bitmap={0} "
                           "--print-xml".format(checkpoint_name, test_disk_target))
                virsh.checkpoint_create_as(vm_name, options, **virsh_dargs)
                # The checkpoint should not be created, so run the following check
                cp_list_result = virsh.checkpoint_list(vm_name, checkpoint_name, debug=True)
                libvirt.check_exit_status(cp_list_result, True)
        elif checkpoint_cmd == "checkpoint-info":
            if len(current_checkpoints) != 3:
                test.fail("We should prepare 3 checkpoints.")
            parent_checkpoint = current_checkpoints[0]
            test_checkpoint = current_checkpoints[1]
            stdout = virsh.checkpoint_info(vm_name, test_checkpoint,
                                           **virsh_dargs).stdout_text.strip()
            if (
                    not re.search("domain.*%s" % vm_name, stdout, re.IGNORECASE) or
                    not re.search("parent.*%s" % parent_checkpoint, stdout, re.IGNORECASE) or
                    not re.search("children.*1", stdout, re.IGNORECASE) or
                    not re.search("descendants.*1", stdout, re.IGNORECASE)
               ):
                test.fail("checkpoint-info return inaccurate informaion: %s" % stdout)
        elif checkpoint_cmd == "checkpoint-list":
            logic_error = False
            if not cmd_flag:
                stdout = virsh.checkpoint_list(vm_name,
                                               **virsh_dargs).stdout_text.strip()
                for checkpoint in current_checkpoints:
                    if checkpoint not in stdout:
                        logic_error = True
            elif cmd_flag == "--parent":
                stdout = virsh.checkpoint_list(vm_name, cmd_flag,
                                               **virsh_dargs).stdout_text.strip()
                for checkpoint in current_checkpoints:
                    if checkpoint == current_checkpoints[-1]:
                        if stdout.count(checkpoint) != 1:
                            logic_error = True
                    else:
                        if stdout.count(checkpoint) != 2:
                            logic_error = True
            elif cmd_flag == "--roots":
                stdout = virsh.checkpoint_list(vm_name, cmd_flag,
                                               **virsh_dargs).stdout_text.strip()
                for checkpoint in current_checkpoints:
                    if checkpoint == current_checkpoints[0]:
                        if stdout.count(checkpoint) != 1:
                            logic_error = True
                    else:
                        if stdout.count(checkpoint) != 0:
                            logic_error = True
            elif cmd_flag == "--tree":
                stdout = virsh.checkpoint_list(vm_name, cmd_flag,
                                               **virsh_dargs).stdout_text.strip()
                lines = stdout.splitlines()
                prev_indent_num = -1
                for line in lines:
                    for checkpoint in current_checkpoints:
                        if checkpoint in line:
                            cur_indent_num = line.rstrip().count(" ")
                            if cur_indent_num <= prev_indent_num:
                                logic_error = True
                                break
                            prev_indent_num = cur_indent_num
            elif cmd_flag == "--name":
                stdout = virsh.checkpoint_list(vm_name, cmd_flag,
                                               **virsh_dargs).stdout_text.strip()
                checkpoint_names = stdout.splitlines()
                if not operator.eq(checkpoint_names, current_checkpoints):
                    logic_error = True
            elif cmd_flag == "--topological":
                stdout = virsh.checkpoint_list(vm_name, cmd_flag,
                                               **virsh_dargs).stdout_text.strip()
                for checkpoint in current_checkpoints:
                    if checkpoint not in stdout:
                        logic_error = True
            elif cmd_flag == "--from":
                cmd_options = cmd_flag + " " + current_checkpoints[0]
                stdout = virsh.checkpoint_list(vm_name, cmd_options,
                                               **virsh_dargs).stdout_text.strip()
                if (current_checkpoints[0] in stdout
                        or current_checkpoints[2] in stdout
                        or current_checkpoints[1] not in stdout):
                    logic_error = True
            elif cmd_flag == "--descendants":
                cmd_options = cmd_flag + " " + current_checkpoints[0]
                stdout = virsh.checkpoint_list(vm_name, cmd_options,
                                               **virsh_dargs).stdout_text.strip()
                if (current_checkpoints[0] in stdout
                        or current_checkpoints[1] not in stdout
                        or current_checkpoints[2] not in stdout):
                    logic_error = True
            elif cmd_flag == "--no-leaves":
                stdout = virsh.checkpoint_list(vm_name, cmd_flag,
                                               **virsh_dargs).stdout_text.strip()
                if (current_checkpoints[0] not in stdout
                        or current_checkpoints[1] not in stdout
                        or current_checkpoints[2] in stdout):
                    logic_error = True
            elif cmd_flag == "--leaves":
                stdout = virsh.checkpoint_list(vm_name, cmd_flag,
                                               **virsh_dargs).stdout_text.strip()
                if (current_checkpoints[0] in stdout
                        or current_checkpoints[1] in stdout
                        or current_checkpoints[2] not in stdout):
                    logic_error = True
            if logic_error:
                test.fail("checkpoint-list with '%s' gives wrong output"
                          % cmd_flag)
        elif checkpoint_cmd == "checkpoint-dumpxml":
            if "--size" in cmd_flag:
                test.cancel("'--size' not supported yet(bz1814573)")
            elif "--security-info" in cmd_flag:
                if vm.is_alive():
                    vm.destroy(gracefully=False)
                password = "******"
                vm_xml.VMXML.set_graphics_attr(vm_name, {'passwd': password})
                vm.start()
                vm.wait_for_login().close()
                prepare_checkpoints()
                test_checkpoint = current_checkpoints[0]
                stdout = virsh.checkpoint_dumpxml(vm_name,
                                                  test_checkpoint,
                                                  **virsh_dargs).stdout_text.strip()
                if password in stdout:
                    logging.debug("checkpoint xml is: %s", stdout)
                    test.fail("Security info displayed in unsecurity dumpxml.")
                stdout = virsh.checkpoint_dumpxml(vm_name,
                                                  test_checkpoint + " --security-info",
                                                  **virsh_dargs).stdout_text.strip()
                if password not in stdout:
                    logging.debug("checkpoint xml is: %s", stdout)
                    test.fail("Security info not displayed in security dumpxml.")
        elif checkpoint_cmd == "virsh_list":
            stdout = virsh.dom_list(cmd_flag, **virsh_dargs).stdout_text.strip()
            if ((vm_name in stdout and cmd_flag == "--without-checkpoint") or
                    (vm_name not in stdout and cmd_flag == "--with-checkpoint")):
                test.fail("virsh list with '%s' contains wrong data" % cmd_flag)
    finally:
        # Remove checkpoints
        if "current_checkpoints" in locals() and current_checkpoints:
            for checkpoint_name in current_checkpoints:
                virsh.checkpoint_delete(vm_name, checkpoint_name,
                                        ignore_status=True, debug=True)

        if vm.is_alive():
            vm.destroy(gracefully=False)

        # Restoring vm
        vmxml_backup.sync()
def run(test, params, env):
    """
    Test command: virsh desc.

    This command shows or modifies the description or title of a domain.
    1). For a running domain, get/set description & title with options.
    2). For a shut off domain, get/set description & title with options.
    3). For a persistent/transient domain, get/set description & title with options.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    options = params.get("desc_option", "")
    persistent_vm = params.get("persistent_vm", "yes")

    def run_cmd(name, options, desc_str, status_error):
        """
        Run virsh desc command

        :return: cmd output
        """
        cmd_result = virsh.desc(name, options, desc_str, ignore_status=True, debug=True)
        output = cmd_result.stdout.strip()
        err = cmd_result.stderr.strip()
        status = cmd_result.exit_status
        if status_error == "no" and status:
            raise error.TestFail(err)
        elif status_error == "yes" and status == 0:
            raise error.TestFail("Expect fail, but run successfully.")
        return output

    def vm_state_switch():
        """
        Switch the vm state
        """
        if vm.is_dead():
            vm.start()
        elif vm.is_alive():
            vm.destroy()

    def desc_check(name, desc_str, state_switch):
        """
        Check the domain's description or title
        """
        ret = False
        if state_switch:
            vm_state_switch()
        output = run_cmd(name, "", "", "no")
        if desc_str == output:
            logging.debug("Domain desc check successfully.")
            ret = True
        else:
            logging.error("Domain desc check fail.")
        if state_switch:
            vm_state_switch()
        return ret

    def run_test():
        """
        Get/Set vm desc by running virsh desc command.
        """
        status_error = params.get("status_error", "no")
        desc_str = params.get("desc_str", "")
        state_switch = False
        # Test 1: get vm desc
        run_cmd(vm_name, options, "", status_error)
        # Test 2: set vm desc
        if options.count("--config") and vm.is_persistent():
            state_switch = True
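        # Setting desc with --live should fail for a shut off domain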
        if options.count("--live") and vm.state() == "shut off":
            status_error = "yes"
        if len(desc_str) == 0:
            desc_str = "New Description/title for the %s vm" % vm.state()
            logging.debug("Use the default desc message: %s", desc_str)
        run_cmd(vm_name, options, desc_str, status_error)
        desc_check(vm_name, desc_str, state_switch)

    # Prepare transient/persistent vm
    original_xml = vm.backup_xml()
    if persistent_vm == "no" and vm.is_persistent():
        vm.undefine()
    elif persistent_vm == "yes" and not vm.is_persistent():
        vm.define(original_xml)
    try:
        if vm.is_dead():
            vm.start()
        run_test()
        # Recover the vm and shut it off
        if persistent_vm == "yes":
            vm.define(original_xml)
            vm.destroy()
            run_test()
    finally:
        vm.destroy()
        virsh.define(original_xml)
        os.remove(original_xml)
def run(test, params, env):
    """
    Test vcpu affinity feature as follows:
    positive test:
        1. use vcpu cpuset in xml to define vcpu affinity
        2. use cputune cpuset in xml to define vcpu affinity
        3. use offline-to-online host cpu as cpuset to run virsh vcpupin
        4. set vcpu placement in xml to auto and check xml result
        5. set vcpu cpuset in xml without placement defined and check xml result
    negative test:
        1. use an out-of-range cpuset as vcpu cpuset in xml to define vcpu affinity
        2. use an out-of-range cpuset as cputune cpuset in xml to define vcpu affinity
        3. use an invalid cpuset as cputune cpuset in xml to define vcpu affinity
        4. use a duplicate vcpu in xml to define vcpu affinity
        5. use an offline host cpu as cputune cpuset to run virsh vcpupin
        6. set vcpu affinity for a non-existent vcpu and check xml result
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    cpuset_mask = params.get("cpuset_mask", "")
    vcpu = params.get("vcpu", "0")
    maxvcpu = params.get("maxvcpu", "8")
    current_vcpu = params.get("current_vcpu", "3")
    check = params.get("check", "")
    config_xml = params.get("config_xml", "")

    status_error = "yes" == params.get("status_error", "no")
    define_fail = "yes" == params.get("define_fail", "no")
    start_fail = "yes" == params.get("start_fail", "no")
    runtime_fail = "yes" == params.get("runtime_fail", "no")
    vm_down = "yes" == params.get("vm_down", "no")

    vcpu_cpuset = params.get("vcpu_cpuset", "")
    cputune_cpuset = params.get("cputune_cpuset", "")
    vcpu_placement = params.get("vcpu_placement", "static")
    err_msg = params.get("err_msg", "")
    start_timeout = int(params.get("start_timeout", "60"))
    offline_hostcpus = params.get("offline_hostcpus", "")
    machine_cpuset_path = params.get("machine_cpuset_path", "")

    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    def check_vcpu_affinity():
        """
        check vcpu affinity defined by vcpu cpuset or cputune cpuset
        """
        affinity = vcpu_cpuset if not cputune_cpuset else cputune_cpuset
        affinity = {vcpu: affinity}
        virsh.vcpuinfo(vm_name, debug=True)

        vmxml_live = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug(vmxml_live)

        # if vcpu >= maxvcpu, the cputune should not exist in xml
        if int(vcpu) >= int(maxvcpu):
            try:
                if hasattr(vmxml_live, 'cputune'):
                    test.fail("cputune tag is set when vcpu >= maxvcpu")
                # check the expected vcpu affinity with the one got from
                # the running vm
                elif not utils_hotplug.check_affinity(vm, affinity):
                    test.fail("vcpu affinity check fail")
            except xcepts.LibvirtXMLError:
                pass

    try:
        hostcpu_num = int(cpu.total_cpus_count())

        # online all host cpus
        for x in range(hostcpu_num):
            if cpu.online(x):
                test.fail("fail to online cpu{}".format(x))

        # use vcpu cpuset or/and cputune cpuset to define xml
        del vmxml.cputune
        del vmxml.vcpus
        del vmxml.placement
        vmxml.vcpu = int(maxvcpu)
        vmxml.current_vcpu = current_vcpu

        # config vcpu cpuset for cpuset range test
        num = 1 if not status_error else 0
        cpuset_new = "0-{},^{}".format(hostcpu_num-num, cpuset_mask)
        if (config_xml == "vcpu" and check.endswith("range_cpuset")):
            vcpu_cpuset = cpuset_new
        vmxml.cpuset = vcpu_cpuset

        if vcpu_placement:
            vmxml.placement = vcpu_placement

        if config_xml == "cputune":
            cputune = vm_xml.VMCPUTuneXML()
            if check.endswith("range_cpuset"):
                cputune_cpuset = cpuset_new
            if check.endswith("duplicate_vcpu"):
                cputune.vcpupins = [{'vcpu': vcpu, 'cpuset': "2"}, {'vcpu': vcpu, 'cpuset': "3"}]
            else:
                cputune.vcpupins = [{'vcpu': vcpu, 'cpuset': cputune_cpuset}]
            vmxml.cputune = cputune

        logging.debug(vmxml)
        if status_error and define_fail:
            result_to_check = virsh.define(vmxml.xml, debug=True)
        else:
            vmxml.sync()

        # start the vm
        if status_error and start_fail:
            result_to_check = virsh.start(vm_name, debug=True)

        if (not status_error) or runtime_fail:
            result_to_check = virsh.start(vm_name, debug=True, ignore_status=False)
            vm.wait_for_login(timeout=start_timeout).close()

            # test vcpu cpuset in offline/online host cpu scenario
            if check.endswith("offline_hostcpu"):
                if vm_down:
                    vm.shutdown()
                for x in offline_hostcpus.split(','):
                    if cpu.offline(x):
                        test.fail("fail to offline cpu{}".format(x))
                    logging.debug("offline host cpu {}".format(x))
                if vm_down:
                    vm.start()
                    vm.wait_for_login(timeout=start_timeout).close()
                if not status_error:
                    # online host cpu
                    if cpu.online(cputune_cpuset):
                        test.fail("fail to online cpu{}".format(cputune_cpuset))

            # run virsh vcpupin to config vcpu affinity
            if check.startswith("cputune") and (not config_xml):
                result_to_check = virsh.vcpupin(vm_name, vcpu, cputune_cpuset, debug=True)

            if check == "vcpu_placement":
                check_vcpu_placement(test, params)
            elif not status_error:
                check_vcpu_affinity()

        if 'result_to_check' in locals():
            if err_msg:
                err_msg = err_msg.split(";")
            libvirt.check_result(result_to_check, err_msg)

    finally:
        vmxml_backup.sync()

        # recovery the host cpu env
        for x in range(hostcpu_num):
            cpu.online(x)
        cmd = "echo '0-{}' > {}".format(hostcpu_num-1, machine_cpuset_path)
        process.run(cmd, shell=True)
Beispiel #41
0
    def modify_iface_xml(update, status_error=False):
        """
        Modify interface xml options
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        xml_devices = vmxml.devices
        iface_index = xml_devices.index(
            xml_devices.by_device_tag("interface")[0])
        iface = xml_devices[iface_index]
        if iface_model:
            iface.model = iface_model
        else:
            del iface.model
        if iface_type:
            iface.type_name = iface_type
        if iface_source:
            iface.source = eval(iface_source)
        else:
            del iface.source
        driver_dict = {}
        driver_host = {}
        driver_guest = {}
        if iface_driver:
            driver_dict = eval(iface_driver)
        if iface_driver_host:
            driver_host = eval(iface_driver_host)
        if iface_driver_guest:
            driver_guest = eval(iface_driver_guest)
        iface.driver = iface.new_driver(driver_attr=driver_dict,
                                        driver_host=driver_host,
                                        driver_guest=driver_guest)
        if iface.address:
            del iface.address

        logging.debug("New interface xml file: %s", iface)
        if unprivileged_user:
            # Create disk image for unprivileged user
            disk_index = xml_devices.index(
                xml_devices.by_device_tag("disk")[0])
            disk_xml = xml_devices[disk_index]
            logging.debug("source: %s", disk_xml.source)
            disk_source = disk_xml.source.attrs["file"]
            cmd = ("cp -f %s %s && chmod a+rw %s" %
                   (disk_source, dst_disk, dst_disk))
            utils.run(cmd)
            disk_xml.source = disk_xml.new_disk_source(
                attrs={"file": dst_disk})
            vmxml.devices = xml_devices
            # Remove all channels to avoid of permission problem
            channels = vmxml.get_devices(device_type="channel")
            for channel in channels:
                vmxml.del_device(channel)

            vmxml.xmltreefile.write()
            logging.debug("New VM xml: %s", vmxml)
            utils.run("chmod a+rw %s" % vmxml.xml)
            virsh.define(vmxml.xml, **virsh_dargs)
        # Try to modify interface xml by update-device or edit xml
        elif update:
            iface.xmltreefile.write()
            ret = virsh.update_device(vm_name, iface.xml,
                                      ignore_status=True)
            libvirt.check_exit_status(ret, status_error)
        else:
            vmxml.devices = xml_devices
            vmxml.xmltreefile.write()
            vmxml.sync()
def run(test, params, env):
    """
    Test extended TSEG on Q35 machine types
    <smm state='on'>
        <tseg unit='MiB'>48</tseg>
    </smm>

    Steps:
    1) Edit VM xml for smm or tseg sub element
    2) Verify if Guest can boot as expected
    3) On i440 machine types, the property is not supported.
       On Q35 machine types, both Seabios and OVMF guests can boot up
    """

    vm_name = params.get("main_vm", "")
    vm = env.get_vm(vm_name)
    smm_state = params.get("smm_state", "off")
    unit = params.get("tseg_unit")
    size = params.get("tseg_size")
    boot_type = params.get("boot_type", "")
    loader_type = params.get("loader_type")
    loader = params.get("loader")
    err_msg = params.get("error_msg", "")
    vm_arch_name = params.get("vm_arch_name", "x86_64")
    status_error = ("yes" == params.get("status_error", "no"))

    if not libvirt_version.version_compare(4, 5, 0):
        test.cancel("TSEG does not support in "
                    "current libvirt version")

    if (boot_type == "seabios" and
            not utils_package.package_install('seabios-bin')):
        test.cancel("Failed to install Seabios")

    if (boot_type == 'ovmf' and
            not utils_package.package_install('OVMF')):
        test.cancel("Failed to install OVMF")

    # Back up VM XML
    v_xml_backup = vm_xml.VMXML.new_from_dumpxml(vm_name)
    v_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)

    try:
        # Specify boot loader for OVMF
        if boot_type == 'ovmf':
            os_xml = v_xml.os
            os_xml.loader_type = loader_type
            os_xml.loader = loader
            os_xml.loader_readonly = "yes"
            v_xml.os = os_xml

        try:
            features_xml = v_xml.features
        except xcepts.LibvirtXMLNotFoundError:
            if vm_arch_name == 'x86_64':
                # ACPI is required for UEFI on x86_64
                v_xml.xmltreefile.create_by_xpath("/features/acpi")
                features_xml = v_xml.features
            else:
                features_xml = vm_xml.VMFeaturesXML()

        features_xml.smm = smm_state
        if unit and size:
            features_xml.smm_tseg_unit = unit
            features_xml.smm_tseg = size
        v_xml.features = features_xml

        logging.debug("New VM XML is:\n%s", v_xml)
        ret = virsh.define(v_xml.xml)
        utlv.check_result(ret, expected_fails=err_msg)

        # Check result
        if not status_error:
            vm.start()
            if unit and size:
                # If tseg unit is KiB, convert it to MiB
                # as vm dumpxml converts it automatically
                if unit == 'KiB':
                    unit, size = unify_to_MiB(unit, size)
                expect_line = "<tseg unit=\"%s\">%s</tseg>" % (unit, size)
                utlv.check_dumpxml(vm, expect_line)
                # Qemu cmdline uses mbytes
                unit, tseg_mbytes = unify_to_MiB(unit, size)
                expect_line = '-global mch.extended-tseg-mbytes=%s' % tseg_mbytes
                utlv.check_qemu_cmd_line(expect_line)
    finally:
        logging.debug("Restore the VM XML")
        if vm.is_alive():
            vm.destroy()
        # OVMF enables nvram by default
        v_xml_backup.sync(options="--nvram")
Beispiel #44
0
                # The index of the new pcie-bridge should be last_pci_index + 1
                new_index = last_pci_index + 1
                if case.endswith('less_than'):
                    new_bus = new_index + 1
                elif case.endswith('equal_to'):
                    new_bus = new_index
                else:
                    test.error('Invalid test: %s' % case)

                pci_br_kwargs['index'] = new_index
                pci_br_kwargs['address'] = pci_br_kwargs['address'] % (new_bus)
                logging.debug('pci_br_kwargs: %s', pci_br_kwargs)

                pcie_br = create_pci_device(pci_model, pci_model_name, **pci_br_kwargs)
                vmxml.add_device(pcie_br)
                result_to_check = virsh.define(vmxml.xml, debug=True)

            # Test define & start a VM with/without pcie-to-pci-bridge
            if case.startswith('vm_with_pcie_br'):
                if case.endswith('1_br'):
                    pass
                elif case.endswith('multi_br'):
                    # Add $pcie_br_count pcie-root-port to vm
                    for i in range(pcie_br_count):
                        temp_xml = VMXML.new_from_inactive_dumpxml(vm_name)
                        port = create_pci_device('pcie-root-port', 'pcie-root-port')
                        temp_xml.add_device(port)
                        temp_xml.sync()

                    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
                    pci_root_ports = [dev for dev in vmxml.get_devices('controller')
def run(test, params, env):
    """
    Test command: virsh domif-setlink and domif-getlink.

    The commands set and get the link state of a virtual interface.
    1. Prepare test environment.
    2. Perform virsh domif-setlink and domif-getlink operation.
    3. Recover test environment.
    4. Confirm the test result.
    """

    def domif_setlink(vm, device, operation, options):
        """
        Set the domain link state

        :param vm : domain name
        :param device : domain virtual interface
        :param operation : domain virtual interface state
        :param options : some options like --config

        """

        return virsh.domif_setlink(vm, device, operation, options, debug=True)

    def domif_getlink(vm, device, options):
        """
        Get the domain link state

        :param vm : domain name
        :param device : domain virtual interface
        :param options : some options like --config

        """

        return virsh.domif_getlink(vm, device, options,
                                   ignore_status=True, debug=True)

    def guest_if_state(if_name, session):
        """
        Get the domain link state from the guest
        """
        # Get link state by ethtool
        cmd = "ethtool %s" % if_name
        cmd_status, output = session.cmd_status_output(cmd)
        logging.info("ethtool exit: %s, output: %s",
                     cmd_status, output)
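        # ethtool output ends with a line like "Link detected: yes"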
        link_state = re.search("Link detected: ([a-zA-Z]+)",
                               output).group(1)
        return link_state == "yes"

    def check_update_device(vm, if_name, session):
        """
        Change link state with the update-device command and check the results
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm.name)

        # Get interface xml object
        iface = vmxml.get_devices(device_type="interface")[0]
        if iface.address:
            del iface.address

        # Change link state to up
        iface.link_state = "up"
        iface.xmltreefile.write()
        ret = virsh.update_device(vm.name, iface.xml,
                                  ignore_status=True, debug=True)
        if ret.exit_status:
            logging.error("Failed to update device to up state")
            return False
        if not guest_if_state(if_name, session):
            logging.error("Guest link should be up now")
            return False

        # Change link state to down
        iface.link_state = "down"
        iface.xmltreefile.write()
        ret = virsh.update_device(vm.name, iface.xml,
                                  ignore_status=True, debug=True)
        if ret.exit_status:
            logging.error("Failed to update device to down state")
            return False
        if guest_if_state(if_name, session):
            logging.error("Guest link should be down now")
            return False

        # Passed all test
        return True

    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm = env.get_vm(vm_name)
    options = params.get("if_options", "--config")
    start_vm = params.get("start_vm", "no")
    if_device = params.get("if_device", "net")
    if_name = params.get("if_name", "vnet0")
    if_operation = params.get("if_operation", "up")
    status_error = params.get("status_error", "no")
    mac_address = vm.get_virsh_mac_address(0)
    check_link_state = "yes" == params.get("check_link_state", "no")
    check_link_by_update_device = "yes" == params.get(
        "excute_update_device", "no")
    device = "vnet0"

    # Back up xml file.
    vm_xml_file = os.path.join(test.tmpdir, "vm.xml")
    virsh.dumpxml(vm_name, extra="--inactive", to_file=vm_xml_file)

    # Vm status
    if start_vm == "yes" and vm.is_dead():
        vm.start()

    elif start_vm == "no" and vm.is_alive():
        vm.destroy()

    try:
        # Test device net or mac address
        if if_device == "net" and vm.is_alive():
            device = if_name
            # Get all vm's interface device
            device = vm_xml.VMXML.get_net_dev(vm_name)[0]

        elif if_device == "mac":
            device = mac_address

        # Setlink operation
        result = domif_setlink(vm_name, device, if_operation, options)
        status = result.exit_status
        logging.info("Setlink done")

        # Getlink operation
        get_result = domif_getlink(vm_name, device, options)
        getlink_output = get_result.stdout.strip()

        # Check the getlink command output
        if not re.search(if_operation, getlink_output) and status_error == "no":
            raise error.TestFail("Getlink result should "
                                 "equal with setlink operation ", getlink_output)

        logging.info("Getlink done")
        # If --config is given should restart the vm then test link status
        if options == "--config" and vm.is_alive():
            vm.destroy()
            vm.start()
            logging.info("Restart VM")

        elif start_vm == "no":
            vm.start()

        error_msg = None
        if status_error == "no":
            # Serial login to the vm to check the link status
            session = vm.wait_for_serial_login()
            guest_if_name = utils_net.get_linux_ifname(session, mac_address)

            # Check link state in guest
            if check_link_state:
                if (if_operation == "up" and
                        not guest_if_state(guest_if_name, session)):
                    error_msg = "Link state should be up in guest"
                if (if_operation == "down" and
                        guest_if_state(guest_if_name, session)):
                    error_msg = "Link state should be down in guest"

            # Test of setting link state by update_device command
            if check_link_by_update_device:
                if not check_update_device(vm, guest_if_name, session):
                    error_msg = "Check update_device failed"

            # Set the link up make host connect with vm
            domif_setlink(vm_name, device, "up", "")
            # Ignore status of this one
            cmd_status = session.cmd_status('ifdown %s' % guest_if_name)
            cmd_status = session.cmd_status('ifup %s' % guest_if_name)
            if cmd_status != 0:
                error_msg = ("Could not bring up interface %s inside guest"
                             % guest_if_name)
        else:  # negative test
            # stop guest, so state is always consistent on next start
            vm.destroy()

        if error_msg:
            raise error.TestFail(error_msg)

        # Check status_error
        if status_error == "yes":
            if status:
                logging.info("Expected error (negative testing). Output: %s",
                             result.stderr.strip())

            else:
                raise error.TestFail("Unexpected return code %d "
                                     "(negative testing)" % status)
        elif status_error == "no":
            status = cmd_status
            if status:
                raise error.TestFail("Unexpected error (positive testing). "
                                     "Output: %s" % result.stderr.strip())

        else:
            raise error.TestError("Invalid value for status_error '%s' "
                                  "(must be 'yes' or 'no')" % status_error)
    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        virsh.undefine(vm_name)
        virsh.define(vm_xml_file)
        os.remove(vm_xml_file)
Beispiel #46
0
def run(test, params, env):
    """
    Test vcpu affinity feature as follows:
    positive test:
        1. use vcpu cpuset in xml to define vcpu affinity
        2. use cputune cpuset in xml to define vcpu affinity
        3. use offline-to-online host cpu as cpuset to run virsh vcpupin
        4. set vcpu placement in xml to auto and check xml result
        5. set vcpu cpuset in xml without placement defined and check xml result
        6. specify vcpu affinity for inactive vcpu
    negative test:
        1. use an out-of-range cpuset as vcpu cpuset in xml to define vcpu affinity
        2. use an out-of-range cpuset as cputune cpuset in xml to define vcpu affinity
        3. use an invalid cpuset as cputune cpuset in xml to define vcpu affinity
        4. use a duplicate vcpu in xml to define vcpu affinity
        5. use an offline host cpu as cputune cpuset to run virsh vcpupin
        6. set vcpu affinity for a non-existent vcpu and check xml result
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    cpuset_mask = params.get("cpuset_mask", "")
    vcpu = params.get("vcpu", "0")
    setvcpus_option = params.get("setvcpus_option", "")
    setvcpus_count = params.get("setvcpus_count", "0")
    vcpupin_option = params.get("vcpupin_option", "")
    maxvcpu = params.get("maxvcpu", "8")
    current_vcpu = params.get("current_vcpu", "3")
    check = params.get("check", "")
    config_xml = params.get("config_xml", "")

    status_error = "yes" == params.get("status_error", "no")
    define_fail = "yes" == params.get("define_fail", "no")
    start_fail = "yes" == params.get("start_fail", "no")
    runtime_fail = "yes" == params.get("runtime_fail", "no")
    hotplug_vcpu = "yes" == params.get("hotplug_vcpu", "no")

    vcpu_cpuset = params.get("vcpu_cpuset", "")
    cputune_cpuset = params.get("cputune_cpuset", "")
    vcpu_placement = params.get("vcpu_placement", "static")
    err_msg = params.get("err_msg", "")
    start_timeout = int(params.get("start_timeout", "180"))
    offline_hostcpus = params.get("offline_hostcpus", "")
    machine_cpuset_path = params.get("machine_cpuset_path", "")

    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    def check_vcpu_affinity():
        """
        check vcpu affinity defined by vcpu cpuset or cputune cpuset
        """
        affinity = vcpu_cpuset if not cputune_cpuset else cputune_cpuset
        affinity = {vcpu: affinity}
        virsh.vcpuinfo(vm_name, debug=True)
        host_cpu_count = cpuutil.total_cpus_count()

        vmxml_live = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug(vmxml_live)

        # if vcpu >= maxvcpu, the cputune should not exist in xml
        if int(vcpu) >= int(maxvcpu):
            try:
                if hasattr(vmxml_live, 'cputune'):
                    test.fail("cputune tag is set when vcpu >= maxvcpu")
            except xcepts.LibvirtXMLError:
                pass
        elif "config" in vcpupin_option:
            vcpu_affinity = cpu.affinity_from_vcpupin(vm, vcpu, vcpupin_option)
            affinity = cpu.cpus_string_to_affinity_list(
                str(affinity[vcpu]), host_cpu_count)
            logging.debug("vcpu_affinity {}".format(vcpu_affinity))
            logging.debug("affinity {}".format(affinity))
            if vcpu_affinity[int(vcpu)] != affinity:
                test.fail("vcpu affinity check fail")
        # check the expected vcpu affinity with the one got from running vm
        elif not cpu.check_affinity(vm, affinity):
            test.fail("vcpu affinity check fail")

    try:
        hostcpu_num = int(cpuutil.total_cpus_count())

        # online all host cpus
        for x in range(1, hostcpu_num):
            if cpuutil.online(x):
                test.fail("fail to online cpu{}".format(x))

        # use vcpu cpuset or/and cputune cpuset to define xml
        del vmxml.cputune
        del vmxml.vcpus
        del vmxml.placement
        vmxml.vcpu = int(maxvcpu)
        vmxml.current_vcpu = current_vcpu

        # Remove cpu topology to avoid a mismatch with the vcpu count
        if vmxml.get_cpu_topology():
            new_cpu = vmxml.cpu
            del new_cpu.topology
            vmxml.cpu = new_cpu

        # config vcpu cpuset for cpuset range test
        num = 1 if not status_error else 0
        cpuset_new = "0-{},^{}".format(hostcpu_num-num, cpuset_mask)
        if (config_xml == "vcpu" and check.endswith("range_cpuset")):
            vcpu_cpuset = cpuset_new
        vmxml.cpuset = vcpu_cpuset

        if vcpu_placement:
            vmxml.placement = vcpu_placement

            # Remove numatune node since it will be automatically set
            # under 'auto' state
            if vcpu_placement == 'auto':
                vmxml.xmltreefile.remove_by_xpath('/numatune', remove_all=True)
                vmxml.xmltreefile.write()

        if config_xml == "cputune":
            cputune = vm_xml.VMCPUTuneXML()
            if check.endswith("range_cpuset"):
                cputune_cpuset = cpuset_new
            if check.endswith("duplicate_vcpu"):
                cputune.vcpupins = [{'vcpu': vcpu, 'cpuset': "2"}, {'vcpu': vcpu, 'cpuset': "3"}]
            else:
                cputune.vcpupins = [{'vcpu': vcpu, 'cpuset': cputune_cpuset}]
            vmxml.cputune = cputune

        logging.debug(vmxml)
        if status_error and define_fail:
            result_to_check = virsh.define(vmxml.xml, debug=True)
        else:
            vmxml.sync()

        # test vcpu cpuset in offline/online host cpu scenario
        if check.endswith("offline_hostcpu"):
            for x in offline_hostcpus.split(','):
                if cpuutil.offline(x):
                    test.fail("fail to offline cpu{}".format(x))
                logging.debug("offline host cpu {}".format(x))

        # start the vm
        if status_error and start_fail:
            result_to_check = virsh.start(vm_name, debug=True)

        if (not status_error) or runtime_fail:
            vm.start()
            vm.wait_for_login(timeout=start_timeout).close()

            # test vcpu cpuset in offline/online host cpu scenario
            if check.endswith("offline_hostcpu") and not status_error:
                # online host cpu
                if cpuutil.online(cputune_cpuset):
                    test.fail("fail to online cpu{}".format(cputune_cpuset))

            # run virsh vcpupin to config vcpu affinity
            if check.startswith("cputune") and (not config_xml):
                result_to_check = virsh.vcpupin(vm_name, vcpu, cputune_cpuset, vcpupin_option, debug=True)

            # hotplug vcpu test scenario
            if hotplug_vcpu:
                virsh.setvcpus(vm_name, setvcpus_count, setvcpus_option, debug=True, ignore_status=False)

            libvirtd_restart = False
            while True:
                if check == "vcpu_placement":
                    check_vcpu_placement(test, params)
                elif not status_error:
                    check_vcpu_affinity()
                if libvirtd_restart:
                    break
                # restart libvirtd and check vcpu affinity again
                utils_libvirtd.Libvirtd().restart()
                libvirtd_restart = True

        if 'result_to_check' in locals():
            if err_msg:
                err_msg = err_msg.split(";")
            libvirt.check_result(result_to_check, err_msg)

    finally:
        vmxml_backup.sync()

        # recovery the host cpu env
        for x in range(1, hostcpu_num):
            cpuutil.online(x)
        cmd = "echo '0-{}' > {}".format(hostcpu_num-1, machine_cpuset_path)
        process.run(cmd, shell=True)
Beispiel #47
0
def run_virsh_autostart(test, params, env):
    """
    Test command: virsh autostart

    Set(or disable) autostart for a domain
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    persistent_vm = "yes" == params.get("persistent_vm", "yes")
    readonly_mode = "yes" == params.get("readonly_mode", "no")
    autostart_vm = "yes" == params.get("autostart_vm", "no")
    autostart_extra = params.get("autostart_extra", "")
    status_error = "yes" == params.get("status_error", "no")

    # Prepare transient/persistent vm
    original_xml = vm.backup_xml()
    if not persistent_vm and vm.is_persistent():
        vm.undefine()
    elif persistent_vm and not vm.is_persistent():
        vm.define(original_xml)

    original_autost = vm.is_autostart()
    logging.debug("Original VM %s autostart: %s", vm_name, original_autost)
    options = " "
    if not autostart_vm:
        options = "--disable "
    if autostart_extra:
        options += autostart_extra
    # Readonly mode
    ro_flag = False
    if readonly_mode:
        ro_flag = True

    # Result check
    def autostart_check():
        """
        Check whether the VM autostarts as expected
        """
        res = False
        if autostart_vm and vm.is_autostart() and vm.is_alive():
            logging.debug("VM autostart as expected")
            res = True
        if not autostart_vm and not vm.is_autostart() and vm.is_dead():
            logging.debug("VM not autostart as expected")
            res = True
        return res

    # Run test
    try:
        # Make sure the VM is inactive (except for a transient VM)
        if vm.is_persistent() and vm.is_alive():
            vm.destroy()
        cmd_result = virsh.autostart(vm_name, options, ignore_status=True, debug=True, readonly=ro_flag)
        err = cmd_result.stderr.strip()
        status = cmd_result.exit_status
        # Restart libvirtd
        utils_libvirtd.libvirtd_restart()
        if not status_error:
            if status:
                raise error.TestFail(err)
            elif not autostart_check():
                raise error.TestFail("Autostart check fail")
        elif status_error and status == 0:
            raise error.TestFail("Expect fail, but run successfully.")
    finally:
        # Recover env
        vm.destroy()
        if not vm.is_persistent():
            virsh.define(original_xml)
            os.remove(original_xml)
        if original_autost and not vm.is_autostart():
            virsh.autostart(vm_name, "")
        elif not original_autost and vm.is_autostart():
            virsh.autostart(vm_name, "--disable")
def run(test, params, env):
    """
    Integration test of backup and backing_chain.

    Steps:
    1. create a vm with an extra disk vdb
    2. create some data on vdb
    3. start a pull mode full backup on vdb
    4. create some data on vdb
    5. start a pull mode incremental backup
    6. repeat steps 4 and 5
    7. before the last round of backup jobs, do a blockcommit/pull/copy
    8. check the full/incremental backup file data
    """

    def run_blk_cmd():
        """
        Run blockcommit/blockpull/blockcopy command.
        """

        def run_blockpull():
            """
            Run blockpull command.
            """
            if from_to == "mid_to_top":
                cmd_option = ("--base {0}[{1}] --wait").format(original_disk_target,
                                                               middle_layer1_index)
            elif from_to == "base_to_top":
                cmd_option = ("--base {0}[{1}] --wait").format(original_disk_target,
                                                               base_layer_index)
            virsh.blockpull(vm_name, original_disk_target, cmd_option,
                            debug=True, ignore_status=False)

        def run_blockcommit():
            """
            Run blockcommit command.
            """
            if from_to == "top_to_base":
                # Do blockcommit from top layer to base layer
                cmd_option = ("--top {0}[{1}] --base {0}[{2}] --active --pivot "
                              "--wait".format(original_disk_target,
                                              top_layer_index,
                                              base_layer_index)
                              )

            elif from_to == "mid_to_mid":
                # Do blockcommit from middle layer to another middle layer
                if len(indice) < 4:
                    test.fail("At lease 4 layers required for the test 'mid_to_mid'")
                cmd_option = ("--top {0}[{1}] --base {0}[{2}] "
                              "--wait".format(original_disk_target,
                                              middle_layer1_index,
                                              middle_layer2_index)
                              )
            elif from_to == "top_to_mid":
                # Do blockcommit from top layer to middle layer
                cmd_option = ("--top {0}[{1}] --base {0}[{2}] --active --pivot "
                              "--wait".format(original_disk_target,
                                              top_layer_index,
                                              middle_layer1_index)
                              )
            elif from_to == "mid_to_base":
                # Do blockcommit from middle layer to base layer
                cmd_option = ("--top {0}[{1}] --base {0}[{2}] "
                              "--wait".format(original_disk_target,
                                              middle_layer1_index,
                                              base_layer_index)
                              )
            virsh.blockcommit(vm_name, original_disk_target, cmd_option,
                              debug=True, ignore_status=False)

        def run_blockcopy():
            """
            Run blockcopy command.
            """
            copy_dest = os.path.join(tmp_dir, "copy_dest.qcow2")
            cmd_option = "--wait --verbose --transient-job --pivot"
            if blockcopy_method == "shallow_copy":
                cmd_option += " --shallow"
            if blockcopy_reuse == "reuse_external":
                cmd_option += " --reuse-external"
                if blockcopy_method == "shallow_copy":
                    create_img_cmd = "qemu-img create -f qcow2 -F qcow2 -b %s %s"
                    create_img_cmd %= (backend_img, copy_dest)
                else:
                    create_img_cmd = "qemu-img create -f qcow2 %s %s"
                    create_img_cmd %= (copy_dest, original_disk_size)
                process.run(create_img_cmd, shell=True, ignore_status=False)
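                # Note: with --reuse-external, libvirt expects the destination
                # image to already exist (and, for --shallow, to be backed by
                # the source's current backing file), hence the qemu-img
                # pre-creation above.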
            virsh.blockcopy(vm_name, original_disk_target, copy_dest, cmd_option,
                            debug=True, ignore_status=False)

        # Get disk backing store indice info in vm disk xml
        cur_vm_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        cur_disk_xmls = cur_vm_xml.get_devices(device_type="disk")
        cur_test_disk_xml = ''
        for disk_xml in cur_disk_xmls:
            if disk_xml.target['dev'] == original_disk_target:
                cur_test_disk_xml = disk_xml
                logging.debug("Current disk xml for %s is:\n %s",
                              original_disk_target, cur_test_disk_xml)
                break
        indice = re.findall(r"index=['\"](\d+)['\"]", str(cur_test_disk_xml))
        logging.debug("backing store indices for %s are: %s",
                      original_disk_target, indice)
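        # For reference, libvirt assigns an 'index' attribute to every layer
        # of the disk's backing chain, top layer first; the disk xml typically
        # looks roughly like (illustrative):
        #   <source index='4' file='.../snap_2'/>
        #   <backingStore type='file' index='3'>
        #     <source file='.../snap_1'/>
        #   </backingStore>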
        if len(indice) < 3:
            test.fail("At least 3 layers required for the test.")
        top_layer_index = indice[0]
        middle_layer1_index = indice[1]
        middle_layer2_index = indice[-2]
        base_layer_index = indice[-1]
        logging.debug("Following backing store will be used: %s",
                      "top:%s; middle_1: %s, middle_2:%s, base: %s"
                      % (top_layer_index, middle_layer1_index,
                         middle_layer2_index, base_layer_index)
                      )
        # Start the block command
        if blockcommand == "blockpull":
            run_blockpull()
        if blockcommand == "blockcommit":
            run_blockcommit()
        if blockcommand == "blockcopy":
            run_blockcopy()

    def create_shutoff_snapshot(original_img, snapshot_img):
        """
        Create a shutoff snapshot: a disk snapshot that is not managed
        by libvirt but created directly with qemu-img.

        :param original_img: The image we will take shutoff snapshot for.
        :param snapshot_img: The newly created shutoff snapshot image.
        """
        cmd = "qemu-img info --output=json -f qcow2 {}".format(original_img)
        img_info = process.run(cmd, shell=True, ignore_status=False).stdout_text
        json_data = json.loads(img_info)
        cmd = "qemu-img create -f qcow2 -F qcow2 -b {0} {1}".format(original_img, snapshot_img)
        process.run(cmd, shell=True, ignore_status=False)
        try:
            bitmaps = json_data['format-specific']['data']['bitmaps']
            for bitmap in bitmaps:
                bitmap_flags = bitmap['flags']
                bitmap_name = bitmap['name']
                if 'auto' in bitmap_flags and 'in-use' not in bitmap_flags:
                    cmd = "qemu-img bitmap -f qcow2 {0} --add {1}".format(snapshot_img, bitmap_name)
                    process.run(cmd, shell=True, ignore_status=False)
        except Exception as bitmap_error:
            logging.debug("Cannot add bitmap to new image, skip it: %s",
                          bitmap_error)
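        # Note: the 'qemu-img bitmap' subcommand used above only exists in
        # relatively recent qemu releases; on older hosts the bitmap copy
        # fails and is skipped by the except clause.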

    # Cancel the test if libvirt version is too low
    if not libvirt_version.version_compare(6, 0, 0):
        test.cancel("Current libvirt version doesn't support "
                    "incremental backup.")

    # vm's original disk config
    original_disk_size = params.get("original_disk_size", "100M")
    original_disk_type = params.get("original_disk_type", "local")
    original_disk_target = params.get("original_disk_target", "vdb")

    # pull mode backup config
    scratch_type = params.get("scratch_type", "file")
    nbd_protocol = params.get("nbd_protocol", "tcp")
    nbd_tcp_port = params.get("nbd_tcp_port", "10809")

    # test config
    backup_rounds = int(params.get("backup_rounds", 4))
    shutoff_snapshot = "yes" == params.get("shutoff_snapshot")
    blockcommand = params.get("blockcommand")
    from_to = params.get("from_to")
    blockcopy_method = params.get("blockcopy_method")
    blockcopy_reuse = params.get("blockcopy_reuse")
    backup_error = "yes" == params.get("backup_error")
    tmp_dir = data_dir.get_tmp_dir()

    try:
        vm_name = params.get("main_vm")
        vm = env.get_vm(vm_name)

        # Backup vm xml
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        vmxml_backup = vmxml.copy()
        disks_not_tested = list(vmxml.get_disk_all().keys())
        logging.debug("Not tested disks are: %s", disks_not_tested)

        # Enable vm incremental backup capability. This is only a workaround
        # to make sure incremental backup can work for the vm. This code needs
        # to be removed as soon as the feature is enabled by default, which
        # is tracked by bz1799015.
        tree = ET.parse(vmxml.xml)
        root = tree.getroot()
        for elem in root.iter('domain'):
            elem.set('xmlns:qemu', 'http://libvirt.org/schemas/domain/qemu/1.0')
            qemu_cap = ET.Element("qemu:capabilities")
            elem.insert(-1, qemu_cap)
            incbackup_cap = ET.Element("qemu:add")
            incbackup_cap.set('capability', 'incremental-backup')
            qemu_cap.insert(1, incbackup_cap)
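        # The loop above should produce domain xml roughly like (illustrative):
        #   <domain type='kvm'
        #           xmlns:qemu='http://libvirt.org/schemas/domain/qemu/1.0'>
        #     ...
        #     <qemu:capabilities>
        #       <qemu:add capability='incremental-backup'/>
        #     </qemu:capabilities>
        #   </domain>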
        vmxml.undefine()
        tmp_vm_xml = os.path.join(tmp_dir, "tmp_vm.xml")
        tree.write(tmp_vm_xml)
        virsh.define(tmp_vm_xml)
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        logging.debug("Script insert xml elements to make sure vm can support "
                      "incremental backup. This should be removded when "
                      "bz 1799015 fixed.")

        # Destroy vm before test
        if vm.is_alive():
            vm.destroy(gracefully=False)

        # Prepare the disk to be backed up.
        disk_params = {}
        disk_path = ""
        if original_disk_type == "local":
            image_name = "%s_image.qcow2" % original_disk_target
            disk_path = os.path.join(tmp_dir, image_name)
            libvirt.create_local_disk("file", disk_path, original_disk_size,
                                      "qcow2")
            disk_params = {"device_type": "disk",
                           "type_name": "file",
                           "driver_type": "qcow2",
                           "target_dev": original_disk_target,
                           "source_file": disk_path}
        else:
            test.cancel("The disk type '%s' is not supported by this script."
                        % original_disk_type)
        disk_xml = libvirt.create_disk_xml(disk_params)
        virsh.attach_device(vm.name, disk_xml,
                            flagstr="--config", debug=True)
        vm.start()
        session = vm.wait_for_login()
        new_disks_in_vm = list(utils_disk.get_linux_disks(session).keys())
        session.close()
        if len(new_disks_in_vm) != 1:
            test.fail("Test disk not prepared in vm")

        # Use the newly added disk as the test disk
        test_disk_in_vm = "/dev/" + new_disks_in_vm[0]

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        vm_disks = list(vmxml.get_disk_all().keys())

        checkpoint_list = []
        is_incremental = False
        backup_file_list = []
        snapshot_list = []
        cur_disk_xml = disk_xml
        cur_disk_path = disk_path
        cur_disk_params = disk_params
        backend_img = ""
        for backup_index in range(backup_rounds):
            # Do external snapshot
            if shutoff_snapshot:
                virsh.detach_disk(vm.name, original_disk_target,
                                  extra="--persistent",
                                  ignore_status=False, debug=True)
                if vm.is_alive():
                    vm.destroy(gracefully=False)
                shutoff_snapshot_name = "shutoff_snap_%s" % str(backup_index)
                shutoff_snapshot_path = os.path.join(tmp_dir, shutoff_snapshot_name)

                create_shutoff_snapshot(cur_disk_path, shutoff_snapshot_path)
                cur_disk_params["source_file"] = shutoff_snapshot_path
                cur_disk_xml = libvirt.create_disk_xml(cur_disk_params)
                virsh.attach_device(vm.name, cur_disk_xml,
                                    flagstr="--config",
                                    ignore_status=False,
                                    debug=True)
                vm.start()
                vm.wait_for_login().close()
                cur_disk_path = shutoff_snapshot_path
            else:
                snapshot_name = "snap_%s" % str(backup_index)
                snapshot_option = ""
                snapshot_file_name = os.path.join(tmp_dir, snapshot_name)
                for disk_name in disks_not_tested:
                    snapshot_option += "--diskspec %s,snapshot=no " % disk_name
                snapshot_option += "--diskspec %s,file=%s" % (original_disk_target,
                                                              snapshot_file_name)
                virsh.snapshot_create_as(vm_name,
                                         "%s --disk-only %s" % (snapshot_name,
                                                                snapshot_option),
                                         debug=True)
                snapshot_list.append(snapshot_name)

            # Prepare backup xml
            backup_params = {"backup_mode": "pull"}
            if backup_index > 0:
                is_incremental = True
                backup_params["backup_incremental"] = "checkpoint_" + str(backup_index - 1)

            # Set libvirt default nbd export name and bitmap name
            nbd_export_name = original_disk_target
            nbd_bitmap_name = "backup-" + original_disk_target

            backup_server_dict = {"name": "localhost",
                                  "port": nbd_tcp_port}
            backup_params["backup_server"] = backup_server_dict
            backup_disk_xmls = []
            for vm_disk in vm_disks:
                backup_disk_params = {"disk_name": vm_disk}
                if vm_disk != original_disk_target:
                    backup_disk_params["enable_backup"] = "no"
                else:
                    backup_disk_params["enable_backup"] = "yes"
                    backup_disk_params["disk_type"] = scratch_type

                    # Prepare nbd scratch file/dev params
                    scratch_params = {}
                    scratch_file_name = "scratch_file_%s" % backup_index
                    scratch_file_path = os.path.join(tmp_dir, scratch_file_name)
                    scratch_params["file"] = scratch_file_path
                    logging.debug("scratch_params: %s", scratch_params)
                    backup_disk_params["backup_scratch"] = scratch_params
                backup_disk_xml = utils_backup.create_backup_disk_xml(
                        backup_disk_params)
                backup_disk_xmls.append(backup_disk_xml)
            logging.debug("disk list %s", backup_disk_xmls)
            backup_xml = utils_backup.create_backup_xml(backup_params,
                                                        backup_disk_xmls)
            logging.debug("ROUND_%s Backup Xml: %s", backup_index, backup_xml)

            # Prepare checkpoint xml
            checkpoint_name = "checkpoint_%s" % backup_index
            checkpoint_list.append(checkpoint_name)
            cp_params = {"checkpoint_name": checkpoint_name}
            cp_params["checkpoint_desc"] = params.get("checkpoint_desc",
                                                      "desc of cp_%s" % backup_index)
            disk_param_list = []
            for vm_disk in vm_disks:
                cp_disk_param = {"name": vm_disk}
                if vm_disk != original_disk_target:
                    cp_disk_param["checkpoint"] = "no"
                else:
                    cp_disk_param["checkpoint"] = "bitmap"
                    cp_disk_bitmap = params.get("cp_disk_bitmap")
                    if cp_disk_bitmap:
                        cp_disk_param["bitmap"] = cp_disk_bitmap + str(backup_index)
                disk_param_list.append(cp_disk_param)
            checkpoint_xml = utils_backup.create_checkpoint_xml(cp_params,
                                                                disk_param_list)
            logging.debug("ROUND_%s Checkpoint Xml: %s",
                          backup_index, checkpoint_xml)

            # Start backup
            backup_options = backup_xml.xml + " " + checkpoint_xml.xml

            # Create some data in vdb
            dd_count = "1"
            dd_seek = str(backup_index * 10 + 10)
            dd_bs = "1M"
            utils_disk.dd_data_to_vm_disk(vm, test_disk_in_vm, dd_bs,
                                          dd_seek, dd_count)

            backup_result = virsh.backup_begin(vm_name, backup_options,
                                               debug=True)
            if backup_result.exit_status:
                raise utils_backup.BackupBeginError(backup_result.stderr.strip())

            backup_file_path = os.path.join(
                    tmp_dir, "backup_file_%s.qcow2" % str(backup_index))
            backup_file_list.append(backup_file_path)
            if not is_incremental:
                # Do full backup
                nbd_export = ("nbd://localhost:%s/%s" %
                              (nbd_tcp_port, nbd_export_name))
                utils_backup.pull_full_backup_to_file(nbd_export,
                                                      backup_file_path)
                logging.debug("Full backup to: %s", backup_file_path)
            else:
                # Do incremental backup
                nbd_params = {"nbd_protocol": nbd_protocol,
                              "nbd_export": nbd_export_name,
                              "nbd_tcp_port": nbd_tcp_port}
                utils_backup.pull_incremental_backup_to_file(
                        nbd_params, backup_file_path, nbd_bitmap_name,
                        original_disk_size)
            virsh.domjobabort(vm_name, debug=True)
            # Start to run the blockcommit/blockpull cmd before the last round
            # of backup job, this is to test if the block command will keep the
            # dirty bitmap data.
            if backup_index == backup_rounds - 2:
                run_blk_cmd()
                cur_disk_path = vm.get_blk_devices()[original_disk_target]['source']

            if backup_index == backup_rounds - 3:
                backend_img = vm.get_blk_devices()[original_disk_target]['source']

        # Get current active image for the test disk
        vm_disks = vm.get_blk_devices()
        current_active_image = vm_disks[original_disk_target]['source']
        logging.debug("The current active image for '%s' is '%s'",
                      original_disk_target, current_active_image)

        for checkpoint_name in checkpoint_list:
            virsh.checkpoint_delete(vm_name, checkpoint_name,
                                    debug=True, ignore_status=False)
        if vm.is_alive():
            vm.destroy(gracefully=False)

        # Compare the backup data and original data

        original_data_file = os.path.join(tmp_dir, "original_data.qcow2")
        cmd = "qemu-img convert -f qcow2 %s -O qcow2 %s" % (current_active_image, original_data_file)
        process.run(cmd, shell=True, verbose=True)
        for backup_file in backup_file_list:
            if not utils_backup.cmp_backup_data(original_data_file, backup_file):
                test.fail("Backup and original data are not identical for"
                          "'%s' and '%s'" % (current_active_image, backup_file))
            else:
                logging.debug("'%s' contains correct backup data", backup_file)
    except utils_backup.BackupBeginError as details:
        if backup_error:
            logging.debug("Backup failed as expected.")
        else:
            test.fail(details)
    finally:
        # Remove checkpoints' metadata again to make sure vm has no checkpoints
        if "checkpoint_list" in locals():
            for checkpoint_name in checkpoint_list:
                virsh.checkpoint_delete(vm_name, checkpoint_name,
                                        options="--metadata")
        # Remove snapshots
        if "snapshot_list" in locals():
            for snapshot_name in snapshot_list:
                virsh.snapshot_delete(vm_name, "%s --metadata" % snapshot_name,
                                      debug=True)

        if vm.is_alive():
            vm.destroy(gracefully=False)

        # Restoring vm
        vmxml_backup.sync()

        for file_name in os.listdir(tmp_dir):
            file_path = os.path.join(tmp_dir, file_name)
            if 'env' not in file_path:
                if os.path.isfile(file_path):
                    os.remove(file_path)
                elif os.path.isdir(file_path):
                    shutil.rmtree(file_path)
Beispiel #49
def run(test, params, env):
    """
    Test command: virsh desc.

    This command shows or modifies the description or title of a domain.
    1). For a running domain, get/set description&title with options.
    2). For a shut off domain, get/set description&title with options.
    3). For a persistent/transient domain, get/set description&title with options.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    options = params.get("desc_option", "")
    persistent_vm = params.get("persistent_vm", "yes")
    domain = params.get("domain", "name")
    if domain == "UUID":
        vm_name = vm.get_uuid()
    elif domain == "invalid_domain":
        vm_name = "domain_" + str(uuid.uuid1())
    elif domain == "invalid_uuid":
        vm_name = uuid.uuid1()

    def run_cmd(name, options, desc_str, status_error):
        """
        Run virsh desc command

        :return: cmd output
        """
        if "--edit" not in options:
            cmd_result = virsh.desc(name, options, desc_str, ignore_status=True,
                                    debug=True)
            output = cmd_result.stdout.strip()
            err = cmd_result.stderr.strip()
            status = cmd_result.exit_status
        else:
            logging.debug("Setting domain desc \"%s\" by --edit", desc_str)
            session = aexpect.ShellSession("sudo -s")
            try:
                session.sendline("virsh -c %s desc %s --edit" %
                                 (vm.connect_uri, name))
                session.sendline("dgg")
                session.sendline("dG")
                session.sendline(":%s/^$/" + desc_str + "/")
                session.send('\x1b')
                session.send('ZZ')
                match, text = session.read_until_any_line_matches(
                    [r"Domain description updated successfully"],
                    timeout=10, internal_timeout=1)
                session.close()
                if match == -1:
                    status = 0
                    output = "Domain description updated successfully"
                else:
                    status = 1
                    err = "virsh desc --edit fails"
            except Exception:
                test.fail("Fail to create session.")
        if status_error == "no" and status:
            test.fail(err)
        elif status_error == "yes" and status == 0:
            test.fail("Expect fail, but run successfully.")
        return output

    def vm_state_switch():
        """
        Switch the vm state
        """
        if vm.is_dead():
            vm.start()
        if vm.is_alive():
            vm.destroy()

    def desc_check(name, desc_str, options):
        """
        Check the domain's description or title
        """
        ret = False
        state_switch = False
        if options.count("--config") and vm.is_persistent():
            state_switch = True
        # If both --live and --config are specified, the --config
        # option takes precedence when getting the current description,
        # and both the live configuration and the config are updated
        # when setting the description.
        # This situation only happens when the vm is alive.
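        # e.g. (illustrative):
        #   virsh desc <vm> --live --config "new desc"  # updates both copies
        #   virsh desc <vm> --live --config             # reads the config copy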
        if options.count("--config") and options.count("--live"):
            # Just test options exclude --config (--live [--title])
            desc_check(name, desc_str, options.replace("--config", ""))
            # Just test options exclude --live (--config [--title])
            desc_check(name, desc_str, options.replace("--live", ""))
            ret = True
        else:
            if state_switch:
                vm_state_switch()
            # --new-desc and --edit option should not appear in check
            if options.count("--edit") or options.count("--new-desc"):
                output = run_cmd(name, "", "", "no")
            else:
                output = run_cmd(name, options, "", "no")
            if desc_str == output:
                logging.debug("Domain desc check successfully.")
                ret = True
            else:
                test.fail("Expect fail, but run successfully.")

        return ret

    def run_test():
        """
        Get/Set vm desc by running virsh desc command.
        """
        status_error = params.get("status_error", "no")
        desc_str = params.get("desc_str", "")
        # Test 1: get vm desc
        if "--edit" not in options:
            if "--new-desc" in options:
                run_cmd(vm_name, options, "", "yes")
            else:
                run_cmd(vm_name, options, "", status_error)
        # Test 2: set vm desc
        if options.count("--live") and vm.state() == "shut off":
            status_error = "yes"
        if len(desc_str) == 0 and status_error == "no":
            desc_str = "New Description/title for the %s vm" % vm.state()
            logging.debug("Use the default desc message: %s", desc_str)
        run_cmd(vm_name, options, desc_str, status_error)
        if status_error == "no":
            desc_check(vm_name, desc_str, options)

    # Prepare transient/persistent vm
    original_xml = vm.backup_xml()
    if persistent_vm == "no" and vm.is_persistent():
        vm.undefine()
    elif persistent_vm == "yes" and not vm.is_persistent():
        vm.define(original_xml)
    try:
        if vm.is_dead():
            vm.start()
        if domain == "ID":
            vm_name = vm.get_id()
        run_test()
        # Recover the vm and shut it off
        if persistent_vm == "yes" and domain != "ID":
            vm.define(original_xml)
            vm.destroy()
            run_test()
    finally:
        vm.destroy(False)
        virsh.define(original_xml)
        os.remove(original_xml)
Beispiel #50
def run(test, params, env):
    """
    Test command: virsh domif-setlink and domif-getlink.

    The commands set and get the link state of a virtual interface.
    1. Prepare test environment.
    2. Perform virsh domif-setlink and domif-getlink operation.
    3. Recover test environment.
    4. Confirm the test result.
    """
    def domif_setlink(vm, device, operation, options):
        """
        Set the domain link state

        :param vm : domain name
        :param device : domain virtual interface
        :param operation : domain virtual interface state
        :param options : some options like --config

        """

        return virsh.domif_setlink(vm, device, operation, options, debug=True)

    def domif_getlink(vm, device, options):
        """
        Get the domain link state

        :param vm : domain name
        :param device : domain virtual interface
        :param options : some options like --config

        """

        return virsh.domif_getlink(vm,
                                   device,
                                   options,
                                   ignore_status=True,
                                   debug=True)

    def guest_cmd_check(cmd, session, pattern):
        """
        Check cmd output with pattern in session
        """
        try:
            cmd_status, output = session.cmd_status_output(cmd, timeout=10)
            logging.info("exit: %s, output: %s", cmd_status, output)
            return re.search(pattern, output)
        except (aexpect.ShellTimeoutError, aexpect.ShellStatusError) as e:
            logging.debug(e)
            return re.search(pattern, str(e))

    def guest_if_state(if_name, session):
        """
        Get the domain link state from the guest
        """
        # Get link state by ethtool
        cmd = "ethtool %s" % if_name
        pattern = "Link detected: ([a-zA-Z]+)"
        ret = guest_cmd_check(cmd, session, pattern)
        if ret:
            return ret.group(1) == "yes"
        else:
            return False

    def check_update_device(vm, if_name, session):
        """
        Change link state via the update-device command and check the results
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm.name)

        # Get interface xml object
        iface = vmxml.get_devices(device_type="interface")[0]
        if iface.address:
            del iface.address

        # Change link state to up
        iface.link_state = "up"
        iface.xmltreefile.write()
        ret = virsh.update_device(vm.name,
                                  iface.xml,
                                  ignore_status=True,
                                  debug=True)
        if ret.exit_status:
            logging.error("Failed to update device to up state")
            return False
        if not utils_misc.wait_for(lambda: guest_if_state(if_name, session),
                                   5):
            logging.error("Guest link should be up now")
            return False

        # Change link state to down
        iface.link_state = "down"
        iface.xmltreefile.write()
        ret = virsh.update_device(vm.name,
                                  iface.xml,
                                  ignore_status=True,
                                  debug=True)
        if ret.exit_status:
            logging.error("Failed to update device to down state")
            return False
        if utils_misc.wait_for(lambda: guest_if_state(if_name, session), 5):
            logging.error("Guest link should be down now")
            return False

        # Passed all test
        return True

    vm_name = []
    # vm_name list:first element for original name in config
    vm_name.append(params.get("main_vm", "avocado-vt-vm1"))
    vm = env.get_vm(vm_name[0])
    options = params.get("if_options", "--config")
    start_vm = params.get("start_vm", "no")
    domain = params.get("domain", "name")
    if_device = params.get("if_device", "net")
    if_name = params.get("if_name", "vnet0")
    if_operation = params.get("if_operation", "up")
    status_error = params.get("status_error", "no")
    mac_address = vm.get_virsh_mac_address(0)
    model_type = params.get("model_type", "virtio")
    check_link_state = "yes" == params.get("check_link_state", "no")
    check_link_by_update_device = "yes" == params.get("excute_update_device",
                                                      "no")
    device = "vnet0"
    username = params.get("username")
    password = params.get("password")
    post_action = params.get("post_action")
    save_file = os.path.join(data_dir.get_data_dir(), "vm.save")

    # Back up xml file.
    vm_xml_file = os.path.join(data_dir.get_data_dir(), "vm.xml")
    virsh.dumpxml(vm_name[0], extra="--inactive", to_file=vm_xml_file)

    # Update model type of the interface, delete the pci address to let libvirt
    # generate a new one suitable for the model
    if vm.is_alive():
        vm.destroy()
    iface_dict = {'model': model_type, 'del_addr': 'yes'}
    libvirt.modify_vm_iface(vm_name[0], "update_iface", iface_dict)
    # Vm status
    if start_vm == "yes" and vm.is_dead():
        vm.start()

    elif start_vm == "no" and vm.is_alive():
        vm.destroy()

    # vm_name list: second element for 'domain' in virsh command
    if domain == "ID":
        # Get ID for the running domain
        vm_name.append(vm.get_id())
    elif domain == "UUID":
        # Get UUID for the domain
        vm_name.append(vm.get_uuid())
    elif domain == "no_match_UUID":
        # Generate a random UUID
        vm_name.append(uuid.uuid1())
    elif domain == "no_match_name":
        # Generate a random string as domain name
        vm_name.append(utils_misc.generate_random_string(6))
    elif domain == " ":
        # Set domain name empty
        vm_name.append("''")
    else:
        # Set domain name
        vm_name.append(vm_name[0])

    try:
        # Test device net or mac address
        if if_device == "net" and vm.is_alive():
            device = if_name
            # Get all vm's interface device
            device = vm_xml.VMXML.get_net_dev(vm_name[0])[0]

        elif if_device == "mac":
            device = mac_address

        # Test no exist device
        if if_device == "no_exist_net":
            device = "vnet-1"
        elif if_device == "no_exist_mac":
            # Generate random mac address for negative test
            device = utils_net.VirtIface.complete_mac_address("01:02")
        elif if_device == " ":
            device = "''"

        # Setlink operation
        result = domif_setlink(vm_name[1], device, if_operation, options)
        status = result.exit_status
        logging.info("Setlink done")

        if post_action == "restart_libvirtd":
            utils_libvirtd.libvirtd_restart()
        elif post_action == "save_restore":
            vm.save_to_file(save_file)
            vm.restore_from_file(save_file)

        # Getlink operation
        get_result = domif_getlink(vm_name[1], device, options)
        getlink_output = get_result.stdout.strip()

        # Check the getlink command output
        if status_error == "no":
            if not re.search(if_operation, getlink_output):
                test.fail("Getlink result should "
                          "equal with setlink operation")

        logging.info("Getlink done")

        # Check guest xml about link status
        if post_action == "save_restore":
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm.name)
            iface = vmxml.get_devices(device_type="interface")[0]
            logging.debug("Guest current interface xml is %s" % iface)
            if iface.link_state != if_operation:
                test.fail("link state in guest xml should be %s" %
                          if_operation)

        # If --config or --persistent is given, restart the vm and then test the link status
        if any(options == option
               for option in ["--config", "--persistent"]) and vm.is_alive():
            vm.destroy()
            vm.start()
            logging.info("Restart VM")

        elif start_vm == "no":
            vm.start()

        error_msg = None
        if status_error == "no" and not post_action:
            # Serial login to the vm to check the link status
            session = vm.wait_for_serial_login(username=username,
                                               password=password)
            guest_if_name = utils_net.get_linux_ifname(session, mac_address)

            # Check link state in guest
            if check_link_state:
                if (if_operation == "up"
                        and not guest_if_state(guest_if_name, session)):
                    error_msg = "Link state should be up in guest"
                if (if_operation == "down"
                        and guest_if_state(guest_if_name, session)):
                    error_msg = "Link state should be down in guest"
                if error_msg:
                    test.fail(error_msg)

            # Test of setting link state by update_device command
            if check_link_by_update_device:
                if not check_update_device(vm, guest_if_name, session):
                    test.fail("Check update_device failed")

            # Set the link up make host connect with vm
            domif_setlink(vm_name[0], device, "up", "")
            if not utils_misc.wait_for(
                    lambda: guest_if_state(guest_if_name, session), 5):
                test.fail("Link state isn't up in guest")

            # Ignore status of this one
            cmd = 'ip link set %s down;' % guest_if_name
            session.cmd_status_output(cmd, timeout=10)
            pattern = "%s:.*state DOWN.*" % guest_if_name
            pattern_cmd = 'ip addr show dev %s' % guest_if_name
            guest_cmd_check(pattern_cmd, session, pattern)

            cmd = 'ip link set %s up;' % guest_if_name
            session.cmd_status_output(cmd, timeout=10)
            pattern = "%s:.*state UP.*" % guest_if_name
            if not utils_misc.wait_for(
                    lambda: guest_cmd_check(pattern_cmd, session, pattern),
                    timeout=20):
                test.fail("Could not bring up interface %s inside guest" %
                          guest_if_name)
        else:  # negative test
            # stop guest, so state is always consistent on next start
            vm.destroy()
        # Check status_error
        if status_error == "yes":
            if status:
                logging.info("Expected error (negative testing). Output: %s",
                             result.stderr.strip())

            else:
                test.fail("Unexpected return code %d "
                          "(negative testing)" % status)
        elif status_error != "no":
            test.error("Invalid value for status_error '%s' "
                       "(must be 'yes' or 'no')" % status_error)
    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        virsh.undefine(vm_name[0])
        virsh.define(vm_xml_file)
        os.remove(vm_xml_file)
        if os.path.exists(save_file):
            os.remove(save_file)
def run_virsh_domif_setlink_getlink(test, params, env):
    """
    Test command: virsh domif-setlink and domif-getlink.

    The commands set and get the link state of a virtual interface.
    1. Prepare test environment.
    2. Perform virsh domif-setlink and domif-getlink operation.
    3. Recover test environment.
    4. Confirm the test result.
    """

    def domif_setlink(vm, device, operation, options):
        """
        Set the domain link state

        :param vm : domain name
        :param device : domain virtual interface
        :param operation : domain virtual interface state
        :param options : some options like --config

        """

        return virsh.domif_setlink(vm, device, operation, options, debug=True)

    def domif_getlink(vm, device, options):
        """
        Get the domain link state

        :param vm : domain name
        :param device : domain virtual interface
        :param options : some options like --config

        """

        return virsh.domif_getlink(vm, device, options, ignore_status=True, debug=True)

    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm = env.get_vm(vm_name)
    options = params.get("if_options", "--config")
    start_vm = params.get("start_vm", "no")
    if_device = params.get("if_device", "net")
    if_name = params.get("if_name", "vnet0")
    if_operation = params.get("if_operation", "up")
    if_name_re = params.get("if_ifname_re", r"\s*\d+:\s+([a-zA-Z]+\d+):")
    status_error = params.get("status_error", "no")
    mac_address = vm.get_virsh_mac_address(0)
    device = "vnet0"

    # Back up xml file.
    vm_xml_file = os.path.join(test.tmpdir, "vm.xml")
    virsh.dumpxml(vm_name, extra="--inactive", to_file=vm_xml_file)

    # Vm status
    if start_vm == "yes" and vm.is_dead():
        vm.start()

    elif start_vm == "no" and vm.is_alive():
        vm.destroy()

    # Test device net or mac address
    if if_device == "net" and vm.is_alive():
        device = if_name
        # Get all vm's interface device
        device = vm_xml.VMXML.get_net_dev(vm_name)[0]

    elif if_device == "mac":
        device = mac_address

    # Setlink operation
    result = domif_setlink(vm_name, device, if_operation, options)
    status = result.exit_status
    logging.info("Setlink done")

    # Getlink operation
    get_result = domif_getlink(vm_name, device, options)
    getlink_output = get_result.stdout.strip()

    # Check the getlink command output
    if not re.search(if_operation, getlink_output) and status_error == "no":
        raise error.TestFail("Getlink result should match the setlink "
                             "operation: %s" % getlink_output)

    logging.info("Getlink done")
    # If --config is given, restart the vm and then test the link status
    if options == "--config" and vm.is_alive():
        vm.destroy()
        vm.start()
        logging.info("Restart VM")

    elif start_vm == "no":
        vm.start()

    error_msg = None
    if status_error == "no":
        # Serial login to the vm to check the link status
        session = vm.wait_for_serial_login()
        cmd = "ip add |grep -i '%s' -B1|grep -i 'state %s' " % (mac_address, if_operation)
        cmd_status, output = session.cmd_status_output(cmd)
        logging.info("====%s==%s===", cmd_status, output)
        # Set the link up make host connect with vm
        domif_setlink(vm_name, device, "up", "")
        # Bring up referenced guest nic
        guest_if_name = re.search(if_name_re, output).group(1)
        # Ignore status of this one
        cmd_status = session.cmd_status("ifdown %s" % guest_if_name)
        cmd_status = session.cmd_status("ifup %s" % guest_if_name)
        if cmd_status != 0:
            error_msg = "Could not bring up interface %s inside guest" % guest_if_name
    else:  # negative test
        # stop guest, so state is always consistent on next start
        vm.destroy()

    # Recover VM.
    if vm.is_alive():
        vm.destroy(gracefully=False)
    virsh.undefine(vm_name)
    virsh.define(vm_xml_file)
    os.remove(vm_xml_file)

    if error_msg:
        raise error.TestFail(error_msg)

    # Check status_error
    if status_error == "yes":
        if status:
            logging.info("Expected error (negative testing). Output: %s", result.stderr.strip())

        else:
            raise error.TestFail("Unexpected return code %d " "(negative testing)" % status)
    elif status_error == "no":
        status = cmd_status
        if status:
            raise error.TestFail("Unexpected error (positive testing). " "Output: %s" % result.stderr.strip())

    else:
        raise error.TestError("Invalid value for status_error '%s' " "(must be 'yes' or 'no')" % status_error)
Beispiel #52
def run(test, params, env):
    """
    Test only ppc hosts
    """
    if 'ppc64le' not in platform.machine().lower():
        test.cancel('This case is for ppc only.')
    vm_name = params.get('main_vm', 'EXAMPLE')
    status_error = 'yes' == params.get('status_error', 'no')
    case = params.get('case', '')
    error_msg = params.get('error_msg', '')

    # Backup vm xml
    bk_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

        # Assign address to panic device
        if case == 'panic_address':

            # Check if there is already a panic device on vm, remove it if true
            origin_panic = vmxml.get_devices('panic')
            if origin_panic:
                for dev in origin_panic:
                    vmxml.del_device(dev)
                vmxml.sync()

            # Create panic device to add to vm
            panic_dev = Panic()
            panic_dev.model = 'pseries'
            panic_dev.addr_type = 'isa'
            panic_dev.addr_iobase = '0x505'
            logging.debug(panic_dev)
            vmxml.add_device(panic_dev)
            vmxml.sync()
            cmd_result = virsh.start(vm_name, debug=True, ignore_status=True)

        # Get Ethernet pci devices
        if case == 'unavail_pci_device':
            lspci = process.run('lspci|grep Ethernet',
                                shell=True).stdout_text.splitlines()
            pci_ids = [line.split()[0] for line in lspci]
            logging.debug(pci_ids)
            max_id = max([int(pci_id.split('.')[-1]) for pci_id in pci_ids])
            prefix = pci_ids[-1].split('.')[0]

            # Create fake pci ids
            for i in range(5):
                max_id += 1
                # function must be <= 7
                if max_id > 7:
                    break
                new_pci_id = '.'.join([prefix, str(max_id)])
                new_pci_xml = libvirt.create_hostdev_xml(new_pci_id,
                                                         xmlfile=False)
                vmxml.add_device(new_pci_xml)
            vmxml.sync()
            logging.debug('Vm xml after adding unavailable pci devices: \n%s',
                          vmxml)

        # Check result if there's a result to check
        if 'cmd_result' in locals():
            libvirt.check_exit_status(cmd_result, status_error)
            if error_msg:
                libvirt.check_result(cmd_result, [error_msg])

    finally:
        # In case vm disappeared after test
        if case == 'unavail_pci_device':
            virsh.define(bk_xml.xml, debug=True)
        else:
            bk_xml.sync()
Beispiel #53
                vm.destroy()
        except error.CmdError as detail:
            logging.error("Detail: %s", detail)

    # Check if VM exists.
    vm_exist = virsh.domain_exists(vm.name, uri=uri)

    # Check if xml file exists.
    xml_exist = False
    if (os.path.exists("/etc/libvirt/qemu/%s.xml" % vm_name) or
            os.path.exists("/etc/xen/%s" % vm_name)):
        xml_exist = True

    # Recover main VM.
    if not virsh.domain_exists(vm.name, uri=uri):
        s_define = virsh.define(xml_file)
        if s_define.exit_status != 0 or not virsh.domain_exists(vm.name, uri=uri):
            logging.error("Failed to define %s.", vm.name)

    # Check results.
    if status_error == 'yes':
        if status == 0:
            raise error.TestFail("virsh undefine return unexpected result.")
    else:
        if status != 0:
            raise error.TestFail("virsh undefine failed.")
        if undefine_twice == "yes":
            if status2 == 0:
                raise error.TestFail("Undefine the same VM twice succeeded.")
        if vm_exist:
            raise error.TestFail("VM still exists after undefine.")
Beispiel #54
    def trigger_events(dom, events_list=[]):
        """
        Trigger various events in events_list

        :param dom: the vm objects corresponding to the domain
        :return: the expected output that virsh event command prints out
        """
        expected_events_list = []
        save_path = os.path.join(tmpdir, "%s_event.save" % dom.name)
        logging.debug("Domain under test: %s", dom.name)
        xmlfile = dom.backup_xml()

        try:
            for event in events_list:
                logging.debug("Current event is: %s", event)
                if event in [
                        'start', 'restore', 'create', 'edit', 'define',
                        'undefine', 'crash'
                ]:
                    if dom.is_alive():
                        dom.destroy()
                        if event in ['create', 'define']:
                            dom.undefine()
                else:
                    if not dom.is_alive():
                        dom.start()
                        dom.wait_for_login().close()
                        if event == "resume":
                            dom.pause()

                if event == "undefine":
                    virsh.undefine(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Undefined Removed")
                elif event == "create":
                    virsh.create(xmlfile, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Started Booted")
                elif event == "destroy":
                    virsh.destroy(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Destroyed")
                elif event == "define":
                    virsh.define(xmlfile, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Defined Added")
                elif event == "start":
                    virsh.start(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Started Booted")
                    dom.wait_for_login().close()
                elif event == "suspend":
                    virsh.suspend(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Suspended Paused")
                    if not libvirt_version.version_compare(5, 3, 0):
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Suspended Paused")
                elif event == "resume":
                    virsh.resume(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                elif event == "save":
                    virsh.save(dom.name, save_path, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Suspended Paused")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Saved")
                elif event == "restore":
                    if not os.path.exists(save_path):
                        logging.error("%s not exist", save_path)
                    else:
                        virsh.restore(save_path, **virsh_dargs)
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Started Restored")
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Resumed Snapshot")
                elif event == "edit":
                    # Check whether the 'description' element exists.
                    domxml = virsh.dumpxml(dom.name).stdout.strip()
                    find_desc = parseString(domxml).getElementsByTagName(
                        "description")
                    if find_desc == []:
                        # If it does not exist, add one.
                        logging.info("Adding <description> to guest")
                        virsh.desc(dom.name, "--config",
                                   "Added desc for testvm", **virsh_dargs)
                    # The edit operation deletes the 'description' element.
                    edit_cmd = [r":g/<description.*<\/description>/d"]
                    utlv.exec_virsh_edit(dom.name, edit_cmd)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Defined Updated")
                elif event == "shutdown":
                    if signal_name is None:
                        virsh.shutdown(dom.name, **virsh_dargs)
                        # Wait a few seconds for shutdown finish
                        time.sleep(3)
                        if utils_misc.compare_qemu_version(2, 9, 0):
                            # Shutdown reason is distinguished from qemu 2.9.0-9
                            expected_events_list.append(
                                "'lifecycle' for %s:"
                                " Shutdown Finished after guest request")
                    else:
                        os.kill(dom.get_pid(), getattr(signal, signal_name))
                        if utils_misc.compare_qemu_version(2, 9, 0):
                            expected_events_list.append(
                                "'lifecycle' for %s:"
                                " Shutdown Finished after host request")
                    if not utils_misc.compare_qemu_version(2, 9, 0):
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Shutdown Finished")
                    wait_for_shutoff(dom)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Shutdown")
                elif event == "crash":
                    if not vmxml.xmltreefile.find('devices').findall('panic'):
                        # Set panic device
                        panic_dev = Panic()
                        panic_dev.model = panic_model
                        panic_dev.addr_type = addr_type
                        panic_dev.addr_iobase = addr_iobase
                        vmxml.add_device(panic_dev)
                    vmxml.on_crash = "coredump-restart"
                    vmxml.sync()
                    logging.info("Guest xml now is: %s", vmxml)
                    dom.start()
                    session = dom.wait_for_login()
                    # Stop kdump in the guest
                    session.cmd("systemctl stop kdump", ignore_all_errors=True)
                    # Enable sysRq
                    session.cmd("echo 1 > /proc/sys/kernel/sysrq")
                    try:
                        # Crash the guest
                        session.cmd("echo c > /proc/sysrq-trigger", timeout=90)
                    except (ShellTimeoutError,
                            ShellProcessTerminatedError) as details:
                        logging.info(details)
                    session.close()
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Crashed Panicked")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                elif event == "reset":
                    virsh.reset(dom.name, **virsh_dargs)
                    expected_events_list.append("'reboot' for %s")
                elif event == "vcpupin":
                    virsh.vcpupin(dom.name, '0', '0', **virsh_dargs)
                    expected_events_list.append("'tunable' for %s:"
                                                "\n\tcputune.vcpupin0: 0")
                elif event == "emulatorpin":
                    virsh.emulatorpin(dom.name, '0', **virsh_dargs)
                    expected_events_list.append("'tunable' for %s:"
                                                "\n\tcputune.emulatorpin: 0")
                elif event == "setmem":
                    mem_size = int(params.get("mem_size", 512000))
                    virsh.setmem(dom.name, mem_size, **virsh_dargs)
                    expected_events_list.append("'balloon-change' for %s:")
                elif event == "device-added-removed":
                    add_disk(dom.name, new_disk, 'vdb', '')
                    expected_events_list.append("'device-added' for %s:"
                                                " virtio-disk1")
                    virsh.detach_disk(dom.name, 'vdb', **virsh_dargs)
                    expected_events_list.append("'device-removed' for %s:"
                                                " virtio-disk1")
                    iface_xml_obj = create_iface_xml()
                    iface_xml_obj.xmltreefile.write()
                    virsh.detach_device(dom.name, iface_xml_obj.xml,
                                        **virsh_dargs)
                    expected_events_list.append("'device-removed' for %s:"
                                                " net0")
                    time.sleep(2)
                    virsh.attach_device(dom.name, iface_xml_obj.xml,
                                        **virsh_dargs)
                    expected_events_list.append("'device-added' for %s:"
                                                " net0")
                elif event == "block-threshold":
                    add_disk(dom.name, new_disk, 'vdb', '', format=disk_format)
                    logging.debug(process.run('qemu-img info %s -U' %
                                              new_disk))
                    virsh.domblkthreshold(vm_name, 'vdb', '100M')
                    session = dom.wait_for_login()
                    session.cmd(
                        "mkfs.ext4 /dev/vdb && mount /dev/vdb /mnt && ls /mnt && "
                        "dd if=/dev/urandom of=/mnt/bigfile bs=1M count=300 && sync"
                    )
                    time.sleep(5)
                    session.close()
                    expected_events_list.append(
                        "'block-threshold' for %s:"
                        " dev: vdb(%s)  104857600 29368320")
                    virsh.detach_disk(dom.name, 'vdb', **virsh_dargs)
                elif event == "change-media":
                    target_device = "hdc"
                    device_target_bus = params.get("device_target_bus", "ide")
                    disk_blk = vm_xml.VMXML.get_disk_blk(dom.name)
                    logging.info("disk_blk %s", disk_blk)
                    if target_device not in disk_blk:
                        logging.info("Adding cdrom to guest")
                        if dom.is_alive():
                            dom.destroy()
                        add_disk(
                            dom.name, "''", target_device,
                            ("--type cdrom --sourcetype file --driver qemu " +
                             "--config --targetbus %s" % device_target_bus))
                        dom.start()
                    all_options = new_disk + " --insert"
                    virsh.change_media(dom.name, target_device, all_options,
                                       **virsh_dargs)
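                    # Note: '%' binds tighter than '+', so only the bus regex
                    # is formatted here; the leading '%s' (domain name) is
                    # left for later substitution.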
                    expected_events_list.append("'tray-change' for %s disk" +
                                                " .*%s.*:" %
                                                device_target_bus + " opened")
                    expected_events_list.append("'tray-change' for %s disk" +
                                                " .*%s.*:" %
                                                device_target_bus + " closed")
                    all_options = new_disk + " --eject"
                    virsh.change_media(dom.name, target_device, all_options,
                                       **virsh_dargs)
                    expected_events_list.append("'tray-change' for %s disk" +
                                                " .*%s.*:" %
                                                device_target_bus + " opened")
                elif event == "hwclock":
                    session = dom.wait_for_login()
                    try:
                        session.cmd("hwclock --systohc", timeout=60)
                    except (ShellTimeoutError,
                            ShellProcessTerminatedError) as details:
                        logging.info(details)
                    session.close()
                    expected_events_list.append("'rtc-change' for %s:")
                elif event == "metadata_set":
                    metadata_uri = params.get("metadata_uri")
                    metadata_key = params.get("metadata_key")
                    metadata_value = params.get("metadata_value")
                    virsh.metadata(dom.name,
                                   metadata_uri,
                                   options="",
                                   key=metadata_key,
                                   new_metadata=metadata_value,
                                   **virsh_dargs)
                    expected_events_list.append("'metadata-change' for %s: "
                                                "element http://app.org/")
                elif event == "metadata_edit":
                    metadata_uri = "http://herp.derp/"
                    metadata_key = "herp"
                    metadata_value = "<derp xmlns:foobar='http://foo.bar/'>foo<bar></bar></derp>"
                    virsh_cmd = r"virsh metadata %s --uri %s --key %s %s"
                    virsh_cmd = virsh_cmd % (dom.name, metadata_uri,
                                             metadata_key, "--edit")
                    session = aexpect.ShellSession("sudo -s")
                    logging.info("Running command: %s", virsh_cmd)
                    try:
                        session.sendline(virsh_cmd)
                        session.sendline(r":insert")
                        session.sendline(metadata_value)
                        session.sendline(".")
                        session.send('ZZ')
                        remote.handle_prompts(session,
                                              None,
                                              None,
                                              r"[\#\$]\s*$",
                                              debug=True,
                                              timeout=60)
                    except Exception as e:
                        test.error("Error occurred: %s" % e)
                    session.close()
                    # Check metadata after edit
                    virsh.metadata(dom.name,
                                   metadata_uri,
                                   options="",
                                   key=metadata_key,
                                   **virsh_dargs)
                    expected_events_list.append("'metadata-change' for %s: "
                                                "element http://app.org/")
                elif event == "metadata_remove":
                    virsh.metadata(dom.name,
                                   metadata_uri,
                                   options="--remove",
                                   key=metadata_key,
                                   **virsh_dargs)
                    expected_events_list.append("'metadata-change' for %s: "
                                                "element http://app.org/")
                else:
                    test.error("Unsupported event: %s" % event)
                # Events may not be received immediately
                time.sleep(3)
        finally:
            if os.path.exists(save_path):
                os.unlink(save_path)
            if os.path.exists(new_disk):
                os.unlink(new_disk)
        return [(dom.name, event) for event in expected_events_list]
Beispiel #55
        time.sleep(delay)
        vm_ip = vm.get_address()
        s_ping, o_ping = utils_test.ping(vm_ip, count=2, timeout=delay)
        logging.info(o_ping)
        if s_ping != 0:
            raise error.TestError("%s did not respond after %d sec."
                                  % (vm.name, delay))

        # Prepare for --dname dest_exist_vm
        if extra.count("dest_exist_vm"):
            logging.debug("Preparing a new vm on destination for exist dname")
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm.name)
            vmxml.vm_name = extra.split()[1].strip()
            del vmxml.uuid
            # Define a new vm on destination for --dname
            virsh.define(vmxml.xml, uri=dest_uri)

        # Prepare for --xml.
        logging.debug("Preparing new xml file for --xml option.")
        if options.count("xml") or extra.count("xml"):
            dest_xmlfile = params.get("virsh_migrate_xml", "")
            if dest_xmlfile:
                ret_attach = vm.attach_interface("--type bridge --source "
                                                 "virbr0 --target tmp-vnet",
                                                 True, True)
                if not ret_attach:
                    exception = True
                    raise error.TestError("Attaching nic to %s failed."
                                          % vm.name)
                ifaces = vm_xml.VMXML.get_net_dev(vm.name)
                new_nic_mac = vm.get_virsh_mac_address(ifaces.index("tmp-vnet"))
Beispiel #56
def run(test, params, env):
    """
    Test push-mode incremental backup

    Steps:
    1. create a vm with extra disk vdb
    2. create some data on vdb in vm
    3. start a push mode full backup on vdb
    4. create some data on vdb in vm
    5. start a push mode incremental backup
    6. repeat step 4 and 5 as required
    7. check the full/incremental backup file data
    """
    def backup_job_done(vm_name, vm_disk):
        """
        Check if a backup job for a vm's specific disk is finished.

        :param vm_name: vm's name
        :param vm_disk: the disk to be checked, such as 'vdb'
        :return: 'True' means job finished
        """
        result = virsh.blockjob(vm_name, vm_disk, debug=True)
        if "no current block job" in result.stdout_text.strip().lower():
            return True
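        # Implicitly returns None (falsy) while the job is still running,
        # which makes this helper usable as a wait_for() condition below.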

    # Cancel the test if libvirt version is too low
    if not libvirt_version.version_compare(6, 0, 0):
        test.cancel("Current libvirt version doesn't support "
                    "incremental backup.")

    hotplug_disk = "yes" == params.get("hotplug_disk", "no")
    original_disk_size = params.get("original_disk_size", "100M")
    original_disk_type = params.get("original_disk_type", "local")
    original_disk_target = params.get("original_disk_target", "vdb")
    target_driver = params.get("target_driver", "qcow2")
    target_type = params.get("target_type", "file")
    target_blkdev_path = params.get("target_blkdev_path")
    target_blkdev_size = params.get("target_blkdev_size", original_disk_size)
    reuse_target_file = "yes" == params.get("reuse_target_file")
    prepare_target_file = "yes" == params.get("prepare_target_file")
    prepare_target_blkdev = "yes" == params.get("prepare_target_blkdev")
    backup_rounds = int(params.get("backup_rounds", 3))
    backup_error = "yes" == params.get("backup_error")
    tmp_dir = data_dir.get_tmp_dir()
    virsh_dargs = {'debug': True, 'ignore_status': True}

    try:
        vm_name = params.get("main_vm")
        vm = env.get_vm(vm_name)

        # Backup vm xml
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        vmxml_backup = vmxml.copy()

        # Enable vm incremental backup capability. This is only a workaround
        # to make sure incremental backup can work for the vm. Code needs to
        # be removed immediately when the function is enabled by default,
        # which is tracked by bz1799015
        tree = ET.parse(vmxml.xml)
        root = tree.getroot()
        for elem in root.iter('domain'):
            elem.set('xmlns:qemu',
                     'http://libvirt.org/schemas/domain/qemu/1.0')
            qemu_cap = ET.Element("qemu:capabilities")
            elem.insert(-1, qemu_cap)
            incbackup_cap = ET.Element("qemu:add")
            incbackup_cap.set('capability', 'incremental-backup')
            qemu_cap.insert(1, incbackup_cap)
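        # The loop above is expected to yield a domain XML roughly like
        # this (illustrative):
        #   <domain type='kvm'
        #           xmlns:qemu='http://libvirt.org/schemas/domain/qemu/1.0'>
        #     ...
        #     <qemu:capabilities>
        #       <qemu:add capability='incremental-backup'/>
        #     </qemu:capabilities>
        #   </domain>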
        vmxml.undefine()
        tmp_vm_xml = os.path.join(tmp_dir, "tmp_vm.xml")
        tree.write(tmp_vm_xml)
        virsh.define(tmp_vm_xml)
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        logging.debug("Script insert xml elements to make sure vm can support "
                      "incremental backup. This should be removded when "
                      "bz 1799015 fixed.")

        # Prepare the disk to be backed up.
        disk_params = {}
        disk_path = ""
        if original_disk_type == "local":
            image_name = "{}_image.qcow2".format(original_disk_target)
            disk_path = os.path.join(tmp_dir, image_name)
            libvirt.create_local_disk("file", disk_path, original_disk_size,
                                      "qcow2")
            disk_params = {
                "device_type": "disk",
                "type_name": "file",
                "driver_type": "qcow2",
                "target_dev": original_disk_target,
                "source_file": disk_path
            }
        elif original_disk_type == "ceph":
            ceph_mon_ip = params.get("ceph_mon_ip",
                                     "libvirtauthceph.usersys.redhat.com")
            ceph_host_port = params.get("ceph_host_port", "6789")
            ceph_disk_name = params.get("ceph_disk_name",
                                        "avocado-vt-pool/inc_bkup.qcow2")
            ceph_pool_name = ceph_disk_name.split('/')[0]
            ceph_file_name = ceph_disk_name.split('/')[1]
            ceph_client_name = params.get("ceph_client_name", "client.admin")
            ceph_client_key = params.get(
                "ceph_client_key", "AQDkY/xd2pqyLhAAPQ2Yrla/nGe1PazR4/n+IQ==")
            ceph_auth_user = params.get("ceph_auth_user", "admin")
            ceph_auth_key = params.get(
                "ceph_auth_key", "AQDkY/xd2pqyLhAAPQ2Yrla/nGe1PazR4/n+IQ==")
            auth_sec_usage_type = "ceph"

            enable_auth = "yes" == params.get("enable_auth", "yes")
            key_file = os.path.join(tmp_dir, "ceph.key")
            key_opt = ""
            # Start with an empty value so cleanup can tell whether a ceph
            # config file was created during the test
            ceph_cfg = ""
            if not utils_package.package_install(["ceph-common"]):
                test.error("Failed to install ceph-common")
            # Create config file if it doesn't exist
            ceph_cfg = ceph.create_config_file(ceph_mon_ip)
            if enable_auth:
                # If enable auth, prepare a local file to save key
                if ceph_client_name and ceph_client_key:
                    with open(key_file, 'w') as f:
                        f.write("[%s]\n\tkey = %s\n" %
                                (ceph_client_name, ceph_client_key))
                    key_opt = "--keyring %s" % key_file
                    auth_sec_dict = {
                        "sec_usage": auth_sec_usage_type,
                        "sec_name": "ceph_auth_secret"
                    }
                    auth_sec_uuid = libvirt.create_secret(auth_sec_dict)
                    virsh.secret_set_value(auth_sec_uuid,
                                           ceph_auth_key,
                                           debug=True)
                    disk_params_auth = {
                        "auth_user": ceph_auth_user,
                        "secret_type": auth_sec_usage_type,
                        "secret_uuid": auth_sec_uuid,
                        "auth_in_source": True
                    }
                else:
                    test.error("No ceph client name/key provided.")
                disk_path = "rbd:%s:mon_host=%s:keyring=%s" % (
                    ceph_disk_name, ceph_mon_ip, key_file)
            ceph.rbd_image_rm(ceph_mon_ip, ceph_pool_name, ceph_file_name,
                              ceph_cfg, key_file)
            process.run("qemu-img create -f qcow2 %s %s" %
                        (disk_path, original_disk_size),
                        shell=True,
                        verbose=True)
            disk_params = {
                'device_type': 'disk',
                'type_name': 'network',
                "driver_type": "qcow2",
                'target_dev': original_disk_target
            }
            disk_params_src = {
                'source_protocol': 'rbd',
                'source_name': ceph_disk_name,
                'source_host_name': ceph_mon_ip,
                'source_host_port': ceph_host_port
            }
            disk_params.update(disk_params_src)
            disk_params.update(disk_params_auth)
        else:
            test.error("The disk type '%s' not supported in this script." %
                       original_disk_type)
        if hotplug_disk:
            vm.start()
            vm.wait_for_login().close()
            disk_xml = libvirt.create_disk_xml(disk_params)
            virsh.attach_device(vm_name, disk_xml, debug=True)
        else:
            disk_xml = libvirt.create_disk_xml(disk_params)
            virsh.attach_device(vm.name,
                                disk_xml,
                                flagstr="--config",
                                debug=True)
            vm.start()
        session = vm.wait_for_login()
        new_disks_in_vm = list(utils_disk.get_linux_disks(session).keys())
        session.close()
        if len(new_disks_in_vm) != 1:
            test.fail("Test disk not prepared in vm")

        # Use the newly added disk as test disk
        test_disk_in_vm = "/dev/" + new_disks_in_vm[0]
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        vm_disks = list(vmxml.get_disk_all().keys())

        checkpoint_list = []
        is_incremental = False
        backup_path_list = []
        for backup_index in range(backup_rounds):
            # Prepare backup xml
            backup_params = {"backup_mode": "push"}
            if backup_index > 0:
                is_incremental = True
                backup_params["backup_incremental"] = "checkpoint_" + str(
                    backup_index - 1)

            backup_disk_xmls = []
            for vm_disk in vm_disks:
                backup_disk_params = {"disk_name": vm_disk}
                if vm_disk != original_disk_target:
                    backup_disk_params["enable_backup"] = "no"
                else:
                    backup_disk_params["enable_backup"] = "yes"
                    backup_disk_params["disk_type"] = target_type
                    target_params = {}
                    if target_type == "file":
                        target_file_name = "target_file_%s" % backup_index
                        target_file_path = os.path.join(
                            tmp_dir, target_file_name)
                        if prepare_target_file:
                            libvirt.create_local_disk("file", target_file_path,
                                                      original_disk_size,
                                                      target_driver)
                        target_params["file"] = target_file_path
                        logging.debug("target_params: %s", target_params)
                        backup_path_list.append(target_file_path)
                    elif target_type == "block":
                        if prepare_target_blkdev:
                            target_blkdev_path = libvirt.setup_or_cleanup_iscsi(
                                is_setup=True, image_size=target_blkdev_size)
                        target_params["dev"] = target_blkdev_path
                        backup_path_list.append(target_blkdev_path)
                    else:
                        test.fail(
                            "We do not support backup target type: '%s'" %
                            target_type)
                    backup_disk_params["backup_target"] = target_params
                    driver_params = {"type": target_driver}
                    backup_disk_params["backup_driver"] = driver_params
                backup_disk_xml = utils_backup.create_backup_disk_xml(
                    backup_disk_params)
                backup_disk_xmls.append(backup_disk_xml)
            logging.debug("disk list %s", backup_disk_xmls)
            backup_xml = utils_backup.create_backup_xml(
                backup_params, backup_disk_xmls)
            logging.debug("ROUND_%s Backup Xml: %s", backup_index, backup_xml)
            # Prepare checkpoint xml
            checkpoint_name = "checkpoint_%s" % backup_index
            checkpoint_list.append(checkpoint_name)
            cp_params = {"checkpoint_name": checkpoint_name}
            cp_params["checkpoint_desc"] = params.get(
                "checkpoint_desc", "desc of cp_%s" % backup_index)
            disk_param_list = []
            for vm_disk in vm_disks:
                cp_disk_param = {"name": vm_disk}
                if vm_disk != original_disk_target:
                    cp_disk_param["checkpoint"] = "no"
                else:
                    cp_disk_param["checkpoint"] = "bitmap"
                    cp_disk_bitmap = params.get("cp_disk_bitmap")
                    if cp_disk_bitmap:
                        cp_disk_param["bitmap"] = cp_disk_bitmap + str(
                            backup_index)
                disk_param_list.append(cp_disk_param)
            checkpoint_xml = utils_backup.create_checkpoint_xml(
                cp_params, disk_param_list)
            logging.debug("ROUND_%s Checkpoint Xml: %s", backup_index,
                          checkpoint_xml)

            # Start backup
            backup_options = backup_xml.xml + " " + checkpoint_xml.xml

            # Create some data in vdb
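            # Each round writes 1M at a different 10M offset, so every
            # incremental backup has fresh blocks to capture.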
            dd_count = "1"
            dd_seek = str(backup_index * 10 + 10)
            dd_bs = "1M"
            utils_backup.dd_data_to_vm_disk(vm, test_disk_in_vm, dd_bs,
                                            dd_seek, dd_count)

            if reuse_target_file:
                backup_options += " --reuse-external"
            backup_result = virsh.backup_begin(vm_name,
                                               backup_options,
                                               debug=True)
            if backup_result.exit_status:
                raise utils_backup.BackupBeginError(
                    backup_result.stderr.strip())

            # Wait for the backup job actually finished
            if not utils_misc.wait_for(
                    lambda: backup_job_done(vm_name, original_disk_target),
                    60):
                test.fail("Backup job not finished in 60s")

        for checkpoint_name in checkpoint_list:
            virsh.checkpoint_delete(vm_name, checkpoint_name, debug=True)
        if vm.is_alive():
            vm.destroy(gracefully=False)

        # Compare the backup data and original data
        original_data_file = os.path.join(tmp_dir, "original_data.qcow2")
        cmd = "qemu-img convert -f qcow2 %s -O qcow2 %s" % (disk_path,
                                                            original_data_file)
        process.run(cmd, shell=True, verbose=True)

        for backup_path in backup_path_list:
            if target_driver == "qcow2":
                # Clear backup image's backing file before comparison
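                # ('rebase -u' only rewrites the backing-file entry in the
                # image header; with -b '' it drops the reference without
                # copying any data.)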
                qemu_cmd = ("qemu-img rebase -u -f qcow2 -b '' -F qcow2 %s" %
                            backup_path)
                process.run(qemu_cmd, shell=True, verbose=True)
            if not utils_backup.cmp_backup_data(
                    original_data_file,
                    backup_path,
                    backup_file_driver=target_driver):
                test.fail("Backup and original data are not identical for"
                          "'%s' and '%s'" % (disk_path, backup_path))
            else:
                logging.debug("'%s' contains correct backup data", backup_path)
    except utils_backup.BackupBeginError as details:
        if backup_error:
            logging.debug("Backup failed as expected.")
        else:
            test.fail(details)
    finally:
        # Remove checkpoints
        if "checkpoint_list" in locals() and checkpoint_list:
            for checkpoint_name in checkpoint_list:
                virsh.checkpoint_delete(vm_name, checkpoint_name)

        if vm.is_alive():
            vm.destroy(gracefully=False)

        # Restoring vm
        vmxml_backup.sync()

        # Remove iscsi devices
        libvirt.setup_or_cleanup_iscsi(False)

        # Remove ceph related data
        if original_disk_type == "ceph":
            ceph.rbd_image_rm(ceph_mon_ip, ceph_pool_name, ceph_file_name,
                              ceph_cfg, key_file)
            if "auth_sec_uuid" in locals() and auth_sec_uuid:
                virsh.secret_undefine(auth_sec_uuid)
            if "ceph_cfg" in locals() and os.path.exists(ceph_cfg):
                os.remove(ceph_cfg)
            if os.path.exists(key_file):
                os.remove(key_file)
Beispiel #57
    def modify_iface_xml(update, status_error=False):
        """
        Modify interface xml options
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        xml_devices = vmxml.devices
        iface_index = xml_devices.index(
            xml_devices.by_device_tag("interface")[0])
        iface = xml_devices[iface_index]
        if iface_model:
            iface.model = iface_model
        else:
            del iface.model
        if iface_type:
            iface.type_name = iface_type
        del iface.source
        source = ast.literal_eval(iface_source)
        if source:
            net_ifs = utils_net.get_net_if(state="UP")
            # Check source device is valid or not,
            # if it's not in host interface list, try to set
            # source device to first active interface of host
            if (iface.type_name == "direct" and
                    source.has_key('dev') and
                    source['dev'] not in net_ifs):
                logging.warn("Source device %s is not a interface"
                             " of host, reset to %s",
                             source['dev'], net_ifs[0])
                source['dev'] = net_ifs[0]
            iface.source = source
        backend = ast.literal_eval(iface_backend)
        if backend:
            iface.backend = backend
        driver_dict = {}
        driver_host = {}
        driver_guest = {}
        if iface_driver:
            driver_dict = ast.literal_eval(iface_driver)
        if iface_driver_host:
            driver_host = ast.literal_eval(iface_driver_host)
        if iface_driver_guest:
            driver_guest = ast.literal_eval(iface_driver_guest)
        iface.driver = iface.new_driver(driver_attr=driver_dict,
                                        driver_host=driver_host,
                                        driver_guest=driver_guest)
        if iface.address:
            del iface.address

        logging.debug("New interface xml file: %s", iface)
        if unprivileged_user:
            # Create disk image for unprivileged user
            disk_index = xml_devices.index(
                xml_devices.by_device_tag("disk")[0])
            disk_xml = xml_devices[disk_index]
            logging.debug("source: %s", disk_xml.source)
            disk_source = disk_xml.source.attrs["file"]
            cmd = ("cp -fZ {0} {1} && chown {2}:{2} {1}"
                   "".format(disk_source, dst_disk, unprivileged_user))
            utils.run(cmd)
            disk_xml.source = disk_xml.new_disk_source(
                attrs={"file": dst_disk})
            vmxml.devices = xml_devices
            # Remove all channels to avoid of permission problem
            channels = vmxml.get_devices(device_type="channel")
            for channel in channels:
                vmxml.del_device(channel)

            vmxml.xmltreefile.write()
            logging.debug("New VM xml: %s", vmxml)
            utils.run("chmod a+rw %s" % vmxml.xml)
            virsh.define(vmxml.xml, **virsh_dargs)
        # Try to modify interface xml by update-device or edit xml
        elif update:
            iface.xmltreefile.write()
            ret = virsh.update_device(vm_name, iface.xml,
                                      ignore_status=True)
            libvirt.check_exit_status(ret, status_error)
        else:
            vmxml.devices = xml_devices
            vmxml.xmltreefile.write()
            vmxml.sync()
Beispiel #58
def run(test, params, env):
    """
    Test hpt resizing
    """
    vm_name = params.get('main_vm')
    vm = env.get_vm(vm_name)
    status_error = 'yes' == params.get('status_error', 'no')
    error_msg = eval(params.get('error_msg', '[]'))

    hpt_attrs = eval(params.get('hpt_attrs', '{}'))
    hpt_order_path = params.get('hpt_order_path', '')
    cpu_attrs = eval(params.get('cpu_attrs', '{}'))
    numa_cell = eval(params.get('numa_cell', '{}'))
    hugepage = 'yes' == params.get('hugepage', 'no')
    maxpagesize = int(params.get('maxpagesize', 0))
    check_hp = 'yes' == params.get('check_hp', 'no')
    qemu_check = params.get('qemu_check', '')
    skip_p8 = 'yes' == params.get('skip_p8', 'no')

    def set_hpt(vmxml, sync, **attrs):
        """
        Set resizing value to vm xml

        :param vmxml: xml of vm to be manipulated
        :param sync: whether to sync vmxml after
        :param attrs: attrs to set to hpt xml
        """
        if vmxml.xmltreefile.find('/features'):
            features_xml = vmxml.features
        else:
            features_xml = vm_xml.VMFeaturesXML()
        hpt_xml = vm_xml.VMFeaturesHptXML()
        for attr in attrs:
            setattr(hpt_xml, attr, attrs[attr])
        features_xml.hpt = hpt_xml
        vmxml.features = features_xml
        logging.debug(vmxml)
        if sync:
            vmxml.sync()

    def set_cpu(vmxml, **attrs):
        """
        Set cpu attrs for vmxml according to given attrs

        :param vmxml: xml of vm to be manipulated
        :param attrs: attrs to set to cpu xml
        """
        if vmxml.xmltreefile.find('cpu'):
            cpu = vmxml.cpu
        else:
            cpu = vm_xml.VMCPUXML()
        if 'numa_cell' in attrs:
            cpu.xmltreefile.create_by_xpath('/numa')
            attrs['numa_cell'] = cpu.dicts_to_cells(attrs['numa_cell'])
        for key in attrs:
            setattr(cpu, key, attrs[key])
        vmxml.cpu = cpu
        vmxml.sync()

    def set_memory(vmxml):
        """
        Set memory attributes in vm xml
        """
        vmxml.max_mem_rt = int(params.get('max_mem_rt', 30670848))
        vmxml.max_mem_rt_slots = int(params.get('max_mem_rt_slots', 16))
        vmxml.max_mem_rt_unit = params.get('max_mem_rt_unit', 'KiB')

        logging.debug(numa_cell)
        if numa_cell:
            # Remove cpu topology so it cannot mismatch the vcpu count
            if vmxml.get_cpu_topology():
                new_cpu = vmxml.cpu
                new_cpu.del_topology()
                vmxml.cpu = new_cpu
            vmxml.vcpu = max([int(cell['cpus'][-1]) for cell in numa_cell]) + 1
        vmxml.sync()

    def check_hpt_order(session, resizing=''):
        """
        Return hpt order in hpt_order file by default
        If 'resizing' is disabled, test updating hpt_order
        """
        if not hpt_order_path:
            test.cancel('No hpt order path provided.')
        hpt_order = session.cmd_output('cat %s' % hpt_order_path).strip()
        hpt_order = int(hpt_order)
        logging.info('Current hpt_order is %d', hpt_order)
        if resizing == 'disabled':
            cmd_result = session.cmd_status_output(
                'echo %d > %s' % (hpt_order + 1, hpt_order_path))
            result = process.CmdResult(stderr=cmd_result[1],
                                       exit_status=cmd_result[0])
            libvirt.check_exit_status(result, True)
            libvirt.check_result(result, error_msg)
        return hpt_order

    def check_hp_in_vm(session, page_size):
        """
        Check if hugepage size is correct inside vm

        :param session: the session of the running vm
        :param page_size: the expected pagesize to be checked inside vm
        """
        expect = int(page_size) != 65536
        meminfo = session.cmd_output('cat /proc/meminfo|grep Huge')
        logging.info('meminfo: \n%s', meminfo)
        pattern = r'Hugepagesize:\s+%d\s+kB' % int(page_size / 1024)
        logging.info('"%s" should %s be found in meminfo output', pattern,
                     '' if expect else 'not')
        result = expect == bool(re.search(pattern, meminfo))
        if not result:
            test.fail('meminfo output not meet expectation')

        # Check PAGE_SIZE in another way
        if not expect:
            conf_page_size = session.cmd_output('getconf PAGE_SIZE')
            logging.debug('Output of "getconf PAGE_SIZE": %s', conf_page_size)
            if int(conf_page_size) != int(page_size):
                test.fail(
                    'PAGE_SIZE not correct, should be %r, actually is %r' %
                    (page_size, conf_page_size))

    bk_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        arch = platform.machine()
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        resizing = hpt_attrs.get('resizing')

        # Test on ppc64le hosts
        if arch.lower() == 'ppc64le':
            cpu_arch = cpu.get_family() if hasattr(
                cpu, 'get_family') else cpu.get_cpu_arch()
            logging.debug('cpu_arch is: %s', cpu_arch)
            if skip_p8 and cpu_arch == 'power8':
                test.cancel('This case is not for POWER8')
            if maxpagesize and not utils_misc.compare_qemu_version(3, 1, 0):
                test.cancel('Qemu version is too low, '
                            'does not support maxpagesize setting')
            if maxpagesize == 16384 and cpu_arch == 'power9':
                test.cancel('Power9 does not support 16M pagesize.')

            set_hpt(vmxml, True, **hpt_attrs)
            if cpu_attrs or numa_cell:
                if numa_cell:
                    cpu_attrs['numa_cell'] = numa_cell
                set_cpu(vmxml, **cpu_attrs)
            if hugepage:
                vm_mem = vmxml.max_mem
                host_hp_size = utils_memory.get_huge_page_size()

                # Make 100m extra memory just to be safe
                hp_count = max((vm_mem + 102400) // host_hp_size, 1200)
                vm_xml.VMXML.set_memoryBacking_tag(vm_name, hpgs=True)

                # Set up hugepage env
                mnt_source, hp_path, fstype = 'hugetlbfs', '/dev/hugepages', 'hugetlbfs'
                if not os.path.isdir(hp_path):
                    process.run('mkdir %s' % hp_path, verbose=True)
                utils_memory.set_num_huge_pages(hp_count)
                if utils_misc.is_mounted(mnt_source,
                                         hp_path,
                                         fstype,
                                         verbose=True):
                    utils_misc.umount(mnt_source,
                                      hp_path,
                                      fstype,
                                      verbose=True)
                utils_misc.mount(mnt_source, hp_path, fstype, verbose=True)

                # Restart libvirtd service to make sure the mounted hugepage
                # is recognized
                utils_libvirtd.libvirtd_restart()

            if resizing == 'enabled':
                set_memory(vmxml)
            logging.debug('vmxml: \n%s', vmxml)

            # Start vm and check if start succeeds
            result = virsh.start(vm_name, debug=True)
            libvirt.check_exit_status(result, expect_error=status_error)

            # if vm is not supposed to start, terminate test
            if status_error:
                libvirt.check_result(result, error_msg)
                return

            libvirt.check_qemu_cmd_line(qemu_check)
            session = vm.wait_for_login()
            hpt_order = check_hpt_order(session, resizing)

            # Check hugepage inside vm
            if check_hp:
                check_hp_in_vm(session, maxpagesize * 1024)

            if resizing == 'enabled':
                mem_xml = utils_hotplug.create_mem_xml(
                    tg_size=int(params.get('mem_size', 2048000)),
                    tg_sizeunit=params.get('size_unit', 'KiB'),
                    tg_node=int(params.get('mem_node', 0)),
                    mem_model=params.get('mem_model', 'dimm'))
                logging.debug(mem_xml)

                # Attach a memory device to the guest 12 times, which
                # reaches the maximum memory limit
                for i in range(12):
                    virsh.attach_device(vm_name,
                                        mem_xml.xml,
                                        debug=True,
                                        ignore_status=False)
                xml_after_attach = vm_xml.VMXML.new_from_dumpxml(vm_name)
                logging.debug(xml_after_attach)

                # Check dumpxml of the guest,
                # check if each device has its alias
                for i in range(12):
                    pattern = "alias\s+name=[\'\"]dimm%d[\'\"]" % i
                    logging.debug('Searching for %s', pattern)
                    if not re.search(pattern, str(
                            xml_after_attach.xmltreefile)):
                        test.fail('Missing memory alias: %s' % pattern)

        # Test on non-ppc64le hosts
        else:
            set_hpt(vmxml, sync=False, **hpt_attrs)
            result = virsh.define(vmxml.xml)
            libvirt.check_exit_status(result, status_error)
            libvirt.check_result(result, error_msg)

    finally:
        bk_xml.sync()
        if hugepage:
            utils_misc.umount('hugetlbfs', '/dev/hugepages', 'hugetlbfs')
            utils_memory.set_num_huge_pages(0)
Beispiel #59
def run_virsh_setvcpus(test, params, env):
    """
    Test command: virsh setvcpus.

    The command can change the number of virtual CPUs in the guest domain.
    1.Prepare test environment,destroy or suspend a VM.
    2.Perform virsh setvcpus operation.
    3.Recover test environment.
    4.Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    xml_file = params.get("setvcpus_xml_file", "vm.xml")
    virsh.dumpxml(vm_name, extra="--inactive", to_file=xml_file)
    tmp_file = params.get("setvcpus_tmp_file", "tmp.xml")
    pre_vm_state = params.get("setvcpus_pre_vm_state")
    command = params.get("setvcpus_command", "setvcpus")
    options = params.get("setvcpus_options")
    domain = params.get("setvcpus_domain")
    count = params.get("setvcpus_count")
    extra_param = params.get("setvcpus_extra_param")
    count_option = "%s %s" % (count, extra_param)
    status_error = params.get("status_error")

    def get_current_vcpus():
        """
        Get current vcpu number.
        """
        vcpus_set = ""
        virsh.dumpxml(vm_name, extra="", to_file=tmp_file)
        dom = parse(tmp_file)
        root = dom.documentElement
        vcpus_2 = root.getElementsByTagName("vcpu")
        for n in vcpus_2:
            vcpus_set += n.getAttribute("current")
            vcpus_set = int(vcpus_set)
        dom.unlink()
        return vcpus_set

    if vm.is_alive():
        vm.destroy()
    vm_xml = libvirt_xml.VMXML()
    vm_xml.set_vm_vcpus(vm_name, 2)
    vm.start()
    vm.wait_for_login()

    if status_error == "no":
        vcpus_new = len(vm.vcpuinfo())
    domid = vm.get_id()
    domuuid = vm.get_uuid()
    if pre_vm_state == "paused":
        vm.pause()
    elif pre_vm_state == "shut off":
        vm.destroy()

    if domain == "remote_name":
        remote_ssh_addr = params.get("remote_ip", None)
        remote_addr = params.get("local_ip", None)
        remote_password = params.get("remote_password", None)
        host_type = virsh.driver()
        if host_type == "qemu":
            remote_string = "qemu+ssh://%s/system" % remote_addr
        elif host_type == "xen":
            remote_string = "xen+ssh://%s" % remote_addr
        command = "virsh -c %s setvcpus %s 1 --live" % (remote_string, vm_name)
        if virsh.has_command_help_match(command, "--live") is None:
            status_error = "yes"
        session = remote.remote_login(
            "ssh", remote_ssh_addr, "22", "root", remote_password, "#")
        session.cmd_output('LANG=C')
        status, output = session.cmd_status_output(command, internal_timeout=5)
        session.close()
        vcpus_current = len(vm.vcpuinfo())
    else:
        if domain == "name":
            dom_option = vm_name
        elif domain == "id":
            dom_option = domid
            if params.get("setvcpus_hex_id") is not None:
                dom_option = hex(int(domid))
            elif params.get("setvcpus_invalid_id") is not None:
                dom_option = params.get("setvcpus_invalid_id")
        elif domain == "uuid":
            dom_option = domuuid
            if params.get("setvcpus_invalid_uuid") is not None:
                dom_option = params.get("setvcpus_invalid_uuid")
        else:
            dom_option = domain
        option_list = options.split(" ")
        for item in option_list:
            if virsh.has_command_help_match(command, item) is None:
                status_error = "yes"
                break
        status = virsh.setvcpus(
            dom_option, count_option, options, ignore_status=True).exit_status
        if pre_vm_state == "paused":
            virsh.resume(vm_name, ignore_status=True)
        if status_error == "no":
            if status == 0:
                if pre_vm_state == "shut off":
                    if options == "--config":
                        vcpus_set = len(vm.vcpuinfo())
                    elif options == "--current":
                        vcpus_set = get_current_vcpus()
                    elif options == "--maximum --config":
                        vcpus_set = ""
                        dom = parse("/etc/libvirt/qemu/%s.xml" % vm_name)
                        vcpus_set = dom.getElementsByTagName(
                            "vcpu")[0].firstChild.data
                        vcpus_set = int(vcpus_set)
                        dom.unlink()
                else:
                    vcpus_set = len(vm.vcpuinfo())
                if domain == "id":
                    cmd_chk = "cat /etc/libvirt/qemu/%s.xml" % vm_name
                    output1 = commands.getoutput(cmd_chk)
                    logging.info("guest-info:\n%s" % output1)

    virsh.destroy(vm_name)
    virsh.undefine(vm_name)
    virsh.define(xml_file)
    if os.path.exists(xml_file):
        os.remove(xml_file)
    if os.path.exists(tmp_file):
        os.remove(tmp_file)

    # check status_error
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Run successfully with wrong command!")
    else:
        if status != 0:
            raise error.TestFail("Run failed with right command")
        else:
            if options == "--maximum --config":
                if vcpus_set != 4:
                    raise error.TestFail("Run failed with right command1")
            elif domain == "id":
                if options == "--config":
                    if vcpus_set != vcpus_new or not re.search('<vcpu current=\'1\'>%s</vcpu>' % vcpus_new, output1):
                        raise error.TestFail("Run failed with right command2")
                elif options == "--config --live":
                    if vcpus_set != 1 or not re.search('<vcpu current=\'1\'>%s</vcpu>' % vcpus_new, output1):
                        raise error.TestFail("Run failed with right command3")
                else:
                    if vcpus_set != 1 or re.search('<vcpu current=\'1\'>%s</vcpu>' % vcpus_new, output1):
                        raise error.TestFail("Run failed with right command4")
            else:
                if vcpus_set != 1:
                    raise error.TestFail("Run failed with right command5")
Beispiel #60
def run(test, params, env):
    """
    Test svirt in adding disk to VM.

    (1).Init variables for test.
    (2).Label the VM and disk with proper label.
    (3).Start VM and check the context.
    (4).Destroy VM and check the context.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("svirt_undefine_define_host_selinux",
                               "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("svirt_undefine_define_vm_sec_type", "dynamic")
    sec_model = params.get("svirt_undefine_define_vm_sec_model", "selinux")
    sec_label = params.get("svirt_undefine_define_vm_sec_label", None)
    sec_relabel = params.get("svirt_undefine_define_vm_sec_relabel", "yes")
    sec_dict = {
        'type': sec_type,
        'model': sec_model,
        'label': sec_label,
        'relabel': sec_relabel
    }
    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Get variables about image.
    img_label = params.get('svirt_undefine_define_disk_label')
    # Label the disks of VM with img_label.
    disks = vm.get_disk_devices()
    backup_labels_of_disks = {}
    for disk in list(disks.values()):
        disk_path = disk['source']
        backup_labels_of_disks[disk_path] = utils_selinux.get_context_of_file(
            filename=disk_path)
        utils_selinux.set_context_of_file(filename=disk_path,
                                          context=img_label)
    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    utils_selinux.set_status(host_sestatus)
    # Set the context of the VM.
    vmxml.set_seclabel([sec_dict])
    vmxml.sync()

    try:
        xml_file = (os.path.join(data_dir.get_tmp_dir(), "vmxml"))
        if vm.is_alive():
            vm.destroy()
        virsh.dumpxml(vm.name, to_file=xml_file)
        cmd_result = virsh.undefine(vm.name)
        if cmd_result.exit_status:
            test.fail("Failed to undefine vm." "Detail: %s" % cmd_result)
        cmd_result = virsh.define(xml_file)
        if cmd_result.exit_status:
            test.fail("Failed to define vm." "Detail: %s" % cmd_result)
    finally:
        # clean up
        for path, label in list(backup_labels_of_disks.items()):
            utils_selinux.set_context_of_file(filename=path, context=label)
        backup_xml.sync()
        utils_selinux.set_status(backup_sestatus)