Example #1
def config_feature_memory_backing(vmxml, **kwargs):
    """
    Config libvirt VM XML to influence how virtual memory pages are backed
    by host pages.

    :param vmxml: VMXML instance
    :param kwargs: Function keywords
    :return: Corresponding feature flag in qemu cmdline
    """
    # Both 'nosharepages' and 'locked' are supported since 1.0.6
    if not libvirt_version.version_compare(1, 0, 6):
        raise error.TestNAError("Element is not supported in current"
                                " libvirt version")
    qemu_flags = []
    no_sharepages = "yes" == kwargs.get("nosharepages", "no")
    locked = "yes" == kwargs.get("locked", "no")
    if no_sharepages:
        # On RHEL6, the flag is 'redhat-disable-KSM'
        # On RHEL7 & Fedora, the flag is 'mem-merge=off'
        qemu_flags.append(['mem-merge=off', 'redhat-disable-KSM'])
    if locked:
        qemu_flags.append("mlock=on")
    try:
        vm_xml.VMXML.set_memoryBacking_tag(vmxml.vm_name,
                                           hpgs=False,
                                           nosp=no_sharepages,
                                           locked=locked)
    except Exception as detail:
        logging.error("Update VM XML failed: %s", detail)
    return qemu_flags
Example #2
def run(test, params, env):
    """
    Test command: virsh dump.

    This command can dump the core of a domain to a file for analysis.
    1. Positive testing
        1.1 Dump domain with valid options.
        1.2 Avoid file system cache when dumping.
        1.3 Compress the dump images to valid/invalid formats.
    2. Negative testing
        2.1 Dump domain to a non-existent directory.
        2.2 Dump domain with invalid option.
        2.3 Dump a shut-off domain.
    """

    vm_name = params.get("main_vm", "vm1")
    vm = env.get_vm(vm_name)
    options = params.get("dump_options")
    dump_file = params.get("dump_file", "vm.core")
    dump_dir = params.get("dump_dir", data_dir.get_tmp_dir())
    if os.path.dirname(dump_file) == "":
        dump_file = os.path.join(dump_dir, dump_file)
    dump_image_format = params.get("dump_image_format")
    start_vm = params.get("start_vm") == "yes"
    paused_after_start_vm = params.get("paused_after_start_vm") == "yes"
    status_error = params.get("status_error", "no") == "yes"
    timeout = int(params.get("check_pid_timeout", "5"))
    memory_dump_format = params.get("memory_dump_format", "")
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    def wait_pid_active(pid, timeout=5):
        """
        Wait for pid in running status

        :param pid: Desired pid
        :param timeout: Max time we can wait
        """
        cmd = ("cat /proc/%d/stat | awk '{print $3}'" % pid)
        try:
            while True:
                timeout = timeout - 1
                if not timeout:
                    test.cancel("Time out for waiting pid!")
                pid_status = process.run(cmd, ignore_status=False, shell=True).stdout.strip()
                if pid_status != "R":
                    time.sleep(1)
                    continue
                else:
                    break
        except Exception as detail:
            test.fail(detail)
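The wait_pid_active helper above shells out to cat and awk for every poll; a minimal stdlib-only sketch of the same /proc/<pid>/stat check (the function names here are illustrative, not part of the test) could look like this:

import time

def pid_state(pid):
    """Return the process state letter from /proc/<pid>/stat."""
    with open('/proc/%d/stat' % pid) as stat_file:
        data = stat_file.read()
    # The state is the first field after the parenthesised comm value;
    # splitting on the last ')' avoids problems with spaces in comm.
    return data.rsplit(')', 1)[1].split()[0]

def wait_pid_running(pid, timeout=5):
    """Poll until the process is in state 'R' or the timeout expires."""
    for _ in range(timeout):
        if pid_state(pid) == 'R':
            return True
        time.sleep(1)
    return False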
Example #3
    def check_qemu_cmd_line():
        """
        Check whether the added devices are shown in the qemu cmd line
        """
        if not vm.get_pid():
            test.fail('VM pid file missing.')
        with open('/proc/%s/cmdline' % vm.get_pid()) as cmdline_file:
            cmdline = cmdline_file.read()
        # Check sound model
        if sound_model == "ac97":
            pattern = r"-device.AC97"
        elif sound_model == "ich6":
            pattern = r"-device.intel-hda"
        else:
            pattern = r"-device.ich9-intel-hda"
        if not re.search(pattern, cmdline):
            test.fail("Can not find the %s sound device "
                      "in qemu cmd line." % sound_model)
        # Check codec type
        if sound_model in ["ich6", "ich9"]:
            if codec_type == "micro":
                pattern = r"-device.hda-micro"
            else:
                # Duplex is the default codec in the qemu cli even when no
                # codec is set, but before 0.9.13 there is no codec_type and
                # therefore no default
                if libvirt_version.version_compare(0, 9, 13):
                    pattern = r"-device.hda-duplex"
            if not re.search(pattern, cmdline):
                test.fail("Can not find the %s codec for sound dev "
                          "in qemu cmd line." % codec_type)
def run(test, params, env):
    """
    Test virsh nodedev-detach and virsh nodedev-reattach

    (1) Init variables for the test.
    (2) Check the variables.
    (3) Do nodedev detach/reattach.
    """
    # Init variables
    device_name = params.get('nodedev_device_name', 'ENTER.YOUR.PCI.DEVICE')
    device_opt = params.get('nodedev_device_opt', '')
    status_error = ('yes' == params.get('status_error', 'no'))
    # check variables.
    if device_name.count('ENTER'):
        raise error.TestNAError('Param device_name is not configured.')

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    # do nodedev_detach_reattach
    try:
        do_nodedev_detach_reattach(device_name, params, device_opt)
    except error.TestFail as e:
        # Do nodedev detach and reattach failed.
        if status_error:
            return
        else:
            raise error.TestFail("Test failed in positive case."
                                 "error: %s" % e)
Example #5
def config_feature_pv_eoi(vmxml, **kwargs):
    """
    Config libvirt VM XML to enable/disable PV EOI feature.

    :param vmxml: VMXML instance
    :param kwargs: Function keywords
    :return: Corresponding feature flag in qemu cmdline
    """
    # This attribute supported since 0.10.2 (QEMU only)
    if not libvirt_version.version_compare(0, 10, 2):
        raise error.TestNAError("PV eoi is not supported in current"
                                " libvirt version")
    qemu_flags = []
    eoi_enable = kwargs.get('eoi_enable', 'on')
    if eoi_enable == 'on':
        qemu_flags.append('+kvm_pv_eoi')
    elif eoi_enable == 'off':
        qemu_flags.append('-kvm_pv_eoi')
    else:
        logging.error("Invaild value %s, eoi_enable must be 'on' or 'off'",
                      eoi_enable)
    try:
        vmxml_feature = vmxml.features
        if vmxml_feature.has_feature('apic'):
            vmxml_feature.remove_feature('apic')
        vmxml_feature.add_feature('apic', 'eoi', eoi_enable)
        vmxml.features = vmxml_feature
        logging.debug("Update VM XML:\n%s", vmxml)
        vmxml.sync()
    except Exception as detail:
        logging.error("Update VM XML failed: %s", detail)
    return qemu_flags
    def detach_reattach_nodedev(device_address, params, options=""):
        """
        Do the detach and reattach.

        Step1.Do detach.
        Step2.Check the result of detach.
        Step3.Do reattach.
        Step4.Check the result of reattach
        """
        # Libvirt acl polkit related params
        uri = params.get("virsh_uri")
        unprivileged_user = params.get('unprivileged_user')
        readonly = (params.get('nodedev_detach_readonly', 'no') == 'yes')
        if unprivileged_user:
            if unprivileged_user.count('EXAMPLE'):
                unprivileged_user = '******'

        # Do the detach
        logging.debug('Node device name is %s.', device_address)
        CmdResult = virsh.nodedev_detach(device_address, options,
                                         unprivileged_user=unprivileged_user,
                                         uri=uri, readonly=readonly)
        # Check the exit_status.
        libvirt.check_exit_status(CmdResult)
        # Check the driver.
        driver = get_driver_readlink(device_address)
        logging.debug('Driver after detach is %s.', driver)
        if libvirt_version.version_compare(1, 1, 1):
            device_driver_name = 'vfio-pci'
        else:
            device_driver_name = 'pci-stub'
        if (driver is None) or (not driver.endswith(device_driver_name)):
            test.fail("Driver for %s is not %s "
                      "after nodedev-detach" % (device_address, device_driver_name))
        # Do the reattach.
        CmdResult = virsh.nodedev_reattach(device_address, options)
        # Check the exit_status.
        libvirt.check_exit_status(CmdResult)
        # Check the driver.
        driver = get_driver_readlink(device_address)
        if libvirt_version.version_compare(1, 1, 1):
            device_driver_name = 'vfio-pci'
        else:
            device_driver_name = 'pci-stub'
        if driver and driver.endswith(device_driver_name):
            test.fail("Driver for %s is still %s "
                      "after nodedev-reattach" % (device_address, device_driver_name))
def run(test, params, env):
    """
    Test command: virsh find-storage-pool-sources

    1. Prepare env to provide source storage if using localhost:
       1). For 'netfs' source type, setup nfs server
       2). For 'iscsi' source type, setup iscsi server
       3). For 'logical' type pool, setup iscsi storage to create vg
       4). Prepare srcSpec xml file if not given
    2. Find the pool sources by running virsh cmd
    """

    source_type = params.get("source_type", "")
    source_host = params.get("source_host", "127.0.0.1")
    srcSpec = params.get("source_Spec", "")
    vg_name = params.get("vg_name", "virttest_vg_0")
    ro_flag = "yes" == params.get("readonly_mode", "no")
    status_error = "yes" == params.get("status_error", "no")
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    if not source_type:
        raise error.TestFail("Command requires <type> value")

    cleanup_nfs = False
    cleanup_iscsi = False
    cleanup_logical = False

    # Prepare source storage
    if source_host == "127.0.0.1":
        if source_type == "netfs":
            # Set up nfs
            res = utils_test.libvirt.setup_or_cleanup_nfs(True)
            selinux_bak = res["selinux_status_bak"]
            cleanup_nfs = True
        if source_type in ["iscsi", "logical"]:
            # Set up iscsi
            iscsi_device = utils_test.libvirt.setup_or_cleanup_iscsi(True)
            # If we got nothing, force failure
            if not iscsi_device:
                raise error.TestFail("Did not setup an iscsi device")
            cleanup_iscsi = True
            if source_type == "logical":
                # Create vg by using iscsi device
                try:
                    lv_utils.vg_create(vg_name, iscsi_device)
                except Exception as detail:
                    utils_test.libvirt.setup_or_cleanup_iscsi(False)
                    raise error.TestFail("vg_create failed: %s" % detail)
                cleanup_logical = True
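When srcSpec is not passed in, the test has to point virsh find-storage-pool-sources at the prepared host; a hedged sketch of what a minimal source-spec file could contain (the exact schema should be checked against the libvirt storage documentation, and the helper name is made up):

import tempfile

def write_srcspec(source_host):
    """Write a minimal <source> spec naming source_host and return its path."""
    xml = "<source>\n  <host name='%s'/>\n</source>\n" % source_host
    spec = tempfile.NamedTemporaryFile(mode='w', suffix='.xml', delete=False)
    spec.write(xml)
    spec.close()
    return spec.name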
    def init_device(self, index):
        consoleclass = self.test_params.vmxml.get_device_class('console')
        console_device = consoleclass(type_name=self.type,
                                      virsh_instance=self.test_params.virsh)
        # Assume default domain console device on port 0 and index starts at 0
        console_device.add_target(type=self.targettype, port=str(index + 1))
        if hasattr(self, 'alias') and libvirt_version.version_compare(3, 9, 0):
            console_device.alias = {'name': self.alias + str(index)}
        return console_device
Example #9
def run(test, params, env):
    """
    Test command: virsh nwfilter-list.

    1) Prepare parameters.
    2) Run nwfilter-list command.
    3) Check result.
    """
    # Prepare parameters
    options_ref = params.get("list_options_ref", "")
    status_error = params.get("status_error", "no")
    filter_name = []

    # acl polkit params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    + " libvirt version.")

    virsh_dargs = {'ignore_status': True, 'debug': True}
    if params.get('setup_libvirt_polkit') == 'yes':
        virsh_dargs['unprivileged_user'] = unprivileged_user
        virsh_dargs['uri'] = uri

    # Run command
    cmd_result = virsh.nwfilter_list(options=options_ref, **virsh_dargs)
    output = cmd_result.stdout.strip()
    status = cmd_result.exit_status

    # Check result
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Run successfully with wrong command.")
    elif status_error == "no":
        if status:
            raise error.TestFail("Run failed with right command.")

        # Retrieve filter name from output and check the cfg file
        output_list = output.split('\n')
        for i in range(2, len(output_list)):
            filter_name.append(output_list[i].split()[1])
        for i in range(len(filter_name)):
            xml_path = "%s/%s.xml" % (NWFILTER_ETC_DIR, filter_name[i])
            if not os.path.exists(xml_path):
                raise error.TestFail("Can't find list filter %s xml under %s"
                                     % (filter_name[i], NWFILTER_ETC_DIR))
            else:
                logging.debug("list filter %s xml found under %s" %
                              (filter_name[i], NWFILTER_ETC_DIR))
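The loop above skips the two header lines of the nwfilter-list output and takes the second column as the filter name; the same parsing as a small standalone function, exercised against a synthetic output sample:

def parse_nwfilter_list(output):
    """Return the filter names from 'virsh nwfilter-list' output."""
    names = []
    for line in output.strip().splitlines()[2:]:   # skip header and separator
        fields = line.split()
        if len(fields) >= 2:
            names.append(fields[1])
    return names

sample = (" UUID                                  Name\n"
          "------------------------------------------------\n"
          " 5c6d4fc0-0000-0000-0000-000000000001  allow-arp\n"
          " 5c6d4fc0-0000-0000-0000-000000000002  allow-dhcp\n")
assert parse_nwfilter_list(sample) == ['allow-arp', 'allow-dhcp']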
Example #10
def run(test, params, env):
    """
    Test command: virsh nwfilter-undefine.

    1) Prepare parameters.
    2) Run nwfilter-undefine command.
    3) Check result.
    4) Clean env
    """
    # Prepare parameters
    filter_ref = params.get("undefine_filter_ref", "")
    options_ref = params.get("undefine_options_ref", "")
    status_error = params.get("status_error", "no")

    # libvirt acl polkit related params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")
    # Backup filter xml
    if filter_ref:
        new_filter = libvirt_xml.NwfilterXML()
        filterxml = new_filter.new_from_filter_dumpxml(filter_ref)
        logging.debug("the filter xml is: %s" % filterxml.xmltreefile)
        filter_xml = filterxml.xmltreefile.name

    # Run command
    cmd_result = virsh.nwfilter_undefine(filter_ref, options=options_ref,
                                         unprivileged_user=unprivileged_user,
                                         uri=uri,
                                         ignore_status=True, debug=True)
    status = cmd_result.exit_status

    # Check result
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Run successfully with wrong command.")
    elif status_error == "no":
        if status:
            raise error.TestFail("Run failed with right command.")
        chk_result = check_list(filter_ref)
        if chk_result:
            raise error.TestFail("filter %s show up in filter list." %
                                 filter_ref)

    # Clean env
    if status == 0:
        virsh.nwfilter_define(filter_xml, options="",
                              ignore_status=True, debug=True)
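check_list is referenced above but not shown; a plausible sketch that reuses virsh.nwfilter_list as in Example #9 (the helper name and its exact behaviour are assumptions):

from virttest import virsh

def check_list(filter_ref):
    """Return True if filter_ref still shows up in 'virsh nwfilter-list'."""
    cmd_result = virsh.nwfilter_list(options="", ignore_status=True, debug=True)
    if cmd_result.exit_status:
        return False
    for line in cmd_result.stdout.strip().splitlines()[2:]:
        fields = line.split()
        if len(fields) >= 2 and filter_ref in (fields[0], fields[1]):
            return True
    return False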
Example #11
def run(test, params, env):
    """
    Test virsh nwfilter-edit with uuid.

    1) Prepare parameters.
    2) Run nwfilter-edit command.
    3) Check result.
    4) Clean env
    """
    # Prepare parameters
    filter_name = params.get("edit_filter_name", "")
    status_error = params.get("status_error", "no")
    new_uuid = "11111111-1111-1111-1111-111111111111"
    edit_cmd = ":2s/<uuid>.*$/<uuid>%s<\/uuid>/" % new_uuid

    # Since commit 46a811d, updating a filter's uuid is no longer allowed,
    # so decide status_error based on the libvirt version.
    if libvirt_version.version_compare(1, 2, 7):
        status_error = True
    else:
        status_error = False

    # Backup filter xml
    new_filter = libvirt_xml.NwfilterXML()
    filterxml = new_filter.new_from_filter_dumpxml(filter_name)
    logging.debug("the filter xml is: %s" % filterxml.xmltreefile)

    try:
        # Run command
        session = aexpect.ShellSession("sudo -s")
        try:
            session.sendline("virsh nwfilter-edit %s" % filter_name)
            session.sendline(edit_cmd)
            # Press ESC
            session.send('\x1b')
            # Save and quit
            session.send('ZZ')
            remote.handle_prompts(session, None, None, r"[\#\$]\s*$")
            session.close()
            if not status_error:
                logging.info("Succeed to do nwfilter edit")
            else:
                test.fail("edit uuid should fail but got succeed.")
        except (aexpect.ShellError, aexpect.ExpectError, remote.LoginTimeoutError) as details:
            log = session.get_output()
            session.close()
            if "Try again? [y,n,f,?]:" in log and status_error:
                logging.debug("edit uuid failed as expected.")
            else:
                test.fail("Failed to do nwfilter-edit: %s\n%s"
                          % (details, log))
    finally:
        # Clean env
        virsh.nwfilter_undefine(filter_name, debug=True)
        virsh.nwfilter_define(filterxml.xml, debug=True)
Example #12
    def dump_nodedev_xml(dev_name, dev_opt="", **dargs):
        """
        Do dumpxml and check the result.

        step1.execute nodedev-dumpxml command.
        step2.compare info in xml with info in sysfs.

        :param dev_name: name of device.
        :param dev_opt: command extra options
        :param dargs: extra dict args
        """
        result = virsh.nodedev_dumpxml(dev_name, options=dev_opt, **dargs)
        libvirt.check_exit_status(result)
        logging.debug('Executing "virsh nodedev-dumpxml %s" finished.', dev_name)
        # Compare info in xml with info in sysfs.
        nodedevice_xml = nodedev_xml.NodedevXML.new_from_dumpxml(dev_name)

        if not nodedevice_xml.validates:
            test.error("nodedvxml of %s is not validated." % (dev_name))
        # Get the dict of key to value in xml.
        # nodedev_dict_xml contains all the keys and values in the xml that need checking.
        nodedev_dict_xml = nodedevice_xml.get_key2value_dict()

        # Get the dict of key to path in sysfs.
        # nodedev_syspath_dict contains all the keys and, for each key, the path
        # of the sysfs file that holds its value.
        nodedev_syspath_dict = nodedevice_xml.get_key2syspath_dict()

        # Get the values contained in files.
        # nodedev_dict_sys contains all the keys and values from sysfs.
        nodedev_dict_sys = {}
        for key, filepath in list(nodedev_syspath_dict.items()):
            with open(filepath, 'r') as f:
                value = f.readline().rstrip('\n')
            nodedev_dict_sys[key] = value

        # Compare the value in xml and in syspath.
        for key in nodedev_dict_xml:
            xml_value = nodedev_dict_xml.get(key)
            sys_value = nodedev_dict_sys.get(key)

            if not xml_value == sys_value:
                if (key == 'numa_node' and not
                        libvirt_version.version_compare(1, 2, 5)):
                    logging.warning("key: %s in xml is not supported yet" % key)
                else:
                    test.error("key: %s in xml is %s,"
                               "but in sysfs is %s." %
                               (key, xml_value, sys_value))
            else:
                continue

        logging.debug("Compare info in xml and info in sysfs finished"
                      "for device %s.", dev_name)
Example #13
    def init_device(self, index):
        filepath = self.make_filepath(index)
        self.make_source(filepath)
        serialclass = self.test_params.vmxml.get_device_class('serial')
        serial_device = serialclass(type_name=self.type_name,
                                    virsh_instance=self.test_params.virsh)
        serial_device.add_source(path=filepath)
        # Assume default domain serial device on port 0 and index starts at 0
        serial_device.add_target(port=str(index + 1))
        if hasattr(self, 'alias') and libvirt_version.version_compare(3, 9, 0):
            serial_device.alias = {'name': self.alias + str(index)}
        return serial_device
    def device_hotplug():
        if not libvirt_version.version_compare(3, 10, 0):
            detach_device(pci_devs, pci_ids)
        # attach the device in hotplug mode
        result = virsh.attach_device(vm_name, dev.xml,
                                     flagstr="--live", debug=True)
        if result.exit_status:
            test.error(result.stdout.strip())
        else:
            logging.debug(result.stdout.strip())
        if not utils_misc.wait_for(check_attach_pci, timeout):
            test.fail("timeout value is not sufficient")
Example #15
def do_nodedev_dumpxml(dev_name, dev_opt="", **dargs):
    """
    Do dumpxml and check the result.

    (1).execute nodedev-dumpxml command.
    (2).compare info in xml with info in sysfs.

    :param dev_name: name of device.
    :param dev_opt: command extra options
    :param dargs: extra dict args
    :raise TestFail: if execute command failed
                     or check result failed.
    """
    result = virsh.nodedev_dumpxml(dev_name, options=dev_opt, **dargs)
    if result.exit_status:
        raise error.TestError("Dumpxml node device %s failed.\n"
                              "Detail:%s." % (dev_name, result.stderr))
    logging.debug('Executing "virsh nodedev-dumpxml %s" finished.', dev_name)
    # compare info in xml with info in sysfs.
    nodedevxml = nodedev_xml.NodedevXML.new_from_dumpxml(dev_name)
    if not nodedevxml.validates:
        raise error.TestError("nodedvxml of %s is not validated." % (dev_name))
    # Get the dict of key to value in xml.
    # key2value_dict_xml contains all the keys and values in the xml that need checking.
    key2value_dict_xml = nodedevxml.get_key2value_dict()
    # Get the dict of key to path in sysfs.
    # key2syspath_dict contains all the keys and, for each key, the path of
    # the sysfs file that holds its value.
    key2syspath_dict = nodedevxml.get_key2syspath_dict()
    # Get the values contained in files.
    # key2value_dict_sys contains all the keys and values from sysfs.
    key2value_dict_sys = {}
    for key, filepath in key2syspath_dict.items():
        value = utils.read_one_line(filepath)
        key2value_dict_sys[key] = value

    # Compare the value in xml and in syspath.
    for key in key2value_dict_xml:
        value_xml = key2value_dict_xml.get(key)
        value_sys = key2value_dict_sys.get(key)
        if not value_xml == value_sys:
            if (key == 'numa_node' and not
                    libvirt_version.version_compare(1, 2, 5)):
                logging.warning("key: %s in xml is not supported yet" % key)
            else:
                raise error.TestError("key: %s in xml is %s,"
                                      "but in sysfs is %s." %
                                      (key, value_xml, value_sys))
        else:
            continue

    logging.debug("Compare info in xml and info in sysfs finished"
                  "for device %s.", dev_name)
Example #16
    def init_device(self, index):
        channelclass = self.test_params.vmxml.get_device_class('channel')
        channel_device = channelclass(type_name=self.type,
                                      virsh_instance=self.test_params.virsh)
        if hasattr(self, 'sourcemode') and hasattr(self, 'sourcepath'):
            channel_device.add_source(mode=self.sourcemode,
                                      path=self.sourcepath)
        if hasattr(self, 'targettype') and hasattr(self, 'targetname'):
            channel_device.add_target(type=self.targettype,
                                      name=self.targetname)
        if hasattr(self, 'alias') and libvirt_version.version_compare(3, 9, 0):
            channel_device.alias = {'name': self.alias + str(index)}
        return channel_device
Example #17
    def init_device(self, index):
        controllerclass = self.test_params.vmxml.get_device_class('controller')
        controller_device = controllerclass(type_name=self.type,
                                            virsh_instance=self.test_params.virsh)
        controller_device.model = self.model

        if not libvirt_version.version_compare(1, 3, 5):
            controller_devices = []
            devices = self.test_params.vmxml.get_devices()
            try:
                controller_devices = devices.by_device_tag('controller')
            except xcepts.LibvirtXMLError:
                # Handle case without any controller tags
                pass
            controller_device.index = str(0)
            for controller in controller_devices:
                if controller['type'] == controller_device.type:
                    controller_device.index = str(int(controller_device.index) + 1)

        if hasattr(self, 'alias') and libvirt_version.version_compare(3, 9, 0):
            controller_device.alias = {'name': self.alias + str(index)}
        return controller_device
    def check_shareable(at_with_shareable, test_twice):
        """
        Check if the current libvirt version supports the shareable option.

        at_with_shareable: True or False. Whether to attach the disk with the shareable option
        test_twice: True or False. Whether to perform the operations twice
        return: True, or cancel the test
        """
        if at_with_shareable or test_twice:
            if libvirt_version.version_compare(3, 9, 0):
                return True
            else:
                test.cancel("Current libvirt version doesn't support shareable feature")
Example #19
    def make_filepath(self, index):
        """Return full path to unique filename per device index"""
        # auto-cleaned at end of test
        if self.type_name == 'file':
            if libvirt_version.version_compare(3, 2, 0):
                serial_dir = '/var/log/libvirt'
            else:
                serial_dir = self.test_params.serial_dir
        else:
            serial_dir = data_dir.get_tmp_dir()

        return os.path.join(serial_dir, 'serial_%s_%s-%d.log'
                            % (self.type_name, self.identifier, index))
Example #20
    def run_bandwidth_test(check_net=False, check_iface=False):
        """
        Test bandwidth option for network or interface by tc command.
        """
        iface_inbound = ast.literal_eval(iface_bandwidth_inbound)
        iface_outbound = ast.literal_eval(iface_bandwidth_outbound)
        net_inbound = ast.literal_eval(net_bandwidth_inbound)
        net_outbound = ast.literal_eval(net_bandwidth_outbound)
        net_bridge_name = ast.literal_eval(net_bridge)["name"]
        iface_name = libvirt.get_ifname_host(vm_name, iface_mac)

        try:
            if check_net and net_inbound:
                # Check qdisc rules
                cmd = "tc -d qdisc show dev %s" % net_bridge_name
                qdisc_output = utils.run(cmd).stdout
                logging.debug("Bandwidth qdisc output: %s", qdisc_output)
                if not qdisc_output.count("qdisc ingress ffff:"):
                    raise error.TestFail("Can't find ingress setting")
                check_class_rules(
                    net_bridge_name, "1:1", {"average": net_inbound["average"], "peak": net_inbound["peak"]}
                )
                check_class_rules(net_bridge_name, "1:2", net_inbound)

            # Check filter rules on bridge interface
            if check_net and net_outbound:
                check_filter_rules(net_bridge_name, net_outbound)

            # Check class rules on interface inbound settings
            if check_iface and iface_inbound:
                check_class_rules(
                    iface_name,
                    "1:1",
                    {
                        "average": iface_inbound["average"],
                        "peak": iface_inbound["peak"],
                        "burst": iface_inbound["burst"],
                    },
                )
                if "floor" in iface_inbound:
                    if not libvirt_version.version_compare(1, 0, 1):
                        raise error.TestNAError("QoS option 'floor' is not supported")

                    check_class_rules(net_bridge_name, "1:3", {"floor": iface_inbound["floor"]})

            # Check filter rules on interface outbound settings
            if check_iface and iface_outbound:
                check_filter_rules(iface_name, iface_outbound)
        except AssertionError:
            utils.log_last_traceback()
            raise error.TestFail("Failed to check network bandwidth")
    def device_hotunplug():
        result = virsh.detach_device(vm_name, dev.xml,
                                     flagstr="--live", debug=True)
        if result.exit_status:
            test.fail(result.stdout.strip())
        else:
            logging.debug(result.stdout.strip())
        # FIXME: after detaching the device from the guest, it needs some
        # time before any other operation can be performed on the device
        time.sleep(timeout)
        if not libvirt_version.version_compare(3, 10, 0):
            pci_devs.sort()
            reattach_device(pci_devs, pci_ids)
Example #22
    def run_bandwidth_test(check_net=False, check_iface=False):
        """
        Test bandwidth option for network or interface by tc command.
        """
        iface_inbound = ast.literal_eval(iface_bandwidth_inbound)
        iface_outbound = ast.literal_eval(iface_bandwidth_outbound)
        net_inbound = ast.literal_eval(net_bandwidth_inbound)
        net_outbound = ast.literal_eval(net_bandwidth_outbound)
        net_bridge_name = ast.literal_eval(net_bridge)["name"]
        iface_name = libvirt.get_ifname_host(vm_name, iface_mac)

        try:
            if check_net and net_inbound:
                # Check qdisc rules
                cmd = "tc -d qdisc show dev %s" % net_bridge_name
                qdisc_output = process.system_output(cmd)
                logging.debug("Bandwidth qdisc output: %s", qdisc_output)
                if not qdisc_output.count("qdisc ingress ffff:"):
                    test.fail("Can't find ingress setting")
                check_class_rules(net_bridge_name, "1:1",
                                  {"average": net_inbound["average"],
                                   "peak": net_inbound["peak"]})
                check_class_rules(net_bridge_name, "1:2", net_inbound)

            # Check filter rules on bridge interface
            if check_net and net_outbound:
                check_filter_rules(net_bridge_name, net_outbound)

            # Check class rules on interface inbound settings
            if check_iface and iface_inbound:
                check_class_rules(iface_name, "1:1",
                                  {'average': iface_inbound['average'],
                                   'peak': iface_inbound['peak'],
                                   'burst': iface_inbound['burst']})
                if "floor" in iface_inbound:
                    if not libvirt_version.version_compare(1, 0, 1):
                        test.cancel("QoS option 'floor' is not supported")

                    check_class_rules(net_bridge_name, "1:3",
                                      {'floor': iface_inbound["floor"]})

            # Check filter rules on interface outbound settings
            if check_iface and iface_outbound:
                check_filter_rules(iface_name, iface_outbound)
        except AssertionError:
            stacktrace.log_exc_info(sys.exc_info())
            test.fail("Failed to check network bandwidth")
Example #23
def run(test, params, env):
    """
    Test command virsh nodedev-dumpxml.

    (1).get param from params.
    (2).do nodedev dumpxml.
    (3).clean up.
    """
    # Init variables.
    status_error = ('yes' == params.get('status_error', 'no'))
    device_name = params.get('nodedev_device_name', None)
    device_opt = params.get('nodedev_device_opt', "")

    # acl polkit params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    virsh_dargs = {}
    if params.get('setup_libvirt_polkit') == 'yes':
        virsh_dargs['unprivileged_user'] = unprivileged_user
        virsh_dargs['uri'] = uri

    # do nodedev dumpxml.
    try:
        do_nodedev_dumpxml(dev_name=device_name, dev_opt=device_opt,
                           **virsh_dargs)
        if status_error:
            raise error.TestFail('Nodedev dumpxml succeeded in negative test.')
        else:
            pass
    except error.TestError as e:
        if status_error:
            pass
        else:
            raise error.TestFail('Nodedev dumpxml failed in positive test. '
                                 'Error: %s' % e)
Example #24
def run(test, params, env):
    """
    Test vm features
    """
    vm_name = params.get('main_vm')
    vm = env.get_vm(vm_name)

    hyperv_attr = eval(params.get('hyperv_attr', '{}'))

    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    bkxml = vmxml.copy()

    try:

        # Set hyperv attributes if there are attributes to set
        if hyperv_attr:
            if set(hyperv_attr.keys()).intersection(('tlbflush',
                                                     'frequencies',
                                                     'reenlightenment')):

                # Compare libvirt version to decide if test is valid
                if not libvirt_version.version_compare(5, 0, 0):
                    test.cancel('This version of libvirt doesn\'t support '
                                'the setting: %r' % hyperv_attr)

            vm_xml.VMXML.set_vm_features(
                vm_name,
                **{'hyperv_%s_state' % key: value
                   for key, value in hyperv_attr.items()}
            )

            # Test vm start
            ret = virsh.start(vm_name, debug=True)
            libvirt.check_exit_status(ret)
            vm.wait_for_login().close()

            # Check hyperv settings in qemu command line
            for attr in hyperv_attr:
                libvirt.check_qemu_cmd_line('hv_' + attr)

    finally:
        bkxml.sync()
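For reference, the <hyperv> fragment that set_vm_features builds for such attributes can be sketched by hand; the element and attribute names follow the libvirt domain XML schema, while the attribute dict below is illustrative rather than taken from the test configuration:

import xml.etree.ElementTree as ET

hyperv_attr = {'relaxed': 'on', 'vpindex': 'on', 'spinlocks': 'on'}
hyperv = ET.Element('hyperv')
for name, state in hyperv_attr.items():
    elem = ET.SubElement(hyperv, name, {'state': state})
    if name == 'spinlocks':
        elem.set('retries', '8191')   # spinlocks additionally takes a retries attribute
print(ET.tostring(hyperv, encoding='unicode'))
# e.g. <hyperv><relaxed state="on" /><vpindex state="on" /><spinlocks state="on" retries="8191" /></hyperv>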
Example #25
    def init_device(self, index):
        """
        Initialize and return instance of device xml for index
        """
        self.make_image_file(index)
        disk_class = self.test_params.vmxml.get_device_class('disk')
        disk_device = disk_class(type_name=self.devtype,
                                 virsh_instance=self.test_params.virsh)
        disk_device.driver = {'name': 'qemu', 'type': 'raw'}
        # No source elements by default
        source_properties = {'attrs':
                             {'file': self.make_image_file_path(index)}}
        source = disk_device.new_disk_source(**source_properties)
        disk_device.source = source  # Modified copy, not original
        dev_name = self.devname(index)
        disk_device.target = {'dev': dev_name, 'bus': self.targetbus}
        # libvirt will automatically add <address> element
        if hasattr(self, 'alias') and libvirt_version.version_compare(3, 9, 0):
            disk_device.alias = {'name': self.alias + str(index)}
        return disk_device
    def set_get_speed(vm_name, expected_value, status_error=False,
                      options_extra="", **virsh_dargs):
        """Set speed and check its result"""
        result = virsh.migrate_setspeed(vm_name, expected_value,
                                        options_extra, **virsh_dargs)
        status = result.exit_status
        err = result.stderr.strip()

        # Check status_error
        if status_error:
            if status == 0 or err == "":
                # Without code for bz1083483 applied, this will succeed
                # when it shouldn't be succeeding.
                if bz1083483 and not libvirt_version.version_compare(1, 2, 4):
                    raise error.TestNAError("bz1083483 should result in fail")
                else:
                    raise error.TestFail("Expect fail, but run successfully!")

            # no need to perform getspeed if status_error is true
            return
        else:
            if status != 0 or err != "":
                raise error.TestFail("Run failed with right "
                                     "virsh migrate-setspeed command")

        result = virsh.migrate_getspeed(vm_name, **virsh_dargs)
        status = result.exit_status
        actual_value = result.stdout.strip()
        err = result.stderr.strip()

        if status != 0 or err != "":
            raise error.TestFail("Run failed with virsh migrate-getspeed")

        logging.info("The expected bandwidth is %s MiB/s, "
                     "the actual bandwidth is %s MiB/s"
                     % (expected_value, actual_value))

        if int(actual_value) != int(expected_value):
            raise error.TestFail("Bandwidth value from getspeed "
                                 "is different from expected value "
                                 "set by setspeed")
Example #27
def run(test, params, env):
    """
    Test command: virsh vol-resize

    Resize the capacity of the given volume (default bytes).
    1. Define and start a given type pool.
    2. Create a volume in the pool.
    3. Do vol-resize.
    4. Check the volume info.
    5. Delete the volume and pool.

    TODO:
    Add a volume shrink test after libvirt upstream supports it.
    """

    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    emulated_image = params.get("emulated_image")
    emulated_image_size = params.get("emulated_image_size")
    vol_name = params.get("vol_name")
    vol_format = params.get("vol_format")
    vol_capacity = params.get("vol_capacity")
    vol_new_capacity = params.get("vol_new_capacity")
    resize_option = params.get("resize_option", "")
    check_vol_size = "yes" == params.get("check_vol_size", "yes")
    status_error = "yes" == params.get("status_error", "no")

    if not libvirt_version.version_compare(1, 0, 0):
        if "--allocate" in resize_option:
            raise error.TestNAError("'--allocate' flag is not supported in"
                                    " current libvirt version.")

    # libvirt acl polkit related params
    uri = params.get("virsh_uri")
    unpri_user = params.get('unprivileged_user')
    if unpri_user:
        if unpri_user.count('EXAMPLE'):
            unpri_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    libv_pvt = libvirt.PoolVolumeTest(test, params)
    try:
        libv_pool = libvirt_storage.StoragePool()
        # Raise an error if a pool with the given name already exists
        if libv_pool.pool_exists(pool_name):
            raise error.TestError("Pool '%s' already exists" % pool_name)
        else:
            # Create a new pool
            libv_pvt.pre_pool(pool_name, pool_type, pool_target,
                              emulated_image, image_size=emulated_image_size)
            pool_info = libv_pool.pool_info(pool_name)
            for key in pool_info:
                logging.debug("Pool info: %s = %s", key, pool_info[key])
            # Deal with vol_new_capacity, '--capacity' only accepts an integer
            if vol_new_capacity == "pool_available":
                pool_avai = pool_info["Available"].split()
                vol_new_capacity = pool_avai[0].split('.')[0] + pool_avai[1]
            if vol_new_capacity == "pool_capacity":
                pool_capa = pool_info["Capacity"].split()
                vol_new_capacity = pool_capa[0].split('.')[0] + pool_capa[1]

        # Create a volume
        libv_pvt.pre_vol(vol_name=vol_name, vol_format=vol_format,
                         capacity=vol_capacity, allocation=None,
                         pool_name=pool_name)
        libv_vol = libvirt_storage.PoolVolume(pool_name)
        check_vol_info(libv_vol, vol_name)

        # The volume size may not be exactly what we expect after resize, e.g.:
        # 1) vol_new_capacity = 1b with --delta option, the volume size will
        #    not change;
        # 2) vol_new_capacity = 1KB with --delta option, the volume size will
        #    increase by 1024, not 1000
        # So we can disable the volume size check after resize
        if check_vol_size:
            vol_path = libv_vol.list_volumes()[vol_name]
            expect_info = get_expect_info(vol_new_capacity, vol_path,
                                          resize_option)
            logging.debug("Expect volume info: %s", expect_info)
        else:
            expect_info = {}

        # Run vol-resize
        result = virsh.vol_resize(vol_name, vol_new_capacity, pool_name,
                                  resize_option, uri=uri,
                                  unprivileged_user=unpri_user,
                                  debug=True)
        if not status_error:
            if result.exit_status != 0:
                raise error.TestFail(result.stdout.strip())
            else:
                #if check_vol_info(libv_vol, vol_name, expect_info):
                logging.debug("Volume %s resize check pass.", vol_name)
                #else:
                #    raise error.TestFail("Volume %s resize check fail." %
                #                         vol_name)
        elif result.exit_status == 0:
            raise error.TestFail("Expect resize fail but run successfully.")
    finally:
        # Clean up
        try:
            libv_pvt.cleanup_pool(pool_name, pool_type, pool_target,
                                  emulated_image)
        except error.TestFail as detail:
            logging.error(str(detail))
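The 'pool_available'/'pool_capacity' handling above turns a pool-info value such as '19.53 GiB' into the integer-plus-unit string that vol-resize accepts; as a short worked example:

pool_avai = "19.53 GiB".split()          # ['19.53', 'GiB']
vol_new_capacity = pool_avai[0].split('.')[0] + pool_avai[1]
print(vol_new_capacity)                  # '19GiB'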
def run(test, params, env):
    """
    Do test for vol-download and vol-upload

    Basic steps are
    1. Create pool with type defined in cfg
    2. Create image with writing data in it
    3. Get md5 value before operation
    4. Do vol-download/upload with options(offset, length)
    5. Check md5 value after operation
    """

    pool_type = params.get("vol_download_upload_pool_type")
    pool_name = params.get("vol_download_upload_pool_name")
    pool_target = params.get("vol_download_upload_pool_target")
    if os.path.dirname(pool_target) == "":
        pool_target = os.path.join(test.tmpdir, pool_target)
    vol_name = params.get("vol_download_upload_vol_name")
    file_name = params.get("vol_download_upload_file_name")
    file_path = os.path.join(test.tmpdir, file_name)
    offset = params.get("vol_download_upload_offset")
    length = params.get("vol_download_upload_length")
    capacity = params.get("vol_download_upload_capacity")
    allocation = params.get("vol_download_upload_allocation")
    frmt = params.get("vol_download_upload_format")
    operation = params.get("vol_download_upload_operation")
    create_vol = ("yes" == params.get("vol_download_upload_create_vol", "yes"))
    setup_libvirt_polkit = "yes" == params.get("setup_libvirt_polkit")

    # libvirt acl polkit related params
    uri = params.get("virsh_uri")
    unpri_user = params.get('unprivileged_user')
    if unpri_user:
        if unpri_user.count('EXAMPLE'):
            unpri_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if setup_libvirt_polkit:
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    try:
        pvt = utlv.PoolVolumeTest(test, params)
        pvt.pre_pool(pool_name,
                     pool_type,
                     pool_target,
                     "volumetest",
                     pre_disk_vol=["50M"])
        # According to BZ#1138523, we need to inspect the right name
        # (disk partition) for new volume
        if pool_type == "disk":
            vol_name = utlv.new_disk_vol_name(pool_name)
            if vol_name is None:
                raise error.TestError("Fail to generate volume name")
            # update polkit rule as the volume name changed
            if setup_libvirt_polkit:
                vol_pat = r"lookup\('vol_name'\) == ('\S+')"
                new_value = "lookup('vol_name') == '%s'" % vol_name
                utlv.update_polkit_rule(params, vol_pat, new_value)
        if create_vol:
            pvt.pre_vol(vol_name, frmt, capacity, allocation, pool_name)

        vol_list = virsh.vol_list(pool_name).stdout.strip()
        # iscsi volume name is different from others
        if pool_type == "iscsi":
            vol_name = vol_list.split('\n')[2].split()[0]

        vol_path = virsh.vol_path(vol_name, pool_name,
                                  ignore_status=False).stdout.strip()
        logging.debug("vol_path is %s", vol_path)

        # Add command options
        if pool_type is not None:
            options = " --pool %s" % pool_name
        if offset is not None:
            options += " --offset %s" % offset
            offset = int(offset)
        else:
            offset = 0

        if length is not None:
            options += " --length %s" % length
            length = int(length)
        else:
            length = 0
        logging.debug("%s options are %s", operation, options)

        if operation == "upload":
            # write data to the file
            write_file(file_path)

            # Set length for calculate the offset + length in the following
            # func get_pre_post_digest() and digest()
            if length == 0:
                length = 1048576

            def get_pre_post_digest():
                """
                Get pre region and post region digest if have offset and length
                :return: pre digest and post digest
                """
                # Get digest of pre region before offset
                if offset != 0:
                    digest_pre = digest(vol_path, 0, offset)
                else:
                    digest_pre = 0
                logging.debug("pre region digest read from %s 0-%s is %s",
                              vol_path, offset, digest_pre)
                # Get digest of post region after offset+length
                digest_post = digest(vol_path, offset + length, 0)
                logging.debug("post region digest read from %s %s-0 is %s",
                              vol_path, offset + length, digest_post)

                return (digest_pre, digest_post)

            # Get pre and post digest before operation for compare
            (ori_pre_digest, ori_post_digest) = get_pre_post_digest()
            ori_digest = digest(file_path, 0, 0)
            logging.debug("ori digest read from %s is %s", file_path,
                          ori_digest)

            if setup_libvirt_polkit:
                utils.run("chmod 666 %s" % file_path)

            # Do volume upload
            result = virsh.vol_upload(vol_name,
                                      file_path,
                                      options,
                                      unprivileged_user=unpri_user,
                                      uri=uri,
                                      debug=True)
            if result.exit_status == 0:
                # Get digest after operation
                (aft_pre_digest, aft_post_digest) = get_pre_post_digest()
                aft_digest = digest(vol_path, offset, length)
                logging.debug("aft digest read from %s is %s", vol_path,
                              aft_digest)

                # Compare the pre and post part before and after
                if ori_pre_digest == aft_pre_digest and \
                   ori_post_digest == aft_post_digest:
                    logging.info("file pre and aft digest match")
                else:
                    raise error.TestFail(
                        "file pre or post digests do not "
                        "match in %s" % operation)

        if operation == "download":
            # Write data to the volume
            write_file(vol_path)

            # Record the digest value before operation
            ori_digest = digest(vol_path, offset, length)
            logging.debug("original digest read from %s is %s", vol_path,
                          ori_digest)

            utils.run("touch %s" % file_path)
            if setup_libvirt_polkit:
                utils.run("chmod 666 %s" % file_path)

            # Do volume download
            result = virsh.vol_download(vol_name,
                                        file_path,
                                        options,
                                        unprivileged_user=unpri_user,
                                        uri=uri,
                                        debug=True)
            if result.exit_status == 0:
                # Get digest after operation
                aft_digest = digest(file_path, 0, 0)
                logging.debug("new digest read from %s is %s", file_path,
                              aft_digest)

        if result.exit_status != 0:
            raise error.TestFail("Fail to %s volume: %s" %
                                 (operation, result.stderr))

        # Compare the change part on volume and file
        if ori_digest == aft_digest:
            logging.info("file digests match, volume %s suceed", operation)
        else:
            raise error.TestFail(
                "file digests do not match, volume %s failed" % operation)

    finally:
        pvt.cleanup_pool(pool_name, pool_type, pool_target, "volumetest")
        if os.path.isfile(file_path):
            os.remove(file_path)
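digest() is referenced above but not shown; a plausible sketch of the md5-over-a-region helper it implies (the offset/length semantics, with length 0 meaning 'read to end of file', are assumptions based on how it is called):

import hashlib

def digest(path, offset, length):
    """Return the md5 hex digest of the byte range starting at offset.

    A length of 0 is taken to mean 'read to the end of the file'.
    """
    md5 = hashlib.md5()
    with open(path, 'rb') as data_file:
        data_file.seek(offset)
        remaining = length if length else -1
        while True:
            chunk = data_file.read(65536 if remaining < 0 else min(65536, remaining))
            if not chunk:
                break
            md5.update(chunk)
            if remaining > 0:
                remaining -= len(chunk)
                if remaining == 0:
                    break
    return md5.hexdigest()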
Example #29
def run(test, params, env):
    """
    1. Create a pool
    2. Create n number of volumes(vol-create-as)
    3. Check the volume details from the following commands
       vol-info
       vol-key
       vol-list
       vol-name
       vol-path
       vol-pool
       qemu-img info
    4. Delete the volume and check in vol-list
    5. Repeat the steps for number of volumes given
    6. Delete the pool and target
    TODO: Handle negative testcases
    """

    def delete_volume(expected_vol):
        """
        Deletes Volume
        """
        pool_name = expected_vol['pool_name']
        vol_name = expected_vol['name']
        pv = libvirt_storage.PoolVolume(pool_name)
        if not pv.delete_volume(vol_name):
            raise error.TestFail("Delete volume failed." % vol_name)
        else:
            logging.debug("Volume: %s successfully deleted on pool: %s",
                          vol_name, pool_name)

    def get_vol_list(pool_name, vol_name):
        """
        Parse the volume list
        """
        output = virsh.vol_list(pool_name, "--details")
        rg = re.compile(
            r'^(\S+)\s+(\S+)\s+(\S+)\s+(\d+.\d+\s\S+)\s+(\d+.\d+.*)')
        vol = {}
        vols = []
        volume_detail = None
        for line in output.stdout.splitlines():
            match = re.search(rg, line.lstrip())
            if match is not None:
                vol['name'] = match.group(1)
                vol['path'] = match.group(2)
                vol['type'] = match.group(3)
                vol['capacity'] = match.group(4)
                vol['allocation'] = match.group(5)
                vols.append(vol)
                vol = {}
        for volume in vols:
            if volume['name'] == vol_name:
                volume_detail = volume
        return volume_detail

    def norm_capacity(capacity):
        """
        Normalize the capacity values to bytes
        """
        # Normalize all values to bytes
        norm_capacity = {}
        des = {'B': 'B', 'bytes': 'B', 'b': 'B', 'kib': 'K',
               'KiB': 'K', 'K': 'K', 'k': 'K', 'KB': 'K',
               'mib': 'M', 'MiB': 'M', 'M': 'M', 'm': 'M',
               'MB': 'M', 'gib': 'G', 'GiB': 'G', 'G': 'G',
               'g': 'G', 'GB': 'G', 'Gb': 'G', 'tib': 'T',
               'TiB': 'T', 'TB': 'T', 'T': 'T', 't': 'T'
               }
        val = {'B': 1,
               'K': 1024,
               'M': 1048576,
               'G': 1073741824,
               'T': 1099511627776
               }

        reg_list = re.compile(r'(\S+)\s(\S+)')
        match_list = re.search(reg_list, capacity['list'])
        if match_list is not None:
            mem_value = float(match_list.group(1))
            norm = val[des[match_list.group(2)]]
            norm_capacity['list'] = int(mem_value * norm)
        else:
            raise error.TestFail("Error in parsing capacity value in"
                                 " virsh vol-list")

        match_info = re.search(reg_list, capacity['info'])
        if match_info is not None:
            mem_value = float(match_info.group(1))
            norm = val[des[match_info.group(2)]]
            norm_capacity['info'] = int(mem_value * norm)
        else:
            raise error.TestFail("Error in parsing capacity value "
                                 "in virsh vol-info")

        norm_capacity['qemu_img'] = capacity['qemu_img']
        norm_capacity['xml'] = int(capacity['xml'])

        return norm_capacity

    def check_vol(expected, avail=True):
        """
        Checks the expected volume details with actual volume details from
        vol-dumpxml
        vol-list
        vol-info
        vol-key
        vol-path
        qemu-img info
        """
        error_count = 0

        pv = libvirt_storage.PoolVolume(expected['pool_name'])
        vol_exists = pv.volume_exists(expected['name'])
        if vol_exists:
            if not avail:
                error_count += 1
                logging.error("Expect volume %s not exists but find it",
                              expected['name'])
                return error_count
        else:
            if avail:
                error_count += 1
                logging.error("Expect volume %s exists but not find it",
                              expected['name'])
                return error_count
            else:
                logging.info("Volume %s checked successfully for deletion",
                             expected['name'])
                return error_count

        actual_list = get_vol_list(expected['pool_name'], expected['name'])
        actual_info = pv.volume_info(expected['name'])
        # Get values from vol-dumpxml
        volume_xml = vol_xml.VolXML.new_from_vol_dumpxml(expected['name'],
                                                         expected['pool_name'])

        # Check against virsh vol-key
        vol_key = virsh.vol_key(expected['name'], expected['pool_name'])
        if vol_key.stdout.strip() != volume_xml.key:
            logging.error("Volume key is mismatch \n%s"
                          "Key from xml: %s\nKey from command: %s",
                          expected['name'], volume_xml.key, vol_key)
            error_count += 1
        else:
            logging.debug("virsh vol-key for volume: %s successfully"
                          " checked against vol-dumpxml", expected['name'])

        # Check against virsh vol-name
        get_vol_name = virsh.vol_name(expected['path'])
        if get_vol_name.stdout.strip() != expected['name']:
            logging.error("Volume name mismatch\n"
                          "Expected name: %s\nOutput of vol-name: %s",
                          expected['name'], get_vol_name.stdout.strip())
            error_count += 1

        # Check against virsh vol-path
        vol_path = virsh.vol_path(expected['name'], expected['pool_name'])
        if expected['path'] != vol_path.stdout.strip():
            logging.error("Volume path mismatch for volume: %s\n"
                          "Expected path: %s\nOutput of vol-path: %s\n",
                          expected['name'],
                          expected['path'], vol_path)
            error_count += 1
        else:
            logging.debug("virsh vol-path for volume: %s successfully checked"
                          " against created volume path", expected['name'])

        # Check path against virsh vol-list
        if expected['path'] != actual_list['path']:
            logging.error("Volume path mismatch for volume:%s\n"
                          "Expected Path: %s\nPath from virsh vol-list: %s",
                          expected['name'], expected['path'],
                          actual_list['path'])
            error_count += 1
        else:
            logging.debug("Path of volume: %s from virsh vol-list "
                          "successfully checked against created "
                          "volume path", expected['name'])

        # Check path against virsh vol-dumpxml
        if expected['path'] != volume_xml.path:
            logging.error("Volume path mismatch for volume: %s\n"
                          "Expected Path: %s\nPath from virsh vol-dumpxml: %s",
                          expected['name'], expected['path'], volume_xml.path)
            error_count += 1

        else:
            logging.debug("Path of volume: %s from virsh vol-dumpxml "
                          "successfully checked against created volume path",
                          expected['name'])

        # Check type against virsh vol-list
        if expected['type'] != actual_list['type']:
            logging.error("Volume type mismatch for volume: %s\n"
                          "Expected Type: %s\n Type from vol-list: %s",
                          expected['name'], expected['type'],
                          actual_list['type'])
            error_count += 1
        else:
            logging.debug("Type of volume: %s from virsh vol-list "
                          "successfully checked against the created "
                          "volume type", expected['name'])

        # Check type against virsh vol-info
        if expected['type'] != actual_info['Type']:
            logging.error("Volume type mismatch for volume: %s\n"
                          "Expected Type: %s\n Type from vol-info: %s",
                          expected['name'], expected['type'],
                          actual_info['Type'])
            error_count += 1
        else:
            logging.debug("Type of volume: %s from virsh vol-info successfully"
                          " checked against the created volume type",
                          expected['name'])

        # Check name against virsh vol-info
        if expected['name'] != actual_info['Name']:
            logging.error("Volume name mismatch for volume: %s\n"
                          "Expected name: %s\n Name from vol-info: %s",
                          expected['name'],
                          expected['name'], actual_info['Name'])
            error_count += 1
        else:
            logging.debug("Name of volume: %s from virsh vol-info successfully"
                          " checked against the created volume name",
                          expected['name'])

        # Check format against qemu-img info
        img_info = utils_misc.get_image_info(expected['path'])
        if expected['format']:
            if expected['format'] != img_info['format']:
                logging.error("Volume format mismatch for volume: %s\n"
                              "Expected format: %s\n"
                              "Format from qemu-img info: %s",
                              expected['name'], expected['format'],
                              img_info['format'])
                error_count += 1
            else:
                logging.debug("Format of volume: %s from qemu-img info "
                              "checked successfully against the created "
                              "volume format", expected['name'])

        # Check format against vol-dumpxml
        if expected['format']:
            if expected['format'] != volume_xml.format:
                logging.error("Volume format mismatch for volume: %s\n"
                              "Expected format: %s\n"
                              "Format from vol-dumpxml: %s",
                              expected['name'], expected['format'],
                              volume_xml.format)
                error_count += 1
            else:
                logging.debug("Format of volume: %s from virsh vol-dumpxml "
                              "checked successfully against the created"
                              " volume format", expected['name'])

        logging.info("Expected encrypt format: %s",
                     expected['encrypt_format'])
        # Check encrypt against vol-dumpxml
        if expected['encrypt_format']:
            # The 'default' encryption format resolves to a specific value
            # (qcow), so just log whatever vol-dumpxml reports here
            logging.debug("Encryption format of volume '%s' is: %s",
                          expected['name'], volume_xml.encryption.format)
            # And also output encryption secret uuid
            secret_uuid = volume_xml.encryption.secret['uuid']
            logging.debug("Encryption secret of volume '%s' is: %s",
                          expected['name'], secret_uuid)
            if expected['encrypt_secret']:
                if expected['encrypt_secret'] != secret_uuid:
                    logging.error("Encryption secret mismatch for volume: %s\n"
                                  "Expected secret uuid: %s\n"
                                  "Secret uuid from vol-dumpxml: %s",
                                  expected['name'], expected['encrypt_secret'],
                                  secret_uuid)
                    error_count += 1
                else:
                    # When no encryption secret value is set, libvirt
                    # generates one automatically at volume creation time
                    logging.debug("Volume encryption secret is %s", secret_uuid)

        # Check pool name against vol-pool
        vol_pool = virsh.vol_pool(expected['path'])
        if expected['pool_name'] != vol_pool.stdout.strip():
            logging.error("Pool name mismatch for volume: %s against"
                          "virsh vol-pool", expected['name'])
            error_count += 1
        else:
            logging.debug("Pool name of volume: %s checked successfully"
                          " against the virsh vol-pool", expected['name'])

        norm_cap = {}
        capacity = {}
        capacity['list'] = actual_list['capacity']
        capacity['info'] = actual_info['Capacity']
        capacity['xml'] = volume_xml.capacity
        capacity['qemu_img'] = img_info['vsize']
        norm_cap = norm_capacity(capacity)
        delta_size = int(params.get('delta_size', "1024"))
        if abs(expected['capacity'] - norm_cap['list']) > delta_size:
            logging.error("Capacity mismatch for volume: %s against virsh"
                          " vol-list\nExpected: %s\nActual: %s",
                          expected['name'], expected['capacity'],
                          norm_cap['list'])
            error_count += 1
        else:
            logging.debug("Capacity value checked successfully against"
                          " virsh vol-list for volume %s", expected['name'])

        if abs(expected['capacity'] - norm_cap['info']) > delta_size:
            logging.error("Capacity mismatch for volume: %s against virsh"
                          " vol-info\nExpected: %s\nActual: %s",
                          expected['name'], expected['capacity'],
                          norm_cap['info'])
            error_count += 1
        else:
            logging.debug("Capacity value checked successfully against"
                          " virsh vol-info for volume %s", expected['name'])

        if abs(expected['capacity'] - norm_cap['xml']) > delta_size:
            logging.error("Capacity mismatch for volume: %s against virsh"
                          " vol-dumpxml\nExpected: %s\nActual: %s",
                          expected['name'], expected['capacity'],
                          norm_cap['xml'])
            error_count += 1
        else:
            logging.debug("Capacity value checked successfully against"
                          " virsh vol-dumpxml for volume: %s",
                          expected['name'])

        if abs(expected['capacity'] - norm_cap['qemu_img']) > delta_size:
            logging.error("Capacity mismatch for volume: %s against "
                          "qemu-img info\nExpected: %s\nActual: %s",
                          expected['name'], expected['capacity'],
                          norm_cap['qemu_img'])
            error_count += 1
        else:
            logging.debug("Capacity value checked successfully against"
                          " qemu-img info for volume: %s",
                          expected['name'])
        return error_count

    def get_all_secrets():
        """
        Return all existing libvirt secret UUIDs in a list
        """
        secret_list = []
        secrets = virsh.secret_list().stdout.strip()
        for secret in secrets.splitlines()[2:]:
            secret_list.append(secret.strip().split()[0])
        return secret_list

    # Initialize the variables
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    if os.path.dirname(pool_target) == "":
        pool_target = os.path.join(test.tmpdir, pool_target)
    vol_name = params.get("volume_name")
    vol_number = int(params.get("number_of_volumes", "2"))
    capacity = params.get("volume_size", "1048576")
    allocation = params.get("volume_allocation", "1048576")
    vol_format = params.get("volume_format")
    source_name = params.get("gluster_source_name", "gluster-vol1")
    source_path = params.get("gluster_source_path", "/")
    encrypt_format = params.get("vol_encrypt_format")
    encrypt_secret = params.get("encrypt_secret")
    emulated_image = params.get("emulated_image")
    emulated_image_size = params.get("emulated_image_size")
    if not libvirt_version.version_compare(1, 0, 0):
        if pool_type == "gluster":
            raise error.TestNAError("Gluster pool is not supported in current"
                                    " libvirt version.")

    try:
        str_capa = utils_misc.normalize_data_size(capacity, "B")
        int_capa = int(str(str_capa).split('.')[0])
    except ValueError:
        raise error.TestError("Translate size %s to 'B' failed" % capacity)
    try:
        str_capa = utils_misc.normalize_data_size(allocation, "B")
        int_allo = int(str(str_capa).split('.')[0])
    except ValueError:
        raise error.TestError("Translate size %s to 'B' failed" % allocation)

    # Stop multipathd to avoid pool start failures (for fs-like pools, the
    # newly added disk may be in use by device-mapper, so starting the pool
    # would report a "disk already mounted" error).
    multipathd = service.Factory.create_service("multipathd")
    multipathd_status = multipathd.status()
    if multipathd_status:
        multipathd.stop()

    # Get existing libvirt secrets before the test
    ori_secrets = get_all_secrets()
    expected_vol = {}
    vol_type = 'file'
    if pool_type in ['disk', 'logical']:
        vol_type = 'block'
    if pool_type == 'gluster':
        vol_type = 'network'
    logging.debug("Debug:\npool_name:%s\npool_type:%s\npool_target:%s\n"
                  "vol_name:%s\nvol_number:%s\ncapacity:%s\nallocation:%s\n"
                  "vol_format:%s", pool_name, pool_type, pool_target,
                  vol_name, vol_number, capacity, allocation, vol_format)

    libv_pvt = utlv.PoolVolumeTest(test, params)
    # Run Testcase
    total_err_count = 0
    try:
        # Create a new pool
        libv_pvt.pre_pool(pool_name=pool_name,
                          pool_type=pool_type,
                          pool_target=pool_target,
                          emulated_image=emulated_image,
                          image_size=emulated_image_size,
                          source_name=source_name,
                          source_path=source_path)
        for i in range(vol_number):
            volume_name = "%s_%d" % (vol_name, i)
            expected_vol['pool_name'] = pool_name
            expected_vol['pool_type'] = pool_type
            expected_vol['pool_target'] = pool_target
            expected_vol['capacity'] = int_capa
            expected_vol['allocation'] = int_allo
            expected_vol['format'] = vol_format
            expected_vol['name'] = volume_name
            expected_vol['type'] = vol_type
            expected_vol['encrypt_format'] = encrypt_format
            expected_vol['encrypt_secret'] = encrypt_secret
            # Create the volume
            if pool_type != "gluster":
                expected_vol['path'] = pool_target + '/' + volume_name
                new_volxml = vol_xml.VolXML()
                new_volxml.name = volume_name
                new_volxml.capacity = int_capa
                new_volxml.allocation = int_allo
                if vol_format:
                    new_volxml.format = vol_format
                encrypt_dict = {}
                if encrypt_format:
                    encrypt_dict.update({"format": encrypt_format})
                if encrypt_secret:
                    encrypt_dict.update({"secret": {'uuid': encrypt_secret}})
                if encrypt_dict:
                    new_volxml.encryption = new_volxml.new_encryption(**encrypt_dict)
                logging.debug("Volume XML for creation:\n%s", str(new_volxml))
                virsh.vol_create(pool_name, new_volxml.xml, debug=True)
            else:
                ip_addr = utlv.get_host_ipv4_addr()
                expected_vol['path'] = "gluster://%s/%s/%s" % (ip_addr,
                                                               source_name,
                                                               volume_name)
                utils.run("qemu-img create -f %s %s %s" % (vol_format,
                                                           expected_vol['path'],
                                                           capacity))
            virsh.pool_refresh(pool_name)
            # Check volumes
            total_err_count += check_vol(expected_vol)
            # Delete volume and check for results
            delete_volume(expected_vol)
            total_err_count += check_vol(expected_vol, False)
        if total_err_count > 0:
            raise error.TestFail("Get %s errors when checking volume" % total_err_count)
    finally:
        # Clean up
        for sec in get_all_secrets():
            if sec not in ori_secrets:
                virsh.secret_undefine(sec)
        try:
            libv_pvt.cleanup_pool(pool_name, pool_type, pool_target,
                                  emulated_image, source_name=source_name)
        except error.TestFail, detail:
            logging.error(str(detail))
        if multipathd_status:
            multipathd.start()
Exemplo n.º 30
0
def run(test, params, env):
    '''
    Test the command virsh pool-create-as

    (1) Prepare backend storage device
    (2) Define secret xml and set secret value
    (3) Test pool-create-as or virsh pool-define with authentication
    '''
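
    # For reference only (values below are illustrative, not taken from the
    # cfg), the kind of command this test ends up building looks roughly like:
    #   virsh pool-create-as iscsi-auth-pool iscsi --target /dev/disk/by-path \
    #       --source-host 127.0.0.1 --source-dev iqn.2019-09.com.example:t1 \
    #       --auth-type chap --auth-username redhat --secret-usage libvirtiscsi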

    pool_options = params.get('pool_options', '')
    pool_name = params.get('pool_name')
    pool_type = params.get('pool_type')
    pool_target = params.get('pool_target', '')
    status_error = params.get('status_error') == "yes"

    # iscsi options
    emulated_size = params.get("iscsi_image_size", "1")
    iscsi_host = params.get("iscsi_host", "127.0.0.1")
    chap_user = params.get("iscsi_user")
    chap_passwd = params.get("iscsi_password")

    # ceph options
    ceph_auth_user = params.get("ceph_auth_user")
    ceph_auth_key = params.get("ceph_auth_key")
    ceph_host_ip = params.get("ceph_host_ip", "EXAMPLE_HOSTS")
    ceph_mon_ip = params.get("ceph_mon_ip", "EXAMPLE_MON_HOST")
    ceph_disk_name = params.get("ceph_disk_name", "EXAMPLE_SOURCE_NAME")
    ceph_client_name = params.get("ceph_client_name")
    ceph_client_key = params.get("ceph_client_key")
    key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key")
    key_opt = "--keyring %s" % key_file

    # auth options
    auth_usage = (params.get('auth_usage') == 'yes')
    auth_uuid = (params.get('auth_uuid') == 'yes')
    sec_ephemeral = params.get("secret_ephemeral", "no")
    sec_private = params.get("secret_private", "yes")
    sec_desc = params.get("secret_description")
    auth_type = params.get("auth_type")
    sec_usage = params.get("secret_usage_type")
    sec_target = params.get("secret_usage_target")
    sec_name = params.get("secret_name")
    auth_sec_dict = {
        "sec_ephemeral": sec_ephemeral,
        "sec_private": sec_private,
        "sec_desc": sec_desc,
        "sec_usage": sec_usage,
        "sec_target": sec_target,
        "sec_name": sec_name
    }

    if sec_usage == "iscsi":
        auth_username = chap_user
        sec_password = chap_passwd
        secret_usage = sec_target

    if sec_usage == "ceph":
        auth_username = ceph_auth_user
        sec_password = ceph_auth_key
        secret_usage = sec_name

    if pool_target and not os.path.isdir(pool_target):
        if os.path.isfile(pool_target):
            logging.error('<target> must be a directory')
        else:
            os.makedirs(pool_target)

    def setup_ceph_auth():
        disk_path = ("rbd:%s:mon_host=%s" % (ceph_disk_name, ceph_mon_ip))
        disk_path += (":id=%s:key=%s" % (ceph_auth_user, ceph_auth_key))

        if not utils_package.package_install(["ceph-common"]):
            test.error("Failed to install ceph-common")

        with open(key_file, 'w') as f:
            f.write("[%s]\n\tkey = %s\n" % (ceph_client_name, ceph_client_key))

        # Delete the disk if it exists
        cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
               "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name))
        process.run(cmd, ignore_status=True, shell=True)

        # Create a local image and make a filesystem on it.
        img_file = os.path.join(data_dir.get_tmp_dir(), "test.img")
        disk_cmd = ("qemu-img create -f raw {0} 10M && mkfs.ext4 -F {0}".
                    format(img_file))
        process.run(disk_cmd, ignore_status=False, shell=True)

        # Convert the image to remote storage
        # Ceph can only support raw format
        disk_cmd = ("qemu-img convert -O %s %s %s" %
                    ("raw", img_file, disk_path))
        process.run(disk_cmd, ignore_status=False, shell=True)

    def setup_iscsi_auth():
        iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
            is_setup=True,
            is_login=False,
            image_size=emulated_size,
            chap_user=chap_user,
            chap_passwd=chap_passwd)
        return iscsi_target

    def check_auth_in_xml(dparams):
        sourcexml = pool_xml.PoolXML.new_from_dumpxml(pool_name).get_source()
        with open(sourcexml.xml) as xml_f:
            logging.debug("Source XML is: \n%s", xml_f.read())

        # Check result
        try:
            for name, v_expect in dparams.items():
                if v_expect != sourcexml[name]:
                    test.fail("Expect to find %s=%s, but got %s=%s" %
                              (name, v_expect, name, sourcexml[name]))
        except xcepts.LibvirtXMLNotFoundError as details:
            if "usage not found" in str(details) and auth_uuid:
                pass  # Not an auth_usage test
            elif "uuid not found" in str(details) and auth_usage:
                pass  # Not an auth_uuid test
            else:
                test.fail(details)

    def check_result(result, expect_error=False):
        # pool-define-as return CmdResult
        if isinstance(result, process.CmdResult):
            result = (result.exit_status == 0)  # True means run success

        if expect_error:
            if result:
                test.fail("Expect to fail but run success")
            else:
                logging.info("It's an expected error")
        else:
            if not result:
                test.fail("Expect to succeed but run failure")

    if not libvirt_version.version_compare(3, 9, 0):
        test.cancel("Pool create/define with authentication"
                    " not support in this libvirt version")

    sec_uuid = ""
    img_file = ""
    libvirt_pool = libvirt_storage.StoragePool()
    try:
        # Create secret xml and set value
        encode = True
        if sec_usage == "ceph":
            encode = False  # Ceph key already encoded
        sec_uuid = libvirt.create_secret(auth_sec_dict)
        virsh.secret_set_value(sec_uuid,
                               sec_password,
                               encode=encode,
                               debug=True)

        if sec_usage == "iscsi":
            iscsi_dev = setup_iscsi_auth()
            pool_options += (" --source-host %s --source-dev %s"
                             " --auth-type %s --auth-username %s" %
                             (iscsi_host, iscsi_dev, auth_type, auth_username))

        if sec_usage == "ceph":
            setup_ceph_auth()
            rbd_pool = ceph_disk_name.split('/')[0]
            pool_options += (
                " --source-host %s --source-name %s"
                " --auth-type %s --auth-username %s" %
                (ceph_host_ip, rbd_pool, auth_type, auth_username))

        if auth_usage:
            pool_options += " --secret-usage %s" % secret_usage

        if auth_uuid:
            pool_options += " --secret-uuid %s" % sec_uuid

        # Run test cases
        func_name = params.get("test_func", "pool_create_as")
        logging.info('Perform test runner: %s', func_name)
        if func_name == "pool_create_as":
            func = virsh.pool_create_as
        if func_name == "pool_define_as":
            func = virsh.pool_define_as
        result = func(pool_name,
                      pool_type,
                      pool_target,
                      extra=pool_options,
                      debug=True)

        # Check status_error
        check_result(result, expect_error=status_error)
        if not status_error:
            # Check pool status
            pool_status = libvirt_pool.pool_state(pool_name)
            if ((pool_status == 'inactive' and func_name == "pool_define_as")
                    or
                (pool_status == "active" and func_name == "pool_create_as")):
                logging.info("Expected pool status:%s" % pool_status)
            else:
                test.fail("Not an expected pool status: %s" % pool_status)
            # Check pool dumpxml
            dict_expect = {
                "auth_type": auth_type,
                "auth_username": auth_username,
                "secret_usage": secret_usage,
                "secret_uuid": sec_uuid
            }
            check_auth_in_xml(dict_expect)
    finally:
        # Clean up
        logging.info("Start to cleanup")
        if os.path.exists(img_file):
            os.remove(img_file)
        virsh.secret_undefine(sec_uuid, ignore_status=True)
        libvirt.setup_or_cleanup_iscsi(is_setup=False)
        if libvirt_pool.pool_exists(pool_name):
            libvirt_pool.delete_pool(pool_name)
Exemplo n.º 31
0
def run(test, params, env):
    """
    Test the command virsh maxvcpus

    (1) Call virsh maxvcpus
    (2) Call virsh -c remote_uri maxvcpus
    (3) Call virsh maxvcpus with an unexpected option
    """

    # get the params from subtests.
    # params for general.
    option = params.get("virsh_maxvcpus_options")
    status_error = params.get("status_error")
    connect_arg = params.get("connect_arg", "")

    # params for transport connect.
    local_ip = params.get("local_ip", "ENTER.YOUR.LOCAL.IP")
    local_pwd = params.get("local_pwd", "ENTER.YOUR.LOCAL.ROOT.PASSWORD")
    server_ip = params.get("remote_ip", local_ip)
    server_pwd = params.get("remote_pwd", local_pwd)
    transport_type = params.get("connect_transport_type", "local")
    transport = params.get("connect_transport", "ssh")

    # check the config
    if (connect_arg == "transport" and transport_type == "remote"
            and local_ip.count("ENTER")):
        raise exceptions.TestSkipError("Parameter local_ip is not configured "
                                       "in remote test.")
    if (connect_arg == "transport" and transport_type == "remote"
            and local_pwd.count("ENTER")):
        raise exceptions.TestSkipError("Parameter local_pwd is not configured "
                                       "in remote test.")

    if connect_arg == "transport":
        canonical_uri_type = virsh.driver()

        if transport == "ssh":
            ssh_connection = utils_conn.SSHConnection(server_ip=server_ip,
                                                      server_pwd=server_pwd,
                                                      client_ip=local_ip,
                                                      client_pwd=local_pwd)
            try:
                ssh_connection.conn_check()
            except utils_conn.ConnectionError:
                ssh_connection.conn_setup()
                ssh_connection.conn_check()

            connect_uri = libvirt_vm.get_uri_with_transport(
                uri_type=canonical_uri_type,
                transport=transport,
                dest_ip=server_ip)
    else:
        connect_uri = connect_arg

    if libvirt_version.version_compare(2, 3, 0):
        try:
            maxvcpus = None
            # make sure we take maxvcpus from the right host; this helps in
            # case of a remote URI
            virsh_dargs = {'uri': connect_uri}
            virsh_instance = virsh.Virsh(virsh_dargs)
            try:
                capa = capability_xml.CapabilityXML(virsh_instance)
                host_arch = capa.arch
                maxvcpus = capa.get_guest_capabilities(
                )['hvm'][host_arch]['maxcpus']
            except:
                raise exceptions.TestFail("Failed to get maxvcpus from "
                                          "capabilities xml\n%s" % capa)
            if not maxvcpus:
                raise exceptions.TestFail("Failed to get guest section for "
                                          "host arch: %s from capabilities "
                                          "xml\n%s" % (host_arch, capa))
        except Exception, details:
            raise exceptions.TestFail(
                "Failed get the virsh instance with uri: "
                "%s\n Details: %s" % (connect_uri, details))
Exemplo n.º 32
0
def run(test, params, env):
    """
    Test command: virsh net-start.
    """
    # Gather test parameters
    uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri",
                                                      "default"))
    status_error = "yes" == params.get("status_error", "no")
    inactive_default = "yes" == params.get("net_start_inactive_default", "yes")
    net_ref = params.get("net_start_net_ref", "netname")  # default is tested
    extra = params.get("net_start_options_extra", "")  # extra cmd-line params.

    # make it easy to maintain
    virsh_dargs = {'uri': uri, 'debug': True, 'ignore_status': True}
    virsh_instance = virsh.VirshPersistent(**virsh_dargs)

    # libvirt acl polkit related params
    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    virsh_uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    # Get all network instance
    origin_nets = network_xml.NetworkXML.new_all_networks_dict(virsh_instance)

    # Prepare default network for following test.
    try:
        default_netxml = origin_nets['default']
    except KeyError:
        virsh_instance.close_session()
        test.cancel("Test requires default network to exist")
    try:
        # To confirm default network is active
        if not default_netxml.active:
            default_netxml.active = True

        # Deactivate the default network if the test requires it
        if inactive_default:
            logging.info("Stopping default network")
            default_netxml.active = False

        # State before run command
        origin_state = virsh_instance.net_state_dict()
        logging.debug("Origin network(s) state: %s", origin_state)

        if net_ref == "netname":
            net_ref = default_netxml.name
        elif net_ref == "netuuid":
            net_ref = default_netxml.uuid

        if params.get('setup_libvirt_polkit') == 'yes':
            virsh_dargs = {'uri': virsh_uri, 'unprivileged_user': unprivileged_user,
                           'debug': False, 'ignore_status': True}
        if params.get('net_start_readonly', 'no') == 'yes':
            virsh_dargs = {'uri': uri, 'debug': True, 'readonly': True, 'ignore_status': True}

        # Run test case
        if 'unprivileged_user' in virsh_dargs and status_error:
            test_virsh = virsh.VirshPersistent(unprivileged_user=virsh_dargs['unprivileged_user'])
            virsh_dargs.pop('unprivileged_user')
            result = test_virsh.net_start(net_ref, extra, **virsh_dargs)
            test_virsh.close_session()
        else:
            result = virsh.net_start(net_ref, extra, **virsh_dargs)
        logging.debug(result)
        status = result.exit_status

        # Get current net_stat_dict
        current_state = virsh_instance.net_state_dict()
        logging.debug("Current network(s) state: %s", current_state)
        if 'default' not in current_state:
            test.fail('Network "default" cannot be found')
        is_default_active = current_state['default']['active']

        # Check status_error
        if status_error:
            if not status:
                test.fail("Run successfully with wrong command!")
        else:
            if status:
                test.fail("Run failed with right command")
            else:
                if not is_default_active:
                    test.fail("Execute cmd successfully but "
                              "default is inactive actually.")
    finally:
        virsh_instance.close_session()
        virsh.net_start('default', debug=True, ignore_status=True)
Exemplo n.º 33
0
def run(test, params, env):
    """
    Test virsh vol-create-from command to cover the following matrix:

    pool = [source, destination]
    pool_type = [dir, disk, fs, logical, netfs, iscsi, scsi]
    volume_format = [raw, qcow2, qed]

    Note: both 'iscsi' and 'scsi' type pools don't support creating volumes
    via virsh, so they can't be used as destination pools. Also, a disk pool
    can't create a volume with a specified format.
    """

    src_pool_type = params.get("src_pool_type")
    src_pool_target = params.get("src_pool_target")
    src_emulated_image = params.get("src_emulated_image")
    src_vol_format = params.get("src_vol_format")
    dest_pool_type = params.get("dest_pool_type")
    dest_pool_target = params.get("dest_pool_target")
    dest_emulated_image = params.get("dest_emulated_image")
    dest_vol_format = params.get("dest_vol_format")
    prealloc_option = params.get("prealloc_option")
    status_error = params.get("status_error", "no")

    if not libvirt_version.version_compare(1, 0, 0):
        if "--prealloc-metadata" in prealloc_option:
            test.cancel("metadata preallocation not supported in"
                        " current libvirt version.")

    vol_file = ""
    try:
        # Create the src/dest pool
        src_pool_name = "virt-%s-pool" % src_pool_type
        dest_pool_name = "virt-%s-pool" % dest_pool_type

        pvt = utlv.PoolVolumeTest(test, params)
        pvt.pre_pool(src_pool_name,
                     src_pool_type,
                     src_pool_target,
                     src_emulated_image,
                     image_size="40M",
                     pre_disk_vol=["1M"])

        if src_pool_type != dest_pool_type:
            pvt.pre_pool(dest_pool_name,
                         dest_pool_type,
                         dest_pool_target,
                         dest_emulated_image,
                         image_size="100M",
                         pre_disk_vol=["2M"])

        # Print current pools for debugging
        logging.debug("Current pools:%s",
                      libvirt_storage.StoragePool().list_pools())

        # Create the src vol
        # 8M is the minimal size for logical volume(PPC)
        # 4M is the minimal size for logical volume(x86)
        vol_size = params.get("image_volume_size", "16777216")
        if src_pool_type in ["dir", "logical", "netfs", "fs"]:
            src_vol_name = "src_vol"
            pvt.pre_vol(vol_name=src_vol_name,
                        vol_format=src_vol_format,
                        capacity=vol_size,
                        allocation=None,
                        pool_name=src_pool_name)
        else:
            src_vol_name = list(utlv.get_vol_list(src_pool_name).keys())[0]
        # Prepare vol xml file
        dest_vol_name = "dest_vol"
        # According to BZ#1138523, we need to inspect the right name
        # (disk partition) for the new volume
        if dest_pool_type == "disk":
            dest_vol_name = utlv.new_disk_vol_name(dest_pool_name)
            if dest_vol_name is None:
                test.error("Fail to generate volume name")
            # Disk pools can't create volumes with a specified format
            dest_vol_format = ""
            prealloc_option = ""
        vol_xml = """
<volume>
  <name>%s</name>
  <capacity unit='bytes'>%s</capacity>
  <target>
    <format type='%s'/>
  </target>
</volume>
""" % (dest_vol_name, vol_size, dest_vol_format)
        logging.debug("Prepare the volume xml: %s", vol_xml)
        vol_file = os.path.join(data_dir.get_tmp_dir(), "dest_vol.xml")
        with open(vol_file, 'w') as xml_object:
            xml_object.write(vol_xml)

        # iSCSI and SCSI type pool can't create vols via virsh
        if dest_pool_type in ["iscsi", "scsi"]:
            test.fail("Unsupport create vol for %s type pool" % dest_pool_type)
        # Metadata preallocation is not supported for block volumes
        if dest_pool_type in ["disk", "logical"]:
            prealloc_option = ""
        pid_before_run = utils_misc.get_pid("libvirtd")
        # Run virsh vol-create-from to create the dest vol
        cmd_result = virsh.vol_create_from(dest_pool_name,
                                           vol_file,
                                           src_vol_name,
                                           src_pool_name,
                                           prealloc_option,
                                           ignore_status=True,
                                           debug=True)
        pid_after_run = utils_misc.get_pid("libvirtd")
        logging.debug("%s, %s" % (pid_before_run, pid_after_run))
        # According to bug 1363636, we need to check the libvirtd status
        if pid_before_run != pid_after_run:
            test.fail("libvirtd PID changed during vol-create-from,"
                      " please check whether it crashed")
        status = cmd_result.exit_status
        # Check result
        if status_error == "no":
            if status == 0:
                dest_pv = libvirt_storage.PoolVolume(dest_pool_name)
                dest_volumes = list(dest_pv.list_volumes().keys())
                logging.debug("Current volumes in %s: %s", dest_pool_name,
                              dest_volumes)
                if dest_vol_name not in dest_volumes:
                    test.fail("Can't find volume: % from pool: %s" %
                              (dest_vol_name, dest_pool_name))
            else:
                test.fail(cmd_result.stderr)
        else:
            if status:
                logging.debug("Expect error: %s", cmd_result.stderr)
            else:
                test.fail("Expect fail, but run successfully!")
    finally:
        # Cleanup: both src and dest should be removed
        try:
            pvt.cleanup_pool(src_pool_name, src_pool_type, src_pool_target,
                             src_emulated_image)
        except exceptions.TestFail as detail:
            logging.error(str(detail))
        if src_pool_type != dest_pool_type:
            pvt.cleanup_pool(dest_pool_name, dest_pool_type, dest_pool_target,
                             dest_emulated_image)
        if os.path.isfile(vol_file):
            os.remove(vol_file)
Exemplo n.º 34
0
def run(test, params, env):
    """
    Test send-key command, include all types of codeset and sysrq

    For normal sendkey test, we create a file to check the command
    execute by send-key. For sysrq test, check the /var/log/messages
    and guest status
    """

    if not virsh.has_help_command('send-key'):
        raise exceptions.TestSkipError("This version of libvirt does not "
                                       "support the send-key test")

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    status_error = ("yes" == params.get("status_error", "no"))
    options = params.get("sendkey_options", "")
    sysrq_test = ("yes" == params.get("sendkey_sysrq", "no"))
    sleep_time = int(params.get("sendkey_sleeptime", 2))
    readonly = params.get("readonly", False)
    username = params.get("username")
    password = params.get("password")
    create_file = params.get("create_file_name")
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise exceptions.TestSkipError("API acl test not supported in "
                                           "current libvirt version.")

    def send_line(send_str):
        """
        send string to guest with send-key and end with Enter
        """
        for send_ch in list(send_str):
            virsh.sendkey(vm_name, "KEY_%s" % send_ch.upper(),
                          ignore_status=False)

        virsh.sendkey(vm_name, "KEY_ENTER",
                      ignore_status=False)

    vm = env.get_vm(vm_name)
    session = vm.wait_for_login()

    if sysrq_test:
        # Is 'rsyslog' installed on guest? It'll be what writes out
        # to /var/log/messages
        if not utils_package.package_install("rsyslog", session):
            raise exceptions.TestFail("Fail to install rsyslog, make sure"
                                      " that you have usable repo in "
                                      "guest")

        # clear messages, restart rsyslog, and make sure it's running
        session.cmd("echo '' > /var/log/messages")
        session.cmd("service rsyslog restart")
        ps_stat = session.cmd_status("ps aux |grep rsyslog")
        if ps_stat != 0:
            raise exceptions.TestFail("rsyslog is not running in guest")

        # enable sysrq
        session.cmd("echo 1 > /proc/sys/kernel/sysrq")

    # make sure the environment is clear
    if create_file is not None:
        session.cmd("rm -rf %s" % create_file)

    try:
        # wait for tty1 started
        tty1_stat = "ps aux|grep tty[1]"
        timeout = 60
        while timeout >= 0 and \
                session.get_command_status(tty1_stat) != 0:
            time.sleep(1)
            timeout = timeout - 1
        if timeout < 0:
            raise exceptions.TestFail("Can not wait for tty1 started in 60s")

        # send user and passwd to guest to login
        send_line(username)
        time.sleep(2)
        send_line(password)
        time.sleep(2)

        output = virsh.sendkey(vm_name, options, readonly=readonly,
                               unprivileged_user=unprivileged_user,
                               uri=uri)
        time.sleep(sleep_time)
        if output.exit_status != 0:
            if status_error:
                logging.info("Failed to sendkey to guest as expected, Error:"
                             "%s.", output.stderr)
                return
            else:
                raise exceptions.TestFail("Failed to send key to guest, Error:"
                                          "%s." % output.stderr)
        elif status_error:
            raise exceptions.TestFail("Expect fail, but succeed indeed.")

        if create_file is not None:
            # check if created file exist
            cmd_ls = "ls %s" % create_file
            sec_status, sec_output = session.get_command_status_output(cmd_ls)
            if sec_status == 0:
                logging.info("Succeed to create file with send key")
            else:
                raise exceptions.TestFail("Fail to create file with send key, "
                                          "Error:%s" % sec_output)
        elif sysrq_test:
            # check /var/log/message info according to different key

            # Since there's no guarantee when messages will be written
            # we'll do a check and wait loop for up to 60 seconds
            timeout = 60
            while timeout >= 0:
                if "KEY_H" in options:
                    get_status = session.cmd_status("cat /var/log/messages|"
                                                    "grep 'SysRq.*HELP'")
                elif "KEY_M" in options:
                    get_status = session.cmd_status("cat /var/log/messages|"
                                                    "grep 'SysRq.*Show Memory'")
                elif "KEY_T" in options:
                    get_status = session.cmd_status("cat /var/log/messages|"
                                                    "grep 'SysRq.*Show State'")
                elif "KEY_B" in options:
                    client_session = vm.wait_for_login()
                    result = virsh.domstate(vm_name, '--reason', ignore_status=True)
                    output = result.stdout.strip()
                    logging.debug("The guest state: %s", output)
                    if not output.count("booted"):
                        get_status = 1
                    else:
                        get_status = 0
                    client_session.close()

                if get_status == 0:
                    timeout = -1
                else:
                    session.cmd("echo \"virsh sendkey waiting\" >> /var/log/messages")
                    time.sleep(1)
                    timeout = timeout - 1

            if get_status != 0:
                raise exceptions.TestFail("SysRq does not take effect in "
                                          "guest, options is %s" % options)
            else:
                logging.info("Succeed to send SysRq command")
        else:
            raise exceptions.TestFail("Test cfg file invalid: either "
                                      "sysrq_params or create_file_name must "
                                      "be defined")

    finally:
        if create_file is not None:
            session.cmd("rm -rf %s" % create_file)
        session.close()
Exemplo n.º 35
0
def run(test, params, env):
    """
    Test virsh {at|de}tach-device command.

    1) Prepare test environment and its parameters
    2) Operate virsh on one or more devices
    3) Check functionality of each device
    4) Check functionality of mmconfig option
    5) Restore domain
    6) Handle results
    """

    dev_obj = params.get("vadu_dev_objs")
    # Skip chardev hotplug on rhel6 host as it is not supported
    if "Serial" in dev_obj:
        if not libvirt_version.version_compare(1, 1, 0):
            raise error.TestNAError("You libvirt version not supported"
                                    " attach/detach Serial devices")

    logging.info("Preparing initial VM state")
    # Prepare test environment and its parameters
    test_params = TestParams(params, env, test)
    if test_params.start_vm:
        # Make sure VM is working
        test_params.main_vm.verify_alive()
        test_params.main_vm.wait_for_login().close()
    else:  # VM not supposed to be started
        if test_params.main_vm.is_alive():
            test_params.main_vm.destroy(gracefully=True)
    # Capture backup of original XML early in test
    test_params.vmxml = VMXML.new_from_inactive_dumpxml(
        test_params.main_vm.name)
    # All devices should share same access state
    test_params.virsh = virsh.Virsh(ignore_status=True)
    logging.info("Creating %d test device instances", len(test_params.devs))
    # Create test objects from cfg. class names via subclasses above
    test_devices = [globals()[class_name](test_params)  # instantiate
                    for class_name in test_params.devs]  # vadu_dev_objs
    operational_results = []
    preboot_results = []
    pstboot_results = []
    try:
        operational_action(test_params, test_devices, operational_results)
        # Fail early if attach-device return value is not expected
        analyze_results(test_params=test_params,
                        operational_results=operational_results)

        #  Can't do functional testing with a cold VM, only test hot-attach
        preboot_action(test_params, test_devices, preboot_results)

        logging.info("Preparing test VM state for post-boot functional testing")
        if test_params.start_vm:
            # Hard-reboot required
            test_params.main_vm.destroy(gracefully=True,
                                        free_mac_addresses=False)
        try:
            test_params.main_vm.start()
        except virt_vm.VMStartError as details:
            raise error.TestFail('VM Failed to start for some reason!: %s' % details)
        # Signal devices reboot is finished
        for test_device in test_devices:
            test_device.booted = True
        test_params.main_vm.wait_for_login().close()
        postboot_action(test_params, test_devices, pstboot_results)
        analyze_results(test_params=test_params,
                        preboot_results=preboot_results,
                        pstboot_results=pstboot_results)
    finally:
        logging.info("Restoring VM from backup, then checking results")
        test_params.main_vm.destroy(gracefully=False,
                                    free_mac_addresses=False)
        test_params.vmxml.undefine()
        test_params.vmxml.restore()  # Recover the original XML
        test_params.vmxml.define()
        if not test_params.start_vm:
            # Test began with not start_vm, shut it down.
            test_params.main_vm.destroy(gracefully=True)
        # Device cleanup can raise multiple exceptions, do it last:
        logging.info("Cleaning up test devices")
        test_params.cleanup(test_devices)
Exemplo n.º 36
0
def run(test, params, env):
    """
    Test remote access with TCP, TLS connection
    """

    test_dict = dict(params)
    vm_name = test_dict.get("main_vm")
    status_error = test_dict.get("status_error", "no")
    allowed_dn_str = params.get("tls_allowed_dn_list")
    if allowed_dn_str:
        allowed_dn_list = []
        if not libvirt_version.version_compare(1, 0, 0):
            # Reverse the order in the dn list to workaround the
            # feature changes between RHEL 6 and RHEL 7
            dn_list = allowed_dn_str.split(",")
            dn_list.reverse()
            allowed_dn_str = ','.join(dn_list)
        allowed_dn_list.append(allowed_dn_str)
        test_dict['tls_allowed_dn_list'] = allowed_dn_list
    transport = test_dict.get("transport")
    plus = test_dict.get("conn_plus", "+")
    config_ipv6 = test_dict.get("config_ipv6", "no")
    tls_port = test_dict.get("tls_port", "")
    listen_addr = test_dict.get("listen_addr", "0.0.0.0")
    ssh_port = test_dict.get("ssh_port", "")
    tcp_port = test_dict.get("tcp_port", "")
    server_ip = test_dict.get("server_ip")
    server_user = test_dict.get("server_user")
    server_pwd = test_dict.get("server_pwd")
    no_any_config = params.get("no_any_config", "no")
    sasl_type = test_dict.get("sasl_type", "gssapi")
    sasl_user_pwd = test_dict.get("sasl_user_pwd")
    sasl_allowed_users = test_dict.get("sasl_allowed_users")
    server_cn = test_dict.get("server_cn")
    custom_pki_path = test_dict.get("custom_pki_path")
    rm_client_key_cmd = test_dict.get("remove_client_key_cmd")
    rm_client_cert_cmd = test_dict.get("remove_client_cert_cmd")
    ca_cn_new = test_dict.get("ca_cn_new")
    no_verify = test_dict.get("no_verify", "no")
    ipv6_addr_des = test_dict.get("ipv6_addr_des")
    tls_sanity_cert = test_dict.get("tls_sanity_cert")
    restart_libvirtd = test_dict.get("restart_libvirtd", "yes")
    diff_virt_ver = test_dict.get("diff_virt_ver", "no")
    driver = test_dict.get("test_driver", "qemu")
    uri_path = test_dict.get("uri_path", "/system")
    virsh_cmd = params.get("virsh_cmd", "list")
    action = test_dict.get("libvirtd_action", "restart")
    uri_user = test_dict.get("uri_user", "")
    unix_sock_dir = test_dict.get("unix_sock_dir")
    mkdir_cmd = test_dict.get("mkdir_cmd")
    rmdir_cmd = test_dict.get("rmdir_cmd")
    adduser_cmd = test_dict.get("adduser_cmd")
    deluser_cmd = test_dict.get("deluser_cmd")
    auth_conf = test_dict.get("auth_conf")
    auth_conf_cxt = test_dict.get("auth_conf_cxt")
    polkit_pkla = test_dict.get("polkit_pkla")
    polkit_pkla_cxt = test_dict.get("polkit_pkla_cxt")
    ssh_setup = test_dict.get("ssh_setup", "no")
    tcp_setup = test_dict.get("tcp_setup", "no")
    tls_setup = test_dict.get("tls_setup", "no")
    unix_setup = test_dict.get("unix_setup", "no")
    ssh_recovery = test_dict.get("ssh_auto_recovery", "yes")
    tcp_recovery = test_dict.get("tcp_auto_recovery", "yes")
    tls_recovery = test_dict.get("tls_auto_recovery", "yes")
    unix_recovery = test_dict.get("unix_auto_recovery", "yes")

    port = ""
    # extra URI arguments
    extra_params = ""
    # it's used to clean up SSH, TLS, TCP, UNIX and SASL objs later
    objs_list = []
    # redirect LIBVIRT_DEBUG log into test log later
    test_dict["logfile"] = test.logfile

    # Make sure all of parameters are assigned a valid value
    check_parameters(test_dict, test)
    # Make sure libvirtd on remote is running
    server_session = remote.wait_for_login('ssh', server_ip, '22',
                                           server_user, server_pwd,
                                           r"[\#\$]\s*$")

    remote_libvirtd = utils_libvirtd.Libvirtd(server_session)
    if not remote_libvirtd.is_running():
        logging.debug("start libvirt on remote")
        res = remote_libvirtd.start()
        if not res:
            status, output = server_session.cmd_status_output("journalctl -xe")
            test.error("Failed to start libvirtd on remote. [status]: %s "
                       "[output]: %s." % (status, output))
    server_session.close()

    # just connect to the libvirt daemon and then return
    if no_any_config == "yes":
        test_dict["uri"] = "%s%s%s://%s" % (driver, plus, transport, uri_path)
        remote_access(test_dict, test)
        return

    # append extra 'pkipath' argument to the URI if it is set
    if custom_pki_path:
        extra_params = "?pkipath=%s" % custom_pki_path

    # append extra 'no_verify' argument to the URI if needed
    if no_verify == "yes":
        extra_params = "?no_verify=1"

    # append extra 'socket' argument to the URI if a custom socket dir is set
    if unix_sock_dir:
        extra_params = "?socket=%s/libvirt-sock" % unix_sock_dir

    # generate auth.conf, by default under '/etc/libvirt'
    if auth_conf_cxt and auth_conf:
        cmd = "echo -e '%s' > %s" % (auth_conf_cxt, auth_conf)
        process.system(cmd, ignore_status=True, shell=True)

    # generate polkit_pkla, by default under
    # '/etc/polkit-1/localauthority/50-local.d/'
    if polkit_pkla_cxt and polkit_pkla:
        cmd = "echo -e '%s' > %s" % (polkit_pkla_cxt, polkit_pkla)
        process.system(cmd, ignore_status=True, shell=True)

    # generate remote IP
    if config_ipv6 == "yes" and ipv6_addr_des:
        remote_ip = "[%s]" % ipv6_addr_des
    elif config_ipv6 != "yes" and server_cn:
        remote_ip = server_cn
    elif config_ipv6 != "yes" and ipv6_addr_des:
        remote_ip = "[%s]" % ipv6_addr_des
    elif server_ip and transport != "unix":
        remote_ip = server_ip
    else:
        remote_ip = ""

    # get URI port
    if tcp_port != "":
        port = ":" + tcp_port

    if tls_port != "":
        port = ":" + tls_port

    if ssh_port != "" and not ipv6_addr_des:
        port = ":" + ssh_port

    # generate URI
    uri = "%s%s%s://%s%s%s%s%s" % (driver, plus, transport, uri_user,
                                   remote_ip, port, uri_path, extra_params)
    test_dict["uri"] = uri

    logging.debug("The final test dict:\n<%s>", test_dict)

    if virsh_cmd == "start" and transport != "unix":
        session = remote.wait_for_login("ssh", server_ip, "22", "root",
                                        server_pwd, "#")
        cmd = "virsh domstate %s" % vm_name
        status, output = session.cmd_status_output(cmd)
        if status:
            session.close()
            test.cancel(output)

        session.close()

    try:
        # setup IPv6
        if config_ipv6 == "yes":
            ipv6_obj = IPv6Manager(test_dict)
            objs_list.append(ipv6_obj)
            ipv6_obj.setup()

        # compare libvirt version if needs
        if diff_virt_ver == "yes":
            compare_virt_version(server_ip, server_user, server_pwd, test)

        # setup SSH
        if transport == "ssh" or ssh_setup == "yes":
            if not test_dict.get("auth_pwd"):
                ssh_obj = SSHConnection(test_dict)
                if ssh_recovery == "yes":
                    objs_list.append(ssh_obj)
                # setup test environment
                ssh_obj.conn_setup()
            else:
                # To access to server with password,
                # cleanup authorized_keys on remote
                ssh_pubkey_file = "/root/.ssh/id_rsa.pub"
                if (os.path.exists("/root/.ssh/id_rsa") and
                   os.path.exists(ssh_pubkey_file)):
                    remote_file_obj = remote.RemoteFile(address=server_ip,
                                                        client='scp',
                                                        username=server_user,
                                                        password=server_pwd,
                                                        port='22',
                                                        remote_path="/root/.ssh/authorized_keys")
                    with open(ssh_pubkey_file, 'r') as fd:
                        line = fd.read().split()[-1].rstrip('\n')
                    line = ".*" + line
                    remote_file_obj.remove([line])
                    objs_list.append(remote_file_obj)

        # setup TLS
        if transport == "tls" or tls_setup == "yes":
            tls_obj = TLSConnection(test_dict)
            if tls_recovery == "yes":
                objs_list.append(tls_obj)
            # reserve cert path
            tmp_dir = tls_obj.tmp_dir
            # setup test environment
            if tls_sanity_cert == "no":
                # only setup CA and client
                tls_obj.conn_setup(False, True)
            else:
                # setup CA, server and client
                tls_obj.conn_setup()

        # setup TCP
        if transport == "tcp" or tcp_setup == "yes":
            tcp_obj = TCPConnection(test_dict)
            if tcp_recovery == "yes":
                objs_list.append(tcp_obj)
            # setup test environment
            tcp_obj.conn_setup()

        # create a directory if needs
        if mkdir_cmd:
            process.system(mkdir_cmd, ignore_status=True, shell=True)

        # setup UNIX
        if transport == "unix" or unix_setup == "yes":
            unix_obj = UNIXConnection(test_dict)
            if unix_recovery == "yes":
                objs_list.append(unix_obj)
            # setup test environment
            unix_obj.conn_setup()

        # need to restart libvirt service for negative testing
        if restart_libvirtd == "no":
            remotely_control_libvirtd(server_ip, server_user,
                                      server_pwd, action, status_error)

        # check TCP/IP listening by service
        if restart_libvirtd != "no" and transport != "unix":
            service = 'libvirtd'
            if transport == "ssh":
                service = 'ssh'

            check_listening_port_remote_by_service(server_ip, server_user,
                                                   server_pwd, service,
                                                   port, listen_addr)

        # open the tls/tcp listening port on server
        if transport in ["tls", "tcp"]:
            firewalld_port = port[1:]
            server_session = remote.wait_for_login('ssh', server_ip, '22',
                                                   server_user, server_pwd,
                                                   r"[\#\$]\s*$")
            firewall_cmd = utils_iptables.Firewall_cmd(server_session)
            firewall_cmd.add_port(firewalld_port, 'tcp', permanent=True)
            server_session.close()

        # remove client certificates if they exist, only for TLS negative testing
        if rm_client_key_cmd:
            process.system(rm_client_key_cmd, ignore_status=True, shell=True)

        if rm_client_cert_cmd:
            process.system(rm_client_cert_cmd, ignore_status=True, shell=True)

        # add user to specific group
        if adduser_cmd:
            process.system(adduser_cmd, ignore_status=True, shell=True)

        # change /etc/pki/libvirt/servercert.pem then
        # restart libvirt service on the remote host
        if tls_sanity_cert == "no" and ca_cn_new:
            test_dict['ca_cn'] = ca_cn_new
            test_dict['scp_new_cacert'] = 'no'
            tls_obj_new = TLSConnection(test_dict)
            test_dict['tls_obj_new'] = tls_obj_new
            # only setup new CA and server
            tls_obj_new.conn_setup(True, False)

        # setup SASL authentication
        # Since libvirt-3.2.0, the default SASL mechanism changed from
        # DIGEST-MD5 to GSSAPI and "sasl_user" is discarded.
        # More details: https://libvirt.org/auth.html#ACL_server_kerberos
        if sasl_user_pwd and sasl_type == 'digest-md5':
            # convert the string tuple and list into Python data types
            sasl_user_pwd = eval(sasl_user_pwd)
            if sasl_allowed_users:
                sasl_allowed_users = eval(sasl_allowed_users)
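            # The cfg values above are string literals of Python structures,
            # e.g. (illustrative): sasl_user_pwd = "[('user1', 'pwd1')]" and
            # sasl_allowed_users = "['user1']".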

            # create a sasl user
            sasl_obj = SASL(test_dict)
            objs_list.append(sasl_obj)
            sasl_obj.setup()

            for sasl_user, sasl_pwd in sasl_user_pwd:
                # no authentication needed if auth.conf is configured by the user
                if not auth_conf:
                    test_dict["auth_user"] = sasl_user
                    test_dict["auth_pwd"] = sasl_pwd
                    logging.debug("sasl_user, sasl_pwd = "
                                  "(%s, %s)", sasl_user, sasl_pwd)

                if sasl_allowed_users and sasl_user not in sasl_allowed_users:
                    test_dict["status_error"] = "yes"
                patterns_extra_dict = {"authentication name": sasl_user}
                test_dict["patterns_extra_dict"] = patterns_extra_dict
                remote_access(test_dict, test)
        else:
            remote_access(test_dict, test)

    finally:
        # recover the test environment
        # Destroy the VM after all tests are done
        cleanup(objs_list)

        if vm_name:
            vm = env.get_vm(vm_name)
            if vm and vm.is_alive():
                vm.destroy(gracefully=False)

        if transport in ["tcp", "tls"] and 'firewalld_port' in locals():
            server_session = remote.wait_for_login('ssh', server_ip, '22',
                                                   server_user, server_pwd,
                                                   r"[\#\$]\s*$")
            firewall_cmd = utils_iptables.Firewall_cmd(server_session)
            firewall_cmd.remove_port(firewalld_port, 'tcp', permanent=True)
            server_session.close()

        if rmdir_cmd:
            process.system(rmdir_cmd, ignore_status=True, shell=True)

        if deluser_cmd:
            process.system(deluser_cmd, ignore_status=True, shell=True)

        if auth_conf and os.path.isfile(auth_conf):
            os.unlink(auth_conf)

        if polkit_pkla and os.path.isfile(polkit_pkla):
            os.unlink(polkit_pkla)
Exemplo n.º 37
0
def run(test, params, env):
    """
    Test interface xml options.

    1. Prepare the test environment, destroy or suspend a VM.
    2. Edit the xml and start the domain.
    3. Perform the test operation.
    4. Recover the test environment.
    5. Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': False}

    if not utils_package.package_install(["lsof"]):
        test.cancel("Failed to install dependency package lsof" " on host")

    def create_iface_xml(iface_mac):
        """
        Create interface xml file
        """
        iface = Interface(type_name=iface_type)
        source = ast.literal_eval(iface_source)
        if source:
            iface.source = source
        iface.model = iface_model if iface_model else "virtio"
        iface.mac_address = iface_mac
        driver_dict = {}
        driver_host = {}
        driver_guest = {}
        if iface_driver:
            driver_dict = ast.literal_eval(iface_driver)
        if iface_driver_host:
            driver_host = ast.literal_eval(iface_driver_host)
        if iface_driver_guest:
            driver_guest = ast.literal_eval(iface_driver_guest)
        iface.driver = iface.new_driver(driver_attr=driver_dict,
                                        driver_host=driver_host,
                                        driver_guest=driver_guest)
        logging.debug("Create new interface xml: %s", iface)
        return iface
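    # For reference, the xml built above typically looks like (illustrative,
    # the exact attributes depend on the cfg values):
    #   <interface type='network'>
    #     <source network='default'/>
    #     <model type='virtio'/>
    #     <mac address='52:54:00:xx:xx:xx'/>
    #     <driver name='vhost' queues='4'/>
    #   </interface>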

    def modify_iface_xml(update, status_error=False):
        """
        Modify interface xml options
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        xml_devices = vmxml.devices
        iface_index = xml_devices.index(
            xml_devices.by_device_tag("interface")[0])
        iface = xml_devices[iface_index]
        if iface_model:
            iface.model = iface_model
        else:
            del iface.model
        if iface_type:
            iface.type_name = iface_type
        del iface.source
        source = ast.literal_eval(iface_source)
        if source:
            net_ifs = utils_net.get_net_if(state="UP")
            # Check whether the source device is valid; if it is not in the
            # host interface list, reset the source device to the first
            # active interface of the host
            if (iface.type_name == "direct" and 'dev' in source
                    and source['dev'] not in net_ifs):
                logging.warning(
                    "Source device %s is not an interface"
                    " of the host, reset to %s", source['dev'], net_ifs[0])
                source['dev'] = net_ifs[0]
            iface.source = source
        backend = ast.literal_eval(iface_backend)
        if backend:
            iface.backend = backend
        driver_dict = {}
        driver_host = {}
        driver_guest = {}
        if iface_driver:
            driver_dict = ast.literal_eval(iface_driver)
        if iface_driver_host:
            driver_host = ast.literal_eval(iface_driver_host)
        if iface_driver_guest:
            driver_guest = ast.literal_eval(iface_driver_guest)
        iface.driver = iface.new_driver(driver_attr=driver_dict,
                                        driver_host=driver_host,
                                        driver_guest=driver_guest)
        if iface.address:
            del iface.address

        logging.debug("New interface xml file: %s", iface)
        if unprivileged_user:
            # Create disk image for unprivileged user
            disk_index = xml_devices.index(
                xml_devices.by_device_tag("disk")[0])
            disk_xml = xml_devices[disk_index]
            logging.debug("source: %s", disk_xml.source)
            disk_source = disk_xml.source.attrs["file"]
            cmd = ("cp -fZ {0} {1} && chown {2}:{2} {1}"
                   "".format(disk_source, dst_disk, unprivileged_user))
            process.run(cmd, shell=True)
            disk_xml.source = disk_xml.new_disk_source(
                attrs={"file": dst_disk})
            vmxml.devices = xml_devices
            # Remove all channels to avoid permission problems
            channels = vmxml.get_devices(device_type="channel")
            for channel in channels:
                vmxml.del_device(channel)

            vmxml.xmltreefile.write()
            logging.debug("New VM xml: %s", vmxml)
            process.run("chmod a+rw %s" % vmxml.xml, shell=True)
            virsh.define(vmxml.xml, **virsh_dargs)
        # Try to modify interface xml by update-device or edit xml
        elif update:
            iface.xmltreefile.write()
            ret = virsh.update_device(vm_name, iface.xml, ignore_status=True)
            libvirt.check_exit_status(ret, status_error)
        else:
            vmxml.devices = xml_devices
            vmxml.xmltreefile.write()
            try:
                vmxml.sync()
            except xcepts.LibvirtXMLError as e:
                if not define_error:
                    test.fail("Define VM fail: %s" % e)

    def check_offloads_option(if_name, driver_options, session=None):
        """
        Check interface offloads by ethtool output
        """
        offloads = {
            "csum": "tx-checksumming",
            "gso": "generic-segmentation-offload",
            "tso4": "tcp-segmentation-offload",
            "tso6": "tx-tcp6-segmentation",
            "ecn": "tx-tcp-ecn-segmentation",
            "ufo": "udp-fragmentation-offload"
        }
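        # driver_options is the parsed <driver><host/>/<guest/> attribute
        # dict, e.g. {'csum': 'off', 'gso': 'off'} (illustrative values);
        # each key is matched against the corresponding "feature: on/off"
        # line of `ethtool -k`.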
        if session:
            ret, output = session.cmd_status_output("ethtool -k %s | head"
                                                    " -18" % if_name)
        else:
            out = process.run("ethtool -k %s | head -18" % if_name, shell=True)
            ret, output = out.exit_status, out.stdout_text
        if ret:
            test.fail("ethtool return error code")
        logging.debug("ethtool output: %s", output)
        for offload in list(driver_options.keys()):
            if offload in offloads:
                if (output.count(offloads[offload]) and not output.count(
                        "%s: %s" %
                    (offloads[offload], driver_options[offload]))):
                    test.fail("offloads option %s: %s isn't"
                              " correct in ethtool output" %
                              (offloads[offload], driver_options[offload]))

    def run_xml_test(iface_mac):
        """
        Test for interface options in vm xml
        """
        # Get the interface object according to the mac address
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        iface_devices = vmxml.get_devices(device_type="interface")
        iface = None
        for iface_dev in iface_devices:
            if iface_dev.mac_address == iface_mac:
                iface = iface_dev
        if not iface:
            test.fail("Can't find interface with mac"
                      " '%s' in vm xml" % iface_mac)
        driver_dict = {}
        if iface_driver:
            driver_dict = ast.literal_eval(iface_driver)
        for driver_opt in list(driver_dict.keys()):
            if not driver_dict[driver_opt] == iface.driver.driver_attr[
                    driver_opt]:
                test.fail("Can't see driver option %s=%s in vm xml" %
                          (driver_opt, driver_dict[driver_opt]))
        if iface_target:
            if ("dev" not in iface.target
                    or not iface.target["dev"].startswith(iface_target)):
                test.fail("Can't see device target dev in vm xml")
            # Check macvtap mode by ip link command
            if iface_target == "macvtap" and "mode" in iface.source:
                cmd = "ip -d link show %s" % iface.target["dev"]
                output = process.run(cmd, shell=True).stdout_text
                logging.debug("ip link output: %s", output)
                mode = iface.source["mode"]
                if mode == "passthrough":
                    mode = "passthru"
                if not re.search("macvtap\s+mode %s" % mode, output):
                    test.fail("Failed to verify macvtap mode")

    def run_cmdline_test(iface_mac):
        """
        Test for qemu-kvm command line options
        """
        cmd = ("ps -ef | grep %s | grep -v grep " % vm_name)
        ret = process.run(cmd, shell=True)
        logging.debug("Command line %s", ret.stdout_text)
        if test_vhost_net:
            if not ret.stdout_text.count("vhost=on") and not rm_vhost_driver:
                test.fail("Can't see vhost options in"
                          " qemu-kvm command line")

        if iface_model == "virtio":
            model_option = "device virtio-net-pci"
        else:
            model_option = "device rtl8139"
        iface_cmdline = re.findall(
            r"%s,(.+),mac=%s" % (model_option, iface_mac), ret.stdout_text)
        if not iface_cmdline:
            test.fail("Can't see %s with mac %s in command"
                      " line" % (model_option, iface_mac))

        cmd_opt = {}
        for opt in iface_cmdline[0].split(','):
            tmp = opt.rsplit("=")
            cmd_opt[tmp[0]] = tmp[1]
        logging.debug("Command line options %s", cmd_opt)

        driver_dict = {}
        # Test <driver> xml options.
        if iface_driver:
            iface_driver_dict = ast.literal_eval(iface_driver)
            for driver_opt in list(iface_driver_dict.keys()):
                if driver_opt == "name":
                    continue
                elif driver_opt == "txmode":
                    if iface_driver_dict["txmode"] == "iothread":
                        driver_dict["tx"] = "bh"
                    else:
                        driver_dict["tx"] = iface_driver_dict["txmode"]
                elif driver_opt == "queues":
                    driver_dict["mq"] = "on"
                    driver_dict["vectors"] = str(
                        int(iface_driver_dict["queues"]) * 2 + 2)
                else:
                    driver_dict[driver_opt] = iface_driver_dict[driver_opt]
        # Test <driver><host/></driver> xml options.
        if iface_driver_host:
            driver_dict.update(ast.literal_eval(iface_driver_host))
        # Test <driver><guest/></driver> xml options.
        if iface_driver_guest:
            driver_dict.update(ast.literal_eval(iface_driver_guest))

        for driver_opt in list(driver_dict.keys()):
            if (driver_opt not in cmd_opt
                    or not cmd_opt[driver_opt] == driver_dict[driver_opt]):
                test.fail("Can't see option '%s=%s' in qemu-kvm "
                          " command line" %
                          (driver_opt, driver_dict[driver_opt]))
            logging.info("Find %s=%s in qemu-kvm command line" %
                         (driver_opt, driver_dict[driver_opt]))
        if test_backend:
            guest_pid = ret.stdout_text.rsplit()[1]
            cmd = "lsof %s | grep %s" % (backend["tap"], guest_pid)
            if process.system(cmd, ignore_status=True, shell=True):
                test.fail("Guest process didn't open backend file"
                          " %s" % backend["tap"])
            cmd = "lsof %s | grep %s" % (backend["vhost"], guest_pid)
            if process.system(cmd, ignore_status=True, shell=True):
                test.fail("Guest process didn't open backend file"
                          " %s" % backend["vhost"])

    def get_guest_ip(session, mac):
        """
        Wrapper function to get guest ip address
        """
        utils_net.restart_guest_network(session, mac)
        # Wait until the IP address is ready
        utils_misc.wait_for(lambda: utils_net.get_guest_ip_addr(session, mac),
                            10)
        return utils_net.get_guest_ip_addr(session, mac)

    def check_user_network(session):
        """
        Check user network ip address on guest
        """
        vm_ips = []
        vm_ips.append(get_guest_ip(session, iface_mac_old))
        if attach_device:
            vm_ips.append(get_guest_ip(session, iface_mac))
        logging.debug("IP address on guest: %s", vm_ips)
        if len(vm_ips) != len(set(vm_ips)):
            test.fail("Duplicated IP address on guest. "
                      "Check bug: https://bugzilla.redhat."
                      "com/show_bug.cgi?id=1147238")

        for vm_ip in vm_ips:
            if vm_ip is None or not vm_ip.startswith("10.0.2."):
                test.fail("Found wrong IP address" " on guest")
        # Check gateway address
        gateway = utils_net.get_net_gateway(session.cmd_output)
        if gateway != "10.0.2.2":
            test.fail("The gateway on guest is not" " right")
        # Check dns server address
        ns_list = utils_net.get_net_nameserver(session.cmd_output)
        if "10.0.2.3" not in ns_list:
            test.fail("The dns server can't be found" " on guest")

    def check_mcast_network(session):
        """
        Check multicast ip address on guests
        """
        username = params.get("username")
        password = params.get("password")
        src_addr = ast.literal_eval(iface_source)['address']
        add_session = additional_vm.wait_for_serial_login(username=username,
                                                          password=password)
        vms_sess_dict = {vm_name: session, additional_vm.name: add_session}

        # Check mcast address on host
        cmd = "netstat -g | grep %s" % src_addr
        if process.run(cmd, ignore_status=True, shell=True).exit_status:
            test.fail("Can't find multicast ip address" " on host")
        vms_ip_dict = {}
        # Get ip address on each guest
        for vms in list(vms_sess_dict.keys()):
            vm_mac = vm_xml.VMXML.get_first_mac_by_name(vms)
            vm_ip = get_guest_ip(vms_sess_dict[vms], vm_mac)
            if not vm_ip:
                test.fail("Can't get multicast ip" " address on guest")
            vms_ip_dict.update({vms: vm_ip})
        if len(set(vms_ip_dict.values())) != len(vms_sess_dict):
            test.fail("Got duplicated multicast ip address")
        logging.debug("Found ips on guest: %s", vms_ip_dict)

        # Run omping server on host
        if not utils_package.package_install(["omping"]):
            test.error("Failed to install omping" " on host")
        cmd = ("iptables -F;omping -m %s %s" %
               (src_addr,
                "192.168.122.1 %s" % ' '.join(list(vms_ip_dict.values()))))
        # Run a background job waiting for the clients to connect
        bgjob = utils_misc.AsyncJob(cmd)

        # Run omping client on guests
        for vms in list(vms_sess_dict.keys()):
            # omping should be installed first
            if not utils_package.package_install(["omping"],
                                                 vms_sess_dict[vms]):
                test.error("Failed to install omping" " on guest")
            cmd = ("iptables -F; omping -c 5 -T 5 -m %s %s" %
                   (src_addr, "192.168.122.1 %s" % vms_ip_dict[vms]))
            ret, output = vms_sess_dict[vms].cmd_status_output(cmd)
            logging.debug("omping ret: %s, output: %s", ret, output)
            if (not output.count('multicast, xmt/rcv/%loss = 5/5/0%')
                    or not output.count('unicast, xmt/rcv/%loss = 5/5/0%')):
                test.fail("omping failed on guest")
        # Kill the background job
        bgjob.kill_func()

    status_error = "yes" == params.get("status_error", "no")
    start_error = "yes" == params.get("start_error", "no")
    define_error = "yes" == params.get("define_error", "no")
    unprivileged_user = params.get("unprivileged_user")

    # Interface specific attributes.
    iface_type = params.get("iface_type", "network")
    iface_source = params.get("iface_source", "{}")
    iface_driver = params.get("iface_driver")
    iface_model = params.get("iface_model", "virtio")
    iface_target = params.get("iface_target")
    iface_backend = params.get("iface_backend", "{}")
    iface_driver_host = params.get("iface_driver_host")
    iface_driver_guest = params.get("iface_driver_guest")
    attach_device = params.get("attach_iface_device")
    expect_tx_size = params.get("expect_tx_size")
    change_option = "yes" == params.get("change_iface_options", "no")
    update_device = "yes" == params.get("update_iface_device", "no")
    additional_guest = "yes" == params.get("additional_guest", "no")
    serial_login = "******" == params.get("serial_login", "no")
    rm_vhost_driver = "yes" == params.get("rm_vhost_driver", "no")
    test_option_cmd = "yes" == params.get("test_iface_option_cmd", "no")
    test_option_xml = "yes" == params.get("test_iface_option_xml", "no")
    test_vhost_net = "yes" == params.get("test_vhost_net", "no")
    test_option_offloads = "yes" == params.get("test_option_offloads", "no")
    test_iface_user = "******" == params.get("test_iface_user", "no")
    test_iface_mcast = "yes" == params.get("test_iface_mcast", "no")
    test_libvirtd = "yes" == params.get("test_libvirtd", "no")
    test_guest_ip = "yes" == params.get("test_guest_ip", "no")
    test_backend = "yes" == params.get("test_backend", "no")
    check_guest_trans = "yes" == params.get("check_guest_trans", "no")

    if iface_driver_host or iface_driver_guest or test_backend:
        if not libvirt_version.version_compare(1, 2, 8):
            test.cancel("Offloading/backend options not "
                        "supported in this libvirt version")
    if iface_driver and "queues" in ast.literal_eval(iface_driver):
        if not libvirt_version.version_compare(1, 0, 6):
            test.cancel("Queues options not supported"
                        " in this libvirt version")

    if unprivileged_user:
        if not libvirt_version.version_compare(1, 1, 1):
            test.cancel("qemu-bridge-helper not supported" " on this host")
        virsh_dargs["unprivileged_user"] = unprivileged_user
        # Create unprivileged user if needed
        cmd = ("grep {0} /etc/passwd || "
               "useradd {0}".format(unprivileged_user))
        process.run(cmd, shell=True)
        # Need another disk image for unprivileged user to access
        dst_disk = "/tmp/%s.img" % unprivileged_user

    # Destroy VM first
    if vm.is_alive():
        vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    iface_mac_old = vm_xml.VMXML.get_first_mac_by_name(vm_name)
    # iface_mac will update if attach a new interface
    iface_mac = iface_mac_old
    # Additional vm for test
    additional_vm = None
    libvirtd = utils_libvirtd.Libvirtd()

    try:
        # Build the xml and run test.
        try:
            # Prepare interface backend files
            if test_backend:
                if not os.path.exists("/dev/vhost-net"):
                    process.run("modprobe vhost-net", shell=True)
                backend = ast.literal_eval(iface_backend)
                backend_tap = "/dev/net/tun"
                backend_vhost = "/dev/vhost-net"
                if not backend:
                    backend["tap"] = backend_tap
                    backend["vhost"] = backend_vhost
                if not start_error:
                    # Create backend files for normal test
                    if not os.path.exists(backend["tap"]):
                        os.rename(backend_tap, backend["tap"])
                    if not os.path.exists(backend["vhost"]):
                        os.rename(backend_vhost, backend["vhost"])
            # Edit the interface xml.
            if change_option:
                modify_iface_xml(update=False)
                if define_error:
                    return

            if rm_vhost_driver:
                # remove vhost driver on host and
                # the character file /dev/vhost-net
                cmd = ("modprobe -r {0}; "
                       "rm -f /dev/vhost-net".format("vhost_net"))
                if process.system(cmd, ignore_status=True, shell=True):
                    test.error("Failed to remove vhost_net driver")
            else:
                # Load vhost_net driver by default
                cmd = "modprobe vhost_net"
                process.system(cmd, shell=True)

            # Attach an interface when the vm is shut off
            if attach_device == 'config':
                iface_mac = utils_net.generate_mac_address_simple()
                iface_xml_obj = create_iface_xml(iface_mac)
                iface_xml_obj.xmltreefile.write()
                ret = virsh.attach_device(vm_name,
                                          iface_xml_obj.xml,
                                          flagstr="--config",
                                          ignore_status=True)
                libvirt.check_exit_status(ret)

            # Clone additional vm
            if additional_guest:
                guest_name = "%s_%s" % (vm_name, '1')
                # Clone additional guest
                timeout = params.get("clone_timeout", 360)
                utils_libguestfs.virt_clone_cmd(vm_name,
                                                guest_name,
                                                True,
                                                timeout=timeout)
                additional_vm = vm.clone(guest_name)
                additional_vm.start()
                # additional_vm.wait_for_login()

            # Start the VM.
            if unprivileged_user:
                virsh.start(vm_name, **virsh_dargs)
                cmd = ("su - %s -c 'virsh console %s'" %
                       (unprivileged_user, vm_name))
                session = aexpect.ShellSession(cmd)
                session.sendline()
                remote.handle_prompts(session, params.get("username"),
                                      params.get("password"), r"[\#\$]\s*$",
                                      30)
                # Get ip address on guest
                if not get_guest_ip(session, iface_mac):
                    test.error("Can't get ip address on guest")
            else:
                # Will raise VMStartError exception if start fails
                vm.start()
                if serial_login:
                    session = vm.wait_for_serial_login()
                else:
                    session = vm.wait_for_login()
            if start_error:
                test.fail("VM started unexpectedly")

            # Attach an interface when the vm is running
            if attach_device == 'live':
                iface_mac = utils_net.generate_mac_address_simple()
                iface_xml_obj = create_iface_xml(iface_mac)
                iface_xml_obj.xmltreefile.write()
                ret = virsh.attach_device(vm_name,
                                          iface_xml_obj.xml,
                                          flagstr="--live",
                                          ignore_status=True,
                                          debug=True)
                libvirt.check_exit_status(ret, status_error)
                # Sleep here so the attachment takes effect
                time.sleep(5)

            # Update interface options
            if update_device:
                modify_iface_xml(update=True, status_error=status_error)

            # Run tests for qemu-kvm command line options
            if test_option_cmd:
                run_cmdline_test(iface_mac)
            # Run tests for vm xml
            if test_option_xml:
                run_xml_test(iface_mac)
            # Run tests for offloads options
            if test_option_offloads:
                if iface_driver_host:
                    ifname_guest = utils_net.get_linux_ifname(
                        session, iface_mac)
                    check_offloads_option(ifname_guest,
                                          ast.literal_eval(iface_driver_host),
                                          session)
                if iface_driver_guest:
                    ifname_host = libvirt.get_ifname_host(vm_name, iface_mac)
                    check_offloads_option(ifname_host,
                                          ast.literal_eval(iface_driver_guest))

            if test_iface_user:
                # Test user type network
                check_user_network(session)
            if test_iface_mcast:
                # Test mcast type network
                check_mcast_network(session)
            # Check guest ip address
            if test_guest_ip:
                if not get_guest_ip(session, iface_mac):
                    test.fail("Guest can't get a" " valid ip address")
            # Check guest RX/TX ring
            if check_guest_trans:
                ifname_guest = utils_net.get_linux_ifname(session, iface_mac)
                ret, outp = session.cmd_status_output("ethtool -g %s" %
                                                      ifname_guest)
                if ret:
                    test.fail("ethtool return error code")
                logging.info("ethtool output is %s", outp)
                driver_dict = ast.literal_eval(iface_driver)
                if expect_tx_size:
                    driver_dict['tx_queue_size'] = expect_tx_size
                for outp_p in outp.split("Current hardware"):
                    if 'rx_queue_size' in driver_dict:
                        if re.search("RX:\s*%s" % driver_dict['rx_queue_size'],
                                     outp_p):
                            logging.info("Find RX setting RX:%s by ethtool",
                                         driver_dict['rx_queue_size'])
                        else:
                            test.fail("Cannot find matching rx setting")
                    if 'tx_queue_size' in driver_dict:
                        if re.search("TX:\s*%s" % driver_dict['tx_queue_size'],
                                     outp_p):
                            logging.info("Find TX settint TX:%s by ethtool",
                                         driver_dict['tx_queue_size'])
                        else:
                            test.fail("Cannot find matching tx setting")

            session.close()
            # Restart libvirtd and guest, then test again
            if test_libvirtd:
                libvirtd.restart()
                vm.destroy()
                vm.start()
                if test_option_xml:
                    run_xml_test(iface_mac)

            # Finally, detach the hot/cold-plugged interface
            if attach_device and not status_error:
                ret = virsh.detach_device(vm_name,
                                          iface_xml_obj.xml,
                                          flagstr="",
                                          ignore_status=True)
                libvirt.check_exit_status(ret)

        except virt_vm.VMStartError as e:
            logging.info(str(e))
            if not start_error:
                test.fail('VM failed to start\n%s' % e)

    finally:
        # Recover VM.
        logging.info("Restoring vm...")
        # Restore interface backend files
        if test_backend:
            if not os.path.exists(backend_tap):
                os.rename(backend["tap"], backend_tap)
            if not os.path.exists(backend_vhost):
                os.rename(backend["vhost"], backend_vhost)
        if rm_vhost_driver:
            # Restore vhost_net driver
            process.system("modprobe vhost_net", shell=True)
        if unprivileged_user:
            virsh.remove_domain(vm_name, "--remove-all-storage", **virsh_dargs)
        if additional_vm:
            virsh.remove_domain(additional_vm.name, "--remove-all-storage")
            # Kill all omping server processes on host
            process.system("pidof omping && killall omping",
                           ignore_status=True,
                           shell=True)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()
Exemplo n.º 38
0
def run(test, params, env):
    """
    Test command: virsh dumpxml.

    1) Prepare parameters.
    2) Set options of virsh dumpxml.
    3) Prepare environment: vm_state, etc.
    4) Run dumpxml command.
    5) Recover environment.
    6) Check result.
    """
    def is_dumpxml_of_running_vm(dumpxml, domid):
        """
        Check whether the dumpxml was taken while the vm was running.
        (Verify the domid in dumpxml)

        :param dumpxml: the output of virsh dumpxml.
        :param domid: the id of vm
        """
        match_string = "<domain.*id='%s'/>" % domid
        if re.search(dumpxml, match_string):
            return True
        return False

    def custom_cpu(vm_name, cpu_match):
        """
        Customize guest cpu match/model/features for the --update-cpu option.
        """
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        vmcpu_xml = vm_xml.VMCPUXML()
        vmcpu_xml['match'] = cpu_match
        vmcpu_xml['model'] = "Penryn"
        vmcpu_xml['vendor'] = "Intel"
        vmcpu_xml.add_feature('xtpr', 'optional')
        vmcpu_xml.add_feature('tm2', 'disable')
        vmcpu_xml.add_feature('est', 'force')
        vmcpu_xml.add_feature('vmx', 'forbid')
        # Unsupported feature 'ia64'
        vmcpu_xml.add_feature('ia64', 'optional')
        vmcpu_xml.add_feature('vme', 'require')
        vmxml['cpu'] = vmcpu_xml
        vmxml.sync()
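    # The customization above produces a <cpu> element roughly like
    # (illustrative):
    #   <cpu match='minimum'>
    #     <model>Penryn</model>
    #     <vendor>Intel</vendor>
    #     <feature policy='optional' name='xtpr'/>
    #     <feature policy='disable' name='tm2'/>
    #     ...
    #   </cpu>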

    def check_cpu(xml, cpu_match):
        """
        Check the dumpxml result for --update-cpu option

        Note, function custom_cpu() hard codes these features and policies,
        so after running virsh dumpxml --update-cpu:
        1. For match='minimum', all features supported by the host will be
           added, and match becomes 'exact'
        2. policy='optional' features (supported by the host) will be
           updated to policy='require'
        3. policy='optional' features (unsupported by the host) will be
           updated to policy='disable'
        4. Other policy='disable|force|forbid|require' features keep their
           original values
        """
        vmxml = vm_xml.VMXML()
        vmxml['xml'] = xml
        vmcpu_xml = vmxml['cpu']
        check_pass = True
        require_count = 0
        expect_require_features = 0
        cpu_feature_list = vmcpu_xml.get_feature_list()
        host_capa = capability_xml.CapabilityXML()
        for i in range(len(cpu_feature_list)):
            f_name = vmcpu_xml.get_feature_name(i)
            f_policy = vmcpu_xml.get_feature_policy(i)
            err_msg = "Policy of '%s' is not expected: %s" % (f_name, f_policy)
            expect_policy = "disable"
            if f_name in ["xtpr", "vme", "ia64"]:
                # Check if feature is support on the host
                if host_capa.check_feature_name(f_name):
                    expect_policy = "require"
                if f_policy != expect_policy:
                    logging.error(err_msg)
                    check_pass = False
            if f_name == "tm2":
                if f_policy != "disable":
                    logging.error(err_msg)
                    check_pass = False
            if f_name == "est":
                if f_policy != "force":
                    logging.error(err_msg)
                    check_pass = False
            if f_name == "vmx":
                if f_policy != "forbid":
                    logging.error(err_msg)
                    check_pass = False
            # Count expect require features
            if expect_policy == "require":
                expect_require_features += 1
            # Count actual require features
            if f_policy == "require":
                require_count += 1
        # Check
        if cpu_match == "minimum":
            expect_match = "exact"
            # Different hosts support different sets of features, so just
            # check that the actual number of require features is at least
            # the expected number
            if require_count < expect_require_features:
                logging.error("Found %d require features, but expected >= %s",
                              require_count, expect_require_features)
                check_pass = False
        else:
            expect_match = cpu_match
            if require_count != expect_require_features:
                logging.error("Find %d require features, but expect %s",
                              require_count, expect_require_features)
                check_pass = False
        match = vmcpu_xml['match']
        if match != expect_match:
            logging.error("CPU match '%s' is not expected", match)
            check_pass = False
        if vmcpu_xml['model'] != 'Penryn':
            logging.error("CPU model %s is not expected", vmcpu_xml['model'])
            check_pass = False
        if vmcpu_xml['vendor'] != "Intel":
            logging.error("CPU vendor %s is not expected", vmcpu_xml['vendor'])
            check_pass = False
        return check_pass

    # Prepare parameters
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    vm_ref = params.get("dumpxml_vm_ref", "domname")
    options_ref = params.get("dumpxml_options_ref", "")
    options_suffix = params.get("dumpxml_options_suffix", "")
    vm_state = params.get("dumpxml_vm_state", "running")
    security_pwd = params.get("dumpxml_security_pwd", "123456")
    status_error = params.get("status_error", "no")
    cpu_match = params.get("cpu_match", "minimum")
    backup_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    if options_ref.count("update-cpu"):
        custom_cpu(vm_name, cpu_match)
    elif options_ref.count("security-info"):
        new_xml = backup_xml.copy()
        vm_xml.VMXML.add_security_info(new_xml, security_pwd)
    domuuid = vm.get_uuid()
    domid = vm.get_id()

    # acl polkit params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    # Prepare vm state for test
    if vm_state == "shutoff" and vm.is_alive():
        vm.destroy()  # Confirm vm is shutoff

    if vm_ref == "domname":
        vm_ref = vm_name
    elif vm_ref == "domid":
        vm_ref = domid
    elif vm_ref == "domuuid":
        vm_ref = domuuid
    elif vm_ref == "hex_id":
        if domid == "-":
            vm_ref = domid
        else:
            vm_ref = hex(int(domid))
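            # e.g. a running domain with id 12 is referenced as '0xc',
            # which virsh accepts as a domain id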

    if options_suffix:
        options_ref = "%s %s" % (options_ref, options_suffix)

    # Run command
    logging.info("Command:virsh dumpxml %s", vm_ref)
    try:
        try:
            if params.get('setup_libvirt_polkit') == 'yes':
                cmd_result = virsh.dumpxml(vm_ref,
                                           extra=options_ref,
                                           uri=uri,
                                           unprivileged_user=unprivileged_user)
            else:
                cmd_result = virsh.dumpxml(vm_ref, extra=options_ref)
            output = cmd_result.stdout.strip()
            if cmd_result.exit_status:
                raise error.TestFail("dumpxml %s failed.\n"
                                     "Detail: %s.\n" % (vm_ref, cmd_result))
            status = 0
        except error.TestFail as detail:
            status = 1
            output = detail
        logging.debug("virsh dumpxml result:\n%s", output)

        # Recover vm state
        if vm_state == "paused":
            vm.resume()

        # Check result
        if status_error == "yes":
            if status == 0:
                raise error.TestFail("Run successfully with wrong command.")
        elif status_error == "no":
            if status:
                raise error.TestFail("Run failed with right command.")
            else:
                # validate dumpxml file
                # Since the LibvirtXML validation functions are still being
                # worked on by cevich, this is reserved here. :)
                if options_ref.count("inactive"):
                    if is_dumpxml_of_running_vm(output, domid):
                        raise error.TestFail("Got dumpxml for active vm "
                                             "with --inactive option!")
                elif options_ref.count("update-cpu"):
                    if not check_cpu(output, cpu_match):
                        raise error.TestFail("update-cpu option check fail")
                elif options_ref.count("security-info"):
                    if not output.count("passwd='%s'" % security_pwd):
                        raise error.TestFail("No more cpu info outputed!")
                else:
                    if (vm_state == "shutoff"
                            and is_dumpxml_of_running_vm(output, domid)):
                        raise error.TestFail("Got dumpxml for active vm "
                                             "when vm is shutoff.")
Exemplo n.º 39
0
def run(test, params, env):
    """
    Test per-image DAC disk hotplug to VM.

    (1).Init variables for test.
    (2).Create disk xml with per-image DAC
    (3).Start VM
    (4).Attach the disk to VM and check result.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    # Get qemu.conf config variables
    qemu_user = params.get("qemu_user")
    qemu_group = params.get("qemu_group")
    dynamic_ownership = "yes" == params.get("dynamic_ownership", "yes")

    # Get per-image DAC setting
    vol_name = params.get('vol_name')
    target_dev = params.get('target_dev')
    disk_type_name = params.get("disk_type_name")
    img_user = params.get("img_user")
    img_group = params.get("img_group")
    relabel = 'yes' == params.get('relabel', 'yes')

    if not libvirt_version.version_compare(1, 2, 7):
        test.cancel("per-image DAC only supported on version 1.2.7"
                    " and after.")

    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)

    img_path = None
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        # set qemu conf
        qemu_conf.user = qemu_user
        qemu_conf.group = qemu_group
        if dynamic_ownership:
            qemu_conf.dynamic_ownership = 1
        else:
            qemu_conf.dynamic_ownership = 0
        logging.debug("the qemu.conf content is: %s" % qemu_conf)
        libvirtd.restart()

        first_disk = vm.get_first_disk_devices()
        blk_source = first_disk['source']
        owner_str = format_user_group_str(qemu_user, qemu_group)
        src_usr, src_grp = owner_str.split(':')
        os.chown(blk_source, int(src_usr), int(src_grp))
        vm.start()

        # Init a QemuImg instance and create a img.
        params['image_name'] = vol_name
        tmp_dir = data_dir.get_tmp_dir()
        image = qemu_storage.QemuImg(params, tmp_dir, vol_name)
        # Create a image.
        img_path, result = image.create(params)

        # Create disk xml for attach.
        params['source_file'] = img_path
        sec_label = "%s:%s" % (img_user, img_group)
        params['sec_label'] = sec_label
        params['type_name'] = disk_type_name
        sec_label_id = format_user_group_str(img_user, img_group)

        disk_xml = utlv.create_disk_xml(params)
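        # The generated disk xml is expected to carry a per-image DAC label
        # under <source>, roughly (illustrative):
        #   <seclabel model='dac' relabel='yes'>
        #     <label>img_user:img_group</label>
        #   </seclabel>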

        # Change img file ownership to qemu:qemu (uid/gid 107 on typical
        # RHEL/Fedora hosts) and mode 0660
        os.chown(img_path, 107, 107)
        os.chmod(img_path, 0o660)

        img_label_before = check_ownership(img_path)
        if img_label_before:
            logging.debug("the image ownership before "
                          "attach: %s" % img_label_before)

        # Do the attach action.
        option = "--persistent"
        result = virsh.attach_device(vm_name,
                                     filearg=disk_xml,
                                     flagstr=option,
                                     debug=True)
        utlv.check_exit_status(result, status_error)

        if not result.exit_status:
            img_label_after = check_ownership(img_path)
            if dynamic_ownership and relabel:
                if img_label_after != sec_label_id:
                    test.fail("The image dac label %s is not "
                              "expected." % img_label_after)

            ret = virsh.detach_disk(vm_name,
                                    target=target_dev,
                                    extra=option,
                                    debug=True)
            utlv.check_exit_status(ret, status_error)
    finally:
        # clean up
        vm.destroy()
        qemu_conf.restore()
        vmxml.sync()
        libvirtd.restart()
        if img_path and os.path.exists(img_path):
            os.unlink(img_path)
Exemplo n.º 40
0
def run(test, params, env):
    """
    Test snapshot-create-as command
    Make sure that a clean repo can be used, because qemu-guest-agent needs
    to be installed in the guest

    The command creates a snapshot (disk and RAM) from arguments, covering
    the following points
    * virsh snapshot-create-as --print-xml --diskspec --name --description
    * virsh snapshot-create-as --print-xml with multi --diskspec
    * virsh snapshot-create-as --print-xml --memspec
    * virsh snapshot-create-as --description
    * virsh snapshot-create-as --no-metadata
    * virsh snapshot-create-as --no-metadata --print-xml (negative test)
    * virsh snapshot-create-as --atomic --disk-only
    * virsh snapshot-create-as --quiesce --disk-only (positive and negative)
    * virsh snapshot-create-as --reuse-external
    * virsh snapshot-create-as --disk-only --diskspec
    * virsh snapshot-create-as --memspec --reuse-external --atomic(negative)
    * virsh snapshot-create-as --disk-only and --memspec (negative)
    * Create multi snapshots with snapshot-create-as
    * Create snapshot with name a--a a--a--snap1
    """

    if not virsh.has_help_command('snapshot-create-as'):
        test.cancel("This version of libvirt does not support "
                    "the snapshot-create-as test")

    vm_name = params.get("main_vm")
    status_error = params.get("status_error", "no")
    options = params.get("snap_createas_opts")
    multi_num = params.get("multi_num", "1")
    diskspec_num = params.get("diskspec_num", "1")
    bad_disk = params.get("bad_disk")
    reuse_external = "yes" == params.get("reuse_external", "no")
    start_ga = params.get("start_ga", "yes")
    domain_state = params.get("domain_state")
    memspec_opts = params.get("memspec_opts")
    config_format = "yes" == params.get("config_format", "no")
    snapshot_image_format = params.get("snapshot_image_format")
    diskspec_opts = params.get("diskspec_opts")
    create_autodestroy = 'yes' == params.get("create_autodestroy", "no")
    unix_channel = "yes" == params.get("unix_channel", "yes")
    dac_denial = "yes" == params.get("dac_denial", "no")
    check_json_no_savevm = "yes" == params.get("check_json_no_savevm", "no")
    disk_snapshot_attr = params.get('disk_snapshot_attr', 'external')
    set_snapshot_attr = "yes" == params.get("set_snapshot_attr", "no")

    # gluster related params
    replace_vm_disk = "yes" == params.get("replace_vm_disk", "no")
    disk_src_protocol = params.get("disk_source_protocol")
    restart_tgtd = params.get("restart_tgtd", "no")
    vol_name = params.get("vol_name")
    tmp_dir = data_dir.get_tmp_dir()
    pool_name = params.get("pool_name", "gluster-pool")
    brick_path = os.path.join(tmp_dir, pool_name)

    uri = params.get("virsh_uri")
    usr = params.get('unprivileged_user')
    if usr:
        if usr.count('EXAMPLE'):
            usr = '******'

    if disk_src_protocol == 'iscsi':
        if not libvirt_version.version_compare(1, 0, 4):
            test.cancel("'iscsi' disk doesn't support in"
                        " current libvirt version.")

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    if not libvirt_version.version_compare(1, 2, 7):
        # As bug 1017289 was closed as WONTFIX, the support only
        # exists on 1.2.7 and higher
        if disk_src_protocol == 'gluster':
            test.cancel("Snapshot on glusterfs is not supported in the "
                        "current version. Check more info with "
                        "https://bugzilla.redhat.com/buglist.cgi?"
                        "bug_id=1017289,1032370")

    if libvirt_version.version_compare(5, 5, 0):
        # libvirt-5.5.0-2 commit 68e1a05f starts to allow --no-metadata and
        # --print-xml to be used together.
        if "--no-metadata" in options and "--print-xml" in options:
            logging.info("--no-metadata and --print-xml can be used together "
                         "in this libvirt version. Not expecting a failure.")
            status_error = "no"

    opt_names = locals()
    if memspec_opts is not None:
        mem_options = compose_disk_options(test, params, memspec_opts)
        # if the memspec option does not contain "file=" then we only need
        # to prepend the tmp dir to it.
        if mem_options is None:
            mem_options = os.path.join(data_dir.get_tmp_dir(), memspec_opts)
        options += " --memspec " + mem_options

    tag_diskspec = 0
    dnum = int(diskspec_num)
    if diskspec_opts is not None:
        tag_diskspec = 1
        opt_names['diskopts_1'] = diskspec_opts

    # diskspec_opts[n] is used in cfg when more than 1 --diskspec is used
    if dnum > 1:
        tag_diskspec = 1
        for i in range(1, dnum + 1):
            opt_names["diskopts_%s" % i] = params.get("diskspec_opts%s" % i)

    if tag_diskspec == 1:
        for i in range(1, dnum + 1):
            disk_options = compose_disk_options(test, params,
                                                opt_names["diskopts_%s" % i])
            options += " --diskspec " + disk_options

    logging.debug("options are %s", options)

    vm = env.get_vm(vm_name)
    option_dict = {}
    option_dict = utils_misc.valued_option_dict(options, r' --(?!-)')
    logging.debug("option_dict is %s", option_dict)

    # A backup of original vm
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    logging.debug("original xml is %s", vmxml_backup)

    # Generate empty image for negative test
    if bad_disk is not None:
        bad_disk = os.path.join(data_dir.get_tmp_dir(), bad_disk)
        with open(bad_disk, 'w') as bad_file:
            pass

    # Generate external disk
    if reuse_external:
        disk_path = ''
        for i in range(dnum):
            external_disk = "external_disk%s" % i
            if params.get(external_disk):
                disk_path = os.path.join(data_dir.get_tmp_dir(),
                                         params.get(external_disk))
                process.run("qemu-img create -f qcow2 %s 1G" % disk_path,
                            shell=True)
        # Only chmod the last external disk for the negative case
        if dac_denial:
            process.run("chmod 500 %s" % disk_path, shell=True)

    qemu_conf = None
    libvirtd_conf = None
    libvirtd_log_path = None
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        # Config "snapshot_image_format" option in qemu.conf
        if config_format:
            qemu_conf = utils_config.LibvirtQemuConfig()
            qemu_conf.snapshot_image_format = snapshot_image_format
            logging.debug("the qemu config file content is:\n %s" % qemu_conf)
            libvirtd.restart()

        if check_json_no_savevm:
            libvirtd_conf = utils_config.LibvirtdConfig()
            libvirtd_conf["log_level"] = '1'
            libvirtd_conf["log_filters"] = '"1:json 3:remote 4:event"'
            libvirtd_log_path = os.path.join(data_dir.get_tmp_dir(),
                                             "libvirtd.log")
            libvirtd_conf["log_outputs"] = '"1:file:%s"' % libvirtd_log_path
            logging.debug("the libvirtd config file content is:\n %s" %
                          libvirtd_conf)
            libvirtd.restart()

        if replace_vm_disk:
            libvirt.set_vm_disk(vm, params, tmp_dir)

        if set_snapshot_attr:
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vmxml_new = vm_xml.VMXML.new_from_dumpxml(vm_name)
            disk_xml = vmxml_backup.get_devices(device_type="disk")[0]
            vmxml_new.del_device(disk_xml)
            # set snapshot attribute in disk xml
            disk_xml.snapshot = disk_snapshot_attr
            new_disk = disk.Disk(type_name='file')
            new_disk.xmltreefile = disk_xml.xmltreefile
            vmxml_new.add_device(new_disk)
            logging.debug("The vm xml now is: %s" % vmxml_new.xmltreefile)
            vmxml_new.sync()
            vm.start()

        # Start qemu-ga on the guest if --quiesce is used
        if unix_channel and options.find("quiesce") >= 0:
            vm.prepare_guest_agent()
            session = vm.wait_for_login()
            if start_ga == "no":
                # The qemu-ga could be running and should be killed
                session.cmd("kill -9 `pidof qemu-ga`")
                # Check if the qemu-ga got killed
                stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
                if not stat_ps:
                    # As managed by systemd and set as autostart, qemu-ga
                    # could be restarted, so use systemctl to stop it.
                    session.cmd("systemctl stop qemu-guest-agent")
                    stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
                    if not stat_ps:
                        test.cancel("Fail to stop agent in " "guest")

            if domain_state == "paused":
                virsh.suspend(vm_name)
        else:
            # Remove the channel if it exists
            if vm.is_alive():
                vm.destroy(gracefully=False)
            xml_inst = vm_xml.VMXML.new_from_dumpxml(vm_name)
            xml_inst.remove_agent_channels()
            vm.start()

        # Record the previous snapshot-list
        snaps_before = virsh.snapshot_list(vm_name)

        # Attach disks before creating the snapshot if --print-xml is not
        # used and multiple disks are specified in the cfg
        if dnum > 1 and "--print-xml" not in options:
            for i in range(1, dnum):
                disk_path = os.path.join(data_dir.get_tmp_dir(),
                                         'disk%s.qcow2' % i)
                process.run("qemu-img create -f qcow2 %s 200M" % disk_path,
                            shell=True)
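                # Attach the extra disks as vdb, vdc, ... (index 0 is the
                # original system disk, typically vda)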
                virsh.attach_disk(vm_name,
                                  disk_path,
                                  'vd%s' % list(string.ascii_lowercase)[i],
                                  debug=True)

        # Run virsh command
        # May create several snapshots, according to configuration
        for count in range(int(multi_num)):
            if create_autodestroy:
                # Run virsh command in interactive mode
                vmxml_backup.undefine()
                vp = virsh.VirshPersistent()
                vp.create(vmxml_backup['xml'], '--autodestroy')
                cmd_result = vp.snapshot_create_as(vm_name,
                                                   options,
                                                   ignore_status=True,
                                                   debug=True)
                vp.close_session()
                vmxml_backup.define()
            else:
                cmd_result = virsh.snapshot_create_as(vm_name,
                                                      options,
                                                      unprivileged_user=usr,
                                                      uri=uri,
                                                      ignore_status=True,
                                                      debug=True)
                # For multiple snapshots without a specific snapshot name,
                # the snapshot name is a time string with 1-second
                # granularity; sleep a bit more than 1 second here to avoid
                # a creation failure due to a duplicate snapshot name.
                if int(multi_num) > 1:
                    time.sleep(1.1)
            output = cmd_result.stdout.strip()
            status = cmd_result.exit_status

            # check status_error
            if status_error == "yes":
                if status == 0:
                    test.fail("Run successfully with wrong command!")
                else:
                    # Check memspec file should be removed if failed
                    if (options.find("memspec") >= 0
                            and options.find("atomic") >= 0):
                        if os.path.isfile(option_dict['memspec']):
                            os.remove(option_dict['memspec'])
                            test.fail("Run failed but file %s exist" %
                                      option_dict['memspec'])
                        else:
                            logging.info("Run failed as expected and memspec"
                                         " file already been removed")
                    # Check domain xml is not updated if reuse external fail
                    elif reuse_external and dac_denial:
                        output = virsh.dumpxml(vm_name).stdout.strip()
                        if "reuse_external" in output:
                            test.fail("Domain xml should not be "
                                      "updated with snapshot image")
                    else:
                        logging.info("Run failed as expected")

            elif status_error == "no":
                if status != 0:
                    test.fail("Run failed with right command: %s" % output)
                else:
                    # Check the special options
                    snaps_list = virsh.snapshot_list(vm_name)
                    logging.debug("snaps_list is %s", snaps_list)

                    check_snapslist(test, vm_name, options, option_dict,
                                    output, snaps_before, snaps_list)

                    # To cover bug 872292
                    if check_json_no_savevm:
                        pattern = "The command savevm has not been found"
                        with open(libvirtd_log_path) as f:
                            for line in f:
                                if pattern in line and "error" in line:
                                    test.fail("'%s' was found: %s" %
                                              (pattern, line))

    finally:
        if vm.is_alive():
            vm.destroy()
        # recover domain xml
        xml_recover(vmxml_backup)
        path = "/var/lib/libvirt/qemu/snapshot/" + vm_name
        if os.path.isfile(path):
            test.fail("Still can find snapshot metadata")

        if disk_src_protocol == 'gluster':
            gluster.setup_or_cleanup_gluster(False,
                                             brick_path=brick_path,
                                             **params)
            libvirtd.restart()

        if disk_src_protocol == 'iscsi':
            libvirt.setup_or_cleanup_iscsi(False, restart_tgtd=restart_tgtd)

        # rm bad disks
        if bad_disk is not None:
            os.remove(bad_disk)
        # rm attached disks and reused external disks
        if dnum > 1 and "--print-xml" not in options:
            for i in range(dnum):
                disk_path = os.path.join(data_dir.get_tmp_dir(),
                                         'disk%s.qcow2' % i)
                if os.path.exists(disk_path):
                    os.unlink(disk_path)
                if reuse_external:
                    external_disk = "external_disk%s" % i
                    disk_path = os.path.join(data_dir.get_tmp_dir(),
                                             params.get(external_disk))
                    if os.path.exists(disk_path):
                        os.unlink(disk_path)

        # restore config
        if config_format and qemu_conf:
            qemu_conf.restore()

        if libvirtd_conf:
            libvirtd_conf.restore()

        if libvirtd_conf or (config_format and qemu_conf):
            libvirtd.restart()

        if libvirtd_log_path and os.path.exists(libvirtd_log_path):
            os.unlink(libvirtd_log_path)
def run(test, params, env):
    """
    Test qemu-monitor-command blockjobs by migrating with option
    --copy-storage-all or --copy-storage-inc.
    """
    if not libvirt_version.version_compare(1, 0, 1):
        raise error.TestNAError("Blockjob functions - "
                                "complete,pause,resume are"
                                "not supported in current libvirt version.")

    vm = env.get_vm(params.get("main_vm"))
    cpu_size = int(params.get("cpu_size", "1"))
    memory_size = int(params.get("memory_size", "1048576"))
    primary_target = vm.get_first_disk_devices()["target"]
    file_path, file_size = vm.get_device_size(primary_target)
    # Convert to GiB
    file_size = int(file_size) / 1073741824
    image_format = utils_test.get_image_info(file_path)["format"]

    remote_host = params.get("migrate_dest_host", "REMOTE.EXAMPLE")
    remote_user = params.get("remote_user", "root")
    remote_passwd = params.get("migrate_dest_pwd", "PASSWORD.EXAMPLE")
    if remote_host.count("EXAMPLE"):
        raise error.TestNAError("Config remote or local host first.")
    # Config ssh autologin for it
    ssh_key.setup_ssh_key(remote_host, remote_user, remote_passwd, port=22)

    # Define a new vm with modified cpu/memory
    new_vm_name = "%s_blockjob" % vm.name
    if vm.is_alive():
        vm.destroy()
    utlv.define_new_vm(vm.name, new_vm_name)
    try:
        set_cpu_memory(new_vm_name, cpu_size, memory_size)
        vm = libvirt_vm.VM(new_vm_name, vm.params, vm.root_dir,
                           vm.address_cache)
    except:   # Make sure created vm is cleaned up
        virsh.remove_domain(new_vm_name)
        raise

    rdm_params = {"remote_ip": remote_host, "remote_user": remote_user,
                  "remote_pwd": remote_passwd}
    rdm = utils_test.RemoteDiskManager(rdm_params)

    try:
        vm = libvirt_vm.VM(new_vm_name, vm.params, vm.root_dir,
                           vm.address_cache)
        vm.start()

        rdm.create_image("file", file_path, file_size, None, None,
                         img_frmt=image_format)

        logging.debug("Start migration...")
        copied_migration(vm, params, params.get("qmp_blockjob_type"),
                         primary_target)
    finally:
        # Recover created vm
        if vm.is_alive():
            vm.destroy()
        if vm.name == new_vm_name:
            vm.undefine()
        rdm.remove_path("file", file_path)
        rdm.runner.session.close()
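The copied_migration and set_cpu_memory helpers used above are defined elsewhere in the same test module and are not shown here. As a rough, simplified illustration of the migration step only, the sketch below performs a synchronous migration with full storage copy; the destination URI parameter name and the use of the virsh.migrate/virsh.blockjob wrappers are assumptions, not the original helper.

def copied_migration_sketch(vm, params, target):
    """Hypothetical, simplified stand-in for copied_migration()."""
    # Assumed parameter name for the destination URI.
    dest_uri = params.get("virsh_migrate_desturi",
                          "qemu+ssh://%s/system" % params.get("migrate_dest_host"))
    # --copy-storage-all copies the whole disk image to the destination;
    # --copy-storage-inc would copy only the blocks missing there.
    options = "--live --copy-storage-all"
    ret = virsh.migrate(vm.name, dest_uri, options,
                        ignore_status=True, debug=True)
    # While the copy job is running it could be inspected or cancelled with:
    #   virsh.blockjob(vm.name, target, "--info", debug=True)
    #   virsh.blockjob(vm.name, target, "--abort", debug=True)
    return ret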
Exemplo n.º 42
0
def run(test, params, env):
    """
    Test domfsfreeze command, make sure that all supported options work well

    Test scenarios:
    1. fsfreeze all fs without options
    2. fsfreeze a mountpoint with --mountpoint
    3. fsfreeze a mountpoint without --mountpoint
    """
    def check_freeze(session):
        """
        Check whether the file system has been frozen by touching a test
        file and seeing whether the command hangs.

        :param session: Guest session to be tested.
        """
        try:
            output = session.cmd_output('touch freeze_test', timeout=10)
            test.fail("Failed to freeze file system. "
                      "Create file succeeded:%s\n" % output)
        except aexpect.ShellTimeoutError:
            pass

    if not virsh.has_help_command('domfsfreeze'):
        test.cancel("This version of libvirt does not support "
                    "the domfsfreeze test")

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    vm_ref = params.get("vm_ref", "name")
    vm_name = params.get("main_vm", "virt-tests-vm1")
    mountpoint = params.get("domfsfreeze_mnt", None)
    options = params.get("domfsfreeze_options", "")
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    start_vm = ("yes" == params.get("start_vm", "yes"))
    has_channel = ("no" == params.get("no_qemu_ga", "no"))
    start_qemu_ga = ("no" == params.get("no_start_qemu_ga", "no"))
    status_error = ("yes" == params.get("status_error", "no"))

    # Do backup for origin xml
    xml_backup = vm_xml.VMXML.new_from_dumpxml(vm_name)
    try:
        vm = env.get_vm(vm_name)
        if vm.is_alive():
            vm.destroy()

        if has_channel:
            # Add channel device for qemu-ga
            vm.prepare_guest_agent(start=start_qemu_ga)
        else:
            # Remove qemu-ga channel
            vm.prepare_guest_agent(channel=False, start=False)

        if start_vm:
            if not vm.is_alive():
                vm.start()
            domid = vm.get_id()
            session = vm.wait_for_login()
        else:
            vm.destroy()

        domuuid = vm.get_uuid()
        if vm_ref == "id":
            vm_ref = domid
        elif vm_ref == "uuid":
            vm_ref = domuuid
        elif vm_ref.count("invalid"):
            vm_ref = uuid.uuid1()
        elif vm_ref == "none":
            vm_ref = ""
        elif vm_ref == "name":
            vm_ref = vm_name

        result = virsh.domfsfreeze(vm_ref,
                                   mountpoint=mountpoint,
                                   options=options,
                                   unprivileged_user=unprivileged_user,
                                   uri=uri,
                                   debug=True)
        libvirt.check_exit_status(result, status_error)
        if not result.exit_status:
            check_freeze(session)

    finally:
        # Do domain recovery
        xml_backup.sync()
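Note on recovery: the test above restores the backed-up XML, which destroys and redefines the domain. If the guest were kept running after a successful freeze, the filesystems would normally be thawed again; a minimal sketch of that companion step, assuming a virsh.domfsthaw wrapper that mirrors virsh.domfsfreeze, is:

def thaw_guest_fs(vm_ref, mountpoint=None):
    """Hypothetical companion step: undo a successful domfsfreeze."""
    # virsh.domfsthaw is assumed to take the same domain/mountpoint
    # arguments as virsh.domfsfreeze.
    result = virsh.domfsthaw(vm_ref, mountpoint=mountpoint, debug=True)
    libvirt.check_exit_status(result)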
Exemplo n.º 43
0
def run(test, params, env):
    """
    Test command: virsh net-destroy.

    The command can forcefully stop a given network.
    1.Make sure the network exists.
    2.Prepare network status.
    3.Perform virsh net-destroy operation.
    4.Check if the network has been destroyed.
    5.Recover network environment.
    6.Confirm the test result.
    """

    net_ref = params.get("net_destroy_net_ref")
    extra = params.get("net_destroy_extra", "")
    network_name = params.get("net_destroy_network", "default")
    network_status = params.get("net_destroy_status", "active")
    status_error = params.get("status_error", "no")
    net_persistent = "yes" == params.get("net_persistent", "yes")
    net_cfg_file = params.get("net_cfg_file",
                              "/usr/share/libvirt/networks/default.xml")
    check_libvirtd = "yes" == params.get("check_libvirtd")
    vm_defined = "yes" == params.get("vm_defined")
    check_vm = "yes" == params.get("check_vm")

    # libvirt acl polkit related params
    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    output_all = virsh.net_list("--all").stdout.strip()
    # prepare the network status: active, persistent
    if not re.search(network_name, output_all):
        if net_persistent:
            virsh.net_define(net_cfg_file, ignore_status=False)
            virsh.net_start(network_name, ignore_status=False)
        else:
            virsh.create(net_cfg_file, ignore_status=False)
    if net_persistent:
        if not virsh.net_state_dict()[network_name]['persistent']:
            logging.debug("make the network persistent...")
            make_net_persistent(network_name)
    else:
        if virsh.net_state_dict()[network_name]['persistent']:
            virsh.net_undefine(network_name, ignore_status=False)
    if not virsh.net_state_dict()[network_name]['active']:
        if network_status == "active":
            virsh.net_start(network_name, ignore_status=False)
    else:
        if network_status == "inactive":
            logging.debug(
                "destroy network as we need to test inactive network...")
            virsh.net_destroy(network_name, ignore_status=False)
    logging.debug("After prepare: %s" % virsh.net_state_dict())

    # Run test case
    if net_ref == "uuid":
        net_ref = virsh.net_uuid(network_name).stdout.strip()
    elif net_ref == "name":
        net_ref = network_name

    if check_libvirtd or check_vm:
        vm_name = params.get("main_vm")
        if virsh.is_alive(vm_name):
            virsh.destroy(vm_name)
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        vmxml_backup = vmxml
        # make sure there is interface with source network as default
        iface_devices = vmxml.get_devices(device_type="interface")
        has_default_net = False
        for iface in iface_devices:
            source = iface.get_source()
            if 'network' in source.keys() and source['network'] == 'default':
                has_default_net = True
                break
            elif 'bridge' in source.keys() and source['bridge'] == 'virbr0':
                has_default_net = True
                break
        if not has_default_net:
            options = "network default --current"
            virsh.attach_interface(vm_name, options, ignore_status=False)
        try:
            if vm_defined:
                ret = virsh.start(vm_name)
            else:
                logging.debug("undefine the vm, then create the vm...")
                vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
                virsh.undefine(vm_name)
                ret = virsh.create(vmxml.xml)
                logging.debug(ret.stdout)
            # check the create or start cmd status
            utils_test.libvirt.check_exit_status(
                ret, expect_error=(network_status != 'active'))
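            # Default to failure; the checks below set status to 0 on success.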
            status = 1

            if status_error != 'yes':
                cmd = "ps -ef | grep /usr/sbin/libvirtd | grep -v grep"
                # record the libvirt pid then destroy network
                libvirtd_pid = process.run(
                    cmd, shell=True).stdout_text.strip().split()[1]
                ret = virsh.net_destroy(net_ref,
                                        extra,
                                        uri=uri,
                                        debug=True,
                                        unprivileged_user=unprivileged_user,
                                        ignore_status=True)
                utils_test.libvirt.check_exit_status(ret, expect_error=False)
                # check_libvirtd pid no change
                result = check_libvirtd_restart(libvirtd_pid, cmd)
                if result:
                    test.fail("libvirtd crash after destroy network!")
                    status = 1
                else:
                    logging.debug(
                        "libvirtd do not crash after destroy network!")
                    status = 0
                if check_libvirtd:
                    # destroy vm, check libvirtd pid no change
                    ret = virsh.destroy(vm_name)
                    utils_test.libvirt.check_exit_status(ret,
                                                         expect_error=False)
                    result = check_libvirtd_restart(libvirtd_pid, cmd)
                    if result:
                        test.fail("libvirtd crash after destroy vm!")
                        status = 1
                    else:
                        logging.debug(
                            "libvirtd do not crash after destroy vm!")
                        status = 0
                elif check_vm:
                    # restart libvirtd and check vm is running
                    libvirtd = utils_libvirtd.Libvirtd()
                    libvirtd.restart()
                    if not virsh.is_alive(vm_name):
                        test.fail(
                            "vm shutdown when transient network destroyed then libvirtd restart"
                        )
                    else:
                        status = 0

        finally:
            if not vm_defined:
                vmxml_backup.define()
            vmxml_backup.sync()

    else:
        readonly = (params.get("net_destroy_readonly", "no") == "yes")
        status = virsh.net_destroy(net_ref,
                                   extra,
                                   uri=uri,
                                   readonly=readonly,
                                   debug=True,
                                   unprivileged_user=unprivileged_user,
                                   ignore_status=True).exit_status
        # Confirm the network has been destroyed.
        if net_persistent:
            if virsh.net_state_dict()[network_name]['active']:
                status = 1
        else:
            output_all = virsh.net_list("--all").stdout.strip()
            if re.search(network_name, output_all):
                status = 1
                logging.debug(
                    "transient network should not exists after destroy")

    # Recover network status to system default status
    try:
        if network_name not in virsh.net_state_dict():
            virsh.net_define(net_cfg_file, ignore_status=False)
        if not virsh.net_state_dict()[network_name]['active']:
            virsh.net_start(network_name, ignore_status=False)
        if not virsh.net_state_dict()[network_name]['persistent']:
            make_net_persistent(network_name)
        if not virsh.net_state_dict()[network_name]['autostart']:
            virsh.net_autostart(network_name, ignore_status=False)
    except process.CmdError:
        test.error("Recover network status failed!")
    # Check status_error
    if status_error == "yes":
        if status == 0:
            test.fail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0:
            test.fail("Run failed with right command")
    else:
        test.error("The status_error must be 'yes' or 'no'!")
Exemplo n.º 44
0
def run(test, params, env):
    """
    Test send-key command, include all types of codeset and sysrq

    For normal sendkey test, we create a file to check the command
    execute by send-key. For sysrq test, check the /var/log/messages
    in RHEL or /var/log/syslog in Ubuntu and guest status
    """

    if not virsh.has_help_command('send-key'):
        test.cancel("This version of libvirt does not support the send-key "
                    "test")

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    status_error = ("yes" == params.get("status_error", "no"))
    keystrokes = params.get("sendkey", "")
    codeset = params.get("codeset", "")
    holdtime = params.get("holdtime", "")
    sysrq_test = ("yes" == params.get("sendkey_sysrq", "no"))
    sleep_time = int(params.get("sendkey_sleeptime", 5))
    readonly = params.get("readonly", False)
    username = params.get("username")
    password = params.get("password")
    create_file = params.get("create_file_name")
    uri = params.get("virsh_uri")
    simultaneous = params.get("sendkey_simultaneous", "yes") == "yes"
    unprivileged_user = params.get('unprivileged_user')
    crash_dir = "/var/crash"
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current libvirt "
                        "version.")

    def send_line(send_str):
        """
        send string to guest with send-key and end with Enter
        """
        for send_ch in list(send_str):
            virsh.sendkey(vm_name,
                          "KEY_%s" % send_ch.upper(),
                          ignore_status=False)

        virsh.sendkey(vm_name, "KEY_ENTER", ignore_status=False)

    vm = env.get_vm(vm_name)
    session = vm.wait_for_login()

    if sysrq_test:
        # The postprocess of a previous testcase may pause and resume the VM,
        # which changes the domstate reason to "unpaused" and makes the sysrq
        # reboot testcase fail, since the domstate reason persists across
        # reboot. It is therefore better to destroy and restart the VM before
        # the test starts.
        if "KEY_B" in keystrokes:
            cmd_result = virsh.domstate(vm_name,
                                        '--reason',
                                        ignore_status=True)
            if "unpaused" in cmd_result.stdout.strip():
                vm.destroy()
                vm.start()
                session = vm.wait_for_login()
        if "KEY_C" in keystrokes or "usb" in codeset:
            session.cmd("rm -rf {0}; mkdir {0}".format(crash_dir))
        if "usb" in codeset:
            libvirt.add_panic_device(vm_name)
            vm.start()
            session = vm.wait_for_login()
        LOG_FILE = "/var/log/messages"
        if "ubuntu" in vm.get_distro().lower():
            LOG_FILE = "/var/log/syslog"
        # Is 'rsyslog' installed on guest? It'll be what writes out
        # to LOG_FILE
        if not utils_package.package_install("rsyslog", session):
            test.fail("Fail to install rsyslog, make sure that you have "
                      "usable repo in guest")

        # clear messages, restart rsyslog, and make sure it's running
        session.cmd("echo '' > %s" % LOG_FILE)
        session.cmd("service rsyslog restart")
        ps_stat = session.cmd_status("ps aux |grep rsyslog")
        if ps_stat != 0:
            test.fail("rsyslog is not running in guest")

        # enable sysrq
        session.cmd("echo 1 > /proc/sys/kernel/sysrq")

    # make sure the environment is clear
    if create_file is not None:
        session.cmd("rm -rf %s" % create_file)

    try:
        # wait for the tty to start
        tty_stat = "ps aux|grep tty"
        timeout = 60
        while timeout >= 0 and \
                session.get_command_status(tty_stat) != 0:
            time.sleep(1)
            timeout = timeout - 1
        if timeout < 0:
            test.fail("Can not wait for tty started in 60s")

        # send user and passwd to guest to login
        send_line(username)
        time.sleep(2)
        send_line(password)
        time.sleep(2)

        if sysrq_test or simultaneous:
            output = virsh.sendkey(vm_name,
                                   keystrokes,
                                   codeset=codeset,
                                   holdtime=holdtime,
                                   readonly=readonly,
                                   unprivileged_user=unprivileged_user,
                                   uri=uri)
        else:
            # If multiple keycodes are specified, they are all sent
            # simultaneously to the guest, and they may be received
            # in random order. If you need distinct keypresses, you
            # must use multiple send-key invocations.
            for keystroke in keystrokes.split():
                output = virsh.sendkey(vm_name,
                                       keystroke,
                                       codeset=codeset,
                                       holdtime=holdtime,
                                       readonly=readonly,
                                       unprivileged_user=unprivileged_user,
                                       uri=uri)
                if output.exit_status:
                    test.fail("Failed to send key %s to guest: %s" %
                              (keystroke, output.stderr))
        time.sleep(sleep_time)
        if output.exit_status != 0:
            if status_error:
                logging.info(
                    "Failed to sendkey to guest as expected, Error:"
                    "%s.", output.stderr)
                return
            else:
                test.fail("Failed to send key to guest, Error:%s." %
                          output.stderr)
        elif status_error:
            test.fail("Expect fail, but succeed indeed.")

        if create_file is not None:
            # check if created file exist
            cmd_ls = "ls %s" % create_file
            sec_status, sec_output = session.get_command_status_output(cmd_ls)
            if sec_status == 0:
                logging.info("Succeed to create file with send key")
            else:
                test.fail("Fail to create file with send key, Error:%s" %
                          sec_output)
        elif sysrq_test:
            # check LOG_FILE info according to different key

            # Since there's no guarantee when messages will be written
            # we'll do a check and wait loop for up to 60 seconds
            timeout = 60
            while timeout >= 0:
                if "KEY_H" in keystrokes:
                    cmd = "cat %s | grep 'SysRq.*HELP'" % LOG_FILE
                    get_status = session.cmd_status(cmd)
                elif "KEY_M" in keystrokes:
                    cmd = "cat %s | grep 'SysRq.*Show Memory'" % LOG_FILE
                    get_status = session.cmd_status(cmd)
                elif "KEY_T" in keystrokes:
                    cmd = "cat %s | grep 'SysRq.*Show State'" % LOG_FILE
                    get_status = session.cmd_status(cmd)
                    # Sometimes the 'SysRq.*Show State' string is missing from
                    # LOG_FILE; as a fallback, check for the 'runnable tasks'
                    # log line instead.
                    if get_status != 0:
                        cmd = "cat %s | grep 'runnable tasks:'" % LOG_FILE
                        get_status = session.cmd_status(cmd)

                elif "KEY_B" in keystrokes:
                    session = vm.wait_for_login()
                    result = virsh.domstate(vm_name,
                                            '--reason',
                                            ignore_status=True)
                    output = result.stdout.strip()
                    logging.debug("The guest state: %s", output)
                    if not output.count("booted"):
                        get_status = 1
                    else:
                        get_status = 0
                        session.close()
                # crash
                elif "KEY_C" in keystrokes or "usb" in codeset:
                    session = vm.wait_for_login()
                    output = session.cmd_output("ls %s" % crash_dir)
                    logging.debug("Crash file is %s" % output)
                    if output:
                        get_status = 0
                    else:
                        get_status = 1
                        session.close()

                if get_status == 0:
                    timeout = -1
                else:
                    session.cmd("echo \"virsh sendkey waiting\" >> %s" %
                                LOG_FILE)
                    time.sleep(1)
                    timeout = timeout - 1

            if get_status != 0:
                test.fail("SysRq does not take effect in guest, keystrokes is "
                          "%s" % keystrokes)
            else:
                logging.info("Succeed to send SysRq command")
        else:
            test.fail("Test cfg file invalid: either sysrq_params or "
                      "create_file_name must be defined")

    finally:
        if create_file is not None:
            session = vm.wait_for_login()
            session.cmd("rm -rf %s" % create_file)
        session.close()
Exemplo n.º 45
0
def check_output(test, output_msg, params):
    """
    Check if known messages exist in the given output messages.

    :param test: the test object
    :param output_msg: the given output messages
    :param params: the dictionary including necessary parameters

    :raise TestSkipError: raised if the known error is found together
                          with some conditions satisfied
    """
    err_msg = params.get("err_msg", None)
    status_error = params.get("status_error", "no")
    if status_error == "yes" and err_msg:
        if err_msg in output_msg:
            logging.debug("Expected error '%s' was found", err_msg)
            return
        else:
            test.fail("The expected error '%s' was not found in output '%s'" %
                      (err_msg, output_msg))

    ERR_MSGDICT = {
        "Bug 1249587":
        "error: Operation not supported: " +
        "pre-creation of storage targets for incremental " +
        "storage migration is not supported",
        "ERROR 1":
        "error: internal error: unable to " +
        "execute QEMU command 'migrate': " +
        "this feature or command is not currently supported",
        "ERROR 2":
        "error: Cannot access storage file",
        "ERROR 3":
        "Unable to read TLS confirmation: " + "Input/output error",
        "ERROR 4":
        "error: Unsafe migration: Migration " +
        "without shared storage is unsafe"
    }

    # Check for special case firstly
    migrate_disks = "yes" == params.get("migrate_disks")
    status_error = "yes" == params.get("status_error")
    if migrate_disks and status_error:
        logging.debug("To check for migrate-disks...")
        disk = params.get("attach_A_disk_source")
        last_msg = "(as uid:107, gid:107): No such file or directory"
        if not libvirt_version.version_compare(4, 5, 0):
            expect_msg = "%s '%s' %s" % (ERR_MSGDICT["ERROR 2"], disk,
                                         last_msg)
        else:
            expect_msg = ERR_MSGDICT["ERROR 4"]
        if output_msg.find(expect_msg) >= 0:
            logging.debug("The expected error '%s' was found", expect_msg)
            return
        else:
            test.fail("The actual output:\n%s\n"
                      "The expected error '%s' was not found" %
                      (output_msg, expect_msg))

    if params.get("target_vm_name"):
        if output_msg.find(ERR_MSGDICT['ERROR 3']) >= 0:
            logging.debug("The expected error is found: %s",
                          ERR_MSGDICT['ERROR 3'])
            return
        else:
            test.fail("The actual output:\n%s\n"
                      "The expected error '%s' was not found" %
                      (output_msg, ERR_MSGDICT['ERROR 3']))

    for (key, value) in ERR_MSGDICT.items():
        if output_msg.find(value) >= 0:
            if key == "ERROR 1" and params.get("support_precreation") is True:
                logging.debug("The error is not expected: '%s'.", value)
            elif key == "ERROR 2":
                break
            else:
                logging.debug("The known error was found: %s --- %s", key,
                              value)
                test.cancel("Known error: %s --- %s in %s" %
                            (key, value, output_msg))
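check_output is typically fed the stderr of a failed virsh migrate run so that known or expected failures are classified; a minimal, hypothetical call site (the migration call itself and the dest_uri argument are assumptions, not part of this example) is:

def classify_migration_failure(test, vm_name, dest_uri, params):
    """Hypothetical call site for check_output()."""
    ret = virsh.migrate(vm_name, dest_uri, "--live --verbose",
                        ignore_status=True, debug=True)
    if ret.exit_status:
        check_output(test, ret.stderr.strip(), params)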
Exemplo n.º 46
0
def run(test, params, env):
    """
    Test the command virsh maxvcpus

    (1) Call virsh maxvcpus
    (2) Call virsh -c remote_uri maxvcpus
    (3) Call virsh maxvcpus with an unexpected option
    """

    # get the params from subtests.
    # params for general.
    option = params.get("virsh_maxvcpus_options")
    status_error = params.get("status_error")
    connect_arg = params.get("connect_arg", "")

    # params for transport connect.
    local_ip = params.get("local_ip", "ENTER.YOUR.LOCAL.IP")
    local_pwd = params.get("local_pwd", "ENTER.YOUR.LOCAL.ROOT.PASSWORD")
    server_ip = params.get("remote_ip", local_ip)
    server_pwd = params.get("remote_pwd", local_pwd)
    transport_type = params.get("connect_transport_type", "local")
    transport = params.get("connect_transport", "ssh")
    connect_uri = None
    # check the config
    if (connect_arg == "transport" and
            transport_type == "remote" and
            local_ip.count("ENTER")):
        raise exceptions.TestSkipError("Parameter local_ip is not configured "
                                       "in remote test.")
    if (connect_arg == "transport" and
            transport_type == "remote" and
            local_pwd.count("ENTER")):
        raise exceptions.TestSkipError("Parameter local_pwd is not configured "
                                       "in remote test.")

    if connect_arg == "transport":
        canonical_uri_type = virsh.driver()

        if transport == "ssh":
            ssh_connection = utils_conn.SSHConnection(server_ip=server_ip,
                                                      server_pwd=server_pwd,
                                                      client_ip=local_ip,
                                                      client_pwd=local_pwd)
            try:
                ssh_connection.conn_check()
            except utils_conn.ConnectionError:
                ssh_connection.conn_setup()
                ssh_connection.conn_check()

            connect_uri = libvirt_vm.get_uri_with_transport(
                uri_type=canonical_uri_type,
                transport=transport, dest_ip=server_ip)
            virsh_dargs = {'remote_ip': server_ip, 'remote_user': '******',
                           'remote_pwd': server_pwd,
                           'ssh_remote_auth': True}
            virsh_instance = virsh.VirshPersistent(**virsh_dargs)
    else:
        connect_uri = connect_arg
        virsh_instance = virsh

    if libvirt_version.version_compare(2, 3, 0):
        try:
            maxvcpus = None
            maxvcpus_cap = None
            dom_capabilities = None
            # make sure we take maxvcpus from the right host; helps in case of remote
            try:
                dom_capabilities = domcap.DomCapabilityXML(virsh_instance=virsh_instance)
                maxvcpus = dom_capabilities.max
                logging.debug("maxvcpus calculate from domcapabilities "
                              "is %s", maxvcpus)
            except Exception as details:
                raise exceptions.TestFail("Failed to get maxvcpus from "
                                          "domcapabilities xml:\n%s"
                                          % dom_capabilities)
            try:
                cap_xml = capability_xml.CapabilityXML()
                maxvcpus_cap = cap_xml.get_guest_capabilities()['hvm'][platform.machine()]['maxcpus']
                logging.debug('maxvcpus_cap is %s', maxvcpus_cap)
            except Exception as details:
                logging.debug("Failed to get maxvcpu from virsh "
                              "capabilities: %s", details)
                # Fall back in case of failure
                maxvcpus_cap = maxvcpus
            if not maxvcpus:
                raise exceptions.TestFail("Failed to get max value for vcpu"
                                          "from domcapabilities "
                                          "xml:\n%s" % dom_capabilities)
        except Exception as details:
            raise exceptions.TestFail("Failed get the virsh instance with uri: "
                                      "%s\n Details: %s" % (connect_uri, details))

    is_arm = "aarch" in platform.machine()
    if is_arm:
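        # The reported maximum on aarch64 depends on the GIC version exposed
        # in domcapabilities (the check below expects 8 vcpus for GICv2
        # guests), so record it here.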
        for gic_enum in domcap.DomCapabilityXML()['features']['gic_enums']:
            if gic_enum['name'] == "version":
                gic_version = gic_enum['values'][0].get_value()

    # Run test case
    result = virsh.maxvcpus(option, uri=connect_uri, ignore_status=True,
                            debug=True)

    maxvcpus_test = result.stdout.strip()
    status = result.exit_status

    # Check status_error
    if status_error == "yes":
        if status == 0:
            raise exceptions.TestFail("Run successed with unsupported option!")
        else:
            logging.info("Run failed with unsupported option %s " % option)
    elif status_error == "no":
        if status == 0:
            if not libvirt_version.version_compare(2, 3, 0):
                if "kqemu" in option:
                    if not maxvcpus_test == '1':
                        raise exceptions.TestFail("Command output %s is not "
                                                  "expected for %s " % (maxvcpus_test, option))
                elif option in ['qemu', '--type qemu', '']:
                    if not maxvcpus_test == '16':
                        raise exceptions.TestFail("Command output %s is not "
                                                  "expected for %s " % (maxvcpus_test, option))
                else:
                    # No check with other types
                    pass
            else:
                # It covers all possible combinations
                if option in ['qemu', 'kvm', '--type qemu', '--type kvm', 'kqemu', '--type kqemu', '']:
                    if (is_arm and gic_version == '2' and option in ['kvm', '']):
                        if not maxvcpus_test == '8':
                            raise exceptions.TestFail("Command output %s is not "
                                                      "expected for %s " % (maxvcpus_test, option))
                    elif not (maxvcpus_test == maxvcpus or maxvcpus_test == maxvcpus_cap):
                        raise exceptions.TestFail("Command output %s is not "
                                                  "expected for %s " % (maxvcpus_test, option))
                else:
                    # No check with other types
                    pass
        else:
            raise exceptions.TestFail("Run command failed")
Exemplo n.º 47
0
def run(test, params, env):
    """
    Test command: virsh blockpull <domain> <path>

    1) Prepare test environment.
    2) Populate a disk from its backing image.
    3) Recover test environment.
    4) Check result.
    """
    def make_disk_snapshot():
        # Make four external snapshots for disks only
        for count in range(1, 5):
            snap_xml = snapshot_xml.SnapshotXML()
            snapshot_name = "snapshot_test%s" % count
            snap_xml.snap_name = snapshot_name
            snap_xml.description = "Snapshot Test %s" % count

            # Add all disks into xml file.
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            disks = vmxml.devices.by_device_tag('disk')
            new_disks = []
            for src_disk_xml in disks:
                disk_xml = snap_xml.SnapDiskXML()
                disk_xml.xmltreefile = src_disk_xml.xmltreefile
                del disk_xml.device
                del disk_xml.address
                disk_xml.snapshot = "external"
                disk_xml.disk_name = disk_xml.target['dev']

                # Only qcow2 works as external snapshot file format, update it
                # here
                driver_attr = disk_xml.driver
                driver_attr.update({'type': 'qcow2'})
                disk_xml.driver = driver_attr

                new_attrs = disk_xml.source.attrs
                if 'file' in disk_xml.source.attrs:
                    file_name = disk_xml.source.attrs['file']
                    new_file = "%s.snap%s" % (file_name.split('.')[0], count)
                    snapshot_external_disks.append(new_file)
                    new_attrs.update({'file': new_file})
                    hosts = None
                elif ('name' in disk_xml.source.attrs
                      and disk_src_protocol == 'gluster'):
                    src_name = disk_xml.source.attrs['name']
                    new_name = "%s.snap%s" % (src_name.split('.')[0], count)
                    new_attrs.update({'name': new_name})
                    snapshot_external_disks.append(new_name)
                    hosts = disk_xml.source.hosts
                elif ('dev' in disk_xml.source.attrs
                      or 'name' in disk_xml.source.attrs):
                    if (disk_xml.type_name == 'block'
                            or disk_src_protocol in ['iscsi', 'rbd']):
                        # Use local file as external snapshot target for block
                        # and iscsi network type.
                        # A block device is treated as raw format by default,
                        # so it is not suitable as an external disk snapshot
                        # target. A workaround is to run qemu-img again on
                        # the target.
                        # And external active snapshots are not supported on
                        # 'network' disks using 'iscsi' protocol
                        disk_xml.type_name = 'file'
                        if 'dev' in new_attrs:
                            del new_attrs['dev']
                        elif 'name' in new_attrs:
                            del new_attrs['name']
                            del new_attrs['protocol']
                        new_file = "%s/blk_src_file.snap%s" % (tmp_dir, count)
                        snapshot_external_disks.append(new_file)
                        new_attrs.update({'file': new_file})
                        hosts = None

                new_src_dict = {"attrs": new_attrs}
                if hosts:
                    new_src_dict.update({"hosts": hosts})
                disk_xml.source = disk_xml.new_disk_source(**new_src_dict)

                new_disks.append(disk_xml)

            snap_xml.set_disks(new_disks)
            snapshot_xml_path = snap_xml.xml
            logging.debug("The snapshot xml is: %s" % snap_xml.xmltreefile)

            options = "--disk-only --xmlfile %s " % snapshot_xml_path

            snapshot_result = virsh.snapshot_create(vm_name,
                                                    options,
                                                    debug=True)

            if snapshot_result.exit_status != 0:
                raise error.TestFail(snapshot_result.stderr)

            # Create a file flag in VM after each snapshot
            flag_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"),
                                                    dir="/tmp")
            file_path = flag_file.name
            flag_file.close()

            status, output = session.cmd_status_output("touch %s" % file_path)
            if status:
                raise error.TestFail("Touch file in vm failed. %s" % output)
            snapshot_flag_files.append(file_path)

    # MAIN TEST CODE ###
    # Process cartesian parameters
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    needs_agent = "yes" == params.get("needs_agent", "yes")
    replace_vm_disk = "yes" == params.get("replace_vm_disk", "no")
    snap_in_mirror = "yes" == params.get("snap_in_mirror", 'no')
    snap_in_mirror_err = "yes" == params.get("snap_in_mirror_err", 'no')
    bandwidth = params.get("bandwidth", None)
    with_timeout = ("yes" == params.get("with_timeout_option", "no"))
    status_error = ("yes" == params.get("status_error", "no"))
    base_option = params.get("base_option", None)
    keep_relative = "yes" == params.get("keep_relative", 'no')
    virsh_dargs = {'debug': True}

    # Process domain disk device parameters
    disk_type = params.get("disk_type")
    disk_target = params.get("disk_target", 'vda')
    disk_src_protocol = params.get("disk_source_protocol")
    restart_tgtd = params.get("restart_tgtd", "no")
    vol_name = params.get("vol_name")
    tmp_dir = data_dir.get_tmp_dir()
    pool_name = params.get("pool_name", "gluster-pool")
    brick_path = os.path.join(tmp_dir, pool_name)

    # A backup of original vm
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    logging.debug("original xml is %s", vmxml_backup)

    # Abort the test if there are snapshots already
    existing_snaps = virsh.snapshot_list(vm_name)
    if len(existing_snaps) != 0:
        raise error.TestFail("There are snapshots created for %s already" %
                             vm_name)

    snapshot_external_disks = []
    try:
        if disk_src_protocol == 'iscsi' and disk_type == 'network':
            if not libvirt_version.version_compare(1, 0, 4):
                raise error.TestNAError("'iscsi' disk doesn't support in"
                                        " current libvirt version.")
        if disk_src_protocol == 'gluster':
            if not libvirt_version.version_compare(1, 2, 7):
                raise error.TestNAError("Snapshot on glusterfs not"
                                        " support in current "
                                        "version. Check more info "
                                        " with https://bugzilla.re"
                                        "dhat.com/show_bug.cgi?id="
                                        "1017289")

        # Set vm xml and guest agent
        if replace_vm_disk:
            if disk_src_protocol == "rbd" and disk_type == "network":
                src_host = params.get("disk_source_host", "EXAMPLE_HOSTS")
                mon_host = params.get("mon_host", "EXAMPLE_MON_HOST")
                if src_host.count("EXAMPLE") or mon_host.count("EXAMPLE"):
                    raise error.TestNAError("Please provide ceph host first.")
            libvirt.set_vm_disk(vm, params, tmp_dir)

        if needs_agent:
            vm.prepare_guest_agent()

        # The first disk is supposed to include OS
        # We will perform blockpull operation for it.
        first_disk = vm.get_first_disk_devices()
        blk_source = first_disk['source']
        blk_target = first_disk['target']
        snapshot_flag_files = []

        # get a vm session before snapshot
        session = vm.wait_for_login()
        # do snapshot
        make_disk_snapshot()

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug("The domain xml after snapshot is %s" % vmxml)

        # snapshot src file list
        snap_src_lst = [blk_source]
        snap_src_lst += snapshot_external_disks

        if snap_in_mirror:
            blockpull_options = "--bandwidth 1"
        else:
            blockpull_options = "--wait --verbose"

        if with_timeout:
            blockpull_options += " --timeout 1"

        if bandwidth:
            blockpull_options += " --bandwidth %s" % bandwidth

        if base_option == "async":
            blockpull_options += " --async"

        base_image = None
        base_index = None
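        # With libvirt >= 1.2.4 the --base argument can reference a backing
        # chain entry by index, e.g. "vda[2]", where the index matches the
        # <backingStore index=...> entries in the domain XML; older versions
        # need the image path itself, taken from the snapshot source list.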
        if (libvirt_version.version_compare(1, 2, 4)
                or disk_src_protocol == 'gluster'):
            if base_option == "shallow":
                base_index = 1
                base_image = "%s[%s]" % (disk_target, base_index)
            elif base_option == "base":
                base_index = 2
                base_image = "%s[%s]" % (disk_target, base_index)
            elif base_option == "top":
                base_index = 0
                base_image = "%s[%s]" % (disk_target, base_index)
        else:
            if base_option == "shallow":
                base_image = snap_src_lst[3]
            elif base_option == "base":
                base_image = snap_src_lst[2]
            elif base_option == "top":
                base_image = snap_src_lst[4]

        if base_option and base_image:
            blockpull_options += " --base %s" % base_image

        if keep_relative:
            blockpull_options += " --keep-relative"

        # Run test case
        result = virsh.blockpull(vm_name, blk_target, blockpull_options,
                                 **virsh_dargs)
        status = result.exit_status

        # Check status_error
        libvirt.check_exit_status(result, status_error)

        if not status and not with_timeout:
            if snap_in_mirror:
                snap_mirror_path = "%s/snap_mirror" % tmp_dir
                snap_options = "--diskspec vda,snapshot=external,"
                snap_options += "file=%s --disk-only" % snap_mirror_path
                snapshot_external_disks.append(snap_mirror_path)
                ret = virsh.snapshot_create_as(vm_name,
                                               snap_options,
                                               ignore_status=True,
                                               debug=True)
                libvirt.check_exit_status(ret, snap_in_mirror_err)
                return

            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            disks = vmxml.devices.by_device_tag('disk')
            for disk in disks:
                if disk.target['dev'] != blk_target:
                    continue
                else:
                    disk_xml = disk.xmltreefile
                    break

            logging.debug("after pull the disk xml is: %s" % disk_xml)
            if libvirt_version.version_compare(1, 2, 4):
                err_msg = "Domain image backing chain check failed"
                if not base_option or "async" in base_option:
                    chain_lst = snap_src_lst[-1:]
                    ret = check_chain_xml(disk_xml, chain_lst)
                    if not ret:
                        raise error.TestFail(err_msg)
                elif "base" or "shallow" in base_option:
                    chain_lst = snap_src_lst[::-1]
                    if not base_index and base_image:
                        base_index = chain_lst.index(base_image)
                    val_tmp = []
                    for i in range(1, base_index):
                        val_tmp.append(chain_lst[i])
                    for i in val_tmp:
                        chain_lst.remove(i)
                    ret = check_chain_xml(disk_xml, chain_lst)
                    if not ret:
                        raise error.TestFail(err_msg)

        # If base image is the top layer of snapshot chain,
        # virsh blockpull should fail, return directly
        if base_option == "top":
            return

        # Check flag files
        for flag in snapshot_flag_files:
            status, output = session.cmd_status_output("cat %s" % flag)
            if status:
                raise error.TestFail("blockpull failed: %s" % output)

    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Recover xml of vm.
        vmxml_backup.sync("--snapshots-metadata")

        if disk_src_protocol != 'gluster':
            for disk in snapshot_external_disks:
                if os.path.exists(disk):
                    os.remove(disk)

        libvirtd = utils_libvirtd.Libvirtd()

        if disk_src_protocol == 'iscsi':
            libvirt.setup_or_cleanup_iscsi(is_setup=False,
                                           restart_tgtd=restart_tgtd)
        elif disk_src_protocol == 'gluster':
            libvirt.setup_or_cleanup_gluster(False, vol_name, brick_path)
            libvirtd.restart()
        elif disk_src_protocol == 'netfs':
            restore_selinux = params.get('selinux_status_bak')
            libvirt.setup_or_cleanup_nfs(is_setup=False,
                                         restore_selinux=restore_selinux)
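check_chain_xml, used above to validate the backing chain after the pull, is defined elsewhere in the test module. A rough, hypothetical re-implementation, assuming the disk XML is an ElementTree-like object whose nested <backingStore> elements carry the chain, is sketched below.

def check_chain_xml_sketch(disk_xml, chain_lst):
    """Hypothetical chain check: compare the backing chain against chain_lst."""
    found = []
    node = disk_xml.getroot() if hasattr(disk_xml, "getroot") else disk_xml
    while node is not None:
        source = node.find("source")
        if source is None:
            break
        # gluster sources use 'name', block devices 'dev', local files 'file'
        found.append(source.get("file") or source.get("name")
                     or source.get("dev"))
        node = node.find("backingStore")
    logging.debug("backing chain found: %s, expected: %s", found, chain_lst)
    return found == chain_lst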
Exemplo n.º 48
0
def run(test, params, env):
    """
    Test command: virsh net-define/net-undefine.

    1) Collect parameters&environment info before test
    2) Prepare options for command
    3) Execute command for test
    4) Check state of defined network
    5) Recover environment
    6) Check result
    """
    uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri",
                                                      "default"))
    net_name = params.get("net_define_undefine_net_name", "default")
    net_uuid = params.get("net_define_undefine_net_uuid", "")
    options_ref = params.get("net_define_undefine_options_ref", "default")
    trans_ref = params.get("net_define_undefine_trans_ref", "trans")
    extra_args = params.get("net_define_undefine_extra", "")
    remove_existing = params.get("net_define_undefine_remove_existing", "yes")
    status_error = "yes" == params.get("status_error", "no")
    check_states = "yes" == params.get("check_states", "no")
    net_persistent = "yes" == params.get("net_persistent")
    net_active = "yes" == params.get("net_active")
    expect_msg = params.get("net_define_undefine_err_msg")

    # define multi ip/dhcp sections in network
    multi_ip = "yes" == params.get("multi_ip", "no")
    netmask = params.get("netmask")
    prefix_v6 = params.get("prefix_v6")
    single_v6_range = "yes" == params.get("single_v6_range", "no")
    # Get 2nd ipv4 dhcp range
    dhcp_ranges_start = params.get("dhcp_ranges_start", None)
    dhcp_ranges_end = params.get("dhcp_ranges_end", None)

    # Get 2 groups of ipv6 ip address and dhcp section
    address_v6_1 = params.get("address_v6_1")
    dhcp_ranges_v6_start_1 = params.get("dhcp_ranges_v6_start_1", None)
    dhcp_ranges_v6_end_1 = params.get("dhcp_ranges_v6_end_1", None)

    address_v6_2 = params.get("address_v6_2")
    dhcp_ranges_v6_start_2 = params.get("dhcp_ranges_v6_start_2", None)
    dhcp_ranges_v6_end_2 = params.get("dhcp_ranges_v6_end_2", None)

    # Edit net xml forward/ip part then define/start to check invalid setting
    edit_xml = "yes" == params.get("edit_xml", "no")
    address_v4 = params.get("address_v4")
    nat_port_start = params.get("nat_port_start")
    nat_port_end = params.get("nat_port_end")
    test_port = "yes" == params.get("test_port", "no")
    loop = int(params.get("loop", 1))

    virsh_dargs = {'uri': uri, 'debug': False, 'ignore_status': True}
    virsh_instance = virsh.VirshPersistent(**virsh_dargs)

    # libvirt acl polkit related params
    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    virsh_uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    # Prepare environment and record current net_state_dict
    backup = network_xml.NetworkXML.new_all_networks_dict(virsh_instance)
    backup_state = virsh_instance.net_state_dict()
    logging.debug("Backed up network(s): %s", backup_state)

    # Make some XML to use for testing, for now we just copy 'default'
    test_xml = xml_utils.TempXMLFile()  # temporary file
    try:
        # LibvirtXMLBase.__str__ returns XML content
        test_xml.write(str(backup['default']))
        test_xml.flush()
    except (KeyError, AttributeError):
        test.cancel("Test requires default network to exist")

    testnet_xml = get_network_xml_instance(virsh_dargs,
                                           test_xml,
                                           net_name,
                                           net_uuid,
                                           bridge=None)

    if remove_existing:
        for netxml in list(backup.values()):
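            # remove all libvirt state for this network (destroy it if
            # active, then undefine it)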
            netxml.orbital_nuclear_strike()

    # Test both define and undefine, So collect info
    # both of them for result check.
    # When something wrong with network, set it to 1
    fail_flag = 0
    result_info = []

    if options_ref == "correct_arg":
        define_options = testnet_xml.xml
        undefine_options = net_name
    elif options_ref == "no_option":
        define_options = ""
        undefine_options = ""
    elif options_ref == "not_exist_option":
        define_options = "/not/exist/file"
        undefine_options = "NOT_EXIST_NETWORK"

    define_extra = undefine_extra = extra_args
    if trans_ref != "define":
        define_extra = ""

    if params.get('setup_libvirt_polkit') == 'yes':
        virsh_dargs = {
            'uri': virsh_uri,
            'unprivileged_user': unprivileged_user,
            'debug': False,
            'ignore_status': True
        }
        cmd = "chmod 666 %s" % testnet_xml.xml
        process.run(cmd, shell=True)

    if params.get('net_define_undefine_readonly', 'no') == 'yes':
        virsh_dargs = {
            'uri': uri,
            'debug': False,
            'ignore_status': True,
            'readonly': True
        }
    try:
        if edit_xml:
            ipxml_v4 = network_xml.IPXML()
            ipxml_v4.address = address_v4
            ipxml_v4.netmask = netmask
            ipxml_v4.dhcp_ranges = {
                "start": dhcp_ranges_start,
                "end": dhcp_ranges_end
            }
            testnet_xml.del_ip()
            testnet_xml.set_ip(ipxml_v4)
            if test_port:
                nat_port = {"start": nat_port_start, "end": nat_port_end}
                testnet_xml.nat_port = nat_port
            testnet_xml.debug_xml()
        if multi_ip:
            # Enabling IPv6 forwarding with RA routes without accept_ra set to 2
            # is likely to cause loss of routes
            sysctl_cmd = 'sysctl net.ipv6.conf.all.accept_ra'
            original_accept_ra = to_text(
                process.system_output(sysctl_cmd + ' -n'))
            if original_accept_ra != '2':
                process.system(sysctl_cmd + '=2')
            # add another ipv4 address and dhcp range
            set_ip_section(testnet_xml,
                           address_v4,
                           ipv6=False,
                           netmask=netmask,
                           dhcp_ranges_start=dhcp_ranges_start,
                           dhcp_ranges_end=dhcp_ranges_end)
            # add ipv6 address and dhcp range
            set_ip_section(testnet_xml,
                           address_v6_1,
                           ipv6=True,
                           prefix_v6=prefix_v6,
                           dhcp_ranges_start=dhcp_ranges_v6_start_1,
                           dhcp_ranges_end=dhcp_ranges_v6_end_1)
            # 2nd ipv6 address and dhcp range
            set_ip_section(testnet_xml,
                           address_v6_2,
                           ipv6=True,
                           prefix_v6=prefix_v6,
                           dhcp_ranges_start=dhcp_ranges_v6_start_2,
                           dhcp_ranges_end=dhcp_ranges_v6_end_2)
        testnet_xml.debug_xml()
        # Run test case
        while loop:
            try:
                define_result = virsh.net_define(define_options, define_extra,
                                                 **virsh_dargs)
                logging.debug(define_result)
                define_status = define_result.exit_status

                # Check network states after define
                if check_states and not define_status:
                    net_state = virsh_instance.net_state_dict()
                    if (net_state[net_name]['active']
                            or net_state[net_name]['autostart']
                            or not net_state[net_name]['persistent']):
                        fail_flag = 1
                        result_info.append("Found wrong network states for "
                                           "defined netowrk: %s" %
                                           str(net_state))

                if define_status == 1 and status_error and expect_msg:
                    libvirt.check_result(define_result, expect_msg.split(';'))

                # If defining the network succeeded, try to start it.
                if define_status == 0:
                    start_result = virsh.net_start(net_name,
                                                   extra="",
                                                   **virsh_dargs)
                    logging.debug(start_result)
                    start_status = start_result.exit_status

                if trans_ref == "trans":
                    if define_status:
                        fail_flag = 1
                        result_info.append(
                            "Define network with right command failed.")
                    else:
                        if start_status:
                            fail_flag = 1
                            result_info.append(
                                "Starting the defined network failed: %s"
                                % str(start_result))

                # Check network states after start
                if check_states and not status_error:
                    net_state = virsh_instance.net_state_dict()
                    if (not net_state[net_name]['active']
                            or net_state[net_name]['autostart']
                            or not net_state[net_name]['persistent']):
                        fail_flag = 1
                        result_info.append("Found wrong network states for "
                                           "started netowrk: %s" %
                                           str(net_state))
                    # Try to set autostart
                    virsh.net_autostart(net_name, **virsh_dargs)
                    net_state = virsh_instance.net_state_dict()
                    if not net_state[net_name]['autostart']:
                        fail_flag = 1
                        result_info.append(
                            "Failed to set autostart for network %s" %
                            net_name)
                    # Restart libvirtd and check state
                    # Close down persistent virsh session before libvirtd restart
                    if hasattr(virsh_instance, 'close_session'):
                        virsh_instance.close_session()
                    libvirtd = utils_libvirtd.Libvirtd()
                    libvirtd.restart()
                    # Need to redefine virsh_instance after libvirtd restart
                    virsh_instance = virsh.VirshPersistent(**virsh_dargs)
                    net_state = virsh_instance.net_state_dict()
                    if (not net_state[net_name]['active']
                            or not net_state[net_name]['autostart']):
                        fail_flag = 1
                        result_info.append(
                            "Found wrong network state after restarting"
                            " libvirtd: %s" % str(net_state))
                    logging.debug("undefine network:")
                    # prepare the network status
                    if not net_persistent:
                        virsh.net_undefine(net_name, ignore_status=False)
                    if not net_active:
                        virsh.net_destroy(net_name, ignore_status=False)
                    undefine_status = virsh.net_undefine(
                        undefine_options, undefine_extra,
                        **virsh_dargs).exit_status

                    net_state = virsh_instance.net_state_dict()
                    if net_persistent:
                        if undefine_status:
                            fail_flag = 1
                            result_info.append(
                                "undefine should succeed but failed")
                        if net_active:
                            if (not net_state[net_name]['active']
                                    or net_state[net_name]['autostart']
                                    or net_state[net_name]['persistent']):
                                fail_flag = 1
                                result_info.append(
                                    "Found wrong network states for "
                                    "undefined netowrk: %s" % str(net_state))
                        else:
                            if net_name in net_state:
                                fail_flag = 1
                                result_info.append(
                                    "Transient network should not exists "
                                    "after undefine : %s" % str(net_state))
                    else:
                        if not undefine_status:
                            fail_flag = 1
                            result_info.append(
                                "undefine transient network should fail "
                                "but succeed: %s" % str(net_state))
                # Stop network for undefine test anyway
                destroy_result = virsh.net_destroy(net_name,
                                                   extra="",
                                                   **virsh_dargs)
                logging.debug(destroy_result)

                # Undefine network
                if not check_states:
                    undefine_result = virsh.net_undefine(
                        undefine_options, undefine_extra, **virsh_dargs)
                    if trans_ref != "define":
                        logging.debug(undefine_result)
                    undefine_status = undefine_result.exit_status
            except Exception:
                logging.debug(
                    "The define and undefine operation in loop %s failed. ",
                    loop)
            finally:
                loop = loop - 1

    finally:
        # Recover environment
        leftovers = network_xml.NetworkXML.new_all_networks_dict(
            virsh_instance)
        for netxml in list(leftovers.values()):
            netxml.orbital_nuclear_strike()

        # Recover from backup
        for netxml in list(backup.values()):
            netxml.sync(backup_state[netxml.name])

        # Close down persistent virsh session (including for all netxml copies)
        if hasattr(virsh_instance, 'close_session'):
            virsh_instance.close_session()

        # Done with file, cleanup
        del test_xml
        del testnet_xml

    # Check status_error
    # If fail_flag is set, it must be transaction test.
    if fail_flag:
        test.fail("Define network for transaction test "
                  "failed:%s" % result_info)

    # The logic to check the result:
    # status_error & (trans_ref == "undefine"): negative undefine test only
    # status_error & (no undefine): negative define test only
    # (not status_error) & (trans_ref == "undefine"): positive transaction test
    # (not status_error) & (no undefine): positive define test only
    if status_error:
        if trans_ref == "undefine":
            if undefine_status == 0:
                test.fail("Run successfully with wrong command.")
        else:
            if define_status == 0:
                if start_status == 0:
                    test.fail("Define an unexpected network, "
                              "and start it successfully.")
                else:
                    test.fail("Define an unexpected network, "
                              "but start it failed.")
    else:
        if trans_ref == "undefine":
            if undefine_status:
                test.fail("Define network for transaction "
                          "successfully, but undefine failed.")
        else:
            if define_status != 0:
                test.fail("Run failed with right command")
            else:
                if start_status != 0:
                    test.fail("Network is defined as expected, "
                              "but start it failed.")
Exemplo n.º 49
0
def run(test, params, env):
    """
    Test the command virsh nodecpustats

    (1) Call the virsh nodecpustats command for all host cpus
        separately
    (2) Get the output
    (3) Check against the /proc/stat output (o) for the respective cpu
        user: o[0] + o[1]
        system: o[2] + o[5] + o[6]
        idle: o[3]
        iowait: o[4]
    (4) Call the virsh nodecpustats command with an unexpected option
    (5) Call the virsh nodecpustats command with libvirtd service stop
    """
    def get_expected_stat(cpu=None):
        """
        Parse cpu stats from /proc/stat

        :param cpu: cpu index, None for total cpu stat
        :return: dict of cpu stats
        """
        stats = {}
        cpu_stat = []
        with open("/proc/stat", "r") as fl:
            for line in fl.readlines():
                if line.startswith("cpu"):
                    cpu_stat.append(line.strip().split(" ")[1:])
        # Delete additional space in the total cpu stats line
        del cpu_stat[0][0]
        if cpu is None:
            idx = 0
        else:
            idx = int(cpu) + 1
        stats['user'] = int(cpu_stat[idx][0]) + int(cpu_stat[idx][1])
        stats['system'] = int(cpu_stat[idx][2]) + int(cpu_stat[idx][5]) + int(
            cpu_stat[idx][6])
        stats['idle'] = int(cpu_stat[idx][3])
        stats['iowait'] = int(cpu_stat[idx][4])
        stats['total'] = stats['user'] + stats['system'] + stats[
            'idle'] + stats['iowait']
        return stats

    def virsh_check_nodecpustats_percpu(actual_stats, cpu):
        """
        Check the actual nodecpustats output value
        total time <= total stat from proc

        :param actual_stats: Actual cpu stats
        :param cpu: cpu index

        :return: True if it matches, otherwise fail the test
        """

        # Normalise from nanoseconds to seconds
        total = float(
            (actual_stats['system'] + actual_stats['user'] +
             actual_stats['idle'] + actual_stats['iowait']) / (10**9))

        expected = get_expected_stat(cpu)
        if not total <= expected['total']:
            test.fail("Commands 'virsh nodecpustats' not succeeded"
                      " as total time: %f is more"
                      " than proc/stat: %f" % (total, expected['total']))
        return True

    def virsh_check_nodecpustats(actual_stats):
        """
        Check the actual nodecpustats output value
        total time <= total stat from proc

        :param actual_stats: Actual cpu stats
        :return: True if it matches, otherwise fail the test
        """

        # Normalise from nanoseconds to seconds
        total = float(
            ((actual_stats['system'] + actual_stats['user'] +
              actual_stats['idle'] + actual_stats['iowait']) / (10**9)))
        expected = get_expected_stat()
        if not total <= expected['total']:
            test.fail("Commands 'virsh nodecpustats' not succeeded"
                      " as total time: %f is more"
                      " than proc/stat: %f" % (total, expected['total']))
        return True

    def virsh_check_nodecpustats_percentage(actual_per):
        """
        Check the actual nodecpustats percentage adds up to 100%

        :param actual_per: Actual cpu stats percentage
        :return: True if it matches, otherwise fail the test
        """

        total = int(
            round(actual_per['user'] + actual_per['system'] +
                  actual_per['idle'] + actual_per['iowait']))

        if not total == 100:
            test.fail("Commands 'virsh nodecpustats' not succeeded"
                      " as the total percentage value: %d"
                      " is not equal 100" % total)

    def parse_output(output):
        """
        To get the output parsed into a dictionary

        :param output: virsh command output
        :return: dict of user,system,idle,iowait times
        """

        # From the beginning of a line, group 1 is one or more word-characters,
        # followed by zero or more whitespace characters and a ':',
        # then one or more whitespace characters,
        # followed by group 2, which is one or more digit characters,
        # e.g. as below
        # user:                  6163690000000
        #
        regex_obj = re.compile(r"^(\w+)\s*:\s+(\d+)")
        actual = {}

        for line in output.stdout.split('\n'):
            match_obj = regex_obj.search(line)
            # Skip lines that do not match the pattern
            if match_obj is not None:
                name = match_obj.group(1)
                value = match_obj.group(2)
                actual[name] = int(value)
        return actual

    def parse_percentage_output(output):
        """
        To get the output parsed into a dictionary

        :param output: virsh command output
        :return: dict of user,system,idle,iowait times
        """

        # From the beginning of a line, group 1 is one or more word-characters,
        # followed by zero or more whitespace characters and a ':',
        # then one or more whitespace characters,
        # followed by group 2, which is one or more digit characters,
        # e.g. as below
        # user:             1.5%
        #
        regex_obj = re.compile(r"^(\w+)\s*:\s+(\d+.\d+)")
        actual_percentage = {}

        for line in output.stdout.split('\n'):
            match_obj = regex_obj.search(line)
            # Skip lines that do not match the pattern
            if match_obj is not None:
                name = match_obj.group(1)
                value = match_obj.group(2)
                actual_percentage[name] = float(value)
        return actual_percentage

    # Initialize the variables
    itr = int(params.get("inner_test_iterations"))
    option = params.get("virsh_cpunodestats_options")
    invalid_cpunum = params.get("invalid_cpunum")
    status_error = params.get("status_error")
    libvirtd = params.get("libvirtd", "on")

    # Prepare libvirtd service
    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    # Get the host cpu list
    host_cpus_list = cpuutil.cpu_online_list()

    # Run the test case for 5 iterations by default; this can be changed in
    # the subtests.cfg file
    for i in range(itr):

        if status_error == "yes":
            if invalid_cpunum == "yes":
                option = "--cpu %s" % (len(host_cpus_list) + 1)
            output = virsh.nodecpustats(ignore_status=True, option=option)
            status = output.exit_status

            if status == 0:
                if libvirtd == "off":
                    if libvirt_version.version_compare(5, 6, 0):
                        logging.debug(
                            "From libvirt version 5.6.0 libvirtd is restarted"
                            " and command should succeed")
                    else:
                        utils_libvirtd.libvirtd_start()
                        test.fail("Command 'virsh nodecpustats' "
                                  "succeeded with libvirtd service "
                                  "stopped, incorrect")
                else:
                    test.fail("Command 'virsh nodecpustats %s' "
                              "succeeded (incorrect command)" % option)

        elif status_error == "no":
            # Run the testcase for each cpu to get the cpu stats
            for idx, cpu in enumerate(host_cpus_list):
                option = "--cpu %s" % cpu
                output = virsh.nodecpustats(ignore_status=True, option=option)
                status = output.exit_status

                if status == 0:
                    actual_value = parse_output(output)
                    virsh_check_nodecpustats_percpu(actual_value, idx)
                else:
                    test.fail("Command 'virsh nodecpustats %s'"
                              "not succeeded" % option)

            # Run the test case for each cpu to get the cpu stats in percentage
            for cpu in host_cpus_list:
                option = "--cpu %s --percent" % cpu
                output = virsh.nodecpustats(ignore_status=True, option=option)
                status = output.exit_status

                if status == 0:
                    actual_value = parse_percentage_output(output)
                    virsh_check_nodecpustats_percentage(actual_value)
                else:
                    test.fail("Command 'virsh nodecpustats %s'"
                              " not succeeded" % option)

            option = ''
            # Run the test case for total cpus to get the cpus stats
            output = virsh.nodecpustats(ignore_status=True, option=option)
            status = output.exit_status

            if status == 0:
                actual_value = parse_output(output)
                virsh_check_nodecpustats(actual_value)
            else:
                test.fail("Command 'virsh nodecpustats %s'"
                          " not succeeded" % option)

            # Run the test case for the total cpus to get the stats in
            # percentage
            option = "--percent"
            output = virsh.nodecpustats(ignore_status=True, option=option)
            status = output.exit_status

            if status == 0:
                actual_value = parse_percentage_output(output)
                virsh_check_nodecpustats_percentage(actual_value)
            else:
                test.fail("Command 'virsh nodecpustats %s'"
                          " not succeeded" % option)

    # Recover libvirtd service start
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()
Exemplo n.º 50
0
def run(test, params, env):
    """
    Test the stdio_handler parameter in qemu.conf, which controls how
    stdout/stderr output from QEMU processes is handled.

    1) Change stdio_handler in qemu.conf;
    2) Restart libvirtd daemon;
    3) Check if libvirtd successfully started;
    4) Check if virtlogd.socket is running;
    5) Configure pty serial and console;
    6) Check if VM log file exists and has correct permission and owner;
    7) Check if VM log file is opened by virtlogd;
    8) Check if VM start log is written into VM log file correctly;
    9) Check if QEMU use pipe provided by virtlogd daemon for logging;
    10) Check if VM shutdown log is written into VM log file correctly;
    11) Check if pipe file can be closed gracefully after VM shutdown;
    12) Check if VM restart log can be appended to the end of previous log file;
    """

    def clean_up_vm_log_file(vm_name):
        """Clean up VM log file."""
        # Delete VM log file if exists.
        global QEMU_LOG_PATH
        guest_log_file = os.path.join(QEMU_LOG_PATH, "%s.log" % vm_name)
        if os.path.exists(guest_log_file):
            os.remove(guest_log_file)

    def configure(cmd, guest_log_file=None, errorMsg=None):
        """
        Configure qemu log.
        :param cmd: command string to execute.
        :param guest_log_file: path of the VM log file.
        :param errorMsg: error message to report if the command fails.
        :return: pipe node.
        """
        # If guest_log_file is not None, check whether the VM log file exists.
        if guest_log_file and not os.path.exists(guest_log_file):
            test.error("Expected VM log file: %s not exists" % guest_log_file)
        # If errorMsg is not None, check command running result.
        elif errorMsg:
            if process.run(cmd, ignore_status=True, shell=True).exit_status:
                test.error(errorMsg)
        # Get pipe node.
        else:
            result = process.run(cmd, timeout=90, ignore_status=True, shell=True)
            ret, output = result.exit_status, result.stdout_text
            if ret:
                test.fail("Failed to get pipe node")
            else:
                return output

    def configure_serial_console(vm_name):
        """Configure serial console"""
        # Check the primary serial and set it to pty.
        VMXML.set_primary_serial(vm_name, 'pty', '0', None)
        # Configure VM pty console.
        vm_pty_xml = VMXML.new_from_inactive_dumpxml(vm_name)
        vm_pty_xml.remove_all_device_by_type('console')

        console = Console()
        console.target_port = '0'
        console.target_type = 'serial'
        vm_pty_xml.add_device(console)
        vm_pty_xml.sync()

    def check_vm_log_file_permission_and_owner(vm_name):
        """Check VM log file permission and owner."""
        # Check VM log file permission.
        global QEMU_LOG_PATH
        guest_log_file = os.path.join(QEMU_LOG_PATH, "%s.log" % vm_name)
        logging.info("guest log file: %s", guest_log_file)
        if not os.path.exists(guest_log_file):
            test.error("Expected VM log file: %s not exists" % guest_log_file)
        permission = oct(stat.S_IMODE(os.lstat(guest_log_file).st_mode))
        if permission != '0600' and permission != '0o600':
            test.fail("VM log file: %s expect to get permission:0600, got %s ."
                      % (guest_log_file, permission))
        # Check VM log file owner.
        owner = getpwuid(os.lstat(guest_log_file).st_uid).pw_name
        if owner != 'root':
            test.fail("VM log file: %s expect to get owner:root, got %s ."
                      % (guest_log_file, owner))

    def check_info_in_vm_log_file(vm_name, cmd=None, matchedMsg=None):
        """
        Check if log information is written into log file correctly.
        """
        # Check VM log file is opened by virtlogd.
        global QEMU_LOG_PATH
        guest_log_file = os.path.join(QEMU_LOG_PATH, "%s.log" % vm_name)
        if not os.path.exists(guest_log_file):
            test.fail("Expected VM log file: %s not exists" % guest_log_file)

        if cmd is None:
            cmd = ("grep -nr '%s' %s" % (matchedMsg, guest_log_file))
        else:
            cmd = (cmd + " %s |grep '%s'" % (guest_log_file, matchedMsg))
        if process.run(cmd, ignore_status=True, shell=True).exit_status:
            test.fail("Failed to get VM started log from VM log file: %s."
                      % guest_log_file)

    def check_pipe_closed(pipe_node):
        """
        Check pipe used by QEMU is closed gracefully after VM shutdown.
        """
        # Check pipe node can not be listed after VM shutdown.
        cmd = ("lsof  -w |grep pipe|grep virtlogd|grep %s" % pipe_node)
        if not process.run(cmd, timeout=90, ignore_status=True, shell=True).exit_status:
            test.fail("pipe node: %s is not closed in virtlogd gracefully." % pipe_node)

        cmd = ("lsof  -w |grep pipe|grep qemu-kvm|grep %s" % pipe_node)
        if not process.run(cmd, timeout=90, ignore_status=True, shell=True).exit_status:
            test.fail("pipe node: %s is not closed in qemu gracefully." % pipe_node)

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    expected_result = params.get("expected_result", "virtlogd_disabled")
    stdio_handler = params.get("stdio_handler", "not_set")
    vm = env.get_vm(vm_name)
    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()
    guest_log_file = os.path.join(QEMU_LOG_PATH, "%s.log" % vm_name)

    config = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        if stdio_handler != 'not_set':
            config['stdio_handler'] = "'%s'" % stdio_handler
        # Restart libvirtd to make change valid.
        if not libvirtd.restart():
            if expected_result != 'unbootable':
                test.fail('Libvirtd is expected to be started '
                          'with stdio_handler=%s' % stdio_handler)
            return
        if expected_result == 'unbootable':
            test.fail('Libvirtd is not expected to be started '
                      'with stdio_handler=%s' % stdio_handler)

        # Stop all VMs if VMs are already started.
        for tmp_vm in env.get_all_vms():
            if tmp_vm.is_alive():
                tmp_vm.destroy(gracefully=False)

        # Sleep a few seconds to let the VM sync underlying data
        time.sleep(3)

        # Remove VM previous log file.
        clean_up_vm_log_file(vm_name)

        # Check if virtlogd socket is running.
        cmd = ("systemctl status virtlogd.socket|grep 'Active: active'")
        configure(cmd, errorMsg="virtlogd.socket is not running")

        # Configure serial console.
        configure_serial_console(vm_name)

        logging.info("final vm:")
        logging.info(VMXML.new_from_inactive_dumpxml(vm_name))

        # Start VM.
        try:
            vm.start()
        except virt_vm.VMStartError as detail:
            test.fail("VM failed to start."
                      "Error: %s" % str(detail))
        # Check VM log file has right permission and owner.
        check_vm_log_file_permission_and_owner(vm_name)
        utils_package.package_install(['lsof'])
        # Check VM log file is opened by virtlogd.
        cmd = ("lsof -w %s|grep 'virtlogd'" % guest_log_file)
        errorMessage = "VM log file: %s is not opened by:virtlogd." % guest_log_file
        configure(cmd, guest_log_file, errorMessage)

        # Check VM started log is written into log file correctly.
        check_info_in_vm_log_file(vm_name, matchedMsg="char device redirected to /dev/pts")

        # Get pipe node opened by virtlogd for VM log file.
        pipe_node_field = "$9"
        # On newer releases, the 8th field of the lsof output is the
        # pipe node number.
        if libvirt_version.version_compare(4, 3, 0):
            pipe_node_field = "$8"
        cmd = ("lsof  -w |grep pipe|grep virtlogd|tail -n 1|awk '{print %s}'" % pipe_node_field)
        pipe_node = configure(cmd)

        # Check if qemu-kvm use pipe node provided by virtlogd.
        cmd = ("lsof  -w |grep pipe|grep qemu-kvm|grep %s" % pipe_node)
        errorMessage = ("Can not find matched pipe node: %s "
                        "from pipe list used by qemu-kvm." % pipe_node)
        configure(cmd, errorMsg=errorMessage)

        # Shutdown VM.
        if not vm.shutdown():
            vm.destroy(gracefully=True)

        # Check VM shutdown log is written into log file correctly.
        check_info_in_vm_log_file(vm_name, matchedMsg="shutting down")

        # Check pipe is closed gracefully after VM shutdown.
        check_pipe_closed(pipe_node)

        # Start VM again.
        try:
            vm.start()
        except virt_vm.VMStartError as detail:
            test.fail("VM failed to start."
                      "Error: %s" % str(detail))
        # Check the new VM start log is appended to the end of the VM log file.
        check_info_in_vm_log_file(vm_name, cmd="tail -n 5",
                                  matchedMsg="char device redirected to /dev/pts")

    finally:
        config.restore()
        libvirtd.restart()
        vm_xml_backup.sync()
Exemplo n.º 51
0
                            uri=connect_uri,
                            ignore_status=True,
                            debug=True)

    maxvcpus_test = result.stdout.strip()
    status = result.exit_status

    # Check status_error
    if status_error == "yes":
        if status == 0:
            raise exceptions.TestFail("Run successed with unsupported option!")
        else:
            logging.info("Run failed with unsupported option %s " % option)
    elif status_error == "no":
        if status == 0:
            if not libvirt_version.version_compare(2, 3, 0):
                if "kqemu" in option:
                    if not maxvcpus_test == '1':
                        raise exceptions.TestFail("Command output %s is not "
                                                  "expected for %s " %
                                                  (maxvcpus_test, option))
                elif option in ['qemu', '--type qemu', '']:
                    if not maxvcpus_test == '16':
                        raise exceptions.TestFail("Command output %s is not "
                                                  "expected for %s " %
                                                  (maxvcpus_test, option))
                else:
                    # No check with other types
                    pass
            else:
                # It covers all possible combinations
Exemplo n.º 52
0
def run(test, params, env):
    """
    Test command: virsh vol-resize

    Resize the capacity of the given volume (default bytes).
    1. Define and start a given type pool.
    2. Create a volume in the pool.
    3. Do vol-resize.
    4. Check the volume info.
    5. Delete the volume and pool.

    TODO:
    Add a volume shrink test after libvirt upstream supports it.
    """

    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    emulated_image = params.get("emulated_image")
    emulated_image_size = params.get("emulated_image_size")
    vol_name = params.get("vol_name")
    vol_format = params.get("vol_format")
    vol_capacity = params.get("vol_capacity")
    vol_new_capacity = params.get("vol_new_capacity")
    resize_option = params.get("resize_option", "")
    check_vol_size = "yes" == params.get("check_vol_size", "yes")
    status_error = "yes" == params.get("status_error", "no")

    if not libvirt_version.version_compare(1, 0, 0):
        if "--allocate" in resize_option:
            raise error.TestNAError("'--allocate' flag is not supported in"
                                    " current libvirt version.")

    # libvirt acl polkit related params
    uri = params.get("virsh_uri")
    unpri_user = params.get('unprivileged_user')
    if unpri_user:
        if unpri_user.count('EXAMPLE'):
            unpri_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    libv_pvt = libvirt.PoolVolumeTest(test, params)
    try:
        libv_pool = libvirt_storage.StoragePool()
        # Raise an error if a pool with the given name already exists
        if libv_pool.pool_exists(pool_name):
            raise error.TestError("Pool '%s' already exists" % pool_name)
        else:
            # Create a new pool
            libv_pvt.pre_pool(pool_name,
                              pool_type,
                              pool_target,
                              emulated_image,
                              image_size=emulated_image_size)
            pool_info = libv_pool.pool_info(pool_name)
            for key in pool_info:
                logging.debug("Pool info: %s = %s", key, pool_info[key])
            # Deal with vol_new_capacity, '--capacity' only accepts an integer
            if vol_new_capacity == "pool_available":
                pool_avai = pool_info["Available"].split()
                vol_new_capacity = pool_avai[0].split('.')[0] + pool_avai[1]
            if vol_new_capacity == "pool_capacity":
                pool_capa = pool_info["Capacity"].split()
                vol_new_capacity = pool_capa[0].split('.')[0] + pool_capa[1]
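            # For example (illustrative), a reported value of "19.53 GiB"
            # splits into ['19.53', 'GiB'] and is truncated to '19GiB',
            # i.e. an integer plus a unit suffix.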

        # Create a volume
        libv_pvt.pre_vol(vol_name=vol_name,
                         vol_format=vol_format,
                         capacity=vol_capacity,
                         allocation=None,
                         pool_name=pool_name)
        libv_vol = libvirt_storage.PoolVolume(pool_name)
        check_vol_info(libv_vol, vol_name)

        # The volume size may not be exactly what we expect after resize,
        # for example:
        # 1) vol_new_capacity = 1b with --delta option: the volume size will
        #    not change;
        # 2) vol_new_capacity = 1KB with --delta option: the volume size will
        #    increase by 1024, not 1000
        # So we can disable the volume size check after resize
        if check_vol_size:
            vol_path = libv_vol.list_volumes()[vol_name]
            expect_info = get_expect_info(vol_new_capacity, vol_path,
                                          resize_option)
            logging.debug("Expect volume info: %s", expect_info)
        else:
            expect_info = {}

        # Run vol-resize
        result = virsh.vol_resize(vol_name,
                                  vol_new_capacity,
                                  pool_name,
                                  resize_option,
                                  uri=uri,
                                  unprivileged_user=unpri_user,
                                  debug=True)
        if not status_error:
            if result.exit_status != 0:
                raise error.TestFail(result.stdout.strip())
            else:
                if check_vol_info(libv_vol, vol_name, expect_info):
                    logging.debug("Volume %s resize check pass.", vol_name)
                else:
                    raise error.TestFail("Volume %s resize check fail." %
                                         vol_name)
        elif result.exit_status == 0:
            raise error.TestFail("Expect resize fail but run successfully.")
    finally:
        # Clean up
        try:
            libv_pvt.cleanup_pool(pool_name, pool_type, pool_target,
                                  emulated_image)
        except error.TestFail, detail:
            logging.error(str(detail))
Exemplo n.º 53
0
def run(test, params, env):
    """
    Test command: virsh pool-create.

    Create a libvirt pool from an XML file. The file can be supplied by the
    tester or generated by dumping the XML of a pre-defined pool.
    """
    pool_xml_f = params.get("pool_create_xml_file", "/PATH/TO/POOL.XML")
    pool_name = params.get("pool_create_name", "virt_test_pool_tmp")
    option = params.get("pool_create_extra_option", "")
    readonly_mode = "yes" == params.get("pool_create_readonly_mode", "no")
    status_error = "yes" == params.get("status_error", "no")
    pre_def_pool = "yes" == params.get("pre_def_pool", "no")
    pool_type = params.get("pool_type", "dir")
    source_format = params.get("pool_src_format", "")
    source_name = params.get("pool_source_name", "")
    source_path = params.get("pool_source_path", "/")
    pool_target = params.get("pool_target", "pool_target")
    duplicate_element = params.get("pool_create_duplicate_element", "")
    new_pool_name = params.get("new_pool_create_name")
    no_disk_label = "yes" == params.get("no_disk_label", "no")

    if not libvirt_version.version_compare(1, 0, 0):
        if pool_type == "gluster":
            test.cancel("Gluster pool is not supported in current"
                        " libvirt version.")

    if "/PATH/TO/POOL.XML" in pool_xml_f:
        test.cancel("Please replace %s with valid pool xml file" % pool_xml_f)
    pool_ins = libvirt_storage.StoragePool()
    if pre_def_pool and pool_ins.pool_exists(pool_name):
        test.fail("Pool %s already exist" % pool_name)

    emulated_image = "emulated-image"
    kwargs = {
        'image_size': '1G',
        'source_path': source_path,
        'source_name': source_name,
        'source_format': source_format
    }
    pvt = utlv.PoolVolumeTest(test, params)
    old_uuid = None
    new_device_name = None
    if pre_def_pool:
        try:
            pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image,
                         **kwargs)
            virsh.pool_dumpxml(pool_name, to_file=pool_xml_f)
            old_uuid = virsh.pool_uuid(pool_name).stdout.strip()
            if no_disk_label:
                # Update <device_path>
                logging.debug("Try to update device path")
                new_device_name = utlv.setup_or_cleanup_iscsi(True)
                p_xml = pool_xml.PoolXML.new_from_dumpxml(pool_name)
                s_xml = pool_xml.SourceXML()
                s_xml.device_path = new_device_name
                p_xml.set_source(s_xml)
                pool_xml_f = p_xml.xml
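                # In the dumped pool XML this updates the source device path,
                # roughly like (the path is illustrative):
                #   <source>
                #     <device path='/dev/sdb'/>
                #   </source>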
            if duplicate_element == "name":
                pass
            elif duplicate_element == "uuid":
                pass
            elif duplicate_element == "source":
                # Remove <uuid> and update <name>
                cmd = "sed -i '/<uuid>/d' %s" % pool_xml_f
                process.run(cmd, shell=True)
                cmd = "sed -i 's/<name>.*<\/name>/<name>%s<\/name>/g' %s" % (
                    new_pool_name, pool_xml_f)
                process.run(cmd, shell=True)
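                # e.g. the sed above turns <name>virt_test_pool_tmp</name>
                # into <name>NEW_NAME</name> (the new_pool_name parameter),
                # so the created pool duplicates only the original <source>.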
            else:
                # The transient pool will be gone after it is destroyed
                virsh.pool_destroy(pool_name)
            new_source_format = params.get("new_pool_src_format")
            if new_source_format:
                cmd = "sed -i s/type=\\\'%s\\\'/type=\\\'%s\\\'/g %s" % (
                    source_format, new_source_format, pool_xml_f)
                process.run(cmd, shell=True)
            # Remove uuid
            cmd = "sed -i '/<uuid>/d' %s" % pool_xml_f
            process.run(cmd, shell=True)
        except Exception as details:
            pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image,
                             **kwargs)
            if new_device_name:
                utlv.setup_or_cleanup_iscsi(False)
            test.error("Error occurred when prepare pool xml:\n %s" % details)
    # Create an invalid pool xml file
    if pool_xml_f == "invalid-pool-xml":
        tmp_xml_f = xml_utils.TempXMLFile()
        tmp_xml_f.write('"<pool><<<BAD>>><\'XML</name\>'
                        '!@#$%^&*)>(}>}{CORRUPTE|>!</pool>')
        tmp_xml_f.flush()
        pool_xml_f = tmp_xml_f.name
    # Readonly mode
    ro_flag = False
    if readonly_mode:
        logging.debug("Readonly mode test")
        ro_flag = True
    # Run virsh test
    if os.path.exists(pool_xml_f):
        with open(pool_xml_f, 'r') as f:
            logging.debug("Create pool from file:\n %s", f.read())
    try:
        cmd_result = virsh.pool_create(pool_xml_f,
                                       option,
                                       ignore_status=True,
                                       debug=True,
                                       readonly=ro_flag)
        err = cmd_result.stderr.strip()
        status = cmd_result.exit_status
        if not status_error:
            if status:
                test.fail(err)
            utlv.check_actived_pool(pool_name)
            pool_detail = libvirt_xml.PoolXML.get_pool_details(pool_name)
            logging.debug("Pool detail: %s", pool_detail)
            if pool_detail['uuid'] == old_uuid:
                test.fail("New created pool still use the old UUID %s" %
                          old_uuid)
        else:
            if status == 0:
                test.fail("Expect fail, but run successfully.")
            else:
                logging.debug("Command fail as expected")
    finally:
        pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image,
                         **kwargs)
        if new_device_name:
            utlv.setup_or_cleanup_iscsi(False)
        if os.path.exists(pool_xml_f):
            os.remove(pool_xml_f)
Exemplo n.º 54
0
def test_specific_timer(test, vm, params):
    """
    Test specific timer and optional attributes of it.

    :param vm: VM instance
    :param params: Test parameters
    """
    timers = params.get("timer_name", "").split(',')
    if 'tsc' in timers and not libvirt_version.version_compare(3, 2, 0):
        start_error = True
    else:
        start_error = "yes" == params.get("timer_start_error", "no")
    if vm.is_dead():
        vm.start()
    vm.wait_for_login()
    # Do not configure the VM clock if the timer is unsupported in the VM
    config_clock_in_vm = True
    for timer in timers:
        timer = translate_timer_name(timer)
        if timer not in vm_clock_source(vm, 'available').split():
            config_clock_in_vm = False
    vm.destroy()

    timer_dict_list = set_clock_xml(test, vm, params)

    # Log in to the VM to verify whether the setting works
    try:
        vm.start()
        vm.wait_for_login()
        if start_error:
            test.fail("Start vm succeed, but expect fail.")
    except virt_vm.VMStartError as detail:
        if start_error:
            logging.debug("Expected failure: %s", detail)
            return
        else:
            test.fail(detail)

    # TODO: Check VM cmdline about different timers
    vm_pid = vm.get_pid()
    with open("/proc/%s/cmdline" % vm_pid) as cmdline_f:
        cmdline_content = cmdline_f.read()
    logging.debug("VM cmdline output:\n%s", cmdline_content.replace('\x00', ' '))
    if not config_clock_in_vm:
        return

    # Get available clocksources
    avail_clock = vm_clock_source(vm, 'available').split()
    if not avail_clock:
        test.fail("Get available clock sources of %s failed"
                  % vm.name)
    logging.debug("Available clock sources of %s: %s", vm.name, avail_clock)
    for timer_dict in timer_dict_list:
        t_name = translate_timer_name(timer_dict['name'])
        t_present = timer_dict['present']
        # Check available clock sources
        if t_present == 'no':
            if t_name in avail_clock:
                test.fail("Timer %s(present=no) is still available"
                          " in vm" % t_name)
        else:
            if t_name not in avail_clock:
                test.fail("Timer %s(present=yes) is not available"
                          " in vm" % t_name)
        # Try to set specific timer
        if not vm_clock_source(vm, 'current', t_name):
            test.error("Set clock source to % in vm failed", t_name)
        time.sleep(2)
        if t_present == 'no':
            if vm_clock_source(vm, 'current') == t_name:
                test.fail("Set clock source to %s in vm successfully"
                          " while present is no" % t_name)
        else:
            if vm_clock_source(vm, 'current') != t_name:
                test.fail("Set clock source to %s in vm successfully"
                          " while present is yes" % t_name)
Exemplo n.º 55
0
def run(test, params, env):
    """
    Test virsh reset command
    """

    if not virsh.has_help_command('reset'):
        raise error.TestNAError("This version of libvirt does not support "
                                "the reset test")

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm_ref = params.get("reset_vm_ref")
    readonly = params.get("readonly", False)
    status_error = ("yes" == params.get("status_error", "no"))
    start_vm = ("yes" == params.get("start_vm"))

    vm = env.get_vm(vm_name)
    domid = vm.get_id()
    domuuid = vm.get_uuid()
    bef_pid = commands.getoutput("pidof -s qemu-kvm")

    if vm_ref == 'id':
        vm_ref = domid
    elif vm_ref == 'uuid':
        vm_ref = domuuid
    else:
        vm_ref = vm_name

    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    # change the disk cache to default
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    def change_cache(vmxml, mode):
        """
        Change the cache mode

        :param vmxml: instance of VMXML
        :param mode: cache mode you want to change
        """
        devices = vmxml.devices
        disk_index = devices.index(devices.by_device_tag('disk')[0])
        disk = devices[disk_index]
        disk_driver = disk.driver
        disk_driver['cache'] = mode
        disk.driver = disk_driver
        vmxml.devices = devices
        vmxml.define()
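        # The change above ends up as the cache attribute on the disk's
        # <driver> element in the domain XML, roughly (illustrative):
        #   <driver name='qemu' type='qcow2' cache='default'/>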

    try:
        change_cache(vmxml_backup.copy(), "default")

        tmpfile = "/home/%s" % utils_misc.generate_random_string(6)
        logging.debug("tmpfile is %s", tmpfile)
        if start_vm:
            session = vm.wait_for_login()
            session.cmd("rm -rf %s && sync" % tmpfile)
            status = session.get_command_status("touch %s && ls %s" %
                                                (tmpfile, tmpfile))
            if status == 0:
                logging.info("Succeed generate file %s", tmpfile)
            else:
                raise error.TestFail("Touch command failed!")

        # record the pid before reset for compare
        output = virsh.reset(vm_ref, readonly=readonly,
                             unprivileged_user=unprivileged_user,
                             uri=uri, ignore_status=True, debug=True)
        if output.exit_status != 0:
            if status_error:
                logging.info("Failed to reset guest as expected, Error:%s.",
                             output.stderr)
                return
            else:
                raise error.TestFail("Failed to reset guest, Error:%s." %
                                     output.stderr)
        elif status_error:
            raise error.TestFail("Expect fail, but succeed indeed.")

        session.close()
        session = vm.wait_for_login()
        status = session.get_command_status("ls %s" % tmpfile)
        if status == 0:
            raise error.TestFail("Fail to reset guest, tmpfile still exist!")
        else:
            aft_pid = commands.getoutput("pidof -s qemu-kvm")
            if bef_pid == aft_pid:
                logging.info("Succeed to check reset, tmpfile is removed.")
            else:
                raise error.TestFail("Domain pid changed after reset!")
        session.close()

    finally:
        vmxml_backup.sync()
Exemplo n.º 56
0
def run(test, params, env):
    """
    Test the virsh pool commands with acl, initiate a pool then do
    following operations.

    (1) Undefine a given type pool
    (2) Define the pool from xml
    (3) Build given type pool
    (4) Start pool
    (5) Destroy pool
    (6) Refresh pool after start it
    (7) Run vol-list with the pool
    (8) Delete pool

    For negative cases, redo the failed step so the case can continue.
    Run cleanup at the end to restore the environment.
    """

    # Initialize the variables
    pool_name = params.get("pool_name", "temp_pool_1")
    pool_type = params.get("pool_type", "dir")
    pool_target = params.get("pool_target", "")
    # The file for dumped pool xml
    pool_xml = os.path.join(test.tmpdir, "pool.xml.tmp")
    if os.path.dirname(pool_target) is "":
        pool_target = os.path.join(test.tmpdir, pool_target)
    vol_name = params.get("vol_name", "temp_vol_1")
    # Use pool name as VG name
    vg_name = pool_name
    vol_path = os.path.join(pool_target, vol_name)
    define_acl = "yes" == params.get("define_acl", "no")
    undefine_acl = "yes" == params.get("undefine_acl", "no")
    start_acl = "yes" == params.get("start_acl", "no")
    destroy_acl = "yes" == params.get("destroy_acl", "no")
    build_acl = "yes" == params.get("build_acl", "no")
    delete_acl = "yes" == params.get("delete_acl", "no")
    refresh_acl = "yes" == params.get("refresh_acl", "no")
    vol_list_acl = "yes" == params.get("vol_list_acl", "no")
    src_pool_error = "yes" == params.get("src_pool_error", "no")
    define_error = "yes" == params.get("define_error", "no")
    undefine_error = "yes" == params.get("undefine_error", "no")
    start_error = "yes" == params.get("start_error", "no")
    destroy_error = "yes" == params.get("destroy_error", "no")
    build_error = "yes" == params.get("build_error", "no")
    delete_error = "yes" == params.get("delete_error", "no")
    refresh_error = "yes" == params.get("refresh_error", "no")
    vol_list_error = "yes" == params.get("vol_list_error", "no")
    # Clean up flags:
    # cleanup_env[0] for nfs, cleanup_env[1] for iscsi, cleanup_env[2] for lvm
    # cleanup_env[3] for selinux backup status.
    cleanup_env = [False, False, False, ""]
    # libvirt acl related params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current" +
                                    " libvirt version.")

    acl_dargs = {
        'uri': uri,
        'unprivileged_user': unprivileged_user,
        'debug': True
    }

    def check_exit_status(result, expect_error=False):
        """
        Check the exit status of virsh commands.

        :param result: Virsh command result object
        :param expect_error: Boolean value, expect command success or fail
        """
        if not expect_error:
            if result.exit_status != 0:
                raise error.TestFail(result.stderr)
            else:
                logging.debug("Command output:\n%s", result.stdout.strip())
        elif expect_error and result.exit_status == 0:
            raise error.TestFail("Expect fail, but run successfully.")

    def check_pool_list(pool_name, option="--all", expect_error=False):
        """
        Check pool by running pool-list command with given option.

        :param pool_name: Name of the pool
        :param option: option for pool-list command
        :param expect_error: Boolean value, expect command success or fail
        """
        found = False
        # Get the list stored in a variable
        result = virsh.pool_list(option, ignore_status=True)
        check_exit_status(result, False)
        output = re.findall(r"(\S+)\ +(\S+)\ +(\S+)[\ +\n]",
                            str(result.stdout))
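        # A data line of 'virsh pool-list --all' looks roughly like
        # (the name and state are illustrative):
        #   default   active   yes
        # so each matched tuple is (name, state, autostart).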
        for item in output:
            if pool_name in item[0]:
                found = True
                break
        if found:
            logging.debug("Find pool '%s' in pool list.", pool_name)
        else:
            logging.debug("Not find pool %s in pool list.", pool_name)
        if expect_error and found:
            raise error.TestFail("Pool '%s' exists unexpectedly." % pool_name)
        if not expect_error and not found:
            raise error.TestFail("Expected pool '%s' to exist, but it was "
                                 "not found." % pool_name)

    # Run Testcase
    try:
        _pool = libvirt_storage.StoragePool()
        # Init a pool for test
        result = utils_test.libvirt.define_pool(pool_name, pool_type,
                                                pool_target, cleanup_env)
        check_exit_status(result, src_pool_error)
        option = "--inactive --type %s" % pool_type
        check_pool_list(pool_name, option)

        xml = virsh.pool_dumpxml(pool_name, to_file=pool_xml)
        logging.debug("Pool '%s' XML:\n%s", pool_name, xml)

        # Step (1)
        # Undefine pool
        if undefine_acl:
            result = virsh.pool_undefine(pool_name, **acl_dargs)
        else:
            result = virsh.pool_undefine(pool_name, ignore_status=True)
        check_exit_status(result, undefine_error)
        if undefine_error:
            check_pool_list(pool_name, "--all", False)
            # Redo under negative case to keep case continue
            result = virsh.pool_undefine(pool_name, ignore_status=True)
            check_exit_status(result)
            check_pool_list(pool_name, "--all", True)
        else:
            check_pool_list(pool_name, "--all", True)

        # Step (2)
        # Define pool from XML file
        if define_acl:
            result = virsh.pool_define(pool_xml, **acl_dargs)
        else:
            result = virsh.pool_define(pool_xml)
        check_exit_status(result, define_error)
        if define_error:
            # Redo under negative case to keep case continue
            result = virsh.pool_define(pool_xml)
            check_exit_status(result)

        # Step (3)
        # Build pool; this step may fail for 'disk' and 'logical' type pools
        if pool_type not in ["disk", "logical"]:
            option = ""
            # Options --overwrite and --no-overwrite can only be used to
            # build a filesystem pool, but it will fail for now
            # if pool_type == "fs":
            #    option = '--overwrite'
            if build_acl:
                result = virsh.pool_build(pool_name, option, **acl_dargs)
            else:
                result = virsh.pool_build(pool_name,
                                          option,
                                          ignore_status=True)
            check_exit_status(result, build_error)
            if build_error:
                # Redo under negative case to keep case continue
                result = virsh.pool_build(pool_name,
                                          option,
                                          ignore_status=True)
                check_exit_status(result)

        # Step (4)
        # Pool start
        if start_acl:
            result = virsh.pool_start(pool_name, **acl_dargs)
        else:
            result = virsh.pool_start(pool_name, ignore_status=True)
        check_exit_status(result, start_error)
        if start_error:
            # Redo under negative case to keep case continue
            result = virsh.pool_start(pool_name, ignore_status=True)
            check_exit_status(result)

        option = "--persistent --type %s" % pool_type
        check_pool_list(pool_name, option)

        # Step (5)
        # Pool destroy
        if destroy_acl:
            result = virsh.pool_destroy(pool_name, **acl_dargs)
        else:
            result = virsh.pool_destroy(pool_name)
        if result:
            if destroy_error:
                raise error.TestFail("Expect fail, but run successfully.")
        else:
            if not destroy_error:
                raise error.TestFail("Pool %s destroy failed, not expected." %
                                     pool_name)
            else:
                # Redo under negative case to keep case continue
                if virsh.pool_destroy(pool_name):
                    logging.debug("Pool %s destroyed.", pool_name)
                else:
                    raise error.TestFail("Destroy pool % failed." % pool_name)

        # Step (6)
        # Pool refresh for 'dir' type pool
        # Pool start
        result = virsh.pool_start(pool_name, ignore_status=True)
        check_exit_status(result)
        if pool_type == "dir":
            os.mknod(vol_path)
            if refresh_acl:
                result = virsh.pool_refresh(pool_name, **acl_dargs)
            else:
                result = virsh.pool_refresh(pool_name)
            check_exit_status(result, refresh_error)

        # Step (7)
        # Pool vol-list
        if vol_list_acl:
            result = virsh.vol_list(pool_name, **acl_dargs)
        else:
            result = virsh.vol_list(pool_name)
        check_exit_status(result, vol_list_error)

        # Step (8)
        # Pool delete for 'dir' type pool
        if virsh.pool_destroy(pool_name):
            logging.debug("Pool %s destroyed.", pool_name)
        else:
            raise error.TestFail("Destroy pool % failed." % pool_name)
        if pool_type == "dir":
            if os.path.exists(vol_path):
                os.remove(vol_path)
            if delete_acl:
                result = virsh.pool_delete(pool_name, **acl_dargs)
            else:
                result = virsh.pool_delete(pool_name, ignore_status=True)
            check_exit_status(result, delete_error)
            option = "--inactive --type %s" % pool_type
            check_pool_list(pool_name, option)
            if not delete_error:
                if os.path.exists(pool_target):
                    raise error.TestFail("The target path '%s' still exist." %
                                         pool_target)

        result = virsh.pool_undefine(pool_name, ignore_status=True)
        check_exit_status(result)
        check_pool_list(pool_name, "--all", True)
    finally:
        # Clean up
        if os.path.exists(pool_xml):
            os.remove(pool_xml)
        if not _pool.delete_pool(pool_name):
            logging.error("Can't delete pool: %s", pool_name)
        if cleanup_env[2]:
            cmd = "pvs |grep %s |awk '{print $1}'" % vg_name
            pv_name = utils.system_output(cmd)
            lv_utils.vg_remove(vg_name)
            utils.run("pvremove %s" % pv_name)
        if cleanup_env[1]:
            utils_test.libvirt.setup_or_cleanup_iscsi(False)
        if cleanup_env[0]:
            utils_test.libvirt.setup_or_cleanup_nfs(
                False, restore_selinux=cleanup_env[3])
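
# A minimal sketch of the check_exit_status() helper that the pool example
# above leans on.  The real helper lives elsewhere in the test suite, so treat
# this as an assumption about its shape, not the original implementation.
import logging
from autotest.client.shared import error


def check_exit_status(result, expect_error=False):
    """
    Check a virsh CmdResult against the expected outcome.

    :param result: CmdResult returned by a virsh.* call
    :param expect_error: True if the command is expected to fail
    """
    if not expect_error:
        if result.exit_status != 0:
            raise error.TestFail(result.stderr.strip())
        logging.debug("Command output:\n%s", result.stdout.strip())
    elif result.exit_status == 0:
        raise error.TestFail("Expect fail, but run successfully.")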
Exemplo n.º 57
0
def run(test, params, env):
    """
    Test command: virsh destroy.

    The command can destroy (stop) a domain.
    1.Prepare test environment.
    2.When libvirtd == "off", stop the libvirtd service.
    3.Perform virsh destroy operation.
    4.Recover test environment.
    5.Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    domid = vm.get_id()
    domuuid = vm.get_uuid()

    vm_ref = params.get("destroy_vm_ref")
    status_error = params.get("status_error", "no")
    libvirtd = params.get("libvirtd", "on")
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    remote_pwd = params.get("remote_pwd", None)
    local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
    local_pwd = params.get("local_pwd", "LOCAL.EXAMPLE.COM")
    if vm_ref == "remote" and (remote_ip.count("EXAMPLE.COM") or
                               local_ip.count("EXAMPLE.COM")):
        raise error.TestNAError(
            "Remote test parameters unchanged from default")

    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = "%s %s" % (vm_name, params.get("destroy_extra"))
    elif vm_ref == "uuid":
        vm_ref = domuuid

    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    if vm_ref != "remote":
        status = virsh.destroy(vm_ref, ignore_status=True,
                               unprivileged_user=unprivileged_user,
                               uri=uri, debug=True).exit_status
        output = ""
    else:
        status = 0
        try:
            remote_uri = libvirt_vm.complete_uri(local_ip)
            session = remote.remote_login("ssh", remote_ip, "22",
                                          "root", remote_pwd, "#")
            session.cmd_output('LANG=C')

            # Set up an ssh key so the remote host can log in to the local host
            ssh_key.setup_remote_ssh_key(remote_ip, "root", remote_pwd,
                                         local_ip, "root", local_pwd)

            command = "virsh -c %s destroy %s" % (remote_uri, vm_name)
            status, output = session.cmd_status_output(command,
                                                       internal_timeout=5)
            session.close()
        except error.CmdError:
            status = 1

    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # check status_error
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Run successfully with wrong command! "
                                 "Output:\n%s" % output)
    elif status_error == "no":
        if status != 0:
            raise error.TestFail("Run failed with right command! Output:\n%s"
                                 % output)
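
# Hedged usage sketch of the local code path above: destroy a running guest
# through the virt-test virsh wrapper and inspect the exit status.  The VM
# name is a placeholder.
import logging
from virttest import virsh

result = virsh.destroy("avocado-vt-vm1", ignore_status=True, debug=True)
if result.exit_status:
    logging.error("virsh destroy failed: %s", result.stderr.strip())
else:
    logging.info("Domain was forcefully stopped.")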
Exemplo n.º 58
0
def run(test, params, env):
    """
    Test virsh domdisplay command, which returns the graphical display URL.
    This test covers the vnc and spice types, in both readonly and readwrite modes.
    If the --include-password option is given, also check the password in the result.
    """

    if not virsh.has_help_command('domdisplay'):
        raise error.TestNAError("This version of libvirt doesn't support "
                                "domdisplay test")

    vm_name = params.get("main_vm", "virt-tests-vm1")
    status_error = ("yes" == params.get("status_error", "no"))
    options = params.get("domdisplay_options", "")
    graphic = params.get("domdisplay_graphic", "vnc")
    readonly = ("yes" == params.get("readonly", "no"))
    passwd = params.get("domdisplay_passwd")
    is_ssl = ("yes" == params.get("domdisplay_ssl", "no"))
    is_domid = ("yes" == params.get("domdisplay_domid", "no"))
    is_domuuid = ("yes" == params.get("domdisplay_domuuid", "no"))
    qemu_conf = params.get("qemu_conf_file", "/etc/libvirt/qemu.conf")

    # Do xml backup for final recovery
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    tmp_file = os.path.join(test.tmpdir, "qemu.conf.bk")

    if "--type" in options:
        if not libvirt_version.version_compare(1, 2, 6):
            raise error.TestNAError("--type option is not supportted in this"
                                    " libvirt version.")
        elif "vnc" in options and graphic != "vnc" or \
             "spice" in options and graphic != "spice":
            status_error = True

    def prepare_ssl_env():
        """
        Prepare the environment for an SSL spice connection
        """
        # modify qemu.conf
        f_obj = open(qemu_conf, "r")
        cont = f_obj.read()
        f_obj.close()

        # remove the existing setting
        left_cont = re.sub(r'\s*spice_tls\s*=.*', '', cont)
        left_cont = re.sub(r'\s*spice_tls_x509_cert_dir\s*=.*', '', left_cont)

        # write the stripped content back to the original file
        f_obj = open(qemu_conf, "w")
        f_obj.write(left_cont)
        f_obj.write("spice_tls = 1\n")
        f_obj.write("spice_tls_x509_cert_dir = \"/etc/pki/libvirt-spice\"")
        f_obj.close()

        # make the modification take effect
        utils_libvirtd.libvirtd_restart()

        # Generate CA cert
        utils_misc.create_x509_dir("/etc/pki/libvirt-spice",
                                   "/C=IL/L=Raanana/O=Red Hat/CN=my CA",
                                   "/C=IL/L=Raanana/O=Red Hat/CN=my server",
                                   passwd)

    try:
        graphic_count = len(vmxml_backup.get_graphics_devices())
        if is_ssl:
            # Do backup for qemu.conf in tmp_file
            shutil.copyfile(qemu_conf, tmp_file)
            prepare_ssl_env()
            if graphic_count:
                Graphics.del_graphic(vm_name)
            Graphics.add_graphic(vm_name, passwd, "spice", True)
        else:
            if not graphic_count:
                Graphics.add_graphic(vm_name, passwd, graphic)
            # Only change graphic type and passwd
            Graphics.change_graphic_type_passwd(vm_name, graphic, passwd)

        vm = env.get_vm(vm_name)
        if not vm.is_alive():
            vm.start()

        dom_id = virsh.domid(vm_name).stdout.strip()
        dom_uuid = virsh.domuuid(vm_name).stdout.strip()

        if is_domid:
            vm_name = dom_id
        if is_domuuid:
            vm_name = dom_uuid

        # Do test
        result = virsh.domdisplay(vm_name,
                                  options,
                                  readonly=readonly,
                                  debug=True)
        logging.debug("result is %s", result)
        if result.exit_status:
            if not status_error:
                raise error.TestFail("Fail to get domain display info. Error:"
                                     "%s." % result.stderr.strip())
            else:
                logging.info(
                    "Get domain display info failed as expected. "
                    "Error:%s.", result.stderr.strip())
                return
        elif status_error:
            raise error.TestFail("Expect fail, but succeed indeed!")

        output = result.stdout.strip()
        # The expected address depends on the listen address in the domain xml
        if output.find("localhost:") >= 0:
            expect_addr = "localhost"
        else:
            expect_addr = "127.0.0.1"

        # Get active domain xml info
        vmxml_act = vm_xml.VMXML.new_from_dumpxml(vm_name, "--security-info")
        logging.debug("xml is %s", vmxml_act.get_xmltreefile())
        graphic_act = vmxml_act.devices.by_device_tag('graphics')[0]
        port = graphic_act.port

        # Compose the expected result for comparison
        if graphic == "vnc":
            expect = "vnc://%s:%s" % (expect_addr, str(int(port) - 5900))
        elif graphic == "spice" and is_ssl:
            tlsport = graphic_act.tlsPort
            expect = "spice://%s:%s?tls-port=%s" % \
                     (expect_addr, port, tlsport)
        elif graphic == "spice":
            expect = "spice://%s:%s" % (expect_addr, port)

        if options == "--include-password" and passwd is not None:
            # --include-password was given and a password is set in the xml
            if graphic == "vnc":
                expect = "vnc://:%s@%s:%s" % \
                         (passwd, expect_addr, str(int(port)-5900))
            elif graphic == "spice" and is_ssl:
                expect = expect + "&password="******"spice":
                expect = expect + "?password="******"Get correct display:%s", output)
        else:
            raise error.TestFail("Expect %s, but get %s" % (expect, output))

    finally:
        # Domain xml recovery
        vmxml_backup.sync()
        if is_ssl:
            # qemu.conf recovery
            shutil.move(tmp_file, qemu_conf)
            utils_libvirtd.libvirtd_restart()
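
# Hedged helper sketch mirroring the expectation logic above: compose the URI
# that virsh domdisplay should print for a given graphics setup.  The function
# name and parameters are illustrative, not part of the original test.
def expected_display_uri(graphic, addr, port, tlsport=None, passwd=None):
    if graphic == "vnc":
        display = int(port) - 5900      # VNC display number, not the TCP port
        if passwd is not None:
            return "vnc://:%s@%s:%s" % (passwd, addr, display)
        return "vnc://%s:%s" % (addr, display)
    # spice, optionally with TLS and/or a password
    expect = "spice://%s:%s" % (addr, port)
    if tlsport:
        expect += "?tls-port=%s" % tlsport
        if passwd is not None:
            expect += "&password=" + passwd
    elif passwd is not None:
        expect += "?password=" + passwd
    return expect

# For example: expected_display_uri("vnc", "127.0.0.1", 5901) == "vnc://127.0.0.1:1"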
Exemplo n.º 59
0
def run(test, params, env):
    """
    Test command: virsh console.
    """
    os_type = params.get("os_type")
    if os_type == "windows":
        test.cancel("SKIP:Do not support Windows.")

    # Get parameters for test
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    vm_ref = params.get("virsh_console_vm_ref", "domname")
    vm_state = params.get("virsh_console_vm_state", "running")
    login_user = params.get("console_login_user", "root")
    if login_user == "root":
        login_passwd = params.get("password")
    else:
        login_passwd = params.get("console_password_not_root")
    status_error = "yes" == params.get("status_error", "no")
    domuuid = vm.get_uuid()
    domid = ""
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    # A backup of original vm
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    if vm.is_alive():
        vm.destroy()
    if vm.is_qemu():
        xml_console_config(vm_name)

    try:
        # Guarantee cleanup even if configuring the vm console fails.
        if vm.is_qemu():
            vm_console_config(vm, test)

        # Prepare vm state for test
        if vm_state != "shutoff":
            vm.start(autoconsole=False)
            if vm.is_qemu():
                # LXC guests cannot log in here: they would use virsh console
                # to log in, which would break the console action in the next step
                vm.wait_for_login()
            domid = vm.get_id()
        if vm_state == "paused":
            vm.pause()

        if vm_ref == "domname":
            vm_ref = vm_name
        elif vm_ref == "domid":
            vm_ref = domid
        elif vm_ref == "domuuid":
            vm_ref = domuuid
        elif domid and vm_ref == "hex_id":
            vm_ref = hex(int(domid))

        # Run command
        if params.get('setup_libvirt_polkit') == 'yes':
            cmd = "virsh -c %s console %s" % (uri, vm_ref)
            command = "su - %s -c '%s'" % (unprivileged_user, cmd)
            force_command = "su - %s -c '%s --force'" % (unprivileged_user,
                                                         cmd)
        else:
            command = "virsh console %s" % vm_ref
            force_command = "virsh console %s --force" % vm_ref
        console_session = aexpect.ShellSession(command)

        status = utils_test.libvirt.verify_virsh_console(console_session,
                                                         login_user,
                                                         login_passwd,
                                                         timeout=10,
                                                         debug=True)
        console_session.close()

        check_duplicated_console(command, force_command, status_error,
                                 login_user, login_passwd, test)
        check_disconnect_on_shutdown(command, status_error, login_user,
                                     login_passwd, test)
    finally:
        # Recover state of vm.
        if vm_state == "paused":
            vm.resume()

        # Recover vm
        if vm.is_alive():
            vm.destroy()
        if vm.is_qemu():
            xml_console_recover(vmxml_backup)

    # Check result
    if status_error:
        if status:
            test.fail("Run successful with wrong command!")
    else:
        if not status:
            test.fail("Run failed with right command!")
Exemplo n.º 60
0
def run(test, params, env):
    """
    Test the input virtual devices

    1. prepare a guest with different input devices
    2. check whether the guest can be started
    3. check the qemu cmd line
    """
    def check_dumpxml():
        """
        Check whether the added devices are shown in the guest xml
        """
        pattern = "<input bus=\"%s\" type=\"%s\">" % (bus_type, input_type)
        xml_after_adding_device = VMXML.new_from_dumpxml(vm_name)
        if pattern not in str(xml_after_adding_device):
            test.fail("Can not find the %s input device xml "
                      "in the guest xml file." % input_type)

    def check_qemu_cmd_line():
        """
        Check whether the added devices are shown in the qemu cmd line
        """
        # a keyboard or mouse on the ps2 bus does not show up in the
        # qemu cmd line, so there is nothing to check in that case
        if bus_type == "ps2" and input_type in ["keyboard", "mouse"]:
            return
        with open('/proc/%s/cmdline' % vm.get_pid(), 'r') as cmdline_file:
            cmdline = cmdline_file.read()
        if bus_type == "usb" and input_type == "keyboard":
            pattern = r"-device.%s-kbd" % bus_type
        elif input_type == "passthrough":
            pattern = r"-device.%s-input-host-pci" % bus_type
        else:
            pattern = r"-device.%s-%s" % (bus_type, input_type)
        if not re.search(pattern, cmdline):
            test.fail("Can not find the %s input device "
                      "in qemu cmd line." % input_type)

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)

    status_error = params.get("status_error", "no") == "yes"
    bus_type = params.get("bus_type")
    input_type = params.get("input_type")
    if input_type == "tablet":
        if not libvirt_version.version_compare(1, 2, 2):
            test.cancel("tablet input type is not supported "
                        "on the current version.")
    if input_type == "passthrough" or bus_type == "virtio":
        if not libvirt_version.version_compare(1, 3, 0):
            test.cancel("passthrough input type or virtio bus type "
                        "is not supported on current version.")

    vm_xml = VMXML.new_from_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()
    if vm.is_alive():
        vm.destroy()

    try:
        # ps2 keyboard and ps2 mouse are the defaults, no need to re-add them to the xml
        if not (bus_type == "ps2" and input_type in ["keyboard", "mouse"]):
            vm_xml.remove_all_device_by_type('input')
            input_dev = Input(type_name=input_type)
            input_dev.input_bus = bus_type
            if input_type == "passthrough":
                kbd_dev_name = glob.glob('/dev/input/by-path/*kbd')
                if not kbd_dev_name:
                    test.cancel("There is no keyboard device on this host.")
                logging.debug(
                    "host keyboard %s is going to be passed through "
                    "to the guest.", kbd_dev_name[0])
                input_dev.source_evdev = kbd_dev_name[0]
            vm_xml.add_device(input_dev)
            try:
                vm_xml.sync()
            except Exception as error:
                if not status_error:
                    test.fail(
                        "Failed to define the guest after adding the %s input "
                        "device xml. Details: %s " % (input_type, error))
                logging.debug(
                    "This is the expected failing in negative cases.")
                return

        res = virsh.start(vm_name)
        if res.exit_status:
            if not status_error:
                test.fail("Failed to start vm after adding the %s input "
                          "device xml. Details: %s " % (input_type, error))
            logging.debug("This is the expected failure in negative cases.")
            return
        if status_error:
            test.fail(
                "Expected fail in negative cases but vm started successfully.")
            return

        logging.debug("VM started successfully in postive cases.")
        check_dumpxml()
        check_qemu_cmd_line()
    finally:
        if vm.is_alive():
            virsh.destroy(vm_name)
        vm_xml_backup.sync()
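
# Hedged sketch of the device-construction step used above: build a USB
# keyboard <input> device and attach it to a shut-off guest definition.  The
# import paths follow avocado-vt's libvirt_xml layout (an assumption here) and
# the VM name is a placeholder.
from virttest import virsh
from virttest.libvirt_xml.vm_xml import VMXML
from virttest.libvirt_xml.devices.input import Input

guest_xml = VMXML.new_from_dumpxml("avocado-vt-vm1")
usb_kbd = Input(type_name="keyboard")   # <input type='keyboard' bus='usb'/>
usb_kbd.input_bus = "usb"
guest_xml.add_device(usb_kbd)
guest_xml.sync()                        # redefine the guest with the new device
virsh.start("avocado-vt-vm1")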