Example #1
def package_jeos(img):
    """
    Package JeOS and make it ready for upload.

    Steps:
    1) Move /path/to/jeos.qcow2 to /path/to/jeos.qcow2.backup
    2) Sparsify the image, creating a new, trimmed down /path/to/jeos.qcow2
    3) Compress the sparsified image with 7za

    :param img: Path to a qcow2 image
    """
    basedir = os.path.dirname(img)
    backup = img + '.backup'
    qemu_img = utils_misc.find_command('qemu-img')
    shutil.move(img, backup)
    logging.info("Backup %s saved", backup)

    utils.system("%s convert -f qcow2 -O qcow2 %s %s" %
                 (qemu_img, backup, img))
    logging.info("Sparse file %s created successfully", img)

    archiver = utils_misc.find_command('7za')
    compressed_img = img + ".7z"
    utils.system("%s a %s %s" % (archiver, compressed_img, img))
    logging.info("JeOS compressed file %s created successfuly", compressed_img)
Example #2
def create_scsi_disk(scsi_option, scsi_size="2048"):
    """
    Get the scsi device created by scsi_debug kernel module

    :param scsi_option: The scsi_debug kernel module options.
    :param scsi_size: Size of the emulated disk in MB.
    :return: scsi device if it is created successfully.
    """
    try:
        utils_misc.find_command("lsscsi")
    except ValueError:
        raise error.TestNAError("Missing command 'lsscsi'.")

    try:
        # Load scsi_debug kernel module.
        # Unload it first if it's already loaded.
        if utils.module_is_loaded("scsi_debug"):
            utils.unload_module("scsi_debug")
        utils.load_module("scsi_debug dev_size_mb=%s %s" %
                          (scsi_size, scsi_option))
        # Get the scsi device name
        scsi_disk = utils.run("lsscsi|grep scsi_debug|"
                              "awk '{print $6}'").stdout.strip()
        logging.info("scsi disk: %s" % scsi_disk)
        return scsi_disk
    except Exception, e:
        logging.error(str(e))
        return None
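
The guard at the top of this function is a recurring pattern on this page; a stdlib-only sketch of the same "skip when a host utility is missing" check, with unittest.SkipTest standing in for error.TestNAError and require_command being an illustrative name:

import shutil
import unittest


def require_command(name):
    # Skip the calling test when a host utility is unavailable; mirrors the
    # find_command/TestNAError gate above with stdlib pieces only.
    if shutil.which(name) is None:
        raise unittest.SkipTest("Missing command '%s'." % name)


# usage: require_command("lsscsi")
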
Example #3
def create_scsi_disk(scsi_option, scsi_size="2048"):
    """
    Get the scsi device created by scsi_debug kernel module

    :param scsi_option: The scsi_debug kernel module options.
    :param scsi_size: Size of the emulated disk in MB.
    :return: scsi device if it is created successfully.
    """
    try:
        utils_misc.find_command("lsscsi")
    except ValueError:
        raise error.TestNAError("Missing command 'lsscsi'.")

    try:
        # Load scsi_debug kernel module.
        # Unload it first if it's already loaded.
        if utils.module_is_loaded("scsi_debug"):
            utils.unload_module("scsi_debug")
        utils.load_module("scsi_debug dev_size_mb=%s %s"
                          % (scsi_size, scsi_option))
        # Get the scsi device name
        scsi_disk = utils.run("lsscsi|grep scsi_debug|"
                              "awk '{print $6}'").stdout.strip()
        logging.info("scsi disk: %s" % scsi_disk)
        return scsi_disk
    except Exception, e:
        logging.error(str(e))
        return None
def package_jeos(img):
    """
    Package JeOS and make it ready for upload.

    Steps:
    1) Move /path/to/jeos.qcow2 to /path/to/jeos.qcow2.backup
    2) Sparsify the image, creating a new, trimmed down /path/to/jeos.qcow2
    3) Compress the sparsified image with 7za

    :param img: Path to a qcow2 image
    """
    basedir = os.path.dirname(img)
    backup = img + '.backup'
    qemu_img = utils_misc.find_command('qemu-img')
    shutil.move(img, backup)
    logging.info("Backup %s saved", backup)

    process.system("%s convert -f qcow2 -O qcow2 %s %s" % (qemu_img, backup, img))
    logging.info("Sparse file %s created successfully", img)

    archiver = utils_misc.find_command('7za')
    compressed_img = img + ".7z"
    process.system("%s a %s %s" % (archiver, compressed_img, img))
    logging.info("JeOS compressed file %s created successfuly",
                 compressed_img)
def get_storage_devices():
    """
    Retrieve storage devices list from sysfs.

    :return:      A list contains retrieved storage device names with
                  the same format in virsh.
    """
    devices = []
    try:
        utils_misc.find_command('udevadm')
        storage_path = '/sys/class/block'
        if not os.path.exists(storage_path):
            logging.debug(
                "Storage device path %s doesn't exist!", storage_path)
            return []
        for device in os.listdir(storage_path):
            info = utils.run(
                'udevadm info %s' % os.path.join(storage_path, device),
                timeout=5, ignore_status=True).stdout
            # Only disk devices are listed, not partitions
            dev_type = re.search(r'(?<=E: DEVTYPE=)\S*', info)
            if dev_type:
                if dev_type.group(0) == 'disk':
                    # Get disk serial
                    dev_id = re.search(r'(?<=E: ID_SERIAL=)\S*', info)
                    if dev_id:
                        serial = dev_id.group(0)
                        dev_name = 'block_' + device.replace(':', '_')
                        dev_name = re.sub(
                            r'\W', '_', 'block_%s_%s' % (device, serial))
                        devices.append(dev_name)
    except ValueError:
        logging.warning('udevadm not found! Skipping storage test!')
        logging.warning('You can try installing it using `yum install udev`')
    return devices
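
The DEVTYPE/ID_SERIAL extraction above boils down to two lookbehind regexes; a self-contained sketch with a made-up udevadm output sample (the serial value and parse_udev_info name are illustrative only):

import re

SAMPLE = """P: /devices/pci0000:00/.../block/sda
E: DEVTYPE=disk
E: ID_SERIAL=QEMU_HARDDISK_QM00001
"""


def parse_udev_info(info):
    # Pull DEVTYPE and ID_SERIAL out of `udevadm info` text with the same
    # lookbehind regexes used in get_storage_devices() above.
    dev_type = re.search(r'(?<=E: DEVTYPE=)\S*', info)
    dev_id = re.search(r'(?<=E: ID_SERIAL=)\S*', info)
    return (dev_type.group(0) if dev_type else None,
            dev_id.group(0) if dev_id else None)


print(parse_udev_info(SAMPLE))  # ('disk', 'QEMU_HARDDISK_QM00001')
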
Example #6
def setup_or_cleanup_iscsi(is_setup, is_login=True,
                           emulated_image="emulated_iscsi", image_size="1G"):
    """
    Set up (and log in to the iscsi target) or clean up iscsi on localhost.

    :param is_setup: Boolean value, true for setup, false for cleanup
    :param is_login: Boolean value, true for login, false for not login
    :param emulated_image: name of iscsi device
    :param image_size: emulated image's size
    :return: iscsi device name or iscsi target
    """
    try:
        utils_misc.find_command("tgtadm")
        utils_misc.find_command("iscsiadm")
    except ValueError:
        raise error.TestNAError("Missing command 'tgtadm' and/or 'iscsiadm'.")

    tmpdir = os.path.join(data_dir.get_root_dir(), 'tmp')
    emulated_path = os.path.join(tmpdir, emulated_image)
    emulated_target = "iqn.2001-01.com.virttest:%s.target" % emulated_image
    iscsi_params = {"emulated_image": emulated_path, "target": emulated_target,
                    "image_size": image_size, "iscsi_thread_id": "virt"}
    _iscsi = iscsi.Iscsi(iscsi_params)
    if is_setup:
        sv_status = None
        if utils_misc.selinux_enforcing():
            sv_status = utils_selinux.get_status()
            utils_selinux.set_status("permissive")
        _iscsi.export_target()
        if sv_status is not None:
            utils_selinux.set_status(sv_status)
        if is_login:
            _iscsi.login()
            # The device doesn't necessarily appear instantaneously, so give
            # about 5 seconds for it to appear before giving up
            iscsi_device = utils_misc.wait_for(_iscsi.get_device_name, 5, 0, 1,
                                               "Searching iscsi device name.")
            if iscsi_device:
                logging.debug("iscsi device: %s", iscsi_device)
                return iscsi_device
            if not iscsi_device:
                logging.error("Not find iscsi device.")
            # Cleanup and return "" - caller needs to handle that
            # _iscsi.export_target() will have set the emulated_id and
            # export_flag already on success...
            _iscsi.cleanup()
            utils.run("rm -f %s" % emulated_path)
        else:
            return emulated_target
    else:
        _iscsi.export_flag = True
        _iscsi.emulated_id = _iscsi.get_target_id()
        _iscsi.cleanup()
        utils.run("rm -f %s" % emulated_path)
    return ""
Example #7
def run(test, params, env):
    """
    Test command: virsh find-storage-pool-sources-as

    1. Prepare env to provide source storage:
       1). For 'netfs' source type, setup nfs server
       2). For 'iscsi' source type, setup iscsi server
       3). For 'logical' type pool, setup iscsi storage to create vg
    2. Find the pool source by running virsh cmd
    """

    source_type = params.get("source_type", "")
    source_host = params.get("source_host", "localhost")
    source_port = params.get("source_port", "")
    options = params.get("extra_options", "")
    vg_name = params.get("vg_name", "virttest_vg_0")
    ro_flag = "yes" == params.get("readonly_mode", "no")
    status_error = "yes" == params.get("status_error", "no")

    if not source_type:
        raise error.TestFail("Command requires <type> value")

    cleanup_nfs = False
    cleanup_iscsi = False
    cleanup_logical = False

    if source_host == "localhost":
        if source_type == "netfs":
            # Set up nfs
            res = utils_test.libvirt.setup_or_cleanup_nfs(True)
            selinux_bak = res["selinux_status_bak"]
            cleanup_nfs = True
        if source_type in ["iscsi", "logical"]:
            # Do we have the necessary tools?
            try:
                utils_misc.find_command("tgtadm")
                utils_misc.find_command("iscsiadm")
            except ValueError:
                raise error.TestNAError("Command 'tgtadm' and/or 'iscsiadm' "
                                        "is missing. You must install it.")
            # Set up iscsi
            try:
                iscsi_device = utils_test.libvirt.setup_or_cleanup_iscsi(True)
                # If we got nothing, force failure
                if not iscsi_device:
                    raise error.TestFail("Did not setup an iscsi device")
                cleanup_iscsi = True
                if source_type == "logical":
                    # Create VG by using iscsi device
                    lv_utils.vg_create(vg_name, iscsi_device)
                    cleanup_logical = True
            except Exception as detail:
                if cleanup_iscsi:
                    utils_test.libvirt.setup_or_cleanup_iscsi(False)
                raise error.TestFail("iscsi setup failed:\n%s" % detail)
Example #9
def setup_or_cleanup_iscsi(is_setup, is_login=True,
                           emulated_image="emulated_iscsi", image_size="1G"):
    """
    Set up (and log in to the iscsi target) or clean up iscsi on localhost.

    :param is_setup: Boolean value, true for setup, false for cleanup
    :param is_login: Boolean value, true for login, false for not login
    :param emulated_image: name of iscsi device
    :param image_size: emulated image's size
    :return: iscsi device name or iscsi target
    """
    try:
        utils_misc.find_command("tgtadm")
        utils_misc.find_command("iscsiadm")
    except ValueError:
        raise error.TestNAError("Missing command 'tgtadm' and/or 'iscsiadm'.")

    tmpdir = os.path.join(data_dir.get_root_dir(), 'tmp')
    emulated_path = os.path.join(tmpdir, emulated_image)
    emulated_target = "iqn.2001-01.com.virttest:%s.target" % emulated_image
    iscsi_params = {"emulated_image": emulated_path, "target": emulated_target,
                    "image_size": image_size, "iscsi_thread_id": "virt"}
    _iscsi = iscsi.Iscsi(iscsi_params)
    if is_setup:
        sv_status = None
        if utils_misc.selinux_enforcing():
            sv_status = utils_selinux.get_status()
            utils_selinux.set_status("permissive")
        _iscsi.export_target()
        if sv_status is not None:
            utils_selinux.set_status(sv_status)
        if is_login:
            _iscsi.login()
            # The device doesn't necessarily appear instantaneously, so give
            # about 5 seconds for it to appear before giving up
            iscsi_device = utils_misc.wait_for(_iscsi.get_device_name, 5, 0, 1,
                                               "Searching iscsi device name.")
            if iscsi_device:
                logging.debug("iscsi device: %s", iscsi_device)
                return iscsi_device
            if not iscsi_device:
                logging.error("Not find iscsi device.")
            # Cleanup and return "" - caller needs to handle that
            # _iscsi.export_target() will have set the emulated_id and
            # export_flag already on success...
            _iscsi.cleanup()
            utils.run("rm -f %s" % emulated_path)
        else:
            return emulated_target
    else:
        _iscsi.export_flag = True
        _iscsi.emulated_id = _iscsi.get_target_id()
        _iscsi.cleanup()
        utils.run("rm -f %s" % emulated_path)
    return ""
Example #10
def run(test, params, env):
    """
    Test svirt in virt-clone.
    """
    VIRT_CLONE = None
    try:
        VIRT_CLONE = utils_misc.find_command("virt-clone")
    except ValueError:
        raise error.TestNAError("No virt-clone command found.")

    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("svirt_virt_clone_host_selinux", "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("svirt_virt_clone_vm_sec_type", "dynamic")
    sec_model = params.get("svirt_virt_clone_vm_sec_model", "selinux")
    sec_label = params.get("svirt_virt_clone_vm_sec_label", None)
    sec_relabel = params.get("svirt_virt_clone_vm_sec_relabel", "yes")
    sec_dict = {'type': sec_type, 'model': sec_model, 'label': sec_label,
                'relabel': sec_relabel}
    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Get variables about the image.
    img_label = params.get('svirt_virt_clone_disk_label')
    # Label the disks of VM with img_label.
    disks = vm.get_disk_devices()
    backup_labels_of_disks = {}
    for disk in disks.values():
        disk_path = disk['source']
        backup_labels_of_disks[disk_path] = utils_selinux.get_context_of_file(
            filename=disk_path)
        utils_selinux.set_context_of_file(filename=disk_path,
                                          context=img_label)
    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    utils_selinux.set_status(host_sestatus)
    # Set the context of the VM.
    vmxml.set_seclabel([sec_dict])
    vmxml.sync()

    clone_name = ("%s-clone" % vm.name)
    try:
        cmd = ("%s --original %s --name %s --auto-clone" %
               (VIRT_CLONE, vm.name, clone_name))
        cmd_result = utils.run(cmd, ignore_status=True)
        if cmd_result.exit_status:
            raise error.TestFail("Failed to execute virt-clone command."
                                 "Detail: %s." % cmd_result)
    finally:
        # clean up
        for path, label in backup_labels_of_disks.items():
            utils_selinux.set_context_of_file(filename=path, context=label)
        backup_xml.sync()
        utils_selinux.set_status(backup_sestatus)
        if not virsh.domstate(clone_name).exit_status:
            libvirt_vm.VM(clone_name, params, None, None).remove_with_storage()
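
The finally block above restores every saved disk label no matter how virt-clone exits. A generic sketch of that backup/restore discipline; run_with_restored_labels is an illustrative name, saved_contexts is hypothetical data, and chcon is only a stand-in for utils_selinux.set_context_of_file:

import subprocess


def run_with_restored_labels(cmd, saved_contexts):
    # saved_contexts maps file path -> the SELinux context captured before
    # the test (hypothetical data), restored here whatever cmd does.
    try:
        return subprocess.run(cmd, shell=True)
    finally:
        for path, context in saved_contexts.items():
            subprocess.run(["chcon", context, path])
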
Example #11
    def compare_capabilities_xml(source):
        dom = parseString(source)
        host = dom.getElementsByTagName('host')[0]
        # check that host has a non-empty UUID tag.
        uuid = host.getElementsByTagName('uuid')[0]
        host_uuid_output = uuid.firstChild.data
        logging.info("Host uuid (capabilities_xml):%s", host_uuid_output)
        if host_uuid_output == "":
            raise error.TestFail("The host uuid in capabilities_xml is none!")

        # check the host arch.
        arch = host.getElementsByTagName('arch')[0]
        host_arch_output = arch.firstChild.data
        logging.info("Host arch (capabilities_xml):%s", host_arch_output)
        cmd_result = utils.run("arch", ignore_status=True)
        if host_arch_output != cmd_result.stdout.strip():
            raise error.TestFail("The host arch in capabilities_xml is wrong!")

        # check the host cpus num.
        cpus = dom.getElementsByTagName('cpus')
        host_cpus = 0
        for cpu in cpus:
            host_cpus += int(cpu.getAttribute('num'))
        logging.info("Host cpus num (capabilities_xml):%s", host_cpus)
        cmd = "less /proc/cpuinfo | grep processor | wc -l"
        cmd_result = utils.run(cmd, ignore_status=True)
        if host_cpus != int(cmd_result.stdout.strip()):
            raise error.TestFail("Host cpus num (capabilities_xml) is "
                                 "wrong")

        # check the arch of guest supported.
        try:
            img = utils_misc.find_command("qemu-kvm")
        except ValueError:
            raise error.TestNAError("Cannot find qemu-kvm")
        cmd = img + " --cpu ? | grep qemu"
        cmd_result = utils.run(cmd, ignore_status=True)
        guest_wordsize_array = dom.getElementsByTagName('wordsize')
        length = len(guest_wordsize_array)
        for i in range(length):
            element = guest_wordsize_array[i]
            guest_wordsize = element.firstChild.data
            logging.info("Arch of guest supported (capabilities_xml):%s",
                         guest_wordsize)
            if not re.search(guest_wordsize, cmd_result.stdout.strip()):
                raise error.TestFail(
                    "The capabilities_xml gives an extra arch "
                    "of guest to support!")

        # check the hypervisor type.
        guest_domain_type = dom.getElementsByTagName('domain')[0]
        guest_domain_type_output = guest_domain_type.getAttribute('type')
        logging.info("Hypervisor (capabilities_xml):%s",
                     guest_domain_type_output)
        cmd_result = utils.run("virsh uri", ignore_status=True)
        if not re.search(guest_domain_type_output, cmd_result.stdout.strip()):
            raise error.TestFail("The capabilities_xml gives an different "
                                 "hypervisor")
Example #13
def memory_lack(params):
    """
    Lower the available memory of host
    """
    tmp_c_file = params.get("tmp_c_file", "/tmp/test.c")
    tmp_exe_file = params.get("tmp_exe_file", "/tmp/test")
    c_str = """
#include <stdio.h>
#include <unistd.h>
#include <malloc.h>
#define MAX 1024*1024*4
int main(void){
    char *a;
    while(1) {
        a = malloc(MAX);
        if (a == NULL) {
            break;
        }
    }
    while (1){
        sleep(1);
    }
    return 0;
}"""
    c_file = open(tmp_c_file, 'w')
    c_file.write(c_str)
    c_file.close()
    try:
        utils_misc.find_command('gcc')
    except ValueError:
        raise error.TestNAError('gcc command is needed!')
    result = utils.run("gcc %s -o %s" % (tmp_c_file, tmp_exe_file))
    if result.exit_status:
        raise error.TestError("Compile C file failed: %s"
                              % result.stderr.strip())
    # Set swap off before fill memory
    utils.run("swapoff -a")
    utils.run("%s &" % tmp_exe_file)
    result = utils.run("ps -ef | grep %s | grep -v grep" % tmp_exe_file)
    pid = result.stdout.strip().split()[1]
    params['memory_pid'] = pid
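
The compile step above is just "write a C file, run gcc on it"; a stand-alone sketch with subprocess and a temporary directory, assuming gcc is on PATH (compile_snippet and the trivial C source are placeholders):

import os
import subprocess
import tempfile

C_SRC = "int main(void) { return 0; }\n"


def compile_snippet(src=C_SRC):
    # Write a throwaway C file and compile it the way memory_lack() does;
    # subprocess raises CalledProcessError if gcc fails.
    workdir = tempfile.mkdtemp()
    c_path = os.path.join(workdir, "snippet.c")
    exe_path = os.path.join(workdir, "snippet")
    with open(c_path, "w") as c_file:
        c_file.write(src)
    subprocess.run(["gcc", c_path, "-o", exe_path], check=True)
    return exe_path
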
def memory_lack(params):
    """
    Lower the available memory of host
    """
    tmp_c_file = params.get("tmp_c_file", "/tmp/test.c")
    tmp_exe_file = params.get("tmp_exe_file", "/tmp/test")
    c_str = """
#include <stdio.h>
#include <malloc.h>
#define MAX 1024*4
int main(void){
    char *a;
    while(1) {
        a = malloc(MAX);
        if (a == NULL) {
            break;
        }
    }
    while (1){
    }
    return 0;
}"""
    c_file = open(tmp_c_file, 'w')
    c_file.write(c_str)
    c_file.close()
    try:
        utils_misc.find_command('gcc')
    except ValueError:
        raise error.TestNAError('gcc command is needed!')
    result = utils.run("gcc %s -o %s" % (tmp_c_file, tmp_exe_file))
    if result.exit_status:
        raise error.TestError("Compile C file failed: %s"
                              % result.stderr.strip())
    # Set swap off before fill memory
    utils.run("swapoff -a")
    utils.run("%s &" % tmp_exe_file)
    result = utils.run("ps -ef | grep %s | grep -v grep" % tmp_exe_file)
    pid = result.stdout.strip().split()[1]
    params['memory_pid'] = pid
Example #15
def get_page_size():
    """
    Get the current memory page size using getconf.
    If getconf doesn't exist, assume it's 4096.

    :return: An integer of current page size bytes.
    """
    try:
        getconf_path = utils_misc.find_command('getconf')
        return int(utils.run(getconf_path + ' PAGESIZE').stdout)
    except ValueError:
        logging.warning('getconf not found! Assuming 4K for PAGESIZE')
        return 4096
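
For comparison, on Unix hosts the page size is also available directly from the standard library, without shelling out to getconf:

import os
import resource

# Both calls return the kernel page size in bytes and should agree with
# `getconf PAGESIZE`.
print(resource.getpagesize())
print(os.sysconf('SC_PAGE_SIZE'))
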
def get_storage_devices():
    """
    Retrieve storage devices list from sysfs.

    :return:      A list contains retrieved storage device names with
                  the same format in virsh.
    """
    devices = []
    try:
        utils_misc.find_command('udevadm')
        storage_path = '/sys/class/block'
        if not os.path.exists(storage_path):
            logging.debug("Storage device path %s doesn't exist!",
                          storage_path)
            return []
        for device in os.listdir(storage_path):
            info = utils.run('udevadm info %s' %
                             os.path.join(storage_path, device),
                             timeout=5,
                             ignore_status=True).stdout
            # Only disk devices are listed, not partitions
            dev_type = re.search(r'(?<=E: DEVTYPE=)\S*', info)
            if dev_type:
                if dev_type.group(0) == 'disk':
                    # Get disk serial
                    dev_id = re.search(r'(?<=E: ID_SERIAL=)\S*', info)
                    if dev_id:
                        serial = dev_id.group(0)
                        dev_name = 'block_' + device.replace(':', '_')
                        dev_name = re.sub(r'\W', '_',
                                          'block_%s_%s' % (device, serial))
                        devices.append(dev_name)
    except ValueError:
        logging.warning('udevadm not found! Skipping storage test!')
        logging.warning('You can try installing it using `yum install udev`')
    return devices
Example #18
def netcf_trans_control(command="status"):
    """
    Control current network configuration
    :param command: may be 'status', 'snapshot-dir', 'restart',
            'change-begin', etc. Note that netcf-libs is required.
    :return: the command output
    """
    try:
        # For OS using systemd, this command usually located in /usr/libexec/.
        cmd = utils_misc.find_command("netcf-transaction.sh")
    except ValueError:
        # This is the default location for sysV init.
        old_path = "/etc/rc.d/init.d/netcf-transaction"
        if os.path.isfile(old_path):
            cmd = old_path
        else:
            raise error.TestNAError("Cannot find netcf-transaction! "
                                    "Make sure you have netcf-libs installed!")
    logging.debug(cmd)

    return commands.getoutput(cmd + " " + command)
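
The lookup order here (PATH first, then a known sysV init location) is easy to reproduce with the standard library; a small sketch assuming Python 3 for shutil.which, with find_script as an illustrative name:

import os
import shutil


def find_script(name, fallback):
    # Prefer whatever is on PATH; otherwise accept a fixed legacy location;
    # None means the tool simply is not installed.
    found = shutil.which(name)
    if found:
        return found
    return fallback if os.path.isfile(fallback) else None


# usage, mirroring netcf_trans_control():
# find_script("netcf-transaction.sh", "/etc/rc.d/init.d/netcf-transaction")
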
def run(test, params, env):
    """
    This test covers two volume commands: vol-clone and vol-wipe.

    1. Create a given type pool.
    2. Create a given format volume in the pool.
    3. Clone the new create volume.
    4. Wipe the new clone volume.
    5. Delete the volume and pool.
    """

    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    if not os.path.dirname(pool_target):
        pool_target = os.path.join(test.tmpdir, pool_target)
    emulated_image = params.get("emulated_image")
    emulated_image_size = params.get("emulated_image_size")
    vol_name = params.get("vol_name")
    new_vol_name = params.get("new_vol_name")
    vol_capability = params.get("vol_capability")
    vol_format = params.get("vol_format")
    clone_option = params.get("clone_option", "")
    wipe_algorithms = params.get("wipe_algorithms")

    if virsh.has_command_help_match("vol-wipe", "--prealloc-metadata") is None:
        if "prealloc-metadata" in clone_option:
            raise error.TestNAError("Option --prealloc-metadata "
                                    "is not supported.")

    # Algorithms other than 'zero' need scrub installed.
    try:
        utils_misc.find_command('scrub')
    except ValueError:
        logging.warning("Can't locate scrub binary, only 'zero' algorithm "
                        "is used.")
        valid_algorithms = ["zero"]
    else:
        valid_algorithms = ["zero", "nnsa", "dod", "bsi", "gutmann",
                            "schneier", "pfitzner7", "pfitzner33", "random"]

    # Choose an algorithm randomly
    if wipe_algorithms:
        alg = random.choice(wipe_algorithms.split())
    else:
        alg = random.choice(valid_algorithms)

    clone_status_error = "yes" == params.get("clone_status_error", "no")
    wipe_status_error = "yes" == params.get("wipe_status_error", "no")

    # libvirt acl polkit related params
    uri = params.get("virsh_uri")
    unpri_user = params.get('unprivileged_user')
    if unpri_user:
        if unpri_user.count('EXAMPLE'):
            unpri_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    + " libvirt version.")

    libv_pvt = libvirt.PoolVolumeTest(test, params)
    try:
        libv_pool = libvirt_storage.StoragePool()
        pool_rename_times = 0
        # Rename pool if a pool with the given name exists (at most 5 times)
        while libv_pool.pool_exists(pool_name) and pool_rename_times < 5:
            logging.debug("Pool '%s' already exist.", pool_name)
            pool_name = pool_name + "_t"
            logging.debug("Using a new name '%s' to define pool.", pool_name)
            pool_rename_times += 1
        else:
            # Create a new pool
            libv_pvt.pre_pool(pool_name, pool_type, pool_target,
                              emulated_image, emulated_image_size)

        # Create a new volume
        libv_pvt.pre_vol(vol_name=vol_name, vol_format=vol_format,
                         capacity=vol_capability, allocation=None,
                         pool_name=pool_name)
        libv_vol = libvirt_storage.PoolVolume(pool_name)
        vol_info = libv_vol.volume_info(vol_name)
        for key in vol_info:
            logging.debug("Original volume info: %s = %s", key, vol_info[key])

        # Metadata preallocation is not supported for block volumes
        if vol_info["Type"] == "block" and clone_option.count("prealloc-metadata"):
            clone_status_error = True

        # Clone volume
        clone_result = virsh.vol_clone(vol_name, new_vol_name, pool_name,
                                       clone_option, debug=True)
        if not clone_status_error:
            if clone_result.exit_status != 0:
                raise error.TestFail("Clone volume fail:\n%s" %
                                     clone_result.stdout.strip())
            else:
                vol_info = libv_vol.volume_info(new_vol_name)
                for key in vol_info:
                    logging.debug("Cloned volume info: %s = %s", key,
                                  vol_info[key])
                logging.debug("Clone volume successfully.")
                # Wipe the new clone volume
                if alg:
                    logging.debug("Wiping volume by '%s' algorithm", alg)
                wipe_result = virsh.vol_wipe(new_vol_name, pool_name, alg,
                                             unprivileged_user=unpri_user,
                                             uri=uri, debug=True)
                if not wipe_status_error:
                    if wipe_result.exit_status != 0:
                        raise error.TestFail("Wipe volume fail:\n%s" %
                                             clone_result.stdout.strip())
                    else:
                        vol_info = libv_vol.volume_info(new_vol_name)
                        for key in vol_info:
                            logging.debug("Wiped volume info: %s = %s", key,
                                          vol_info[key])
                        logging.debug("Wipe volume successfully.")
                elif wipe_status_error and wipe_result.exit_status == 0:
                    raise error.TestFail("Expect wipe volume fail, but run"
                                         " successfully.")
        elif clone_status_error and clone_result.exit_status == 0:
            raise error.TestFail("Expect clone volume fail, but run"
                                 " successfully.")
    finally:
        # Clean up
        try:
            libv_pvt.cleanup_pool(pool_name, pool_type, pool_target,
                                  emulated_image)
        except error.TestFail as detail:
            logging.error(str(detail))
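
The algorithm choice above ('zero' only when scrub is absent, otherwise the full list) can be summarised in a small stand-alone helper; a sketch mirroring that logic, not the test's own code:

import random
import shutil


def pick_wipe_algorithm(requested=None):
    # Any algorithm other than 'zero' requires the scrub binary, so fall
    # back to 'zero' alone when scrub is not installed.
    if shutil.which('scrub') is None:
        valid = ['zero']
    else:
        valid = ['zero', 'nnsa', 'dod', 'bsi', 'gutmann',
                 'schneier', 'pfitzner7', 'pfitzner33', 'random']
    pool = requested.split() if requested else valid
    return random.choice(pool)
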
Example #20
def run(test, params, env):
    """
    Test virsh {at|de}tach-interface command.

    1) Prepare test environment and its parameters
    2) Attach the required interface
    3) Perform attach and detach operation
    4) Check if attached interface is correct
    5) Detach the attached interface
    """

    def is_attached(vmxml_devices, iface_type, iface_source, iface_mac):
        """
        Check attached interface exist or not.

        :param vmxml_devices: VMXMLDevices instance
        :param iface_type: interface device type
        :param iface_source: interface source
        :param iface_mac: interface MAC address
        :return: True if a matching interface is found, False otherwise
        """
        ifaces = vmxml_devices.by_device_tag('interface')
        for iface in ifaces:
            if iface.type_name != iface_type:
                continue
            if iface.mac_address != iface_mac:
                continue
            if iface_source is not None:
                if iface.xmltreefile.find('source') is not None:
                    if iface.source['network'] != iface_source:
                        continue
                else:
                    continue
            # All three conditions met
            logging.debug("Find %s in given iface XML", iface_mac)
            return True
        logging.debug("Not find %s in given iface XML", iface_mac)
        return False

    def check_result(vm_name, iface_source, iface_type, iface_mac,
                     flags, vm_state, attach=True):
        """
        Check the test result of attach/detach-device command.
        """
        active_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        if not attach:
            utils_misc.wait_for(lambda: not is_attached(active_vmxml.devices,
                                                        iface_type, iface_source, iface_mac), 20)
        active_attached = is_attached(active_vmxml.devices, iface_type,
                                      iface_source, iface_mac)
        if vm_state != "transient":
            inactive_vmxml = vm_xml.VMXML.new_from_dumpxml(
                vm_name, options="--inactive")
            inactive_attached = is_attached(inactive_vmxml.devices,
                                            iface_type, iface_source,
                                            iface_mac)

        if flags.count("config"):
            if vm_state != "transient":
                if attach:
                    if not inactive_attached:
                        raise exceptions.TestFail("Inactive domain XML not"
                                                  " updated when --config "
                                                  "options used for attachment")
                else:
                    if inactive_attached:
                        raise exceptions.TestFail("Inactive domain XML not"
                                                  " updated when --config "
                                                  "options used for detachment")
        if flags.count("live"):
            if attach:
                if vm_state in ["paused", "running", "transient"]:
                    if not active_attached:
                        raise exceptions.TestFail("Active domain XML not updated"
                                                  " when --live options used for"
                                                  " attachment")
            else:
                if vm_state in ["paused", "running", "transient"]:
                    if active_attached:
                        raise exceptions.TestFail("Active domain XML not updated"
                                                  " when --live options used for"
                                                  " detachment")
        if flags.count("current") or flags == "":
            if attach:
                if vm_state in ["paused", "running", "transient"]:
                    if not active_attached:
                        raise exceptions.TestFail("Active domain XML not updated"
                                                  " when --current options used "
                                                  "for attachment")
                elif vm_state == "shutoff" and not inactive_attached:
                    raise exceptions.TestFail("Inactive domain XML not updated"
                                              " when --current options used for"
                                              " attachment")
            else:
                if vm_state in ["paused", "running", "transient"]:
                    if active_attached:
                        raise exceptions.TestFail("Active domain XML not updated"
                                                  " when --current options used "
                                                  "for detachment")
                elif vm_state == "shutoff" and inactive_attached:
                    raise exceptions.TestFail("Inactive domain XML not updated "
                                              "when --current options used for "
                                              "detachment")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    # Test parameters
    uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri",
                                                      "default"))
    vm_ref = params.get("at_detach_iface_vm_ref", "domname")
    at_options_suffix = params.get("at_dt_iface_at_options", "")
    dt_options_suffix = params.get("at_dt_iface_dt_options", "")
    at_status_error = "yes" == params.get("at_status_error", "no")
    dt_status_error = "yes" == params.get("dt_status_error", "no")
    pre_vm_state = params.get("at_dt_iface_pre_vm_state")

    # Skip if libvirt doesn't support --live/--current.
    if (at_options_suffix.count("--live") or
            dt_options_suffix.count("--live")):
        if not libvirt_version.version_compare(1, 0, 5):
            raise exceptions.TestSkipError("update-device doesn't"
                                           " support --live")
    if (at_options_suffix.count("--current") or
            dt_options_suffix.count("--current")):
        if not libvirt_version.version_compare(1, 0, 5):
            raise exceptions.TestSkipError("virsh update-device "
                                           "doesn't support --current")

    # Interface specific attributes.
    iface_type = params.get("at_detach_iface_type", "network")
    if iface_type == "bridge":
        try:
            utils_misc.find_command("brctl")
        except ValueError:
            raise exceptions.TestSkipError("Command 'brctl' is missing."
                                           " You must install it.")

    iface_source = params.get("at_detach_iface_source", "default")
    iface_mac = params.get("at_detach_iface_mac", "created")
    virsh_dargs = {'ignore_status': True, 'uri': uri, 'debug': True}

    # Check host version.
    rhel6_host = False
    if not process.run("grep 'Red Hat Enterprise Linux Server "
                       "release 6' /etc/redhat-release",
                       ignore_status=True, shell=True).exit_status:
        rhel6_host = True

    # Back up xml file.
    if vm.is_alive():
        vm.destroy(gracefully=False)
    backup_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Get a bridge name for test if iface_type is bridge.
    # If there is no bridge other than virbr0, raise TestSkipError
    if iface_type == "bridge":
        host_bridge = utils_net.Bridge()
        bridge_list = host_bridge.list_br()
        try:
            bridge_list.remove("virbr0")
        except ValueError:
            pass  # If there is no virbr0, just pass
        logging.debug("Useful bridges:%s", bridge_list)
        # just choosing one bridge on host.
        if len(bridge_list):
            iface_source = bridge_list[0]
        else:
            raise exceptions.TestSkipError("No useful bridge on host "
                                           "other than 'virbr0'.")

    # Turn VM into certain state.
    if pre_vm_state == "running":
        if (rhel6_host and at_options_suffix == "--config" and
                dt_options_suffix == ""):
            raise exceptions.TestSkipError("For bug921407, "
                                           "won't fix on rhel6 host")
        logging.info("Starting %s..." % vm_name)
        if vm.is_dead():
            vm.start()
            vm.wait_for_login().close()
    elif pre_vm_state == "shutoff":
        logging.info("Shuting down %s..." % vm_name)
        if vm.is_alive():
            vm.destroy(gracefully=False)
    elif pre_vm_state == "paused":
        if (rhel6_host and at_options_suffix == "--config" and
                dt_options_suffix == ""):
            raise exceptions.TestSkipError("For bug921407, "
                                           "won't fix on rhel6 host")
        logging.info("Pausing %s..." % vm_name)
        if vm.is_dead():
            vm.start()
            vm.wait_for_login().close()
        if not vm.pause():
            raise exceptions.TestSkipError("Cann't pause the domain")
    elif pre_vm_state == "transient":
        logging.info("Creating %s..." % vm_name)
        vm.undefine()
        if virsh.create(backup_xml.xml,
                        **virsh_dargs).exit_status:
            backup_xml.define()
            raise exceptions.TestSkipError("Cann't create the domain")

    dom_uuid = vm.get_uuid()
    dom_id = vm.get_id()

    # Set attach-interface domain
    if vm_ref == "domname":
        vm_ref = vm_name
    elif vm_ref == "domid":
        vm_ref = dom_id
    elif vm_ref == "domuuid":
        vm_ref = dom_uuid
    elif vm_ref == "hexdomid" and dom_id is not None:
        vm_ref = hex(int(dom_id))

    # Get a mac address if iface_mac is 'created'.
    if iface_mac == "created":
        iface_mac = utils_net.generate_mac_address_simple()

    try:
        # Set attach-interface options and
        # start attach-interface test
        options = set_options(iface_type, iface_source,
                              iface_mac, at_options_suffix,
                              "attach")
        ret = virsh.attach_interface(vm_name, options,
                                     **virsh_dargs)
        libvirt.check_exit_status(ret, at_status_error)
        # Check if the command take effect in vm
        # or config file.
        if vm.is_paused():
            vm.resume()
            vm.wait_for_login().close()
        # Sleep a while for the vm to stabilize
        time.sleep(3)
        if not ret.exit_status:
            check_result(vm_name, iface_source,
                         iface_type, iface_mac,
                         at_options_suffix, pre_vm_state)

        # Set detach-interface options
        options = set_options(iface_type, None, iface_mac,
                              dt_options_suffix, "detach")

        # Sleep for a while
        time.sleep(10)
        # Start detach-interface test
        if pre_vm_state == "paused":
            if not vm.pause():
                raise exceptions.TestFail("Cann't pause the domain")
        ret = virsh.detach_interface(vm_ref, options,
                                     **virsh_dargs)
        if rhel6_host and pre_vm_state in ['paused', 'running']:
            if (at_options_suffix == "--config" and
                    dt_options_suffix == "--config"):
                dt_status_error = True
        libvirt.check_exit_status(ret, dt_status_error)
        # Check if the command take effect
        # in vm or config file.
        if vm.is_paused():
            vm.resume()
            vm.wait_for_login().close()
        # Sleep a while for the vm to stabilize
        time.sleep(10)
        if not ret.exit_status:
            check_result(vm_name, iface_source,
                         iface_type, iface_mac,
                         dt_options_suffix,
                         pre_vm_state, False)

    finally:
        # Restore the vm
        if vm.is_alive():
            vm.destroy(gracefully=False, free_mac_addresses=False)
        backup_xml.sync()
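
When iface_mac is 'created' the test asks utils_net.generate_mac_address_simple() for a MAC; a plausible stdlib-only stand-in (the real helper may differ) keeps QEMU's locally administered 52:54:00 prefix and randomises the rest:

import random


def generate_mac_address_simple():
    # 52:54:00 is the prefix conventionally used for QEMU/KVM guest NICs;
    # the last three octets are random.
    return "52:54:00:%02x:%02x:%02x" % (random.randint(0x00, 0xff),
                                        random.randint(0x00, 0xff),
                                        random.randint(0x00, 0xff))
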
Example #21
def run(test, params, env):
    """
    Test command: virsh change-media.

    The command changes the media used by CD or floppy drives.

    Test steps:
    1. Prepare test environment.
    2. Perform virsh change-media operation.
    3. Recover test environment.
    4. Confirm the test result.
    """
    @error.context_aware
    def env_pre(old_iso, new_iso):
        """
        Prepare ISO image for test

        :param old_iso: source file for insert
        :param new_iso: source file for update
        """
        error.context("Preparing ISO images")
        utils.run("dd if=/dev/urandom of=%s/old bs=1M count=1" % iso_dir)
        utils.run("dd if=/dev/urandom of=%s/new bs=1M count=1" % iso_dir)
        utils.run("mkisofs -o %s %s/old" % (old_iso, iso_dir))
        utils.run("mkisofs -o %s %s/new" % (new_iso, iso_dir))

    @error.context_aware
    def check_media(session, target_file, action):
        """
        Check guest cdrom/floppy files

        :param session: guest session
        :param target_file: the expected files
        :param action: test case action
        """
        if action != "--eject ":
            error.context("Checking guest %s files" % target_device)
            if target_device == "hdc":
                mount_cmd = "mount /dev/sr0 /media"
            else:
                if session.cmd_status("ls /dev/fd0"):
                    session.cmd("mknod /dev/fd0 b 2 0")
                mount_cmd = "mount /dev/fd0 /media"
            session.cmd(mount_cmd)
            session.cmd("test -f /media/%s" % target_file)
            session.cmd("umount /media")

        else:
            error.context("Ejecting guest cdrom files")
            if target_device == "hdc":
                if session.cmd_status("mount /dev/sr0 /media -o loop") == 32:
                    logging.info("Eject succeeded")
            else:
                if session.cmd_status("ls /dev/fd0"):
                    session.cmd("mknod /dev/fd0 b 2 0")
                if session.cmd_status("mount /dev/fd0 /media -o loop") == 32:
                    logging.info("Eject succeeded")

    def add_device(vm_name, init_source="''"):
        """
        Add device for test vm

        :param vm_name: guest name
        :param init_source: source file
        """
        if vm.is_alive():
            virsh.destroy(vm_name)

        virsh.attach_disk(vm_name,
                          init_source,
                          target_device,
                          "--type %s --sourcetype file --config" % device_type,
                          debug=True)

    def update_device(vm_name, init_iso, options, start_vm):
        """
        Update device iso file for test case

        :param vm_name: guest name
        :param init_iso: source file
        :param options: update-device option
        :param start_vm: guest start flag
        """
        snippet = """
<disk type='file' device='%s'>
<driver name='qemu' type='raw'/>
<source file='%s'/>
<target dev='%s'/>
<readonly/>
</disk>
""" % (device_type, init_iso, target_device)
        update_iso_file = open(update_iso_xml, "w")
        update_iso_file.write(snippet)
        update_iso_file.close()

        cmd_options = "--force "
        if options == "--config" or start_vm == "no":
            cmd_options += " --config"

        # Give domain the ISO image file
        return virsh.update_device(domainarg=vm_name,
                                   filearg=update_iso_xml,
                                   flagstr=cmd_options,
                                   debug=True)

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vm_ref = params.get("change_media_vm_ref")
    action = params.get("change_media_action")
    start_vm = params.get("start_vm")
    options = params.get("change_media_options")
    device_type = params.get("change_media_device_type", "cdrom")
    target_device = params.get("change_media_target_device", "hdc")
    source_name = params.get("change_media_source")
    status_error = params.get("status_error", "no")
    check_file = params.get("change_media_check_file")
    update_iso_xml_name = params.get("change_media_update_iso_xml")
    init_iso_name = params.get("change_media_init_iso")
    old_iso_name = params.get("change_media_old_iso")
    new_iso_name = params.get("change_media_new_iso")
    source_path = params.get("change_media_source_path", "yes")

    if device_type not in ['cdrom', 'floppy']:
        raise error.TestNAError("Got a invalid device type:/n%s" % device_type)

    try:
        utils_misc.find_command("mkisofs")
    except ValueError:
        raise error.TestNAError("Command 'mkisofs' is missing. You must "
                                "install it (try 'genisoimage' package.")

    # Backup for recovery.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    iso_dir = os.path.join(data_dir.get_tmp_dir(), "tmp")
    old_iso = os.path.join(iso_dir, old_iso_name)
    new_iso = os.path.join(iso_dir, new_iso_name)
    update_iso_xml = os.path.join(iso_dir, update_iso_xml_name)
    if not os.path.exists(iso_dir):
        os.mkdir(iso_dir)
    if not init_iso_name:
        init_iso = ""
    else:
        init_iso = os.path.join(iso_dir, init_iso_name)

    if vm_ref == "name":
        vm_ref = vm_name

    env_pre(old_iso, new_iso)
    # Check domain's disk device
    disk_blk = vm_xml.VMXML.get_disk_blk(vm_name)
    logging.info("disk_blk %s", disk_blk)
    if target_device not in disk_blk:
        logging.info("Adding device")
        add_device(vm_name)

    if vm.is_alive() and start_vm == "no":
        logging.info("Destroying guest...")
        vm.destroy()

    elif vm.is_dead() and start_vm == "yes":
        logging.info("Starting guest...")
        vm.start()

    # If test target is floppy, you need to set selinux to Permissive mode.
    result = update_device(vm_name, init_iso, options, start_vm)

    # If selinux is set to enforcing and we FAIL, then just SKIP
    force_SKIP = False
    if result.exit_status == 1 and utils_misc.selinux_enforcing() and \
       result.stderr.count("unable to execute QEMU command 'change':"):
        force_SKIP = True

    # Libvirt will ignore --source when action is eject
    if action == "--eject ":
        source = ""
    else:
        source = os.path.join(iso_dir, source_name)
        if source_path == "no":
            source = source_name

    all_options = action + options + " " + source
    result = virsh.change_media(vm_ref,
                                target_device,
                                all_options,
                                ignore_status=True,
                                debug=True)
    if status_error == "yes":
        if start_vm == "no" and vm.is_dead():
            try:
                vm.start()
            except virt_vm.VMStartError as detail:
                result.exit_status = 1
                result.stderr = str(detail)
        if start_vm == "yes" and vm.is_alive():
            vm.destroy(gracefully=False)
            try:
                vm.start()
            except virt_vm.VMStartError as detail:
                result.exit_status = 1
                result.stderr = str(detail)
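
env_pre() above builds its test media with dd and mkisofs; an equivalent stand-alone sketch using subprocess, assuming mkisofs/genisoimage is installed (make_test_iso and the file names are placeholders):

import os
import subprocess


def make_test_iso(workdir, name="old"):
    # Write 1 MiB of random data and wrap it in an ISO image, mirroring
    # the dd + mkisofs commands in env_pre().
    payload = os.path.join(workdir, name)
    iso = payload + ".iso"
    with open(payload, "wb") as payload_file:
        payload_file.write(os.urandom(1024 * 1024))
    subprocess.run(["mkisofs", "-o", iso, payload], check=True)
    return iso
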
def run_timedrift_no_net(test, params, env):
    """
    Test suspend commands in qemu guest agent.

    :param test: kvm test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    clock_server = params.get("clock_server", "clock.redhat.com")
    ntputil_install = params.get("ntputil_install", "yum install -y ntpdate")
    login_timeout = int(params.get("login_timeout", "240"))
    guest_clock_source = params.get("guest_clock_source", "kvm-clock")
    date_time_command = params.get("date_time_command",
                                   "date -u +'TIME: %a %m/%d/%Y %H:%M:%S.%N'")
    date_time_filter_re = params.get("date_time_filter_re",
                                     "(?:TIME: \w\w\w )(.{19})(.+)")
    date_time_format = params.get("date_time_format",
                                  "%m/%d/%Y %H:%M:%S")
    hwclock_time_command = params.get("hwclock_time_command",
                                      "LC_TIME=C hwclock -u")
    hwclock_time_filter_re = params.get("hwclock_time_filter_re",
                                        "(.+)")
    hwclock_time_format = params.get("hwclock_time_format",
                                     "%a %b %d %H:%M:%S %Y")
    tolerance = float(params.get("time_diff_tolerance", "0.5"))

    sub_work = params["sub_work"]

    vm_name = params.get("vms")
    vm = env.get_vm(vm_name)
    error.context("Check if ntp utils are host in system.", logging.info)
    try:
        utils_misc.find_command("ntpdate")
    except ValueError:
        error.context("Install ntp utils `%s`." % (ntputil_install),
                      logging.info)
        utils.run(ntputil_install)
    error.context("Sync host machine with clock server %s" % (clock_server),
                  logging.info)
    utils.run("ntpdate %s" % (clock_server))
    error.context("Check clock source on guest VM", logging.info)
    session = vm.wait_for_serial_login(timeout=login_timeout)
    out = session.cmd_output("cat /sys/devices/system/clocksource/"
                             "clocksource0/current_clocksource")
    if guest_clock_source not in out:
        raise error.TestFail("Clock source %s missing in guest clock "
                             "sources %s." % (guest_clock_source, out))

    error.context("Get clock from host and guest VM using `date`",
                  logging.info)
    before_date = utils_test.get_time(session,
                                      date_time_command,
                                      date_time_filter_re,
                                      date_time_format)
    logging.debug(before_date)

    error.context("Get clock from host and guest VM using `hwclock`",
                  logging.info)
    before_hwclock = utils_test.get_time(session,
                                         hwclock_time_command,
                                         hwclock_time_filter_re,
                                         hwclock_time_format)
    logging.debug(before_hwclock)

    session.close()

    if sub_work in globals():  # Try to find sub work function.
        globals()[sub_work](params, vm, session)
    else:
        raise error.TestNAError("Unable to found subwork %s in %s test file." %
                                (sub_work, __file__))

    session = vm.wait_for_serial_login(timeout=login_timeout)
    error.context("Get clock from host and guest VM using `date`",
                  logging.info)
    after_date = utils_test.get_time(session,
                                     date_time_command,
                                     date_time_filter_re,
                                     date_time_format)
    logging.debug(after_date)

    error.context("Get clock from host and guest VM using `hwclock`",
                  logging.info)
    after_hwclock = utils_test.get_time(session,
                                        hwclock_time_command,
                                        hwclock_time_filter_re,
                                        hwclock_time_format)
    logging.debug(after_hwclock)

    date_diff = time_diff(before_date, after_date)
    hwclock_diff = time_diff(before_hwclock, after_hwclock)
    if date_diff > tolerance and hwclock_diff > tolerance:
        raise error.TestFail("hwclock %ss and date %ss difference is"
                             " out of tolerance %ss" % (hwclock_diff,
                                                        date_diff,
                                                        tolerance))
    elif date_diff > tolerance:
        raise error.TestFail("date %ss difference is"
                             " out of tolerance %ss" % (date_diff, tolerance))
    elif hwclock_diff > tolerance:
        raise error.TestFail("hwclock %ss difference is"
                             " out of tolerance %ss" % (hwclock_diff,
                                                        tolerance))
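

# NOTE: the helpers time_diff() and time_diff_host_guest() are not part of
# this excerpt. Judging only from how they are called above (the before_*/
# after_* values are (host_time, guest_time) pairs from utils_test.get_time),
# a minimal, assumed sketch of what they compute could look like this:
def time_diff(before, after):
    """
    Assumed helper: return how far the guest clock drifted relative to the
    host clock between the two samples.

    :param before: (host_time, guest_time) tuple taken before the sub work.
    :param after: (host_time, guest_time) tuple taken after the sub work.
    """
    host_delta = after[0] - before[0]
    guest_delta = after[1] - before[1]
    return abs(host_delta - guest_delta)


def time_diff_host_guest(before, after):
    """
    Assumed helper: return the (host_delta, guest_delta) pair separately,
    so callers can look at the guest-side difference as date_diff[1].
    """
    return (after[0] - before[0], after[1] - before[1])
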
def run(test, params, env):
    """
    Check host/guest time drift after a sub work (e.g. qemu guest agent
    suspend or guest pause/resume) is performed on the guest.

    :param test: kvm test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    clock_server = params.get("clock_server", "clock.redhat.com")
    ntputil_install = params.get("ntputil_install", "yum install -y ntpdate")
    login_timeout = int(params.get("login_timeout", "240"))
    date_time_command = params.get("date_time_command",
                                   r"date -u +'TIME: %a %m/%d/%Y %H:%M:%S.%N'")
    date_time_filter_re = params.get("date_time_filter_re",
                                     r"(?:TIME: \w\w\w )(.{19})(.+)")
    date_time_format = params.get("date_time_format",
                                  "%m/%d/%Y %H:%M:%S")

    tolerance = float(params.get("time_diff_tolerance", "0.5"))

    sub_work = params["sub_work"]
    test_type = params["timedrift_sub_work"]

    vm_name = params.get("vms")
    vm = env.get_vm(vm_name)
    error.context("Check if ntp utils are host in system.", logging.info)
    try:
        utils_misc.find_command("ntpdate")
    except ValueError:
        error.context("Install ntp utils `%s`." % (ntputil_install),
                      logging.info)
        utils.run(ntputil_install)
    error.context("Sync host machine with clock server %s" % (clock_server),
                  logging.info)
    utils.run("ntpdate %s" % (clock_server))

    session = vm.wait_for_login(timeout=login_timeout)
    error.context("Get clock from host and guest VM using `date`",
                  logging.info)

    before_date = utils_test.get_time(session,
                                      date_time_command,
                                      date_time_filter_re,
                                      date_time_format)
    logging.debug("date: host time=%ss guest time=%ss",
                  *before_date)

    session.close()

    if sub_work in globals():  # Try to find sub work function.
        globals()[sub_work](params, vm, session)
    else:
        raise error.TestNAError("Unable to found subwork %s in %s test file." %
                                (sub_work, __file__))

    vm = env.get_vm(vm_name)
    session = vm.wait_for_login(timeout=login_timeout)
    error.context("Get clock from host and guest VM using `date`",
                  logging.info)
    after_date = utils_test.get_time(session,
                                     date_time_command,
                                     date_time_filter_re,
                                     date_time_format)
    logging.debug("date: host time=%ss guest time=%ss",
                  *after_date)

    if test_type == 'guest_suspend':
        date_diff = time_diff(before_date, after_date)
        if date_diff > tolerance:
            raise error.TestFail("date difference %ss "
                                 "(guest_diff_time != host_diff_time)"
                                 " is out of tolerance %ss" % (date_diff,
                                                               tolerance))
    elif test_type == "guest_pause_resume":
        date_diff = time_diff_host_guest(before_date, after_date)
        if date_diff[1] > tolerance:
            raise error.TestFail("date %ss difference is "
                                 "'guest_time_after-guest_time_before'"
                                 " out of tolerance %ss" % (date_diff[1],
                                                            tolerance))
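

# NOTE: the sub_work functions referenced above are looked up by name in
# globals() and called as func(params, vm, session). As an illustration
# only, a minimal guest_pause_resume sub work might look like the sketch
# below (assuming "import time" at module level; vm.pause()/vm.resume()
# are the usual virt-test VM helpers):
def guest_pause_resume(params, vm, session):
    """
    Assumed sub work: pause the guest for a while, then resume it, so the
    guest clock can drift away from the host clock.
    """
    pause_time = float(params.get("wait_time", 60))
    vm.pause()                  # stop the guest vCPUs
    time.sleep(pause_time)      # let host and guest clocks diverge
    vm.resume()                 # continue guest execution
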
Exemple #24
0
def run(test, params, env):
    """
    Test numa tuning

    1) Positive testing
       1.1) get the current numa parameters for a running/shutoff guest
       1.2) set the current numa parameters for a running/shutoff guest
           1.2.1) set valid 'mode' parameters
           1.2.2) set valid 'nodeset' parameters
    2) Negative testing
       2.1) get numa parameters
           2.1.1) invalid options
           2.1.2) stop cgroup service
       2.2) set numa parameters
           2.2.1) invalid 'mode' parameters
           2.2.2) invalid 'nodeset' parameters
           2.2.3) change 'mode' for a running guest and 'mode' is not 'strict'
           2.2.4) change 'nodeset' for a running guest with numa mode of
                  'interleave' or 'preferred'
           2.2.5) stop cgroup service
    """

    try:
        utils_misc.find_command("numactl")
    except ValueError:
        raise error.TestNAError("Command 'numactl' is missing. You must "
                                "install it.")

    # Run test case
    #vm_name = params.get("vms")
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    original_vm_xml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    cg = utils_cgroup.CgconfigService()
    status_error = params.get("status_error", "no")
    libvirtd = params.get("libvirtd", "on")
    cgconfig = params.get("cgconfig", "on")
    start_vm = params.get("start_vm", "no")
    change_parameters = params.get("change_parameters", "no")

    # Make sure vm is down if start not requested
    if start_vm == "no" and vm.is_alive():
        vm.destroy()

    # positive and negative testing #########

    cgstop = False
    try:
        if status_error == "no":
            if change_parameters == "no":
                get_numa_parameter(params, cgstop)
            else:
                set_numa_parameter(params, cgstop)
        if cgconfig == "off":
            # If the guest is running, it needs to be shut down before
            # stopping the cgconfig service; the guest will be started
            # again after restarting the libvirtd service
            if cg.cgconfig_is_running():
                if vm.is_alive():
                    vm.destroy()
                cg.cgconfig_stop()
                cgstop = True

        # If we stopped cgconfig, then restart the libvirtd service
        # to pick up the latest cgconfig service change; otherwise,
        # with no cgconfig change, restarting libvirtd is pointless
        if cgstop and libvirtd == "restart":
            try:
                utils_libvirtd.libvirtd_restart()
            finally:
                # Not running is not a good thing, but it does happen
                # and it will affect other tests
                if not utils_libvirtd.libvirtd_is_running():
                    raise error.TestNAError("libvirt service is not running!")

        # Recover previous running guest
        if (cgconfig == "off" and libvirtd == "restart" and not vm.is_alive()
                and start_vm == "yes"):
            vm.start()
        if status_error == "yes":
            if change_parameters == "no":
                get_numa_parameter(params, cgstop)
            else:
                set_numa_parameter(params, cgstop)
    finally:
        # Restore guest
        original_vm_xml.sync()

        # If we stopped cg, then recover and refresh libvirtd to recognize
        if cgstop:
            cg.cgconfig_start()
            utils_libvirtd.libvirtd_restart()
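

# NOTE: get_numa_parameter() and set_numa_parameter() are defined elsewhere
# in this test module. As a rough, hypothetical sketch of the "get" side only
# (assuming it simply wraps the `virsh numatune` command and compares the
# exit status with the expectation carried in params; the real helper is
# more complete):
def get_numa_parameter(params, cgstop):
    """
    Hypothetical sketch: query numa parameters and check the exit status.
    """
    vm_name = params.get("main_vm")
    status_error = params.get("status_error", "no") == "yes"
    result = utils.run("virsh numatune %s" % vm_name, ignore_status=True)
    # cgstop tells the checker the cgconfig service was stopped, which may
    # turn an otherwise positive case into an expected failure.
    fail_expected = status_error or cgstop
    if fail_expected and result.exit_status == 0:
        raise error.TestFail("virsh numatune succeeded unexpectedly")
    if not fail_expected and result.exit_status != 0:
        raise error.TestFail("virsh numatune failed:\n%s" % result.stderr)
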
Exemple #25
0
def _set_iptables_firewalld(iptables_status, firewalld_status):
    """
    Try to set firewalld and iptables services status.

    :param iptables_status: Whether iptables should be set active.
    :param firewalld_status: Whether firewalld should be set active.
    :return: A tuple of two boolean stand for the original status of
             iptables and firewalld.
    """
    # pylint: disable=E1103
    logging.debug("Setting firewalld and iptables services.")

    # Iptables and firewalld are two exclusive services.
    # It's impossible to start both.
    if iptables_status and firewalld_status:
        msg = "Can't active both iptables and firewalld services."
        raise error.TestNAError(msg)

    # Check the availability of both packages.
    try:
        utils_misc.find_command('iptables')
        iptables = service.Factory.create_service('iptables')
    except ValueError:
        msg = "Can't find service iptables."
        raise error.TestNAError(msg)

    try:
        utils_misc.find_command('firewalld')
        firewalld = service.Factory.create_service('firewalld')
    except ValueError:
        msg = "Can't find service firewalld."
        raise error.TestNAError(msg)

    # Back up original services status.
    old_iptables = iptables.status()
    old_firewalld = firewalld.status()

    # We should stop one service first and then start the other.
    # Directly starting one service forces the other service to stop,
    # which is not easy to handle.
    if not iptables_status and iptables.status():
        utils.run('iptables-save > /tmp/iptables.save')
        if not iptables.stop():
            msg = "Can't stop service iptables"
            raise error.TestError(msg)

    if not firewalld_status and firewalld.status():
        if not firewalld.stop():
            msg = ("Service firewalld can't be stopped. "
                   "Maybe it is masked by default. you can unmask it by "
                   "running 'systemctl unmask firewalld'.")
            raise error.TestNAError(msg)

    if iptables_status and not iptables.status():
        if not iptables.start():
            msg = "Can't start service iptables"
            raise error.TestError(msg)
        utils.run('iptables-restore < /tmp/iptables.save')

    if firewalld_status and not firewalld.status():
        if not firewalld.start():
            msg = ("Service firewalld can't be started. "
                   "Maybe it is masked by default. you can unmask it by "
                   "running 'systemctl unmask firewalld'.")
            raise error.TestNAError(msg)

    return old_iptables, old_firewalld
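

# NOTE: a caller of _set_iptables_firewalld() would typically save the
# returned tuple and restore both services when its work is done. A minimal,
# hypothetical usage pattern (not taken from this module) could be:
def _example_firewall_toggle():
    """Hypothetical usage: activate iptables for a test, then restore."""
    old_iptables, old_firewalld = _set_iptables_firewalld(True, False)
    try:
        pass  # ... run the firewall-sensitive test steps here ...
    finally:
        # Put both services back into their original states.
        _set_iptables_firewalld(old_iptables, old_firewalld)
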
Exemple #26
0
def run(test, params, env):
    """
    Test the network transaction and cover virsh iface-{begin,commit,rollback}

    1. Positive testing
       1.1 begin or/and commit testing with libvirtd running
       1.2 begin or/and rollback testing with libvirtd running
    2. Negative testing
       2.1 no pending transaction testing
       2.2 there is already an open transaction testing
       2.3 break network transaction testing
           2.3.1 begin and commit testing with libvirtd restart
           2.3.2 begin and rollback testing with libvirtd restart
    """

    try:
        utils_misc.find_command("locate")
    except ValueError:
        raise error.TestNAError("Command 'locate' is missing. You must "
                                "install it.")
    # Run test case
    status_error = params.get("status_error", "no")
    libvirtd = params.get("libvirtd", "on")
    exist_trans = params.get("exist_trans", "no")
    iface_name = params.get("iface_name", "ifcfg-test")
    transaction = params.get("iface_transaction", "")

    network_script_dir = "/etc/sysconfig/network-scripts"

    iface_cfg = os.path.join(network_script_dir, iface_name)
    netcf_snap_dir = netcf_trans_control("snapshot-dir")

    params['iface_cfg'] = iface_cfg
    params['netcf_snap_dir'] = netcf_snap_dir

    # positive and negative testing #########

    try:
        if status_error == "no":
            # Do begin-commit testing
            if transaction == "begin_commit":
                iface_trans_begin(params)
                write_iface_cfg(iface_cfg)
                # Break begin-commit operation
                if libvirtd == "restart":
                    utils_libvirtd.service_libvirtd_control("restart")
                try:
                    iface_trans_commit(params)
                except error.TestError:
                    cleanup(iface_cfg, exist_trans)

                # Only cleanup temporary network configuration file
                cleanup(iface_cfg)

            # Do begin-rollback testing
            elif transaction == "begin_rollback":
                iface_trans_begin(params)
                write_iface_cfg(iface_cfg)
                # Break begin-rollback operation
                if libvirtd == "restart":
                    utils_libvirtd.service_libvirtd_control("restart")
                try:
                    iface_trans_rollback(params)
                except error.TestError:
                    cleanup(iface_cfg, exist_trans)
            else:
                raise error.TestFail("The 'transaction' must be 'begin_commit' or"
                                     " 'begin_rollback': %s" % status_error)

        if status_error == "yes":
            # No pending transaction
            if exist_trans != "yes":
                iface_trans_commit(params)
                iface_trans_rollback(params)
            # There is already an open transaction
            else:
                netcf_trans_control('change-begin')
                iface_trans_begin(params)
    finally:
        # Cleanup network transaction and temporary configuration file
        cleanup(iface_cfg, exist_trans)
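

# NOTE: write_iface_cfg() above comes from the same test module and is not
# shown here. Presumably it just drops a throw-away ifcfg file into the
# network-scripts directory so the netcf transaction has a change to commit
# or roll back; a minimal sketch under that assumption:
def write_iface_cfg(iface_cfg):
    """
    Assumed helper: create a dummy interface configuration file.
    """
    cfg_file = open(iface_cfg, "w")
    try:
        cfg_file.write("DEVICE=test\n")
        cfg_file.write("ONBOOT=no\n")
        cfg_file.write("BOOTPROTO=none\n")
    finally:
        cfg_file.close()
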
def run(test, params, env):
    """
    This test cover two volume commands: vol-clone and vol-wipe.

    1. Create a given type pool.
    2. Create a given format volume in the pool.
    3. Clone the new create volume.
    4. Wipe the new clone volume.
    5. Delete the volume and pool.
    """

    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    if not os.path.dirname(pool_target):
        pool_target = os.path.join(test.tmpdir, pool_target)
    emulated_image = params.get("emulated_image")
    emulated_image_size = params.get("emulated_image_size")
    vol_name = params.get("vol_name")
    new_vol_name = params.get("new_vol_name")
    vol_capability = params.get("vol_capability")
    vol_format = params.get("vol_format")
    clone_option = params.get("clone_option", "")
    wipe_algorithms = params.get("wipe_algorithms")

    if virsh.has_command_help_match("vol-wipe", "--prealloc-metadata") is None:
        if "prealloc-metadata" in clone_option:
            raise error.TestNAError("Option --prealloc-metadata " "is not supported.")

    # Using algorithms other than 'zero' needs scrub installed.
    try:
        utils_misc.find_command("scrub")
    except ValueError:
        logging.warning("Can't locate scrub binary, only 'zero' algorithm " "is used.")
        valid_algorithms = ["zero"]
    else:
        valid_algorithms = ["zero", "nnsa", "dod", "bsi", "gutmann", "schneier", "pfitzner7", "pfitzner33", "random"]

    # Choose an algorithm randomly
    if wipe_algorithms:
        alg = random.choice(wipe_algorithms.split())
    else:
        alg = random.choice(valid_algorithms)

    clone_status_error = "yes" == params.get("clone_status_error", "no")
    wipe_status_error = "yes" == params.get("wipe_status_error", "no")
    setup_libvirt_polkit = "yes" == params.get("setup_libvirt_polkit")

    # libvirt acl polkit related params
    uri = params.get("virsh_uri")
    unpri_user = params.get("unprivileged_user")
    if unpri_user:
        if unpri_user.count("EXAMPLE"):
            unpri_user = "******"

    if not libvirt_version.version_compare(1, 1, 1):
        if setup_libvirt_polkit:
            raise error.TestNAError("API acl test not supported in current" " libvirt version.")

    del_pool = True
    libv_pvt = libvirt.PoolVolumeTest(test, params)
    try:
        libv_pool = libvirt_storage.StoragePool()
        if libv_pool.pool_exists(pool_name):
            logging.debug("Use existing pool '%s'", pool_name)
            del_pool = False
        else:
            # Create a new pool
            disk_vol = []
            if pool_type == "disk":
                disk_vol.append(params.get("pre_vol", "10M"))
            libv_pvt.pre_pool(
                pool_name=pool_name,
                pool_type=pool_type,
                pool_target=pool_target,
                emulated_image=emulated_image,
                image_size=emulated_image_size,
                pre_disk_vol=disk_vol,
            )
        libv_vol = libvirt_storage.PoolVolume(pool_name)
        if libv_vol.volume_exists(vol_name):
            logging.debug("Use exist volume '%s'", vol_name)
        elif vol_format in ["raw", "qcow2", "qed", "vmdk"]:
            # Create a new volume
            libv_pvt.pre_vol(
                vol_name=vol_name, vol_format=vol_format, capacity=vol_capability, allocation=None, pool_name=pool_name
            )
        elif vol_format == "partition":
            vol_name = libv_vol.list_volumes().keys()[0]
            logging.debug("Partition %s in disk pool is volume" % vol_name)
        elif vol_format == "sparse":
            # Create a sparse file in pool
            sparse_file = pool_target + "/" + vol_name
            cmd = "dd if=/dev/zero of=" + sparse_file
            cmd += " bs=1 count=0 seek=" + vol_capability
            utils.run(cmd)
        else:
            raise error.TestError("Unknown volume format %s" % vol_format)
        # Refresh the pool
        virsh.pool_refresh(pool_name)
        vol_info = libv_vol.volume_info(vol_name)
        for key in vol_info:
            logging.debug("Original volume info: %s = %s", key, vol_info[key])

        # Metadata preallocation is not supported for block volumes
        if vol_info["Type"] == "block" and clone_option.count("prealloc-metadata"):
            clone_status_error = True

        if pool_type == "disk":
            new_vol_name = libvirt.new_disk_vol_name(pool_name)
            if new_vol_name is None:
                raise error.TestError("Fail to generate volume name")
            # update polkit rule as the volume name changed
            if setup_libvirt_polkit:
                vol_pat = r"lookup\('vol_name'\) == ('\S+')"
                new_value = "lookup('vol_name') == '%s'" % new_vol_name
                libvirt.update_polkit_rule(params, vol_pat, new_value)
        # Clone volume
        clone_result = virsh.vol_clone(vol_name, new_vol_name, pool_name, clone_option, debug=True)
        if not clone_status_error:
            if clone_result.exit_status != 0:
                raise error.TestFail("Clone volume fail:\n%s" % clone_result.stderr.strip())
            else:
                vol_info = libv_vol.volume_info(new_vol_name)
                for key in vol_info:
                    logging.debug("Cloned volume info: %s = %s", key, vol_info[key])
                logging.debug("Clone volume successfully.")
                # Wipe the new clone volume
                if alg:
                    logging.debug("Wiping volume by '%s' algorithm", alg)
                wipe_result = virsh.vol_wipe(
                    new_vol_name, pool_name, alg, unprivileged_user=unpri_user, uri=uri, debug=True
                )
                unsupported_err = ["Unsupported algorithm", "no such pattern sequence"]
                if not wipe_status_error:
                    if wipe_result.exit_status != 0:
                        if any(err in wipe_result.stderr for err in unsupported_err):
                            raise error.TestNAError(wipe_result.stderr)
                        raise error.TestFail("Wipe volume fail:\n%s" % clone_result.stdout.strip())
                    else:
                        virsh_vol_info = libv_vol.volume_info(new_vol_name)
                        for key in virsh_vol_info:
                            logging.debug("Wiped volume info(virsh): %s = %s", key, virsh_vol_info[key])
                        vol_path = virsh.vol_path(new_vol_name, pool_name).stdout.strip()
                        qemu_vol_info = utils_misc.get_image_info(vol_path)
                        for key in qemu_vol_info:
                            logging.debug("Wiped volume info(qemu): %s = %s", key, qemu_vol_info[key])
                            if qemu_vol_info["format"] != "raw":
                                raise error.TestFail("Expect wiped volume " "format is raw")
                elif wipe_status_error and wipe_result.exit_status == 0:
                    raise error.TestFail("Expect wipe volume fail, but run" " successfully.")
        elif clone_status_error and clone_result.exit_status == 0:
            raise error.TestFail("Expect clone volume fail, but run" " successfully.")
    finally:
        # Clean up
        try:
            if del_pool:
                libv_pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image)
            else:
                # Only delete the volumes
                libv_vol = libvirt_storage.PoolVolume(pool_name)
                for vol in [vol_name, new_vol_name]:
                    libv_vol.delete_volume(vol)
        except error.TestFail, detail:
            logging.error(str(detail))
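

# NOTE: NETWORK_SCRIPT, used by the interface tests below, is a module-level
# constant not shown in this excerpt; presumably it is the ifcfg path prefix,
# e.g. something like the following (assumption, not taken from the source):
# NETWORK_SCRIPT = "/etc/sysconfig/network-scripts/ifcfg-"
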
def run(test, params, env):
    """
    Test virsh iface-bridge and iface-unbridge commands.

    (1) Bridge an existing network device(iface-bridge).
    (2) Unbridge a network device(iface-unbridge).
    """

    iface_name = params.get("iface_name")
    bridge_name = params.get("bridge_name")
    ping_ip = params.get("ping_ip", "")
    ping_count = int(params.get("ping_count", "3"))
    ping_timeout = int(params.get("ping_timeout", "5"))
    bridge_option = params.get("bridge_option")
    unbridge_option = params.get("unbridge_option")
    bridge_delay = "yes" == params.get("bridge_delay", "no")
    delay_num = params.get("delay_num", "0")
    create_bridge = "yes" == params.get("create_bridge", "yes")
    bridge_status_error = "yes" == params.get("bridge_status_error", "no")
    unbridge_status_error = "yes" == params.get("unbridge_status_error", "no")
    iface_script = NETWORK_SCRIPT + iface_name
    iface_script_bk = os.path.join(test.tmpdir, "iface-%s.bk" % iface_name)
    check_iface = "yes" == params.get("check_iface", "yes")
    if check_iface:
        # Make sure the interface exists
        if not libvirt.check_iface(iface_name, "exists", "--all"):
            raise error.TestNAError("Interface '%s' not exists" % iface_name)

        net_iface = utils_net.Interface(name=iface_name)
        iface_is_up = net_iface.is_up()
        iface_ip = net_iface.get_ip()

        # Back up the interface script
        utils.run("cp %s %s" % (iface_script, iface_script_bk))

    # Make sure the bridge name not exists
    net_bridge = utils_net.Bridge()
    if bridge_name in net_bridge.list_br():
        raise error.TestNAError("Bridge '%s' already exists" % bridge_name)

    # Stop NetworkManager service
    try:
        NM = utils_misc.find_command("NetworkManager")
    except ValueError:
        logging.debug("No NetworkManager service.")
        NM = None
    NM_is_running = False
    if NM is not None:
        NM_service = service.Factory.create_service("NetworkManager")
        NM_is_running = NM_service.status()
        if NM_is_running:
            NM_service.stop()

    def unbridge_check():
        """
        Check the result after do unbridge.
        """
        list_option = "--all"
        if libvirt.check_iface(bridge_name, "exists", list_option):
            raise error.TestFail("%s is still present." % bridge_name)
        if "no-start" in unbridge_option:
            list_option = "--inactive"
        if not libvirt.check_iface(iface_name, "exists", list_option):
            raise error.TestFail("%s is not present." % iface_name)

    if bridge_delay:
        bridge_option += " --delay %s" % delay_num
    # Run test
    try:
        if create_bridge:
            # Create bridge
            result = virsh.iface_bridge(iface_name, bridge_name, bridge_option)
            libvirt.check_exit_status(result, bridge_status_error)
            if not bridge_status_error:
                # Get the newly created bridge's IP address
                try:
                    br_ip = utils_net.get_ip_address_by_interface(bridge_name)
                except:
                    br_ip = ""
                # check IP of new bridge
                if check_iface and br_ip and br_ip != iface_ip:
                    raise error.TestFail("bridge IP(%s) isn't the same as iface IP(%s)."
                                         % (br_ip, iface_ip))
                # check the status of STP feature
                if "no-start" not in bridge_option:
                    if "no-stp" not in bridge_option:
                        if "yes" != net_bridge.get_stp_status(bridge_name):
                            raise error.TestFail("Fail to enable STP.")
                # Do ping test only if the bridge has an IP and ping_ip is not empty
                if br_ip and ping_ip:
                    if not libvirt.check_iface(bridge_name, "ping", ping_ip,
                                               count=ping_count, timeout=ping_timeout):
                        raise error.TestFail("Fail to ping %s from %s."
                                             % (ping_ip, bridge_name))
                else:
                    # Skip ping test
                    logging.debug("Skip ping test as %s has no IP address",
                                  bridge_name)
                list_option = ""
                if "no-start" in bridge_option:
                    list_option = "--inactive"
                if libvirt.check_iface(bridge_name, "exists", list_option):
                    # Unbridge
                    result = virsh.iface_unbridge(bridge_name, unbridge_option)
                    libvirt.check_exit_status(result, unbridge_status_error)
                    if not unbridge_status_error:
                        unbridge_check()
                else:
                    raise error.TestFail("%s is not present." % bridge_name)
        else:
            # Unbridge without creating bridge, only for negative test now
            result = virsh.iface_unbridge(bridge_name, unbridge_option)
            libvirt.check_exit_status(result, unbridge_status_error)
            if not unbridge_status_error:
                unbridge_check()
    finally:
        if create_bridge and check_iface:
            if libvirt.check_iface(bridge_name, "exists", "--all"):
                virsh.iface_unbridge(bridge_name)
            if os.path.exists(iface_script_bk):
                utils.run("mv %s %s" % (iface_script_bk, iface_script))
            if iface_is_up:
                # Need reload script
                utils.run("ifdown %s" % iface_name)
                utils.run("ifup %s" % iface_name)
            else:
                net_iface.down()
            # Remove the newly created bridge if it still exists
            try:
                utils_net.bring_down_ifname(bridge_name)
                utils.run("brctl delbr %s" % bridge_name)
            except utils_net.TAPBringDownError:
                pass
        if NM_is_running:
            NM_service.start()
def run(test, params, env):
    """
    This test cover two volume commands: vol-clone and vol-wipe.

    1. Create a given type pool.
    2. Create a given format volume in the pool.
    3. Clone the new create volume.
    4. Wipe the new clone volume.
    5. Delete the volume and pool.
    """

    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    if not os.path.dirname(pool_target):
        pool_target = os.path.join(test.tmpdir, pool_target)
    emulated_image = params.get("emulated_image")
    emulated_image_size = params.get("emulated_image_size")
    vol_name = params.get("vol_name")
    new_vol_name = params.get("new_vol_name")
    vol_capability = params.get("vol_capability")
    vol_format = params.get("vol_format")
    clone_option = params.get("clone_option", "")
    wipe_algorithms = params.get("wipe_algorithms")

    if virsh.has_command_help_match("vol-wipe", "--prealloc-metadata") is None:
        if "prealloc-metadata" in clone_option:
            raise error.TestNAError("Option --prealloc-metadata "
                                    "is not supported.")

    # Using algorithms other than 'zero' needs scrub installed.
    try:
        utils_misc.find_command('scrub')
    except ValueError:
        logging.warning("Can't locate scrub binary, only 'zero' algorithm "
                        "is used.")
        valid_algorithms = ["zero"]
    else:
        valid_algorithms = [
            "zero", "nnsa", "dod", "bsi", "gutmann", "schneier", "pfitzner7",
            "pfitzner33", "random"
        ]

    # Choose an algorithm randomly
    if wipe_algorithms:
        alg = random.choice(wipe_algorithms.split())
    else:
        alg = random.choice(valid_algorithms)

    clone_status_error = "yes" == params.get("clone_status_error", "no")
    wipe_status_error = "yes" == params.get("wipe_status_error", "no")

    # libvirt acl polkit related params
    uri = params.get("virsh_uri")
    unpri_user = params.get('unprivileged_user')
    if unpri_user:
        if unpri_user.count('EXAMPLE'):
            unpri_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current" +
                                    " libvirt version.")

    libv_pvt = libvirt.PoolVolumeTest(test, params)
    try:
        libv_pool = libvirt_storage.StoragePool()
        pool_rename_times = 0
        # Rename the pool if one with the given name exists (at most 5 times)
        while libv_pool.pool_exists(pool_name) and pool_rename_times < 5:
            logging.debug("Pool '%s' already exists.", pool_name)
            pool_name = pool_name + "_t"
            logging.debug("Using a new name '%s' to define pool.", pool_name)
            pool_rename_times += 1
        else:
            # Create a new pool
            libv_pvt.pre_pool(pool_name, pool_type, pool_target,
                              emulated_image, emulated_image_size)

        # Create a new volume
        libv_pvt.pre_vol(vol_name=vol_name,
                         vol_format=vol_format,
                         capacity=vol_capability,
                         allocation=None,
                         pool_name=pool_name)
        libv_vol = libvirt_storage.PoolVolume(pool_name)
        vol_info = libv_vol.volume_info(vol_name)
        for key in vol_info:
            logging.debug("Original volume info: %s = %s", key, vol_info[key])

        # Metadata preallocation is not supported for block volumes
        if vol_info["Type"] == "block" and clone_option.count(
                "prealloc-metadata"):
            clone_status_error = True

        # Clone volume
        clone_result = virsh.vol_clone(vol_name,
                                       new_vol_name,
                                       pool_name,
                                       clone_option,
                                       debug=True)
        if not clone_status_error:
            if clone_result.exit_status != 0:
                raise error.TestFail("Clone volume fail:\n%s" %
                                     clone_result.stdout.strip())
            else:
                vol_info = libv_vol.volume_info(new_vol_name)
                for key in vol_info:
                    logging.debug("Cloned volume info: %s = %s", key,
                                  vol_info[key])
                logging.debug("Clone volume successfully.")
                # Wipe the new clone volume
                if alg:
                    logging.debug("Wiping volume by '%s' algorithm", alg)
                wipe_result = virsh.vol_wipe(new_vol_name,
                                             pool_name,
                                             alg,
                                             unprivileged_user=unpri_user,
                                             uri=uri,
                                             debug=True)
                if not wipe_status_error:
                    if wipe_result.exit_status != 0:
                        raise error.TestFail("Wipe volume fail:\n%s" %
                                             clone_result.stdout.strip())
                    else:
                        vol_info = libv_vol.volume_info(new_vol_name)
                        for key in vol_info:
                            logging.debug("Wiped volume info: %s = %s", key,
                                          vol_info[key])
                        logging.debug("Wipe volume successfully.")
                elif wipe_status_error and wipe_result.exit_status == 0:
                    raise error.TestFail("Expect wipe volume fail, but run"
                                         " successfully.")
        elif clone_status_error and clone_result.exit_status == 0:
            raise error.TestFail("Expect clone volume fail, but run"
                                 " successfully.")
    finally:
        # Clean up
        try:
            libv_pvt.cleanup_pool(pool_name, pool_type, pool_target,
                                  emulated_image)
        except error.TestFail, detail:
            logging.error(str(detail))
Exemple #30
0
def run(test, params, env):
    """
    Test virsh interface related commands.

    (1) If using a given existing interface for testing (e.g. lo or ethX):
        1.1 Dumpxml for the interface (with --inactive option)
        1.2 Destroy the interface
        1.3 Undefine the interface
    (2) Define an interface from XML file
    (3) List interfaces with '--inactive' option
    (4) Start the interface
    (5) List interfaces with no option
    (6) Dumpxml for the interface
    (7) Get interface MAC address by interface name
    (8) Get interface name by interface MAC address
    (9) Delete the interface if an existing interface was not used for testing
        9.1 Destroy the interface
        9.2 Undefine the interface

    Caveat: this test may affect the host network, so the loopback (lo)
    device is used by default. You can specify the interface you want, but be
    careful.
    """

    iface_name = params.get("iface_name", "ENTER.BRIDGE.NAME")
    iface_xml = params.get("iface_xml")
    iface_type = params.get("iface_type", "ethernet")
    iface_pro = params.get("iface_pro", "")
    iface_eth = params.get("iface_eth", "")
    iface_tag = params.get("iface_tag", "0")
    if iface_type == "vlan":
        iface_name = iface_eth + "." + iface_tag
    iface_eth_using = "yes" == params.get("iface_eth_using", "no")
    ping_ip = params.get("ping_ip", "localhost")
    use_exist_iface = "yes" == params.get("use_exist_iface", "no")
    status_error = "yes" == params.get("status_error", "no")
    net_restart = "yes" == params.get("iface_net_restart", "no")
    list_dumpxml_acl = "yes" == params.get("list_dumpxml_acl", "no")
    if ping_ip.count("ENTER"):
        raise error.TestNAError("Please input a valid ip address")
    if iface_name.count("ENTER"):
        raise error.TestNAError("Please input a existing bridge/ethernet name")

    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user', "EXAMPLE")
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    + " libvirt version.")

    virsh_dargs = {'debug': True}
    list_dumpxml_dargs = {'debug': True}
    if params.get('setup_libvirt_polkit') == 'yes':
        if not list_dumpxml_acl:
            virsh_dargs['uri'] = uri
            virsh_dargs['unprivileged_user'] = unprivileged_user
        else:
            list_dumpxml_dargs['uri'] = uri
            list_dumpxml_dargs['unprivileged_user'] = unprivileged_user
            list_dumpxml_dargs['ignore_status'] = False

    # acl api negative testing params
    write_save_status_error = "yes" == params.get("write_save_status_error",
                                                  "no")
    start_status_error = "yes" == params.get("start_status_error", "no")
    stop_status_error = "yes" == params.get("stop_status_error", "no")
    delete_status_error = "yes" == params.get("delete_status_error", "no")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    if vm:
        xml_bak = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    iface_script = NETWORK_SCRIPT + iface_name
    iface_script_bk = os.path.join(test.tmpdir, "iface-%s.bk" % iface_name)
    net_bridge = utils_net.Bridge()
    if use_exist_iface:
        if iface_type == "bridge":
            if iface_name not in net_bridge.list_br():
                raise error.TestError("Bridge '%s' not exists" % iface_name)
            ifaces = net_bridge.get_structure()[iface_name]
            if len(ifaces) < 1:
                # In this situation, dhcp may not be able to get an ip address
                # unless static addressing is used, so we'd better skip such a case
                raise error.TestNAError("Bridge '%s' has no bridged"
                                        " interface, so it may not be able"
                                        " to get an ip address" % iface_name)
    net_iface = utils_net.Interface(name=iface_name)
    iface_is_up = True
    list_option = "--all"
    if use_exist_iface:
        if not libvirt.check_iface(iface_name, "exists", "--all"):
            raise error.TestError("Interface '%s' not exists" % iface_name)
        iface_xml = os.path.join(test.tmpdir, "iface.xml.tmp")
        iface_is_up = net_iface.is_up()
    else:
        # Note: if not using an already existing interface, iface_name must
        # be equal to the value specified in the XML file
        if libvirt.check_iface(iface_name, "exists", "--all"):
            raise error.TestError("Interface '%s' already exists" % iface_name)
        if not iface_xml:
            raise error.TestError("XML file is needed.")
        iface_xml = os.path.join(test.tmpdir, iface_xml)
        create_xml_file(iface_xml, params)

    # Stop NetworkManager as it may conflict with virsh iface commands
    try:
        NM = utils_misc.find_command("NetworkManager")
    except ValueError:
        logging.debug("No NetworkManager service.")
        NM = None
    NM_is_running = False
    if NM is not None:
        NM_service = service.Factory.create_service("NetworkManager")
        NM_is_running = NM_service.status()
        if NM_is_running:
            NM_service.stop()

    # run test cases
    try:
        if use_exist_iface:
            # back up the interface script
            utils.run("cp %s %s" % (iface_script, iface_script_bk))
            # step 1.1
            # dumpxml for interface
            if list_dumpxml_acl:
                virsh.iface_list(**list_dumpxml_dargs)
            xml = virsh.iface_dumpxml(iface_name, "--inactive",
                                      to_file=iface_xml,
                                      **list_dumpxml_dargs)
            # Step 1.2
            # Destroy interface
            if iface_is_up:
                result = virsh.iface_destroy(iface_name, **virsh_dargs)
                if (params.get('setup_libvirt_polkit') == 'yes' and
                        stop_status_error):
                    # acl_test negative test
                    libvirt.check_exit_status(result, stop_status_error)
                    virsh.iface_destroy(iface_name, debug=True)
                else:
                    libvirt.check_exit_status(result, status_error)

            # Step 1.3
            # Undefine interface
            result = virsh.iface_undefine(iface_name, **virsh_dargs)
            if (params.get('setup_libvirt_polkit') == 'yes' and
                    delete_status_error):
                # acl_test negative test
                libvirt.check_exit_status(result, delete_status_error)
                virsh.iface_undefine(iface_name, debug=True)
            else:
                libvirt.check_exit_status(result, status_error)
            if not status_error:
                if libvirt.check_iface(iface_name, "exists", list_option):
                    raise error.TestFail("%s is still present." % iface_name)

        # Step 2
        # Define interface
        result = virsh.iface_define(iface_xml, **virsh_dargs)
        if (params.get('setup_libvirt_polkit') == 'yes' and
                write_save_status_error):
            # acl_test negative test
            libvirt.check_exit_status(result, write_save_status_error)
            virsh.iface_define(iface_xml, debug=True)
        elif iface_type == "bond" and not ping_ip:
            libvirt.check_exit_status(result, True)
            return
        else:
            libvirt.check_exit_status(result, status_error)

        if net_restart:
            network = service.Factory.create_service("network")
            network.restart()

        # After network restart, (ethernet)interface will be started
        if (not net_restart and iface_type in ("bridge", "ethernet")) or\
           (not use_exist_iface and iface_type in ("vlan", "bond")):
            # Step 3
            # List inactive interfaces
            list_option = "--inactive"
            if not status_error:
                if not libvirt.check_iface(iface_name, "exists", list_option):
                    raise error.TestFail("Fail to find %s." % iface_name)

            # Step 4
            # Start interface
            result = virsh.iface_start(iface_name, **virsh_dargs)
            if (params.get('setup_libvirt_polkit') == 'yes' and
                    start_status_error):
                # acl_test negative test
                libvirt.check_exit_status(result, start_status_error)
                virsh.iface_start(iface_name, debug=True)
            elif (not net_restart and not use_exist_iface and
                    (iface_type == "ethernet" and iface_pro in ["", "dhcp"] or
                        iface_type == "bridge" and iface_pro == "dhcp")):
                libvirt.check_exit_status(result, True)
            else:
                libvirt.check_exit_status(result, status_error)
            if not status_error:
                iface_ip = net_iface.get_ip()
                ping_ip = ping_ip if not iface_ip else iface_ip
                if ping_ip:
                    if not libvirt.check_iface(iface_name, "ping", ping_ip):
                        raise error.TestFail("Ping %s fail." % ping_ip)

        # Step 5
        # List active interfaces
        if use_exist_iface or\
           (iface_pro != "dhcp" and iface_type == "bridge") or\
           (iface_eth_using and iface_type == "vlan"):
            list_option = ""
            if not status_error:
                if not libvirt.check_iface(iface_name, "exists", list_option):
                    raise error.TestFail("Fail to find %s in active "
                                         "interface list" % iface_name)
            if vm:
                iface_mac_list = vm_xml.VMXML.get_iface_dev(vm_name)
                # Before test, detach all interfaces in guest
                for mac in iface_mac_list:
                    iface_info = vm_xml.VMXML.get_iface_by_mac(vm_name, mac)
                    type = iface_info.get('type')
                    virsh.detach_interface(vm_name,
                                           "--type %s --mac %s"
                                           " --config" % (type, mac))
                virsh.attach_interface(vm_name,
                                       "--type %s --source %s"
                                       " --config" % (iface_type, iface_name))
                vm.start()
                try:
                    # Test if guest can be login
                    vm.wait_for_login()
                except remote.LoginError:
                    raise error.TestFail("Cannot login guest with %s" %
                                         iface_name)

        # Step 6
        # Dumpxml for interface
        if list_dumpxml_acl:
            virsh.iface_list(**list_dumpxml_dargs)
        xml = virsh.iface_dumpxml(iface_name, "", to_file="",
                                  **list_dumpxml_dargs)
        logging.debug("Interface '%s' XML:\n%s", iface_name, xml)

        # Step 7
        # Get interface MAC address by name
        result = virsh.iface_mac(iface_name, debug=True)
        libvirt.check_exit_status(result, status_error)
        if not status_error and result.stdout.strip():
            if not libvirt.check_iface(iface_name, "mac",
                                       result.stdout.strip()):
                raise error.TestFail("Mac address check fail")

        # Step 8
        # Get interface name by MAC address
        # Bridge's Mac equal to bridged interface's mac
        if iface_type not in ("bridge", "vlan") and result.stdout.strip():
            iface_mac = net_iface.get_mac()
            result = virsh.iface_name(iface_mac, debug=True)
            libvirt.check_exit_status(result, status_error)

        # Step 9
        if not use_exist_iface:
            # Step 9.1
            # Destroy interface
            result = virsh.iface_destroy(iface_name, **virsh_dargs)
            if (params.get('setup_libvirt_polkit') == 'yes' and
                    stop_status_error):
                # acl_test negative test
                libvirt.check_exit_status(result, stop_status_error)
                virsh.iface_destroy(iface_name, debug=True)
            elif (not net_restart and iface_type == "ethernet"
                    and iface_pro in ["", "dhcp"] or iface_type == "bridge"
                    and iface_pro == "dhcp"):
                libvirt.check_exit_status(result, True)
            else:
                libvirt.check_exit_status(result, status_error)

            # Step 9.2
            # Undefine interface
            result = virsh.iface_undefine(iface_name, **virsh_dargs)
            if (params.get('setup_libvirt_polkit') == 'yes' and
                    delete_status_error):
                # acl_test negative test
                libvirt.check_exit_status(result, delete_status_error)
                virsh.iface_undefine(iface_name, debug=True)
            else:
                libvirt.check_exit_status(result, status_error)
            list_option = "--all"
            if not status_error:
                if libvirt.check_iface(iface_name, "exists", list_option):
                    raise error.TestFail("%s is still present." % iface_name)
    finally:
        if os.path.exists(iface_xml):
            os.remove(iface_xml)
        if os.path.exists(iface_script):
            os.remove(iface_script)

        if use_exist_iface:
            if not os.path.exists(iface_script):
                utils.run("mv %s %s" % (iface_script_bk, iface_script))
            if iface_is_up and\
               not libvirt.check_iface(iface_name, "exists", ""):
                # Need reload script
                utils.run("ifup %s" % iface_name)
            elif not iface_is_up and libvirt.check_iface(iface_name,
                                                         "exists", ""):
                net_iface.down()
            if vm:
                xml_bak.sync()
        else:
            if libvirt.check_iface(iface_name, "exists", "--all"):
                # Remove the interface
                try:
                    utils_net.bring_down_ifname(iface_name)
                except utils_net.TAPBringDownError:
                    pass
            if iface_type == "bridge":
                if iface_name in net_bridge.list_br():
                    try:
                        net_bridge.del_bridge(iface_name)
                    except IOError:
                        pass
        if NM_is_running:
            NM_service.start()
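

# NOTE: create_xml_file() used above is defined elsewhere in this test
# module; it builds the interface XML that `virsh iface-define` consumes
# from the iface_* params. A rough, hypothetical sketch for the simple
# ethernet/dhcp case only (the real helper also covers bridge, vlan and
# bond types):
def create_xml_file(xml_path, params):
    """
    Hypothetical sketch: write a minimal ethernet interface XML file.
    """
    iface_name = params.get("iface_name", "eth0")
    iface_pro = params.get("iface_pro", "")
    xml = "<interface type='ethernet' name='%s'>\n" % iface_name
    xml += "  <start mode='onboot'/>\n"
    if iface_pro == "dhcp":
        xml += "  <protocol family='ipv4'>\n    <dhcp/>\n  </protocol>\n"
    xml += "</interface>\n"
    xml_file = open(xml_path, "w")
    try:
        xml_file.write(xml)
    finally:
        xml_file.close()
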
def run(test, params, env):
    """
    Test virsh iface-bridge and iface-unbridge commands.

    (1) Bridge an existing network device(iface-bridge).
    (2) Unbridge a network device(iface-unbridge).
    """

    iface_name = params.get("iface_name")
    bridge_name = params.get("bridge_name")
    ping_ip = params.get("ping_ip", "")
    bridge_option = params.get("bridge_option")
    unbridge_option = params.get("unbridge_option")
    bridge_delay = "yes" == params.get("bridge_delay", "no")
    delay_num = params.get("delay_num", "0")
    create_bridge = "yes" == params.get("create_bridge", "yes")
    bridge_status_error = "yes" == params.get("bridge_status_error", "no")
    unbridge_status_error = "yes" == params.get("unbridge_status_error", "no")
    iface_script = NETWORK_SCRIPT + iface_name
    iface_script_bk = os.path.join(test.tmpdir, "iface-%s.bk" % iface_name)
    # Make sure the interface exists
    if not libvirt.check_iface(iface_name, "exists", "--all"):
        raise error.TestNAError("Interface '%s' does not exist" % iface_name)
    net_iface = utils_net.Interface(name=iface_name)
    iface_is_up = net_iface.is_up()

    # Make sure the bridge name not exists
    net_bridge = utils_net.Bridge()
    if bridge_name in net_bridge.list_br():
        raise error.TestNAError("Bridge '%s' already exists" % bridge_name)

    # Stop NetworkManager service
    try:
        NM = utils_misc.find_command("NetworkManager")
    except ValueError:
        logging.debug("No NetworkManager service.")
        NM = None
    NM_is_running = False
    if NM is not None:
        NM_service = service.Factory.create_service("NetworkManager")
        NM_is_running = NM_service.status()
        if NM_is_running:
            NM_service.stop()

    # Back up the interface script
    utils.run("cp %s %s" % (iface_script, iface_script_bk))

    def unbridge_check():
        """
        Check the result after do unbridge.
        """
        list_option = "--all"
        if libvirt.check_iface(bridge_name, "exists", list_option):
            raise error.TestFail("%s is still present." % bridge_name)
        if "no-start" in unbridge_option:
            list_option = "--inactive"
        if not libvirt.check_iface(iface_name, "exists", list_option):
            raise error.TestFail("%s is not present." % iface_name)

    if bridge_delay:
        bridge_option += " --delay %s" % delay_num
    # Run test
    try:
        if create_bridge:
            # Create bridge
            result = virsh.iface_bridge(iface_name, bridge_name, bridge_option)
            libvirt.check_exit_status(result, bridge_status_error)
            if not bridge_status_error:
                # Get the newly created bridge's IP address
                try:
                    br_ip = utils_net.get_ip_address_by_interface(bridge_name)
                except:
                    br_ip = ""
                # Do ping test only if the bridge has an IP and ping_ip is not empty
                if br_ip and ping_ip:
                    if not libvirt.check_iface(bridge_name, "ping", ping_ip):
                        raise error.TestFail("Fail to ping %s from %s." %
                                             (ping_ip, bridge_name))
                else:
                    # Skip ping test
                    logging.debug("Skip ping test as %s has no IP address",
                                  bridge_name)
                list_option = ""
                if "no-start" in bridge_option:
                    list_option = "--inactive"
                if libvirt.check_iface(bridge_name, "exists", list_option):
                    # Unbridge
                    result = virsh.iface_unbridge(bridge_name, unbridge_option)
                    libvirt.check_exit_status(result, unbridge_status_error)
                    if not unbridge_status_error:
                        unbridge_check()
                else:
                    raise error.TestFail("%s is not present." % bridge_name)
        else:
            # Unbridge without creating bridge, only for negative test now
            result = virsh.iface_unbridge(bridge_name, unbridge_option)
            libvirt.check_exit_status(result, unbridge_status_error)
            if not unbridge_status_error:
                unbridge_check()
    finally:
        if create_bridge:
            if libvirt.check_iface(bridge_name, "exists", "--all"):
                virsh.iface_unbridge(bridge_name)
            if not os.path.exists(iface_script):
                utils.run("mv %s %s" % (iface_script_bk, iface_script))
            if iface_is_up:
                # Need reload script
                utils.run("ifup %s" % iface_name)
            else:
                net_iface.down()
            # Remove the newly created bridge if it still exists
            try:
                utils_net.bring_down_ifname(bridge_name)
                utils.run("brctl delbr %s" % bridge_name)
            except utils_net.TAPBringDownError:
                pass
        if NM_is_running:
            NM_service.start()
Exemple #32
0
def run(test, params, env):
    """
    Test virsh interface related commands.

    (1) If using a given existing interface for testing (e.g. lo or ethX):
        1.1 Dumpxml for the interface (with --inactive option)
        1.2 Destroy the interface
        1.3 Undefine the interface
    (2) Define an interface from XML file
    (3) List interfaces with '--inactive' option
    (4) Start the interface
    (5) List interfaces with no option
    (6) Dumpxml for the interface
    (7) Get interface MAC address by interface name
    (8) Get interface name by interface MAC address
    (9) Delete the interface if an existing interface was not used for testing
        9.1 Destroy the interface
        9.2 Undefine the interface

    Caveat: this test may affect the host network, so the loopback (lo)
    device is used by default. You can specify the interface you want, but be
    careful.
    """

    iface_name = params.get("iface_name")
    iface_xml = params.get("iface_xml")
    iface_type = params.get("iface_type", "ethernet")
    iface_pro = params.get("iface_pro", "")
    iface_eth = params.get("iface_eth", "")
    iface_tag = params.get("iface_tag", "0")
    if iface_type == "vlan":
        iface_name = iface_eth + "." + iface_tag
    iface_eth_using = "yes" == params.get("iface_eth_using", "no")
    ping_ip = params.get("ping_ip", "localhost")
    use_exist_iface = "yes" == params.get("use_exist_iface", "no")
    status_error = "yes" == params.get("status_error", "no")
    net_restart = "yes" == params.get("iface_net_restart", "no")
    if ping_ip.count("ENTER"):
        raise error.TestNAError("Please input a valid ip address")
    if iface_name.count("ENTER"):
        raise error.TestNAError("Please input a existing bridge/ethernet name")
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    if vm:
        xml_bak = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    iface_script = NETWORK_SCRIPT + iface_name
    iface_script_bk = os.path.join(test.tmpdir, "iface-%s.bk" % iface_name)
    net_bridge = utils_net.Bridge()
    if use_exist_iface:
        if iface_type == "bridge":
            if iface_name not in net_bridge.list_br():
                raise error.TestError("Bridge '%s' not exists" % iface_name)
            ifaces = net_bridge.get_structure()[iface_name]
            if len(ifaces) < 1:
                # In this situation DHCP may not be able to get an IP
                # address; unless a static address is used, it is better
                # to skip such a case
                raise error.TestNAError("Bridge '%s' has no bridged"
                                        " interface, so it may not get an"
                                        " IP address" % iface_name)
    net_iface = utils_net.Interface(name=iface_name)
    iface_is_up = True
    list_option = "--all"
    if use_exist_iface:
        if not libvirt.check_iface(iface_name, "exists", "--all"):
            raise error.TestError("Interface '%s' not exists" % iface_name)
        iface_xml = os.path.join(test.tmpdir, "iface.xml.tmp")
        iface_is_up = net_iface.is_up()
    else:
        # Note: if not using an already existing interface, iface_name must
        # equal the value specified in the XML file
        if libvirt.check_iface(iface_name, "exists", "--all"):
            raise error.TestError("Interface '%s' already exists" % iface_name)
        if not iface_xml:
            raise error.TestError("XML file is needed.")
        iface_xml = os.path.join(test.tmpdir, iface_xml)
        create_xml_file(iface_xml, params)

    # Stop NetworkManager, as it may conflict with virsh iface commands
    try:
        NM = utils_misc.find_command("NetworkManager")
    except ValueError:
        logging.debug("No NetworkManager service.")
        NM = None
    NM_is_running = False
    if NM is not None:
        NM_service = service.Factory.create_service("NetworkManager")
        NM_is_running = NM_service.status()
        if NM_is_running:
            NM_service.stop()

    # run test cases
    try:
        if use_exist_iface:
            # back up the interface script
            utils.run("cp %s %s" % (iface_script, iface_script_bk))
            # step 1.1
            # dumpxml for interface
            xml = virsh.iface_dumpxml(iface_name,
                                      "--inactive",
                                      to_file=iface_xml,
                                      debug=True)
            # Step 1.2
            # Destroy interface
            if iface_is_up:
                result = virsh.iface_destroy(iface_name, debug=True)
                libvirt.check_exit_status(result, status_error)

            # Step 1.3
            # Undefine interface
            result = virsh.iface_undefine(iface_name, debug=True)
            libvirt.check_exit_status(result, status_error)
            if not status_error:
                if libvirt.check_iface(iface_name, "exists", list_option):
                    raise error.TestFail("%s is still present." % iface_name)

        # Step 2
        # Define interface
        result = virsh.iface_define(iface_xml, debug=True)
        if iface_type == "bond" and not ping_ip:
            libvirt.check_exit_status(result, True)
            return
        else:
            libvirt.check_exit_status(result, status_error)

        if net_restart:
            network = service.Factory.create_service("network")
            network.restart()

        # After a network restart, the (ethernet) interface will be started
        if (not net_restart and iface_type in ("bridge", "ethernet")) or\
           (not use_exist_iface and iface_type in ("vlan", "bond")):
            # Step 3
            # List inactive interfaces
            list_option = "--inactive"
            if not status_error:
                if not libvirt.check_iface(iface_name, "exists", list_option):
                    raise error.TestFail("Fail to find %s." % iface_name)

            # Step 4
            # Start interface
            result = virsh.iface_start(iface_name, debug=True)
            if not net_restart and not use_exist_iface and\
               (iface_type == "ethernet" and iface_pro in ["", "dhcp"] or
                    iface_type == "bridge" and iface_pro == "dhcp"):
                libvirt.check_exit_status(result, True)
            else:
                libvirt.check_exit_status(result, status_error)
            if not status_error:
                iface_ip = net_iface.get_ip()
                ping_ip = ping_ip if not iface_ip else iface_ip
                if ping_ip:
                    if not libvirt.check_iface(iface_name, "ping", ping_ip):
                        raise error.TestFail("Ping %s fail." % ping_ip)

        # Step 5
        # List active interfaces
        if use_exist_iface or\
           (iface_pro != "dhcp" and iface_type == "bridge") or\
           (iface_eth_using and iface_type == "vlan"):
            list_option = ""
            if not status_error:
                if not libvirt.check_iface(iface_name, "exists", list_option):
                    raise error.TestFail("Fail to find %s in active "
                                         "interface list" % iface_name)
            if vm:
                iface_mac_list = vm_xml.VMXML.get_iface_dev(vm_name)
                # Before test, detach all interfaces in guest
                for mac in iface_mac_list:
                    iface_info = vm_xml.VMXML.get_iface_by_mac(vm_name, mac)
                    dev_type = iface_info.get('type')
                    virsh.detach_interface(
                        vm_name, "--type %s --mac %s"
                        " --config" % (dev_type, mac))
                virsh.attach_interface(
                    vm_name, "--type %s --source %s"
                    " --config" % (iface_type, iface_name))
                vm.start()
                try:
                    # Check that the guest can be logged into
                    vm.wait_for_login()
                except remote.LoginError:
                    raise error.TestFail("Cannot login guest with %s" %
                                         iface_name)

        # Step 6
        # Dumpxml for interface
        xml = virsh.iface_dumpxml(iface_name, "", to_file="", debug=True)
        logging.debug("Interface '%s' XML:\n%s", iface_name, xml)

        # Step 7
        # Get interface MAC address by name
        result = virsh.iface_mac(iface_name, debug=True)
        libvirt.check_exit_status(result, status_error)
        if not status_error and result.stdout.strip():
            if not libvirt.check_iface(iface_name, "mac",
                                       result.stdout.strip()):
                raise error.TestFail("Mac address check fail")

        # Step 8
        # Get interface name by MAC address
        # A bridge's MAC equals the bridged interface's MAC
        if iface_type not in ("bridge", "vlan") and result.stdout.strip():
            iface_mac = net_iface.get_mac()
            result = virsh.iface_name(iface_mac, debug=True)
            libvirt.check_exit_status(result, status_error)

        # Step 9
        if not use_exist_iface:
            # Step 9.1
            # Destroy interface
            result = virsh.iface_destroy(iface_name, debug=True)
            if not net_restart and\
               iface_type == "ethernet" and iface_pro in ["", "dhcp"] or\
               iface_type == "bridge" and iface_pro == "dhcp":
                libvirt.check_exit_status(result, True)
            else:
                libvirt.check_exit_status(result, status_error)

            # Step 9.2
            # Undefine interface
            result = virsh.iface_undefine(iface_name, debug=True)
            libvirt.check_exit_status(result, status_error)
            list_option = "--all"
            if not status_error:
                if libvirt.check_iface(iface_name, "exists", list_option):
                    raise error.TestFail("%s is still present." % iface_name)
    finally:
        if os.path.exists(iface_xml):
            os.remove(iface_xml)
        if os.path.exists(iface_script):
            os.remove(iface_script)

        if use_exist_iface:
            if not os.path.exists(iface_script):
                utils.run("mv %s %s" % (iface_script_bk, iface_script))
            if iface_is_up and\
               not libvirt.check_iface(iface_name, "exists", ""):
                # Reload the interface script
                utils.run("ifup %s" % iface_name)
            elif not iface_is_up and libvirt.check_iface(
                    iface_name, "exists", ""):
                net_iface.down()
            if vm:
                xml_bak.sync()
        else:
            if libvirt.check_iface(iface_name, "exists", "--all"):
                # Remove the interface
                try:
                    utils_net.bring_down_ifname(iface_name)
                except utils_net.TAPBringDownError:
                    pass
            if iface_type == "bridge":
                if iface_name in net_bridge.list_br():
                    try:
                        net_bridge.del_bridge(iface_name)
                    except IOError:
                        pass
        if NM_is_running:
            NM_service.start()
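
create_xml_file(iface_xml, params) used above is defined elsewhere in the test module. A minimal sketch of what such a helper could do for the simple ethernet/bridge cases, using the libvirt interface XML format consumed by 'virsh iface-define' (the real helper, and the exact parameters it reads, may differ):

def create_xml_file(xml_path, params):
    """Hypothetical sketch: write a minimal <interface> XML from params."""
    iface_type = params.get("iface_type", "ethernet")
    iface_name = params.get("iface_name")
    iface_pro = params.get("iface_pro", "")
    lines = ["<interface type='%s' name='%s'>" % (iface_type, iface_name),
             "  <start mode='onboot'/>"]
    if iface_pro:
        # e.g. iface_pro == "dhcp" -> <protocol family='ipv4'><dhcp/></protocol>
        lines.append("  <protocol family='ipv4'><%s/></protocol>" % iface_pro)
    lines.append("</interface>")
    xml_file = open(xml_path, "w")
    try:
        xml_file.write("\n".join(lines) + "\n")
    finally:
        xml_file.close()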
def run(test, params, env):
    """
    Test virsh {at|de}tach-interface command.

    1) Prepare test environment and its parameters
    2) Attach the required interface
    3) According to the test type (attach only, or attach and detach):
       a. Go on to test detach (if attaching succeeded)
       b. Return GOOD or raise TestFail (if attaching failed)
    4) Check whether the attached interface is correct:
       a. Look for it in the VM's XML
       b. Look for it inside the VM
    5) Detach the attached interface
    6) Check result
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    # Test parameters
    uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri",
                                                      "default"))
    vm_ref = params.get("at_detach_iface_vm_ref", "domname")
    options_suffix = params.get("at_detach_iface_options_suffix", "")
    status_error = "yes" == params.get("status_error", "no")
    start_vm = params.get("start_vm")
    # Attach must succeed before the detach test can run.
    correct_attach = "yes" == params.get("correct_attach", "no")

    # Interface specific attributes.
    iface_type = params.get("at_detach_iface_type", "network")
    if iface_type == "bridge":
        try:
            utils_misc.find_command("brctl")
        except ValueError:
            raise error.TestNAError("Command 'brctl' is missing. You must "
                                    "install it.")

    iface_source = params.get("at_detach_iface_source", "default")
    iface_mac = params.get("at_detach_iface_mac", "created")
    virsh_dargs = {'ignore_status': True, 'uri': uri}

    # Get a bridge name for test if iface_type is bridge.
    # If there is no bridge other than virbr0, raise TestNAError
    if iface_type == "bridge":
        host_bridge = utils_net.Bridge()
        bridge_list = host_bridge.list_br()
        try:
            bridge_list.remove("virbr0")
        except ValueError:
            pass  # If virbr0 is not in the list, just continue
        logging.debug("Useful bridges:%s", bridge_list)
        # just choosing one bridge on host.
        if len(bridge_list):
            iface_source = bridge_list[0]
        else:
            raise error.TestNAError("No useful bridge on host "
                                    "other than 'virbr0'.")

    dom_uuid = vm.get_uuid()
    dom_id = vm.get_id()

    # To confirm vm's state
    if start_vm == "no" and vm.is_alive():
        vm.destroy()

    # Test both attach and detach, so collect info
    # about both of them for the result check.
    # When something is wrong with the interface, set fail_flag to 1
    fail_flag = 0
    result_info = []

    # Set attach-interface domain
    if vm_ref == "domname":
        vm_ref = vm_name
    elif vm_ref == "domid":
        vm_ref = dom_id
    elif vm_ref == "domuuid":
        vm_ref = dom_uuid
    elif vm_ref == "hexdomid" and dom_id is not None:
        vm_ref = hex(int(dom_id))

    # Get a mac address if iface_mac is 'created'.
    if iface_mac == "created" or correct_attach:
        iface_mac = utils_net.generate_mac_address_simple()

    # Set attach-interface options and Start attach-interface test
    if correct_attach:
        options = set_options("network", "default", iface_mac, "", "attach")
        attach_result = virsh.attach_interface(vm_name, options, **virsh_dargs)
    else:
        options = set_options(iface_type, iface_source, iface_mac,
                              options_suffix, "attach")
        attach_result = virsh.attach_interface(vm_ref, options, **virsh_dargs)
    attach_status = attach_result.exit_status
    logging.debug(attach_result)

    # If attach interface failed.
    if attach_status:
        if not status_error:
            fail_flag = 1
            result_info.append("Attach Failed: %s" % attach_result)
        elif status_error:
            # Expected failure; the flag is only used to exit early,
            # it does not mean the test failed
            fail_flag = 1
    # If attach interface succeeded.
    else:
        if status_error and not correct_attach:
            fail_flag = 1
            result_info.append("Attach Success with wrong command.")

    if fail_flag and start_vm == "yes":
        vm.destroy()
        if len(result_info):
            raise error.TestFail(result_info)
        else:
            # Exit because it is error_test for attach-interface.
            return

    # Check the dumpxml output to verify the interface was added successfully.
    status, ret = check_dumpxml_iface(vm_name, iface_mac, iface_type,
                                      iface_source)
    if status:
        fail_flag = 1
        result_info.append(ret)

    # Login to domain to check new interface.
    if not vm.is_alive():
        vm.start()
    elif vm.state() == "paused":
        vm.resume()

    status, ret = login_to_check(vm, iface_mac)
    if status:
        fail_flag = 1
        result_info.append(ret)

    # Set detach-interface options
    options = set_options(iface_type, None, iface_mac, options_suffix,
                          "detach")

    # Start detach-interface test
    detach_result = virsh.detach_interface(vm_ref, options, **virsh_dargs)
    detach_status = detach_result.exit_status

    logging.debug(detach_result)

    # Clean up.
    if check_dumpxml_iface(vm_name, iface_mac) is not None:
        cleanup_options = "--type %s --mac %s" % (iface_type, iface_mac)
        virsh.detach_interface(vm_ref, cleanup_options, **virsh_dargs)

    # Shut down the VM in case later cleanup fails
    if vm.is_alive():
        vm.destroy()

    # Check results.
    if status_error:
        if detach_status == 0:
            raise error.TestFail("Detach Success with wrong command.")
    else:
        if detach_status != 0:
            raise error.TestFail("Detach Failed.")
        else:
            if fail_flag:
                raise error.TestFail("Attach-Detach Success but "
                                     "something wrong with its "
                                     "functional use:%s" % result_info)
Exemple #34
0
def run_virsh_domfstrim(test, params, env):
    """
    Test the domfstrim command and make sure all supported options work well

    Test scenarios:
    1. fstrim without options
    2. fstrim with --minimum and a large value
    3. fstrim with --minimum and a small value

    Note: --mountpoint is still not supported, so it is not tested here
    """

    if not virsh.has_help_command('domfstrim'):
        raise error.TestNAError("This version of libvirt does not support "
                                "the domfstrim test")

    try:
        utils_misc.find_command("lsscsi")
    except ValueError:
        raise error.TestNAError("Command 'lsscsi' is missing. You must "
                                "install it.")

    vm_name = params.get("main_vm", "virt-tests-vm1")
    status_error = ("yes" == params.get("status_error", "no"))
    minimum = params.get("domfstrim_minimum")
    mountpoint = params.get("domfstrim_mountpoint")
    options = params.get("domfstrim_options", "")
    is_fulltrim = ("yes" == params.get("is_fulltrim", "yes"))

    def recompose_xml(vm_name, scsi_disk):
        """
        Add scsi disk, guest agent and scsi controller for guest
        :param vm_name: Name of the domain
        :param scsi_disk: scsi_debug disk name
        """
        # Get disk path of scsi_disk
        path_cmd = "udevadm info --name %s | grep /dev/disk/by-path/ | " \
                   "cut -d' ' -f4" % scsi_disk
        disk_path = utils.run(path_cmd).stdout.strip()

        # Add qemu guest agent in guest xml
        vm_xml.VMXML.set_agent_channel(vm_name)

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        # Add scsi disk xml
        scsi_disk = Disk(type_name="block")
        scsi_disk.device = "lun"
        scsi_disk.source = scsi_disk.new_disk_source(
            **{'attrs': {
                'dev': disk_path
            }})
        scsi_disk.target = {'dev': "sdb", 'bus': "scsi"}
        vmxml.add_device(scsi_disk)

        # Add scsi disk controller
        scsi_controller = Controller("controller")
        scsi_controller.type = "scsi"
        scsi_controller.index = "0"
        scsi_controller.model = "virtio-scsi"
        vmxml.add_device(scsi_controller)

        # Redefine guest
        vmxml.sync()

    def start_guest_agent(session):
        """
        Start guest agent service in guest
        :param session: session in the guest
        """
        # Check if qemu-ga is installed; install it if missing
        check_cmd = "rpm -q qemu-guest-agent||yum install -y qemu-guest-agent"
        session.cmd(check_cmd)
        session.cmd("service qemu-guest-agent start")
        # Check if the qemu-ga really started
        stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
        if stat_ps != 0:
            raise error.TestFail("Fail to run qemu-ga in guest")

    # Do backup for origin xml
    xml_backup = vm_xml.VMXML.new_from_dumpxml(vm_name)
    try:
        vm = env.get_vm(vm_name)
        if not vm.is_alive():
            vm.start()
        session = vm.wait_for_login()
        bef_list = session.cmd_output("fdisk -l|grep ^/dev|"
                                      "cut -d' ' -f1").split("\n")
        session.close()
        vm.destroy()

        # Load module and get scsi disk name
        utils.load_module("scsi_debug lbpu=1 lbpws=1")
        scsi_disk = utils.run("lsscsi|grep scsi_debug|"
                              "awk '{print $6}'").stdout.strip()
        # Create partition
        open("/tmp/fdisk-cmd", "w").write("n\np\n\n\n\nw\n")
        output = utils.run("fdisk %s < /tmp/fdisk-cmd" %
                           scsi_disk).stdout.strip()
        logging.debug("fdisk output %s", output)
        # Format disk
        output = utils.run("mkfs.ext3 %s1" % scsi_disk).stdout.strip()
        logging.debug("output %s", output)
        # Add scsi disk and agent channel in guest
        recompose_xml(vm_name, scsi_disk)

        vm.start()
        guest_session = vm.wait_for_login()
        start_guest_agent(guest_session)
        # Get new generated disk
        af_list = guest_session.cmd_output("fdisk -l|grep ^/dev|"
                                           "cut -d' ' -f1").split('\n')
        new_disk = "".join(list(set(bef_list) ^ set(af_list)))
        # Mount disk in guest
        guest_session.cmd("mkdir -p /home/test && mount %s /home/test" %
                          new_disk)

        # Do a first fstrim to get the original map for comparison
        cmd_result = virsh.domfstrim(vm_name)
        if cmd_result.exit_status != 0:
            raise error.TestFail("Fail to do virsh domfstrim, error %s" %
                                 cmd_result.stderr)

        def get_diskmap_size():
            """
            Collect size from disk map
            :return: disk size
            """
            map_cmd = "cat /sys/bus/pseudo/drivers/scsi_debug/map"
            diskmap = utils.run(map_cmd).stdout.strip('\n\x00')
            logging.debug("disk map is %s", diskmap)
            total = 0
            for i in diskmap.split(","):
                total += int(i.split("-")[1]) - int(i.split("-")[0])
            return total

        ori_size = get_diskmap_size()

        # Write data to the disk
        dd_cmd = "dd if=/dev/zero of=/home/test/file bs=1048576 count=5"
        guest_session.cmd(dd_cmd)

        def _full_mapped():
            """
            Do full map check
            :return: True or False
            """
            full_size = get_diskmap_size()
            return (ori_size < full_size)

        if not utils_misc.wait_for(_full_mapped, timeout=30):
            raise error.TestError("Scsi map is not updated after dd command.")

        full_size = get_diskmap_size()

        # Remove disk content in guest
        guest_session.cmd("rm -rf /home/test/*")
        guest_session.close()

        def _trim_completed():
            """
            Do empty fstrim check
            :return: True or False
            """
            cmd_result = virsh.domfstrim(vm_name, minimum, mountpoint, options)
            if cmd_result.exit_status != 0:
                if not status_error:
                    raise error.TestFail(
                        "Fail to do virsh domfstrim, error %s" %
                        cmd_result.stderr)
                else:
                    logging.info("Fail to do virsh domfstrim as expected: %s",
                                 cmd_result.stderr)
                    return True

            empty_size = get_diskmap_size()

            if is_fulltrim:
                return empty_size <= ori_size
            else:
                # A partial trim is checked later
                return False

        if not utils_misc.wait_for(_trim_completed, timeout=30):
            if not is_fulltrim:
                # Get the result again to check for a partial fstrim
                empty_size = get_diskmap_size()
                if ori_size < empty_size <= full_size:
                    logging.info("Partial fstrim succeeded")
                    return True
            raise error.TestFail("fstrim failed: the scsi_debug map did not"
                                 " shrink as expected")
        logging.info("fstrim succeeded")

    finally:
        # Do domain recovery
        xml_backup.sync()
        utils.unload_module("scsi_debug")
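
The trim checks above rely on /sys/bus/pseudo/drivers/scsi_debug/map, which lists the provisioned block ranges as comma-separated start-end pairs. A standalone sketch of the same accounting done by get_diskmap_size(), handy for sanity-checking the parsing outside the test (it assumes the same map format):

def mapped_block_count(diskmap):
    """Hypothetical sketch: count mapped blocks in a scsi_debug map string."""
    total = 0
    for chunk in diskmap.strip(",\n\x00").split(","):
        if not chunk:
            continue
        start, end = chunk.split("-")
        total += int(end) - int(start)
    return total

# Example: (1024 - 0) + (4095 - 2048) = 3071 mapped blocks
assert mapped_block_count("0-1024,2048-4095") == 3071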
Exemple #35
0
def run(test, params, env):
    """
    This case checks error messages in the libvirtd log.

    Implemented test cases:
    with_iptables:  Simply start libvirtd when using iptables service
                          as firewall.
    with_firewalld: Simply start libvirtd when using firewalld service
                          as firewall.
    """
    def _error_handler(errors, line):
        """
        A callback function called when a new error line appears in the
        libvirtd log; the line is appended to the list 'errors'

        :param errors: A list to contain all error lines.
        :param line: Newly found error line in libvirtd log.
        """
        errors.append(line)

    test_type = params.get('test_type')

    old_iptables = None
    old_firewalld = None
    iptables = None
    try:
        # Setup firewall services according to test type.
        if test_type == 'with_firewalld':
            old_iptables, old_firewalld = _set_iptables_firewalld(False, True)
        elif test_type == 'with_iptables':
            old_iptables, old_firewalld = _set_iptables_firewalld(True, False)
        elif test_type == 'stop_iptables':
            # Calling _set_iptables_firewalld(False, False) on RHEL 6 would
            # get the test skipped because firewalld is not available there,
            # but the case that came from bug 716612 is mainly a RHEL 6
            # problem and should be tested, so bypass _set_iptables_firewalld
            # and stop iptables directly.
            try:
                utils_misc.find_command('iptables')
                iptables = service.Factory.create_service('iptables')
            except ValueError:
                msg = "Can't find service iptables."
                raise error.TestNAError(msg)

            utils.run('iptables-save > /tmp/iptables.save')
            if not iptables.stop():
                msg = "Can't stop service iptables"
                raise error.TestError(msg)

        try:
            errors = []
            # Run libvirt session and collect errors in log.
            libvirtd_session = LibvirtdSession(
                error_func=_error_handler,
                error_params=(errors,),
            )

            libvirt_pid = libvirtd_session.get_pid()
            libvirt_context = utils_selinux.get_context_of_process(libvirt_pid)
            logging.debug("The libvirtd pid context is: %s" % libvirt_context)

            # Check errors.
            if errors:
                logging.debug("Found errors in libvirt log:")
                for line in errors:
                    logging.debug(line)
                if test_type == 'stop_iptables':
                    for line in errors:
                        # A libvirtd process started without virt_t will
                        # fail to set iptables rules, which is expected here
                        if not ("/sbin/iptables" in line and
                                "unexpected exit status 1" in line):
                            raise error.TestFail("Found errors other than"
                                                 " iptables failure in"
                                                 " libvirt log.")
                else:
                    raise error.TestFail("Found errors in libvirt log.")
        finally:
            libvirtd_session.close()
    finally:
        # Recover services status.
        if test_type in ('with_firewalld', 'with_iptables'):
            _set_iptables_firewalld(old_iptables, old_firewalld)
        elif test_type == "stop_iptables" and iptables:
            iptables.start()
            utils.run('iptables-restore < /tmp/iptables.save')
        if os.path.exists("/tmp/iptables.save"):
            os.remove("/tmp/iptables.save")
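
LibvirtdSession above tails the libvirtd log and hands error lines to the error_func callback; its implementation is not shown here. A simplified, hypothetical stand-in that applies the same callback convention to an already-written log file (the log path and the 'error :' marker are assumptions):

def scan_libvirtd_log(log_path, error_func, error_params):
    """Hypothetical sketch: feed error-looking log lines to a callback."""
    log_file = open(log_path)
    try:
        for line in log_file:
            if "error :" in line:
                # Same calling convention as LibvirtdSession above,
                # i.e. error_func(errors, line) when error_params == (errors,)
                error_func(*(error_params + (line.rstrip(),)))
    finally:
        log_file.close()

# Usage mirroring the test: collect the error lines into a list
errors = []
scan_libvirtd_log("/var/log/libvirt/libvirtd.log",
                  lambda errs, line: errs.append(line), (errors,))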
def run(test, params, env):
    """
    This test cover two volume commands: vol-clone and vol-wipe.

    1. Create a given type pool.
    2. Create a given format volume in the pool.
    3. Clone the newly created volume.
    4. Wipe the cloned volume.
    5. Delete the volume and pool.
    """

    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    if not os.path.dirname(pool_target):
        pool_target = os.path.join(test.tmpdir, pool_target)
    emulated_image = params.get("emulated_image")
    emulated_image_size = params.get("emulated_image_size")
    vol_name = params.get("vol_name")
    new_vol_name = params.get("new_vol_name")
    vol_capability = params.get("vol_capability")
    vol_format = params.get("vol_format")
    clone_option = params.get("clone_option", "")
    wipe_algorithms = params.get("wipe_algorithms")

    if virsh.has_command_help_match("vol-wipe", "--prealloc-metadata") is None:
        if "prealloc-metadata" in clone_option:
            raise error.TestNAError("Option --prealloc-metadata "
                                    "is not supported.")

    # Using algorithms other than 'zero' requires the scrub binary.
    try:
        utils_misc.find_command('scrub')
    except ValueError:
        logging.warning("Can't locate scrub binary, only 'zero' algorithm "
                        "is used.")
        valid_algorithms = ["zero"]
    else:
        valid_algorithms = [
            "zero", "nnsa", "dod", "bsi", "gutmann", "schneier", "pfitzner7",
            "pfitzner33", "random"
        ]

    # Choose an algorithm randomly
    if wipe_algorithms:
        alg = random.choice(wipe_algorithms.split())
    else:
        alg = random.choice(valid_algorithms)

    clone_status_error = "yes" == params.get("clone_status_error", "no")
    wipe_status_error = "yes" == params.get("wipe_status_error", "no")
    setup_libvirt_polkit = "yes" == params.get("setup_libvirt_polkit")

    # libvirt acl polkit related params
    uri = params.get("virsh_uri")
    unpri_user = params.get('unprivileged_user')
    if unpri_user:
        if unpri_user.count('EXAMPLE'):
            unpri_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if setup_libvirt_polkit:
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    del_pool = True
    libv_pvt = libvirt.PoolVolumeTest(test, params)
    try:
        libv_pool = libvirt_storage.StoragePool()
        if libv_pool.pool_exists(pool_name):
            logging.debug("Use existing pool '%s'", pool_name)
            del_pool = False
        else:
            # Create a new pool
            disk_vol = []
            if pool_type == 'disk':
                disk_vol.append(params.get("pre_vol", '10M'))
            libv_pvt.pre_pool(pool_name=pool_name,
                              pool_type=pool_type,
                              pool_target=pool_target,
                              emulated_image=emulated_image,
                              image_size=emulated_image_size,
                              pre_disk_vol=disk_vol)
        libv_vol = libvirt_storage.PoolVolume(pool_name)
        if libv_vol.volume_exists(vol_name):
            logging.debug("Use exist volume '%s'", vol_name)
        elif vol_format in ['raw', 'qcow2', 'qed', 'vmdk']:
            # Create a new volume
            libv_pvt.pre_vol(vol_name=vol_name,
                             vol_format=vol_format,
                             capacity=vol_capability,
                             allocation=None,
                             pool_name=pool_name)
        elif vol_format == 'partition':
            vol_name = libv_vol.list_volumes().keys()[0]
            logging.debug("Partition %s in disk pool is volume" % vol_name)
        elif vol_format == 'sparse':
            # Create a sparse file in pool
            sparse_file = pool_target + '/' + vol_name
            cmd = "dd if=/dev/zero of=" + sparse_file
            cmd += " bs=1 count=0 seek=" + vol_capability
            utils.run(cmd)
        else:
            raise error.TestError("Unknown volume format %s" % vol_format)
        # Refresh the pool
        virsh.pool_refresh(pool_name)
        vol_info = libv_vol.volume_info(vol_name)
        for key in vol_info:
            logging.debug("Original volume info: %s = %s", key, vol_info[key])

        # Metadata preallocation is not supported for block volumes
        if vol_info["Type"] == "block" and clone_option.count(
                "prealloc-metadata"):
            clone_status_error = True

        if pool_type == "disk":
            new_vol_name = libvirt.new_disk_vol_name(pool_name)
            if new_vol_name is None:
                raise error.TestError("Fail to generate volume name")
            # update polkit rule as the volume name changed
            if setup_libvirt_polkit:
                vol_pat = r"lookup\('vol_name'\) == ('\S+')"
                new_value = "lookup('vol_name') == '%s'" % new_vol_name
                libvirt.update_polkit_rule(params, vol_pat, new_value)
        # Clone volume
        clone_result = virsh.vol_clone(vol_name,
                                       new_vol_name,
                                       pool_name,
                                       clone_option,
                                       debug=True)
        if not clone_status_error:
            if clone_result.exit_status != 0:
                raise error.TestFail("Clone volume fail:\n%s" %
                                     clone_result.stderr.strip())
            else:
                vol_info = libv_vol.volume_info(new_vol_name)
                for key in vol_info:
                    logging.debug("Cloned volume info: %s = %s", key,
                                  vol_info[key])
                logging.debug("Clone volume successfully.")
                # Wipe the new clone volume
                if alg:
                    logging.debug("Wiping volume by '%s' algorithm", alg)
                wipe_result = virsh.vol_wipe(new_vol_name,
                                             pool_name,
                                             alg,
                                             unprivileged_user=unpri_user,
                                             uri=uri,
                                             debug=True)
                unsupported_err = [
                    "Unsupported algorithm", "no such pattern sequence"
                ]
                if not wipe_status_error:
                    if wipe_result.exit_status != 0:
                        if any(err in wipe_result.stderr
                               for err in unsupported_err):
                            raise error.TestNAError(wipe_result.stderr)
                        raise error.TestFail("Wipe volume fail:\n%s" %
                                             clone_result.stdout.strip())
                    else:
                        virsh_vol_info = libv_vol.volume_info(new_vol_name)
                        for key in virsh_vol_info:
                            logging.debug("Wiped volume info(virsh): %s = %s",
                                          key, virsh_vol_info[key])
                        vol_path = virsh.vol_path(new_vol_name,
                                                  pool_name).stdout.strip()
                        qemu_vol_info = utils_misc.get_image_info(vol_path)
                        for key in qemu_vol_info:
                            logging.debug("Wiped volume info(qemu): %s = %s",
                                          key, qemu_vol_info[key])
                            if qemu_vol_info['format'] != 'raw':
                                raise error.TestFail("Expect wiped volume "
                                                     "format is raw")
                elif wipe_status_error and wipe_result.exit_status == 0:
                    raise error.TestFail("Expect wipe volume fail, but run"
                                         " successfully.")
        elif clone_status_error and clone_result.exit_status == 0:
            raise error.TestFail("Expect clone volume fail, but run"
                                 " successfully.")
    finally:
        # Clean up
        try:
            if del_pool:
                libv_pvt.cleanup_pool(pool_name, pool_type, pool_target,
                                      emulated_image)
            else:
                # Only delete the volumes
                libv_vol = libvirt_storage.PoolVolume(pool_name)
                for vol in [vol_name, new_vol_name]:
                    libv_vol.delete_volume(vol)
        except error.TestFail, detail:
            logging.error(str(detail))
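
The raw-format check after vol-wipe comes from the qemu-img metadata returned by utils_misc.get_image_info. Outside the test framework, roughly the same check could be done directly with qemu-img; a sketch assuming qemu-img is installed and the volume path is a local file (the path in the final comment is made up):

import json
import subprocess

def image_format(vol_path):
    """Hypothetical sketch: return the format reported by qemu-img info."""
    out = subprocess.check_output(
        ["qemu-img", "info", "--output=json", vol_path])
    return json.loads(out)["format"]

# A wiped volume is expected to read back as raw, e.g.:
# assert image_format("/var/lib/libvirt/images/cloned-vol") == "raw"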
def run(test, params, env):
    """
    Test virsh {at|de}tach-interface command.

    1) Prepare test environment and its parameters
    2) Attach the required interface
    3) According to the test type (attach only, or attach and detach):
       a. Go on to test detach (if attaching succeeded)
       b. Return GOOD or raise TestFail (if attaching failed)
    4) Check whether the attached interface is correct:
       a. Look for it in the VM's XML
       b. Look for it inside the VM
    5) Detach the attached interface
    6) Check result
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    # Test parameters
    uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri",
                                                      "default"))
    vm_ref = params.get("at_detach_iface_vm_ref", "domname")
    options_suffix = params.get("at_detach_iface_options_suffix", "")
    status_error = "yes" == params.get("status_error", "no")
    start_vm = params.get("start_vm")
    # Attach must succeed before the detach test can run.
    correct_attach = "yes" == params.get("correct_attach", "no")

    # Interface specific attributes.
    iface_type = params.get("at_detach_iface_type", "network")
    if iface_type == "bridge":
        try:
            utils_misc.find_command("brctl")
        except ValueError:
            raise error.TestNAError("Command 'brctl' is missing. You must "
                                    "install it.")

    iface_source = params.get("at_detach_iface_source", "default")
    iface_mac = params.get("at_detach_iface_mac", "created")
    virsh_dargs = {'ignore_status': True, 'uri': uri}

    # Get a bridge name for test if iface_type is bridge.
    # If there is no bridge other than virbr0, raise TestNAError
    if iface_type == "bridge":
        host_bridge = utils_net.Bridge()
        bridge_list = host_bridge.list_br()
        try:
            bridge_list.remove("virbr0")
        except ValueError:
            pass  # If virbr0 is not in the list, just continue
        logging.debug("Useful bridges:%s", bridge_list)
        # just choosing one bridge on host.
        if len(bridge_list):
            iface_source = bridge_list[0]
        else:
            raise error.TestNAError("No useful bridge on host "
                                    "other than 'virbr0'.")

    dom_uuid = vm.get_uuid()
    dom_id = vm.get_id()

    # To confirm vm's state
    if start_vm == "no" and vm.is_alive():
        vm.destroy()

    # Test both attach and detach, so collect info
    # about both of them for the result check.
    # When something is wrong with the interface, set fail_flag to 1
    fail_flag = 0
    result_info = []

    # Set attach-interface domain
    if vm_ref == "domname":
        vm_ref = vm_name
    elif vm_ref == "domid":
        vm_ref = dom_id
    elif vm_ref == "domuuid":
        vm_ref = dom_uuid
    elif vm_ref == "hexdomid" and dom_id is not None:
        vm_ref = hex(int(dom_id))

    # Get a mac address if iface_mac is 'created'.
    if iface_mac == "created" or correct_attach:
        iface_mac = utils_net.generate_mac_address_simple()

    # Set attach-interface options and Start attach-interface test
    if correct_attach:
        options = set_options("network", "default", iface_mac, "", "attach")
        attach_result = virsh.attach_interface(vm_name, options,
                                               **virsh_dargs)
    else:
        options = set_options(iface_type, iface_source, iface_mac,
                              options_suffix, "attach")
        attach_result = virsh.attach_interface(vm_ref, options, **virsh_dargs)
    attach_status = attach_result.exit_status
    logging.debug(attach_result)

    # If attach interface failed.
    if attach_status:
        if not status_error:
            fail_flag = 1
            result_info.append("Attach Failed: %s" % attach_result)
        elif status_error:
            # Expected failure; the flag is only used to exit early,
            # it does not mean the test failed
            fail_flag = 1
    # If attach interface succeeded.
    else:
        if status_error and not correct_attach:
            fail_flag = 1
            result_info.append("Attach Success with wrong command.")

    if fail_flag and start_vm == "yes":
        vm.destroy()
        if len(result_info):
            raise error.TestFail(result_info)
        else:
            # Exit because it is error_test for attach-interface.
            return

    # Check the dumpxml output to verify the interface was added successfully.
    status, ret = check_dumpxml_iface(
        vm_name, iface_mac, iface_type, iface_source)
    if status:
        fail_flag = 1
        result_info.append(ret)

    # Login to domain to check new interface.
    if not vm.is_alive():
        vm.start()
    elif vm.state() == "paused":
        vm.resume()

    status, ret = login_to_check(vm, iface_mac)
    if status:
        fail_flag = 1
        result_info.append(ret)

    # Set detach-interface options
    options = set_options(iface_type, None, iface_mac,
                          options_suffix, "detach")

    # Start detach-interface test
    detach_result = virsh.detach_interface(vm_ref, options, **virsh_dargs)
    detach_status = detach_result.exit_status

    logging.debug(detach_result)

    # Clean up.
    if check_dumpxml_iface(vm_name, iface_mac) is not None:
        cleanup_options = "--type %s --mac %s" % (iface_type, iface_mac)
        virsh.detach_interface(vm_ref, cleanup_options, **virsh_dargs)

    # Shut down the VM in case later cleanup fails
    if vm.is_alive():
        vm.destroy()

    # Check results.
    if status_error:
        if detach_status == 0:
            raise error.TestFail("Detach Success with wrong command.")
    else:
        if detach_status != 0:
            raise error.TestFail("Detach Failed.")
        else:
            if fail_flag:
                raise error.TestFail("Attach-Detach Success but "
                                     "something wrong with its "
                                     "functional use:%s" % result_info)
Exemple #38
0
def run(test, params, env):
    """
    Test numa tuning

    1) Positive testing
       1.1) get the current numa parameters for a running/shutoff guest
       1.2) set the current numa parameters for a running/shutoff guest
           1.2.1) set valid 'mode' parameters
           1.2.2) set valid 'nodeset' parameters
    2) Negative testing
       2.1) get numa parameters
           2.1.1) invalid options
           2.1.2) stop cgroup service
       2.2) set numa parameters
           2.2.1) invalid 'mode' parameters
           2.2.2) invalid 'nodeset' parameters
           2.2.3) change 'mode' for a running guest and 'mode' is not 'strict'
           2.2.4) change 'nodeset' for a running guest with the
                  'interleave' or 'preferred' numa mode
           2.2.5) stop cgroup service
    """

    try:
        utils_misc.find_command("numactl")
    except ValueError:
        raise error.TestNAError("Command 'numactl' is missing. You must "
                                "install it.")

    # Run test case
    vm_name = params.get("vms")
    vm = env.get_vm(vm_name)
    original_vm_xml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    cg = utils_cgroup.CgconfigService()
    status_error = params.get("status_error", "no")
    libvirtd = params.get("libvirtd", "on")
    cgconfig = params.get("cgconfig", "on")
    start_vm = params.get("start_vm", "no")
    change_parameters = params.get("change_parameters", "no")

    # Make sure vm is down if start not requested
    if start_vm == "no" and vm.is_alive():
        vm.destroy()

    # positive and negative testing #########

    cgstop = False
    try:
        if status_error == "no":
            if change_parameters == "no":
                get_numa_parameter(params, cgstop)
            else:
                set_numa_parameter(params, cgstop)
        if cgconfig == "off":
            # If the guest is running, shut it down before stopping the
            # cgconfig service; the guest will be started again after the
            # libvirtd service is restarted
            if cg.cgconfig_is_running():
                if vm.is_alive():
                    vm.destroy()
                cg.cgconfig_stop()
                cgstop = True

        # If we stopped cgconfig, restart the libvirtd service so it picks
        # up the latest cgconfig change; otherwise, with no cgconfig change,
        # restarting libvirtd is pointless
        if cgstop and libvirtd == "restart":
            try:
                utils_libvirtd.libvirtd_restart()
            finally:
                # Not running is not a good thing, but it does happen
                # and it will affect other tests
                if not utils_libvirtd.libvirtd_is_running():
                    raise error.TestNAError("libvirt service is not running!")

        # Recover previous running guest
        if (cgconfig == "off" and libvirtd == "restart" and
                not vm.is_alive() and start_vm == "yes"):
            vm.start()
        if status_error == "yes":
            if change_parameters == "no":
                get_numa_parameter(params, cgstop)
            else:
                set_numa_parameter(params, cgstop)
    finally:
        # Restore guest
        original_vm_xml.sync()

        # If we stopped cgconfig, recover it and restart libvirtd so the
        # change is recognized
        if cgstop:
            cg.cgconfig_start()
            utils_libvirtd.libvirtd_restart()
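
get_numa_parameter()/set_numa_parameter() are defined elsewhere in the test module. As an illustration only, the 'get' side boils down to reading 'virsh numatune <domain>' output; a framework-free, hypothetical sketch:

import subprocess

def read_numa_parameters(vm_name):
    """
    Hypothetical sketch: parse 'virsh numatune <domain>' output into a dict
    such as {'numa_mode': 'strict', 'numa_nodeset': '0-1'}.
    """
    out = subprocess.check_output(["virsh", "numatune", vm_name])
    result = {}
    for line in out.splitlines():
        if ":" in line:
            key, value = line.split(":", 1)
            result[key.strip()] = value.strip()
    return result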
Exemple #39
0
def run(test, params, env):
    """
    Test virsh interface related commands.

    (1) If using a given existing interface for testing (e.g. lo or ethX):
        1.1 Dumpxml for the interface (with the --inactive option)
        1.2 Destroy the interface
        1.3 Undefine the interface
    (2) Define an interface from XML file
    (3) List interfaces with the '--inactive' option
    (4) Start the interface
    (5) List interfaces with no option
    (6) Dumpxml for the interface
    (7) Get interface MAC address by interface name
    (8) Get interface name by interface MAC address
    (9) Delete the interface if not using an existing interface for testing
        9.1 Destroy the interface
        9.2 Undefine the interface

    Caveat: this test may affect the host network, so it uses the loopback
    (lo) device by default. You can specify another interface, but be
    careful.
    """

    iface_name = params.get("iface_name")
    iface_xml = params.get("iface_xml")
    ping_ip = params.get("ping_ip", "localhost")
    use_exist_iface = "yes" == params.get("use_exist_iface", "no")
    status_error = "yes" == params.get("status_error", "no")
    iface_script = NETWORK_SCRIPT + iface_name
    iface_script_bk = os.path.join(test.tmpdir, "iface-%s.bk" % iface_name)
    net_iface = utils_net.Interface(name=iface_name)
    iface_is_up = True
    list_option = "--all"
    if use_exist_iface:
        if not libvirt.check_iface(iface_name, "exists", "--all"):
            raise error.TestError("Interface '%s' not exists" % iface_name)
        iface_xml = os.path.join(test.tmpdir, "iface.xml.tmp")
        iface_is_up = net_iface.is_up()
    else:
        # Note: if not using an already existing interface, iface_name must
        # equal the value specified in the XML file
        if libvirt.check_iface(iface_name, "exists", "--all"):
            raise error.TestError("Interface '%s' already exists" % iface_name)
        if not iface_xml:
            raise error.TestError("XML file is needed.")

    # Stop NetworkManager, as it may conflict with virsh iface commands
    try:
        NM = utils_misc.find_command("NetworkManager")
    except ValueError:
        logging.debug("No NetworkManager service.")
        NM = None
    NM_is_running = False
    if NM is not None:
        NM_service = service.Factory.create_service("NetworkManager")
        NM_is_running = NM_service.status()
        if NM_is_running:
            NM_service.stop()

    # run test cases
    try:
        if use_exist_iface:
            # back up the interface script
            utils.run("cp %s %s" % (iface_script, iface_script_bk))
            # step 1.1
            # dumpxml for interface
            xml = virsh.iface_dumpxml(iface_name,
                                      "--inactive",
                                      to_file=iface_xml,
                                      debug=True)
            # Step 1.2
            # Destroy interface
            if iface_is_up:
                result = virsh.iface_destroy(iface_name, debug=True)
                libvirt.check_exit_status(result, status_error)

            # Step 1.3
            # Undefine interface
            result = virsh.iface_undefine(iface_name, debug=True)
            libvirt.check_exit_status(result, status_error)
            if not status_error:
                if libvirt.check_iface(iface_name, "exists", list_option):
                    raise error.TestFail("%s is still present." % iface_name)

        # Step 2
        # Define interface
        result = virsh.iface_define(iface_xml, debug=True)
        libvirt.check_exit_status(result, status_error)

        # Step 3
        # List inactive interfaces
        list_option = "--inactive"
        if not status_error:
            if not libvirt.check_iface(iface_name, "exists", list_option):
                raise error.TestFail("Fail to find %s." % iface_name)

        # Step 4
        # Start interface
        result = virsh.iface_start(iface_name, debug=True)
        libvirt.check_exit_status(result, status_error)
        if not status_error:
            if not libvirt.check_iface(iface_name, "ping", ping_ip):
                raise error.TestFail("Ping %s fail." % ping_ip)

        # Step 5
        # List active interfaces
        list_option = ""
        if not status_error:
            if not libvirt.check_iface(iface_name, "exists", list_option):
                raise error.TestFail(
                    "Fail to find %s in active interface list." % iface_name)

        # Step 6
        # Dumpxml for interface
        xml = virsh.iface_dumpxml(iface_name, "", to_file="", debug=True)
        logging.debug("Interface '%s' XML:\n%s", iface_name, xml)

        # Step 7
        # Get interface MAC address by name
        result = virsh.iface_mac(iface_name, debug=True)
        libvirt.check_exit_status(result, status_error)
        if not status_error:
            if not libvirt.check_iface(iface_name, "mac",
                                       result.stdout.strip()):
                raise error.TestFail("Mac address check fail")

        # Step 8
        # Get interface name by MAC address
        iface_mac = net_iface.get_mac()
        result = virsh.iface_name(iface_mac, debug=True)
        libvirt.check_exit_status(result, status_error)

        # Step 9
        if not use_exist_iface:
            # Step 9.1
            # Destroy interface
            result = virsh.iface_destroy(iface_name, debug=True)
            libvirt.check_exit_status(result, status_error)

            # Step 9.2
            # Undefine interface
            result = virsh.iface_undefine(iface_name, debug=True)
            libvirt.check_exit_status(result, status_error)
            list_option = "--all"
            if not status_error:
                if libvirt.check_iface(iface_name, "exists", list_option):
                    raise error.TestFail("%s is still present." % iface_name)
    finally:
        if use_exist_iface:
            if os.path.exists(iface_xml):
                os.remove(iface_xml)
            if not os.path.exists(iface_script):
                utils.run("mv %s %s" % (iface_script_bk, iface_script))
            if iface_is_up:
                # Reload the interface script
                utils.run("ifup %s" % iface_name)
            else:
                net_iface.down()
        else:
            if libvirt.check_iface(iface_name, "exists", "--all"):
                # Remove the interface
                if os.path.exists(iface_script):
                    os.remove(iface_script)
                utils_net.bring_down_ifname(iface_name)
        if NM_is_running:
            NM_service.start()
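
The cp/mv calls in these interface tests back up and restore the distribution's interface script; NETWORK_SCRIPT is presumably the ifcfg prefix on Red Hat style hosts (an assumption, not confirmed by this snippet). The same backup/restore can be written without shelling out; a small sketch:

import os
import shutil

# Assumption: on RHEL/Fedora hosts NETWORK_SCRIPT would be something like
# "/etc/sysconfig/network-scripts/ifcfg-"; adjust for other distributions.

def backup_iface_script(iface_script, backup_path):
    """Hypothetical sketch: copy the interface script aside."""
    shutil.copy2(iface_script, backup_path)

def restore_iface_script(iface_script, backup_path):
    """Hypothetical sketch: restore the backup only if the original is gone."""
    if not os.path.exists(iface_script) and os.path.exists(backup_path):
        shutil.move(backup_path, iface_script)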
Exemple #40
0
def _set_iptables_firewalld(iptables_status, firewalld_status):
    """
    Try to set firewalld and iptables services status.

    :param iptables_status: Whether iptables should be set active.
    :param firewalld_status: Whether firewalld should be set active.
    :return: A tuple of two booleans standing for the original status of
             iptables and firewalld.
    """
    # pylint: disable=E1103
    logging.debug("Setting firewalld and iptables services.")

    # Iptables and firewalld are two exclusive services.
    # It's impossible to start both.
    if iptables_status and firewalld_status:
        msg = "Can't active both iptables and firewalld services."
        raise error.TestNAError(msg)

    # Check the availability of both packages.
    try:
        utils_misc.find_command('iptables')
        iptables = service.Factory.create_service('iptables')
    except ValueError:
        msg = "Can't find service iptables."
        raise error.TestNAError(msg)

    try:
        utils_misc.find_command('firewalld')
        firewalld = service.Factory.create_service('firewalld')
    except ValueError:
        msg = "Can't find service firewalld."
        raise error.TestNAError(msg)

    # Back up original services status.
    old_iptables = iptables.status()
    old_firewalld = firewalld.status()

    # Stop the unwanted service first, then start the other one afterwards.
    # Directly starting one service would force the other to stop,
    # which would not be easy to handle.
    if not iptables_status and iptables.status():
        utils.run('iptables-save > /tmp/iptables.save')
        if not iptables.stop():
            msg = "Can't stop service iptables"
            raise error.TestError(msg)

    if not firewalld_status and firewalld.status():
        if not firewalld.stop():
            msg = ("Service firewalld can't be stopped. "
                   "Maybe it is masked by default. you can unmask it by "
                   "running 'systemctl unmask firewalld'.")
            raise error.TestNAError(msg)

    if iptables_status and not iptables.status():
        if not iptables.start():
            msg = "Can't start service iptables"
            raise error.TestError(msg)
        utils.run('iptables-restore < /tmp/iptables.save')

    if firewalld_status and not firewalld.status():
        if not firewalld.start():
            msg = ("Service firewalld can't be started. "
                   "Maybe it is masked by default. you can unmask it by "
                   "running 'systemctl unmask firewalld'.")
            raise error.TestNAError(msg)

    return old_iptables, old_firewalld
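
A typical call pattern for this helper, mirroring how the libvirtd log test above uses it: save the returned statuses and restore them in a finally block.

# Usage sketch following the pattern used in the libvirtd log test above
old_iptables, old_firewalld = _set_iptables_firewalld(False, True)
try:
    pass  # run the firewalld-based scenario here
finally:
    # Put both services back the way they were
    _set_iptables_firewalld(old_iptables, old_firewalld)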
def run(test, params, env):
    """
    Test command: virsh change-media.

    The command changes the media used by CD or floppy drives.

    Test steps:
    1. Prepare test environment.
    2. Perform virsh change-media operation.
    3. Recover test environment.
    4. Confirm the test result.
    """
    @error.context_aware
    def env_pre(old_iso, new_iso):
        """
        Prepare ISO image for test

        :param old_iso: source file for the insert operation
        :param new_iso: source file for the update operation
        """
        error.context("Preparing ISO images")
        utils.run("dd if=/dev/urandom of=%s/old bs=1M count=1" % iso_dir)
        utils.run("dd if=/dev/urandom of=%s/new bs=1M count=1" % iso_dir)
        utils.run("mkisofs -o %s %s/old" % (old_iso, iso_dir))
        utils.run("mkisofs -o %s %s/new" % (new_iso, iso_dir))

    @error.context_aware
    def check_media(session, target_file, action, rw_test=False):
        """
        Check guest cdrom/floppy files

        :param session: guest session
        :param target_file: the expected files
        :param action: test case action
        :param rw_test: whether to run a read/write check on the media
        """
        if action != "--eject ":
            error.context("Checking guest %s files" % target_device)
            if target_device == "hdc" or target_device == "sdc":
                mount_cmd = "mount /dev/sr0 /media"
            else:
                if session.cmd_status("ls /dev/fd0"):
                    session.cmd("mknod /dev/fd0 b 2 0")
                mount_cmd = "mount /dev/fd0 /media"
            session.cmd(mount_cmd)
            if rw_test:
                target_file = "/media/rw_test.txt"
                session.cmd("touch %s" % target_file)
                session.cmd("echo 'Hello World'> %s" % target_file)
                output = session.get_command_output("cat %s" % target_file)
                logging.debug("cat %s output: %s", target_file, output)
            else:
                session.cmd("test -f /media/%s" % target_file)
            session.cmd("umount /media")

        else:
            error.context("Ejecting guest cdrom files")
            if target_device == "hdc" or target_device == "sdc":
                if session.cmd_status("mount /dev/sr0 /media -o loop") == 32:
                    logging.info("Eject succeeded")
            else:
                if session.cmd_status("ls /dev/fd0"):
                    session.cmd("mknod /dev/fd0 b 2 0")
                if session.cmd_status("mount /dev/fd0 /media -o loop") == 32:
                    logging.info("Eject succeeded")

    def add_device(vm_name, init_source="''"):
        """
        Add device for test vm

        :param vm_name: guest name
        :param init_source: source file
        """
        if vm.is_alive():
            virsh.destroy(vm_name)

        virsh.attach_disk(vm_name, init_source,
                          target_device,
                          "--type %s --sourcetype file --config" % device_type,
                          debug=True)

    def update_device(vm_name, init_iso, options, start_vm):
        """
        Update device iso file for test case

        :param vm_name: guest name
        :param init_iso: source file
        :param options: update-device option
        :param start_vm: guest start flag
        """
        snippet = """
<disk type='file' device='%s'>
<driver name='qemu' type='raw'/>
<source file='%s'/>
<target dev='%s'/>
<readonly/>
</disk>
""" % (device_type, init_iso, target_device)
        update_iso_file = open(update_iso_xml, "w")
        update_iso_file.write(snippet)
        update_iso_file.close()

        cmd_options = "--force "
        if options == "--config" or start_vm == "no":
            cmd_options += " --config"

        # Give domain the ISO image file
        return virsh.update_device(domainarg=vm_name,
                                   filearg=update_iso_xml, flagstr=cmd_options,
                                   debug=True)

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vm_ref = params.get("change_media_vm_ref")
    action = params.get("change_media_action")
    start_vm = params.get("start_vm")
    options = params.get("change_media_options")
    device_type = params.get("change_media_device_type", "cdrom")
    target_device = params.get("change_media_target_device", "hdc")
    source_name = params.get("change_media_source")
    status_error = params.get("status_error", "no")
    check_file = params.get("change_media_check_file")
    update_iso_xml_name = params.get("change_media_update_iso_xml")
    init_iso_name = params.get("change_media_init_iso")
    old_iso_name = params.get("change_media_old_iso")
    new_iso_name = params.get("change_media_new_iso")
    source_path = params.get("change_media_source_path", "yes")

    if device_type not in ['cdrom', 'floppy']:
        raise error.TestNAError("Got a invalid device type:/n%s" % device_type)

    try:
        utils_misc.find_command("mkisofs")
    except ValueError:
        raise error.TestNAError("Command 'mkisofs' is missing. You must "
                                "install it (try 'genisoimage' package.")

    # Backup for recovery.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    iso_dir = os.path.join(data_dir.get_tmp_dir(), "tmp")
    old_iso = os.path.join(iso_dir, old_iso_name)
    new_iso = os.path.join(iso_dir, new_iso_name)
    update_iso_xml = os.path.join(iso_dir, update_iso_xml_name)
    if not os.path.exists(iso_dir):
        os.mkdir(iso_dir)
    if not init_iso_name:
        init_iso = ""
    else:
        init_iso = os.path.join(iso_dir, init_iso_name)

    if vm_ref == "name":
        vm_ref = vm_name

    env_pre(old_iso, new_iso)
    # Check domain's disk device
    disk_blk = vm_xml.VMXML.get_disk_blk(vm_name)
    logging.info("disk_blk %s", disk_blk)
    if target_device not in disk_blk:
        logging.info("Adding device")
        add_device(vm_name)

    if vm.is_alive() and start_vm == "no":
        logging.info("Destroying guest...")
        vm.destroy()

    elif vm.is_dead() and start_vm == "yes":
        logging.info("Starting guest...")
        vm.start()

    # If the test target is a floppy, SELinux must be set to Permissive mode.
    result = update_device(vm_name, init_iso, options, start_vm)

    # If SELinux is set to enforcing and we FAIL, then just SKIP
    force_SKIP = False
    if result.exit_status == 1 and utils_misc.selinux_enforcing() and \
       result.stderr.count("unable to execute QEMU command 'change':"):
        force_SKIP = True

    # Libvirt will ignore --source when action is eject
    if action == "--eject ":
        source = ""
    else:
        source = os.path.join(iso_dir, source_name)
        if source_path == "no":
            source = source_name

    # For read&write floppy test, the iso media need a writeable fs
    rw_floppy_test = "yes" == params.get("rw_floppy_test", "no")
    if rw_floppy_test:
        utils.run("mkfs.ext3 -F %s" % source)

    all_options = action + options + " " + source
    result = virsh.change_media(vm_ref, target_device,
                                all_options, ignore_status=True, debug=True)
    if status_error == "yes":
        if start_vm == "no" and vm.is_dead():
            try:
                vm.start()
            except virt_vm.VMStartError, detail:
                result.exit_status = 1
                result.stderr = str(detail)
        if start_vm == "yes" and vm.is_alive():
            vm.destroy(gracefully=False)
            try:
                vm.start()
            except virt_vm.VMStartError, detail:
                result.exit_status = 1
                result.stderr = str(detail)
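A minimal sketch of the insert/eject calls this test drives, assuming avocado-vt's virsh wrapper is importable as virttest.virsh and accepts the same (domain, device, options) arguments used above; the VM name, target device and ISO path are illustrative only:

# Illustrative sketch (not part of the original example).
from virttest import virsh


def insert_then_eject(vm_name="demo-vm", target="hdc", iso_path="/tmp/new.iso"):
    # Insert (or update) the ISO into the cdrom target, forcing the change.
    result = virsh.change_media(vm_name, target,
                                "--update --force %s" % iso_path,
                                ignore_status=True, debug=True)
    if result.exit_status != 0:
        raise RuntimeError("change-media insert failed: %s" % result.stderr)
    # Eject the media again; --source is ignored when ejecting.
    result = virsh.change_media(vm_name, target, "--eject --force",
                                ignore_status=True, debug=True)
    if result.exit_status != 0:
        raise RuntimeError("change-media eject failed: %s" % result.stderr)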
Exemple #42
0
def run(test, params, env):
    """
    This case checks error messages in the libvirtd log.

    Implemented test cases:
    with_iptables:  Simply start libvirtd when using the iptables service
                    as the firewall.
    with_firewalld: Simply start libvirtd when using the firewalld service
                    as the firewall.
    """
    def _error_handler(errors, line):
        """
        A callback function called when a new error line appears in the
        libvirtd log; the line is appended to the list 'errors'.

        :param errors: A list to contain all error lines.
        :param line: Newly found error line in libvirtd log.
        """
        errors.append(line)

    test_type = params.get('test_type')

    old_iptables = None
    old_firewalld = None
    iptables = None
    try:
        # Setup firewall services according to test type.
        if test_type == 'with_firewalld':
            old_iptables, old_firewalld = _set_iptables_firewalld(False, True)
        elif test_type == 'with_iptables':
            old_iptables, old_firewalld = _set_iptables_firewalld(True, False)
        elif test_type == 'stop_iptables':
            # Using _set_iptables_firewalld(False, False) on RHEL 6 would be
            # skipped because firewalld is not available there, but the case
            # that came from bug 716612 is mainly a RHEL 6 problem and should
            # be tested, so skip the _set_iptables_firewalld helper and stop
            # iptables directly.
            try:
                utils_misc.find_command('iptables')
                iptables = service.Factory.create_service('iptables')
            except ValueError:
                msg = "Can't find service iptables."
                raise error.TestNAError(msg)

            utils.run('iptables-save > /tmp/iptables.save')
            if not iptables.stop():
                msg = "Can't stop service iptables"
                raise error.TestError(msg)

        try:
            errors = []
            # Run libvirt session and collect errors in log.
            libvirtd_session = LibvirtdSession(
                error_func=_error_handler,
                error_params=(errors, ),
            )

            libvirt_pid = libvirtd_session.get_pid()
            libvirt_context = utils_selinux.get_context_of_process(libvirt_pid)
            logging.debug("The libvirtd pid context is: %s" % libvirt_context)

            # Check errors.
            if errors:
                logging.debug("Found errors in libvirt log:")
                for line in errors:
                    logging.debug(line)
                if test_type == 'stop_iptables':
                    for line in errors:
                        # A libvirtd process started without the virt_t
                        # context will fail to set iptables rules, which
                        # is expected here.
                        if ("/sbin/iptables" not in line or
                                "unexpected exit status 1" not in line):
                            raise error.TestFail("Found errors other than"
                                                 " iptables failure in"
                                                 " libvirt log.")
                else:
                    raise error.TestFail("Found errors in libvirt log.")
        finally:
            libvirtd_session.close()
    finally:
        # Recover services status.
        if test_type in ('with_firewalld', 'with_iptables'):
            _set_iptables_firewalld(old_iptables, old_firewalld)
        elif test_type == "stop_iptables" and iptables:
            iptables.start()
            utils.run('iptables-restore < /tmp/iptables.save')
        if os.path.exists("/tmp/iptables.save"):
            os.remove("/tmp/iptables.save")
    def compare_capabilities_xml(source):
        cap_xml = capability_xml.CapabilityXML()
        cap_xml.xml = source

        # Check that host has a non-empty UUID tag.
        xml_uuid = cap_xml.uuid
        logging.debug("Host UUID (capabilities_xml): %s" % xml_uuid)
        if xml_uuid == "":
            raise error.TestFail("The host uuid in capabilities_xml is none!")

        # Check the host arch.
        xml_arch = cap_xml.arch
        logging.debug("Host arch (capabilities_xml): %s", xml_arch)
        exp_arch = utils.run("arch", ignore_status=True).stdout.strip()
        if cmp(xml_arch, exp_arch) != 0:
            raise error.TestFail("The host arch in capabilities_xml is expected"
                                 " to be %s, but get %s" % (exp_arch, xml_arch))

        # Check the host cpu count.
        xml_cpu_count = cap_xml.cpu_count
        logging.debug("Host cpus count (capabilities_xml): %s", xml_cpu_count)
        cmd = "grep processor /proc/cpuinfo | wc -l"
        exp_cpu_count = int(utils.run(cmd, ignore_status=True).stdout.strip())
        if xml_cpu_count != exp_cpu_count:
            raise error.TestFail("Host cpus count is expected to be %s, but get "
                                 "%s" % (exp_cpu_count, xml_cpu_count))

        # Check the arch of guest supported.
        guest_capa = cap_xml.get_guest_capabilities()
        logging.debug(guest_capa)
        try:
            img = utils_misc.find_command("qemu-kvm")
        except ValueError:
            raise error.TestNAError("Cannot find qemu-kvm")
        if re.search("ppc", utils.run("arch").stdout):
            cmd = img + " --cpu ? | grep ppc"
        else:
            cmd = img + " --cpu ? | grep qemu"
        cmd_result = utils.run(cmd, ignore_status=True)
        for guest in cap_xml.xmltreefile.findall('guest'):
            guest_wordsize = guest.find('arch').find('wordsize').text
            logging.debug("Arch of guest supported (capabilities_xml):%s",
                          guest_wordsize)
            if not re.search(guest_wordsize, cmd_result.stdout.strip()):
                raise error.TestFail("The capabilities_xml gives an extra arch "
                                     "of guest to support!")

        # Check the type of hypervisor.
        first_guest = cap_xml.xmltreefile.findall('guest')[0]
        first_domain = first_guest.find('arch').findall('domain')[0]
        guest_domain_type = first_domain.get('type')
        logging.debug("Hypervisor (capabilities_xml):%s", guest_domain_type)
        cmd_result = utils.run("virsh uri", ignore_status=True)
        if not re.search(guest_domain_type, cmd_result.stdout.strip()):
            raise error.TestFail("The capabilities_xml gives an different "
                                 "hypervisor")

        # check power management support.
        try:
            pm_cmd = os_dep.command('pm-is-supported')
            pm_cap_map = {'suspend': 'suspend_mem',
                          'hibernate': 'suspend_disk',
                          'suspend-hybrid': 'suspend_hybrid',
                          }
            exp_pms = []
            for opt in pm_cap_map:
                cmd = '%s --%s' % (pm_cmd, opt)
                res = utils.run(cmd, ignore_status=True)
                if res.exit_status == 0:
                    exp_pms.append(pm_cap_map[opt])
            pms = cap_xml.power_management_list
            if set(exp_pms) != set(pms):
                raise error.TestFail("Expected supported PMs are %s, got %s "
                                     "instead." % (exp_pms, pms))
        except ValueError:
            logging.debug('Power management checking is skipped, since command '
                          'pm-is-supported is not found.')
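For context, an abridged and purely illustrative capabilities document containing the elements compare_capabilities_xml() reads (host uuid and arch, power management features, per-guest wordsize and domain type); a real `virsh capabilities` dump contains much more:

# Illustrative sketch (not part of the original example).
CAPABILITIES_SAMPLE = """
<capabilities>
  <host>
    <uuid>11111111-2222-3333-4444-555555555555</uuid>
    <cpu>
      <arch>x86_64</arch>
    </cpu>
    <power_management>
      <suspend_mem/>
      <suspend_disk/>
    </power_management>
  </host>
  <guest>
    <os_type>hvm</os_type>
    <arch name='x86_64'>
      <wordsize>64</wordsize>
      <domain type='qemu'/>
      <domain type='kvm'/>
    </arch>
  </guest>
</capabilities>
"""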
Exemple #44
0
def run_virsh_domfstrim(test, params, env):
    """
    Test domfstrim command, make sure that all supported options work well

    Test scenarios:
    1. fstrim without options
    2. fstrim with --minimum with large options
    3. fstrim with --minimum with small options

    Note: --mountpoint is not supported yet, so it is not tested here
    """

    if not virsh.has_help_command('domfstrim'):
        raise error.TestNAError("This version of libvirt does not support "
                                "the domfstrim test")

    try:
        utils_misc.find_command("lsscsi")
    except ValueError:
        raise error.TestNAError("Command 'lsscsi' is missing. You must "
                                "install it.")

    vm_name = params.get("main_vm", "virt-tests-vm1")
    status_error = ("yes" == params.get("status_error", "no"))
    minimum = params.get("domfstrim_minimum")
    mountpoint = params.get("domfstrim_mountpoint")
    options = params.get("domfstrim_options", "")
    is_fulltrim = ("yes" == params.get("is_fulltrim", "yes"))

    def recompose_xml(vm_name, scsi_disk):
        """
        Add scsi disk, guest agent and scsi controller for guest
        :param vm_name: Name of domain
        :param scsi_disk: scsi_debug disk name
        """
        # Get disk path of scsi_disk
        path_cmd = "udevadm info --name %s | grep /dev/disk/by-path/ | " \
                   "cut -d' ' -f4" % scsi_disk
        disk_path = utils.run(path_cmd).stdout.strip()

        # Add qemu guest agent in guest xml
        vm_xml.VMXML.set_agent_channel(vm_name)

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        # Add scsi disk xml
        scsi_disk = Disk(type_name="block")
        scsi_disk.device = "lun"
        scsi_disk.source = scsi_disk.new_disk_source(
                                            **{'attrs': {'dev': disk_path}})
        scsi_disk.target = {'dev': "sdb", 'bus': "scsi"}
        vmxml.add_device(scsi_disk)

        # Add scsi disk controller
        scsi_controller = Controller("controller")
        scsi_controller.type = "scsi"
        scsi_controller.index = "0"
        scsi_controller.model = "virtio-scsi"
        vmxml.add_device(scsi_controller)

        # Redefine guest
        vmxml.sync()

    def start_guest_agent(session):
        """
        Start guest agent service in guest
        :param session: session in guest
        """
        # Check if qemu-ga installed
        check_cmd = "rpm -q qemu-guest-agent||yum install -y qemu-guest-agent"
        session.cmd(check_cmd)
        session.cmd("service qemu-guest-agent start")
        # Check if the qemu-ga really started
        stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
        if stat_ps != 0:
            raise error.TestFail("Fail to run qemu-ga in guest")

    # Do backup for origin xml
    xml_backup = vm_xml.VMXML.new_from_dumpxml(vm_name)
    try:
        vm = env.get_vm(vm_name)
        if not vm.is_alive():
            vm.start()
        session = vm.wait_for_login()
        bef_list = session.cmd_output("fdisk -l|grep ^/dev|"
                                      "cut -d' ' -f1").split("\n")
        session.close()
        vm.destroy()

        # Load module and get scsi disk name
        utils.load_module("scsi_debug lbpu=1 lbpws=1")
        scsi_disk = utils.run("lsscsi|grep scsi_debug|"
                              "awk '{print $6}'").stdout.strip()
        # Create partition
        open("/tmp/fdisk-cmd", "w").write("n\np\n\n\n\nw\n")
        output = utils.run("fdisk %s < /tmp/fdisk-cmd"
                           % scsi_disk).stdout.strip()
        logging.debug("fdisk output %s", output)
        # Format disk
        output = utils.run("mkfs.ext3 %s1" % scsi_disk).stdout.strip()
        logging.debug("output %s", output)
        # Add scsi disk and agent channel in guest
        recompose_xml(vm_name, scsi_disk)

        vm.start()
        guest_session = vm.wait_for_login()
        start_guest_agent(guest_session)
        # Get new generated disk
        af_list = guest_session.cmd_output("fdisk -l|grep ^/dev|"
                                           "cut -d' ' -f1").split('\n')
        new_disk = "".join(list(set(bef_list) ^ set(af_list)))
        # Mount disk in guest
        guest_session.cmd("mkdir -p /home/test && mount %s /home/test" %
                          new_disk)

        # Do first fstrim before all to get original map for compare
        cmd_result = virsh.domfstrim(vm_name)
        if cmd_result.exit_status != 0:
            raise error.TestFail("Fail to do virsh domfstrim, error %s" %
                                 cmd_result.stderr)

        def get_diskmap_size():
            """
            Collect size from disk map
            :return: disk size
            """
            map_cmd = "cat /sys/bus/pseudo/drivers/scsi_debug/map"
            diskmap = utils.run(map_cmd).stdout.strip('\n\x00')
            logging.debug("disk map is %s", diskmap)
            sum = 0
            for i in diskmap.split(","):
                sum = sum + int(i.split("-")[1]) - int(i.split("-")[0])
            return sum

        ori_size = get_diskmap_size()

        # Write data to the disk
        dd_cmd = "dd if=/dev/zero of=/home/test/file bs=1048576 count=5"
        guest_session.cmd(dd_cmd)

        def _full_mapped():
            """
            Do full map check
            :return: True or False
            """
            full_size = get_diskmap_size()
            return (ori_size < full_size)

        if not utils_misc.wait_for(_full_mapped, timeout=30):
            raise error.TestError("Scsi map is not updated after dd command.")

        full_size = get_diskmap_size()

        # Remove disk content in guest
        guest_session.cmd("rm -rf /home/test/*")
        guest_session.close()

        def _trim_completed():
            """
            Do empty fstrim check
            :return: True or False
            """
            cmd_result = virsh.domfstrim(vm_name, minimum, mountpoint, options)
            if cmd_result.exit_status != 0:
                if not status_error:
                    raise error.TestFail("Fail to do virsh domfstrim, error %s"
                                         % cmd_result.stderr)
                else:
                    logging.info("Fail to do virsh domfstrim as expected: %s",
                                 cmd_result.stderr)
                    return True

            empty_size = get_diskmap_size()

            if is_fulltrim:
                return empty_size <= ori_size
            else:
                # A partial trim will be checked later
                return False

        if not utils_misc.wait_for(_trim_completed, timeout=30):
            if not is_fulltrim:
                # Get result again to check partly fstrim
                empty_size = get_diskmap_size()
                if ori_size < empty_size <= full_size:
                    logging.info("Success to do fstrim partly")
                    return True
            raise error.TestFail("Fail to do fstrim %s, %s")
        logging.info("Success to do fstrim")

    finally:
        # Do domain recovery
        xml_backup.sync()
        utils.unload_module("scsi_debug")
def run(test, params, env):
    """
    Test domfstrim command, make sure that all supported options work well

    Test scenarios:
    1. fstrim without options
    2. fstrim with --minimum with large options
    3. fstrim with --minimum with small options

    Note: --mountpoint is not supported yet, so it is not tested here
    """
    def recompose_xml(vm_name, scsi_disk):
        """
        Add scsi disk and scsi controller for the guest
        :param vm_name: Name of domain
        :param scsi_disk: scsi_debug disk name
        """

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disk_path = scsi_disk
        # Add scsi disk xml
        scsi_disk = Disk(type_name="block")
        scsi_disk.device = "lun"
        scsi_disk.source = scsi_disk.new_disk_source(
            **{'attrs': {'dev': disk_path}})
        scsi_disk.target = {'dev': "sdb", 'bus': "scsi"}
        find_scsi = "no"
        controllers = vmxml.xmltreefile.findall("devices/controller")
        for controller in controllers:
            if controller.get("type") == "scsi":
                find_scsi = "yes"
        vmxml.add_device(scsi_disk)

        # Add scsi disk controller
        if find_scsi == "no":
            scsi_controller = Controller("controller")
            scsi_controller.type = "scsi"
            scsi_controller.index = "0"
            scsi_controller.model = "virtio-scsi"
            vmxml.add_device(scsi_controller)

        # Redefine guest
        vmxml.sync()

    if not virsh.has_help_command('domfstrim'):
        raise error.TestNAError("This version of libvirt does not support "
                                "the domfstrim test")

    try:
        utils_misc.find_command("lsscsi")
    except ValueError:
        raise error.TestNAError("Command 'lsscsi' is missing. You must "
                                "install it.")

    vm_name = params.get("main_vm", "virt-tests-vm1")
    status_error = ("yes" == params.get("status_error", "no"))
    minimum = params.get("domfstrim_minimum")
    mountpoint = params.get("domfstrim_mountpoint")
    options = params.get("domfstrim_options", "")
    is_fulltrim = ("yes" == params.get("is_fulltrim", "yes"))
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    has_qemu_ga = not ("yes" == params.get("no_qemu_ga", "no"))
    start_qemu_ga = not ("yes" == params.get("no_start_qemu_ga", "no"))
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    # Do backup for origin xml
    xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    try:
        vm = env.get_vm(vm_name)
        if not vm.is_alive():
            vm.start()
        session = vm.wait_for_login()
        bef_list = session.cmd_output("fdisk -l|grep ^/dev|"
                                      "cut -d' ' -f1").split("\n")
        session.close()
        vm.destroy()

        # Load module and get scsi disk name
        utils.load_module("scsi_debug lbpu=1 lbpws=1")
        scsi_disk = utils.run("lsscsi|grep scsi_debug|"
                              "awk '{print $6}'").stdout.strip()
        # Create partition
        open("/tmp/fdisk-cmd", "w").write("n\np\n\n\n\nw\n")
        output = utils.run("fdisk %s < /tmp/fdisk-cmd"
                           % scsi_disk).stdout.strip()
        logging.debug("fdisk output %s", output)
        os.remove("/tmp/fdisk-cmd")
        # Format disk
        output = utils.run("mkfs.ext3 %s1" % scsi_disk).stdout.strip()
        logging.debug("output %s", output)
        # Add scsi disk in guest
        recompose_xml(vm_name, scsi_disk)

        # Prepare guest agent and start guest
        if has_qemu_ga:
            vm.prepare_guest_agent(start=start_qemu_ga)
        else:
            # Remove qemu-ga channel
            vm.prepare_guest_agent(channel=has_qemu_ga, start=False)

        guest_session = vm.wait_for_login()
        # Get new generated disk
        af_list = guest_session.cmd_output("fdisk -l|grep ^/dev|"
                                           "cut -d' ' -f1").split('\n')
        new_disk = "".join(list(set(bef_list) ^ set(af_list)))
        # Mount disk in guest
        guest_session.cmd("mkdir -p /home/test && mount %s /home/test" %
                          new_disk)

        # Do first fstrim before all to get original map for compare
        cmd_result = virsh.domfstrim(vm_name)
        if cmd_result.exit_status != 0:
            if not status_error:
                raise error.TestFail("Fail to do virsh domfstrim, error %s" %
                                     cmd_result.stderr)

        def get_diskmap_size():
            """
            Collect size from disk map
            :return: disk size
            """
            map_cmd = "cat /sys/bus/pseudo/drivers/scsi_debug/map"
            diskmap = utils.run(map_cmd).stdout.strip('\n\x00')
            logging.debug("disk map is %s", diskmap)
            sum = 0
            for i in diskmap.split(","):
                sum = sum + int(i.split("-")[1]) - int(i.split("-")[0])
            return sum

        ori_size = get_diskmap_size()

        # Write data to the disk
        dd_cmd = "dd if=/dev/zero of=/home/test/file bs=1048576 count=5"
        guest_session.cmd(dd_cmd)

        def _full_mapped():
            """
            Do full map check
            :return: True or False
            """
            full_size = get_diskmap_size()
            return (ori_size < full_size)

        if not utils_misc.wait_for(_full_mapped, timeout=30):
            raise error.TestError("Scsi map is not updated after dd command.")

        full_size = get_diskmap_size()

        # Remove disk content in guest
        guest_session.cmd("rm -rf /home/test/*")
        guest_session.close()

        def _trim_completed():
            """
            Do empty fstrim check
            :return: True or False
            """
            cmd_result = virsh.domfstrim(vm_name, minimum, mountpoint, options,
                                         unprivileged_user=unprivileged_user,
                                         uri=uri)
            if cmd_result.exit_status != 0:
                if not status_error:
                    raise error.TestFail("Fail to do virsh domfstrim, error %s"
                                         % cmd_result.stderr)
                else:
                    logging.info("Fail to do virsh domfstrim as expected: %s",
                                 cmd_result.stderr)
                    return True

            empty_size = get_diskmap_size()

            if is_fulltrim:
                return empty_size <= ori_size
            else:
                # A partial trim will be checked later
                return False

        if not utils_misc.wait_for(_trim_completed, timeout=30):
            # Get result again to check partly fstrim
            empty_size = get_diskmap_size()
            if not is_fulltrim:
                if ori_size < empty_size <= full_size:
                    logging.info("Success to do fstrim partly")
                    return True
            raise error.TestFail("Fail to do fstrim. (orignal size: %s), "
                                 "(current size: %s), (full size: %s)" %
                                 (ori_size, empty_size, full_size))
        logging.info("Success to do fstrim")

    finally:
        # Do domain recovery
        vm.shutdown()
        xml_backup.sync()
        utils.unload_module("scsi_debug")
Exemple #46
0
def run(test, params, env):
    """
    Test virsh interface related commands.

    (1) If using a given existing interface for testing (e.g. lo or ethX):
        1.1 Dumpxml for the interface(with --inactive option)
        1.2 Destroy the interface
        1.3 Undefine the interface
    (2) Define an interface from XML file
    (3) List interfaces with the '--inactive' option
    (4) Start the interface
    (5) List interfaces with no option
    (6) Dumpxml for the interface
    (7) Get interface MAC address by interface name
    (8) Get interface name by interface MAC address
    (9) Delete the interface if not using an existing interface for testing
        9.1 Destroy the interface
        9.2 Undefine the interface

    Caveat: this test may affect the host network, so the loopback (lo)
    device is used by default. You can specify the interface you want,
    but be careful.
    """

    iface_name = params.get("iface_name")
    iface_xml = params.get("iface_xml")
    ping_ip = params.get("ping_ip", "localhost")
    use_exist_iface = "yes" == params.get("use_exist_iface", "no")
    status_error = "yes" == params.get("status_error", "no")
    iface_script = NETWORK_SCRIPT + iface_name
    iface_script_bk = os.path.join(test.tmpdir, "iface-%s.bk" % iface_name)
    net_iface = utils_net.Interface(name=iface_name)
    iface_is_up = True
    list_option = "--all"
    if use_exist_iface:
        if not libvirt.check_iface(iface_name, "exists", "--all"):
            raise error.TestError("Interface '%s' not exists" % iface_name)
        iface_xml = os.path.join(test.tmpdir, "iface.xml.tmp")
        iface_is_up = net_iface.is_up()
    else:
        # Note: if not using an interface that already exists, iface_name must
        # match the value specified in the XML file
        if libvirt.check_iface(iface_name, "exists", "--all"):
            raise error.TestError("Interface '%s' already exists" % iface_name)
        if not iface_xml:
            raise error.TestError("XML file is needed.")

    # Stop NetworkManager, which may conflict with virsh iface commands
    try:
        NM = utils_misc.find_command("NetworkManager")
    except ValueError:
        logging.debug("No NetworkManager service.")
        NM = None
    NM_is_running = False
    if NM is not None:
        NM_service = service.Factory.create_service("NetworkManager")
        NM_is_running = NM_service.status()
        if NM_is_running:
            NM_service.stop()

    # run test cases
    try:
        if use_exist_iface:
            # back up the interface script
            utils.run("cp %s %s" % (iface_script, iface_script_bk))
            # step 1.1
            # dumpxml for interface
            xml = virsh.iface_dumpxml(iface_name, "--inactive",
                                      to_file=iface_xml, debug=True)
            # Step 1.2
            # Destroy interface
            if iface_is_up:
                result = virsh.iface_destroy(iface_name, debug=True)
                libvirt.check_exit_status(result, status_error)

            # Step 1.3
            # Undefine interface
            result = virsh.iface_undefine(iface_name, debug=True)
            libvirt.check_exit_status(result, status_error)
            if not status_error:
                if libvirt.check_iface(iface_name, "exists", list_option):
                    raise error.TestFail("%s is still present." % iface_name)

        # Step 2
        # Define interface
        result = virsh.iface_define(iface_xml, debug=True)
        libvirt.check_exit_status(result, status_error)

        # Step 3
        # List inactive interfaces
        list_option = "--inactive"
        if not status_error:
            if not libvirt.check_iface(iface_name, "exists", list_option):
                raise error.TestFail("Fail to find %s." % iface_name)

        # Step 4
        # Start interface
        result = virsh.iface_start(iface_name, debug=True)
        libvirt.check_exit_status(result, status_error)
        if not status_error:
            if not libvirt.check_iface(iface_name, "ping", ping_ip):
                raise error.TestFail("Ping %s fail." % ping_ip)

        # Step 5
        # List active interfaces
        list_option = ""
        if not status_error:
            if not libvirt.check_iface(iface_name, "exists", list_option):
                raise error.TestFail("Fail to find %s in active interface list."
                                     % iface_name)

        # Step 6
        # Dumpxml for interface
        xml = virsh.iface_dumpxml(iface_name, "", to_file="", debug=True)
        logging.debug("Interface '%s' XML:\n%s", iface_name, xml)

        # Step 7
        # Get interface MAC address by name
        result = virsh.iface_mac(iface_name, debug=True)
        libvirt.check_exit_status(result, status_error)
        if not status_error:
            if not libvirt.check_iface(iface_name, "mac", result.stdout.strip()):
                raise error.TestFail("Mac address check fail")

        # Step 8
        # Get interface name by MAC address
        iface_mac = net_iface.get_mac()
        result = virsh.iface_name(iface_mac, debug=True)
        libvirt.check_exit_status(result, status_error)

        # Step 9
        if not use_exist_iface:
            # Step 9.1
            # Destroy interface
            result = virsh.iface_destroy(iface_name, debug=True)
            libvirt.check_exit_status(result, status_error)

            # Step 9.2
            # Undefine interface
            result = virsh.iface_undefine(iface_name, debug=True)
            libvirt.check_exit_status(result, status_error)
            list_option = "--all"
            if not status_error:
                if libvirt.check_iface(iface_name, "exists", list_option):
                    raise error.TestFail("%s is still present." % iface_name)
    finally:
        if use_exist_iface:
            if os.path.exists(iface_xml):
                os.remove(iface_xml)
            if not os.path.exists(iface_script):
                utils.run("mv %s %s" % (iface_script_bk, iface_script))
            if iface_is_up:
                # Need reload script
                utils.run("ifup %s" % iface_name)
            else:
                net_iface.down()
        else:
            if libvirt.check_iface(iface_name, "exists", "--all"):
                # Remove the interface
                if os.path.exists(iface_script):
                    os.remove(iface_script)
                utils_net.bring_down_ifname(iface_name)
        if NM_is_running:
            NM_service.start()
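The iface_xml file fed to virsh iface-define comes from the test configuration; a minimal, illustrative definition for a DHCP ethernet device (the name must match iface_name) might look like this:

# Illustrative sketch (not part of the original example): a minimal
# interface definition; real tests load this from the iface_xml param.
IFACE_XML_SAMPLE = """
<interface type='ethernet' name='eth0'>
  <start mode='onboot'/>
  <protocol family='ipv4'>
    <dhcp/>
  </protocol>
</interface>
"""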
Exemple #47
0
def run(test, params, env):
    """
    Test domfstrim command, make sure that all supported options work well

    Test scenarios:
    1. fstrim without options
    2. fstrim with --minimum with large options
    3. fstrim with --minimum with small options

    Note: --mountpoint is not supported yet, so it is not tested here
    """
    def recompose_xml(vm_name, scsi_disk):
        """
        Add scsi disk and scsi controller for the guest
        :param vm_name: Name of domain
        :param scsi_disk: scsi_debug disk name
        """

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disk_path = scsi_disk
        # Add scsi disk xml
        scsi_disk = Disk(type_name="block")
        scsi_disk.device = "lun"
        scsi_disk.source = scsi_disk.new_disk_source(
            **{'attrs': {
                'dev': disk_path
            }})
        scsi_disk.target = {'dev': "sdb", 'bus': "scsi"}
        find_scsi = "no"
        controllers = vmxml.xmltreefile.findall("devices/controller")
        for controller in controllers:
            if controller.get("type") == "scsi":
                find_scsi = "yes"
        vmxml.add_device(scsi_disk)

        # Add scsi disk controller
        if find_scsi == "no":
            scsi_controller = Controller("controller")
            scsi_controller.type = "scsi"
            scsi_controller.index = "0"
            scsi_controller.model = "virtio-scsi"
            vmxml.add_device(scsi_controller)

        # Redefine guest
        vmxml.sync()

    if not virsh.has_help_command('domfstrim'):
        raise error.TestNAError("This version of libvirt does not support "
                                "the domfstrim test")

    try:
        utils_misc.find_command("lsscsi")
    except ValueError:
        raise error.TestNAError("Command 'lsscsi' is missing. You must "
                                "install it.")

    vm_name = params.get("main_vm", "virt-tests-vm1")
    status_error = ("yes" == params.get("status_error", "no"))
    minimum = params.get("domfstrim_minimum")
    mountpoint = params.get("domfstrim_mountpoint")
    options = params.get("domfstrim_options", "")
    is_fulltrim = ("yes" == params.get("is_fulltrim", "yes"))
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    has_qemu_ga = not ("yes" == params.get("no_qemu_ga", "no"))
    start_qemu_ga = not ("yes" == params.get("no_start_qemu_ga", "no"))
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    # Do backup for origin xml
    xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    try:
        vm = env.get_vm(vm_name)
        if not vm.is_alive():
            vm.start()
        session = vm.wait_for_login()
        bef_list = session.cmd_output("fdisk -l|grep ^/dev|"
                                      "cut -d' ' -f1").split("\n")
        session.close()
        vm.destroy()

        # Load module and get scsi disk name
        utils.load_module("scsi_debug lbpu=1 lbpws=1")
        scsi_disk = utils.run("lsscsi|grep scsi_debug|"
                              "awk '{print $6}'").stdout.strip()
        # Create partition
        open("/tmp/fdisk-cmd", "w").write("n\np\n\n\n\nw\n")
        output = utils.run("fdisk %s < /tmp/fdisk-cmd" %
                           scsi_disk).stdout.strip()
        logging.debug("fdisk output %s", output)
        os.remove("/tmp/fdisk-cmd")
        # Format disk
        output = utils.run("mkfs.ext3 %s1" % scsi_disk).stdout.strip()
        logging.debug("output %s", output)
        # Add scsi disk in guest
        recompose_xml(vm_name, scsi_disk)

        # Prepare guest agent and start guest
        if has_qemu_ga:
            vm.prepare_guest_agent(start=start_qemu_ga)
        else:
            # Remove qemu-ga channel
            vm.prepare_guest_agent(channel=has_qemu_ga, start=False)

        guest_session = vm.wait_for_login()
        # Get new generated disk
        af_list = guest_session.cmd_output("fdisk -l|grep ^/dev|"
                                           "cut -d' ' -f1").split('\n')
        new_disk = "".join(list(set(bef_list) ^ set(af_list)))
        # Mount disk in guest
        guest_session.cmd("mkdir -p /home/test && mount %s /home/test" %
                          new_disk)

        # Do first fstrim before all to get original map for compare
        cmd_result = virsh.domfstrim(vm_name)
        if cmd_result.exit_status != 0:
            if not status_error:
                raise error.TestFail("Fail to do virsh domfstrim, error %s" %
                                     cmd_result.stderr)

        def get_diskmap_size():
            """
            Collect size from disk map
            :return: disk size
            """
            map_cmd = "cat /sys/bus/pseudo/drivers/scsi_debug/map"
            diskmap = utils.run(map_cmd).stdout.strip('\n\x00')
            logging.debug("disk map is %s", diskmap)
            sum = 0
            for i in diskmap.split(","):
                sum = sum + int(i.split("-")[1]) - int(i.split("-")[0])
            return sum

        ori_size = get_diskmap_size()

        # Write data to the disk
        dd_cmd = "dd if=/dev/zero of=/home/test/file bs=1048576 count=5"
        guest_session.cmd(dd_cmd)

        def _full_mapped():
            """
            Do full map check
            :return: True or False
            """
            full_size = get_diskmap_size()
            return (ori_size < full_size)

        if not utils_misc.wait_for(_full_mapped, timeout=30):
            raise error.TestError("Scsi map is not updated after dd command.")

        full_size = get_diskmap_size()

        # Remove disk content in guest
        guest_session.cmd("rm -rf /home/test/*")
        guest_session.close()

        def _trim_completed():
            """
            Do empty fstrim check
            :return: True or False
            """
            cmd_result = virsh.domfstrim(vm_name,
                                         minimum,
                                         mountpoint,
                                         options,
                                         unprivileged_user=unprivileged_user,
                                         uri=uri)
            if cmd_result.exit_status != 0:
                if not status_error:
                    raise error.TestFail(
                        "Fail to do virsh domfstrim, error %s" %
                        cmd_result.stderr)
                else:
                    logging.info("Fail to do virsh domfstrim as expected: %s",
                                 cmd_result.stderr)
                    return True

            empty_size = get_diskmap_size()

            if is_fulltrim:
                return empty_size <= ori_size
            else:
                # A partial trim will be checked later
                return False

        if not utils_misc.wait_for(_trim_completed, timeout=30):
            # Get result again to check partly fstrim
            empty_size = get_diskmap_size()
            if not is_fulltrim:
                if ori_size < empty_size <= full_size:
                    logging.info("Success to do fstrim partly")
                    return True
            raise error.TestFail("Fail to do fstrim. (orignal size: %s), "
                                 "(current size: %s), (full size: %s)" %
                                 (ori_size, empty_size, full_size))
        logging.info("Success to do fstrim")

    finally:
        # Do domain recovery
        vm.shutdown()
        xml_backup.sync()
        utils.unload_module("scsi_debug")
    def compare_capabilities_xml(source):
        cap_xml = capability_xml.CapabilityXML()
        cap_xml.xml = source

        # Check that host has a non-empty UUID tag.
        xml_uuid = cap_xml.uuid
        logging.debug("Host UUID (capabilities_xml): %s" % xml_uuid)
        if xml_uuid == "":
            raise error.TestFail("The host uuid in capabilities_xml is none!")

        # Check the host arch.
        xml_arch = cap_xml.arch
        logging.debug("Host arch (capabilities_xml): %s", xml_arch)
        exp_arch = utils.run("arch", ignore_status=True).stdout.strip()
        if cmp(xml_arch, exp_arch) != 0:
            raise error.TestFail("The host arch in capabilities_xml is expected"
                                 " to be %s, but get %s" % (exp_arch, xml_arch))

        # Check the host cpu count.
        xml_cpu_count = cap_xml.cpu_count
        logging.debug("Host cpus count (capabilities_xml): %s", xml_cpu_count)
        cmd = "grep processor /proc/cpuinfo | wc -l"
        exp_cpu_count = int(utils.run(cmd, ignore_status=True).stdout.strip())
        if xml_cpu_count != exp_cpu_count:
            raise error.TestFail("Host cpus count is expected to be %s, but get "
                                 "%s" % (exp_cpu_count, xml_cpu_count))

        # Check the arch of guest supported.
        xmltreefile = cap_xml.__dict_get__('xml')
        xml_os_arch_machine_map = cap_xml.os_arch_machine_map
        logging.debug(xml_os_arch_machine_map['hvm'])
        try:
            img = utils_misc.find_command("qemu-kvm")
        except ValueError:
            raise error.TestNAError("Cannot find qemu-kvm")
        cmd = img + " --cpu ? | grep qemu"
        cmd_result = utils.run(cmd, ignore_status=True)
        for guest in xmltreefile.findall('guest'):
            guest_wordsize = guest.find('arch').find('wordsize').text
            logging.debug("Arch of guest supported (capabilities_xml):%s",
                          guest_wordsize)
            if not re.search(guest_wordsize, cmd_result.stdout.strip()):
                raise error.TestFail("The capabilities_xml gives an extra arch "
                                     "of guest to support!")

        # Check the type of hypervisor.
        first_guest = xmltreefile.findall('guest')[0]
        first_domain = first_guest.find('arch').findall('domain')[0]
        guest_domain_type = first_domain.get('type')
        logging.debug("Hypervisor (capabilities_xml):%s", guest_domain_type)
        cmd_result = utils.run("virsh uri", ignore_status=True)
        if not re.search(guest_domain_type, cmd_result.stdout.strip()):
            raise error.TestFail("The capabilities_xml gives an different "
                                 "hypervisor")

        # check power management support.
        try:
            pm_cmd = os_dep.command('pm-is-supported')
            pm_cap_map = {'suspend': 'suspend_mem',
                          'hibernate': 'suspend_disk',
                          'suspend-hybrid': 'suspend_hybrid',
                          }
            exp_pms = []
            for opt in pm_cap_map:
                cmd = '%s --%s' % (pm_cmd, opt)
                res = utils.run(cmd, ignore_status=True)
                if res.exit_status == 0:
                    exp_pms.append(pm_cap_map[opt])
            pms = cap_xml.power_management_list
            if set(exp_pms) != set(pms):
                raise error.TestFail("Expected supported PMs are %s, got %s "
                                     "instead." % (exp_pms, pms))
        except ValueError:
            logging.debug('Power management checking is skipped, since command '
                          'pm-is-supported is not found.')
def run_timedrift_no_net(test, params, env):
    """
    Test suspend commands in qemu guest agent.

    :param test: kvm test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    clock_server = params.get("clock_server", "clock.redhat.com")
    ntputil_install = params.get("ntputil_install", "yum install -y ntpdate")
    login_timeout = int(params.get("login_timeout", "240"))
    guest_clock_source = params.get("guest_clock_source", "kvm-clock")
    date_time_command = params.get("date_time_command",
                                   "date -u +'TIME: %a %m/%d/%Y %H:%M:%S.%N'")
    date_time_filter_re = params.get("date_time_filter_re",
                                     "(?:TIME: \w\w\w )(.{19})(.+)")
    date_time_format = params.get("date_time_format", "%m/%d/%Y %H:%M:%S")
    hwclock_time_command = params.get("hwclock_time_command",
                                      "LC_TIME=C hwclock -u")
    hwclock_time_filter_re = params.get("hwclock_time_filter_re", "(.+)")
    hwclock_time_format = params.get("hwclock_time_format",
                                     "%a %b %d %H:%M:%S %Y")
    tolerance = float(params.get("time_diff_tolerance", "0.5"))

    sub_work = params["sub_work"]

    vm_name = params.get("vms")
    vm = env.get_vm(vm_name)
    error.context("Check if ntp utils are host in system.", logging.info)
    try:
        utils_misc.find_command("ntpdate")
    except ValueError:
        error.context("Install ntp utils `%s`." % (ntputil_install),
                      logging.info)
        utils.run(ntputil_install)
    error.context("Sync host machine with clock server %s" % (clock_server),
                  logging.info)
    utils.run("ntpdate %s" % (clock_server))
    error.context("Check clock source on guest VM", logging.info)
    session = vm.wait_for_serial_login(timeout=login_timeout)
    out = session.cmd_output("cat /sys/devices/system/clocksource/"
                             "clocksource0/current_clocksource")
    if guest_clock_source not in out:
        raise error.TestFail("Clock source %s missing in guest clock "
                             "sources %s." % (guest_clock_source, out))

    error.context("Get clock from host and guest VM using `date`",
                  logging.info)
    before_date = utils_test.get_time(session, date_time_command,
                                      date_time_filter_re, date_time_format)
    logging.debug(before_date)

    error.context("Get clock from host and guest VM using `hwclock`",
                  logging.info)
    before_hwclock = utils_test.get_time(session, hwclock_time_command,
                                         hwclock_time_filter_re,
                                         hwclock_time_format)
    logging.debug(before_hwclock)

    session.close()

    if sub_work in globals():  # Try to find sub work function.
        globals()[sub_work](params, vm, session)
    else:
        raise error.TestNAError("Unable to found subwork %s in %s test file." %
                                (sub_work, __file__))

    session = vm.wait_for_serial_login(timeout=login_timeout)
    error.context("Get clock from host and guest VM using `date`",
                  logging.info)
    after_date = utils_test.get_time(session, date_time_command,
                                     date_time_filter_re, date_time_format)
    logging.debug(after_date)

    error.context("Get clock from host and guest VM using `hwclock`",
                  logging.info)
    after_hwclock = utils_test.get_time(session, hwclock_time_command,
                                        hwclock_time_filter_re,
                                        hwclock_time_format)
    logging.debug(after_hwclock)

    date_diff = time_diff(before_date, after_date)
    hwclock_diff = time_diff(before_hwclock, after_hwclock)
    if date_diff > tolerance and hwclock_diff > tolerance:
        raise error.TestFail("hwclock (%ss) and date (%ss) differences are"
                             " out of tolerance (%ss)" %
                             (hwclock_diff, date_diff, tolerance))
    elif date_diff > tolerance:
        raise error.TestFail("date difference (%ss) is"
                             " out of tolerance (%ss)" %
                             (date_diff, tolerance))
    elif hwclock_diff > tolerance:
        raise error.TestFail("hwclock difference (%ss) is"
                             " out of tolerance (%ss)" %
                             (hwclock_diff, tolerance))
Exemple #50
0
def run(test, params, env):
    """
    Test suspend commands in qemu guest agent.

    :param test: kvm test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    clock_server = params.get("clock_server", "clock.redhat.com")
    ntputil_install = params.get("ntputil_install", "yum install -y ntpdate")
    login_timeout = int(params.get("login_timeout", "240"))
    date_time_command = params.get(
        "date_time_command", r"date -u +'TIME: %a %m/%d/%Y %H:%M:%S.%N'")
    date_time_filter_re = params.get("date_time_filter_re",
                                     r"(?:TIME: \w\w\w )(.{19})(.+)")
    date_time_format = params.get("date_time_format", "%m/%d/%Y %H:%M:%S")

    tolerance = float(params.get("time_diff_tolerance", "0.5"))

    sub_work = params["sub_work"]
    test_type = params["timedrift_sub_work"]

    vm_name = params.get("vms")
    vm = env.get_vm(vm_name)
    error.context("Check if ntp utils are host in system.", logging.info)
    try:
        utils_misc.find_command("ntpdate")
    except ValueError:
        error.context("Install ntp utils `%s`." % (ntputil_install),
                      logging.info)
        utils.run(ntputil_install)
    error.context("Sync host machine with clock server %s" % (clock_server),
                  logging.info)
    utils.run("ntpdate %s" % (clock_server))

    session = vm.wait_for_login(timeout=login_timeout)
    error.context("Get clock from host and guest VM using `date`",
                  logging.info)

    before_date = utils_test.get_time(session, date_time_command,
                                      date_time_filter_re, date_time_format)
    logging.debug("date: host time=%ss guest time=%ss", *before_date)

    session.close()

    if sub_work in globals():  # Try to find sub work function.
        globals()[sub_work](params, vm, session)
    else:
        raise error.TestNAError("Unable to found subwork %s in %s test file." %
                                (sub_work, __file__))

    vm = env.get_vm(vm_name)
    session = vm.wait_for_login(timeout=login_timeout)
    error.context("Get clock from host and guest VM using `date`",
                  logging.info)
    after_date = utils_test.get_time(session, date_time_command,
                                     date_time_filter_re, date_time_format)
    logging.debug("date: host time=%ss guest time=%ss", *after_date)

    if test_type == 'guest_suspend':
        date_diff = time_diff(before_date, after_date)
        if date_diff > tolerance:
            raise error.TestFail("date %ss difference is"
                                 "'guest_diff_time != host_diff_time'"
                                 " out of tolerance %ss" %
                                 (date_diff[1], tolerance))
    elif test_type == "guest_pause_resume":
        date_diff = time_diff_host_guest(before_date, after_date)
        if date_diff[1] > tolerance:
            raise error.TestFail("date %ss difference is "
                                 "'guest_time_after-guest_time_before'"
                                 " out of tolerance %ss" %
                                 (date_diff[1], tolerance))
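Both timedrift examples above call time_diff() and time_diff_host_guest(), which are defined elsewhere in the test module. A minimal sketch of what they plausibly compute, assuming utils_test.get_time() returns a (host_time, guest_time) pair of epoch seconds (an assumption, not confirmed here):

# Illustrative sketch (not part of the original examples); the real
# helpers live in the test module and may differ.
def time_diff(before, after):
    # Drift: how far the guest's elapsed time strays from the host's.
    host_delta = after[0] - before[0]
    guest_delta = after[1] - before[1]
    return abs(guest_delta - host_delta)


def time_diff_host_guest(before, after):
    # (host elapsed, guest elapsed) between the two samples.
    return (after[0] - before[0], after[1] - before[1])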