def run(test, params, env):
    """
    Test command: virsh pool-create.

    Create a libvirt pool from an XML file. The XML file may be provided
    by the tester or generated by dumping the XML of a pre-defined pool.
    """
    pool_xml_f = params.get("pool_create_xml_file", "/PATH/TO/POOL.XML")
    pool_name = params.get("pool_create_name", "virt_test_pool_tmp")
    option = params.get("pool_create_extra_option", "")
    readonly_mode = "yes" == params.get("pool_create_readonly_mode", "no")
    status_error = "yes" == params.get("status_error", "no")
    pre_def_pool = "yes" == params.get("pre_def_pool", "no")
    pool_type = params.get("pool_type", "dir")
    source_format = params.get("pool_src_format", "")
    source_name = params.get("pool_source_name", "")
    source_path = params.get("pool_source_path", "/")
    pool_target = params.get("pool_target", "pool_target")
    duplicate_element = params.get("pool_create_duplicate_element", "")
    new_pool_name = params.get("new_pool_create_name")

    if not libvirt_version.version_compare(1, 0, 0):
        if pool_type == "gluster":
            raise error.TestNAError("Gluster pool is not supported in current"
                                    " libvirt version.")

    if "/PATH/TO/POOL.XML" in pool_xml_f:
        raise error.TestNAError("Please replace %s with valid pool xml file" %
                                pool_xml_f)
    pool_ins = libvirt_storage.StoragePool()
    if pre_def_pool and pool_ins.pool_exists(pool_name):
        raise error.TestFail("Pool %s already exist" % pool_name)

    emulated_image = "emulated-image"
    kwargs = {
        'image_size': '1G',
        'source_path': source_path,
        'source_name': source_name,
        'source_format': source_format
    }
    pvt = utlv.PoolVolumeTest(test, params)
    old_uuid = None
    if pre_def_pool:
        try:
            pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image,
                         **kwargs)
            virsh.pool_dumpxml(pool_name, to_file=pool_xml_f)
            old_uuid = virsh.pool_uuid(pool_name).stdout.strip()
            if duplicate_element == "name":
                pass
            elif duplicate_element == "uuid":
                # Remove <uuid>
                cmd = "sed -i '/<uuid>/d' %s" % pool_xml_f
                utils.run(cmd)
            elif duplicate_element == "source":
                # Remove <uuid> and update <name>
                cmd = "sed -i '/<uuid>/d' %s" % pool_xml_f
                utils.run(cmd)
                cmd = "sed -i 's/<name>.*<\/name>/<name>%s<\/name>/g' %s" % (
                    new_pool_name, pool_xml_f)
                utils.run(cmd)
            else:
                # A transient pool is gone once it is destroyed
                virsh.pool_destroy(pool_name)
            new_source_format = params.get("new_pool_src_format")
            if new_source_format:
                cmd = "sed -i s/type=\\\'%s\\\'/type=\\\'%s\\\'/g %s" % (
                    source_format, new_source_format, pool_xml_f)
                utils.run(cmd)
        except Exception, e:
            pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image,
                             **kwargs)
            raise error.TestError(
                "Error occurred while preparing the pool xml:\n %s" % e)
def run(test, params, env):
    """
    This case checks error messages in the libvirtd log.

    Implemented test cases:
    with_iptables:  Simply start libvirtd when using the iptables service
                          as firewall.
    with_firewalld: Simply start libvirtd when using the firewalld service
                          as firewall.
    """
    def _error_handler(errors, line):
        """
        A callback function called when new error lines appares in libvirtd
        log, then this line is appended to list 'errors'

        :param errors: A list to contain all error lines.
        :param line: Newly found error line in libvirtd log.
        """
        errors.append(line)

    test_type = params.get('test_type')

    old_iptables = None
    old_firewalld = None
    iptables = None
    try:
        # Setup firewall services according to test type.
        if test_type == 'with_firewalld':
            old_iptables, old_firewalld = _set_iptables_firewalld(False, True)
        elif test_type == 'with_iptables':
            old_iptables, old_firewalld = _set_iptables_firewalld(True, False)
        elif test_type == 'stop_iptables':
            # Calling _set_iptables_firewalld(False, False) on RHEL 6
            # would result in a skip, since firewalld does not exist on
            # RHEL 6. But the case derived from bug 716612 is mainly a
            # RHEL 6 problem and should still be tested, so bypass
            # _set_iptables_firewalld and stop iptables directly.
            try:
                utils_path.find_command('iptables')
                iptables = service.Factory.create_service('iptables')
            except utils_path.CmdNotFoundError:
                msg = "Can't find service iptables."
                raise error.TestNAError(msg)

            utils.run('iptables-save > /tmp/iptables.save')
            if not iptables.stop():
                msg = "Can't stop service iptables"
                raise error.TestError(msg)

        libvirtd_session = None
        try:
            errors = []
            # Run a libvirtd session and collect errors from its log.
            libvirtd_session = LibvirtdSession(
                error_func=_error_handler,
                error_params=(errors, ),
            )

            libvirt_pid = libvirtd_session.get_pid()
            libvirt_context = utils_selinux.get_context_of_process(libvirt_pid)
            logging.debug("The libvirtd pid context is: %s" % libvirt_context)

            # Check errors.
            if errors:
                logging.debug("Found errors in libvirt log:")
                for line in errors:
                    logging.debug(line)
                if test_type == 'stop_iptables':
                    for line in errors:
                        # A libvirtd process started without the virt_t
                        # context fails to set iptables rules, which is
                        # expected here.
                        if not ("/sbin/iptables" in line and
                                "unexpected exit status 1" in line):
                            raise error.TestFail("Found errors other than"
                                                 " iptables failure in"
                                                 " libvirt log.")
                else:
                    raise error.TestFail("Found errors in libvirt log.")
        finally:
            if libvirtd_session:
                libvirtd_session.close()
    finally:
        # Recover services status.
        if test_type in ('with_firewalld', 'with_iptables'):
            _set_iptables_firewalld(old_iptables, old_firewalld)
        elif test_type == "stop_iptables" and iptables:
            iptables.start()
            utils.run('iptables-restore < /tmp/iptables.save')
        if os.path.exists("/tmp/iptables.save"):
            os.remove("/tmp/iptables.save")
def run(test, params, env):
    """
    Test for virt-clone, it is used to clone a guest
    to another with given name.

    (1). Get source guest and destination guest name.
    (2). Clone source guest to dest guest with virt-clone.
    (3). Check the dest guest.
    """
    # Get the full path of virt-clone command.
    try:
        VIRT_CLONE = os_dep.command("virt-clone")
    except ValueError:
        raise error.TestNAError("Not find virt-clone command on host.")

    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm = env.get_vm(vm_name)

    dest_guest_name = params.get("dest_vm", "test-clone")
    dest_guest_file = params.get("dest_image")
    dest_guest_path = None

    # Destroy and undefine the test-clone guest before
    # executing virt-clone command.
    virsh.remove_domain(dest_guest_name)

    cmd = "%s --connect=%s -o %s -n %s" % (VIRT_CLONE, vm.connect_uri, vm_name,
                                           dest_guest_name)

    domblklist_result = virsh.domblklist(vm_name)

    if len(domblklist_result.stdout.strip().splitlines()) >= 3:
        # We need a file for destination if guest has block devices.
        dest_guest_path = os.path.join(data_dir.get_data_dir(),
                                       dest_guest_file)
        if os.path.exists(dest_guest_path):
            os.remove(dest_guest_path)

        cmd = "%s -f %s" % (cmd, dest_guest_path)

    try:
        cmd_result = utils.run(cmd, ignore_status=True)

        if cmd_result.exit_status:
            raise error.TestFail("virt-clone command failed.\n"
                                 "output: %s." % cmd_result)

        # Starting the VM immediately after virt-clone can fail with
        # "error: monitor socket did not show up", so poll for a
        # successful start instead of starting only once. A list is used
        # so the nested function can record the result (Python 2 has no
        # nonlocal).
        start_results = []

        def _start_success():
            result = virsh.start(dest_guest_name)
            start_results.append(result)
            return result.exit_status == 0

        if not utils_misc.wait_for(_start_success, timeout=5):
            raise error.TestFail("virt-clone exited successfully, but"
                                 " starting the cloned guest failed.\n"
                                 " Detail: %s." %
                                 (start_results and start_results[-1]))
    finally:
        # cleanup remove the dest guest.
        virsh.remove_domain(dest_guest_name)
        # remove image file if we created it.
        if dest_guest_path and os.path.exists(dest_guest_path):
            os.remove(dest_guest_path)
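
The utils_misc.wait_for call above is the standard avocado-vt way to poll
for an eventually-true condition instead of failing on the first attempt.
The same pattern works for any such check; for instance, a sketch that
waits for the cloned domain to reach the "running" state (assuming the
virsh.domstate wrapper and the names used above):

    def _domain_running():
        return virsh.domstate(dest_guest_name).stdout.strip() == "running"

    if not utils_misc.wait_for(_domain_running, timeout=30, step=2):
        logging.warning("Cloned guest did not reach 'running' in time")
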
    test_disk_bootorder = "yes" == params.get("virt_disk_test_bootorder", "no")
    test_disk_bootorder_snapshot = "yes" == params.get(
        "virt_disk_test_bootorder_snapshot", "no")
    test_boot_console = "yes" == params.get("virt_disk_device_boot_console",
                                            "no")
    test_disk_readonly = "yes" == params.get("virt_disk_device_test_readonly",
                                             "no")
    test_disk_snapshot = "yes" == params.get("virt_disk_test_snapshot", "no")
    test_disk_save_restore = "yes" == params.get("virt_disk_test_save_restore",
                                                 "no")
    test_bus_device_option = "yes" == params.get("test_bus_option_cmd", "no")
    snapshot_before_start = "yes" == params.get("snapshot_before_start", "no")

    if dom_iothreads:
        if not libvirt_version.version_compare(1, 2, 8):
            raise error.TestNAError("iothreads not supported for"
                                    " this libvirt version")

    if test_block_size:
        logical_block_size = params.get("logical_block_size")
        physical_block_size = params.get("physical_block_size")

    if any([test_boot_console, add_disk_driver]):
        if vm.is_dead():
            vm.start()
        session = vm.wait_for_login()
        if test_boot_console:
            # Setting console to kernel parameters
            vm.set_kernel_console("ttyS0", "115200")
        if add_disk_driver:
            # Ignore errors here
            session.cmd("dracut --force --add-drivers '%s'" % add_disk_driver)
def run(test, params, env):
    """
    Test command: virsh nodedev-reset <device>

    When `device_option` is:
    1) resettable   : Reset specified device if it is resettable.
    2) non-exist    : Try to reset specified device which doesn't exist.
    3) non-pci      : Try to reset all local non-PCI devices.
    4) active       : Try to reset specified device which is attached to VM.
    5) unresettable : Try to reset all unresettable PCI devices.
    """
    # Retrieve parameters
    expect_succeed = params.get('expect_succeed', 'yes')
    device_option = params.get('device_option', 'valid')
    unspecified = 'REPLACE_WITH_TEST_DEVICE'
    specified_device = params.get('specified_device', unspecified)

    # Backup original libvirtd status and prepare libvirtd status
    logging.debug('Preparing libvirtd')
    libvirtd = params.get("libvirtd", "on")
    libvirtd_status = utils_libvirtd.libvirtd_is_running()
    if libvirtd == "off" and libvirtd_status:
        utils_libvirtd.libvirtd_stop()
    elif libvirtd == "on" and not libvirtd_status:
        utils_libvirtd.libvirtd_start()

    # Get whether PCI devices are resettable from sysfs.
    devices = get_pci_info()

    # Divide PCI devices into two categories.
    resettable_nodes = []
    unresettable_nodes = []
    for device in devices:
        info = devices[device]
        if info['reset'] and info['driver']:
            resettable_nodes.append(device)
        else:
            unresettable_nodes.append(device)

    # Find out all non-PCI devices.
    all_devices = virsh.nodedev_list().stdout.strip().splitlines()
    non_pci_nodes = []
    for device in all_devices:
        if device not in devices:
            non_pci_nodes.append(device)

    try:
        if device_option == 'resettable':
            # Test specified resettable device.
            if specified_device != unspecified:
                if specified_device in resettable_nodes:
                    test_nodedev_reset([specified_device], expect_succeed)
                else:
                    raise error.TestNAError(
                        'Specified device is not resettable!')
            else:
                raise error.TestNAError('Param specified_device is not set!')
        elif device_option == 'non-exist':
            # Test a specified non-existent device.
            if specified_device != unspecified:
                if specified_device not in all_devices:
                    test_nodedev_reset([specified_device], expect_succeed)
                else:
                    raise error.TestError('Specified device exists!')
            else:
                raise error.TestNAError('Param specified_device is not set!')
        elif device_option == 'non-pci':
            # Test all non-PCI devices.
            if non_pci_nodes:
                test_nodedev_reset(non_pci_nodes, expect_succeed)
            else:
                raise error.TestNAError('No non-PCI device found!')
        elif device_option == 'active':
            # Test specified device if attached to VM.
            if specified_device != unspecified:
                vm_name = params.get('main_vm', 'virt-tests-vm1')
                vm = env.get_vm(vm_name)
                test_active_nodedev_reset(specified_device, vm, expect_succeed)
            else:
                raise error.TestNAError('Param specified_device is not set!')
        elif device_option == 'unresettable':
            # Test all unresettable devices.
            if unresettable_nodes:
                test_nodedev_reset(unresettable_nodes, expect_succeed)
            else:
                raise error.TestNAError('No unresettable device found!')
        else:
            raise error.TestError('Unrecognized device option %s!' %
                                  device_option)
    finally:
        # Restore libvirtd status
        logging.debug('Restoring libvirtd')
        current_libvirtd_status = utils_libvirtd.libvirtd_is_running()
        if current_libvirtd_status and not libvirtd_status:
            utils_libvirtd.libvirtd_stop()
        elif not current_libvirtd_status and libvirtd_status:
            utils_libvirtd.libvirtd_start()
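
get_pci_info is referenced above but not defined in this snippet. A
plausible sketch, assuming it walks sysfs and records whether each PCI
device exposes a reset attribute and has a bound driver (the dictionary
layout is inferred from how devices[device] is used above):

import glob
import os


def get_pci_info():
    """
    Hypothetical sysfs walker: map libvirt-style PCI node names to
    {'reset': bool, 'driver': bool} based on /sys/bus/pci/devices.
    """
    devices = {}
    for dev_path in glob.glob('/sys/bus/pci/devices/*'):
        addr = os.path.basename(dev_path)          # e.g. 0000:00:1f.2
        name = 'pci_' + addr.replace(':', '_').replace('.', '_')
        devices[name] = {
            'reset': os.path.exists(os.path.join(dev_path, 'reset')),
            'driver': os.path.exists(os.path.join(dev_path, 'driver')),
        }
    return devices
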
def run(test, params, env):
    """
    Test command: virsh shutdown.

    The command gracefully shuts down a domain.

    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Perform virsh shutdown operation.
    4.Recover test environment.
    5.Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vm_ref = params.get("shutdown_vm_ref")
    status_error = ("yes" == params.get("status_error"))
    agent = ("yes" == params.get("shutdown_agent", "no"))
    mode = params.get("shutdown_mode", "")
    pre_domian_status = params.get("reboot_pre_domian_status", "running")
    libvirtd = params.get("libvirtd", "on")
    xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Libvirt acl test related params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current" +
                                    " libvirt version.")

    try:

        # Add or remove qemu-agent from guest before test
        if agent:
            vm_xml.VMXML.set_agent_channel(vm_name)
        else:
            vm_xml.VMXML.remove_agent_channel(vm_name)
        virsh.start(vm_name)
        guest_session = vm.wait_for_login()
        if agent:
            guest_session.cmd("qemu-ga -d")
            stat_ps = guest_session.cmd_status("ps aux |grep [q]emu-ga")
            guest_session.close()
            if stat_ps:
                raise error.TestError("Fail to start qemu-guest-agent!")
        if pre_domian_status == "shutoff":
            virsh.destroy(vm_name)
        domid = vm.get_id()
        domuuid = vm.get_uuid()
        # run test case
        if vm_ref == "id":
            vm_ref = domid
        elif vm_ref == "hex_id":
            vm_ref = hex(int(domid))
        elif vm_ref.find("invalid") != -1:
            vm_ref = params.get(vm_ref)
        elif vm_ref == "name":
            vm_ref = "%s %s" % (vm_name, params.get("shutdown_extra"))
        elif vm_ref == "uuid":
            vm_ref = domuuid

        if libvirtd == "off":
            utils_libvirtd.libvirtd_stop()

        if vm_ref != "remote":
            status = virsh.shutdown(vm_ref,
                                    mode,
                                    unprivileged_user=unprivileged_user,
                                    uri=uri,
                                    debug=True,
                                    ignore_status=True).exit_status
        else:
            remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
            remote_pwd = params.get("remote_pwd", None)
            local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
            if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
                raise error.TestNAError(
                    "Remote test parameters unchanged from default")
            status = 0
            try:
                remote_uri = libvirt_vm.complete_uri(local_ip)
                session = remote.remote_login("ssh", remote_ip, "22", "root",
                                              remote_pwd, "#")
                session.cmd_output('LANG=C')
                command = ("virsh -c %s shutdown %s %s" %
                           (remote_uri, vm_name, mode))
                status = session.cmd_status(command, internal_timeout=5)
                session.close()
            except error.CmdError:
                status = 1

        # recover the libvirtd service
        if libvirtd == "off":
            utils_libvirtd.libvirtd_start()

        # check status_error
        if status_error:
            if not status:
                raise error.TestFail("Run successfully with wrong command!")
        else:
            if status:
                raise error.TestFail("Run failed with right command")
    finally:
        xml_backup.sync()
                check_media(session, check_file, action, rw_floppy_test)
                session.close()
    finally:
        # Clean the iso dir and the device
        update_device(vm_name, "", options, start_vm)
        # Recover xml of vm.
        vmxml_backup.sync()
        utils.safe_rmdir(iso_dir)

    # Negative testing
    if status_error:
        if status:
            logging.info("Expected error (negative testing). Output: %s",
                         result.stderr.strip())
        else:
            raise error.TestFail("Unexpected return code %d "
                                 "(negative testing)" % status)

    # Positive testing
    else:
        if status:
            if force_SKIP:
                raise error.TestNAError(
                    "SELinux is set to enforcing and has "
                    "resulted in this test failing to open "
                    "the iso file for a floppy.")
            raise error.TestFail("Unexpected error (positive testing). "
                                 "Output: %s" % result.stderr.strip())
        else:
            logging.info("Expected success. Output: %s", result.stdout.strip())
def run(test, params, env):
    """
    1. Configure kernel cmdline to support kdump
    2. Start kdump service
    3. Inject NMI to the guest
    4. Check NMI times
    """
    for cmd in 'inject-nmi', 'qemu-monitor-command':
        if not virsh.has_help_command(cmd):
            raise error.TestNAError("This version of libvirt does not"
                                    " support the %s test" % cmd)

    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm = env.get_vm(vm_name)
    start_vm = params.get("start_vm")
    expected_nmi_times = params.get("expected_nmi_times", '0')
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current" +
                                    " libvirt version.")

    if start_vm == "yes":
        # check that kdump is available in the guest
        cmd = "which kdump"
        try:
            run_cmd_in_guest(vm, cmd)
        except:
            try:
                # try to install kexec-tools on fedoraX/rhelx.y guest
                run_cmd_in_guest(vm, "yum install -y kexec-tools")
            except:
                raise error.TestNAError("Requires kexec-tools(or the "
                                        "equivalent for your distro)")

        # enable kdump service in the guest
        cmd = "service kdump start"
        run_cmd_in_guest(vm, cmd)

        # filter original 'NMI' information from the /proc/interrupts
        cmd = "grep NMI /proc/interrupts"
        nmi_str = run_cmd_in_guest(vm, cmd)

        # count the vcpus from /proc/cpuinfo
        cmd = "grep -E '^process' /proc/cpuinfo | wc -l"
        vcpu_num = run_cmd_in_guest(vm, cmd).strip()

        logging.info("Inject NMI to the guest via virsh inject_nmi")
        virsh.inject_nmi(vm_name, debug=True, ignore_status=False)

        logging.info("Inject NMI to the guest via virsh qemu_monitor_command")
        virsh.qemu_monitor_command(vm_name, '{"execute":"inject-nmi"}')

        # injects a Non-Maskable Interrupt into the default CPU (x86/s390)
        # or all CPUs (ppc64), as usual, the default CPU index is 0
        cmd = "grep NMI /proc/interrupts | awk '{print $2}'"
        nmi_from_default_vcpu = run_cmd_in_guest(vm, cmd)
        real_nmi_times = nmi_from_default_vcpu.splitlines()[0]
        logging.debug("The current Non-Maskable Interrupts: %s",
                      real_nmi_times)

        # check Non-maskable interrupts times
        if real_nmi_times != expected_nmi_times:
            raise error.TestFail("NMI times not as expected: got %s,"
                                 " expected %s" %
                                 (real_nmi_times, expected_nmi_times))
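
run_cmd_in_guest is used throughout this example but defined elsewhere in
the original test. A minimal sketch of such a helper, assuming the
aexpect-based session API used by these tests:

def run_cmd_in_guest(vm, cmd, timeout=60):
    """
    Hypothetical helper: log into the guest, run a command, return its
    stdout, and raise on a non-zero exit status.
    """
    session = vm.wait_for_login()
    try:
        status, output = session.cmd_status_output(cmd, timeout=timeout)
        if status:
            raise error.TestError("Command '%s' failed in guest: %s"
                                  % (cmd, output))
        return output
    finally:
        session.close()
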
def launch_client(sessions, server, server_ctl, host, clients, l, nf_args,
                  port, params, server_cyg):
    """ Launch netperf clients """

    netperf_version = params.get("netperf_version", "2.6.0")
    client_path = "/tmp/netperf-%s/src/netperf" % netperf_version
    server_path = "/tmp/netperf-%s/src/netserver" % netperf_version
    # Start netserver
    error.context("Start Netserver on guest", logging.info)
    if params.get("os_type") == "windows":
        timeout = float(params.get("timeout", "240"))
        cdrom_drv = utils_misc.get_winutils_vol(server_ctl)
        get_status_flag = False
        if params.get("use_cygwin") == "yes":
            netserv_start_cmd = params.get("netserv_start_cmd")
            netperf_src = params.get("netperf_src") % cdrom_drv
            cygwin_root = params.get("cygwin_root")
            netserver_path = params.get("netserver_path")
            netperf_install_cmd = params.get("netperf_install_cmd")
            start_session = server_cyg
            logging.info("Start netserver with cygwin, cmd is: %s" %
                         netserv_start_cmd)
            if "netserver" not in server_ctl.cmd_output("tasklist"):
                netperf_pack = "netperf-%s" % params.get("netperf_version")
                s_check_cmd = "dir %s" % netserver_path
                p_check_cmd = "dir %s" % cygwin_root
                if not ("netserver.exe" in server_ctl.cmd(s_check_cmd)
                        and netperf_pack in server_ctl.cmd(p_check_cmd)):
                    error.context("Install netserver in Windows guest cygwin",
                                  logging.info)
                    cmd = "xcopy %s %s /S /I /Y" % (netperf_src, cygwin_root)
                    server_ctl.cmd(cmd)
                    server_cyg.cmd_output(netperf_install_cmd, timeout=timeout)
                    if "netserver.exe" not in server_ctl.cmd(s_check_cmd):
                        err_msg = "Install netserver cygwin failed"
                        raise error.TestNAError(err_msg)
                    logging.info("Install netserver in cygwin successfully")

        else:
            start_session = server_ctl
            netserv_start_cmd = params.get("netserv_start_cmd") % cdrom_drv
            logging.info("Start netserver without cygwin, cmd is: %s" %
                         netserv_start_cmd)

        error.context("Start netserver on windows guest", logging.info)
        start_netserver_win(start_session, netserv_start_cmd)

    else:
        logging.info("Netserver start cmd is '%s'" % server_path)
        ssh_cmd(server_ctl, "pidof netserver || %s" % server_path)
        get_status_flag = True
        ncpu = ssh_cmd(server_ctl, "cat /proc/cpuinfo |grep processor |wc -l")
        ncpu = re.findall(r"\d+", ncpu)[0]

    logging.info("Netserver start successfully")

    def count_interrupt(name):
        """
        :param name: the name of interrupt, such as "virtio0-input"
        """
        intr = 0
        stat = ssh_cmd(server_ctl, "cat /proc/interrupts |grep %s" % name)
        stat = stat.strip().split("\n")[-1]
        for cpu in range(int(ncpu)):
            intr += int(stat.split()[cpu + 1])
        return intr

    def get_state():
        for i in ssh_cmd(server_ctl, "ifconfig").split("\n\n"):
            if server in i:
                ifname = re.findall(r"(\w+\d+)[:\s]", i)[0]

        path = "find /sys/devices|grep net/%s/statistics" % ifname
        cmd = "%s/rx_packets|xargs cat;%s/tx_packets|xargs cat;" \
            "%s/rx_bytes|xargs cat;%s/tx_bytes|xargs cat" % (path,
                                                             path, path, path)
        output = ssh_cmd(server_ctl, cmd).split()[-4:]

        nrx = int(output[0])
        ntx = int(output[1])
        nrxb = int(output[2])
        ntxb = int(output[3])

        nre = int(
            ssh_cmd(server_ctl, "grep Tcp /proc/net/snmp|tail -1").split()[12])
        state_list = [
            'rx_pkts', nrx, 'tx_pkts', ntx, 'rx_byts', nrxb, 'tx_byts', ntxb,
            're_pkts', nre
        ]
        try:
            nrx_intr = count_interrupt("virtio.-input")
            ntx_intr = count_interrupt("virtio.-output")
            state_list.append('rx_intr')
            state_list.append(nrx_intr)
            state_list.append('tx_intr')
            state_list.append(ntx_intr)
        except IndexError:
            ninit = count_interrupt("virtio.")
            state_list.append('intr')
            state_list.append(ninit)

        io_exit = int(ssh_cmd(host, "cat /sys/kernel/debug/kvm/io_exits"))
        irq_inj = int(ssh_cmd(host,
                              "cat /sys/kernel/debug/kvm/irq_injections"))
        state_list.append('io_exit')
        state_list.append(io_exit)
        state_list.append('irq_inj')
        state_list.append(irq_inj)
        return state_list

    def netperf_thread(i, numa_enable, client_s, timeout):
        cmd = ""
        fname = "/tmp/netperf.%s.nf" % pid
        if numa_enable:
            output = ssh_cmd(client_s, "numactl --hardware")
            n = int(re.findall(r"available: (\d+) nodes", output)[0]) - 1
            cmd += "numactl --cpunodebind=%s --membind=%s " % (n, n)
        cmd += "/tmp/netperf_agent.py %d %s -D 1 -H %s -l %s %s" % (
            i, client_path, server, int(l) * 1.5, nf_args)
        cmd += " >> %s" % fname
        logging.info("Start netperf thread by cmd '%s'" % cmd)
        ssh_cmd(client_s, cmd, timeout)
        logging.info("Netperf thread completed successfully")

    def all_clients_up():
        try:
            content = ssh_cmd(clients[-1], "cat %s" % fname)
        except Exception:
            return False
        return int(sessions) == len(re.findall("MIGRATE", content))

    def parse_demo_result(fname, sessions):
        """
        Process the demo result: remove the noise from the head and
        compute the final throughput.

        :param fname: result file name
        :param sessions: sessions' number
        """
        fd = open(fname)
        lines = fd.readlines()
        fd.close()

        for i in range(1, len(lines) + 1):
            if "AF_INET" in lines[-i]:
                break
        nresult = i - 1
        if nresult < int(sessions):
            raise error.TestError("We couldn't expect this parallism,"
                                  "expect %s get %s" % (sessions, nresult))

        niteration = nresult / sessions
        result = 0.0
        for this in lines[-sessions * niteration:]:
            if "Interim" in this:
                result += float(re.findall(r"Interim result: *(\S+)", this)[0])
        result = result / niteration
        logging.debug("niteration: %s" % niteration)
        return result

    error.context("Start netperf client threads", logging.info)
    pid = str(os.getpid())
    fname = "/tmp/netperf.%s.nf" % pid
    ssh_cmd(clients[-1], "rm -f %s" % fname)
    numa_enable = params.get("netperf_with_numa", "yes") == "yes"
    timeout_netperf_start = float(params.get("netperf_start_timeout", 360))
    client_thread = threading.Thread(target=netperf_thread,
                                     kwargs={
                                         "i": int(sessions),
                                         "numa_enable": numa_enable,
                                         "client_s": clients[0],
                                         "timeout": timeout_netperf_start
                                     })
    client_thread.start()

    ret = {}
    ret['pid'] = pid

    if utils_misc.wait_for(all_clients_up, timeout_netperf_start, 30, 5,
                           "Wait until all netperf clients start to work"):
        logging.debug("All netperf clients start to work.")
    else:
        raise error.TestNAError("Error, not all netperf clients at work")

    # real & effective test starts
    if get_status_flag:
        start_state = get_state()
    ret['mpstat'] = ssh_cmd(host, "mpstat 1 %d |tail -n 1" % (l - 1))
    finished_result = ssh_cmd(clients[-1], "cat %s" % fname)

    # real & effective test ends
    if get_status_flag:
        end_state = get_state()
        if len(start_state) != len(end_state):
            msg = "Initial state not match end state:\n"
            msg += "  start state: %s\n" % start_state
            msg += "  end state: %s\n" % end_state
            logging.warn(msg)
        else:
            for i in range(len(end_state) / 2):
                ret[end_state[i * 2]] = (end_state[i * 2 + 1] -
                                         start_state[i * 2 + 1])

    client_thread.join()

    error.context("Testing Results Treatment and Report", logging.info)
    f = open(fname, "w")
    f.write(finished_result)
    f.close()
    ret['thu'] = parse_demo_result(fname, int(sessions))
    return ret
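
ssh_cmd is another helper from the original module that is not shown
here. A plausible sketch, assuming each server/client object exposes the
aexpect cmd_status_output API:

def ssh_cmd(session, cmd, timeout=120):
    """
    Hypothetical wrapper: run cmd over an established session, return
    its output, and raise on failure.
    """
    status, output = session.cmd_status_output(cmd, timeout=timeout)
    if status:
        raise error.TestError("Command '%s' failed: %s" % (cmd, output))
    return output
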
def run(test, params, env):
    """
    Test command: migrate-compcache <domain> [--size <number>]

    1) Run migrate-compcache command and check return code.
    """
    vm_ref = params.get("vm_ref", "name")
    vm_name = params.get("migrate_main_vm")
    start_vm = 'yes' == params.get('start_vm', 'yes')
    pause_vm = 'yes' == params.get('pause_after_start_vm', 'no')
    expect_succeed = 'yes' == params.get('expect_succeed', 'yes')
    size_option = params.get('size_option', 'valid')
    action = params.get('compcache_action', 'get')
    vm = env.get_vm(vm_name)

    # Check if the virsh command migrate-compcache is available
    if not virsh.has_help_command('migrate-compcache'):
        raise error.TestNAError("This version of libvirt does not support "
                                "virsh command migrate-compcache")

    # Prepare the VM state if it's not correct.
    if start_vm and not vm.is_alive():
        vm.start()
    elif not start_vm and vm.is_alive():
        vm.destroy()
    if pause_vm and not vm.is_paused():
        vm.pause()

    # Setup domain reference
    if vm_ref == 'domname':
        vm_ref = vm_name

    # Setup size according to size_option:
    # minimal: Same as memory page size
    # maximal: Same as guest memory
    # empty: An empty string
    # small: One byte less than page size
    # large: Larger than guest memory
    # huge : Largest int64
    page_size = get_page_size()
    if size_option == 'minimal':
        size = str(page_size)
    elif size_option == 'maximal':
        size = str(vm.get_max_mem() * 1024)
    elif size_option == 'empty':
        size = '""'
    elif size_option == 'small':
        size = str(page_size - 1)
    elif size_option == 'large':
        # Add ~50MB to the guest's maximum memory so the size is
        # guaranteed to exceed guest memory.
        size = str(vm.get_max_mem() * 1024 + 50000000)
    elif size_option == 'huge':
        size = str(2**64 - 1)
    else:
        size = size_option

    # For the 'get' action, simply omit the size option
    if action == 'get':
        size = None

    # Run testing command
    result = virsh.migrate_compcache(vm_ref, size=size)
    logging.debug(result)

    remote_uri = params.get("compcache_remote_uri")
    remote_host = params.get("migrate_dest_host")
    remote_user = params.get("migrate_dest_user", "root")
    remote_pwd = params.get("migrate_dest_pwd")
    check_job_compcache = False
    compressed_size = None
    if not remote_host.count(
            "EXAMPLE") and size is not None and expect_succeed:
        # Config ssh autologin for remote host
        ssh_key.setup_ssh_key(remote_host, remote_user, remote_pwd, port=22)
        if vm.is_dead():
            vm.start()
        if vm.is_paused():
            vm.resume()
        vm.wait_for_login()
        # Do actual migration to verify compression cache of migrate jobs
        command = ("virsh migrate %s %s --compressed --unsafe --verbose" %
                   (vm_name, remote_uri))
        logging.debug("Start migrating: %s", command)
        p = subprocess.Popen(command,
                             shell=True,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)

        # Give enough time for starting job
        t = 0
        while t < 5:
            jobinfo = virsh.domjobinfo(vm_ref, debug=True,
                                       ignore_status=True).stdout
            jobtype = "None"
            for line in jobinfo.splitlines():
                key = line.split(':')[0]
                if key.count("type"):
                    jobtype = line.split(':')[-1].strip()
                elif key.strip() == "Compression cache":
                    compressed_size = line.split(':')[-1].strip()
            if "None" == jobtype or compressed_size is None:
                t += 1
                time.sleep(1)
                continue
            else:
                check_job_compcache = True
                logging.debug("Job started: %s", jobtype)
                break

        if p.poll():
            try:
                p.kill()
            except OSError:
                pass

        # Cleanup in case of successful migration
        utlv.MigrationTest().cleanup_dest_vm(vm, None, remote_uri)

    # Shut down the VM to make sure the compcache setting is cleared
    if vm.is_alive():
        vm.destroy()

    # Check test result
    if expect_succeed:
        if result.exit_status != 0:
            raise error.TestFail(
                'Expected succeed, but failed with result:\n%s' % result)
        if check_job_compcache:
            value = compressed_size.split()[0].strip()
            unit = compressed_size.split()[-1].strip()
            value = int(float(value))
            if unit == "KiB":
                size = int(int(size) / 1024)
            elif unit == "MiB":
                size = int(int(size) / 1048576)
            elif unit == "GiB":
                size = int(int(size) / 1073741824)
            if value != size:
                raise error.TestFail("Compression cache does not match"
                                     " the value that was set")
            return
        else:
            logging.warn("The compression cache size was not verified"
                         " during migration.")
    else:
        if result.exit_status == 0:
            raise error.TestFail(
                'Expected fail, but succeed with result:\n%s' % result)
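
get_page_size, called near the top of the migrate-compcache example, is
not defined in the snippet. With the standard library it is a one-liner;
a sketch:

import resource


def get_page_size():
    """Return the host's memory page size in bytes (typically 4096)."""
    return resource.getpagesize()
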
def run(test, params, env):
    r"""
    Test for virt-win-reg.

    (1).Get parameters from params.
    (2).Build the full command of virt-win-reg.
    (3).Login vm to get a session.
    (4).Prepare for test.
    (5).Run virt-win-reg command.
            Command virt-win-reg is used to export and merge Windows Registry
        entries from a Windows guest. We can do add/remove/modify/query with
        it.

        Example:
        * add:
            Make sure there is no value named AddTest in
            [HKLM\SYSTEM\ControlSet001\Control\ComputerName\ComputerName]
            # cat reg_file.reg
            [HKLM\SYSTEM\ControlSet001\Control\ComputerName\ComputerName]
            "AddTest" = "VIRTTEST"
            # virt-win-reg Guestname/disk --merge reg_file.reg
        * remove:
            # cat reg_file.reg
            [HKLM\SYSTEM\ControlSet001\Control\ComputerName\ComputerName]
            "ComputerName" = -
            # virt-win-reg Guestname/disk --merge reg_file.reg
        * modify:
            # cat reg_file.reg
            [HKLM\SYSTEM\ControlSet001\Control\ComputerName\ComputerName]
            "ComputerName" = "VIRTTEST_v2"
            # virt-win-reg Guestname/disk --merge reg_file.reg
        * query:
            # virt-win-reg domname
              'HKLM\SYSTEM\ControlSet001\Control\ComputerName\ComputerName'
              ComputerName

    (6).Verify the result.
    (7).Clean up.
    """
    try:
        virt_win_reg_exec = os_dep.command("virt-win-reg")
    except ValueError:
        raise error.TestNAError("Not find virt-win-reg command.")
    # Get parameters.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    # Get parameters for remote.
    remote_yes = (params.get("virt_win_reg_remote", "no") == "yes")
    remote_uri = params.get("virt_win_reg_remote_uri", "ENTER.YOUR.REMOTE")
    if remote_yes and remote_uri.count("ENTER"):
        raise error.TestNAError("Remote Test is skipped.")

    # Get parameters about reg value.
    computer_name = params.get("virt_win_reg_computer_name")
    computer_name_v2 = params.get("virt_win_reg_computer_name_v2")
    key_path = params.get("virt_win_reg_key_path")
    value_name = params.get("virt_win_reg_value_name")

    # Get vm_ref.
    vm_ref = params.get("virt_win_reg_vm_ref")
    if vm_ref == "domname":
        vm_ref = vm_name
    elif vm_ref == "image_name":
        disks = vm.get_disk_devices()
        vm_ref = disks.values()[0]['source']

    # Get information about operations.
    operation = params.get("virt_win_reg_operation")
    virt_win_reg_cmd = params.get("virt_win_reg_cmd")
    prepare_reg_cmd = params.get("prepare_reg_cmd")
    verify_reg_cmd = params.get("verify_reg_cmd")
    if not (virt_win_reg_cmd and prepare_reg_cmd and verify_reg_cmd):
        raise error.TestNAError("Missing command for virt_win_reg or"
                                " cmd in guest to check result.")

    # Build a command.
    command = virt_win_reg_exec
    if remote_yes:
        command += " -c %s" % remote_uri
        command += " %s" % vm_name
    else:
        command += " %s" % vm_ref
    command += " %s" % virt_win_reg_cmd

    reg_file = None
    if operation != "query":
        # Prepare a file for virt-win-reg --merge
        reg_file = open(os.path.join(data_dir.get_tmp_dir(), "merge.reg"), "w")
        lines = []
        lines.append("[%s]\n" % key_path)
        if operation == "add":
            lines.append("\"%s\"=\"%s\"" % (value_name, computer_name))
        elif operation == "remove":
            lines.append("\"%s\"=-" % (value_name))
        elif operation == "modify":
            lines.append("\"%s\"=\"%s\"" % (value_name, computer_name_v2))
        reg_file.writelines(lines)
        reg_file.close()

        command += " %s" % reg_file.name

    session = vm.wait_for_login()

    try:
        status, output = session.cmd_status_output(prepare_reg_cmd)
        if status:
            logging.debug("Preparation is already done.")

        vm.destroy()
        result = utils.run(command, ignore_status=True)
        if result.exit_status:
            raise error.TestFail(result)

        output_virt_win_reg = result.stdout.strip()

        if not vm.is_alive():
            vm.start()
            session = vm.wait_for_login()
        status, output = session.cmd_status_output(verify_reg_cmd)
        if operation == "query":
            output_in_guest = output.split()[-1].strip()
            if output_in_guest != output_virt_win_reg:
                raise error.TestFail("Information from virt_win_reg and"
                                     " from the cmd in the guest is not"
                                     " equal.\n"
                                     "virt_win_reg: %s\n"
                                     "cmd_in_guest: %s" %
                                     (output_virt_win_reg, output_in_guest))
        elif operation == "remove":
            if not status:
                raise error.TestFail("Got the value of computer %s in the"
                                     " remove test, which means virt-win-reg"
                                     " failed to remove it." % output)
        elif operation == "modify":
            output_in_guest = output.split()[-1].strip()
            if output_in_guest != computer_name_v2:
                raise error.TestFail("Modify test failed. The value of"
                                     " computer after virt-win-reg is %s,"
                                     " but the expected value is %s." %
                                     (output_in_guest, computer_name_v2))
        elif operation == "add":
            if status:
                raise error.TestFail("Add test failed: getting the"
                                     " computer_name failed after the"
                                     " virt-win-reg command."
                                     " Detail: %s." % output)
    finally:
        # Clean up.
        session.close()
        # remove temp file.
        if reg_file and os.path.exists(reg_file.name):
            os.remove(reg_file.name)
def run(test, params, env):
    """
    Timer device check clock frequency offset using NTP on CPU starved guest:

    1) Check for an appropriate clocksource on host.
    2) Boot the guest.
    3) Copy time-warp-test.c to guest.
    4) Compile the time-warp-test.c.
    5) Stop ntpd and apply load on guest.
    6) Pin every vcpu to a physical cpu.
    7) Verify each vcpu is pinned on host.
    8) Run time-warp-test on guest.
    9) Start ntpd on guest.
    10) Check the drift in /var/lib/ntp/drift file on guest after hours
        of running.

    :param test: QEMU test object.
    :param params: Dictionary with test parameters.
    :param env: Dictionary with the test environment.
    """
    def _drift_file_exist():
        try:
            session.cmd("test -f /var/lib/ntp/drift")
            return True
        except Exception:
            return False

    error.context("Check for an appropriate clocksource on host", logging.info)
    host_cmd = "cat /sys/devices/system/clocksource/"
    host_cmd += "clocksource0/current_clocksource"
    if "tsc" not in utils.system_output(host_cmd):
        raise error.TestNAError("Host must use 'tsc' clocksource")

    error.context("Boot the guest", logging.info)
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()

    timeout = int(params.get("login_timeout", 360))
    sess_guest_load = vm.wait_for_login(timeout=timeout)

    error.context("Copy time-warp-test.c to guest", logging.info)
    src_file_name = os.path.join(data_dir.get_deps_dir(), "time_warp",
                                 "time-warp-test.c")
    vm.copy_files_to(src_file_name, "/tmp")

    error.context("Compile the time-warp-test.c", logging.info)
    cmd = "cd /tmp/;"
    cmd += " yum install -y popt-devel;"
    cmd += " rm -f time-warp-test;"
    cmd += " gcc -Wall -o time-warp-test time-warp-test.c -lrt"
    sess_guest_load.cmd(cmd)

    error.context("Stop ntpd and apply load on guest", logging.info)
    sess_guest_load.cmd("yum install -y ntp; service ntpd stop")
    load_cmd = "for ((I=0; I<`grep 'processor id' /proc/cpuinfo| wc -l`; I++));"
    load_cmd += " do taskset $(( 1 << $I )) /bin/bash -c 'for ((;;)); do X=1; done &';"
    load_cmd += " done"
    sess_guest_load.sendline(load_cmd)

    error.context("Pin every vcpu to a physical cpu", logging.info)
    host_cpu_cnt_cmd = params["host_cpu_cnt_cmd"]
    host_cpu_num = utils.system_output(host_cpu_cnt_cmd).strip()
    host_cpu_list = range(int(host_cpu_num))
    cpu_pin_list = zip(vm.vcpu_threads, host_cpu_list)
    if len(cpu_pin_list) < len(vm.vcpu_threads):
        raise error.TestNAError("There isn't enough physical cpu to"
                                " pin all the vcpus")
    for vcpu, pcpu in cpu_pin_list:
        utils.system("taskset -p %s %s" % (1 << pcpu, vcpu))

    error.context("Verify each vcpu is pinned on host", logging.info)

    error.context("Run time-warp-test", logging.info)
    session = vm.wait_for_login(timeout=timeout)
    cmd = "/tmp/time-warp-test > /dev/null &"
    session.sendline(cmd)

    error.context("Start ntpd on guest", logging.info)
    cmd = "service ntpd start; sleep 1; echo"
    session.cmd(cmd)

    error.context("Check if the drift file exists on guest", logging.info)
    test_run_timeout = float(params["test_run_timeout"])
    try:
        utils_misc.wait_for(_drift_file_exist, test_run_timeout, step=5)
    except aexpect.ShellCmdError, detail:
        raise error.TestError("Failed to wait for the creation of"
                              " /var/lib/ntp/drift file. Detail: '%s'" %
                              detail)
def run(test, params, env):
    """
    Test virsh {at|de}tach-interface command.

    1) Prepare test environment and its parameters
    2) Attach the required interface
    3) According to the test type (attach only, or both attach and detach):
       a. Go on to test detach (if attaching succeeded)
       b. Return GOOD or raise TestFail (if attaching failed)
    4) Check if attached interface is correct:
       a.Try to catch it in vm's XML file
       b.Try to catch it in vm
    5) Detach the attached interface
    6) Check result
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    # Test parameters
    uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri",
                                                      "default"))
    vm_ref = params.get("at_detach_iface_vm_ref", "domname")
    options_suffix = params.get("at_detach_iface_options_suffix", "")
    status_error = "yes" == params.get("status_error", "no")
    start_vm = params.get("start_vm")
    # Attach must pass for the detach test.
    correct_attach = "yes" == params.get("correct_attach", "no")

    # Interface specific attributes.
    iface_type = params.get("at_detach_iface_type", "network")
    if iface_type == "bridge":
        try:
            utils_misc.find_command("brctl")
        except ValueError:
            raise error.TestNAError("Command 'brctl' is missing. You must "
                                    "install it.")

    iface_source = params.get("at_detach_iface_source", "default")
    iface_mac = params.get("at_detach_iface_mac", "created")
    virsh_dargs = {'ignore_status': True, 'uri': uri}

    # Get a bridge name for test if iface_type is bridge.
    # If there is no bridge other than virbr0, raise TestNAError
    if iface_type == "bridge":
        host_bridge = utils_net.Bridge()
        bridge_list = host_bridge.list_br()
        try:
            bridge_list.remove("virbr0")
        except ValueError:
            pass  # No virbr0 present; that is fine
        logging.debug("Useful bridges:%s", bridge_list)
        # just choosing one bridge on host.
        if len(bridge_list):
            iface_source = bridge_list[0]
        else:
            raise error.TestNAError("No useful bridge on host "
                                    "other than 'virbr0'.")

    dom_uuid = vm.get_uuid()
    dom_id = vm.get_id()

    # To confirm vm's state
    if start_vm == "no" and vm.is_alive():
        vm.destroy()

    # Test both attach and detach, so collect info for both of them
    # for the result check. fail_flag is set to 1 when something is
    # wrong with the interface.
    fail_flag = 0
    result_info = []

    # Set attach-interface domain
    if vm_ref == "domname":
        vm_ref = vm_name
    elif vm_ref == "domid":
        vm_ref = dom_id
    elif vm_ref == "domuuid":
        vm_ref = dom_uuid
    elif vm_ref == "hexdomid" and dom_id is not None:
        vm_ref = hex(int(dom_id))

    # Get a mac address if iface_mac is 'created'.
    if iface_mac == "created" or correct_attach:
        iface_mac = utils_net.generate_mac_address_simple()

    # Set attach-interface options and Start attach-interface test
    if correct_attach:
        options = set_options("network", "default", iface_mac, "", "attach")
        attach_result = virsh.attach_interface(vm_name, options, **virsh_dargs)
    else:
        options = set_options(iface_type, iface_source, iface_mac,
                              options_suffix, "attach")
        attach_result = virsh.attach_interface(vm_ref, options, **virsh_dargs)
    attach_status = attach_result.exit_status
    logging.debug(attach_result)

    # If attach interface failed.
    if attach_status:
        if not status_error:
            fail_flag = 1
            result_info.append("Attach Failed: %s" % attach_result)
        else:
            # Expected failure; the flag is only used to exit early,
            # not to mark the test as failed.
            fail_flag = 1
    # If attach interface succeeded.
    else:
        if status_error and not correct_attach:
            fail_flag = 1
            result_info.append("Attach Success with wrong command.")

    if fail_flag and start_vm == "yes":
        vm.destroy()
        if len(result_info):
            raise error.TestFail(result_info)
        else:
            # Exit; this is an expected-error test for attach-interface.
            return

    # Check dumpxml file whether the interface is added successfully.
    status, ret = check_dumpxml_iface(vm_name, iface_mac, iface_type,
                                      iface_source)
    if status:
        fail_flag = 1
        result_info.append(ret)

    # Login to domain to check new interface.
    if not vm.is_alive():
        vm.start()
    elif vm.state() == "paused":
        vm.resume()

    status, ret = login_to_check(vm, iface_mac)
    if status:
        fail_flag = 1
        result_info.append(ret)

    # Set detach-interface options
    options = set_options(iface_type, None, iface_mac, options_suffix,
                          "detach")

    # Start detach-interface test
    detach_result = virsh.detach_interface(vm_ref, options, **virsh_dargs)
    detach_status = detach_result.exit_status

    logging.debug(detach_result)

    # Clean up: detach the interface if it is still present in the
    # domain XML (check_dumpxml_iface returns a zero status when the
    # interface is found).
    if not check_dumpxml_iface(vm_name, iface_mac)[0]:
        cleanup_options = "--type %s --mac %s" % (iface_type, iface_mac)
        virsh.detach_interface(vm_ref, cleanup_options, **virsh_dargs)

    # Shut down the vm in case the cleanup above failed
    if vm.is_alive():
        vm.destroy()

    # Check results.
    if status_error:
        if detach_status == 0:
            raise error.TestFail("Detach Success with wrong command.")
    else:
        if detach_status != 0:
            raise error.TestFail("Detach Failed.")
        else:
            if fail_flag:
                raise error.TestFail("Attach-Detach Success but "
                                     "something wrong with its "
                                     "functional use:%s" % result_info)
def run(test, params, env):
    """
    Test virsh undefine command.

    Undefine an inactive domain, or convert persistent to transient.
    1.Prepare test environment.
    2.Backup the VM's information to a xml file.
    3.When the libvirtd == "off", stop the libvirtd service.
    4.Perform virsh undefine operation.
    5.Recover test environment (libvirtd service, VM).
    6.Confirm the test result.
    """

    vm_ref = params.get("undefine_vm_ref", "vm_name")
    extra = params.get("undefine_extra", "")
    option = params.get("undefine_option", "")
    libvirtd_state = params.get("libvirtd", "on")
    status_error = ("yes" == params.get("status_error", "no"))
    undefine_twice = ("yes" == params.get("undefine_twice", 'no'))
    local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
    local_pwd = params.get("local_pwd", "password")
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    remote_user = params.get("remote_user", "user")
    remote_pwd = params.get("remote_pwd", "password")
    remote_prompt = params.get("remote_prompt", "#")
    pool_type = params.get("pool_type")
    pool_name = params.get("pool_name", "test")
    pool_target = params.get("pool_target")
    volume_size = params.get("volume_size", "1G")
    vol_name = params.get("vol_name", "test_vol")
    emulated_img = params.get("emulated_img", "emulated_img")
    emulated_size = "%sG" % (int(volume_size[:-1]) + 1)
    disk_target = params.get("disk_target", "vdb")
    wipe_data = "yes" == params.get("wipe_data", "no")
    if wipe_data:
        option += " --wipe-storage"

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)
    vm_id = vm.get_id()
    vm_uuid = vm.get_uuid()

    # polkit acl related params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    # Back up the xml file. A Xen host has no guest xml file with which
    # to define a guest.
    backup_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Confirm how to reference a VM.
    if vm_ref == "vm_name":
        vm_ref = vm_name
    elif vm_ref == "id":
        vm_ref = vm_id
    elif vm_ref == "hex_vm_id":
        vm_ref = hex(int(vm_id))
    elif vm_ref == "uuid":
        vm_ref = vm_uuid
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)

    volume = None
    pvtest = None
    status3 = None

    elems = backup_xml.xmltreefile.findall('/devices/disk/source')
    existing_images = [elem.get('file') for elem in elems]

    # Backup images since remove-all-storage could remove existing libvirt
    # managed guest images
    if existing_images and option.count("remove-all-storage"):
        for img in existing_images:
            backup_img = img + '.bak'
            logging.info('Backup %s to %s', img, backup_img)
            shutil.copyfile(img, backup_img)

    try:
        save_file = "/var/lib/libvirt/qemu/save/%s.save" % vm_name
        if option.count("managedsave") and vm.is_alive():
            virsh.managedsave(vm_name)

        if not vm.is_lxc():
            snp_list = virsh.snapshot_list(vm_name)
            if option.count("snapshot"):
                snp_file_list = []
                if not len(snp_list):
                    virsh.snapshot_create(vm_name)
                    logging.debug("Create a snapshot for test!")
                else:
                    # Backup snapshots for domain
                    for snp_item in snp_list:
                        tmp_file = os.path.join(test.tmpdir, snp_item + ".xml")
                        virsh.snapshot_dumpxml(vm_name, snp_item, to_file=tmp_file)
                        snp_file_list.append(tmp_file)
            else:
                if len(snp_list):
                    raise error.TestNAError("This domain has snapshot(s), "
                                            "cannot be undefined!")
        if option.count("remove-all-storage"):
            pvtest = utlv.PoolVolumeTest(test, params)
            pvtest.pre_pool(pool_name, pool_type, pool_target, emulated_img,
                            emulated_size=emulated_size)
            new_pool = libvirt_storage.PoolVolume(pool_name)
            if not new_pool.create_volume(vol_name, volume_size):
                raise error.TestFail("Creation of volume %s failed." % vol_name)
            volumes = new_pool.list_volumes()
            volume = volumes[vol_name]
            virsh.attach_disk(vm_name, volume, disk_target, "--config")

        # Turn libvirtd into certain state.
        if libvirtd_state == "off":
            utils_libvirtd.libvirtd_stop()

        # Test virsh undefine command.
        output = ""
        if vm_ref != "remote":
            vm_ref = "%s %s" % (vm_ref, extra)
            cmdresult = virsh.undefine(vm_ref, option,
                                       unprivileged_user=unprivileged_user,
                                       uri=uri,
                                       ignore_status=True, debug=True)
            status = cmdresult.exit_status
            output = cmdresult.stdout.strip()
            if status:
                logging.debug("Error status, command output: %s",
                              cmdresult.stderr.strip())
            if undefine_twice:
                status2 = virsh.undefine(vm_ref,
                                         ignore_status=True).exit_status
        else:
            if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
                raise error.TestNAError("remote_ip and/or local_ip parameters"
                                        " not changed from default values")
            try:
                local_user = params.get("username", "root")
                uri = libvirt_vm.complete_uri(local_ip)
                # setup ssh auto login from remote machine to test machine
                # for the command to execute remotely
                ssh_key.setup_remote_ssh_key(remote_ip, remote_user,
                                             remote_pwd, hostname2=local_ip,
                                             user2=local_user,
                                             password2=local_pwd)
                session = remote.remote_login("ssh", remote_ip, "22",
                                              remote_user, remote_pwd,
                                              remote_prompt)
                cmd_undefine = "virsh -c %s undefine %s" % (uri, vm_name)
                status, output = session.cmd_status_output(cmd_undefine)
                logging.info("Undefine output: %s", output)
            except (process.CmdError, remote.LoginError, aexpect.ShellError), de:
                logging.error("Detail: %s", de)
                status = 1

        # Recover libvirtd state.
        if libvirtd_state == "off":
            utils_libvirtd.libvirtd_start()

        # Shutdown VM.
        if virsh.domain_exists(vm.name):
            try:
                if vm.is_alive():
                    vm.destroy(gracefully=False)
            except process.CmdError, detail:
                logging.error("Detail: %s", detail)
Example #15
  <bridge name='%s' stp='on' delay='0' />
  <mac address='52:54:00:03:78:6c'/>
  <ip address='192.168.100.1' netmask='255.255.255.0'>
    <dhcp>
      <range start='192.168.100.2' end='192.168.100.254' />
    </dhcp>
  </ip>
</network>
""" % (net_name, net_name)

    try:
        test_xml = network_xml.NetworkXML(network_name=net_name)
        test_xml.xml = virtual_net
        test_xml.define()
    except xcepts.LibvirtXMLError, detail:
        raise error.TestNAError("Failed to define a test network.\n"
                                "Detail: %s." % detail)

    # Run test case
    try:
        edit_net_xml()

        cmd_result = virsh.net_dumpxml(net_name, debug=True)
        if cmd_result.exit_status:
            raise error.TestFail("Failed to dump xml of virtual network %s" %
                                 net_name)

        # The xml should contain the match_string
        match_string = "100.253"
        xml = cmd_result.stdout.strip()
        if not re.search(match_string, xml):
            raise error.TestFail("The xml is not expected")
        # Check result
        if not libvirtd_inst.is_running():
            raise error.TestFail("Libvirtd is not running after run command.")
        if status_error:
            if not status:
                # Bug 853673
                err_msg = "Expected failure but ran successfully, please check Bug: "
                err_msg += "https://bugzilla.redhat.com/show_bug.cgi?id=853673"
                err_msg += " for more info"
                raise error.TestFail(err_msg)
            else:
                logging.debug("Command failed as expected.")
        else:
            if status:
                if cmd_result.stderr.count("not responding"):
                    raise error.TestNAError(cmd_result.stderr.strip())
                if cmd.count("guest-shutdown") and\
                   cmd_result.stderr.count("Missing monitor reply object"):
                    err_msg = "Please check bug: "
                    err_msg += "https://bugzilla.redhat.com/show_bug.cgi?id="
                    err_msg += "1050843 for more info"
                    logging.error(err_msg)
                if "--async" in options:
                    err_msg = "Please check bug: "
                    err_msg += "https://bugzilla.redhat.com/show_bug.cgi?id="
                    err_msg += "1099060 for more info"
                    logging.error(err_msg)
                raise error.TestFail("Expected success, but the command failed.")
    finally:
        # Cleanup
        reset_env(vm_name, xml_file)
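
# A minimal sketch of the "libvirtd must still be running after the command"
# check above, assuming virttest's utils_libvirtd API (the same one used for
# libvirtd_stop/start/restart elsewhere in this file); standalone helper.
from virttest import utils_libvirtd


def assert_libvirtd_alive():
    """Raise if the libvirtd service is not running."""
    if not utils_libvirtd.Libvirtd().is_running():
        raise RuntimeError("libvirtd is not running after the command")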
def run(test, params, env):
    """
    Test command: virsh net-dumpxml.

    This command can output the network information as an XML dump to stdout.
    1.Get all parameters from config file.
    2.If test case's network status is inactive, destroy it.
    3.Perform virsh net-dumpxml operation.
    4.Recover test environment(network status).
    5.Confirm the test result.
    """
    status_error = params.get("status_error", "no")
    net_ref = params.get("net_dumpxml_net_ref")
    net_name = params.get("net_dumpxml_network", "default")
    net_status = params.get("net_dumpxml_network_status", "active")
    xml_file = params.get("net_dumpxml_xml_file", "default.xml")
    extra = params.get("net_dumpxml_extra", "")
    network_xml = os.path.join(test.tmpdir, xml_file)

    # acl polkit params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    # Run test case
    if net_ref == "uuid":
        net_ref = virsh.net_uuid(net_name).stdout.strip()
    elif net_ref == "name":
        net_ref = net_name

    net_status_current = "active"
    if not virsh.net_state_dict()[net_name]['active']:
        net_status_current = "inactive"

    if not virsh.net_state_dict()[net_name]['persistent']:
        raise error.TestError("Network is transient!")
    try:
        if net_status == "inactive" and net_status_current == "active":
            status_destroy = virsh.net_destroy(net_name,
                                               ignore_status=True).exit_status
            if status_destroy != 0:
                raise error.TestError("Network destroied failed!")

        virsh_dargs = {'ignore_status': True}
        if params.get('setup_libvirt_polkit') == 'yes':
            virsh_dargs['unprivileged_user'] = unprivileged_user
            virsh_dargs['uri'] = uri
        result = virsh.net_dumpxml(net_ref, extra, network_xml, **virsh_dargs)
        status = result.exit_status
        err = result.stderr.strip()
        xml_validate_cmd = "virt-xml-validate %s network" % network_xml
        valid_s = utils.run(xml_validate_cmd, ignore_status=True).exit_status

        # Check option valid or not.
        if extra.find("--") != -1:
            options = extra.split("--")
            for option in options:
                if option.strip() == "":
                    continue
                if not virsh.has_command_help_match("net-dumpxml",
                                                    option.strip()) and\
                   status_error == "no":
                    raise error.TestNAError("The current libvirt version"
                                            " doesn't support '%s' option" %
                                            option.strip())
    finally:
        # Recover network
        if net_status == "inactive" and net_status_current == "active":
            status_start = virsh.net_start(net_name,
                                           ignore_status=True).exit_status
            if status_start != 0:
                raise error.TestError("Network started failed!")

    # Check status_error
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Run successfully with wrong command!")
        if err == "":
            raise error.TestFail("The wrong command has no error outputed!")
    elif status_error == "no":
        if status != 0:
            raise error.TestFail("Run failed with right command!")
        if valid_s != 0:
            raise error.TestFail("Command output is invalid!")
    else:
        raise error.TestError("The status_error must be 'yes' or 'no'!")
def reset_domain(vm,
                 vm_state,
                 needs_agent=False,
                 guest_cpu_busy=False,
                 password=None):
    """
    Setup guest agent in domain.

    :param vm: the vm object
    :param vm_state: the target vm state, e.g. "shut off", "running",
                     "paused", "halt" or "pm_suspend"
    :param needs_agent: whether to set up the qemu guest agent
    :param guest_cpu_busy: whether to start a busy loop in the guest
    :param password: guest root password, used to copy the busy-loop script
    """
    if vm.is_alive():
        vm.destroy()
    vm_xml = libvirt_xml.VMXML.new_from_dumpxml(vm.name)
    if needs_agent:
        logging.debug("Attempting to set guest agent channel")
        vm_xml.set_agent_channel(vm.name)
    if not vm_state == "shut off":
        vm.start()
        session = vm.wait_for_login()
        if needs_agent:
            # Check if qemu-ga already started automatically
            session = vm.wait_for_login()
            cmd = "rpm -q qemu-guest-agent || yum install -y qemu-guest-agent"
            stat_install = session.cmd_status(cmd, 300)
            if stat_install != 0:
                raise error.TestFail("Fail to install qemu-guest-agent, make "
                                     "sure that you have usable repo in guest")

            # Check if qemu-ga already started
            stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
            if stat_ps != 0:
                session.cmd("qemu-ga -d")
                # Check if the qemu-ga really started
                stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
                if stat_ps != 0:
                    raise error.TestFail("Fail to run qemu-ga in guest")
        if guest_cpu_busy:
            shell_file = "/tmp/test.sh"
            cpu_detail_list = [
                'while true', 'do', '    j==\${j:+1}', '    j==\${j:-1}',
                'done'
            ]
            remote_file = remote.RemoteFile(vm.get_address(), 'scp', 'root',
                                            password, 22, shell_file)
            remote_file.truncate()
            remote_file.add(cpu_detail_list)
            session.cmd('chmod 777 %s' % shell_file)
            session.cmd('%s &' % shell_file)
    if vm_state == "paused":
        vm.pause()
    elif vm_state == "halt":
        try:
            session.cmd("halt")
        except (aexpect.ShellProcessTerminatedError, aexpect.ShellStatusError):
            # The halt command always gets these errors, but execution is OK,
            # skip these errors
            pass
    elif vm_state == "pm_suspend":
        # Execute "pm-suspend-hybrid" command directly will get Timeout error,
        # so here execute it in background, and wait for 3s manually
        if session.cmd_status("which pm-suspend-hybrid"):
            raise error.TestNAError("Cannot execute this test for domain"
                                    " doesn't have pm-suspend-hybrid command!")
        session.cmd("pm-suspend-hybrid &")
        time.sleep(3)
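
# A minimal usage sketch for reset_domain(), assuming the usual avocado-vt
# entry point where the harness supplies test/params/env; the parameter names
# ("main_vm", "password") follow the conventions used elsewhere in this file.
def example_run(test, params, env):
    """Drive reset_domain() for a running guest with the agent enabled."""
    vm = env.get_vm(params.get("main_vm"))
    reset_domain(vm, "running", needs_agent=True, guest_cpu_busy=False,
                 password=params.get("password"))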
def run(test, params, env):
    """
    Test the virsh pool commands

    (1) Define a given type pool
    (2) List pool with '--inactive --type' options
    (3) Dumpxml for the pool
    (4) Undefine the pool
    (5) Define pool by using the XML file in step (3)
    (6) Build the pool (except for 'disk' type pools)
        For 'fs' type pool, cover --overwrite and --no-overwrite options
    (7) Start the pool
    (8) List pool with '--persistent --type' options
    (9) Mark pool autostart
    (10) List pool with '--autostart --type' options
    (11) Restart libvirtd and list pool with '--autostart --persistent' options
    (12) Destroy the pool
    (13) Unmark pool autostart
    (14) Repeat step (11)
    (15) Start the pool
    (16) Get pool info
    (17) Get pool uuid by name
    (18) Get pool name by uuid
    (19) Refresh the pool
         For 'dir' type pool, touch a file under target path and refresh again
         to make the new file show in vol-list.
    (20) Check pool 'Capacity', 'Allocation' and 'Available'
         Create an oversize vol in the pool (expect failure), then check these values
    (21) Undefine the pool, and this should fail as pool is still active
    (22) Destroy the pool
    (23) Delete pool for 'dir' type pool. After the command, the pool object
         will still exist but target path will be deleted
    (24) Undefine the pool
    """

    # Initialize the variables
    pool_name = params.get("pool_name", "temp_pool_1")
    pool_type = params.get("pool_type", "dir")
    pool_target = params.get("pool_target", "")
    source_format = params.get("source_format", "")
    source_name = params.get("pool_source_name", "gluster-vol1")
    source_path = params.get("pool_source_path", "/")
    # The file for dumped pool xml
    pool_xml = os.path.join(test.tmpdir, "pool.xml.tmp")
    if os.path.dirname(pool_target) == "":
        pool_target = os.path.join(test.tmpdir, pool_target)
    vol_name = params.get("vol_name", "temp_vol_1")
    # Use pool name as VG name
    status_error = "yes" == params.get("status_error", "no")
    vol_path = os.path.join(pool_target, vol_name)
    ip_protocal = params.get('ip_protocal', 'ipv4')

    if not libvirt_version.version_compare(1, 0, 0):
        if pool_type == "gluster":
            raise error.TestNAError("Gluster pool is not supported in current"
                                    " libvirt version.")

    def check_exit_status(result, expect_error=False):
        """
        Check the exit status of virsh commands.

        :param result: Virsh command result object
        :param expect_error: Boolean value, expect command success or fail
        """
        if not expect_error:
            if result.exit_status != 0:
                raise error.TestFail(result.stderr)
            else:
                logging.debug("Command output:\n%s", result.stdout.strip())
        elif expect_error and result.exit_status == 0:
            raise error.TestFail("Expect fail, but run successfully.")

    def check_pool_list(pool_name, option="--all", expect_error=False):
        """
        Check pool by running pool-list command with given option.

        :param pool_name: Name of the pool
        :param option: option for pool-list command
        :param expect_error: Boolean value, expect command success or fail
        """
        found = False
        # Get the list stored in a variable
        result = virsh.pool_list(option, ignore_status=True)
        check_exit_status(result, False)
        output = re.findall(r"(\S+)\ +(\S+)\ +(\S+)[\ +\n]",
                            str(result.stdout))
        for item in output:
            if pool_name in item[0]:
                found = True
                break
        if found:
            logging.debug("Find pool '%s' in pool list.", pool_name)
        else:
            logging.debug("Not find pool %s in pool list.", pool_name)
        if expect_error and found:
            raise error.TestFail("Unexpected pool '%s' exists." % pool_name)
        if not expect_error and not found:
            raise error.TestFail("Expected pool '%s' to exist, but it was"
                                 " not found." % pool_name)

    def check_vol_list(vol_name, pool_name):
        """
        Check volume from the list

        :param vol_name: Name of the volume
        :param pool_name: Name of the pool
        """
        found = False
        # Get the volume list stored in a variable
        result = virsh.vol_list(pool_name, ignore_status=True)
        check_exit_status(result)

        output = re.findall(r"(\S+)\ +(\S+)[\ +\n]", str(result.stdout))
        for item in output:
            if vol_name in item[0]:
                found = True
                break
        if found:
            logging.debug("Find volume '%s' in pool '%s'.", vol_name,
                          pool_name)
        else:
            raise error.TestFail("Not find volume '%s' in pool '%s'." %
                                 (vol_name, pool_name))

    def check_pool_info(pool_info, check_point, value):
        """
        Check the pool name, uuid, etc.

        :param pool_info: A dict containing the pool's information
        :param check_point: Key of the pool info dict; available values: Name,
                            UUID, State, Persistent, Autostart, Capacity,
                            Allocation, Available
        :param value: Expected value of pool_info[check_point]
        """
        if pool_info is None:
            raise error.TestFail("Pool info dictionary is needed.")
        if pool_info[check_point] == value:
            logging.debug("Pool '%s' is '%s'.", check_point, value)
        else:
            raise error.TestFail("Pool '%s' isn't '%s'." %
                                 (check_point, value))

    # Stop multipathd to avoid pool start failure (for fs-like pools, the
    # newly added disk may be claimed by device-mapper, so pool start would
    # report a "disk already mounted" error).
    multipathd = service.Factory.create_service("multipathd")
    multipathd_status = multipathd.status()
    if multipathd_status:
        multipathd.stop()

    # Run Testcase
    pvt = utlv.PoolVolumeTest(test, params)
    emulated_image = "emulated-image"
    kwargs = {
        'image_size': '1G',
        'pre_disk_vol': ['1M'],
        'source_name': source_name,
        'source_path': source_path,
        'source_format': source_format,
        'persistent': True,
        'ip_protocal': ip_protocal
    }
    try:
        _pool = libvirt_storage.StoragePool()
        # Step (1)
        # Pool define
        pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image,
                     **kwargs)

        # Step (2)
        # Pool list
        option = "--inactive --type %s" % pool_type
        check_pool_list(pool_name, option)

        # Step (3)
        # Pool dumpxml
        xml = virsh.pool_dumpxml(pool_name, to_file=pool_xml)
        logging.debug("Pool '%s' XML:\n%s", pool_name, xml)

        # Step (4)
        # Undefine pool
        result = virsh.pool_undefine(pool_name, ignore_status=True)
        check_exit_status(result)
        check_pool_list(pool_name, "--all", True)

        # Step (5)
        # Define pool from XML file
        result = virsh.pool_define(pool_xml)
        check_exit_status(result, status_error)

        # Step (6)
        # Build pool; this step may fail for 'disk' and 'logical' type pools
        if pool_type not in ["disk", "logical"]:
            option = ""
            # Options --overwrite and --no-overwrite can only be used to
            # build a filesystem pool, but it will fail for now
            # if pool_type == "fs":
            #    option = '--overwrite'
            result = virsh.pool_build(pool_name, option, ignore_status=True)
            check_exit_status(result)

        # Step (7)
        # Pool start
        result = virsh.pool_start(pool_name, ignore_status=True)
        check_exit_status(result)

        # Step (8)
        # Pool list
        option = "--persistent --type %s" % pool_type
        check_pool_list(pool_name, option)

        # Step (9)
        # Pool autostart
        result = virsh.pool_autostart(pool_name, ignore_status=True)
        check_exit_status(result)

        # Step (10)
        # Pool list
        option = "--autostart --type %s" % pool_type
        check_pool_list(pool_name, option)

        # Step (11)
        # Restart libvirtd and check the autostart pool
        utils_libvirtd.libvirtd_restart()
        option = "--autostart --persistent"
        check_pool_list(pool_name, option)

        # Step (12)
        # Pool destroy
        if virsh.pool_destroy(pool_name):
            logging.debug("Pool %s destroyed.", pool_name)
        else:
            raise error.TestFail("Destroy pool % failed." % pool_name)

        # Step (13)
        # Pool autostart disable
        result = virsh.pool_autostart(pool_name,
                                      "--disable",
                                      ignore_status=True)
        check_exit_status(result)

        # Step (14)
        # Repeat step (11)
        utils_libvirtd.libvirtd_restart()
        option = "--autostart"
        check_pool_list(pool_name, option, True)

        # Step (15)
        # Pool start
        # When libvirtd starts up, it'll check to see if any of the storage
        # pools have been activated externally. If so, then it'll mark the
        # pool as active. This is independent of autostart.
        # So a directory-based storage pool is pretty much always active,
        # as is the SCSI pool.
        if pool_type not in ["dir", 'scsi']:
            result = virsh.pool_start(pool_name, ignore_status=True)
            check_exit_status(result)

        # Step (16)
        # Pool info
        pool_info = _pool.pool_info(pool_name)
        logging.debug("Pool '%s' info:\n%s", pool_name, pool_info)

        # Step (17)
        # Pool UUID
        result = virsh.pool_uuid(pool_info["Name"], ignore_status=True)
        check_exit_status(result)
        check_pool_info(pool_info, "UUID", result.stdout.strip())

        # Step (18)
        # Pool Name
        result = virsh.pool_name(pool_info["UUID"], ignore_status=True)
        check_exit_status(result)
        check_pool_info(pool_info, "Name", result.stdout.strip())

        # Step (19)
        # Pool refresh for 'dir' type pool
        if pool_type == "dir":
            os.mknod(vol_path)
            result = virsh.pool_refresh(pool_name)
            check_exit_status(result)
            check_vol_list(vol_name, pool_name)

        # Step (20)
        # Create an over size vol in pool(expect fail), then check pool:
        # 'Capacity', 'Allocation' and 'Available'
        # For NFS type pool, there's a bug (BZ#1077068) with volume
        # allocation, and gluster pools don't support volume creation,
        # so they are not tested here
        if pool_type != "netfs":
            vol_capacity = "10000G"
            vol_allocation = "10000G"
            result = virsh.vol_create_as("oversize_vol", pool_name,
                                         vol_capacity, vol_allocation, "raw")
            check_exit_status(result, True)
            new_info = _pool.pool_info(pool_name)
            check_pool_info(pool_info, "Capacity", new_info['Capacity'])
            check_pool_info(pool_info, "Allocation", new_info['Allocation'])
            check_pool_info(pool_info, "Available", new_info['Available'])

        # Step (21)
        # Undefine pool, this should fail as the pool is active
        result = virsh.pool_undefine(pool_name, ignore_status=True)
        check_exit_status(result, expect_error=True)
        check_pool_list(pool_name, "", False)

        # Step (22)
        # Pool destroy
        if virsh.pool_destroy(pool_name):
            logging.debug("Pool %s destroyed.", pool_name)
        else:
            raise error.TestFail("Destroy pool % failed." % pool_name)

        # Step (23)
        # Pool delete for 'dir' type pool
        if pool_type == "dir":
            for f in os.listdir(pool_target):
                os.remove(os.path.join(pool_target, f))
            result = virsh.pool_delete(pool_name, ignore_status=True)
            check_exit_status(result)
            option = "--inactive --type %s" % pool_type
            check_pool_list(pool_name, option)
            if os.path.exists(pool_target):
                raise error.TestFail("The target path '%s' still exist." %
                                     pool_target)
            result = virsh.pool_start(pool_name, ignore_status=True)
            check_exit_status(result, True)

        # Step (24)
        # Pool undefine
        result = virsh.pool_undefine(pool_name, ignore_status=True)
        check_exit_status(result)
        check_pool_list(pool_name, "--all", True)
    finally:
        # Clean up
        try:
            pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image,
                             **kwargs)
        except error.TestFail, detail:
            logging.error(str(detail))
        if multipathd_status:
            multipathd.start()
        if os.path.exists(pool_xml):
            os.remove(pool_xml)
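
# A minimal sketch of the define/start/destroy/undefine life cycle exercised
# in steps (1)-(24) above, assuming the same virsh wrapper; the XML path and
# pool name are placeholders. Note that pool_destroy returns a plain boolean
# in this wrapper (see step (12)), while the other calls return result objects.
from virttest import virsh


def pool_lifecycle(pool_xml_path, pool_name):
    """Walk a pool through define -> start -> destroy -> undefine."""
    if virsh.pool_define(pool_xml_path).exit_status:
        raise RuntimeError("pool-define failed")
    if virsh.pool_start(pool_name, ignore_status=True).exit_status:
        raise RuntimeError("pool-start failed")
    if not virsh.pool_destroy(pool_name):
        raise RuntimeError("pool-destroy failed")
    if virsh.pool_undefine(pool_name, ignore_status=True).exit_status:
        raise RuntimeError("pool-undefine failed")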
Example #20
def run(test, params, env):
    """
    Test the command virsh vcpucount

    (1) Iterate perform setvcpus operation with four valid options.
    (2) Iterate call virsh vcpucount with given options.
    (3) Check whether the virsh vcpucount works as expected.
    (4) Recover test environment.

    The test works for domain states "shut off" and "running"; it checks the
    vcpucount result after vcpu hotplug using setvcpus.

    For setvcpus, include four valid options:
      --config
      --config --maximum
      --live
      --guest

    For vcpucount options, at most 2 options are combined. Upstream libvirt
    supports more option combinations now (e.g. 3 options together or a
    single --maximum option); for backward compatibility, only the following
    options are checked:
      None
      --config --active
      --config --maximum
      --live --active
      --live --maximum
      --current --active
      --current --maximum
      --guest
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    xml_file = params.get("vcpucount_xml_file", "vm.xml")
    virsh.dumpxml(vm_name, extra="--inactive", to_file=xml_file)
    pre_vm_state = params.get("vcpucount_pre_vm_state")
    options = params.get("vcpucount_options")
    status_error = params.get("status_error")
    set_option = ["--config", "--config --maximum", "--live", "--guest"]

    # At most 2 options may be combined
    if len(options.split()) > 2:
        raise error.TestNAError("Combining more than 2 options is not supported")

    # Prepare domain
    reset_domain(vm, pre_vm_state, ("--guest" in options))

    # Perform guest vcpu hotplug
    for i in range(len(set_option)):
        # Hotplug domain vcpu
        result = virsh.setvcpus(vm_name,
                                2,
                                set_option[i],
                                ignore_status=True,
                                debug=True)
        setvcpus_status = result.exit_status

        # Call virsh vcpucount with option
        result = virsh.vcpucount(vm_name,
                                 options,
                                 ignore_status=True,
                                 debug=True)
        output = result.stdout.strip()
        vcpucount_status = result.exit_status

        if "--guest" in options:
            if result.stderr.count("doesn't support option") or \
               result.stderr.count("command guest-get-vcpus has not been found"):
                reset_env(vm_name, xml_file)
                raise error.TestNAError("Option %s is not supported" % options)

        # Reset domain
        reset_domain(vm, pre_vm_state, ("--guest" in options))

        # Check result
        if status_error == "yes":
            if vcpucount_status == 0:
                reset_env(vm_name, xml_file)
                raise error.TestFail("Run successfully with wrong command!")
            else:
                logging.info("Run failed as expected")
        else:
            if vcpucount_status != 0:
                reset_env(vm_name, xml_file)
                raise error.TestFail("Run command failed with options %s" %
                                     options)
            elif setvcpus_status == 0:
                if pre_vm_state == "shut off":
                    if i == 0:
                        expect_out = [4, 2]
                        chk_output_shutoff(output, expect_out, options)
                    elif i == 1:
                        expect_out = [2, 1]
                        chk_output_shutoff(output, expect_out, options)
                    else:
                        reset_env(vm_name, xml_file)
                        raise error.TestFail("setvcpus should failed")
                else:
                    if i == 0:
                        expect_out = [4, 4, 2, 1, 1]
                        chk_output_running(output, expect_out, options)
                    elif i == 1:
                        expect_out = [2, 4, 1, 1, 1]
                        chk_output_running(output, expect_out, options)
                    elif i == 2:
                        expect_out = [4, 4, 1, 2, 2]
                        chk_output_running(output, expect_out, options)
                    else:
                        expect_out = [4, 4, 1, 1, 2]
                        chk_output_running(output, expect_out, options)
            else:
                if pre_vm_state == "shut off":
                    expect_out = [4, 1]
                    chk_output_shutoff(output, expect_out, options)
                else:
                    expect_out = [4, 4, 1, 1, 1]
                    chk_output_running(output, expect_out, options)

    # Recover env
    reset_env(vm_name, xml_file)
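
# A minimal sketch of one setvcpus/vcpucount round trip from the loop above,
# assuming the same virsh wrapper; the domain name, count and flag are
# placeholders, and "--live --active" is one of the option pairs the test
# checks.
from virttest import virsh


def hotplug_and_count(vm_name, count=2, flag="--live"):
    """Hotplug vcpus, then read the active count back with matching options."""
    virsh.setvcpus(vm_name, count, flag, ignore_status=True, debug=True)
    result = virsh.vcpucount(vm_name, "%s --active" % flag,
                             ignore_status=True, debug=True)
    return result.stdout.strip()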
Example #21
def run(test, params, env):
    """
    Test command: virsh change-media.

    The command changes the media used by CD or floppy drives.

    Test steps:
    1. Prepare test environment.
    2. Perform virsh change-media operation.
    3. Recover test environment.
    4. Confirm the test result.
    """
    @error.context_aware
    def env_pre(old_iso, new_iso):
        """
        Prepare ISO image for test

        :param old_iso: source file for insert
        :param new_iso: source file for update
        """
        error.context("Preparing ISO images")
        utils.run("dd if=/dev/urandom of=%s/old bs=1M count=1" % iso_dir)
        utils.run("dd if=/dev/urandom of=%s/new bs=1M count=1" % iso_dir)
        utils.run("mkisofs -o %s %s/old" % (old_iso, iso_dir))
        utils.run("mkisofs -o %s %s/new" % (new_iso, iso_dir))

    @error.context_aware
    def check_media(session, target_file, action, rw_test=False):
        """
        Check guest cdrom/floppy files

        :param session: guest session
        :param target_file: the expected files
        :param action: test case action
        """
        if target_device == "hdc" or target_device == "sdc":
            drive_name = session.cmd(
                "cat /proc/sys/dev/cdrom/info | grep -i 'drive name'",
                ignore_all_errors=True).split()[2]
        if action != "--eject ":
            error.context("Checking guest %s files" % target_device)
            if target_device == "hdc" or target_device == "sdc":
                mount_cmd = "mount /dev/%s /media" % drive_name
            else:
                if session.cmd_status("ls /dev/fd0"):
                    session.cmd("mknod /dev/fd0 b 2 0")
                mount_cmd = "mount /dev/fd0 /media"
            session.cmd(mount_cmd)
            if rw_test:
                target_file = "/media/rw_test.txt"
                session.cmd("touch %s" % target_file)
                session.cmd("echo 'Hello World'> %s" % target_file)
                output = session.get_command_output("cat %s" % target_file)
                logging.debug("cat %s output: %s", target_file, output)
            else:
                session.cmd("test -f /media/%s" % target_file)
            session.cmd("umount /media")

        else:
            error.context("Ejecting guest cdrom files")
            if target_device == "hdc" or target_device == "sdc":
                if session.cmd_status("mount /dev/%s /media -o loop" %
                                      drive_name) == 32:
                    logging.info("Eject succeeded")
            else:
                if session.cmd_status("ls /dev/fd0"):
                    session.cmd("mknod /dev/fd0 b 2 0")
                if session.cmd_status("mount /dev/fd0 /media -o loop") == 32:
                    logging.info("Eject succeeded")

    def add_device(vm_name, init_source="''"):
        """
        Add device for test vm

        :param vm_name: guest name
        :param init_source: source file
        """
        if vm.is_alive():
            virsh.destroy(vm_name)

        virsh.attach_disk(vm_name,
                          init_source,
                          target_device,
                          "--type %s --sourcetype file --config" % device_type,
                          debug=True)

    def update_device(vm_name, init_iso, options, start_vm):
        """
        Update device iso file for test case

        :param vm_name: guest name
        :param init_iso: source file
        :param options: update-device option
        :param start_vm: guest start flag
        """
        snippet = """
<disk type='file' device='%s'>
<driver name='qemu' type='raw'/>
<source file='%s'/>
<target dev='%s'/>
<readonly/>
</disk>
""" % (device_type, init_iso, target_device)
        update_iso_file = open(update_iso_xml, "w")
        update_iso_file.write(snippet)
        update_iso_file.close()

        cmd_options = "--force "
        if options == "--config" or start_vm == "no":
            cmd_options += " --config"

        # Give domain the ISO image file
        return virsh.update_device(domainarg=vm_name,
                                   filearg=update_iso_xml,
                                   flagstr=cmd_options,
                                   debug=True)

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vm_ref = params.get("change_media_vm_ref")
    action = params.get("change_media_action")
    start_vm = params.get("start_vm")
    options = params.get("change_media_options")
    device_type = params.get("change_media_device_type", "cdrom")
    target_device = params.get("change_media_target_device", "hdc")
    source_name = params.get("change_media_source")
    status_error = "yes" == params.get("status_error", "no")
    check_file = params.get("change_media_check_file")
    update_iso_xml_name = params.get("change_media_update_iso_xml")
    init_iso_name = params.get("change_media_init_iso")
    old_iso_name = params.get("change_media_old_iso")
    new_iso_name = params.get("change_media_new_iso")
    source_path = params.get("change_media_source_path", "yes")

    if device_type not in ['cdrom', 'floppy']:
        raise error.TestNAError("Got a invalid device type:/n%s" % device_type)

    try:
        utils_path.find_command("mkisofs")
    except utils_path.CmdNotFoundError:
        raise error.TestNAError("Command 'mkisofs' is missing. You must "
                                "install it (try 'genisoimage' package.")

    # Check virsh command option
    if options and not status_error:
        libvirt.virsh_cmd_has_option('change-media', options)

    # Backup for recovery.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    iso_dir = os.path.join(data_dir.get_tmp_dir(), "tmp")
    old_iso = os.path.join(iso_dir, old_iso_name)
    new_iso = os.path.join(iso_dir, new_iso_name)
    update_iso_xml = os.path.join(iso_dir, update_iso_xml_name)
    if not os.path.exists(iso_dir):
        os.mkdir(iso_dir)
    if not init_iso_name:
        init_iso = ""
    else:
        init_iso = os.path.join(iso_dir, init_iso_name)

    if vm_ref == "name":
        vm_ref = vm_name

    env_pre(old_iso, new_iso)
    # Check domain's disk device
    disk_blk = vm_xml.VMXML.get_disk_blk(vm_name)
    logging.info("disk_blk %s", disk_blk)
    if target_device not in disk_blk:
        logging.info("Adding device")
        add_device(vm_name)

    if vm.is_alive() and start_vm == "no":
        logging.info("Destroying guest...")
        vm.destroy()

    elif vm.is_dead() and start_vm == "yes":
        logging.info("Starting guest...")
        vm.start()

    # If test target is floppy, you need to set selinux to Permissive mode.
    result = update_device(vm_name, init_iso, options, start_vm)

    # If selinux is set to enforcing and we FAIL, then just SKIP
    force_SKIP = False
    if result.exit_status == 1 and utils_misc.selinux_enforcing() and \
       result.stderr.count("unable to execute QEMU command 'change':"):
        force_SKIP = True

    # Libvirt will ignore --source when action is eject
    if action == "--eject ":
        source = ""
    else:
        source = os.path.join(iso_dir, source_name)
        if source_path == "no":
            source = source_name

    # For read&write floppy test, the iso media need a writeable fs
    rw_floppy_test = "yes" == params.get("rw_floppy_test", "no")
    if rw_floppy_test:
        utils.run("mkfs.ext3 -F %s" % source)

    all_options = action + options + " " + source
    result = virsh.change_media(vm_ref,
                                target_device,
                                all_options,
                                ignore_status=True,
                                debug=True)
    if status_error:
        if start_vm == "no" and vm.is_dead():
            try:
                vm.start()
            except virt_vm.VMStartError, detail:
                result.exit_status = 1
                result.stderr = str(detail)
        if start_vm == "yes" and vm.is_alive():
            vm.destroy(gracefully=False)
            try:
                vm.start()
            except virt_vm.VMStartError, detail:
                result.exit_status = 1
                result.stderr = str(detail)
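
# A minimal sketch of the ISO preparation done by env_pre() above, assuming
# the autotest utils.run helper; the work directory is a placeholder and
# mkisofs must be installed on the host.
import os

from autotest.client import utils


def make_test_iso(work_dir, name="old"):
    """Write a 1M random payload and wrap it into an ISO image."""
    if not os.path.isdir(work_dir):
        os.makedirs(work_dir)
    payload = os.path.join(work_dir, name)
    iso = payload + ".iso"
    utils.run("dd if=/dev/urandom of=%s bs=1M count=1" % payload)
    utils.run("mkisofs -o %s %s" % (iso, payload))
    return iso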
Example #22
    def run_once(self):
        params = self.params

        # If a dependency test prior to this test has failed, let's fail
        # it right away as TestNA.
        if params.get("dependency_failed") == 'yes':
            raise error.TestNAError("Test dependency failed")

        # Report virt test version
        logging.info(version.get_pretty_version_info())
        # Report the parameters we've received and write them as keyvals
        logging.info("Starting test %s", self.tag)
        logging.debug("Test parameters:")
        keys = params.keys()
        keys.sort()
        for key in keys:
            logging.debug("    %s = %s", key, params[key])

        # Warn of this special condition in the relevant location in output & logs
        if os.getuid() == 0 and params.get('nettype', 'user') == 'user':
            logging.warning("")
            logging.warning("Testing with nettype='user' while running "
                            "as root may produce unexpected results!!!")
            logging.warning("")

        # Open the environment file
        env_filename = os.path.join(
            data_dir.get_backend_dir(params.get("vm_type")),
            params.get("env", "env"))
        env = utils_env.Env(env_filename, self.env_version)

        test_passed = False
        t_types = None
        t_type = None

        try:
            try:
                try:
                    subtest_dirs = []

                    other_subtests_dirs = params.get("other_tests_dirs", "")
                    for d in other_subtests_dirs.split():
                        d = os.path.join(*d.split("/"))
                        subtestdir = os.path.join(self.bindir, d, "tests")
                        if not os.path.isdir(subtestdir):
                            raise error.TestError("Directory %s does not "
                                                  "exist" % (subtestdir))
                        subtest_dirs += data_dir.SubdirList(
                            subtestdir, bootstrap.test_filter)

                    provider = params.get("provider", None)

                    if provider is None:
                        # Verify that we have the corresponding source file for it
                        for generic_subdir in asset.get_test_provider_subdirs(
                                'generic'):
                            subtest_dirs += data_dir.SubdirList(
                                generic_subdir, bootstrap.test_filter)

                        for specific_subdir in asset.get_test_provider_subdirs(
                                params.get("vm_type")):
                            subtest_dirs += data_dir.SubdirList(
                                specific_subdir, bootstrap.test_filter)
                    else:
                        provider_info = asset.get_test_provider_info(provider)
                        for key in provider_info['backends']:
                            subtest_dirs += data_dir.SubdirList(
                                provider_info['backends'][key]['path'],
                                bootstrap.test_filter)

                    subtest_dir = None

                    # Get the test routine corresponding to the specified
                    # test type
                    logging.debug(
                        "Searching for test modules that match "
                        "'type = %s' and 'provider = %s' "
                        "on this cartesian dict", params.get("type"),
                        params.get("provider", None))

                    t_types = params.get("type").split()
                    # Make sure we can load provider_lib in tests
                    for s in subtest_dirs:
                        if os.path.dirname(s) not in sys.path:
                            sys.path.insert(0, os.path.dirname(s))

                    test_modules = {}
                    for t_type in t_types:
                        for d in subtest_dirs:
                            module_path = os.path.join(d, "%s.py" % t_type)
                            if os.path.isfile(module_path):
                                logging.debug("Found subtest module %s",
                                              module_path)
                                subtest_dir = d
                                break
                        if subtest_dir is None:
                            msg = ("Could not find test file %s.py on test"
                                   "dirs %s" % (t_type, subtest_dirs))
                            raise error.TestError(msg)
                        # Load the test module
                        f, p, d = imp.find_module(t_type, [subtest_dir])
                        test_modules[t_type] = imp.load_module(t_type, f, p, d)
                        f.close()

                    # Preprocess
                    try:
                        params = env_process.preprocess(self, params, env)
                    finally:
                        env.save()

                    # Run the test function
                    for t_type, test_module in test_modules.items():
                        run_func = utils_misc.get_test_entrypoint_func(
                            t_type, test_module)
                        try:
                            run_func(self, params, env)
                            self.verify_background_errors()
                        finally:
                            env.save()
                    test_passed = True
                    error_message = funcatexit.run_exitfuncs(env, t_type)
                    if error_message:
                        raise error.TestWarn("funcatexit failed with: %s" %
                                             error_message)

                except Exception, e:
                    if t_type is not None:
                        error_message = funcatexit.run_exitfuncs(env, t_type)
                        if error_message:
                            logging.error(error_message)
                    try:
                        env_process.postprocess_on_error(self, params, env)
                    finally:
                        env.save()
                    raise

            finally:
                # Postprocess
                try:
                    try:
                        env_process.postprocess(self, params, env)
                    except Exception, e:
                        if test_passed:
                            raise
                        logging.error(
                            "Exception raised during "
                            "postprocessing: %s", e)
                finally:
                    env.save()

        except Exception, e:
            if params.get("abort_on_error") != "yes":
                raise
            # Abort on error
            logging.info("Aborting job (%s)", e)
            if params.get("vm_type") == "qemu":
                for vm in env.get_all_vms():
                    if vm.is_dead():
                        continue
                    logging.info("VM '%s' is alive.", vm.name)
                    for m in vm.monitors:
                        logging.info("It has a %s monitor unix socket at: %s",
                                     m.protocol, m.filename)
                    logging.info("The command line used to start it was:\n%s",
                                 vm.make_qemu_command())
                raise error.JobError("Abort requested (%s)" % e)
Example #23
def run(test, params, env):
    """
    Test guest numa setting
    """
    vcpu_num = int(params.get("vcpu_num", 2))
    max_mem = int(params.get("max_mem", 1048576))
    max_mem_unit = params.get("max_mem_unit", 'KiB')
    vcpu_placement = params.get("vcpu_placement", 'static')
    bug_url = params.get("bug_url", "")
    status_error = "yes" == params.get("status_error", "no")
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
    mode_dict = {
        'strict': 'bind',
        'preferred': 'prefer',
        'interleave': 'interleave'
    }

    # Prepare numatune memory parameter dict and list
    mem_tuple = ('memory_mode', 'memory_placement', 'memory_nodeset')
    numa_memory = {}
    for mem_param in mem_tuple:
        value = params.get(mem_param)
        if value:
            numa_memory[mem_param.split('_')[1]] = value

    memnode_tuple = ('memnode_cellid', 'memnode_mode', 'memnode_nodeset')
    numa_memnode = handle_param(memnode_tuple, params)

    if numa_memnode:
        if not libvirt_version.version_compare(1, 2, 7):
            raise error.TestNAError("Setting numatune memnode is not "
                                    "supported in current libvirt version.")

    # Prepare cpu numa cell parameter
    topology = {}
    topo_tuple = ('sockets', 'cores', 'threads')
    for key in topo_tuple:
        if params.get(key):
            topology[key] = params.get(key)

    cell_tuple = ('cell_id', 'cell_cpus', 'cell_memory')
    numa_cell = handle_param(cell_tuple, params)

    # Prepare qemu cmdline check parameter
    cmdline_tuple = ("qemu_cmdline", )
    cmdline_list = handle_param(cmdline_tuple, params)

    # Prepare hugepages parameter
    backup_list = []
    page_tuple = ('vmpage_size', 'vmpage_unit', 'vmpage_nodeset')
    page_list = handle_param(page_tuple, params)
    nr_pagesize_total = params.get("nr_pagesize_total")
    deallocate = False
    default_nr_hugepages_path = "/sys/kernel/mm/hugepages/hugepages-2048kB/"
    default_nr_hugepages_path += "nr_hugepages"

    if page_list:
        if not libvirt_version.version_compare(1, 2, 5):
            raise error.TestNAError("Setting hugepages more specifically per "
                                    "numa node not supported on current "
                                    "version")

    hp_cl = test_setup.HugePageConfig(params)
    default_hp_size = hp_cl.get_hugepage_size()
    supported_hp_size = hp_cl.get_multi_supported_hugepage_size()
    mount_path = []
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    qemu_conf_restore = False

    def _update_qemu_conf():
        """
        Mount hugepage path, update qemu conf then restart libvirtd
        """
        size_dict = {'2048': '2M', '1048576': '1G', '16384': '16M'}
        for page in page_list:
            if page['size'] not in supported_hp_size:
                raise error.TestError(
                    "Hugepage size [%s] isn't supported, "
                    "please verify kernel cmdline configuration." %
                    page['size'])
            m_path = "/dev/hugepages%s" % size_dict[page['size']]
            hp_cl.hugepage_size = int(page['size'])
            hp_cl.hugepage_path = m_path
            hp_cl.mount_hugepage_fs()
            mount_path.append(m_path)
        if mount_path:
            qemu_conf.hugetlbfs_mount = mount_path
            libvirtd.restart()

    try:
        # Get host numa node list
        host_numa_node = utils_misc.NumaInfo()
        node_list = host_numa_node.online_nodes
        logging.debug("host node list is %s", node_list)
        used_node = []
        if numa_memory.get('nodeset'):
            used_node += utlv.cpus_parser(numa_memory['nodeset'])
        if numa_memnode:
            for i in numa_memnode:
                used_node += utlv.cpus_parser(i['nodeset'])
        if page_list:
            host_page_tuple = ("hugepage_size", "page_num", "page_nodenum")
            h_list = handle_param(host_page_tuple, params)
            h_nodenum = [
                h_list[p_size]['nodenum'] for p_size in range(len(h_list))
            ]
            for i in h_nodenum:
                used_node += utlv.cpus_parser(i)
        if used_node and not status_error:
            logging.debug("set node list is %s", used_node)
            used_node = list(set(used_node))
            for i in used_node:
                if i not in node_list:
                    raise error.TestNAError("%s in nodeset out of range" % i)
                mem_size = host_numa_node.read_from_node_meminfo(i, 'MemTotal')
                logging.debug("the memory total in the node %s is %s", i,
                              mem_size)
                if not int(mem_size):
                    raise error.TestNAError("node %s memory is empty" % i)

        # set hugepage with qemu.conf and mount path
        if default_hp_size == 2048:
            hp_cl.setup()
            deallocate = True
        else:
            _update_qemu_conf()
            qemu_conf_restore = True

        # set hugepage with total number or per-node number
        if nr_pagesize_total:
            # Only set the total number of 2M hugepages, as runtime updates
            # of the total 1G hugepage count are not supported now.
            deallocate = True
            hp_cl.kernel_hp_file = default_nr_hugepages_path
            hp_cl.target_hugepages = int(nr_pagesize_total)
            hp_cl.set_hugepages()
        if page_list:
            hp_size = [h_list[p_size]['size'] for p_size in range(len(h_list))]
            multi_hp_size = hp_cl.get_multi_supported_hugepage_size()
            for size in hp_size:
                if size not in multi_hp_size:
                    raise error.TestNAError("The hugepage size %s not "
                                            "supported or not configured under"
                                            " current running kernel." % size)
            # backup node page setting and set new value
            for i in h_list:
                node_val = hp_cl.get_node_num_huge_pages(
                    i['nodenum'], i['size'])
                # set hugepages per node if the current value is not sufficient
                # kernel 1G hugepage runtime number update is supported now
                if int(i['num']) > node_val:
                    node_dict = i.copy()
                    node_dict['num'] = node_val
                    backup_list.append(node_dict)
                    hp_cl.set_node_num_huge_pages(i['num'], i['nodenum'],
                                                  i['size'])

        vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml.vcpu = vcpu_num
        vmxml.max_mem = max_mem
        vmxml.max_mem_unit = max_mem_unit
        vmxml.current_mem = max_mem
        vmxml.current_mem_unit = max_mem_unit

        # numatune setting
        if numa_memnode:
            vmxml.numa_memory = numa_memory
            vmxml.numa_memnode = numa_memnode
            del vmxml.numa_memory
        if numa_memory:
            vmxml.numa_memory = numa_memory

        # vcpu placement setting
        vmxml.placement = vcpu_placement

        # guest numa cpu setting
        vmcpuxml = libvirt_xml.vm_xml.VMCPUXML()
        vmcpuxml.xml = "<cpu><numa/></cpu>"
        if topology:
            vmcpuxml.topology = topology
        logging.debug(vmcpuxml.numa_cell)
        vmcpuxml.numa_cell = numa_cell
        logging.debug(vmcpuxml.numa_cell)
        vmxml.cpu = vmcpuxml

        # hugepages setting
        if page_list:
            membacking = libvirt_xml.vm_xml.VMMemBackingXML()
            hugepages = libvirt_xml.vm_xml.VMHugepagesXML()
            pagexml_list = []
            for i in range(len(page_list)):
                pagexml = hugepages.PageXML()
                pagexml.update(page_list[i])
                pagexml_list.append(pagexml)
            hugepages.pages = pagexml_list
            membacking.hugepages = hugepages
            vmxml.mb = membacking

        logging.debug("vm xml is %s", vmxml)
        vmxml.sync()

        try:
            vm.start()
            session = vm.wait_for_login()
            vmxml_new = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
            logging.debug("vm xml after start is %s", vmxml_new)

        except virt_vm.VMStartError, e:
            # Starting VM failed.
            if status_error:
                return
            else:
                raise error.TestFail("Test failed in positive case.\n error:"
                                     " %s\n%s" % (e, bug_url))

        vm_pid = vm.get_pid()
        # numa hugepage check
        if page_list:
            numa_maps = open("/proc/%s/numa_maps" % vm_pid)
            numa_map_info = numa_maps.read()
            numa_maps.close()
            hugepage_info = re.findall(".*file=\S*hugepages.*", numa_map_info)
            if not hugepage_info:
                raise error.TestFail("Can't find hugepages usage info in vm "
                                     "numa maps")
            else:
                logging.debug("The hugepage info in numa_maps is %s" %
                              hugepage_info)
                map_dict = {}
                usage_dict = {}
                node_pattern = r"\s(\S+):(\S+)\s.*ram-node(\d+).*\s"
                node_pattern += "N(\d+)=(\d+)"
                for map_info in hugepage_info:
                    for (mem_mode, mem_num, cell_num, host_node_num,
                         vm_page_num) in re.findall(node_pattern, map_info):
                        usage_dict[mem_mode] = utlv.cpus_parser(mem_num)
                        usage_dict[host_node_num] = vm_page_num
                        map_dict[cell_num] = usage_dict.copy()
                logging.debug("huagepage info in vm numa maps is %s", map_dict)
                memnode_dict = {}
                usage_dict = {}
                if numa_memnode:
                    for i in numa_memnode:
                        node = utlv.cpus_parser(i['nodeset'])
                        mode = mode_dict[i['mode']]
                        usage_dict[mode] = node
                        memnode_dict[i['cellid']] = usage_dict.copy()
                    logging.debug("memnode setting dict is %s", memnode_dict)
                    for k in memnode_dict.keys():
                        for mk in memnode_dict[k].keys():
                            if memnode_dict[k][mk] != map_dict[k][mk]:
                                raise error.TestFail("vm pid numa map dict %s"
                                                     " not expected" %
                                                     map_dict)

        # qemu command line check
        f_cmdline = open("/proc/%s/cmdline" % vm_pid)
        q_cmdline_list = f_cmdline.read().split("\x00")
        f_cmdline.close()
        logging.debug("vm qemu cmdline list is %s" % q_cmdline_list)
        for cmd in cmdline_list:
            logging.debug("checking '%s' in qemu cmdline", cmd['cmdline'])
            p_found = False
            for q_cmd in q_cmdline_list:
                if re.search(cmd['cmdline'], q_cmd):
                    p_found = True
                    break
                else:
                    continue
            if not p_found:
                raise error.TestFail("%s not found in vm qemu cmdline" %
                                     cmd['cmdline'])

        # vm inside check
        vm_cpu_info = utils_misc.get_cpu_info(session)
        logging.debug("lscpu output dict in vm is %s", vm_cpu_info)
        session.close()
        node_num = int(vm_cpu_info["NUMA node(s)"])
        if node_num != len(numa_cell):
            raise error.TestFail("node number %s in vm is not expected" %
                                 node_num)
        for i in range(len(numa_cell)):
            cpu_str = vm_cpu_info["NUMA node%s CPU(s)" % i]
            vm_cpu_list = utlv.cpus_parser(cpu_str)
            cpu_list = utlv.cpus_parser(numa_cell[i]["cpus"])
            if vm_cpu_list != cpu_list:
                raise error.TestFail("vm node %s cpu list %s not expected" %
                                     (i, vm_cpu_list))
        if topology:
            vm_topo_tuple = ("Socket(s)", "Core(s) per socket",
                             "Thread(s) per core")
            for i in range(len(topo_tuple)):
                topo_info = vm_cpu_info[vm_topo_tuple[i]]
                if topo_info != topology[topo_tuple[i]]:
                    raise error.TestFail("%s in vm topology not expected." %
                                         topo_tuple[i])
Beispiel #24
def run(test, params, env):
    """
    Test virsh iface-bridge and iface-unbridge commands.

    (1) Bridge an existing network device (iface-bridge).
    (2) Unbridge a network device (iface-unbridge).
    """

    iface_name = params.get("iface_name")
    bridge_name = params.get("bridge_name")
    ping_ip = params.get("ping_ip", "")
    ping_count = int(params.get("ping_count", "3"))
    ping_timeout = int(params.get("ping_timeout", "5"))
    bridge_option = params.get("bridge_option")
    unbridge_option = params.get("unbridge_option")
    bridge_delay = "yes" == params.get("bridge_delay", "no")
    delay_num = params.get("delay_num", "0")
    create_bridge = "yes" == params.get("create_bridge", "yes")
    bridge_status_error = "yes" == params.get("bridge_status_error", "no")
    unbridge_status_error = "yes" == params.get("unbridge_status_error", "no")
    iface_script = NETWORK_SCRIPT + iface_name
    iface_script_bk = os.path.join(test.tmpdir, "iface-%s.bk" % iface_name)
    check_iface = "yes" == params.get("check_iface", "yes")
    if check_iface:
        # Make sure the interface exists
        if not libvirt.check_iface(iface_name, "exists", "--all"):
            raise error.TestNAError("Interface '%s' not exists" % iface_name)

        net_iface = utils_net.Interface(name=iface_name)
        iface_is_up = net_iface.is_up()
        iface_ip = net_iface.get_ip()

        # Back up the interface script
        utils.run("cp %s %s" % (iface_script, iface_script_bk))

    # Make sure the bridge name not exists
    net_bridge = utils_net.Bridge()
    if bridge_name in net_bridge.list_br():
        raise error.TestNAError("Bridge '%s' already exists" % bridge_name)

    # Stop NetworkManager service
    try:
        NM = utils_path.find_command("NetworkManager")
    except utils_path.CmdNotFoundError:
        logging.debug("No NetworkManager service.")
        NM = None
    NM_is_running = False
    if NM is not None:
        NM_service = service.Factory.create_service("NetworkManager")
        NM_is_running = NM_service.status()
        if NM_is_running:
            NM_service.stop()

    def unbridge_check():
        """
        Check the result after the unbridge operation.
        """
        list_option = "--all"
        if libvirt.check_iface(bridge_name, "exists", list_option):
            raise error.TestFail("%s is still present." % bridge_name)
        if "no-start" in unbridge_option:
            list_option = "--inactive"
        if not libvirt.check_iface(iface_name, "exists", list_option):
            raise error.TestFail("%s is not present." % iface_name)

    if bridge_delay:
        bridge_option += " --delay %s" % delay_num
    # Run test
    try:
        if create_bridge:
            # Create bridge
            result = virsh.iface_bridge(iface_name, bridge_name, bridge_option)
            libvirt.check_exit_status(result, bridge_status_error)
            if not bridge_status_error:
                # Get the IP address of the newly created bridge
                try:
                    br_ip = utils_net.get_ip_address_by_interface(bridge_name)
                except:
                    br_ip = ""
                # check IP of new bridge
                if check_iface and br_ip and br_ip != iface_ip:
                    raise error.TestFail(
                        "bridge IP(%s) isn't the same as iface IP(%s)." %
                        (br_ip, iface_ip))
                # check the status of STP feature
                if "no-start" not in bridge_option:
                    if "no-stp" not in bridge_option:
                        if "yes" != net_bridge.get_stp_status(bridge_name):
                            raise error.Testfail("Fail to enable STP.")
                # Do ping test only if the bridge has an IP address and
                # ping_ip is not empty
                if br_ip and ping_ip:
                    if not libvirt.check_iface(bridge_name,
                                               "ping",
                                               ping_ip,
                                               count=ping_count,
                                               timeout=ping_timeout):
                        raise error.TestFail("Fail to ping %s from %s." %
                                             (ping_ip, bridge_name))
                else:
                    # Skip ping test
                    logging.debug("Skip ping test as %s has no IP address",
                                  bridge_name)
                list_option = ""
                if "no-start" in bridge_option:
                    list_option = "--inactive"
                if libvirt.check_iface(bridge_name, "exists", list_option):
                    # Unbridge
                    result = virsh.iface_unbridge(bridge_name, unbridge_option)
                    libvirt.check_exit_status(result, unbridge_status_error)
                    if not unbridge_status_error:
                        unbridge_check()
                else:
                    raise error.TestFail("%s is not present." % bridge_name)
        else:
            # Unbridge without creating bridge, only for negative test now
            result = virsh.iface_unbridge(bridge_name, unbridge_option)
            libvirt.check_exit_status(result, unbridge_status_error)
            if not unbridge_status_error:
                unbridge_check()
    finally:
        if create_bridge and check_iface:
            if libvirt.check_iface(bridge_name, "exists", "--all"):
                virsh.iface_unbridge(bridge_name)
            if os.path.exists(iface_script_bk):
                utils.run("mv %s %s" % (iface_script_bk, iface_script))
            if iface_is_up:
                # Reload the restored interface script
                utils.run("ifdown %s" % iface_name)
                utils.run("ifup %s" % iface_name)
            else:
                net_iface.down()
            # Remove the newly created bridge if it still exists
            try:
                utils_net.bring_down_ifname(bridge_name)
                utils.run("brctl delbr %s" % bridge_name)
            except utils_net.TAPBringDownError:
                pass
        if NM_is_running:
            NM_service.start()
Beispiel #25
def _set_iptables_firewalld(iptables_status, firewalld_status):
    """
    Try to set firewalld and iptables services status.

    :param iptables_status: Whether iptables should be set active.
    :param firewalld_status: Whether firewalld should be set active.
    :return: A tuple of two booleans standing for the original statuses
             of iptables and firewalld.
    """
    # pylint: disable=E1103
    logging.debug("Setting firewalld and iptables services.")

    # Iptables and firewalld are mutually exclusive services;
    # it's impossible to run both at the same time.
    if iptables_status and firewalld_status:
        msg = "Can't activate both iptables and firewalld services."
        raise error.TestNAError(msg)

    # Check the availability of both packages.
    try:
        utils_path.find_command('iptables')
        iptables = service.Factory.create_service('iptables')
    except utils_path.CmdNotFoundError:
        msg = "Can't find service iptables."
        raise error.TestNAError(msg)

    try:
        utils_path.find_command('firewalld')
        firewalld = service.Factory.create_service('firewalld')
    except utils_path.CmdNotFoundError:
        msg = "Can't find service firewalld."
        raise error.TestNAError(msg)

    # Back up original services status.
    old_iptables = iptables.status()
    old_firewalld = firewalld.status()

    # Stop the unwanted service first, then start the other one.
    # Starting one service directly forces the other to stop,
    # which is harder to handle cleanly.
    if not iptables_status and iptables.status():
        utils.run('iptables-save > /tmp/iptables.save')
        if not iptables.stop():
            msg = "Can't stop service iptables"
            raise error.TestError(msg)

    if not firewalld_status and firewalld.status():
        if not firewalld.stop():
            msg = ("Service firewalld can't be stopped. "
                   "Maybe it is masked by default. you can unmask it by "
                   "running 'systemctl unmask firewalld'.")
            raise error.TestNAError(msg)

    if iptables_status and not iptables.status():
        if not iptables.start():
            msg = "Can't start service iptables"
            raise error.TestError(msg)
        utils.run('iptables-restore < /tmp/iptables.save')

    if firewalld_status and not firewalld.status():
        if not firewalld.start():
            msg = ("Service firewalld can't be started. "
                   "Maybe it is masked by default. you can unmask it by "
                   "running 'systemctl unmask firewalld'.")
            raise error.TestNAError(msg)

    return old_iptables, old_firewalld
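
A minimal calling sketch (hypothetical, not part of the original test) showing
how the returned tuple is meant to drive restoration afterwards:

    # Activate iptables only and keep firewalld stopped (sketch only).
    old_iptables, old_firewalld = _set_iptables_firewalld(True, False)
    try:
        pass  # run the libvirtd log checks with iptables active
    finally:
        # Put both services back into their original states.
        _set_iptables_firewalld(old_iptables, old_firewalld)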
Beispiel #26
        fd = open(xmlfile, 'w')
        fd.write(newxml)
        fd.close()

        cmd_result = virsh.net_update(net_name,
                                      update_command,
                                      net_section,
                                      xmlfile,
                                      options,
                                      debug=True)

        if cmd_result.exit_status:
            err = cmd_result.stderr.strip()
            if re.search("is not supported", err):
                raise error.TestNAError("Skip the test: %s" % err)
            else:
                raise error.TestFail("Failed to execute "
                                     "virsh net-update command")

        # Check the actual xml
        virsh_option = ""
        if options == "--config":
            virsh_option = "--inactive"
        cmd_result = virsh.net_dumpxml(net_name, virsh_option)
        actual_net_xml = cmd_result.stdout.strip()
        logging.info("After net-update, the actual net xml is %s",
                     actual_net_xml)

        if update_command == "delete":
            new_xml_obj = network_xml.NetworkXML.new_from_net_dumpxml(
                net_name, virsh_option)
            # (arguments assumed to match the dumpxml call above; the
            # original example is cut off at this point)
Beispiel #27
def run(test, params, env):
    """
    Test snapshot-dumpxml command, make sure that the xml you get is correct

    Test scenarios:
    1. live snapshot dump
    2. shutoff snapshot dump
    3. dumpxml with security info
    4. readonly mode
    """

    if not virsh.has_help_command('snapshot-dumpxml'):
        raise error.TestNAError("This version of libvirt does not support "
                                "the snapshot-dumpxml test")

    vm_name = params.get("main_vm")
    status_error = params.get("status_error", "no")
    passwd = params.get("snapshot_passwd")
    secu_opt = params.get("snapshot_secure_option")
    desc_opt = params.get("snapshot_desc_option")
    mem_opt = params.get("snapshot_mem_option")
    disk_opt = params.get("disk_only_snap")
    snap_name = params.get("snapshot_name", "snap_test")
    readonly = params.get("readonly", False)

    try:
        snap_opt = ""
        opt_dict = {}
        # collect all the parameters at one time
        opt_name = locals()
        for opt in ["snap_name", "desc_opt", "mem_opt", "disk_opt"]:
            if opt_name[opt] is not None:
                # Integrate snapshot create options
                snap_opt = snap_opt + " " + opt_name[opt]

        # Do xml backup for final recovery
        vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        # Add passwd in guest graphics
        if passwd is not None:
            vm = env.get_vm(vm_name)
            if vm.is_alive():
                vm.destroy()
            vm_xml.VMXML.add_security_info(
                vm_xml.VMXML.new_from_dumpxml(vm_name), passwd)
            vm.start()
            if secu_opt is not None:
                opt_dict['passwd'] = passwd

        logging.debug("snapshot create options are %s", snap_opt)

        # Get state to do snapshot xml state check
        dom_state = virsh.domstate(vm_name).stdout.strip()

        # Create a disk-only snapshot first to keep the original image clean
        virsh.snapshot_create_as(vm_name, "--disk-only")

        # Create snapshot with options
        snapshot_result = virsh.snapshot_create_as(vm_name,
                                                   snap_opt,
                                                   readonly=readonly)
        if snapshot_result.exit_status:
            if status_error == "no":
                raise error.TestFail("Failed to create snapshot. Error:%s." %
                                     snapshot_result.stderr.strip())
            elif status_error == "yes":
                logging.info("Create snapshot failed as expected, Error:%s.",
                             snapshot_result.stderr.strip())
                return

        ctime = get_snap_createtime(vm_name, snap_name)

        # Run virsh command for snapshot-dumpxml
        dumpxml_result = virsh.snapshot_dumpxml(vm_name, snap_name, secu_opt)
        if dumpxml_result.exit_status:
            if status_error == "no":
                raise error.TestFail("Failed to dump snapshot xml. Error:%s." %
                                     dumpxml_result.stderr.strip())
            elif status_error == "yes":
                logging.info("Dumpxml snapshot failed as expected, Error:%s.",
                             dumpxml_result.stderr.strip())
                return

        # Record all the parameters in dict at one time
        check_name = locals()
        for var in [
                "vm_name", "snap_name", "desc_opt", "dom_state", "ctime",
                "disk_opt"
        ]:
            if check_name[var] is not None:
                opt_dict[var] = check_name[var]

        logging.debug("opt_dict is %s", opt_dict)
        output = dumpxml_result.stdout.strip()
        snapshot_dumpxml_check(output, opt_dict)

    finally:
        # Recovery
        utils_test.libvirt.clean_up_snapshots(vm_name)
        vmxml_backup.sync("--snapshots-metadata")
Beispiel #28
def run(test, params, env):
    """
    Test command: virsh console.
    """
    os_type = params.get("os_type")
    if os_type == "windows":
        raise error.TestNAError("SKIP:Do not support Windows.")

    # Get parameters for test
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    vm_ref = params.get("virsh_console_vm_ref", "domname")
    vm_state = params.get("virsh_console_vm_state", "running")
    login_user = params.get("console_login_user", "root")
    if login_user == "root":
        login_passwd = params.get("password")
    else:
        login_passwd = params.get("console_password_not_root")
    status_error = "yes" == params.get("status_error", "no")
    domuuid = vm.get_uuid()
    domid = ""

    # A backup of original vm
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    if vm.is_alive():
        vm.destroy()
    xml_console_config(vm_name)

    try:
        # Guarantee cleanup even if configuring the vm console fails.
        vm_console_config(vm)

        # Prepare vm state for test
        if vm_state != "shutoff":
            vm.start(autoconsole=False)
            vm.wait_for_login()
            domid = vm.get_id()
        if vm_state == "paused":
            vm.pause()

        if vm_ref == "domname":
            vm_ref = vm_name
        elif vm_ref == "domid":
            vm_ref = domid
        elif vm_ref == "domuuid":
            vm_ref = domuuid
        elif domid and vm_ref == "hex_id":
            vm_ref = hex(int(domid))

        # Run command
        command = "virsh console %s" % vm_ref
        console_session = aexpect.ShellSession(command)

        status = verify_virsh_console(console_session,
                                      login_user,
                                      login_passwd,
                                      debug=True)
        console_session.close()

    finally:
        # Recover state of vm.
        if vm_state == "paused":
            vm.resume()

        # Recover vm
        if vm.is_alive():
            vm.destroy()
        xml_console_recover(vmxml_backup)

    # Check result
    if status_error:
        if status:
            raise error.TestFail("Run successful with wrong command!")
    else:
        if not status:
            raise error.TestFail("Run failed with right command!")
Beispiel #29
def run_timedrift_no_net_win(test, params, env):
    """
    Check guest time drift after suspend or pause/resume in a Windows
    guest, without in-guest network time synchronization.

    :param test: kvm test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    clock_server = params.get("clock_server", "clock.redhat.com")
    ntputil_install = params.get("ntputil_install", "yum install -y ntpdate")
    login_timeout = int(params.get("login_timeout", "240"))
    date_time_command = params.get("date_time_command",
                                   r"date -u +'TIME: %a %m/%d/%Y %H:%M:%S.%N'")
    date_time_filter_re = params.get("date_time_filter_re",
                                     r"(?:TIME: \w\w\w )(.{19})(.+)")
    date_time_format = params.get("date_time_format",
                                  "%m/%d/%Y %H:%M:%S")

    tolerance = float(params.get("time_diff_tolerance", "0.5"))

    sub_work = params["sub_work"]
    test_type = params["timedrift_sub_work"]

    vm_name = params.get("vms")
    vm = env.get_vm(vm_name)
    error.context("Check if ntp utils are host in system.", logging.info)
    try:
        utils_misc.find_command("ntpdate")
    except ValueError:
        error.context("Install ntp utils `%s`." % (ntputil_install),
                      logging.info)
        utils.run(ntputil_install)
    error.context("Sync host machine with clock server %s" % (clock_server),
                  logging.info)
    utils.run("ntpdate %s" % (clock_server))

    session = vm.wait_for_login(timeout=login_timeout)
    error.context("Get clock from host and guest VM using `date`",
                  logging.info)

    before_date = utils_test.get_time(session,
                                      date_time_command,
                                      date_time_filter_re,
                                      date_time_format)
    logging.debug("date: host time=%ss guest time=%ss",
                  *before_date)

    session.close()

    if sub_work in globals():  # Try to find sub work function.
        globals()[sub_work](params, vm, session)
    else:
        raise error.TestNAError("Unable to found subwork %s in %s test file." %
                                (sub_work, __file__))

    vm = env.get_vm(vm_name)
    session = vm.wait_for_login(timeout=login_timeout)
    error.context("Get clock from host and guest VM using `date`",
                  logging.info)
    after_date = utils_test.get_time(session,
                                     date_time_command,
                                     date_time_filter_re,
                                     date_time_format)
    logging.debug("date: host time=%ss guest time=%ss",
                  *after_date)

    if test_type == 'guest_suspend':
        date_diff = time_diff(before_date, after_date)
        if date_diff > tolerance:
            raise error.TestFail("date difference %ss"
                                 " ('guest_diff_time != host_diff_time')"
                                 " is out of tolerance %ss" %
                                 (date_diff, tolerance))
    elif test_type == "guest_pause_resume":
        date_diff = time_diff_host_guest(before_date, after_date)
        if date_diff[1] > tolerance:
            raise error.TestFail("date %ss difference is "
                                 "'guest_time_after-guest_time_before'"
                                 " out of tolerance %ss" % (date_diff[1],
                                                            tolerance))
Beispiel #30
def run(test, params, env):
    """
    Qemu reboot test:
    1) Boot up a windows guest.
    2) Run stress tool on host.
    3) After guest starts up, start the ftrace.
    4) Reboot VM inside guest.
    5.1) If the guest reboots successfully, stop the trace-cmd and remove
         the trace.dat file.
    5.2) If the guest hangs, stop the trace-cmd and generate the readable
         report file.
    6) If 5.2, check whether the trace.txt includes the error log.
    7) Repeat step 3~6.

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """

    def find_trace_cmd():
        # Bracket the first letter so the grep process does not match
        # itself in the ps output.
        return utils.system("ps -a | grep '[t]race-cmd'",
                            ignore_status=True) == 0

    if os.system("which trace-cmd"):
        raise error.TestNAError("Please install trace-cmd.")

    timeout = float(params.get("login_timeout", 240))
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    session = vm.wait_for_login(timeout=timeout)

    reboot_method = params["reboot_method"]
    stress_cmd = params.get("stress_cmd", "stress --vm 4 --vm-bytes 1000M")

    trace_o = os.path.join(test.debugdir, "trace.dat")
    trace_cmd = "trace-cmd record -b 20000 -e kvm -o %s" % trace_o
    trace_cmd = params.get("trace_cmd", trace_cmd)
    re_trace = params.get("re_trace", "kvm_inj_exception:    #GP")

    report_file = os.path.join(test.debugdir, "trace.txt")
    trace_report_cmd = "trace-cmd report -i %s > %s " % (trace_o, report_file)
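    # trace-cmd record writes binary events to trace.dat; trace-cmd report
    # turns it into the human-readable trace.txt that is checked below.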
    # Pre-initialize so the finally block below never hits an unbound name.
    stress_job = None
    trace_job = None
    try:
        error.context("Run stress tool on host.", logging.info)
        stress_job = utils.BgJob(stress_cmd)
        # Reboot the VM
        for num in xrange(int(params.get("reboot_count", 1))):
            error.context("Reboot guest '%s'. Repeat %d" % (vm.name, num + 1),
                          logging.info)
            trace_job = utils.BgJob(trace_cmd)
            try:
                session = vm.reboot(session,
                                    reboot_method,
                                    0,
                                    timeout)
            except Exception, err:
                txt = "stop the trace-cmd and generate the readable report."
                error.context(txt, logging.info)
                os.kill(trace_job.sp.pid, signal.SIGINT)
                if not utils_misc.wait_for(lambda: not find_trace_cmd(),
                                           120, 60, 3):
                    logging.warn("trace-cmd could not finish after 120s.")
                trace_job = None
                utils.system(trace_report_cmd)
                report_txt = file(report_file).read()
                txt = "Check whether the trace.txt includes the error log."
                error.context(txt, logging.info)
                if re.findall(re_trace, report_txt, re.S):
                    msg = "Found %s in trace log %s" % (re_trace, report_file)
                    logging.info(msg)
                    raise error.TestFail(msg)
            else:
                txt = "stop the trace-cmd and remove the trace.dat file."
                error.context(txt, logging.info)
                os.kill(trace_job.sp.pid, signal.SIGINT)
                if not utils_misc.wait_for(lambda: not find_trace_cmd(),
                                           120, 60, 3):
                    logging.warn("trace-cmd could not finish after 120s.")
                trace_job = None
                utils.system("rm -rf %s" % trace_o, timeout=60)
    finally:
        if session:
            session.close()
        if stress_job and stress_job.sp.poll() is None:
            utils_misc.kill_process_tree(stress_job.sp.pid, 9)
        if trace_job:
            if trace_job.sp.poll() is None:
                os.kill(trace_job.sp.pid, signal.SIGINT)