Example #1
 def do_operation():
     """
         Do an operation in the guest OS with a VF and check the OS behavior after the operation.
     """
     if operation == "resume_suspend":
         try:
             virsh.suspend(vm.name, debug=True, ignore_status=False)
             virsh.resume(vm.name, debug=True, ignore_status=False)
             get_ip_by_mac(mac_addr, timeout=120)
         except process.CmdError as detail:
             err_msg = "Suspend-Resume %s with vf failed: %s" % (vm_name, detail)
             test.fail(err_msg)
     if operation == "reboot":
         try:
             if vm.serial_console is not None:
                 vm.cleanup_serial_console()
                 vm.create_serial_console()
             virsh.reboot(vm.name, ignore_status=False)
             get_ip_by_mac(mac_addr, timeout=120)
         except process.CmdError as detail:
             err_msg = "Reboot %s with vf failed: %s" % (vm_name, detail)
             test.fail(err_msg)
     if operation == "save":
         result = virsh.managedsave(vm_name, ignore_status=True, debug=True)
         utils_test.libvirt.check_exit_status(result, expect_error=True)
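
The example above calls a get_ip_by_mac() helper that is not shown in the excerpt. A minimal sketch of what such a helper could look like, assuming the VF's IP can be resolved from the host's neighbour table (this implementation is illustrative, not the original):

import re

from avocado.utils import process
from virttest import utils_misc


def get_ip_by_mac(mac_addr, timeout=120):
    """Poll the host neighbour table until an IP shows up for mac_addr."""
    def _find_ip():
        output = process.run("ip neigh", ignore_status=True).stdout_text
        match = re.search(r"^(\S+)\s.*%s" % mac_addr, output,
                          re.IGNORECASE | re.MULTILINE)
        return match.group(1) if match else None

    ip_addr = utils_misc.wait_for(_find_ip, timeout, step=5)
    if ip_addr is None:
        raise RuntimeError("No IP found for MAC %s within %ss"
                           % (mac_addr, timeout))
    return ip_addr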
Example #2
    def do_operation():
        """
            Do an operation in the guest OS with a VF and check the OS behavior after the operation.
        """
        if operation == "resume_suspend":
            try:
                virsh.suspend(vm.name, debug=True, ignore_status=False)
                virsh.resume(vm.name, debug=True, ignore_status=False)
                get_ip_by_mac(mac_addr, timeout=120)
            except process.CmdError as detail:
                err_msg = "Suspend-Resume %s with vf failed: %s" % (vm_name,
                                                                    detail)
                test.fail(err_msg)
        if operation == "reboot":
            try:
                if vm.serial_console is not None:
                    vm.cleanup_serial_console()
                    vm.create_serial_console()
                virsh.reboot(vm.name, ignore_status=False)
                get_ip_by_mac(mac_addr, timeout=120)
            except process.CmdError as detail:
                err_msg = "Reboot %s with vf failed: %s" % (vm_name, detail)
                test.fail(err_msg)
        if operation == "save":
            result = virsh.managedsave(vm_name, ignore_status=True, debug=True)
            utils_test.libvirt.check_exit_status(result, expect_error=True)

        if operation == "restart_libvirtd":
            detach_interface()
            utils_libvirtd.libvirtd_restart()
            interface = attach_interface()
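
The restart_libvirtd branch above relies on detach_interface() and attach_interface() helpers defined elsewhere in the test. A plausible sketch of their shape, assuming the VF is attached through a prepared interface XML object (iface_xml here is a hypothetical stand-in; vm_name, virsh, and libvirt come from the surrounding test as above):

def detach_interface():
    """Hot-unplug the VF interface before libvirtd is restarted."""
    # iface_xml is a hypothetical, previously prepared interface XML object
    result = virsh.detach_device(vm_name, iface_xml.xml, debug=True)
    libvirt.check_exit_status(result)


def attach_interface():
    """Hot-plug the VF interface back and return it for later checks."""
    result = virsh.attach_device(vm_name, iface_xml.xml, debug=True)
    libvirt.check_exit_status(result)
    return iface_xml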
Example #3
def run(test, params, env):
    """
    Check libvirt daemons are removed after removing libvirt pkgs.
    """
    daemons = params.get('daemons', "").split()
    require_modular_daemon = params.get('require_modular_daemon', "no") == "yes"

    utils_split_daemons.daemon_mode_check(require_modular_daemon)

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    session = None
    try:
        if not vm.is_alive():
            vm.start()

        session = vm.wait_for_login()
        if not utils_package.package_install("libvirt*", session):
            test.error("Failed to install libvirt package on guest")

        virsh.reboot(vm_name)
        # The old session is stale after the reboot, so log in again
        session = vm.wait_for_login()

        # Destroy the default network, otherwise the network daemon will not
        # be removed after the libvirt pkgs are removed
        cmd = "virsh net-destroy default"
        session.cmd(cmd, ignore_all_errors=True)

        runner = remote.RemoteRunner(session=session).run
        service.Factory.create_service('virtlogd', run=runner).start()

        if not utils_package.package_remove("libvirt*", session):
            test.error("Failed to remove libvirt packages on guest")

        for daemon in daemons:
            cmd = "systemctl -a | grep %s" % daemon
            if not session.cmd_status(cmd):
                test.fail("%s still exists after removing libvirt pkgs" % daemon)

    finally:
        if session is not None:
            session.close()
        if vm.is_alive():
            vm.destroy()
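
In the daemon check above, session.cmd_status() returns the shell exit status, so a zero status from "systemctl -a | grep <daemon>" means the unit is still listed and the test must fail. A standalone illustration of the same idiom, with a hypothetical daemon name:

status = session.cmd_status("systemctl -a | grep virtqemud")  # hypothetical daemon
if status == 0:
    test.fail("virtqemud still exists after removing libvirt pkgs")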
Example #4
def test_with_label(vm, params, test):
    """
    Test nvdimm with label setting

    :param vm: vm object
    :param params: dict, test parameters
    :param test: test object
    :raises: test.fail if checkpoints fail
    """
    test_str = params.get('test_str')
    test_file = params.get('test_file')
    vm_name = params.get('main_vm')
    vm_session = vm.wait_for_login()
    # Create a file on the nvdimm device.
    create_file_within_nvdimm_disk(vm_session, test_file, test_str, test, block_size=4096)

    # Reboot the guest, and remount the nvdimm device in the guest.
    # Check that the file foo-label still exists
    vm_session.close()
    virsh.reboot(vm_name, debug=True)
    vm_session = vm.wait_for_login()

    vm_session.cmd('mount -o dax /dev/pmem0 /mnt')
    if test_str not in vm_session.cmd('cat /mnt/foo-label'):
        test.fail('"%s" should be in output' % test_str)
    vm_session.close()
    if params.get('check_life_cycle', 'no') == 'yes':
        virsh.managedsave(vm_name, ignore_status=False, debug=True)
        vm.start()
        vm_session = vm.wait_for_login()
        check_nvdimm_file(test_str, test_file, vm_session, test)
        vm_session.close()
        vm_s1 = vm_name + ".s1"
        virsh.save(vm_name, vm_s1, ignore_status=False, debug=True)
        virsh.restore(vm_s1, ignore_status=False, debug=True)
        vm_session = vm.wait_for_login()
        check_nvdimm_file(test_str, test_file, vm_session, test)
        vm_session.close()
        virsh.snapshot_create_as(vm_name, vm_s1, ignore_status=False, debug=True)
        virsh.snapshot_revert(vm_name, vm_s1, ignore_status=False, debug=True)
        virsh.snapshot_delete(vm_name, vm_s1, ignore_status=False, debug=True)
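
test_with_label() calls a check_nvdimm_file() helper whose signature differs from the nested helper in Example #12. A minimal sketch consistent with how it is called here, assuming test_file is a file name under the /mnt mount point (a reconstruction, not the original helper):

def check_nvdimm_file(test_str, test_file, vm_session, test):
    """Remount the nvdimm device and verify test_file still holds test_str."""
    vm_session.cmd('mount -o dax /dev/pmem0 /mnt')
    if test_str not in vm_session.cmd('cat /mnt/%s' % test_file):
        test.fail('"%s" should still be in %s' % (test_str, test_file))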
Example #5
 def test_reboot():
     # Reboot(not supported on RHEL6)
     result = virsh.reboot(vm_name, ignore_status=True, debug=True)
     supported_err = ('not supported by the connection driver: '
                      'virDomainReboot')
     if supported_err in result.stderr.strip():
         logging.info("Reboot is not supported")
     else:
         libvirt.check_exit_status(result)
     vm.wait_for_login()
     if check_attach_pci():
         logging.debug("adapter found after reboot")
     else:
         test.fail("passthroughed adapter not found after reboot")
Example #7
 def guest_lifecycle():
     if operation == "suspend":
         # Suspend
         logging.info("Performing VM Suspend with device pass-through")
         result = virsh.suspend(vm_name, ignore_status=True, debug=True)
         libvirt.check_exit_status(result)
         libvirt.check_vm_state(vm_name, 'paused')
         time.sleep(10)
         # Resume
         logging.info("Performing VM Resume with device pass-through")
         result = virsh.resume(vm_name, ignore_status=True, debug=True)
         libvirt.check_exit_status(result)
         libvirt.check_vm_state(vm_name, 'running')
     elif operation == "shutdown":
         # Shutdown and Start the VM
         try:
             logging.info("Performing VM Shutdown with device pass-through")
             vm.shutdown()
             vm.wait_for_shutdown()
             libvirt.check_vm_state(vm_name, 'shut off')
             logging.info("Performing VM Start with device pass-through")
             vm.start()
             libvirt.check_vm_state(vm_name, 'running')
             vm.wait_for_login().close()
         except virt_vm.VMStartError as detail:
             test.fail("VM failed to start."
                       "Error: %s" % str(detail))
     elif operation == "reboot":
         # Reboot
         logging.info("Performing VM Reboot with device pass-through")
         result = virsh.reboot(vm_name, ignore_status=True, debug=True)
         if supported_err in result.stderr.strip():
             logging.info("Reboot is not supported")
         else:
             libvirt.check_exit_status(result)
     else:
         logging.debug("No operation for the domain")
     if sorted(vm.get_pci_devices()) != sorted(nic_list_before):
         logging.debug("Adapter found after lifecycle operation")
     else:
         test.fail("Passthroughed adapter not found after lifecycle operation")
Example #8
def run(test, params, env):
    """
    Test bridge support from network

    1) create a linux bridge and connect a physical interface to it
    2) define nwfilter with "vdsm-no-mac-spoofing"
    3) redefine the vm with the new create bridge and filter
    4) check if guest can get public ip after vm start
    5) check if guest and host can ping each other
    6) check if guest and host can ping outside
    7) start another vm connected to the same bridge
    8) check if the 2 guests can ping each other
    """
    def create_bridge(br_name, iface_name):
        """
        Create a linux bridge by virsh cmd:
        1. Stop NetworkManager and Start network service
        2. virsh iface-bridge <iface> <name> [--no-stp]

        :param br_name: bridge name
        :param iface_name: physical interface name
        :return: bridge created or raise exception
        """
        # Make sure the bridge not exist
        if libvirt.check_iface(br_name, "exists", "--all"):
            test.cancel("The bridge %s already exist" % br_name)

        # Create bridge
        utils_package.package_install('tmux')
        cmd = 'tmux -c "ip link add name {0} type bridge; ip link set {1} up;' \
              ' ip link set {1} master {0}; ip link set {0} up;' \
              ' pkill dhclient; sleep 6; dhclient {0}; ifconfig {1} 0"'.format(br_name, iface_name)
        process.run(cmd, shell=True, verbose=True)
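        # Note: wrapping the ip/dhclient sequence in "tmux -c" keeps it
        # running detached from the test's own shell, so the connection the
        # test depends on is not cut off while the interface is briefly
        # without an address (this rationale is an assumption about the
        # original intent).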

    def create_bridge_network(br_name, net_name):
        """
        Define and start the bridge type network
        """
        # check if network with the same name already exists
        output_all = virsh.net_list("--all").stdout.strip()
        if re.search(net_name, output_all):
            test.cancel("Network with the same name already exists!")
        test_xml = network_xml.NetworkXML(network_name="%s" % net_name)
        test_xml.forward = {"mode": "bridge"}
        test_xml.bridge = {"name": br_name}
        test_xml.create()

    def define_nwfilter(filter_name):
        """
        Define nwfilter vdsm-no-mac-spoofing with content like:
        <filter name='vdsm-no-mac-spoofing' chain='root'>
            <filterref filter='no-mac-spoofing'/>
            <filterref filter='no-arp-mac-spoofing'/>
        </filter>

        :param filter_name: the name of nwfilter
        :return: filter created or raise exception
        """
        filter_uuid = params.get("filter_uuid",
                                 "11111111-b071-6127-b4ec-111111111111")
        filter_params = {
            "filter_name": "vdsm-no-mac-spoofing",
            "filter_chain": "root",
            "filter_uuid": filter_uuid,
            "filterref_name_1": "no-mac-spoofing",
            "filterref_name_2": "no-arp-mac-spoofing"
        }
        filter_xml = libvirt.create_nwfilter_xml(filter_params).xml
        # Run command
        result = virsh.nwfilter_define(filter_xml,
                                       ignore_status=True,
                                       debug=True)
        if result.exit_status:
            test.fail("Failed to define nwfilter with %s" % filter_xml)

    def ping(src_ip, dest_ip, ping_count, timeout, session=None):
        """
        Wrap of ping

        :param src_ip: source address
        :param dest_ip: destination address
        :param ping_count: count of icmp packet
        :param timeout: timeout for the ping command
        :param session: local execution or session to execute the ping command
        :return: ping succeed or raise exception
        """
        status, output = utils_net.ping(dest=dest_ip,
                                        count=ping_count,
                                        interface=src_ip,
                                        timeout=timeout,
                                        session=session,
                                        force_ipv4=True)
        if status:
            test.fail("Fail to ping %s from %s" % (dest_ip, src_ip))

    def check_net_functions(guest_ip, ping_count, ping_timeout, guest_session,
                            host_ip, remote_url, endpoint_ip):
        # make sure host network works well
        # host ping remote url
        ping(host_ip, remote_url, ping_count, ping_timeout)
        # host ping guest
        ping(host_ip, guest_ip, ping_count, ping_timeout)
        # guest ping host
        ping(guest_ip,
             host_ip,
             ping_count,
             ping_timeout,
             session=guest_session)
        # guest ping remote url
        ping(guest_ip,
             remote_url,
             ping_count,
             ping_timeout,
             session=guest_session)
        # guest ping endpoint
        ping(guest_ip,
             endpoint_ip,
             ping_count,
             ping_timeout,
             session=guest_session)

    # Get test params
    bridge_name = params.get("bridge_name", "test_br0")
    filter_name = params.get("filter_name", "vdsm-no-mac-spoofing")
    ping_count = params.get("ping_count", "5")
    ping_timeout = float(params.get("ping_timeout", "10"))
    iface_name = utils_net.get_net_if(state="UP")[0]
    bridge_script = NETWORK_SCRIPT + bridge_name
    iface_script = NETWORK_SCRIPT + iface_name
    iface_script_bk = os.path.join(data_dir.get_tmp_dir(),
                                   "iface-%s.bk" % iface_name)
    attach_interface = "yes" == params.get("attach_interface", "no")
    iface_model = params.get("iface_model", "virtio")
    iface_source = eval(params.get("iface_source", "{'bridge':'test_br0'}"))
    iface_type = params.get("iface_type", None)
    iface_target = params.get("iface_target", "br_target")
    iface_alias = params.get("iface_alias", None)
    hotplug = "yes" == params.get("hotplug", "no")
    iface_driver = params.get("iface_driver", None)
    start_vm2 = "yes" == params.get("start_vm2", "no")
    create_network = "yes" == params.get("create_network", "no")
    update_device = "yes" == params.get("update_with_diff_type", "no")

    vms = params.get("vms").split()
    if len(vms) <= 1:
        test.cancel("Need two VMs to test")
    else:
        vm1_name = vms[0]
        vm2_name = vms[1]

    vm1 = env.get_vm(vm1_name)
    vm2 = env.get_vm(vm2_name)

    # Back up the interface script
    process.run("cp %s %s" % (iface_script, iface_script_bk),
                shell=True,
                verbose=True)
    # Back up vm xml
    vm1_xml_bak = vm_xml.VMXML.new_from_dumpxml(vm1_name)
    vm2_xml_bak = vm_xml.VMXML.new_from_dumpxml(vm2_name)

    # Stop NetworkManager service
    NM_service = service.Factory.create_service("NetworkManager")
    NM_status = NM_service.status()
    if not NM_status:
        NM_service.start()
    mac = utils_net.generate_mac_address_simple()

    try:
        create_bridge(bridge_name, iface_name)
        define_nwfilter(filter_name)
        if hotplug:
            err_msgs = ("No more available PCI slots",
                        "No more available PCI addresses")
            # delete the original interface on the vm before hot-plug
            if vm1.is_alive():
                vm1.destroy()
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm1_name)
            iface_xml = vmxml.get_devices('interface')[0]
            logging.debug("Delete the original interface")
            vmxml.del_device(iface_xml)
            vmxml.sync()
            vm1.start()
            # do hot-plug
            if attach_interface:
                logging.info("Try to hot-plug interface")
                options = (
                    "%s %s --model %s --mac %s" %
                    (iface_type, iface_source['bridge'], iface_model, mac))
                ret = virsh.attach_interface(vm1_name,
                                             options,
                                             ignore_status=True)
            else:
                logging.info("Try to hot-plug device")
                if create_network:
                    create_bridge_network(bridge_name, iface_source["network"])
                target = str({'dev': iface_target})
                iface_alias = str({'name': iface_alias})
                vm_iface_source = str(iface_source)
                iface_params = {
                    "type": iface_type,
                    "source": vm_iface_source,
                    "filter": filter_name,
                    "mac": mac,
                    'alias': iface_alias,
                    'target': target,
                    'model': iface_model,
                    'driver': iface_driver
                }
                attach_xml = interface.Interface(iface_params['type'])
                attach_xml.xml = libvirt.modify_vm_iface(
                    vm1_name, 'get_xml', iface_params)
                ret = virsh.attach_device(vm1_name,
                                          attach_xml.xml,
                                          ignore_status=True,
                                          debug=True)
            if ret.exit_status:
                if any([msg in ret.stderr for msg in err_msgs]):
                    test.error("No more pci slots, can't attach more devices")
                else:
                    test.fail("Failed to attach-interface: %s" %
                              ret.stderr.strip())
            else:
                logging.debug("Hot-plug interface or device pass")
                if update_device:
                    # As the interface type will change to actual type "bridge" in live xml, we need to ensure
                    # the update with original "network" type will not fail.
                    # Try to delete the nwfilter with original type in iface_params
                    update_xml = interface.Interface(iface_type)
                    iface_params_update = {
                        "del_filter": "yes",
                        "type": "network",
                        "source": vm_iface_source
                    }
                    update_xml.xml = libvirt.modify_vm_iface(
                        vm1_name, 'get_xml', iface_params_update)
                    ret = virsh.update_device(vm1_name,
                                              update_xml.xml,
                                              ignore_status=True,
                                              debug=True)
                    libvirt.check_exit_status(ret)

        else:
            vm_iface_source = str(iface_source)
            vm1_iface_params = {
                "type": "bridge",
                "source": vm_iface_source,
                "filter": filter_name,
                "mac": mac,
                'driver': iface_driver,
                "iface_model": iface_model
            }
            libvirt.modify_vm_iface(vm1_name, "update_iface", vm1_iface_params)

            if vm1.is_alive():
                vm1.destroy()

            vm1.start()
        # apply ip address as it may not be initialized
        session1 = session2 = None
        session1 = vm1.wait_for_serial_login()
        utils_net.restart_guest_network(session1)
        output = session1.cmd_output("ifconfig || ip a")
        logging.debug("guest1 ip info %s" % output)

        # Check guest's network function
        host_ip = utils_net.get_ip_address_by_interface(bridge_name)
        remote_url = params.get("remote_ip", "www.google.com")

        try:
            vm1_ip = utils_net.get_guest_ip_addr(session1, mac)
        except Exception as errs:
            test.fail("vm1 can't get IP with the new create bridge: %s" % errs)
        if hotplug:
            # Reboot vm1, then check the network to ensure the interface is
            # still there and works fine
            logging.info("reboot the vm")
            virsh.reboot(vm1_name)
            if session1 is None:
                session1 = vm1.wait_for_serial_login()
            ping(vm1_ip,
                 remote_url,
                 ping_count,
                 ping_timeout,
                 session=session1)
            # restart libvirtd service then check the interface still works fine
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
            vm1.cleanup_serial_console()
            vm1.create_serial_console()
            session1 = vm1.wait_for_serial_login()
            ping(vm1_ip,
                 remote_url,
                 ping_count,
                 ping_timeout,
                 session=session1)
            logging.info(
                "after reboot and restart libvirtd, the network works fine")
            if iface_driver:
                try:
                    driver_dict = eval(iface_driver)
                    if session1 is None:
                        session1 = vm1.wait_for_serial_login()
                    guest_iface_info = session1.cmd_output("ip l").strip()
                    guest_iface_name = re.findall(
                        r"^\d+: (\S+?)[@:].*state UP.*$", guest_iface_info,
                        re.MULTILINE)[0]
                    comb_size = driver_dict.get('queues')
                    rx_size = driver_dict.get('rx_queue_size')
                    session1.cmd_status("ethtool -L %s combined %s" %
                                        (guest_iface_name, comb_size))
                    ret, outp = session1.cmd_status_output("ethtool -l %s" %
                                                           guest_iface_name)
                    logging.debug("ethtool cmd output:%s" % outp)
                    if not ret:
                        pre_comb = re.search(
                            r"Pre-set maximums:[\s\S]*?Combined:.*?(\d+)",
                            outp).group(1)
                        cur_comb = re.search(
                            r"Current hardware settings:[\s\S]*?Combined:.*?(\d+)",
                            outp).group(1)
                        if int(pre_comb) != int(comb_size) or int(
                                cur_comb) != int(comb_size):
                            test.fail(
                                "Fail to check the combined size: setting: %s,"
                                "Pre-set: %s, Current-set: %s" %
                                (comb_size, pre_comb, cur_comb))
                        else:
                            logging.info(
                                "Getting correct Pre-set and Current set value"
                            )
                    else:
                        test.error("ethtool list fail: %s" % outp)
                    # as tx_queue size is only supported for vhost-user interface, only check rx_queue size
                    ret1, outp1 = session1.cmd_status_output("ethtool -g %s" %
                                                             guest_iface_name)
                    logging.debug("guest queue size setting is %s" % outp1)
                    if not ret1:
                        pre_set = re.search(r"Pre-set maximums:\s*RX:\s*(\d+)",
                                            outp1).group(1)
                        cur_set = re.search(
                            r"Current hardware settings:\s*RX:\s*(\d+)",
                            outp1).group(1)
                        if int(pre_set) != int(rx_size) or int(cur_set) != int(
                                rx_size):
                            test.fail("Fail to check the rx_queue_size!")
                except Exception as errs:
                    test.fail("Failed to get driver info: %s" % errs)
            # hot-unplug interface/device
            if attach_interface:
                ret = virsh.detach_interface(vm1_name,
                                             "bridge",
                                             ignore_status=True)
            else:
                ret = virsh.detach_device(vm1_name,
                                          attach_xml.xml,
                                          ignore_status=True,
                                          debug=True)
            if ret.exit_status:
                test.fail("Hot-unplug interface/device fail")
            else:
                logging.info("hot-unplug interface/device succeed")

        else:
            if start_vm2:
                # Start vm2 connect to the same bridge
                mac2 = utils_net.generate_mac_address_simple()
                vm2_iface_params = {
                    "type": "bridge",
                    "source": vm_iface_source,
                    "filter": filter_name,
                    "mac": mac2
                }
                libvirt.modify_vm_iface(vm2_name, "update_iface",
                                        vm2_iface_params)
                if vm2.is_alive():
                    vm2.destroy()
                vm2.start()

                # Check if vm1 and vm2 can ping each other
                try:
                    utils_net.update_mac_ip_address(vm2, timeout=120)
                    vm2_ip = vm2.get_address()
                except Exception as errs:
                    test.fail(
                        "vm2 can't get IP with the newly created bridge: %s" %
                        errs)
                session2 = vm2.wait_for_login()
                # make sure guest has got ip address
                utils_net.restart_guest_network(session2)
                output2 = session2.cmd_output("ifconfig || ip a")
                logging.debug("guest ip info %s" % output2)
                # check 2 guests' network functions
                check_net_functions(vm1_ip, ping_count, ping_timeout, session1,
                                    host_ip, remote_url, vm2_ip)
                check_net_functions(vm2_ip, ping_count, ping_timeout, session2,
                                    host_ip, remote_url, vm1_ip)

    finally:
        logging.debug("Start to restore")
        vm1_xml_bak.sync()
        vm2_xml_bak.sync()
        virsh.nwfilter_undefine(filter_name, ignore_status=True)
        if libvirt.check_iface(bridge_name, "exists", "--all"):
            virsh.iface_unbridge(bridge_name, timeout=60, debug=True)
        if os.path.exists(iface_script_bk):
            process.run("mv %s %s" % (iface_script_bk, iface_script),
                        shell=True,
                        verbose=True)
        if os.path.exists(bridge_script):
            process.run("rm -rf %s" % bridge_script, shell=True, verbose=True)
        cmd = 'tmux -c "ip link set {1} nomaster;  ip link delete {0};' \
              'pkill dhclient; sleep 6; dhclient {1}"'.format(bridge_name, iface_name)
        process.run(cmd, shell=True, verbose=True)
        # reload network configuration
        NM_service.restart()
        # recover NetworkManager
        if NM_status is True:
            NM_service.start()
        if 'network' in iface_source and iface_source[
                "network"] in virsh.net_state_dict():
            virsh.net_destroy(iface_source["network"], ignore_status=False)
Example #9
            complete_uri = libvirt_vm.complete_uri(local_ip)
            try:
                session = remote.remote_login("ssh", remote_ip, "22", "root",
                                              remote_pwd, "#")
                session.cmd_output('LANG=C')
                command = "virsh -c %s reboot %s %s" % (complete_uri, vm_name,
                                                        mode)
                status, output = session.cmd_status_output(command,
                                                           internal_timeout=5)
                session.close()
            except (remote.LoginError, process.CmdError, aexpect.ShellError) as e:
                logging.error("Exception: %s", str(e))
                status = -1
        if vm_ref != "remote_name":
            vm_ref = "%s %s" % (vm_ref, extra)
            cmdresult = virsh.reboot(vm_ref, mode,
                                     ignore_status=True, debug=True)
            status = cmdresult.exit_status
            if status:
                logging.debug("Error status, cmd error: %s", cmdresult.stderr)
                if not virsh.has_command_help_match('reboot', r'\s+--mode\s+'):
                    # old libvirt doesn't support reboot
                    status = -2
        output = virsh.dom_list(ignore_status=True).stdout.strip()

        # recover libvirtd service start
        if libvirtd == "off":
            utils_libvirtd.libvirtd_start()

        # check status_error
        if status_error:
            if not status:
Example #10
def run(test, params, env):
    """
    Test command: virsh reboot.

    Run a reboot command in the target domain.
    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Perform virsh reboot operation.
    4.Recover test environment.(libvirts service)
    5.Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    # run test case
    libvirtd = params.get("libvirtd", "on")
    vm_ref = params.get("reboot_vm_ref")
    status_error = ("yes" == params.get("status_error"))
    extra = params.get("reboot_extra", "")
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
    remote_pwd = params.get("remote_pwd", "password")
    agent = ("yes" == params.get("reboot_agent", "no"))
    mode = params.get("reboot_mode", "")
    pre_domian_status = params.get("reboot_pre_domian_status", "running")
    reboot_readonly = "yes" == params.get("reboot_readonly", "no")
    xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    try:
        # Add or remove qemu-agent from guest before test
        try:
            vm.prepare_guest_agent(channel=agent, start=agent)
        except virt_vm.VMError as e:
            logging.debug(e)
            # qemu-guest-agent is not available on RHEL5
            test.cancel("qemu-guest-agent package is not available")

        if pre_domian_status == "shutoff":
            virsh.destroy(vm_name)
        if libvirtd == "off":
            utils_libvirtd.libvirtd_stop()

        domid = vm.get_id()
        domuuid = vm.get_uuid()
        if vm_ref == "id":
            vm_ref = domid
        elif vm_ref == "name":
            vm_ref = vm_name
        elif vm_ref == "uuid":
            vm_ref = domuuid
        elif vm_ref == "hex_id":
            vm_ref = hex(int(domid))
        elif vm_ref.find("invalid") != -1:
            vm_ref = params.get(vm_ref)
        elif vm_ref == "remote_name":
            if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
                test.cancel("remote_ip and/or local_ip parameters"
                            " not changed from default values")
            complete_uri = libvirt_vm.complete_uri(local_ip)
            try:
                session = remote.remote_login("ssh", remote_ip, "22", "root",
                                              remote_pwd, "#")
                session.cmd_output('LANG=C')
                command = "virsh -c %s reboot %s %s" % (complete_uri, vm_name,
                                                        mode)
                status, output = session.cmd_status_output(command,
                                                           internal_timeout=5)
                session.close()
                if not status:
                    # operating on the guest before the reboot completes
                    # may result in data corruption
                    vm.wait_for_login().close()
            except (remote.LoginError, process.CmdError, aexpect.ShellError) as e:
                logging.error("Exception: %s", str(e))
                status = -1
        if vm_ref != "remote_name":
            vm_ref = "%s" % vm_ref
            if extra:
                vm_ref += " %s" % extra
            cmdresult = virsh.reboot(vm_ref, mode,
                                     ignore_status=True, debug=True)
            status = cmdresult.exit_status
            if status:
                logging.debug("Error status, cmd error: %s", cmdresult.stderr)
                if not virsh.has_command_help_match('reboot', r'\s+--mode\s+'):
                    # old libvirt doesn't support reboot
                    status = -2
            time.sleep(5)
            # skip the check if it is a negative test
            if not status_error:
                cmdoutput = virsh.domstate(vm_ref, '--reason',
                                           ignore_status=True, debug=True)
                domstate_status = cmdoutput.exit_status
                output = "running" in cmdoutput.stdout
                if domstate_status or (not output):
                    test.fail("Cmd error: %s Error status: %s" %
                              (cmdoutput.stderr, cmdoutput.stdout))
            elif pre_domian_status != 'shutoff':
                vm.wait_for_login().close()
        output = virsh.dom_list(ignore_status=True).stdout.strip()

        # Test the readonly mode
        if reboot_readonly:
            result = virsh.reboot(vm_ref, ignore_status=True, debug=True, readonly=True)
            libvirt.check_exit_status(result, expect_error=True)
            # This is for status_error check
            status = result.exit_status

        # recover libvirtd service start
        if libvirtd == "off":
            utils_libvirtd.libvirtd_start()

        # check status_error
        if status_error:
            if not status:
                test.fail("Run successfully with wrong command!")
        else:
            if status or (not re.search(vm_name, output)):
                if status == -2:
                    test.cancel("Reboot command doesn't work on older libvirt "
                                "versions")
                test.fail("Run failed with right command")
    finally:
        xml_backup.sync()
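
The mode string forwarded to virsh reboot comes from the reboot_mode param; libvirt selects the reboot method via --mode. A standalone sketch of the command this test exercises (the guest name and mode value are placeholders):

result = virsh.reboot("my-guest", "--mode acpi", ignore_status=True, debug=True)
libvirt.check_exit_status(result)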
Example #11
def run(test, params, env):
    """
    Test command: virsh domstate.

    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Perform virsh domstate operation.
    4.Recover test environment.
    5.Confirm the test result.
    """
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)

    libvirtd_state = params.get("libvirtd", "on")
    vm_ref = params.get("domstate_vm_ref")
    status_error = (params.get("status_error", "no") == "yes")
    extra = params.get("domstate_extra", "")
    vm_action = params.get("domstate_vm_action", "")
    vm_oncrash_action = params.get("domstate_vm_oncrash")
    reset_action = "yes" == params.get("reset_action", "no")
    dump_option = params.get("dump_option", "")
    start_action = params.get("start_action", "normal")
    kill_action = params.get("kill_action", "normal")
    check_libvirtd_log = params.get("check_libvirtd_log", "no")
    err_msg = params.get("err_msg", "")
    remote_uri = params.get("remote_uri")

    domid = vm.get_id()
    domuuid = vm.get_uuid()

    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = vm_name
    elif vm_ref == "uuid":
        vm_ref = domuuid

    # Back up xml file.
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Back up qemu.conf
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()

    # Config libvirtd log
    if check_libvirtd_log == "yes":
        libvirtd_conf = utils_config.LibvirtdConfig()
        libvirtd_log_file = os.path.join(data_dir.get_tmp_dir(), "libvirtd.log")
        libvirtd_conf["log_level"] = '1'
        libvirtd_conf["log_filters"] = ('"1:json 1:libvirt 1:qemu 1:monitor '
                                        '3:remote 4:event"')
        libvirtd_conf["log_outputs"] = '"1:file:%s"' % libvirtd_log_file
        logging.debug("the libvirtd config file content is:\n %s" %
                      libvirtd_conf)
        libvirtd.restart()

    # Get image file
    image_source = vm.get_first_disk_devices()['source']
    logging.debug("image source: %s" % image_source)
    new_image_source = image_source + '.rename'

    dump_path = os.path.join(data_dir.get_tmp_dir(), "dump/")
    logging.debug("dump_path: %s", dump_path)
    try:
        os.mkdir(dump_path)
    except OSError:
        # If the path already exists then pass
        pass
    dump_file = ""
    try:
        # Keep the guest memory small so that dumping the core does not
        # take long enough to time out the testcase
        if vm_oncrash_action in ['coredump-destroy', 'coredump-restart']:
            memory_value = int(params.get("memory_value", "2097152"))
            memory_unit = params.get("memory_unit", "KiB")
            vmxml.set_memory(memory_value)
            vmxml.set_memory_unit(memory_unit)
            logging.debug(vmxml)
            vmxml.sync()

        if vm_action == "crash":
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vmxml.on_crash = vm_oncrash_action
            if not vmxml.xmltreefile.find('devices').findall('panic'):
                # Add <panic> device to domain
                panic_dev = Panic()
                if "ppc" not in platform.machine():
                    panic_dev.addr_type = "isa"
                    panic_dev.addr_iobase = "0x505"
                vmxml.add_device(panic_dev)
            vmxml.sync()
            # Config auto_dump_path in qemu.conf
            qemu_conf.auto_dump_path = dump_path
            libvirtd.restart()
            if vm_oncrash_action in ['coredump-destroy', 'coredump-restart']:
                dump_file = dump_path + "*" + vm_name[:20] + "-*"
            # Start VM and check the panic device
            virsh.start(vm_name, ignore_status=False)
            vmxml_new = vm_xml.VMXML.new_from_dumpxml(vm_name)
            # Skip this test if no panic device is found
            if not vmxml_new.xmltreefile.find('devices').findall('panic'):
                test.cancel("No 'panic' device in the guest. Maybe your "
                            "libvirt version doesn't support it.")
        try:
            if vm_action == "suspend":
                virsh.suspend(vm_name, ignore_status=False)
            elif vm_action == "resume":
                virsh.suspend(vm_name, ignore_status=False)
                virsh.resume(vm_name, ignore_status=False)
            elif vm_action == "destroy":
                virsh.destroy(vm_name, ignore_status=False)
            elif vm_action == "start":
                virsh.destroy(vm_name, ignore_status=False)
                if start_action == "rename":
                    # rename the guest image file to make guest fail to start
                    os.rename(image_source, new_image_source)
                    virsh.start(vm_name, ignore_status=True)
                else:
                    virsh.start(vm_name, ignore_status=False)
                    if start_action == "restart_libvirtd":
                        libvirtd.restart()
            elif vm_action == "kill":
                if kill_action == "stop_libvirtd":
                    libvirtd.stop()
                    utils_misc.kill_process_by_pattern(vm_name)
                    libvirtd.restart()
                elif kill_action == "reboot_vm":
                    virsh.reboot(vm_name, ignore_status=False)
                    utils_misc.kill_process_tree(vm.get_pid(), signal.SIGKILL)
                else:
                    utils_misc.kill_process_tree(vm.get_pid(), signal.SIGKILL)
            elif vm_action == "crash":
                session = vm.wait_for_login()
                session.cmd("service kdump stop", ignore_all_errors=True)
                # Enable sysRq
                session.cmd("echo 1 > /proc/sys/kernel/sysrq")
                # Send key ALT-SysRq-c to crash VM, and command will not
                # return as vm crashed, so fail early for 'destroy' and
                # 'preserve' action. For 'restart', 'coredump-restart'
                # and 'coredump-destroy' actions, they all need more time
                # to dump core file or restart OS, so using the default
                # session command timeout(60s)
                try:
                    if vm_oncrash_action in ['destroy', 'preserve']:
                        timeout = 3
                    else:
                        timeout = 60
                    session.cmd("echo c > /proc/sysrq-trigger",
                                timeout=timeout)
                except (ShellTimeoutError, ShellProcessTerminatedError):
                    pass
                session.close()
            elif vm_action == "dump":
                dump_file = dump_path + "*" + vm_name + "-*"
                virsh.dump(vm_name, dump_file, dump_option, ignore_status=False)
        except process.CmdError as detail:
            test.error("Guest prepare action error: %s" % detail)

        if libvirtd_state == "off":
            libvirtd.stop()

        # A timing issue can cause the test to check domstate before the
        # prior kill action has completed
        if vm_action == "kill":
            time.sleep(2)

        if remote_uri:
            remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
            remote_pwd = params.get("remote_pwd", None)
            remote_user = params.get("remote_user", "root")
            if remote_ip.count("EXAMPLE.COM"):
                test.cancel("Test 'remote' parameters not setup")
            ssh_key.setup_ssh_key(remote_ip, remote_user, remote_pwd)

        result = virsh.domstate(vm_ref, extra, ignore_status=True,
                                debug=True, uri=remote_uri)
        status = result.exit_status
        output = result.stdout.strip()

        # check status_error
        if status_error:
            if not status:
                test.fail("Run successfully with wrong command!")
        else:
            if status or not output:
                test.fail("Run failed with right command")
            if extra.count("reason"):
                if vm_action == "suspend":
                    # Destroy the vm here, otherwise it takes a long time to destroy it later
                    virsh.destroy(vm_name)
                    if not output.count("user"):
                        test.fail(err_msg % vm_action)
                elif vm_action == "resume":
                    if not output.count("unpaused"):
                        test.fail(err_msg % vm_action)
                elif vm_action == "destroy":
                    if not output.count("destroyed"):
                        test.fail(err_msg % vm_action)
                elif vm_action == "start":
                    if start_action == "rename":
                        if not output.count("shut off (failed)"):
                            test.fail(err_msg % vm_action)
                    else:
                        if not output.count("booted"):
                            test.fail(err_msg % vm_action)
                elif vm_action == "kill":
                    if not output.count("crashed"):
                        test.fail(err_msg % vm_action)
                elif vm_action == "crash":
                    if not check_crash_state(output, vm_oncrash_action,
                                             vm_name, dump_file):
                        test.fail(err_msg % vm_action)
                    # VM will be in the preserved state; perform virsh reset
                    # and check that the VM reboots and domstate moves from
                    # the crashed state to running, as a bug was observed here
                    if vm_oncrash_action == "preserve" and reset_action:
                        virsh_dargs = {'debug': True, 'ignore_status': True}
                        ret = virsh.reset(vm_name, **virsh_dargs)
                        libvirt.check_exit_status(ret)
                        ret = virsh.domstate(vm_name, extra,
                                             **virsh_dargs).stdout.strip()
                        if "paused (crashed)" not in ret:
                            test.fail("vm fails to change state from crashed"
                                      " to paused after virsh reset")
                        # it will be in paused (crashed) state after reset
                        # and resume is required for the vm to reboot
                        ret = virsh.resume(vm_name, **virsh_dargs)
                        libvirt.check_exit_status(ret)
                        vm.wait_for_login()
                        cmd_output = virsh.domstate(vm_name,
                                                    '--reason').stdout.strip()
                        if "running" not in cmd_output:
                            test.fail("guest state failed to get updated")
                    if vm_oncrash_action in ['coredump-destroy',
                                             'coredump-restart']:
                        if not find_dump_file:
                            test.fail("Core dump file is not created in dump "
                                      "path: %s" % dump_path)
                    # To cover bug 1178652
                    if (vm_oncrash_action == "rename-restart" and
                            check_libvirtd_log == "yes"):
                        libvirtd.restart()
                        if not os.path.exists(libvirtd_log_file):
                            test.fail("Expected VM log file: %s not exists"
                                      % libvirtd_log_file)
                        cmd = ("grep -nr '%s' %s" % (err_msg, libvirtd_log_file))
                        if not process.run(cmd, ignore_status=True, shell=True).exit_status:
                            test.fail("Find error message %s from log file: %s."
                                      % (err_msg, libvirtd_log_file))
                elif vm_action == "dump":
                    if dump_option == "--live":
                        if not output.count("running (unpaused)"):
                            test.fail(err_msg % vm_action)
                    elif dump_option == "--crash":
                        if not output.count("shut off (crashed)"):
                            test.fail(err_msg % vm_action)
            if vm_ref == "remote":
                if not (re.search("running", output) or
                        re.search("blocked", output) or
                        re.search("idle", output)):
                    test.fail("Run failed with right command")
    finally:
        qemu_conf.restore()
        if check_libvirtd_log == "yes":
            libvirtd_conf.restore()
            if os.path.exists(libvirtd_log_file):
                os.remove(libvirtd_log_file)
        libvirtd.restart()
        if vm_action == "start" and start_action == "rename":
            os.rename(new_image_source, image_source)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
        if os.path.exists(dump_path):
            shutil.rmtree(dump_path)
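
check_crash_state() and the find_dump_file flag are referenced above but not defined in this excerpt. A rough reconstruction of the state check, based on the domstate reasons libvirt reports for each on_crash policy (this sketch is an assumption, not the original helper):

import glob


def check_crash_state(domstate_output, vm_oncrash_action, vm_name, dump_file=""):
    """Map the on_crash policy to the expected 'virsh domstate --reason' text."""
    # vm_name is kept only to match the call site above
    expected = {
        "destroy": "shut off (crashed)",
        "preserve": "paused (crashed)",
        "restart": "running (crashed)",
        "coredump-destroy": "shut off (crashed)",
        "coredump-restart": "running (crashed)",
    }.get(vm_oncrash_action, "crashed")
    dump_ok = True
    if "coredump" in vm_oncrash_action and dump_file:
        dump_ok = bool(glob.glob(dump_file))
    return expected in domstate_output and dump_ok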
Example #12
def run(test, params, env):
    """
    Test memory management of nvdimm
    """
    vm_name = params.get('main_vm')

    nvdimm_file = params.get('nvdimm_file')
    check = params.get('check', '')
    status_error = "yes" == params.get('status_error', 'no')
    error_msg = params.get('error_msg', '')
    qemu_checks = params.get('qemu_checks', '').split('`')
    test_str = 'This is a test'

    def check_boot_config(session):
        """
        Check /boot/config-$KVER file
        """
        check_list = [
            'CONFIG_LIBNVDIMM=m', 'CONFIG_BLK_DEV_PMEM=m', 'CONFIG_ACPI_NFIT=m'
        ]
        current_boot = session.cmd('uname -r').strip()
        content = session.cmd('cat /boot/config-%s' % current_boot).strip()
        for item in check_list:
            if item in content:
                logging.info(item)
            else:
                logging.error(item)
                test.fail('/boot/config content not correct')

    def check_file_in_vm(session, path, expect=True):
        """
        Check whether the existence of the file meets expectation
        """
        exist = session.cmd_status('ls %s' % path)
        logging.debug(exist)
        exist = (exist == 0)
        status = '' if exist else 'NOT'
        logging.info('File %s does %s exist', path, status)
        if exist != expect:
            err_msg = "Existence doesn't meet expectation: %s " % path
            if expect:
                err_msg += 'should exist.'
            else:
                err_msg += 'should not exist.'
            test.fail(err_msg)

    def create_cpuxml():
        """
        Create cpu xml for test
        """
        cpu_params = {
            k: v
            for k, v in params.items() if k.startswith('cpuxml_')
        }
        logging.debug(cpu_params)
        cpu_xml = vm_xml.VMCPUXML()
        cpu_xml.xml = "<cpu><numa/></cpu>"
        for attr_key in cpu_params:
            val = cpu_params[attr_key]
            logging.debug('Set cpu params')
            setattr(cpu_xml, attr_key.replace('cpuxml_', ''),
                    eval(val) if ':' in val else val)
        logging.debug(cpu_xml)
        return cpu_xml.copy()

    def create_nvdimm_xml(**mem_param):
        """
        Create xml of nvdimm memory device
        """
        mem_xml = utils_hotplug.create_mem_xml(
            tg_size=mem_param['target_size'],
            mem_addr={'slot': mem_param['address_slot']},
            tg_sizeunit=mem_param['target_size_unit'],
            tg_node=mem_param['target_node'],
            mem_discard=mem_param.get('discard'),
            mem_model="nvdimm",
            lb_size=mem_param.get('label_size'),
            lb_sizeunit=mem_param.get('label_size_unit'),
            mem_access=mem_param['mem_access'])

        source_xml = memory.Memory.Source()
        source_xml.path = mem_param['source_path']
        mem_xml.source = source_xml
        logging.debug(mem_xml)

        return mem_xml.copy()

    def check_nvdimm_file(file_name):
        """
        check if the file exists in nvdimm memory device

        :param file_name: the file name in nvdimm device
        """
        vm_session = vm.wait_for_login()
        if test_str not in vm_session.cmd('cat /mnt/%s' % file_name):
            test.fail('"%s" should be in output' % test_str)

    bkxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    if 'ppc64le' in platform.machine().lower():
        if not libvirt_version.version_compare(6, 2, 0):
            test.cancel('Libvirt version should be >= 6.2.0'
                        ' to support nvdimm on pseries')

    try:
        vm = env.get_vm(vm_name)
        # Create nvdimm file on the host
        process.run('truncate -s 512M %s' % nvdimm_file, verbose=True)
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

        # Set cpu according to params
        cpu_xml = create_cpuxml()
        vmxml.cpu = cpu_xml

        # Update other vcpu, memory info according to params
        update_vm_args = {
            k: params[k]
            for k in params if k.startswith('setvm_')
        }
        logging.debug(update_vm_args)
        for key, value in list(update_vm_args.items()):
            attr = key.replace('setvm_', '')
            logging.debug('Set %s = %s', attr, value)
            setattr(vmxml, attr, int(value) if value.isdigit() else value)
        logging.debug(virsh.dumpxml(vm_name))

        # Add an nvdimm mem device to vm xml
        nvdimm_params = {
            k.replace('nvdimmxml_', ''): v
            for k, v in params.items() if k.startswith('nvdimmxml_')
        }
        nvdimm_xml = create_nvdimm_xml(**nvdimm_params)
        vmxml.add_device(nvdimm_xml)
        if check in ['ppc_no_label', 'discard']:
            result = virsh.define(vmxml.xml, debug=True)
            libvirt.check_result(result, expected_fails=[error_msg])
            return
        vmxml.sync()
        logging.debug(virsh.dumpxml(vm_name))

        virsh.start(vm_name, debug=True, ignore_status=False)

        # Check the qemu command line entries one by one; a plain map() is
        # lazy on Python 3 and would never run the checks
        for qemu_check in qemu_checks:
            libvirt.check_qemu_cmd_line(qemu_check)

        alive_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)

        # Check if the guest support NVDIMM:
        # check /boot/config-$KVER file
        vm_session = vm.wait_for_login()
        check_boot_config(vm_session)

        # check /dev/pmem0 existed inside guest
        check_file_in_vm(vm_session, '/dev/pmem0')

        if check == 'back_file':
            # Create a file system on /dev/pmem0
            if platform.platform().count('el8'):
                vm_session.cmd('mkfs.xfs -f /dev/pmem0 -m reflink=0')
            else:
                vm_session.cmd('mkfs.xfs -f /dev/pmem0')

            vm_session.cmd('mount -o dax /dev/pmem0 /mnt')
            vm_session.cmd('echo \"%s\" >/mnt/foo' % test_str)
            vm_session.cmd('umount /mnt')
            vm_session.close()

            # Shutdown the guest, then start it, remount /dev/pmem0,
            # check if the test file is still on the file system
            vm.destroy()
            vm.start()
            vm_session = vm.wait_for_login()

            vm_session.cmd('mount -o dax /dev/pmem0 /mnt')
            if test_str not in vm_session.cmd('cat /mnt/foo'):
                test.fail('\"%s\" should be in /mnt/foo' % test_str)

            # From the host, check the file has changed:
            host_output = process.run('hexdump -C /tmp/nvdimm',
                                      shell=True,
                                      verbose=True).stdout_text
            if test_str not in host_output:
                test.fail('\"%s\" should be in output' % test_str)

            # Shutdown the guest, and edit the xml,
            # include: access='private'
            vm_session.close()
            vm.destroy()
            vm_devices = vmxml.devices
            nvdimm_device = vm_devices.by_device_tag('memory')[0]
            nvdimm_index = vm_devices.index(nvdimm_device)
            vm_devices[nvdimm_index].mem_access = 'private'
            vmxml.devices = vm_devices
            vmxml.sync()

            # Login to the guest, mount the /dev/pmem0 and .
            # create a file: foo-private
            vm.start()
            vm_session = vm.wait_for_login()

            libvirt.check_qemu_cmd_line('mem-path=/tmp/nvdimm,share=no')

            private_str = 'This is a test for foo-private'
            vm_session.cmd('mount -o dax /dev/pmem0 /mnt/')

            file_private = 'foo-private'
            vm_session.cmd("echo '%s' >/mnt/%s" % (private_str, file_private))
            if private_str not in vm_session.cmd('cat /mnt/%s' % file_private):
                test.fail('"%s" should be in output' % private_str)

            # Shutdown the guest, then start it,
            # check the file: foo-private is no longer existed
            vm_session.close()
            vm.destroy()

            vm.start()
            vm_session = vm.wait_for_login()
            vm_session.cmd('mount -o dax /dev/pmem0 /mnt/')
            if file_private in vm_session.cmd('ls /mnt/'):
                test.fail('%s should not exist, for it was '
                          'created when access=private' % file_private)

        if check == 'label_back_file':
            # Create an xfs file system on /dev/pmem0
            if platform.platform().count('el8'):
                vm_session.cmd(
                    'mkfs.xfs -f -b size=4096 /dev/pmem0 -m reflink=0')
            else:
                vm_session.cmd('mkfs.xfs -f -b size=4096 /dev/pmem0')

            # Mount the file system with DAX enabled for page cache bypass
            output = vm_session.cmd_output('mount -o dax /dev/pmem0 /mnt/')
            logging.info(output)

            # Create a file on the nvdimm device.
            test_str = 'This is a test with label'
            vm_session.cmd('echo "%s" >/mnt/foo-label' % test_str)
            if test_str not in vm_session.cmd('cat /mnt/foo-label '):
                test.fail('"%s" should be in the output of cat cmd' % test_str)

            # Reboot the guest, and remount the nvdimm device in the guest.
            # Check that the file foo-label still exists
            vm_session.close()
            virsh.reboot(vm_name, debug=True)
            vm_session = vm.wait_for_login()

            vm_session.cmd('mount -o dax /dev/pmem0  /mnt')
            if test_str not in vm_session.cmd('cat /mnt/foo-label '):
                test.fail('"%s" should be in output' % test_str)

            if params.get('check_life_cycle', 'no') == 'yes':
                virsh.managedsave(vm_name, ignore_status=False, debug=True)
                vm.start()
                check_nvdimm_file('foo-label')

                vm_s1 = vm_name + ".s1"
                virsh.save(vm_name, vm_s1, ignore_status=False, debug=True)
                virsh.restore(vm_s1, ignore_status=False, debug=True)
                check_nvdimm_file('foo-label')

                virsh.snapshot_create_as(vm_name,
                                         vm_s1,
                                         ignore_status=False,
                                         debug=True)
                virsh.snapshot_revert(vm_name,
                                      vm_s1,
                                      ignore_status=False,
                                      debug=True)
                virsh.snapshot_delete(vm_name,
                                      vm_s1,
                                      ignore_status=False,
                                      debug=True)

        if check == 'hot_plug':
            # Create file for 2nd nvdimm device
            nvdimm_file_2 = params.get('nvdimm_file_2')
            process.run('truncate -s 512M %s' % nvdimm_file_2)

            # Add 2nd nvdimm device to vm xml
            nvdimm2_params = {
                k.replace('nvdimmxml2_', ''): v
                for k, v in params.items() if k.startswith('nvdimmxml2_')
            }
            nvdimm2_xml = create_nvdimm_xml(**nvdimm2_params)

            ori_devices = vm_xml.VMXML.new_from_dumpxml(vm_name).get_devices(
                'memory')
            logging.debug('Starts with %d memory devices', len(ori_devices))

            result = virsh.attach_device(vm_name, nvdimm2_xml.xml, debug=True)
            libvirt.check_exit_status(result)

            # After attach, there should be an extra memory device
            devices_after_attach = vm_xml.VMXML.new_from_dumpxml(
                vm_name).get_devices('memory')
            logging.debug('After attach, vm has %d memory devices',
                          len(devices_after_attach))
            if len(ori_devices) != len(devices_after_attach) - 1:
                test.fail(
                    'Number of memory devices after attach is %d, should be %d'
                    % (len(devices_after_attach), len(ori_devices) + 1))

            time.sleep(5)
            check_file_in_vm(vm_session, '/dev/pmem1')

            nvdimm_detach = alive_vmxml.get_devices('memory')[-1]
            logging.debug(nvdimm_detach)

            # Hot-unplug nvdimm device
            result = virsh.detach_device(vm_name,
                                         nvdimm_detach.xml,
                                         debug=True)
            libvirt.check_exit_status(result)

            vm_session.close()
            vm_session = vm.wait_for_login()

            virsh.dumpxml(vm_name, debug=True)

            left_devices = vm_xml.VMXML.new_from_dumpxml(vm_name).get_devices(
                'memory')
            logging.debug(left_devices)

            if len(left_devices) != len(ori_devices):
                test.fail(
                    'Number of memory devices after detach is %d, should be %d'
                    % (len(left_devices), len(ori_devices)))

            time.sleep(5)
            check_file_in_vm(vm_session, '/dev/pmem1', expect=False)
    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        bkxml.sync()
        os.remove(nvdimm_file)
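A note on the host-side verification above: with access='shared' (the default mapping in this test), data the guest writes through /dev/pmem0 lands in the host backing file, while access='private' keeps it out. Below is a minimal, self-contained sketch of that check; the backing path and marker string are placeholder assumptions, not values taken from the test.

import subprocess

def backing_file_contains(backing_path, marker):
    """Return True if `marker` appears in a hexdump of the backing file."""
    # Mirrors the `hexdump -C /tmp/nvdimm` check above: readable strings
    # written by the guest show up in the canonical (-C) output.
    result = subprocess.run(['hexdump', '-C', backing_path],
                            capture_output=True, text=True, check=True)
    return marker in result.stdout

# Hypothetical usage: expected True with access='shared', and False for
# writes made while access='private':
# backing_file_contains('/tmp/nvdimm', 'This is a test!')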
Example No. 13
def run(test, params, env):
    """
    Test memory management of nvdimm
    """
    vm_name = params.get('main_vm')

    nvdimm_file = params.get('nvdimm_file')
    check = params.get('check', '')
    qemu_checks = params.get('qemu_checks', '').split('`')
    test_str = 'This is a test!'

    def check_boot_config(session):
        """
        Check /boot/config-$KVER file
        """
        check_list = [
            'CONFIG_LIBNVDIMM=m',
            'CONFIG_BLK_DEV_PMEM=m',
            'CONFIG_ACPI_NFIT=m'
        ]
        current_boot = session.cmd('uname -r').strip()
        content = session.cmd('cat /boot/config-%s' % current_boot).strip()
        for item in check_list:
            if item in content:
                logging.info(item)
            else:
                logging.error(item)
                test.fail('/boot/config content not correct')

    def check_file_in_vm(session, path, expect=True):
        """
        Check whether the existence of the file meets expectation
        """
        exist = session.cmd_status('ls %s' % path)
        logging.debug(exist)
        exist = (exist == 0)
        status = 'does' if exist else 'does NOT'
        logging.info('File %s %s exist', path, status)
        if exist != expect:
            err_msg = 'Existence doesn\'t meet expectation: %s ' % path
            if expect:
                err_msg += 'should exist.'
            else:
                err_msg += 'should not exist.'
            test.fail(err_msg)

    def create_cpuxml():
        """
        Create cpu xml for test
        """
        cpu_params = {k: v for k, v in params.items() if k.startswith('cpuxml_')}
        logging.debug(cpu_params)
        cpu_xml = vm_xml.VMCPUXML()
        cpu_xml.xml = "<cpu><numa/></cpu>"
        for attr_key in cpu_params:
            val = cpu_params[attr_key]
            logging.debug('Set cpu params')
            setattr(cpu_xml, attr_key.replace('cpuxml_', ''),
                    eval(val) if ':' in val else val)
        logging.debug(cpu_xml)
        return cpu_xml.copy()

    def create_nvdimm_xml(**mem_param):
        """
        Create xml of nvdimm memory device
        """
        mem_xml = utils_hotplug.create_mem_xml(
            tg_size=mem_param['target_size'],
            mem_addr={'slot': mem_param['address_slot']},
            tg_sizeunit=mem_param['target_size_unit'],
            tg_node=mem_param['target_node'],
            mem_model="nvdimm",
            lb_size=mem_param.get('label_size'),
            lb_sizeunit=mem_param.get('label_size_unit'),
            mem_access=mem_param['mem_access']
        )

        source_xml = memory.Memory.Source()
        source_xml.path = mem_param['source_path']
        mem_xml.source = source_xml
        logging.debug(mem_xml)

        return mem_xml.copy()

    bkxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        vm = env.get_vm(vm_name)
        # Create nvdimm file on the host
        process.run('truncate -s 512M %s' % nvdimm_file, verbose=True)
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

        # Set cpu according to params
        cpu_xml = create_cpuxml()
        vmxml.cpu = cpu_xml

        # Update other vcpu, memory info according to params
        update_vm_args = {k: params[k] for k in params
                          if k.startswith('setvm_')}
        logging.debug(update_vm_args)
        for key, value in list(update_vm_args.items()):
            attr = key.replace('setvm_', '')
            logging.debug('Set %s = %s', attr, value)
            setattr(vmxml, attr, int(value) if value.isdigit() else value)
        logging.debug(virsh.dumpxml(vm_name))

        # Add an nvdimm mem device to vm xml
        nvdimm_params = {k.replace('nvdimmxml_', ''): v
                         for k, v in params.items() if k.startswith('nvdimmxml_')}
        nvdimm_xml = create_nvdimm_xml(**nvdimm_params)
        vmxml.add_device(nvdimm_xml)
        vmxml.sync()
        logging.debug(virsh.dumpxml(vm_name))

        virsh.start(vm_name, debug=True, ignore_status=False)

        # Check qemu command line one by one; map() is lazy in Python 3,
        # so iterate explicitly
        for qemu_check in qemu_checks:
            libvirt.check_qemu_cmd_line(qemu_check)

        alive_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)

        # Check if the guest support NVDIMM:
        # check /boot/config-$KVER file
        vm_session = vm.wait_for_login()
        check_boot_config(vm_session)

        # check /dev/pmem0 existed inside guest
        check_file_in_vm(vm_session, '/dev/pmem0')

        if check == 'back_file':
            # Create an ext4 file system on /dev/pmem0
            cmd_list = [
                "mkfs.ext4 /dev/pmem0",
                "mount -o dax /dev/pmem0 /mnt",
                "echo '%s' >/mnt/foo" % test_str,
                "umount /mnt"
            ]
            # map() is lazy in Python 3 and would run nothing here, so
            # execute the commands explicitly (see the demonstration
            # after this example)
            for cmd in cmd_list:
                vm_session.cmd(cmd)
            vm_session.close()

            # Shut down the guest, then start it, remount /dev/pmem0,
            # and check if the test file is still on the file system
            vm.destroy()
            vm.start()
            vm_session = vm.wait_for_login()

            vm_session.cmd('mount -o dax /dev/pmem0 /mnt')
            if test_str not in vm_session.cmd('cat /mnt/foo'):
                test.fail('"%s" should be in /mnt/foo' % test_str)

            # From the host, check the file has changed:
            host_output = process.run('hexdump -C /tmp/nvdimm',
                                      shell=True, verbose=True).stdout_text
            if test_str not in host_output:
                test.fail('"%s" should be in output' % test_str)

            # Shut down the guest and edit the XML to
            # set access='private' on the nvdimm device
            vm_session.close()
            vm.destroy()
            vm_devices = vmxml.devices
            nvdimm_device = vm_devices.by_device_tag('memory')[0]
            nvdimm_index = vm_devices.index(nvdimm_device)
            vm_devices[nvdimm_index].mem_access = 'private'
            vmxml.devices = vm_devices
            vmxml.sync()

            # Log in to the guest, mount /dev/pmem0 and
            # create a file: foo-private
            vm.start()
            vm_session = vm.wait_for_login()

            libvirt.check_qemu_cmd_line('mem-path=/tmp/nvdimm,share=no')

            private_str = 'This is a test for foo-private!'
            vm_session.cmd('mount -o dax /dev/pmem0 /mnt/')

            file_private = 'foo-private'
            vm_session.cmd("echo '%s' >/mnt/%s" % (private_str, file_private))
            if private_str not in vm_session.cmd('cat /mnt/%s' % file_private):
                test.fail('"%s" should be in output' % private_str)

            # Shut down the guest, then start it again, and
            # check that the file foo-private no longer exists
            vm_session.close()
            vm.destroy()

            vm.start()
            vm_session = vm.wait_for_login()
            vm_session.cmd('mount -o dax /dev/pmem0 /mnt/')
            if file_private in vm_session.cmd('ls /mnt/'):
                test.fail('%s should not exist, for it was '
                          'created when access=private' % file_private)

        if check == 'label_back_file':
            # Create an xfs file system on /dev/pmem0
            vm_session.cmd('mkfs.xfs -f -b size=4096 /dev/pmem0')
            # Mount the file system with DAX enabled for page cache bypass
            output = vm_session.cmd_output('mount -o dax /dev/pmem0 /mnt/')
            logging.info(output)

            # Create a file on the nvdimm device.
            test_str = 'This is a test with label'
            vm_session.cmd('echo "%s" >/mnt/foo-label' % test_str)
            if test_str not in vm_session.cmd('cat /mnt/foo-label '):
                test.fail('"%s" should be in output' % test_str)

            # Reboot the guest, and remount the nvdimm device in the guest.
            # Check that the file foo-label still exists
            vm_session.close()
            virsh.reboot(vm_name, debug=True)
            vm_session = vm.wait_for_login()

            vm_session.cmd('mount -o dax /dev/pmem0  /mnt')
            if test_str not in vm_session.cmd('cat /mnt/foo-label '):
                test.fail('"%s" should be in output' % test_str)

        if check == 'hot_plug':
            # Create file for 2nd nvdimm device
            nvdimm_file_2 = params.get('nvdimm_file_2')
            process.run('truncate -s 512M %s' % nvdimm_file_2)

            # Add 2nd nvdimm device to vm xml
            nvdimm2_params = {k.replace('nvdimmxml2_', ''): v
                              for k, v in params.items() if k.startswith('nvdimmxml2_')}
            nvdimm2_xml = create_nvdimm_xml(**nvdimm2_params)

            ori_devices = vm_xml.VMXML.new_from_dumpxml(vm_name).get_devices('memory')
            logging.debug('Starts with %d memory devices', len(ori_devices))

            result = virsh.attach_device(vm_name, nvdimm2_xml.xml, debug=True)
            libvirt.check_exit_status(result)

            # After attach, there should be an extra memory device
            devices_after_attach = vm_xml.VMXML.new_from_dumpxml(vm_name).get_devices('memory')
            logging.debug('After attach, vm has %d memory devices',
                          len(devices_after_attach))
            if len(ori_devices) != len(devices_after_attach) - 1:
                test.fail('Number of memory devices after attach is %d, should be %d'
                          % (len(devices_after_attach), len(ori_devices) + 1))

            time.sleep(5)
            check_file_in_vm(vm_session, '/dev/pmem1')

            nvdimm_detach = alive_vmxml.get_devices('memory')[-1]
            logging.debug(nvdimm_detach)

            # Hot-unplug nvdimm device
            result = virsh.detach_device(vm_name, nvdimm_detach.xml, debug=True)
            libvirt.check_exit_status(result)

            vm_session.close()
            vm_session = vm.wait_for_login()

            virsh.dumpxml(vm_name, debug=True)

            left_devices = vm_xml.VMXML.new_from_dumpxml(vm_name).get_devices('memory')
            logging.debug(left_devices)

            if len(left_devices) != len(ori_devices):
                test.fail('Number of memory devices after detach is %d, should be %d'
                          % (len(left_devices), len(ori_devices)))

            time.sleep(5)
            check_file_in_vm(vm_session, '/dev/pmem1', expect=False)
    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        bkxml.sync()
        os.remove(nvdimm_file)
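One detail behind the loop fixes above: in Python 3, map() returns a lazy iterator, so the original map(vm_session.cmd, cmd_list) built an iterator and never ran a single command. A self-contained demonstration of why the explicit loop is required:

ran = []

def fake_cmd(cmd):
    """Stand-in for vm_session.cmd that just records what ran."""
    ran.append(cmd)

cmd_list = ["mkfs.ext4 /dev/pmem0", "mount -o dax /dev/pmem0 /mnt"]

map(fake_cmd, cmd_list)   # lazy: the iterator is discarded, nothing runs
assert ran == []

for cmd in cmd_list:      # eager: this is what the test must do
    fake_cmd(cmd)
assert ran == cmd_list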
Example No. 14
def run(test, params, env):
    """
    LXC container life cycle testing by virsh command
    """
    uri = params.get("connect_uri", "lxc:///")
    vm_name = params.get("main_vm")
    dom_type = params.get("lxc_domtype", "lxc")
    vcpu = int(params.get("lxc_vcpu", 1))
    max_mem = int(params.get("lxc_max_mem", 500000))
    current_mem = int(params.get("lxc_current_mem", 500000))
    os_type = params.get("lxc_ostype", "exe")
    os_arch = params.get("lxc_osarch", "x86_64")
    os_init = params.get("lxc_osinit", "/bin/sh")
    emulator_path = params.get("lxc_emulator", "/usr/libexec/libvirt_lxc")
    interface_type = params.get("lxc_interface_type", "network")
    net_name = params.get("lxc_net_name", "default")
    full_os = ("yes" == params.get("lxc_full_os", "no"))
    install_root = params.get("lxc_install_root", "/")
    fs_target = params.get("lxc_fs_target", "/")
    fs_accessmode = params.get("lxc_fs_accessmode", "passthrough")
    passwd = params.get("lxc_fs_passwd", "redhat")

    def generate_container_xml():
        """
        Generate container xml
        """
        vmxml = vm_xml.VMXML(dom_type)
        vmxml.vm_name = vm_name
        vmxml.max_mem = max_mem
        vmxml.current_mem = current_mem
        vmxml.vcpu = vcpu
        # Generate os
        vm_os = vm_xml.VMOSXML()
        vm_os.type = os_type
        vm_os.arch = os_arch
        vm_os.init = os_init
        vmxml.os = vm_os
        # Generate emulator
        emulator = Emulator()
        emulator.path = emulator_path
        # Generate console
        console = Console()
        filesystem = Filesystem()
        filesystem.accessmode = fs_accessmode
        filesystem.source = {'dir': install_root}
        filesystem.target = {'dir': fs_target}
        # Add emulator and console in devices
        devices = vm_xml.VMXMLDevices()
        devices.append(emulator)
        devices.append(console)
        devices.append(filesystem)
        # Add network device
        network = Interface(type_name=interface_type)
        network.mac_address = utils_net.generate_mac_address_simple()
        network.source = {interface_type: net_name}
        devices.append(network)
        vmxml.set_devices(devices)
        return vmxml

    def check_state(expected_state):
        # A plain-subprocess sketch of this kind of state check follows
        # this example.
        result = virsh.domstate(vm_name, uri=uri)
        utlv.check_exit_status(result)
        vm_state = result.stdout.strip()
        if vm_state == expected_state:
            logging.info("Got expected state: %s", vm_state)
        else:
            raise TestFail("Got unexpected state: %s" % vm_state)

    virsh_args = {'uri': uri, 'debug': True}
    try:
        vmxml = generate_container_xml()
        with open(vmxml.xml, 'r') as f:
            logging.info("Container XML:\n%s", f.read())

        if full_os:
            if not os.path.exists(install_root):
                os.mkdir(install_root)
            # Install core os under installroot
            cmd = "yum --releasever=/ --installroot=%s" % install_root
            cmd += " --nogpgcheck -y groupinstall core"
            process.run(cmd, shell=True)
            # Fix root login on console
            process.run("echo 'pts/0' >> %s/etc/securetty" % install_root,
                        shell=True)
            for i in [
                    "session    required     pam_selinux.so close",
                    "session    required     pam_selinux.so open",
                    "session    required     pam_loginuid.so"
            ]:
                process.run('sed -i s/"%s\"/"#%s"/g %s/etc/pam.d/login' %
                            (i, i, install_root),
                            shell=True)
                # Fix root login for sshd
                process.run('sed -i s/"%s\"/"#%s"/g %s/etc/pam.d/sshd' %
                            (i, i, install_root),
                            shell=True)

            # Config basic network
            net_file = install_root + '/etc/sysconfig/network'
            with open(net_file, 'w') as f:
                f.write('NETWORKING=yes\nHOSTNAME=%s\n' % vm_name)
            net_script = install_root + '/etc/sysconfig/network-scripts/ifcfg-eth0'
            with open(net_script, 'w') as f:
                f.write('DEVICE=eth0\nBOOTPROTO=dhcp\nONBOOT=yes\n')

            # Set root password and enable sshd
            session = aexpect.ShellSession("chroot %s" % install_root)
            session.sendline('echo %s|passwd root --stdin' % passwd)
            session.sendline('chkconfig sshd on')
            session.close()

        # Create
        result = virsh.create(vmxml.xml, **virsh_args)
        utlv.check_exit_status(result)
        check_state('running')

        # Destroy
        result = virsh.destroy(vm_name, **virsh_args)
        utlv.check_exit_status(result)
        if not virsh.domain_exists(vm_name, **virsh_args):
            logging.info("Destroy transient LXC domain successfully")
        else:
            raise TestFail("Transient LXC domain still exist after destroy")

        # Define
        result = virsh.define(vmxml.xml, **virsh_args)
        utlv.check_exit_status(result)
        check_state('shut off')

        # List
        result = virsh.dom_list('--inactive', **virsh_args)
        utlv.check_exit_status(result)
        if re.findall(r"(%s)\s+shut off" % vm_name, result.stdout):
            logging.info("Find %s in virsh list output", vm_name)
        else:
            raise TestFail("Cannot find %s in virsh list output" % vm_name)

        # Dumpxml
        result = virsh.dumpxml(vm_name, uri=uri, debug=False)
        utlv.check_exit_status(result)

        # Edit
        edit_vcpu = '2'
        logging.info("Change vcpu of LXC container to %s", edit_vcpu)
        edit_cmd = [r":%s /[0-9]*<\/vcpu>/" + edit_vcpu + r"<\/vcpu>"]
        if not utlv.exec_virsh_edit(vm_name, edit_cmd, connect_uri=uri):
            raise TestFail("Run edit command fail")
        else:
            result = virsh.dumpxml(vm_name, **virsh_args)
            new_vcpu = re.search(r'(\d*)</vcpu>', result.stdout).group(1)
            if new_vcpu == edit_vcpu:
                logging.info("vcpu number is expected after do edit")
            else:
                raise TestFail("vcpu number is unexpected after do edit")

        # Start
        result = virsh.start(vm_name, **virsh_args)
        utlv.check_exit_status(result)
        check_state('running')

        # Suspend
        result = virsh.suspend(vm_name, **virsh_args)
        utlv.check_exit_status(result)
        check_state('paused')

        # Resume
        result = virsh.resume(vm_name, **virsh_args)
        utlv.check_exit_status(result)
        check_state('running')

        # Reboot (not supported on RHEL6)
        result = virsh.reboot(vm_name, **virsh_args)
        supported_err = 'not supported by the connection driver: virDomainReboot'
        if supported_err in result.stderr.strip():
            logging.info("Reboot is not supported")
        else:
            utlv.check_exit_status(result)

        # Destroy
        result = virsh.destroy(vm_name, **virsh_args)
        utlv.check_exit_status(result)
        check_state('shut off')

        # Undefine
        result = virsh.undefine(vm_name, **virsh_args)
        utlv.check_exit_status(result)
        if not virsh.domain_exists(vm_name, **virsh_args):
            logging.info("Undefine LXC domain successfully")
        else:
            raise TestFail("LXC domain still exist after undefine")

    finally:
        virsh.remove_domain(vm_name, **virsh_args)
        if full_os and os.path.exists(install_root):
            shutil.rmtree(install_root)
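For readers without the avocado-vt virsh wrapper, the check_state() pattern above boils down to polling `virsh -c lxc:/// domstate`. A hedged sketch using plain subprocess; the domain name in the usage line and the timeout are illustrative assumptions:

import subprocess
import time

def wait_for_state(name, expected, uri='lxc:///', timeout=30):
    """Poll `virsh domstate` until the domain reports the expected state."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        result = subprocess.run(['virsh', '-c', uri, 'domstate', name],
                                capture_output=True, text=True)
        if result.stdout.strip() == expected:
            return True
        time.sleep(1)
    return False

# Hypothetical usage:
# assert wait_for_state('lxc_test_vm1', 'running')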
Example No. 15
def run(test, params, env):
    """
    Test command: virsh reboot.

    Run a reboot command in the target domain.
    1. Prepare test environment.
    2. When libvirtd == "off", stop the libvirtd service.
    3. Perform the virsh reboot operation.
    4. Recover the test environment (libvirtd service).
    5. Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    # run test case
    libvirtd = params.get("libvirtd", "on")
    vm_ref = params.get("reboot_vm_ref")
    status_error = ("yes" == params.get("status_error"))
    extra = params.get("reboot_extra", "")
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
    remote_pwd = params.get("remote_pwd", "password")
    agent = ("yes" == params.get("reboot_agent", "no"))
    mode = params.get("reboot_mode", "")
    # (the params key keeps its historical misspelling)
    pre_domain_status = params.get("reboot_pre_domian_status", "running")
    xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    try:
        # Add or remove qemu-agent from guest before test
        if agent:
            vm_xml.VMXML.set_agent_channel(vm_name)
        else:
            vm_xml.VMXML.remove_agent_channel(vm_name)

        virsh.start(vm_name)
        guest_session = vm.wait_for_login()
        if agent:
            guest_session.cmd("qemu-ga -d")
            stat_ps = guest_session.cmd_status("ps aux |grep [q]emu-ga")
            guest_session.close()
            if stat_ps:
                raise error.TestError("Failed to start qemu-guest-agent!")
        if pre_domain_status == "shutoff":
            virsh.destroy(vm_name)
        if libvirtd == "off":
            utils_libvirtd.libvirtd_stop()

        domid = vm.get_id()
        domuuid = vm.get_uuid()
        if vm_ref == "id":
            vm_ref = domid
        elif vm_ref == "name":
            vm_ref = vm_name
        elif vm_ref == "uuid":
            vm_ref = domuuid
        elif vm_ref == "hex_id":
            vm_ref = hex(int(domid))
        elif vm_ref.find("invalid") != -1:
            vm_ref = params.get(vm_ref)
        elif vm_ref == "remote_name":
            if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
                raise error.TestNAError("remote_ip and/or local_ip parameters"
                                        " not changed from default values")
            complete_uri = libvirt_vm.complete_uri(local_ip)
            try:
                session = remote.remote_login("ssh", remote_ip, "22", "root",
                                              remote_pwd, "#")
                session.cmd_output('LANG=C')
                command = "virsh -c %s reboot %s %s" % (complete_uri, vm_name,
                                                        mode)
                status, output = session.cmd_status_output(command,
                                                           internal_timeout=5)
                session.close()
            # FIXME: Catch specific exception
            except Exception as detail:
                logging.error("Exception: %s", str(detail))
                status = -1
        if vm_ref != "remote_name":
            vm_ref = "%s %s" % (vm_ref, extra)
            cmdresult = virsh.reboot(vm_ref,
                                     mode,
                                     ignore_status=True,
                                     debug=True)
            status = cmdresult.exit_status
            if status:
                logging.debug("Error status, cmd error: %s", cmdresult.stderr)
                if not virsh.has_command_help_match('reboot', r'\s+--mode\s+'):
                    # old libvirt doesn't support reboot
                    status = -2
        output = virsh.dom_list(ignore_status=True).stdout.strip()

        # recover libvirtd service start
        if libvirtd == "off":
            utils_libvirtd.libvirtd_start()

        # check status_error
        if status_error:
            if not status:
                raise error.TestFail("Run successfully with wrong command!")
        else:
            if status or (not re.search(vm_name, output)):
                if status == -2:
                    raise error.TestNAError(
                        "Reboot command doesn't work on older libvirt versions"
                    )
                raise error.TestFail("Run failed with right command")
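The vm_ref branching above (id/name/uuid/hex_id/invalid) recurs throughout these reboot tests and can be read as a small lookup table. A sketch of that mapping; the values in the usage line are made up:

def resolve_vm_ref(vm_ref, params, domid, vm_name, domuuid):
    """Map a symbolic reference kind to the value handed to virsh."""
    table = {
        'id': str(domid),
        'name': vm_name,
        'uuid': domuuid,
        'hex_id': hex(int(domid)),
    }
    if vm_ref in table:
        return table[vm_ref]
    if 'invalid' in vm_ref:
        # e.g. an 'invalid_id' key whose bogus value lives in the params
        return params.get(vm_ref)
    return vm_ref

# Example: resolve_vm_ref('hex_id', {}, 7, 'vm1', 'some-uuid') == '0x7'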
Example No. 16
def run(test, params, env):
    """
    Test command: virsh pool-define-as; pool-build; pool-start; vol-create-as;
    vol-list; attach-device; login; mount and dd; reboot; check persistence;
    detach-device; pool-destroy; pool-undefine; clear lv,vg and pv;
    Create a libvirt npiv pool from a vHBA's device mapper device and create
    a volume out of the newly created pool and attach it to a guest, mount it,
    reboot and check persistence after reboot.

    Pre-requisite :
    Host should have a vHBA associated with a mpath device
    """

    pool_name = params.get("pool_create_name", "virt_test_pool_tmp")
    pool_type = params.get("pool_type", "dir")
    scsi_wwpn = params.get("scsi_wwpn", "WWPN_EXAMPLE")
    scsi_wwnn = params.get("scsi_wwnn", "WWNN_EXAMPLE")
    pool_target = params.get("pool_target", "pool_target")
    target_device = params.get("disk_target_dev", "vda")
    volume_name = params.get("volume_name", "imagefrommapper.qcow2")
    volume_capacity = params.get("volume_capacity", '1G')
    allocation = params.get("allocation", '1G')
    frmt = params.get("volume_format", 'qcow2')
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    mount_disk = None
    test_unit = None

    if 'EXAMPLE' in scsi_wwnn or 'EXAMPLE' in scsi_wwpn:
        raise exceptions.TestSkipError("Please provide proper WWPN/WWNN")

    if not vm.is_alive():
        vm.start()
    pool_extra_args = ""
    libvirt_vm = lib_vm.VM(vm_name, vm.params, vm.root_dir, vm.address_cache)
    process.run("service multipathd restart", shell=True)
    online_hbas_list = nodedev.find_hbas("hba")
    first_online_hba = online_hbas_list[0]
    old_mpath_devs = nodedev.find_mpath_devs()
    logging.debug("the old mpath devs are: %s" % old_mpath_devs)
    new_vhbas = nodedev.nodedev_create_from_xml({
        "nodedev_parent": first_online_hba,
        "scsi_wwnn": scsi_wwnn,
        "scsi_wwpn": scsi_wwpn
    })
    logging.info("Newly created vHBA %s" % new_vhbas)
    process.run("service multipathd restart", shell=True)

    utils_misc.wait_for(lambda: nodedev.is_mpath_devs_added(old_mpath_devs),
                        timeout=5)

    cur_mpath_devs = nodedev.find_mpath_devs()
    logging.debug("the current mpath devs are: %s" % cur_mpath_devs)
    new_mpath_devs = list(set(cur_mpath_devs).difference(set(old_mpath_devs)))

    logging.debug("newly added mpath devs are: %s" % new_mpath_devs)
    if not new_mpath_devs:
        raise exceptions.TestFail("No newly added mpath devices found, "
                                  "please check your FC settings")
    source_dev = os.path.join('/dev/mapper/', new_mpath_devs[0])
    logging.debug("We are going to use \"%s\" as our source device"
                  " to create a logical pool" % source_dev)

    cmd = "parted %s mklabel msdos -s" % source_dev
    cmd_result = process.run(cmd, shell=True)
    utlv.check_exit_status(cmd_result)

    if source_dev:
        pool_extra_args = ' --source-dev %s' % source_dev
    else:
        raise exceptions.TestFail(
            "The vHBA %s does not have any associated mpath device" %
            new_vhbas)

    pool_ins = libvirt_storage.StoragePool()
    if pool_ins.pool_exists(pool_name):
        raise exceptions.TestFail("Pool %s already exists" % pool_name)
    # if there are no online hba cards on the host, skip the case
    if not online_hbas_list:
        raise exceptions.TestSkipError("Host doesn't have online hba cards")
    try:
        cmd_result = virsh.pool_define_as(pool_name,
                                          pool_type,
                                          pool_target,
                                          pool_extra_args,
                                          ignore_status=True,
                                          debug=True)
        utlv.check_exit_status(cmd_result)

        cmd_result = virsh.pool_build(pool_name)
        utlv.check_exit_status(cmd_result)

        cmd_result = virsh.pool_start(pool_name)
        utlv.check_exit_status(cmd_result)

        utlv.check_actived_pool(pool_name)
        pool_detail = libvirt_xml.PoolXML.get_pool_details(pool_name)
        logging.debug("Pool detail: %s", pool_detail)

        cmd_result = virsh.vol_create_as(volume_name,
                                         pool_name,
                                         volume_capacity,
                                         allocation,
                                         frmt,
                                         "",
                                         debug=True)
        utlv.check_exit_status(cmd_result)

        vol_list = utlv.get_vol_list(pool_name, timeout=10)
        logging.debug('Volume list %s', vol_list)
        for unit in vol_list:
            test_unit = vol_list[unit]
            logging.debug(unit)

        disk_params = {
            'type_name': "file",
            'target_dev': target_device,
            'target_bus': "virtio",
            'source_file': test_unit,
            'driver_name': "qemu",
            'driver_type': "raw"
        }
        disk_xml = utlv.create_disk_xml(disk_params)
        session = vm.wait_for_login()

        bf_disks = libvirt_vm.get_disks()

        attach_success = virsh.attach_device(vm_name, disk_xml, debug=True)

        utlv.check_exit_status(attach_success)

        logging.debug("Disks before attach: %s", bf_disks)

        af_disks = libvirt_vm.get_disks()

        logging.debug("Disks after attach: %s", af_disks)

        # The symmetric difference isolates the hot-plugged disk
        # (a minimal demonstration follows this example)
        mount_disk = "".join(list(set(bf_disks) ^ set(af_disks)))
        if not mount_disk:
            raise exceptions.TestFail("Cannot get attached device in vm.")
        logging.debug("Attached device in vm: %s", mount_disk)

        output = session.cmd_status_output('lsblk', timeout=15)
        logging.debug("%s", output[1])

        session.cmd_status_output('mkfs.ext4 %s' % mount_disk)
        if mount_disk:
            logging.info("%s", mount_disk)
            mount_success = mount_and_dd(session, mount_disk)
            if not mount_success:
                raise exceptions.TestFail("Can not find mounted device")
        session.close()

        virsh.reboot(vm_name, debug=True)

        session = vm.wait_for_login()
        output = session.cmd_status_output('mount')
        logging.debug("Mount output: %s", output[1])
        if '/mnt' in output[1]:
            logging.debug("Mount successful across reboot")
        session.close()

        status = virsh.detach_device(vm_name, disk_xml, debug=True)
        utlv.check_exit_status(status)

    finally:
        vm.destroy(gracefully=False)
        logging.debug('Destroying pool %s', pool_name)
        virsh.pool_destroy(pool_name)
        logging.debug('Undefining pool %s', pool_name)
        virsh.pool_undefine(pool_name)
        if test_unit:
            process.system('lvremove -f %s' % test_unit, verbose=True)
            process.system('vgremove -f %s' % pool_name, verbose=True)
            process.system('pvremove -f %s' % source_dev, verbose=True)
        if new_vhbas:
            nodedev.vhbas_cleanup(new_vhbas.split())
        process.run("service multipathd restart", shell=True)
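The mount_disk derivation above relies on a set symmetric difference: the only element differing between the before and after disk lists is the hot-plugged device. A minimal demonstration (device names are made up); note it assumes exactly one disk changed, which is why the test treats an empty result as a failure:

bf_disks = ['/dev/vda', '/dev/vdb']               # before attach
af_disks = ['/dev/vda', '/dev/vdb', '/dev/vdc']   # after attach

# XOR of the two sets keeps elements present in exactly one of them;
# joining the single-element result yields the new device path.
mount_disk = "".join(set(bf_disks) ^ set(af_disks))
assert mount_disk == '/dev/vdc'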
Example No. 17
def run(test, params, env):
    """
    Test command: virsh reboot.

    Run a reboot command in the target domain.
    1. Prepare test environment.
    2. When libvirtd == "off", stop the libvirtd service.
    3. Perform the virsh reboot operation.
    4. Recover the test environment (libvirtd service).
    5. Confirm the test result.
    """
    def boot_time():
        """
        Get guest boot time
        """
        session = vm.wait_for_login()

        def get_boot_time():
            """
            Get the boot time, tolerating shell errors while the guest reboots
            """
            boot_time = None
            try:
                boot_time = session.cmd_output("uptime --since")
            except (aexpect.ShellStatusError,
                    aexpect.ShellProcessTerminatedError) as e:
                logging.error("Failed to get boot time: %s", e)
            return boot_time

        boot_time = utils_misc.wait_for(get_boot_time, 60)
        session.close()
        return boot_time

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    # run test case
    libvirtd = params.get("libvirtd", "on")
    vm_ref = params.get("reboot_vm_ref")
    status_error = ("yes" == params.get("status_error"))
    extra = params.get("reboot_extra", "")
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
    remote_pwd = params.get("remote_pwd", "password")
    local_pwd = params.get("local_pwd", "password")
    agent = ("yes" == params.get("reboot_agent", "no"))
    mode = params.get("reboot_mode", "")
    # (the params key keeps its historical misspelling)
    pre_domain_status = params.get("reboot_pre_domian_status", "running")
    reboot_readonly = "yes" == params.get("reboot_readonly", "no")
    wait_time = int(params.get('wait_time', 5))
    xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    try:
        # Add or remove qemu-agent from guest before test
        try:
            vm.prepare_guest_agent(channel=agent, start=agent)
        except virt_vm.VMError as e:
            logging.debug(e)
            # qemu-guest-agent is not available on REHL5
            test.cancel("qemu-guest-agent package is not available")

        if pre_domain_status == "shutoff":
            virsh.destroy(vm_name)
        if libvirtd == "off":
            utils_libvirtd.libvirtd_stop()

        domid = vm.get_id()
        domuuid = vm.get_uuid()
        if vm_ref == "id":
            vm_ref = domid
        elif vm_ref == "name":
            vm_ref = vm_name
        elif vm_ref == "uuid":
            vm_ref = domuuid
        elif vm_ref == "hex_id":
            vm_ref = hex(int(domid))
        elif vm_ref.find("invalid") != -1:
            vm_ref = params.get(vm_ref)
        elif vm_ref == "remote_name":
            if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
                test.cancel("remote_ip and/or local_ip parameters"
                            " not changed from default values")
            complete_uri = libvirt_vm.complete_uri(local_ip)

            # Setup ssh connection
            ssh_connection = utils_conn.SSHConnection(server_ip=local_ip,
                                                      server_pwd=local_pwd,
                                                      client_ip=remote_ip,
                                                      client_pwd=remote_pwd)
            try:
                ssh_connection.conn_check()
            except utils_conn.ConnectionError:
                ssh_connection.conn_setup()
                ssh_connection.conn_check()

            try:
                session = remote.remote_login("ssh", remote_ip, "22", "root",
                                              remote_pwd, "#")
                session.cmd_output('LANG=C')
                command = "virsh -c %s reboot %s %s" % (complete_uri, vm_name,
                                                        mode)
                status, output = session.cmd_status_output(command,
                                                           internal_timeout=5)
                session.close()
                if not status:
                    # the operation before the end of reboot
                    # may result in data corruption
                    vm.wait_for_login().close()
            except (remote.LoginError, process.CmdError,
                    aexpect.ShellError) as e:
                logging.error("Exception: %s", str(e))
                status = -1
        if vm_ref != "remote_name":
            if not status_error:
                # No need to check the boot-up time in a negative test
                first_boot_time = boot_time()

            vm_ref = "%s" % vm_ref
            if extra:
                vm_ref += " %s" % extra
            cmdresult = virsh.reboot(vm_ref,
                                     mode,
                                     ignore_status=True,
                                     debug=True)
            status = cmdresult.exit_status
            if status:
                logging.debug("Error status, cmd error: %s", cmdresult.stderr)
                if not virsh.has_command_help_match('reboot', r'\s+--mode\s+'):
                    # old libvirt doesn't support reboot
                    status = -2
            # avoid the check if it is a negative test
            if not status_error:
                cmdoutput = None

                def _wait_for_reboot_up():
                    # Rebind cmdoutput in the enclosing scope so the failure
                    # message below can report the last domstate result
                    nonlocal cmdoutput
                    second_boot_time = boot_time()
                    # boot_time() may return None while the guest is still
                    # rebooting (see the timestamp sketch after this example)
                    is_rebooted = bool(first_boot_time and second_boot_time
                                       and second_boot_time > first_boot_time)
                    cmdoutput = virsh.domstate(vm_ref,
                                               '--reason',
                                               ignore_status=True,
                                               debug=True)
                    domstate_status = cmdoutput.exit_status
                    output = "running" in cmdoutput.stdout
                    return not domstate_status and output and is_rebooted

                if not wait.wait_for(
                        _wait_for_reboot_up, timeout=wait_time, step=1):
                    test.fail("Cmd error: %s Error status: %s" %
                              (cmdoutput.stderr, cmdoutput.stdout))
            elif pre_domain_status != 'shutoff':
                vm.wait_for_login().close()
        output = virsh.dom_list(ignore_status=True).stdout.strip()

        # Test the readonly mode
        if reboot_readonly:
            result = virsh.reboot(vm_ref,
                                  ignore_status=True,
                                  debug=True,
                                  readonly=True)
            libvirt.check_exit_status(result, expect_error=True)
            # This is for status_error check
            status = result.exit_status

        # recover libvirtd service start
        if libvirtd == "off":
            utils_libvirtd.libvirtd_start()

        # check status_error
        if status_error:
            if not status:
                test.fail("Run successfully with wrong command!")
        else:
            if status or (not re.search(vm_name, output)):
                if status == -2:
                    test.cancel("Reboot command doesn't work on older libvirt "
                                "versions")
                test.fail("Run failed with right command")
    finally:
        xml_backup.sync()

        if 'ssh_connection' in locals():
            ssh_connection.auto_recover = True
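The reboot detection above compares two `uptime --since` readings. The raw string comparison works because the timestamps are zero-padded and ISO-ordered, but parsing them makes the intent explicit. A sketch with made-up timestamps:

from datetime import datetime

def rebooted(before, after, fmt='%Y-%m-%d %H:%M:%S'):
    """True if the guest's boot timestamp moved forward."""
    # `uptime --since` prints e.g. '2023-05-04 09:14:21'; a later value
    # after `virsh reboot` means the kernel actually restarted.
    return (datetime.strptime(after.strip(), fmt)
            > datetime.strptime(before.strip(), fmt))

assert rebooted('2023-05-04 09:14:21', '2023-05-04 09:20:03')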
Example No. 18
def run(test, params, env):
    """
    Test command: virsh domstate.

    1. Prepare test environment.
    2. When libvirtd == "off", stop the libvirtd service.
    3. Perform the virsh domstate operation.
    4. Recover the test environment.
    5. Confirm the test result.
    """
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)

    libvirtd_state = params.get("libvirtd", "on")
    vm_ref = params.get("domstate_vm_ref")
    status_error = (params.get("status_error", "no") == "yes")
    extra = params.get("domstate_extra", "")
    vm_action = params.get("domstate_vm_action", "")
    vm_oncrash_action = params.get("domstate_vm_oncrash")
    reset_action = "yes" == params.get("reset_action", "no")
    dump_option = params.get("dump_option", "")
    start_action = params.get("start_action", "normal")
    kill_action = params.get("kill_action", "normal")
    check_libvirtd_log = params.get("check_libvirtd_log", "no")
    err_msg = params.get("err_msg", "")
    remote_uri = params.get("remote_uri")

    domid = vm.get_id()
    domuuid = vm.get_uuid()

    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = vm_name
    elif vm_ref == "uuid":
        vm_ref = domuuid

    # Back up xml file.
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Back up qemu.conf
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()

    # Config libvirtd log
    if check_libvirtd_log == "yes":
        libvirtd_conf = utils_config.LibvirtdConfig()
        libvirtd_log_file = os.path.join(data_dir.get_tmp_dir(),
                                         "libvirtd.log")
        libvirtd_conf["log_level"] = '1'
        libvirtd_conf["log_filters"] = ('"1:json 1:libvirt 1:qemu 1:monitor '
                                        '3:remote 4:event"')
        libvirtd_conf["log_outputs"] = '"1:file:%s"' % libvirtd_log_file
        logging.debug("the libvirtd config file content is:\n %s" %
                      libvirtd_conf)
        libvirtd.restart()

    # Get image file
    image_source = vm.get_first_disk_devices()['source']
    logging.debug("image source: %s" % image_source)
    new_image_source = image_source + '.rename'

    dump_path = os.path.join(data_dir.get_tmp_dir(), "dump/")
    logging.debug("dump_path: %s", dump_path)
    try:
        os.mkdir(dump_path)
    except OSError:
        # If the path already exists then pass
        pass
    dump_file = ""
    try:
        # Use less guest memory so that dumping core does not take long
        # enough to time out the test case
        if vm_oncrash_action in ['coredump-destroy', 'coredump-restart']:
            memory_value = int(params.get("memory_value", "2097152"))
            memory_unit = params.get("memory_unit", "KiB")
            vmxml.set_memory(memory_value)
            vmxml.set_memory_unit(memory_unit)
            logging.debug(vmxml)
            vmxml.sync()

        if vm_action == "crash":
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vmxml.on_crash = vm_oncrash_action
            if not vmxml.xmltreefile.find('devices').findall('panic'):
                # Add <panic> device to domain
                panic_dev = Panic()
                if "ppc" not in platform.machine():
                    panic_dev.addr_type = "isa"
                    panic_dev.addr_iobase = "0x505"
                vmxml.add_device(panic_dev)
            vmxml.sync()
            # Config auto_dump_path in qemu.conf
            qemu_conf.auto_dump_path = dump_path
            libvirtd.restart()
            if vm_oncrash_action in ['coredump-destroy', 'coredump-restart']:
                dump_file = dump_path + "*" + vm_name[:20] + "-*"
            # Start VM and check the panic device
            virsh.start(vm_name, ignore_status=False)
            vmxml_new = vm_xml.VMXML.new_from_dumpxml(vm_name)
            # Cancel this test if no panic device is found
            if not vmxml_new.xmltreefile.find('devices').findall('panic'):
                test.cancel("No 'panic' device in the guest. Maybe your "
                            "libvirt version doesn't support it.")
        try:
            if vm_action == "suspend":
                virsh.suspend(vm_name, ignore_status=False)
            elif vm_action == "resume":
                virsh.suspend(vm_name, ignore_status=False)
                virsh.resume(vm_name, ignore_status=False)
            elif vm_action == "destroy":
                virsh.destroy(vm_name, ignore_status=False)
            elif vm_action == "start":
                virsh.destroy(vm_name, ignore_status=False)
                if start_action == "rename":
                    # rename the guest image file to make guest fail to start
                    os.rename(image_source, new_image_source)
                    virsh.start(vm_name, ignore_status=True)
                else:
                    virsh.start(vm_name, ignore_status=False)
                    if start_action == "restart_libvirtd":
                        libvirtd.restart()
            elif vm_action == "kill":
                if kill_action == "stop_libvirtd":
                    libvirtd.stop()
                    utils_misc.kill_process_by_pattern(vm_name)
                    libvirtd.restart()
                elif kill_action == "reboot_vm":
                    virsh.reboot(vm_name, ignore_status=False)
                    utils_misc.kill_process_tree(vm.get_pid(), signal.SIGKILL)
                else:
                    utils_misc.kill_process_tree(vm.get_pid(), signal.SIGKILL)
            elif vm_action == "crash":
                session = vm.wait_for_login()
                session.cmd("service kdump stop", ignore_all_errors=True)
                # Enable sysRq
                session.cmd("echo 1 > /proc/sys/kernel/sysrq")
                # Send key ALT-SysRq-c to crash VM, and command will not
                # return as vm crashed, so fail early for 'destroy' and
                # 'preserve' action. For 'restart', 'coredump-restart'
                # and 'coredump-destroy' actions, they all need more time
                # to dump core file or restart OS, so using the default
                # session command timeout(60s)
                try:
                    if vm_oncrash_action in ['destroy', 'preserve']:
                        timeout = 3
                    else:
                        timeout = 60
                    session.cmd("echo c > /proc/sysrq-trigger",
                                timeout=timeout)
                except (ShellTimeoutError, ShellProcessTerminatedError):
                    pass
                session.close()
            elif vm_action == "dump":
                dump_file = dump_path + "*" + vm_name + "-*"
                virsh.dump(vm_name,
                           dump_file,
                           dump_option,
                           ignore_status=False)
        except process.CmdError as detail:
            test.error("Guest prepare action error: %s" % detail)

        if libvirtd_state == "off":
            libvirtd.stop()

        # A timing issue can cause the test to check domstate before the
        # prior kill action has completed
        if vm_action == "kill":
            utils_misc.wait_for(vm.is_dead, timeout=20)

        if remote_uri:
            remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
            remote_pwd = params.get("remote_pwd", None)
            remote_user = params.get("remote_user", "root")
            if remote_ip.count("EXAMPLE.COM"):
                test.cancel("Test 'remote' parameters not setup")
            ssh_key.setup_ssh_key(remote_ip, remote_user, remote_pwd)

        result = virsh.domstate(vm_ref,
                                extra,
                                ignore_status=True,
                                debug=True,
                                uri=remote_uri)
        status = result.exit_status
        output = result.stdout.strip()

        # check status_error
        if status_error:
            if not status:
                if libvirtd_state == "off" and libvirt_version.version_compare(
                        5, 6, 0):
                    logging.info(
                        "From libvirt version 5.6.0 libvirtd is restarted "
                        "and command should succeed.")
                else:
                    test.fail("Run successfully with wrong command!")
        else:
            if status or not output:
                test.fail("Run failed with right command")
            if extra.count("reason"):
                # Expected '--reason' strings per action; a table-driven
                # sketch of this check follows this example.
                if vm_action == "suspend":
                    # Destroy now, otherwise tearing down a suspended vm
                    # takes a long time
                    virsh.destroy(vm_name)
                    if not output.count("user"):
                        test.fail(err_msg % vm_action)
                elif vm_action == "resume":
                    if not output.count("unpaused"):
                        test.fail(err_msg % vm_action)
                elif vm_action == "destroy":
                    if not output.count("destroyed"):
                        test.fail(err_msg % vm_action)
                elif vm_action == "start":
                    if start_action == "rename":
                        if not output.count("shut off (failed)"):
                            test.fail(err_msg % vm_action)
                    else:
                        if not output.count("booted"):
                            test.fail(err_msg % vm_action)
                elif vm_action == "kill":
                    if not output.count("crashed"):
                        test.fail(err_msg % vm_action)
                elif vm_action == "crash":
                    if not check_crash_state(output, vm_oncrash_action,
                                             vm_name, dump_file):
                        test.fail(err_msg % vm_action)
                    # VM will be in preserved state, perform virsh reset
                    # and check VM reboots and domstate reflects running
                    # state from crashed state as bug is observed here
                    if vm_oncrash_action == "preserve" and reset_action:
                        virsh_dargs = {'debug': True, 'ignore_status': True}
                        ret = virsh.reset(vm_name, **virsh_dargs)
                        libvirt.check_exit_status(ret)
                        ret = virsh.domstate(vm_name, extra,
                                             **virsh_dargs).stdout.strip()
                        if "paused (crashed)" not in ret:
                            test.fail("vm fails to change state from crashed"
                                      " to paused after virsh reset")
                        # it will be in paused (crashed) state after reset
                        # and resume is required for the vm to reboot
                        ret = virsh.resume(vm_name, **virsh_dargs)
                        libvirt.check_exit_status(ret)
                        vm.wait_for_login()
                        cmd_output = virsh.domstate(vm_name,
                                                    '--reason').stdout.strip()
                        if "running" not in cmd_output:
                            test.fail("guest state failed to get updated")
                    if vm_oncrash_action in [
                            'coredump-destroy', 'coredump-restart'
                    ]:
                        if not find_dump_file:
                            test.fail("Core dump file is not created in dump "
                                      "path: %s" % dump_path)
                    # Covers bug 1178652
                    if (vm_oncrash_action == "rename-restart"
                            and check_libvirtd_log == "yes"):
                        libvirtd.restart()
                        if not os.path.exists(libvirtd_log_file):
                            test.fail("Expected VM log file %s does not "
                                      "exist" % libvirtd_log_file)
                        cmd = ("grep -nr '%s' %s" %
                               (err_msg, libvirtd_log_file))
                        if not process.run(cmd, ignore_status=True,
                                           shell=True).exit_status:
                            test.fail(
                                "Found error message %s in log file: %s." %
                                (err_msg, libvirtd_log_file))
                elif vm_action == "dump":
                    if dump_option == "--live":
                        if not output.count("running (unpaused)"):
                            test.fail(err_msg % vm_action)
                    elif dump_option == "--crash":
                        if not output.count("shut off (crashed)"):
                            test.fail(err_msg % vm_action)
            if vm_ref == "remote":
                if not (re.search("running", output) or re.search(
                        "blocked", output) or re.search("idle", output)):
                    test.fail("Run failed with right command")
    finally:
        qemu_conf.restore()
        if check_libvirtd_log == "yes":
            libvirtd_conf.restore()
            if os.path.exists(libvirtd_log_file):
                os.remove(libvirtd_log_file)
        libvirtd.restart()
        if vm_action == "start" and start_action == "rename":
            os.rename(new_image_source, image_source)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
        if os.path.exists(dump_path):
            shutil.rmtree(dump_path)
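The long `--reason` checking chain above is essentially a table of expected reason substrings per action. A table-driven sketch of the simple cases (the mapping is lifted from the checks above; the crash and dump branches need the extra logic the test already carries):

EXPECTED_REASON = {
    'suspend': 'user',
    'resume': 'unpaused',
    'destroy': 'destroyed',
    'start': 'booted',
    'kill': 'crashed',
}

def reason_ok(vm_action, domstate_output):
    """Check `virsh domstate --reason` output for the expected reason."""
    expected = EXPECTED_REASON.get(vm_action)
    return expected is not None and expected in domstate_output

assert reason_ok('destroy', 'shut off (destroyed)')
assert not reason_ok('start', 'shut off (failed)')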
Example No. 19
def run(test, params, env):
    """
    Test command: virsh pool-define-as; pool-build; pool-start; vol-create-as;
    vol-list; attach-device; login; mount and dd; reboot; check persistence;
    detach-device; pool-destroy; pool-undefine; clear lv,vg and pv;
    Create a libvirt npiv pool from a vHBA's device mapper device and create
    a volume out of the newly created pool and attach it to a guest, mount it,
    reboot and check persistence after reboot.

    Pre-requisite :
    Host should have a vHBA associated with a mpath device
    """

    pool_name = params.get("pool_create_name", "virt_test_pool_tmp")
    pool_type = params.get("pool_type", "dir")
    scsi_wwpn = params.get("scsi_wwpn", "WWPN_EXAMPLE")
    scsi_wwnn = params.get("scsi_wwnn", "WWNN_EXAMPLE")
    pool_target = params.get("pool_target", "pool_target")
    target_device = params.get("disk_target_dev", "vda")
    volume_name = params.get("volume_name", "imagefrommapper.qcow2")
    volume_capacity = params.get("volume_capacity", '1G')
    allocation = params.get("allocation", '1G')
    frmt = params.get("volume_format", 'qcow2')
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    mount_disk = None
    test_unit = None

    if 'EXAMPLE' in scsi_wwnn or 'EXAMPLE' in scsi_wwpn:
        raise exceptions.TestSkipError("Please provide proper WWPN/WWNN")

    if not vm.is_alive():
        vm.start()
    pool_extra_args = ""
    libvirt_vm = lib_vm.VM(vm_name, vm.params, vm.root_dir,
                           vm.address_cache)
    process.run("service multipathd restart", shell=True)
    online_hbas_list = nodedev.find_hbas("hba")
    # Skip early: indexing an empty list below would raise IndexError
    if not online_hbas_list:
        raise exceptions.TestSkipError("Host doesn't have online hba cards")
    first_online_hba = online_hbas_list[0]
    old_mpath_devs = nodedev.find_mpath_devs()
    logging.debug("the old mpath devs are: %s" % old_mpath_devs)
    new_vhbas = nodedev.nodedev_create_from_xml(
        {"nodedev_parent": first_online_hba,
         "scsi_wwnn": scsi_wwnn,
         "scsi_wwpn": scsi_wwpn})
    logging.info("Newly created vHBA %s" % new_vhbas)
    process.run("service multipathd restart", shell=True)

    utils_misc.wait_for(
        lambda: nodedev.is_mpath_devs_added(old_mpath_devs), timeout=5)

    cur_mpath_devs = nodedev.find_mpath_devs()
    logging.debug("the current mpath devs are: %s" % cur_mpath_devs)
    new_mpath_devs = list(set(cur_mpath_devs).difference(
        set(old_mpath_devs)))

    logging.debug("newly added mpath devs are: %s" % new_mpath_devs)
    if not new_mpath_devs:
        raise exceptions.TestFail("No newly added mpath devices found, \
                please check your FC settings")
    source_dev = os.path.join('/dev/mapper/', new_mpath_devs[0])
    logging.debug("We are going to use \"%s\" as our source device"
                  " to create a logical pool" % source_dev)

    cmd = "parted %s mklabel msdos -s" % source_dev
    cmd_result = process.run(cmd, shell=True)
    utlv.check_exit_status(cmd_result)

    if source_dev:
        pool_extra_args = ' --source-dev %s' % source_dev
    else:
        raise exceptions.TestFail(
            "The vHBA %s does not have any associated mpath device" % new_vhbas)

    pool_ins = libvirt_storage.StoragePool()
    if pool_ins.pool_exists(pool_name):
        raise exceptions.TestFail("Pool %s already exist" % pool_name)
    try:
        cmd_result = virsh.pool_define_as(
            pool_name, pool_type, pool_target, pool_extra_args, ignore_status=True,
            debug=True)
        utlv.check_exit_status(cmd_result)

        cmd_result = virsh.pool_build(pool_name)
        utlv.check_exit_status(cmd_result)

        cmd_result = virsh.pool_start(pool_name)
        utlv.check_exit_status(cmd_result)

        utlv.check_actived_pool(pool_name)
        pool_detail = libvirt_xml.PoolXML.get_pool_details(pool_name)
        logging.debug("Pool detail: %s", pool_detail)

        cmd_result = virsh.vol_create_as(
            volume_name, pool_name, volume_capacity, allocation, frmt, "", debug=True)
        utlv.check_exit_status(cmd_result)

        vol_list = utlv.get_vol_list(pool_name, timeout=10)
        logging.debug('Volume list %s', vol_list)
        # Pick a volume path from the pool to attach to the guest
        for unit in vol_list:
            test_unit = vol_list[unit]
            logging.debug(unit)

        disk_params = {'type_name': "file", 'target_dev': target_device,
                       'target_bus': "virtio", 'source_file': test_unit,
                       'driver_name': "qemu", 'driver_type': "raw"}
        disk_xml = utlv.create_disk_xml(disk_params)
        session = vm.wait_for_login()

        bf_disks = libvirt_vm.get_disks()

        attach_success = virsh.attach_device(
            vm_name, disk_xml, debug=True)

        utlv.check_exit_status(attach_success)

        logging.debug("Disks before attach: %s", bf_disks)

        af_disks = libvirt_vm.get_disks()

        logging.debug("Disks after attach: %s", af_disks)

        mount_disk = "".join(list(set(bf_disks) ^ set(af_disks)))
        if not mount_disk:
            raise exceptions.TestFail("Can not get attached device in vm.")
        logging.debug("Attached device in vm:%s", mount_disk)

        output = session.cmd_status_output('lsblk', timeout=15)
        logging.debug("%s", output[1])

        session.cmd_status_output('mkfs.ext4 %s' % mount_disk)
        if mount_disk:
            logging.info("%s", mount_disk)
            mount_success = mount_and_dd(session, mount_disk)
            if not mount_success:
                raise exceptions.TestFail("Can not find mounted device")
        session.close()

        virsh.reboot(vm_name, debug=True)

        session = vm.wait_for_login()
        output = session.cmd_status_output('mount')
        logging.debug("Mount output: %s", output[1])
        if '/mnt' in output[1]:
            logging.debug("Mount successful across reboot")
        else:
            raise exceptions.TestFail("Mount not persistent across reboot")
        session.close()

        status = virsh.detach_device(vm_name, disk_xml,
                                     debug=True)
        utlv.check_exit_status(status)

    finally:
        vm.destroy(gracefully=False)
        logging.debug('Destroying pool %s', pool_name)
        virsh.pool_destroy(pool_name)
        logging.debug('Undefining pool %s', pool_name)
        virsh.pool_undefine(pool_name)
        if test_unit:
            process.system('lvremove -f %s' % test_unit, verbose=True)
            process.system('vgremove -f %s' % pool_name, verbose=True)
            process.system('pvremove -f %s' % source_dev, verbose=True)
        if new_vhbas:
            nodedev.vhbas_cleanup(new_vhbas.split())
        process.run("service multipathd restart", shell=True)
Ejemplo n.º 20
def run(test, params, env):
    """
    LXC container life cycle testing by virsh command
    """
    uri = params.get("connect_uri", "lxc:///")
    vm_name = params.get("main_vm")
    dom_type = params.get("lxc_domtype", "lxc")
    vcpu = int(params.get("lxc_vcpu", 1))
    max_mem = int(params.get("lxc_max_mem", 500000))
    current_mem = int(params.get("lxc_current_mem", 500000))
    os_type = params.get("lxc_ostype", "exe")
    os_arch = params.get("lxc_osarch", "x86_64")
    os_init = params.get("lxc_osinit", "/bin/sh")
    emulator_path = params.get("lxc_emulator",
                               "/usr/libexec/libvirt_lxc")
    interface_type = params.get("lxc_interface_type", "network")
    net_name = params.get("lxc_net_name", "default")
    full_os = ("yes" == params.get("lxc_full_os", "no"))
    install_root = params.get("lxc_install_root", "/")
    fs_target = params.get("lxc_fs_target", "/")
    fs_accessmode = params.get("lxc_fs_accessmode", "passthrough")
    passwd = params.get("lxc_fs_passwd", "redhat")

    def generate_container_xml():
        """
        Generate container xml
        """
        vmxml = vm_xml.VMXML(dom_type)
        vmxml.vm_name = vm_name
        vmxml.max_mem = max_mem
        vmxml.current_mem = current_mem
        vmxml.vcpu = vcpu
        # Generate os
        vm_os = vm_xml.VMOSXML()
        vm_os.type = os_type
        vm_os.arch = os_arch
        vm_os.init = os_init
        vmxml.os = vm_os
        # Generate emulator
        emulator = Emulator()
        emulator.path = emulator_path
        # Generate console
        console = Console()
        filesystem = Filesystem()
        filesystem.accessmode = fs_accessmode
        filesystem.source = {'dir': install_root}
        filesystem.target = {'dir': fs_target}
        # Add emulator and console in devices
        devices = vm_xml.VMXMLDevices()
        devices.append(emulator)
        devices.append(console)
        devices.append(filesystem)
        # Add network device
        network = Interface(type_name=interface_type)
        network.mac_address = utils_net.generate_mac_address_simple()
        network.source = {interface_type: net_name}
        devices.append(network)
        vmxml.set_devices(devices)
        return vmxml

    def check_state(expected_state):
        result = virsh.domstate(vm_name, uri=uri)
        utlv.check_exit_status(result)
        vm_state = result.stdout.strip()
        if vm_state == expected_state:
            logging.info("Get expected state: %s", vm_state)
        else:
            raise TestFail("Get unexpected state: %s", vm_state)

    virsh_args = {'uri': uri, 'debug': True}
    try:
        vmxml = generate_container_xml()
        with open(vmxml.xml, 'r') as f:
            logging.info("Container XML:\n%s", f.read())

        if full_os:
            if not os.path.exists(install_root):
                os.mkdir(install_root)
            # Install core os under installroot
            cmd = "yum --releasever=/ --installroot=%s" % install_root
            cmd += " --nogpgcheck -y groupinstall core"
            process.run(cmd, shell=True)
            # Fix root login on console
            process.run("echo 'pts/0' >> %s/etc/securetty" % install_root,
                        shell=True)
            for i in ["session    required     pam_selinux.so close",
                      "session    required     pam_selinux.so open",
                      "session    required     pam_loginuid.so"]:
                process.run('sed -i s/"%s\"/"#%s"/g %s/etc/pam.d/login' %
                            (i, i, install_root), shell=True)
                # Fix root login for sshd
                process.run('sed -i s/"%s\"/"#%s"/g %s/etc/pam.d/sshd' %
                            (i, i, install_root), shell=True)

            # Config basic network
            net_file = install_root + '/etc/sysconfig/network'
            with open(net_file, 'w') as f:
                f.write('NETWORKING=yes\nHOSTNAME=%s\n' % vm_name)
            net_script = install_root + '/etc/sysconfig/network-scripts/ifcfg-eth0'
            with open(net_script, 'w') as f:
                f.write('DEVICE=eth0\nBOOTPROTO=dhcp\nONBOOT=yes\n')

            # Set root password and enable sshd
            session = aexpect.ShellSession("chroot %s" % install_root)
            session.sendline('echo %s|passwd root --stdin' % passwd)
            session.sendline('chkconfig sshd on')
            session.close()

        # Create
        result = virsh.create(vmxml.xml, **virsh_args)
        utlv.check_exit_status(result)
        check_state('running')

        # Destroy
        result = virsh.destroy(vm_name, **virsh_args)
        utlv.check_exit_status(result)
        if not virsh.domain_exists(vm_name, **virsh_args):
            logging.info("Destroyed transient LXC domain successfully")
        else:
            raise TestFail("Transient LXC domain still exists after destroy")

        # Define
        result = virsh.define(vmxml.xml, **virsh_args)
        utlv.check_exit_status(result)
        check_state('shut off')

        # List
        result = virsh.dom_list('--inactive', **virsh_args)
        utlv.check_exit_status(result)
        if re.findall(r"(%s)\s+shut off" % vm_name, result.stdout):
            logging.info("Found %s in virsh list output", vm_name)
        else:
            raise TestFail("Did not find %s in virsh list output" % vm_name)

        # Dumpxml
        result = virsh.dumpxml(vm_name, uri=uri, debug=False)
        utlv.check_exit_status(result)

        # Edit
        edit_vcpu = '2'
        logging.info("Change vcpu of LXC container to %s", edit_vcpu)
        edit_cmd = [r":%s /[0-9]*<\/vcpu>/" + edit_vcpu + r"<\/vcpu>"]
        if not utlv.exec_virsh_edit(vm_name, edit_cmd, connect_uri=uri):
            raise TestFail("Failed to run the edit command")
        else:
            result = virsh.dumpxml(vm_name, **virsh_args)
            new_vcpu = re.search(r'(\d*)</vcpu>', result.stdout).group(1)
            if new_vcpu == edit_vcpu:
                logging.info("vcpu number is as expected after edit")
            else:
                raise TestFail("vcpu number is unexpected after edit")

        # Start
        result = virsh.start(vm_name, **virsh_args)
        utlv.check_exit_status(result)
        check_state('running')

        # Suspend
        result = virsh.suspend(vm_name, **virsh_args)
        utlv.check_exit_status(result)
        check_state('paused')

        # Resume
        result = virsh.resume(vm_name, **virsh_args)
        utlv.check_exit_status(result)
        check_state('running')

        # Reboot (not supported on RHEL6)
        result = virsh.reboot(vm_name, **virsh_args)
        supported_err = 'not supported by the connection driver: virDomainReboot'
        if supported_err in result.stderr.strip():
            logging.info("Reboot is not supported")
        else:
            utlv.check_exit_status(result)

        # Destroy
        result = virsh.destroy(vm_name, **virsh_args)
        utlv.check_exit_status(result)
        check_state('shut off')

        # Undefine
        result = virsh.undefine(vm_name, **virsh_args)
        utlv.check_exit_status(result)
        if not virsh.domain_exists(vm_name, **virsh_args):
            logging.info("Undefined LXC domain successfully")
        else:
            raise TestFail("LXC domain still exists after undefine")

    finally:
        virsh.remove_domain(vm_name, **virsh_args)
        if full_os and os.path.exists(install_root):
            shutil.rmtree(install_root)
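Note: check_state() above samples `virsh domstate` exactly once, immediately after the lifecycle command returns. For transitions that are not instantaneous, a polling variant avoids racy failures; a sketch built on utils_misc.wait_for (assuming utils_misc is imported in this module, as it is in the other examples):

def wait_for_state(expected_state, timeout=30):
    # Poll 'virsh domstate' until the expected state shows up or we time out.
    def _state_matches():
        result = virsh.domstate(vm_name, uri=uri)
        return result.stdout.strip() == expected_state
    if not utils_misc.wait_for(_state_matches, timeout=timeout):
        raise TestFail("Domain did not reach state '%s' within %ss"
                       % (expected_state, timeout))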
Ejemplo n.º 21
def run(test, params, env):
    """
    Test command: virsh reboot.

    Run a reboot command in the target domain.
    1. Prepare the test environment.
    2. When libvirtd == "off", stop the libvirtd service.
    3. Perform the virsh reboot operation.
    4. Recover the test environment (libvirtd service).
    5. Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    # run test case
    libvirtd = params.get("libvirtd", "on")
    vm_ref = params.get("reboot_vm_ref")
    status_error = ("yes" == params.get("status_error"))
    extra = params.get("reboot_extra", "")
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
    remote_pwd = params.get("remote_pwd", "password")
    agent = ("yes" == params.get("reboot_agent", "no"))
    mode = params.get("reboot_mode", "")
    pre_domian_status = params.get("reboot_pre_domian_status", "running")
    reboot_readonly = "yes" == params.get("reboot_readonly", "no")
    xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    try:
        # Add or remove qemu-agent from guest before test
        try:
            vm.prepare_guest_agent(channel=agent, start=agent)
        except virt_vm.VMError as e:
            logging.debug(e)
            # qemu-guest-agent is not available on RHEL5
            test.cancel("qemu-guest-agent package is not available")

        if pre_domian_status == "shutoff":
            virsh.destroy(vm_name)
        if libvirtd == "off":
            utils_libvirtd.libvirtd_stop()

        domid = vm.get_id()
        domuuid = vm.get_uuid()
        if vm_ref == "id":
            vm_ref = domid
        elif vm_ref == "name":
            vm_ref = vm_name
        elif vm_ref == "uuid":
            vm_ref = domuuid
        elif vm_ref == "hex_id":
            vm_ref = hex(int(domid))
        elif vm_ref.find("invalid") != -1:
            vm_ref = params.get(vm_ref)
        elif vm_ref == "remote_name":
            if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
                test.cancel("remote_ip and/or local_ip parameters"
                            " not changed from default values")
            complete_uri = libvirt_vm.complete_uri(local_ip)
            try:
                session = remote.remote_login("ssh", remote_ip, "22", "root",
                                              remote_pwd, "#")
                session.cmd_output('LANG=C')
                command = "virsh -c %s reboot %s %s" % (complete_uri, vm_name,
                                                        mode)
                status, output = session.cmd_status_output(command,
                                                           internal_timeout=5)
                session.close()
                if not status:
                    # Wait for the reboot to finish; interacting with the
                    # guest mid-reboot may result in data corruption
                    vm.wait_for_login().close()
            except (remote.LoginError, process.CmdError,
                    aexpect.ShellError) as e:
                logging.error("Exception: %s", str(e))
                status = -1
        if vm_ref != "remote_name":
            vm_ref = "%s" % vm_ref
            if extra:
                vm_ref += " %s" % extra
            cmdresult = virsh.reboot(vm_ref,
                                     mode,
                                     ignore_status=True,
                                     debug=True)
            status = cmdresult.exit_status
            if status:
                logging.debug("Error status, cmd error: %s", cmdresult.stderr)
                if not virsh.has_command_help_match('reboot', r'\s+--mode\s+'):
                    # old libvirt doesn't support reboot
                    status = -2
            else:
                vm.wait_for_login().close()
        output = virsh.dom_list(ignore_status=True).stdout.strip()

        # Test the readonly mode
        if reboot_readonly:
            result = virsh.reboot(vm_ref,
                                  ignore_status=True,
                                  debug=True,
                                  readonly=True)
            libvirt.check_exit_status(result, expect_error=True)
            # This is for status_error check
            status = result.exit_status

        # recover libvirtd service start
        if libvirtd == "off":
            utils_libvirtd.libvirtd_start()

        # check status_error
        if status_error:
            if not status:
                test.fail("Run successfully with wrong command!")
        else:
            if status or (not re.search(vm_name, output)):
                if status == -2:
                    test.cancel("Reboot command doesn't work on older libvirt "
                                "versions")
                test.fail("Run failed with right command")
    finally:
        xml_backup.sync()
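Note: the mode string above is handed to virsh verbatim, so the cartesian config typically supplies values such as "--mode acpi" or "--mode agent" (agent mode needs qemu-guest-agent running in the guest). Illustrative invocations, with the option strings assumed from virsh reboot's documented modes:

# Reboot through an ACPI event, then through the guest agent.
virsh.reboot(vm_name, "--mode acpi", ignore_status=True, debug=True)
virsh.reboot(vm_name, "--mode agent", ignore_status=True, debug=True)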
Ejemplo n.º 22
    def run_test(dev_type, params, test_obj=None):
        """
        Test domain lifecycle

        1) Start the vm and check network
        2) Destroy and start the VM, and check network
        3) Save and restore, and check network
        4) Suspend and resume, and check network
        5) Reboot the VM and check the network
        """
        # Setup Iface device
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        iface_dict = eval(params.get('iface_dict', '{}'))
        iface_dev = interface_base.create_iface(dev_type, iface_dict)
        libvirt.add_vm_device(vmxml, iface_dev)

        logging.info("Start a VM with a '%s' type interface.", dev_type)
        vm.start()
        vm.wait_for_serial_login(timeout=240).close()
        check_points.check_network_accessibility(vm,
                                                 test_obj=test_obj,
                                                 **params)

        logging.info("Destroy and start the VM.")
        virsh.destroy(vm.name, **VIRSH_ARGS)
        virsh.start(vm.name, **VIRSH_ARGS)
        check_points.check_network_accessibility(vm,
                                                 test_obj=test_obj,
                                                 config_vdpa=True,
                                                 **params)

        logging.info("Save the VM.")
        save_error = "yes" == params.get("save_error", "no")
        save_path = os.path.join(data_dir.get_tmp_dir(), vm.name + '.save')
        res = virsh.save(vm.name, save_path, debug=True)
        libvirt.check_exit_status(res, expect_error=save_error)
        if not save_error:
            logging.info("Restore vm.")
            virsh.restore(save_path, **VIRSH_ARGS)
            check_points.check_network_accessibility(vm,
                                                     test_obj=test_obj,
                                                     config_vdpa=False,
                                                     **params)

        logging.info("Suspend and resume the vm.")
        virsh.suspend(vm.name, **VIRSH_ARGS)
        if not libvirt.check_vm_state(vm_name, "paused"):
            test.fail("VM should be paused!")
        virsh.resume(vm.name, **VIRSH_ARGS)
        if not libvirt.check_vm_state(vm_name, "running"):
            test.fail("VM should be running!")
        check_points.check_network_accessibility(vm,
                                                 test_obj=test_obj,
                                                 config_vdpa=False,
                                                 **params)

        logging.debug("Reboot VM and check network.")
        virsh.reboot(vm.name, **VIRSH_ARGS)
        check_points.check_network_accessibility(vm,
                                                 test_obj=test_obj,
                                                 config_vdpa=False,
                                                 **params)
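Note: run_test() forwards a module-level VIRSH_ARGS mapping that this excerpt does not show. In tp-libvirt tests it is conventionally defined along these lines (an assumption for this excerpt):

# Common kwargs forwarded to every virsh call, so failures raise immediately.
VIRSH_ARGS = {'debug': True, 'ignore_status': False}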
Ejemplo n.º 23
def run(test, params, env):
    """
    Test command: virsh reboot.

    Run a reboot command in the target domain.
    1. Prepare the test environment.
    2. When libvirtd == "off", stop the libvirtd service.
    3. Perform the virsh reboot operation.
    4. Recover the test environment (libvirtd service).
    5. Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    # run test case
    libvirtd = params.get("libvirtd", "on")
    vm_ref = params.get("reboot_vm_ref")
    status_error = ("yes" == params.get("status_error"))
    extra = params.get("reboot_extra")
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
    remote_pwd = params.get("remote_pwd", "password")
    agent = ("yes" == params.get("reboot_agent", "no"))
    mode = params.get("reboot_mode", "")
    pre_domian_status = params.get("reboot_pre_domian_status", "running")
    xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    try:
        # Add or remove qemu-agent from guest before test
        if agent:
            vm_xml.VMXML.set_agent_channel(vm_name)
        else:
            vm_xml.VMXML.remove_agent_channel(vm_name)

        virsh.start(vm_name)
        guest_session = vm.wait_for_login()
        if agent:
            if guest_session.cmd_status("which qemu-ga"):
                raise error.TestNAError("Cannot execute this test because"
                                        " the domain doesn't have the"
                                        " qemu-ga command!")
            # First check whether qemu-guest-agent is already active
            stat_ps = guest_session.cmd_status("ps aux |grep [q]emu-ga")
            if stat_ps != 0:
                s, o = guest_session.cmd_status_output("qemu-ga -d")
                if s != 0:
                    raise error.TestError("'qemu-ga -d' failed.\noutput:%s" % o)
            stat_ps = guest_session.cmd_status("ps aux |grep [q]emu-ga")
            guest_session.close()
            if stat_ps:
                raise error.TestError("Fail to start qemu-guest-agent!")
        if pre_domian_status == "shutoff":
            virsh.destroy(vm_name)
        if libvirtd == "off":
            utils_libvirtd.libvirtd_stop()

        domid = vm.get_id()
        domuuid = vm.get_uuid()
        if vm_ref == "id":
            vm_ref = domid
        elif vm_ref == "name":
            vm_ref = vm_name
        elif vm_ref == "uuid":
            vm_ref = domuuid
        elif vm_ref == "hex_id":
            vm_ref = hex(int(domid))
        elif vm_ref.find("invalid") != -1:
            vm_ref = params.get(vm_ref)
        elif vm_ref == "remote_name":
            if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
                raise error.TestNAError("remote_ip and/or local_ip parameters"
                                        " not changed from default values")
            complete_uri = libvirt_vm.complete_uri(local_ip)
            try:
                session = remote.remote_login("ssh", remote_ip, "22", "root",
                                              remote_pwd, "#")
                session.cmd_output('LANG=C')
                command = "virsh -c %s reboot %s %s" % (complete_uri, vm_name,
                                                        mode)
                status, output = session.cmd_status_output(command,
                                                           internal_timeout=5)
                session.close()
            # FIXME: Catch specific exception
            except Exception as detail:
                logging.error("Exception: %s", str(detail))
                status = -1
        if vm_ref != "remote_name":
            vm_ref = "%s %s" % (vm_ref, extra)
            cmdresult = virsh.reboot(vm_ref, mode,
                                     ignore_status=True, debug=True)
            status = cmdresult.exit_status
            if status:
                logging.debug("Error status, cmd error: %s", cmdresult.stderr)
                if not virsh.has_command_help_match('reboot', r'\s+--mode\s+'):
                    # old libvirt doesn't support reboot
                    status = -2
        output = virsh.dom_list(ignore_status=True).stdout.strip()

        # recover libvirtd service start
        if libvirtd == "off":
            utils_libvirtd.libvirtd_start()

        # check status_error
        if status_error:
            if not status:
                raise error.TestFail("Run successfully with wrong command!")
        else:
            if status or (not re.search(vm_name, output)):
                if status == -2:
                    raise error.TestNAError(
                        "Reboot command doesn't work on older libvirt versions")
                raise error.TestFail("Run failed with right command")
Ejemplo n.º 24
        logging.debug("%s", output[1])
        old_count = vmxml.get_disk_count(vm_name)
        bf_disks = libvirt_vm.get_disks()
        disk_params = {'type_name': 'volume', 'target_dev': target_device,
                       'target_bus': 'virtio', 'source_pool': pool_name,
                       'source_volume': test_unit, 'driver_type': 'raw'}
        disk_xml = os.path.join(data_dir.get_tmp_dir(), 'disk_xml.xml')
        lun_disk_xml = utlv.create_disk_xml(disk_params)

        copyfile(lun_disk_xml, disk_xml)
        attach_success = virsh.attach_device(
            vm_name, disk_xml, debug=True)

        utlv.check_exit_status(attach_success)

        virsh.reboot(vm_name, debug=True)

        logging.info("Checking disk availability in domain")
        if not vmxml.get_disk_count(vm_name):
            raise exceptions.TestFail("No disk in domain %s." % vm_name)
        new_count = vmxml.get_disk_count(vm_name)

        if new_count <= old_count:
            raise exceptions.TestFail(
                "Failed to attach disk %s" % lun_disk_xml)

        session = vm.wait_for_login()
        output = session.cmd_status_output('lsblk')
        logging.debug("%s", output[1])
        logging.debug("Disks before attach: %s", bf_disks)
Ejemplo n.º 25
def run(test, params, env):
    """
    Test command: virsh pool-define;pool-start;vol-list pool;
    attach-device LUN to guest; mount the device, dd; unmount;
    reboot guest; mount the device, dd again; pool-destroy; pool-undefine;

    Create a libvirt npiv pool from an XML file. The test needs to have a wwpn
    and wwnn of a vhba in host which is zoned & mapped to a SAN controller.

    Pre-requisite:
    Host needs to have the wwpn and wwnn of a vHBA which is zoned and mapped
    to a SAN controller.
    """
    pool_xml_f = params.get("pool_create_xml_file", "/PATH/TO/POOL.XML")
    pool_name = params.get("pool_create_name", "virt_test_pool_tmp")
    pre_def_pool = "yes" == params.get("pre_def_pool", "no")
    pool_type = params.get("pool_type", "dir")
    source_format = params.get("pool_src_format", "")
    source_name = params.get("pool_source_name", "")
    source_path = params.get("pool_source_path", "/")
    pool_target = params.get("pool_target", "pool_target")
    pool_adapter_type = params.get("pool_adapter_type", "")
    pool_adapter_parent = params.get("pool_adapter_parent", "")
    target_device = params.get("pool_target_device", "sdc")
    pool_wwnn = params.get("pool_wwnn", "WWNN_EXAMPLE")
    pool_wwpn = params.get("pool_wwpn", "WWPN_EXAMPLE")
    test_unit = None
    mount_disk = None
    # Defined up front so the finally block can reference it safely
    disk_xml = None

    if 'EXAMPLE' in pool_wwnn or 'EXAMPLE' in pool_wwpn:
        raise exceptions.TestSkipError("Please provide proper WWPN/WWNN")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    if not vm.is_alive():
        vm.start()
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    libvirt_vm = lib_vm.VM(vm_name, vm.params, vm.root_dir,
                           vm.address_cache)

    pool_ins = libvirt_storage.StoragePool()
    if pre_def_pool and pool_ins.pool_exists(pool_name):
        raise exceptions.TestFail("Pool %s already exist" % pool_name)
    online_hbas_list = nodedev.find_hbas("hba")
    logging.debug("The online hbas are: %s", online_hbas_list)

    # if no online hba cards on host, skip the test
    if not online_hbas_list:
        raise exceptions.TestSkipError("Host doesn't have online hba cards")
    else:
        if pool_adapter_parent == "":
            pool_adapter_parent = online_hbas_list[0]

    kwargs = {'source_path': source_path,
              'source_name': source_name,
              'source_format': source_format,
              'pool_adapter_type': pool_adapter_type,
              'pool_adapter_parent': pool_adapter_parent,
              'pool_wwnn': pool_wwnn,
              'pool_wwpn': pool_wwpn}

    pvt = utlv.PoolVolumeTest(test, params)
    emulated_image = "emulated-image"
    old_vhbas = nodedev.find_hbas("vhba")
    try:
        pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image,
                     **kwargs)
        utils_misc.wait_for(
            lambda: nodedev.is_vhbas_added(old_vhbas), _DELAY_TIME)
        virsh.pool_dumpxml(pool_name, to_file=pool_xml_f)
        virsh.pool_destroy(pool_name)
    except Exception as e:
        pvt.cleanup_pool(pool_name, pool_type, pool_target,
                         emulated_image, **kwargs)
        raise exceptions.TestError(
            "Error occurred when prepare pool xml:\n %s" % e)
    if os.path.exists(pool_xml_f):
        with open(pool_xml_f, 'r') as f:
            logging.debug("Create pool from file:\n %s", f.read())

    try:
        cmd_result = virsh.pool_define(pool_xml_f, ignore_status=True,
                                       debug=True)
        utlv.check_exit_status(cmd_result)

        cmd_result = virsh.pool_start(pool_name)
        utlv.check_exit_status(cmd_result)
        utlv.check_actived_pool(pool_name)
        pool_detail = libvirt_xml.PoolXML.get_pool_details(pool_name)
        logging.debug("Pool detail: %s", pool_detail)

        vol_list = utlv.get_vol_list(pool_name, timeout=10)
        test_unit = list(vol_list.keys())[0]
        logging.info(
            "Using the first LUN unit %s to attach to a guest", test_unit)

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        session = vm.wait_for_login()
        output = session.cmd_status_output('lsblk')
        logging.debug("%s", output[1])
        old_count = vmxml.get_disk_count(vm_name)
        bf_disks = libvirt_vm.get_disks()
        disk_params = {'type_name': 'volume', 'target_dev': target_device,
                       'target_bus': 'virtio', 'source_pool': pool_name,
                       'source_volume': test_unit, 'driver_type': 'raw'}
        disk_xml = os.path.join(data_dir.get_tmp_dir(), 'disk_xml.xml')
        lun_disk_xml = utlv.create_disk_xml(disk_params)

        copyfile(lun_disk_xml, disk_xml)
        attach_success = virsh.attach_device(
            vm_name, disk_xml, debug=True)

        utlv.check_exit_status(attach_success)

        virsh.reboot(vm_name, debug=True)

        logging.info("Checking disk availability in domain")
        if not vmxml.get_disk_count(vm_name):
            raise exceptions.TestFail("No disk in domain %s." % vm_name)
        new_count = vmxml.get_disk_count(vm_name)

        if new_count <= old_count:
            raise exceptions.TestFail(
                "Failed to attach disk %s" % lun_disk_xml)

        session = vm.wait_for_login()
        output = session.cmd_status_output('lsblk')
        logging.debug("%s", output[1])
        logging.debug("Disks before attach: %s", bf_disks)

        af_disks = libvirt_vm.get_disks()
        logging.debug("Disks after attach: %s", af_disks)

        mount_disk = "".join(list(set(bf_disks) ^ set(af_disks)))
        if not mount_disk:
            raise exceptions.TestFail("Can not get attached device in vm.")
        logging.debug("Attached device in vm:%s", mount_disk)

        logging.debug("Creating file system for %s", mount_disk)
        output = session.cmd_status_output(
            'echo yes | mkfs.ext4 %s' % mount_disk)
        logging.debug("%s", output[1])
        if mount_disk:
            mount_success = mount_and_dd(session, mount_disk)
            if not mount_success:
                raise exceptions.TestFail("Mount failed")
        else:
            raise exceptions.TestFail("Partition not available for disk")

        logging.debug("Unmounting disk")
        session.cmd_status_output('umount %s' % mount_disk)

        virsh.reboot(vm_name, debug=True)

        session = vm.wait_for_login()
        output = session.cmd_status_output('mount')
        logging.debug("%s", output[1])
        mount_success = mount_and_dd(session, mount_disk)
        if not mount_success:
            raise exceptions.TestFail("Mount failed")

        logging.debug("Unmounting disk")
        session.cmd_status_output('umount %s' % mount_disk)
        session.close()

        detach_status = virsh.detach_device(vm_name, disk_xml,
                                            debug=True)
        utlv.check_exit_status(detach_status)

    finally:
        vm.destroy(gracefully=False)
        vmxml_backup.sync()
        logging.debug('Destroying pool %s', pool_name)
        virsh.pool_destroy(pool_name)
        logging.debug('Undefining pool %s', pool_name)
        virsh.pool_undefine(pool_name)
        pvt.cleanup_pool(pool_name, pool_type, pool_target,
                         emulated_image, **kwargs)
        if os.path.exists(pool_xml_f):
            os.remove(pool_xml_f)
        if disk_xml and os.path.exists(disk_xml):
            logging.debug("Cleanup disk xml")
            data_dir.clean_tmp_files()
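Note: this example waits on a module-level _DELAY_TIME constant that the excerpt does not define. A plausible definition, with the value assumed for illustration:

# Seconds to wait for newly created vHBAs to be discovered by the host.
_DELAY_TIME = 10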
Ejemplo n.º 26
            'type_name': 'volume',
            'target_dev': target_device,
            'target_bus': 'virtio',
            'source_pool': pool_name,
            'source_volume': test_unit,
            'driver_type': 'raw'
        }
        disk_xml = os.path.join(data_dir.get_tmp_dir(), 'disk_xml.xml')
        lun_disk_xml = utlv.create_disk_xml(disk_params)

        copyfile(lun_disk_xml, disk_xml)
        attach_success = virsh.attach_device(vm_name, disk_xml, debug=True)

        utlv.check_exit_status(attach_success)

        virsh.reboot(vm_name, debug=True)

        logging.info("Checking disk availability in domain")
        if not vmxml.get_disk_count(vm_name):
            raise exceptions.TestFail("No disk in domain %s." % vm_name)
        new_count = vmxml.get_disk_count(vm_name)

        if new_count <= old_count:
            raise exceptions.TestFail("Failed to attach disk %s" %
                                      lun_disk_xml)

        session = vm.wait_for_login()
        output = session.cmd_status_output('lsblk')
        logging.debug("%s", output[1])
        logging.debug("Disks before attach: %s", bf_disks)