Example #1
 def hotplug_device(type, char_dev, id=0):
     tmp_file = os.path.join(tmp_dir, char_dev)
     if type == "qmp":
         char_add_opt = "chardev-add "
         dev_add_opt = "device_add virtserialport,chardev="
         if char_dev == "file":
             char_add_opt += "file,path=%s,id=file" % tmp_file
             dev_add_opt += "file,name=file,bus=virtio-serial0.0,id=file"
         elif char_dev == "socket":
             char_add_opt += "socket,path=%s,server,nowait,id=socket" % tmp_file
             dev_add_opt += "socket,name=socket,bus=virtio-serial0.0,id=socket"
         elif char_dev == "pty":
             char_add_opt += ("pty,path=/dev/pts/%s,id=pty" % id)
             dev_add_opt += "pty,name=pty,bus=virtio-serial0.0,id=pty"
         result = virsh.qemu_monitor_command(vm_name, char_add_opt, "--hmp")
         if result.exit_status:
             raise error.TestError('Failed to add chardev %s to %s. Result:\n %s'
                                   % (char_dev, vm_name, result))
         result = virsh.qemu_monitor_command(vm_name, dev_add_opt, "--hmp")
         if result.exit_status:
             raise error.TestError('Failed to add device %s to %s. Result:\n %s'
                                   % (char_dev, vm_name, result))
     elif type == "attach":
         xml_file = os.path.join(tmp_dir, "xml_%s" % char_dev)
         if char_dev in ["file", "socket"]:
             prepare_channel_xml(xml_file, char_dev)
         elif char_dev == "pty":
             prepare_channel_xml(xml_file, char_dev, id)
         result = virsh.attach_device(vm_name, xml_file)
     return result
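A minimal usage sketch (not part of the original example): it assumes vm_name, tmp_dir and the virsh/logging modules are already set up in the enclosing run() function, and simply drives the helper once per character device type.

# Hypothetical driver loop; "qmp" could be swapped for "attach".
for char_dev in ("file", "socket", "pty"):
    res = hotplug_device("qmp", char_dev)
    logging.debug("Hotplug of %s chardev returned: %s", char_dev, res)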
Example #2
 def hotplug_device(type, char_dev, id=0):
     tmp_file = os.path.join(tmp_dir, char_dev)
     if type == "qmp":
         char_add_opt = "chardev-add "
         dev_add_opt = "device_add virtserialport,chardev="
         if char_dev == "file":
             char_add_opt += "file,path=%s,id=file" % tmp_file
             dev_add_opt += "file,name=file,bus=virtio-serial0.0,id=file"
         elif char_dev == "socket":
             char_add_opt += "socket,path=%s,server,nowait,id=socket" % tmp_file
             dev_add_opt += "socket,name=socket,bus=virtio-serial0.0,id=socket"
         elif char_dev == "pty":
             char_add_opt += "pty,path=/dev/pts/%s,id=pty" % id
             dev_add_opt += "pty,name=pty,bus=virtio-serial0.0,id=pty"
         result = virsh.qemu_monitor_command(vm_name, char_add_opt, "--hmp")
         if result.exit_status:
             raise error.TestError("Failed to add chardev %s to %s. Result:\n %s" % (char_dev, vm_name, result))
         result = virsh.qemu_monitor_command(vm_name, dev_add_opt, "--hmp")
         if result.exit_status:
             raise error.TestError("Failed to add device %s to %s. Result:\n %s" % (char_dev, vm_name, result))
     elif type == "attach":
         xml_file = os.path.join(tmp_dir, "xml_%s" % char_dev)
         if char_dev in ["file", "socket"]:
             prepare_channel_xml(xml_file, char_dev)
         elif char_dev == "pty":
             prepare_channel_xml(xml_file, char_dev, id)
         result = virsh.attach_device(vm_name, xml_file)
         # serial device was introduced by the following commit,
         # http://libvirt.org/git/?
         # p=libvirt.git;a=commit;h=b63ea467617e3cbee4282ab2e5e780b4119cef3d
         if "unknown device type" in result.stderr:
             raise error.TestNAError("Failed to attach %s to %s. Result:\n %s" % (char_dev, vm_name, result))
     return result
Example #3
 def dup_hotplug(type, char_dev, id, dup_charid=False, dup_devid=False, diff_devid=False):
     tmp_file = os.path.join(tmp_dir, char_dev)
     if type == "qmp":
         char_add_opt = "chardev-add "
         dev_add_opt = "device_add virtserialport,chardev="
         if char_dev == "file":
             if dup_charid:
                 char_add_opt += "file,path=%s,id=file" % tmp_file
             if dup_devid:
                 dev_add_opt += "file,name=file,bus=virtio-serial0.0,id=file"
             if diff_devid:
                 dev_add_opt += "file,name=file,bus=virtio-serial0.0,id=file1"
         elif char_dev == "socket":
             if dup_charid:
                 char_add_opt += "socket,path=%s,server,nowait,id=socket" % tmp_file
             if dup_devid:
                 dev_add_opt += "socket,name=socket,bus=virtio-serial0.0,id=socket"
             if diff_devid:
                 dev_add_opt += "socket,name=socket,bus=virtio-serial0.0,id=socket1"
         elif char_dev == "pty":
             if dup_charid:
                 char_add_opt += "pty,path=/dev/pts/%s,id=pty" % id
             if dup_devid:
                 dev_add_opt += "pty,name=pty,bus=virtio-serial0.0,id=pty"
             if diff_devid:
                 dev_add_opt += "pty,name=pty,bus=virtio-serial0.0,id=pty1"
         if dup_charid:
             result = virsh.qemu_monitor_command(vm_name, char_add_opt, "--hmp")
         if dup_devid or diff_devid:
             result = virsh.qemu_monitor_command(vm_name, dev_add_opt, "--hmp")
     elif type == "attach":
         if dup_devid:
             result = hotplug_device(type, char_dev, id)
     return result
Example #4
 def hotplug_device(type, char_dev, id=0):
     tmp_file = "/tmp/%s" % char_dev
     if type == "qmp":
         char_add_opt = "chardev-add "
         dev_add_opt = "device_add virtserialport,chardev="
         if char_dev == "file":
             char_add_opt += "file,path=/tmp/file,id=file"
             dev_add_opt += "file,name=file,bus=virtio-serial0.0,id=file"
         elif char_dev == "socket":
             char_add_opt += "socket,path=/tmp/socket,server,nowait,id=socket"
             dev_add_opt += "socket,name=socket,bus=virtio-serial0.0,id=socket"
         elif char_dev == "pty":
             char_add_opt += ("pty,path=/dev/pts/%s,id=pty" % id)
             dev_add_opt += "pty,name=pty,bus=virtio-serial0.0,id=pty"
         result = virsh.qemu_monitor_command(vm_name, char_add_opt, "--hmp")
         if result.exit_status:
             raise error.TestError('Failed to add chardev %s to %s. Result:\n %s'
                                   % (char_dev, vm_name, result))
         result = virsh.qemu_monitor_command(vm_name, dev_add_opt, "--hmp")
         if result.exit_status:
             raise error.TestError('Failed to add device %s to %s. Result:\n %s'
                                   % (char_dev, vm_name, result))
     elif type == "attach":
         if char_dev in ["file", "socket"]:
             xml_info = create_channel_xml(vm_name, char_dev)
         elif char_dev == "pty":
             xml_info = create_channel_xml(vm_name, char_dev, id)
         f = open(xml_file, "w")
         f.write(xml_info)
         f.close()
         if os.path.exists(tmp_file):
             os.chmod(tmp_file, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
         result = virsh.attach_device(vm_name, xml_file)
     return result
 def hotplug_device(hotplug_type, char_dev, index=1, id=0):
     if hotplug_type == "qmp":
         char_add_opt = "chardev-add "
         dev_add_opt = "device_add virtserialport,chardev="
         if char_dev == "file":
             char_add_opt += ("file,path=%s/file%s,id=file%s"
                              % (tmp_dir, index, index))
             dev_add_opt += ("file%s,name=file%s,bus=virtio-serial0.0,id=file%s"
                             % (index, index, index))
         elif char_dev == "socket":
             char_add_opt += ("socket,path=%s/socket%s,server,nowait,id=socket%s"
                              % (tmp_dir, index, index))
             dev_add_opt += ("socket%s,name=socket%s,bus=virtio-serial0.0,id=socket%s"
                             % (index, index, index))
         elif char_dev == "pty":
             char_add_opt += "pty,path=/dev/pts/%s,id=pty%s" % (id, index)
             dev_add_opt += ("pty%s,name=pty%s,bus=virtio-serial0.0,id=pty%s"
                             % (index, index, index))
         virsh.qemu_monitor_command(vm_name, char_add_opt, "--hmp")
         virsh.qemu_monitor_command(vm_name, dev_add_opt, "--hmp")
     elif hotplug_type == "attach":
         xml_file = "%s/xml_%s%s" % (tmp_dir, char_dev, index)
         if char_dev in ["file", "socket"]:
             prepare_channel_xml(xml_file, char_dev, index)
         elif char_dev == "pty":
             prepare_channel_xml(xml_file, char_dev, index, id)
         virsh.attach_device(vm_name, xml_file, flagstr="--live")
 def unhotplug_serial_device(hotplug_type, char_dev, index=1):
     if hotplug_type == "qmp":
         del_dev_opt = "device_del %s%s" % (char_dev, index)
         del_char_opt = "chardev-remove %s%s" % (char_dev, index)
         virsh.qemu_monitor_command(vm_name, del_dev_opt, "--hmp")
         virsh.qemu_monitor_command(vm_name, del_char_opt, "--hmp")
     elif hotplug_type == "attach":
         xml_file = "%s/xml_%s%s" % (tmp_dir, char_dev, index)
         virsh.detach_device(vm_name, xml_file, flagstr="--live")
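A hedged sketch of how the two helpers above could be paired in a test flow; the index range and the "attach" hotplug type are assumptions, and vm_name, tmp_dir and prepare_channel_xml are expected to exist in the enclosing test.

# Hypothetical plug/unplug cycle for several virtio-serial ports.
for index in range(1, 4):
    hotplug_device("attach", "socket", index=index)
# ... guest-side checks of /dev/virtio-ports/socket<index> would go here ...
for index in range(1, 4):
    unhotplug_serial_device("attach", "socket", index=index)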
Example #7
 def unhotplug_serial_device(type, char_dev):
     if type == "qmp":
         del_dev_opt = "device_del %s" % char_dev
         del_char_opt = "chardev-remove %s" % char_dev
         result = virsh.qemu_monitor_command(vm_name, del_dev_opt, "--hmp")
         if result.exit_status:
             raise error.TestError('Failed to del device %s from %s. Result:\n%s'
                                   % (char_dev, vm_name, result))
         result = virsh.qemu_monitor_command(vm_name, del_char_opt, "--hmp")
     elif type == "attach":
         result = virsh.detach_device(vm_name, xml_file)
Example #8
 def confirm_hotplug_result(char_dev, id=0):
     tmp_file = os.path.join(tmp_dir, char_dev)
     serial_file = os.path.join("/dev/virtio-ports", char_dev)
     result = virsh.qemu_monitor_command(vm_name, "info qtree", "--hmp")
     h_o = result.stdout.strip()
     if not h_o.count('name = "%s"' % char_dev):
         raise error.TestFail("Can't find device(%s) from:\n%s" % (char_dev, h_o))
     if char_dev == "file":
         session.cmd("echo test > %s" % serial_file)
         f = open(tmp_file, "r")
         r_o = f.read()
         f.close()
     elif char_dev == "socket":
         session.cmd("echo test > /tmp/file")
         sock = socket.socket(socket.AF_UNIX)
         sock.connect(tmp_file)
         session.cmd("dd if=/tmp/file of=%s" % serial_file)
         r_o = sock.recv(1024)
     elif char_dev == "pty":
         session.cmd("echo test > /tmp/file")
         session.cmd("dd if=/tmp/file of=%s &" % serial_file)
         dev_file = "/dev/pts/%s" % id
         if not os.path.exists(dev_file):
             raise error.TestFail("%s doesn't exist." % dev_file)
         p = subprocess.Popen(["/usr/bin/cat", dev_file], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
         session.cmd("echo test >> /tmp/file &")
         while True:
             r_o = p.stdout.readline()
             if r_o or p.poll():
                 break
             time.sleep(0.2)
         p.kill()
     if not r_o.count("test"):
         err_info = "%s device file doesn't match 'test':%s" % (char_dev, r_o)
         raise error.TestFail(err_info)
Example #9
    def check_detach_attach_result(vm_name,
                                   cmd,
                                   pattern,
                                   expect_output,
                                   option='--hmp'):
        """
        Check the attach/detach result by qemu_monitor_command.

        :param vm_name: guest name
        :param cmd: the command for qemu_monitor_command
        :param pattern: regular expr used to search
                        the output of qemu_monitor_command
        :param expect_output: the expected output for qemu_monitor_command
        :param option: option for qemu_monitor_command
        :raise test.fail if the pattern is not matched
        :return: the qemu_monitor_command output
        """
        ret = virsh.qemu_monitor_command(vm_name, cmd, option)
        libvirt.check_result(ret)
        if pattern and expect_output:
            if not re.findall(pattern, ret.stdout.strip()):
                test.fail("Can't find the pattern '{}' in "
                          "qemu monitor command "
                          "output '{}'".format(pattern, ret.stdout.strip()))
        else:
            return expect_output == ret.stdout.strip()
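A short illustrative call; the monitor command, pattern and expected output below are hypothetical, while vm_name comes from the surrounding test. It checks that an attached channel still shows up in the HMP chardev list.

# Hypothetical check after attaching a channel device.
check_detach_attach_result(vm_name, "info chardev",
                           pattern=r"charchannel\d+",
                           expect_output="yes",
                           option="--hmp")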
Example #10
def hotplug_supported(vm_name, mtype):
    """
    hotplug support check for ppc64le

    :param vm_name: VM name
    :param mtype: machine type

    :return: True if supported and False in all other cases
    """
    supported = False
    if "ppc64" in platform.machine():
        cmd = '{\"execute\":\"query-machines\"}'
        json_result = virsh.qemu_monitor_command(vm_name, cmd, "--pretty",
                                                 debug=False)
        try:
            result = json.loads(json_result.stdout_text)
        except Exception:
            # Failed to parse the JSON output; default support stays False
            # TODO: handle failure cases
            return supported
        for item in result['return']:
            try:
                if item['name'] == mtype:
                    try:
                        if item['hotpluggable-cpus'] == 'True':
                            supported = True
                    except KeyError:
                        pass
            except KeyError:
                pass
    else:
        # For other architectures, default to True for now
        supported = True
    return supported
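A hedged example of how the check might gate a vCPU hotplug test; the machine type here is an assumption (real tests usually read it from the guest XML), and test/vm_name come from the surrounding run() function.

# Hypothetical guard before exercising CPU hotplug on ppc64le.
mtype = "pseries"   # assumed machine type, normally taken from the VM XML
if not hotplug_supported(vm_name, mtype):
    test.cancel("CPU hotplug not supported for machine type %s" % mtype)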
Example #11
 def confirm_hotplug_result(char_dev, id=0):
     tmp_file = os.path.join(tmp_dir, char_dev)
     serial_file = os.path.join("/dev/virtio-ports", char_dev)
     result = virsh.qemu_monitor_command(vm_name, "info qtree", "--hmp")
     h_o = result.stdout.strip()
     if not h_o.count("name = \"%s\"" % char_dev):
         test.fail("Can't find device(%s) from:\n%s" % (char_dev, h_o))
     if char_dev == "file":
         session.cmd("echo test > %s" % serial_file)
         with open(tmp_file, "r") as f:
             r_o = f.read()
     elif char_dev == "socket":
         session.cmd("echo test > /tmp/file")
         sock = socket.socket(socket.AF_UNIX)
         sock.connect(tmp_file)
         session.cmd("dd if=/tmp/file of=%s" % serial_file)
         r_o = sock.recv(1024)
     elif char_dev == "pty":
         session.cmd("echo test > /tmp/file")
         session.cmd("dd if=/tmp/file of=%s &" % serial_file)
         dev_file = "/dev/pts/%s" % id
         if not os.path.exists(dev_file):
             test.fail("%s doesn't exist." % dev_file)
         p = subprocess.Popen(["/usr/bin/cat", dev_file], universal_newlines=True,
                              stdout=subprocess.PIPE, stderr=subprocess.PIPE)
         session.cmd("echo test >> /tmp/file &")
         while True:
             r_o = p.stdout.readline()
             if r_o or p.poll():
                 break
             time.sleep(0.2)
         p.kill()
     if not r_o.count("test"):
         err_info = "%s device file doesn't match 'test':%s" % (char_dev, r_o)
         test.fail(err_info)
Example #12
def hotplug_supported(vm_name, mtype):
    """
    hotplug support check for ppc64le

    :param vm_name: VM name
    :param mtype: machine type

    :return: True if supported and False in all other cases
    """
    supported = False
    if "ppc64" in platform.machine():
        cmd = '{\"execute\":\"query-machines\"}'
        json_result = virsh.qemu_monitor_command(vm_name, cmd, "--pretty",
                                                 debug=False)
        try:
            result = json.loads(results_stdout_52lts(json_result))
        except Exception:
            # Failed to parse the JSON output; default support stays False
            # TODO: handle failure cases
            return supported
        for item in result['return']:
            try:
                if item['name'] == mtype:
                    try:
                        if item['hotpluggable-cpus'] == 'True':
                            supported = True
                    except KeyError:
                        pass
            except KeyError:
                pass
    else:
        # For other architectures, default to True for now
        supported = True
    return supported
Example #13
 def confirm_unhotplug_result(char_dev):
     serial_file = os.path.join("/dev/virtio-ports", char_dev)
     result = virsh.qemu_monitor_command(vm_name, "info qtree", "--hmp")
     uh_o = result.stdout.strip()
     if uh_o.count('chardev = "%s"' % char_dev):
         raise error.TestFail("Still can get serial device(%s) from: '%s'" % (char_dev, uh_o))
     if os.path.exists(serial_file):
         raise error.TestFail("File '%s' still exists after unhotplug" % serial_file)
Example #14
 def confirm_unhotplug_result(char_dev, index=1):
     serial_file = "/dev/virtio-ports/%s%s" % (char_dev, index)
     result = virsh.qemu_monitor_command(vm_name, "info qtree", "--hmp")
     uh_o = result.stdout.strip()
     if uh_o.count("chardev = %s%s" % (char_dev, index)):
         test.fail("Still can get serial device info: '%s'" % uh_o)
     if not session.cmd_status("test -e %s" % serial_file):
         test.fail("File '%s' still exists after unhotplug" % serial_file)
 def confirm_unhotplug_result(char_dev, index=1):
     serial_file = "/dev/virtio-ports/%s%s" % (char_dev, index)
     result = virsh.qemu_monitor_command(vm_name, "info qtree", "--hmp")
     uh_o = result.stdout.strip()
     if uh_o.count("chardev = %s%s" % (char_dev, index)):
         raise error.TestFail("Still can get serial device info: '%s'" % uh_o)
     if not session.cmd_status("test -e %s" % serial_file):
         raise error.TestFail("File '%s' still exists after unhotplug" % serial_file)
Example #16
 def confirm_unhotplug_result(char_dev):
     serial_file = os.path.join("/dev/virtio-ports", char_dev)
     result = virsh.qemu_monitor_command(vm_name, "info qtree", "--hmp")
     uh_o = result.stdout.strip()
     if uh_o.count("chardev = \"%s\"" % char_dev):
         test.fail("Still can get serial device(%s) from: '%s'"
                   % (char_dev, uh_o))
     if os.path.exists(serial_file):
         test.fail("File '%s' still exists after unhotplug" % serial_file)
Example #17
def run_job_acquire(params, libvirtd, vm):
    """
    Save domain after queried block info
    """
    vm.start()
    res = virsh.qemu_monitor_command(vm.name, 'info block', '--hmp')
    logging.debug(res)
    save_path = os.path.join(data_dir.get_tmp_dir(), 'tmp.save')
    virsh.save(vm.name, save_path)
    vm.wait_for_shutdown()
Example #18
def run_job_acquire(params, libvirtd, vm):
    """
    Save domain after queried block info
    """
    vm.start()
    res = virsh.qemu_monitor_command(vm.name, 'info block', '--hmp')
    logging.debug(res)
    save_path = os.path.join(data_dir.get_tmp_dir(), 'tmp.save')
    virsh.save(vm.name, save_path)
    vm.wait_for_shutdown()
Example #19
 def dup_hotplug(type,
                 char_dev,
                 id,
                 dup_charid=False,
                 dup_devid=False,
                 diff_devid=False):
     tmp_file = os.path.join(tmp_dir, char_dev)
     if type == "qmp":
         char_add_opt = "chardev-add "
         dev_add_opt = "device_add virtserialport,chardev="
         if char_dev == "file":
             if dup_charid:
                 char_add_opt += "file,path=%s,id=file" % tmp_file
             if dup_devid:
                 dev_add_opt += "file,name=file,bus=virtio-serial0.0,id=file"
             if diff_devid:
                 dev_add_opt += "file,name=file,bus=virtio-serial0.0,id=file1"
         elif char_dev == "socket":
             if dup_charid:
                 char_add_opt += "socket,path=%s,server,nowait,id=socket" % tmp_file
             if dup_devid:
                 dev_add_opt += "socket,name=socket,bus=virtio-serial0.0,id=socket"
             if diff_devid:
                 dev_add_opt += "socket,name=socket,bus=virtio-serial0.0,id=socket1"
         elif char_dev == "pty":
             if dup_charid:
                 char_add_opt += "pty,path=/dev/pts/%s,id=pty" % id
             if dup_devid:
                 dev_add_opt += "pty,name=pty,bus=virtio-serial0.0,id=pty"
             if diff_devid:
                 dev_add_opt += "pty,name=pty,bus=virtio-serial0.0,id=pty1"
         if dup_charid:
             result = virsh.qemu_monitor_command(vm_name, char_add_opt,
                                                 "--hmp")
         if dup_devid or diff_devid:
             result = virsh.qemu_monitor_command(vm_name, dev_add_opt,
                                                 "--hmp")
     elif type == "attach":
         if dup_devid:
             result = hotplug_device(type, char_dev, id)
     return result
Example #20
 def check_dom_iothread():
     """
     Check iothread by qemu-monitor-command.
     """
     ret = virsh.qemu_monitor_command(vm_name,
                                      '{"execute": "query-iothreads"}',
                                      "--pretty")
     libvirt.check_exit_status(ret)
     logging.debug("Domain iothreads: %s", ret.stdout)
     iothreads_ret = json.loads(ret.stdout)
     if len(iothreads_ret['return']) != int(dom_iothreads):
         raise exceptions.TestFail("Failed to check domain iothreads")
 def check_dom_iothread():
     """
     Check iothread by qemu-monitor-command.
     """
     ret = virsh.qemu_monitor_command(vm_name,
                                      '{"execute": "query-iothreads"}',
                                      "--pretty")
     libvirt.check_exit_status(ret)
     logging.debug("Domain iothreads: %s", ret.stdout)
     iothreads_ret = json.loads(ret.stdout)
     if len(iothreads_ret['return']) != int(dom_iothreads):
         raise error.TestFail("Failed to check domain iothreads")
Example #22
 def hotplug_device(type, char_dev, id=0):
     tmp_file = os.path.join(tmp_dir, char_dev)
     if type == "qmp":
         char_add_opt = "chardev-add "
         dev_add_opt = "device_add virtserialport,chardev="
         if char_dev == "file":
             char_add_opt += "file,path=%s,id=file" % tmp_file
             dev_add_opt += "file,name=file,bus=virtio-serial0.0,id=file"
         elif char_dev == "socket":
             char_add_opt += "socket,path=%s,server,nowait,id=socket" % tmp_file
             dev_add_opt += "socket,name=socket,bus=virtio-serial0.0,id=socket"
         elif char_dev == "pty":
             char_add_opt += ("pty,path=/dev/pts/%s,id=pty" % id)
             dev_add_opt += "pty,name=pty,bus=virtio-serial0.0,id=pty"
         result = virsh.qemu_monitor_command(vm_name, char_add_opt, "--hmp")
         if result.exit_status:
             raise error.TestError(
                 'Failed to add chardev %s to %s. Result:\n %s' %
                 (char_dev, vm_name, result))
         result = virsh.qemu_monitor_command(vm_name, dev_add_opt, "--hmp")
         if result.exit_status:
             raise error.TestError(
                 'Failed to add device %s to %s. Result:\n %s' %
                 (char_dev, vm_name, result))
     elif type == "attach":
         xml_file = os.path.join(tmp_dir, "xml_%s" % char_dev)
         if char_dev in ["file", "socket"]:
             prepare_channel_xml(xml_file, char_dev)
         elif char_dev == "pty":
             prepare_channel_xml(xml_file, char_dev, id)
         result = virsh.attach_device(vm_name, xml_file)
         # serial device was introduced by the following commit,
         # http://libvirt.org/git/?
         # p=libvirt.git;a=commit;h=b63ea467617e3cbee4282ab2e5e780b4119cef3d
         if "unknown device type" in result.stderr:
             raise error.TestNAError(
                 'Failed to attach %s to %s. Result:\n %s' %
                 (char_dev, vm_name, result))
     return result
Example #23
def get_deprecated_name_list_qmp(vm_name, cmd):
    """
    Get the list of deprecated items by executing given QMP command.

    :param vm_name: the VM name to communicate with.
    :param cmd: QMP command to execute.
    :return: List of deprecated items.
    """
    res = virsh.qemu_monitor_command(vm_name, cmd)
    jdata = json.loads(res.stdout_text)
    qmp_deprecated = []
    for data in jdata['return']:
        for key in data:
            if key == "name":
                name = data[key]
            if key == "deprecated" and data[key]:
                qmp_deprecated.append(name)
    logging.debug("List of deprecated items per QMP: {}".format(qmp_deprecated))
    return qmp_deprecated
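A small usage sketch, assuming the deprecation data is queried with a QMP command such as query-machines (the exact command used by the original test is not shown here).

# Hypothetical call: collect machine types that QMP reports as deprecated.
deprecated = get_deprecated_name_list_qmp(
    vm_name, '{"execute": "query-machines"}')
if deprecated:
    logging.debug("Deprecated machine types reported by QMP: %s", deprecated)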
Example #24
    def confirm_hotplug_result(char_dev, index=1, id=0):
        result = virsh.qemu_monitor_command(vm_name, "info qtree", "--hmp")
        h_o = result.stdout.strip()
        chardev_c = h_o.count("chardev = %s%s" % (char_dev, index))
        name_c = h_o.count("name = \"%s%s\"" % (char_dev, index))
        if chardev_c == 0 and name_c == 0:
            test.fail("Cannot get serial device info: '%s'" % h_o)

        tmp_file = "%s/%s%s" % (tmp_dir, char_dev, index)
        serial_file = "/dev/virtio-ports/%s%s" % (char_dev, index)
        if char_dev == "file":
            session.cmd("echo test > %s" % serial_file)
            with open(tmp_file, "r") as f:
                output = f.read()
        elif char_dev == "socket":
            session.cmd("echo test > /tmp/file")
            sock = socket.socket(socket.AF_UNIX)
            sock.connect(tmp_file)
            session.cmd("dd if=/tmp/file of=%s" % serial_file)
            output = sock.recv(1024)
            sock.close()
        elif char_dev == "pty":
            session.cmd("echo test > /tmp/file")
            session.cmd("dd if=/tmp/file of=%s &" % serial_file)
            dev_file = "/dev/pts/%s" % id
            if not os.path.exists(dev_file):
                test.fail("%s doesn't exist." % dev_file)
            p = subprocess.Popen(["/usr/bin/cat", dev_file],
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 universal_newlines=True)
            while True:
                output = p.stdout.readline()
                if output or p.poll():
                    break
                time.sleep(0.2)
            p.kill()
        if not output.count("test"):
            err_info = "%s device file doesn't match 'test':%s" % (char_dev,
                                                                   output)
            test.fail(err_info)
    def confirm_hotplug_result(char_dev, index=1, id=0):
        result = virsh.qemu_monitor_command(vm_name, "info qtree", "--hmp")
        h_o = result.stdout.strip()
        chardev_c = h_o.count("chardev = %s%s" % (char_dev, index))
        name_c = h_o.count("name = \"%s%s\"" % (char_dev, index))
        if chardev_c == 0 and name_c == 0:
            raise error.TestFail("Cannot get serial device info: '%s'" % h_o)

        tmp_file = "%s/%s%s" % (tmp_dir, char_dev, index)
        serial_file = "/dev/virtio-ports/%s%s" % (char_dev, index)
        if char_dev == "file":
            session.cmd("echo test > %s" % serial_file)
            f = open(tmp_file, "r")
            output = f.read()
            f.close()
        elif char_dev == "socket":
            session.cmd("echo test > /tmp/file")
            sock = socket.socket(socket.AF_UNIX)
            sock.connect(tmp_file)
            session.cmd("dd if=/tmp/file of=%s" % serial_file)
            output = sock.recv(1024)
            sock.close()
        elif char_dev == "pty":
            session.cmd("echo test > /tmp/file")
            session.cmd("dd if=/tmp/file of=%s &" % serial_file)
            dev_file = "/dev/pts/%s" % id
            if not os.path.exists(dev_file):
                raise error.TestFail("%s doesn't exist." % dev_file)
            p = subprocess.Popen(["/usr/bin/cat", dev_file],
                                 stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            while True:
                output = p.stdout.readline()
                if output or p.poll():
                    break
                time.sleep(0.2)
            p.kill()
        if not output.count("test"):
            err_info = "%s device file doesn't match 'test':%s" % (char_dev, output)
            raise error.TestFail(err_info)
def run(test, params, env):
    """
    Test for hotplug usb device.
    """
    # get the params from params
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)

    usb_type = params.get("usb_type", "kbd")
    attach_type = params.get("attach_type", "attach_device")
    attach_count = int(params.get("attach_count", "1"))
    if usb_type == "storage":
        model = params.get("model", "nec-xhci")
        index = params.get("index", "1")
    status_error = ("yes" == params.get("status_error", "no"))

    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()

    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    utils_selinux.set_status("permissive")

    if usb_type == "storage":
        controllers = vm_xml.get_devices(device_type="controller")
        devices = vm_xml.get_devices()
        for dev in controllers:
            if dev.type == "usb" and dev.index == "1":
                devices.remove(dev)
        controller = Controller("controller")
        controller.type = "usb"
        controller.index = index
        controller.model = model
        devices.append(controller)
        vm_xml.set_devices(devices)

    try:
        session = vm.wait_for_login()
    except (LoginError, VMError, ShellError) as e:
        test.fail("Test failed: %s" % str(e))

    def is_hotplug_ok():
        try:
            output = session.cmd_output("fdisk -l | grep -c '^Disk /dev/.* 1 M'")
            if int(output.strip()) != attach_count:
                return False
            else:
                return True
        except ShellTimeoutError as detail:
            test.fail("unhotplug failed: %s, " % detail)

    tmp_dir = os.path.join(data_dir.get_tmp_dir(), "usb_hotplug_files")
    if not os.path.isdir(tmp_dir):
        os.mkdir(tmp_dir)

    try:
        result = None
        dev_xml = None
        opt = "--hmp"
        for i in range(attach_count):
            if usb_type == "storage":
                path = os.path.join(tmp_dir, "%s.img" % i)
                libvirt.create_local_disk("file", path, size="1M", disk_format="qcow2")
                os.chmod(path, 0o666)

            if attach_type == "qemu_monitor":
                if usb_type == "storage":
                    attach_cmd = "drive_add"
                    attach_cmd += (" 0 id=drive-usb-%s,if=none,file=%s" % (i, path))

                    result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=opt)
                    if result.exit_status or (result.stdout.strip().find("OK") == -1):
                        raise process.CmdError(result.command, result)

                    attach_cmd = "device_add usb-storage,"
                    attach_cmd += ("id=drive-usb-%s,bus=usb1.0,drive=drive-usb-%s" % (i, i))
                else:
                    attach_cmd = "device_add"
                    attach_cmd += " usb-%s,bus=usb1.0,id=%s%s" % (usb_type, usb_type, i)

                result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=opt)
                if result.exit_status:
                    raise process.CmdError(result.command, result)
            else:
                attributes = {'type_name': "usb", 'bus': "1", 'port': "0"}
                if usb_type == "storage":
                    dev_xml = Disk(type_name="file")
                    dev_xml.device = "disk"
                    dev_xml.source = dev_xml.new_disk_source(**{"attrs": {'file': path}})
                    dev_xml.driver = {"name": "qemu", "type": 'qcow2', "cache": "none"}
                    dev_xml.target = {"dev": 'sdb', "bus": "usb"}
                    dev_xml.address = dev_xml.new_disk_address(**{"attrs": attributes})
                else:
                    if usb_type == "mouse":
                        dev_xml = Input("mouse")
                    elif usb_type == "tablet":
                        dev_xml = Input("tablet")
                    else:
                        dev_xml = Input("keyboard")

                    dev_xml.input_bus = "usb"
                    dev_xml.address = dev_xml.new_input_address(**{"attrs": attributes})

                result = virsh.attach_device(vm_name, dev_xml.xml)
                if result.exit_status:
                    raise process.CmdError(result.command, result)

        if status_error and usb_type == "storage":
            if utils_misc.wait_for(is_hotplug_ok, timeout=30):
                # Sometimes we meet an error but the ret in $? is 0.
                test.fail("\nAttached device successfully in negative case."
                          "\nExpected failure when attach count exceeds the maximum."
                          "\nDetail: %s" % result)

        for i in range(attach_count):
            attach_cmd = "device_del"
            if attach_type == "qemu_monitor":
                if usb_type == "storage":
                    attach_cmd += (" drive-usb-%s" % i)
                else:
                    if usb_type == "mouse":
                        attach_cmd += " mouse"
                    elif usb_type == "tablet":
                        attach_cmd += " tablet"
                    else:
                        attach_cmd += " keyboard"

                result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=opt)
                if result.exit_status:
                    raise process.CmdError(result.command, result)
            else:
                result = virsh.detach_device(vm_name, dev_xml.xml)
                if result.exit_status:
                    raise process.CmdError(result.command, result)
    except process.CmdError as e:
        if not status_error:
            # live attach of device 'input' is not supported
            ret = result.stderr.find("Operation not supported")
            if usb_type != "storage" and ret > -1:
                pass
            else:
                test.fail("failed to attach device.\nDetail: %s." % result)
    finally:
        session.close()
        if os.path.isdir(tmp_dir):
            shutil.rmtree(tmp_dir)
        utils_selinux.set_status(backup_sestatus)
        vm_xml_backup.sync()
Example #27
def run(test, params, env):
    """
    Test for hotplug usb device.
    """
    # get the params from params
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)

    usb_type = params.get("usb_type", "kbd")
    attach_type = params.get("attach_type", "attach_device")
    attach_count = int(params.get("attach_count", "1"))
    if usb_type == "storage":
        model = params.get("model", "nec-xhci")
        index = params.get("index", "1")
    status_error = ("yes" == params.get("status_error", "no"))

    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()

    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    utils_selinux.set_status("permissive")

    if usb_type == "storage":
        controllers = vm_xml.get_devices(device_type="controller")
        devices = vm_xml.get_devices()
        for dev in controllers:
            if dev.type == "usb" and dev.index == "1":
                devices.remove(dev)
        controller = Controller("controller")
        controller.type = "usb"
        controller.index = index
        controller.model = model
        devices.append(controller)
        vm_xml.set_devices(devices)

    try:
        session = vm.wait_for_login()
    except (LoginError, VMError, ShellError) as e:
        test.fail("Test failed: %s" % str(e))

    def is_hotplug_ok():
        try:
            output = session.cmd_output(
                "fdisk -l | grep -c '^Disk /dev/.* 1 M'")
            if int(output.strip()) != attach_count:
                return False
            else:
                return True
        except ShellTimeoutError as detail:
            test.fail("unhotplug failed: %s, " % detail)

    tmp_dir = os.path.join(data_dir.get_tmp_dir(), "usb_hotplug_files")
    if not os.path.isdir(tmp_dir):
        os.mkdir(tmp_dir)

    try:
        result = None
        dev_xml = None
        opt = "--hmp"
        for i in range(attach_count):
            if usb_type == "storage":
                path = os.path.join(tmp_dir, "%s.img" % i)
                libvirt.create_local_disk("file",
                                          path,
                                          size="1M",
                                          disk_format="qcow2")
                os.chmod(path, 0o666)

            if attach_type == "qemu_monitor":
                if usb_type == "storage":
                    attach_cmd = "drive_add"
                    attach_cmd += (" 0 id=drive-usb-%s,if=none,file=%s" %
                                   (i, path))

                    result = virsh.qemu_monitor_command(vm_name,
                                                        attach_cmd,
                                                        options=opt)
                    if result.exit_status or (result.stdout.strip().find("OK")
                                              == -1):
                        raise process.CmdError(result.command, result)

                    attach_cmd = "device_add usb-storage,"
                    attach_cmd += (
                        "id=drive-usb-%s,bus=usb1.0,drive=drive-usb-%s" %
                        (i, i))
                else:
                    attach_cmd = "device_add"
                    attach_cmd += " usb-%s,bus=usb1.0,id=%s%s" % (usb_type,
                                                                  usb_type, i)

                result = virsh.qemu_monitor_command(vm_name,
                                                    attach_cmd,
                                                    options=opt)
                if result.exit_status:
                    raise process.CmdError(result.command, result)
            else:
                attributes = {'type_name': "usb", 'bus': "1", 'port': "0"}
                if usb_type == "storage":
                    dev_xml = Disk(type_name="file")
                    dev_xml.device = "disk"
                    dev_xml.source = dev_xml.new_disk_source(
                        **{"attrs": {
                            'file': path
                        }})
                    dev_xml.driver = {
                        "name": "qemu",
                        "type": 'qcow2',
                        "cache": "none"
                    }
                    dev_xml.target = {"dev": 'sdb', "bus": "usb"}
                    dev_xml.address = dev_xml.new_disk_address(
                        **{"attrs": attributes})
                else:
                    if usb_type == "mouse":
                        dev_xml = Input("mouse")
                    elif usb_type == "tablet":
                        dev_xml = Input("tablet")
                    else:
                        dev_xml = Input("keyboard")

                    dev_xml.input_bus = "usb"
                    dev_xml.address = dev_xml.new_input_address(
                        **{"attrs": attributes})

                result = virsh.attach_device(vm_name, dev_xml.xml)
                if result.exit_status:
                    raise process.CmdError(result.command, result)

        if status_error and usb_type == "storage":
            if utils_misc.wait_for(is_hotplug_ok, timeout=30):
                # Sometimes we meet an error but the ret in $? is 0.
                test.fail("\nAttached device successfully in negative case."
                          "\nExpected failure when attach count exceeds the maximum."
                          "\nDetail: %s" % result)

        for i in range(attach_count):
            attach_cmd = "device_del"
            if attach_type == "qemu_monitor":
                if usb_type == "storage":
                    attach_cmd += (" drive-usb-%s" % i)
                else:
                    if usb_type == "mouse":
                        attach_cmd += " mouse"
                    elif usb_type == "tablet":
                        attach_cmd += " tablet"
                    else:
                        attach_cmd += " keyboard"

                result = virsh.qemu_monitor_command(vm_name,
                                                    attach_cmd,
                                                    options=opt)
                if result.exit_status:
                    raise process.CmdError(result.command, result)
            else:
                result = virsh.detach_device(vm_name, dev_xml.xml)
                if result.exit_status:
                    raise process.CmdError(result.command, result)
    except process.CmdError as e:
        if not status_error:
            # live attach of device 'input' is not supported
            ret = result.stderr.find("Operation not supported")
            if usb_type != "storage" and ret > -1:
                pass
            else:
                test.fail("failed to attach device.\nDetail: %s." % result)
    finally:
        session.close()
        if os.path.isdir(tmp_dir):
            shutil.rmtree(tmp_dir)
        utils_selinux.set_status(backup_sestatus)
        vm_xml_backup.sync()
Example #28
def run(test, params, env):
    """
    1. Configure kernel cmdline to support kdump
    2. Start kdump service
    3. Inject NMI to the guest
    4. Check NMI times
    """
    for cmd in 'inject-nmi', 'qemu-monitor-command':
        if not virsh.has_help_command(cmd):
            raise error.TestNAError("This version of libvirt does not "
                                    "support the %s test" % cmd)

    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm = env.get_vm(vm_name)
    start_vm = params.get("start_vm")
    expected_nmi_times = params.get("expected_nmi_times", '0')
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    if start_vm == "yes":
        # start kdump service in the guest
        cmd = "which kdump"
        try:
            run_cmd_in_guest(vm, cmd)
        except:
            try:
                # try to install kexec-tools on fedoraX/rhelx.y guest
                run_cmd_in_guest(vm, "yum install -y kexec-tools")
            except:
                raise error.TestNAError("Requires kexec-tools (or the "
                                        "equivalent for your distro)")

        # enable kdump service in the guest
        cmd = "service kdump start"
        run_cmd_in_guest(vm, cmd)

        # filter original 'NMI' information from the /proc/interrupts
        cmd = "grep NMI /proc/interrupts"
        nmi_str = run_cmd_in_guest(vm, cmd)

        # filter CPU from the /proc/cpuinfo and count number
        cmd = "grep -E '^process' /proc/cpuinfo | wc -l"
        vcpu_num = run_cmd_in_guest(vm, cmd).strip()

        logging.info("Inject NMI to the guest via virsh inject_nmi")
        virsh.inject_nmi(vm_name, debug=True, ignore_status=False)

        logging.info("Inject NMI to the guest via virsh qemu_monitor_command")
        virsh.qemu_monitor_command(vm_name, '{"execute":"inject-nmi"}')

        # injects a Non-Maskable Interrupt into the default CPU (x86/s390)
        # or all CPUs (ppc64), as usual, the default CPU index is 0
        cmd = "grep NMI /proc/interrupts | awk '{print $2}'"
        nmi_from_default_vcpu = run_cmd_in_guest(vm, cmd)
        real_nmi_times = nmi_from_default_vcpu.splitlines()[0]
        logging.debug("The current Non-Maskable Interrupts: %s", real_nmi_times)

        # check Non-maskable interrupts times
        if real_nmi_times != expected_nmi_times:
            raise error.TestFail("NMI times aren't expected %s:%s"
                                 % (real_nmi_times, expected_nmi_times))
def run(test, params, env):
    """
    Test command: virsh qemu-monitor-command.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vm_ref = params.get("vm_ref", "domname")
    vm_state = params.get("vm_state", "running")
    cmd = params.get("qemu_cmd", "")
    options = params.get("options", "")
    status_error = "yes" == params.get("status_error", "no")
    domuuid = vm.get_uuid()
    domid = ""
    libvirtd_inst = utils_libvirtd.Libvirtd()

    help_info = virsh.help("qemu-monitor-command").stdout.strip()
    if "--pretty" in options:
        if "--pretty" not in help_info:
            raise error.TestNAError("--pretty option is not supported in"
                                    " current version")

    try:
        # Prepare vm state for test
        if vm_state != "shutoff":
            vm.start()
            vm.wait_for_login()
            domid = vm.get_id()
        if vm_state == "paused":
            vm.pause()

        if vm_ref == "domname":
            vm_ref = vm_name
        elif vm_ref == "domid":
            vm_ref = domid
        elif vm_ref == "domuuid":
            vm_ref = domuuid
        elif domid and vm_ref == "hex_id":
            vm_ref = hex(int(domid))

        # Run virsh command
        cmd_result = virsh.qemu_monitor_command(vm_ref, cmd, options,
                                                ignore_status=True,
                                                debug=True)
        output = cmd_result.stdout.strip()
        status = cmd_result.exit_status

        # Check result
        if not libvirtd_inst.is_running():
            raise error.TestFail("Libvirtd is not running after run command.")
        if status_error:
            if not status:
                # Return status is 0 with unknown command
                if "unknown command:" in output:
                    logging.debug("Command failed: %s" % output)
                else:
                    raise error.TestFail("Expect fail, but run successfully.")
            else:
                logging.debug("Command failed as expected.")
        else:
            if status:
                raise error.TestFail("Expect succeed, but run fail.")
    finally:
        # Cleanup
        if not libvirtd_inst.is_running():
            libvirtd_inst.restart()
Example #30
def run(test, params, env):
    """
    Test command: virsh dump.

    This command can dump the core of a domain to a file for analysis.
    1. Positive testing
        1.1 Dump domain with valid options.
        1.2 Avoid file system cache when dumping.
        1.3 Compress the dump images to valid/invalid formats.
    2. Negative testing
        2.1 Dump domain to a non-exist directory.
        2.2 Dump domain with invalid option.
        2.3 Dump a shut-off domain.
    """

    vm_name = params.get("main_vm", "vm1")
    vm = env.get_vm(vm_name)
    options = params.get("dump_options")
    dump_file = params.get("dump_file", "vm.core")
    dump_dir = params.get("dump_dir", data_dir.get_tmp_dir())
    if os.path.dirname(dump_file) == "":
        dump_file = os.path.join(dump_dir, dump_file)
    dump_image_format = params.get("dump_image_format")
    start_vm = params.get("start_vm") == "yes"
    paused_after_start_vm = params.get("paused_after_start_vm") == "yes"
    status_error = params.get("status_error", "no") == "yes"
    check_bypass_timeout = int(params.get("check_bypass_timeout", "120"))
    memory_dump_format = params.get("memory_dump_format", "")
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    def check_flag(file_flags):
        """
        Check if file flag include O_DIRECT.

        :param file_flags: The flags of dumped file

        Note, O_DIRECT(direct disk access hint) is defined as:
        on x86_64:
        #define O_DIRECT        00040000
        on ppc64le or aarch64:
        #define O_DIRECT        00200000
        """
        arch = platform.machine()
        file_flag_check = int('00040000', 16)
        if 'ppc64' in arch or 'aarch64' in arch:
            file_flag_check = int('00200000', 16)

        if int(file_flags, 16) & file_flag_check == file_flag_check:
            logging.info("File flags include O_DIRECT")
            return True
        else:
            logging.error("File flags don't include O_DIRECT")
            return False

    def check_bypass(dump_file, result_dict):
        """
        Get the file flags of domain core dump file and check it.
        """
        error = ''
        cmd1 = "lsof -w %s" % dump_file
        while True:
            if not os.path.exists(dump_file) or process.system(cmd1):
                time.sleep(0.1)
                continue
            cmd2 = ("cat /proc/$(%s |awk '/libvirt_i/{print $2}')/fdinfo/1"
                    "|grep flags|awk '{print $NF}'" % cmd1)
            ret = process.run(cmd2, allow_output_check='combined', shell=True)
            status, output = ret.exit_status, ret.stdout_text.strip()
            if status:
                error = "Fail to get the flags of dumped file"
                logging.error(error)
                break
            if not len(output):
                continue
            try:
                logging.debug("The flag of dumped file: %s", output)
                if check_flag(output):
                    logging.info("Bypass file system cache "
                                 "successfully when dumping")
                    break
                else:
                    error = "Bypass file system cache fail when dumping"
                    logging.error(error)
                    break
            except (ValueError, IndexError) as detail:
                error = detail
                logging.error(error)
                break
        result_dict['bypass'] = error

    def check_domstate(actual, options):
        """
        Check the domain status according to dump options.
        """

        if options.find('live') >= 0:
            domstate = "running"
            if options.find('crash') >= 0 or options.find('reset') > 0:
                domstate = "running"
            if paused_after_start_vm:
                domstate = "paused"
        elif options.find('crash') >= 0:
            domstate = "shut off"
            if options.find('reset') >= 0:
                domstate = "running"
        elif options.find('reset') >= 0:
            domstate = "running"
            if paused_after_start_vm:
                domstate = "paused"
        else:
            domstate = "running"
            if paused_after_start_vm:
                domstate = "paused"

        if not start_vm:
            domstate = "shut off"

        logging.debug("Domain should %s after run dump %s", domstate, options)

        return (domstate == actual)

    def check_dump_format(dump_image_format, dump_file):
        """
        Check the format of dumped file.

        If 'dump_image_format' is not specified or is invalid in qemu.conf,
        the dumped file should be a normal raw file; otherwise it should be
        compressed to the specified format. The supported compression formats
        are lzop, gzip, bzip2, and xz.
        For a memory-only dump, the default format is ELF; a format can also be
        selected with the --format option, so the result could be 'elf' or 'data'.
        """

        valid_format = ["lzop", "gzip", "bzip2", "xz", 'elf', 'data']
        if len(dump_image_format) == 0 or dump_image_format not in valid_format:
            logging.debug("No need check the dumped file format")
            return True
        else:
            file_cmd = "file %s" % dump_file
            ret = process.run(file_cmd, allow_output_check='combined', shell=True)
            status, output = ret.exit_status, ret.stdout_text.strip()
            if status:
                logging.error("Fail to check dumped file %s", dump_file)
                return False
            logging.debug("Run file %s output: %s", dump_file, output)
            actual_format = output.split(" ")[1]
            if actual_format.lower() != dump_image_format.lower():
                logging.error("Compressing dumped file to %s failed: %s" %
                              (dump_image_format, actual_format))
                return False
            else:
                return True

    # Configure dump_image_format in /etc/libvirt/qemu.conf.
    qemu_config = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()

    # Install lsof pkg if not installed
    if not utils_package.package_install("lsof"):
        test.cancel("Failed to install lsof in host\n")

    if len(dump_image_format):
        qemu_config.dump_image_format = dump_image_format
        libvirtd.restart()

    # Deal with memory-only dump format
    if len(memory_dump_format):
        # Make sure libvirt support this option
        if virsh.has_command_help_match("dump", "--format") is None:
            test.cancel("Current libvirt version doesn't support"
                        " --format option for dump command")
        # Make sure QEMU support this format
        query_cmd = '{"execute":"query-dump-guest-memory-capability"}'
        qemu_capa = virsh.qemu_monitor_command(vm_name, query_cmd).stdout
        if (memory_dump_format not in qemu_capa) and not status_error:
            test.cancel("Unsupported dump format '%s' for"
                        " this QEMU binary" % memory_dump_format)
        options += " --format %s" % memory_dump_format
        if memory_dump_format == 'elf':
            dump_image_format = 'elf'
        if memory_dump_format in ['kdump-zlib', 'kdump-lzo', 'kdump-snappy']:
            dump_image_format = 'data'

    # Back up xml file
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    dump_guest_core = params.get("dump_guest_core", "")
    if dump_guest_core not in ["", "on", "off"]:
        test.error("invalid dumpCore value: %s" % dump_guest_core)
    try:
        # Set dumpCore in guest xml
        if dump_guest_core:
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vmxml.dumpcore = dump_guest_core
            vmxml.sync()
            vm.start()
            # check qemu-kvm cmdline
            vm_pid = vm.get_pid()
            cmd = "cat /proc/%d/cmdline|xargs -0 echo" % vm_pid
            cmd += "|grep dump-guest-core=%s" % dump_guest_core
            result = process.run(cmd, ignore_status=True, shell=True)
            logging.debug("cmdline: %s" % result.stdout_text)
            if result.exit_status:
                test.fail("Did not find dump-guest-core=%s in qemu cmdline"
                          % dump_guest_core)
            else:
                logging.info("Found dump-guest-core=%s in qemu cmdline",
                             dump_guest_core)

        # Deal with bypass-cache option
        if options.find('bypass-cache') >= 0:
            vm.wait_for_login()
            result_dict = multiprocessing.Manager().dict()
            child_process = multiprocessing.Process(target=check_bypass,
                                                    args=(dump_file, result_dict))
            child_process.start()

        # Run virsh command
        cmd_result = virsh.dump(vm_name, dump_file, options,
                                unprivileged_user=unprivileged_user,
                                uri=uri,
                                ignore_status=True, debug=True)
        status = cmd_result.exit_status
        if 'child_process' in locals():
            child_process.join(timeout=check_bypass_timeout)
            params['bypass'] = result_dict['bypass']

        logging.info("Start check result")
        if not check_domstate(vm.state(), options):
            test.fail("Domain status check fail.")
        if status_error:
            if not status:
                test.fail("Expect fail, but run successfully")
        else:
            if status:
                test.fail("Expect succeed, but run fail")
            if not os.path.exists(dump_file):
                test.fail("Fail to find domain dumped file.")
            if check_dump_format(dump_image_format, dump_file):
                logging.info("Successfully dump domain to %s", dump_file)
            else:
                test.fail("The format of dumped file is wrong.")
        if params.get('bypass'):
            test.fail(params['bypass'])
    finally:
        backup_xml.sync()
        qemu_config.restore()
        libvirtd.restart()
        if os.path.isfile(dump_file):
            os.remove(dump_file)
Example #31
0
def copied_migration(vm, params, blockjob_type=None, block_target="vda"):
    """
    Migrate VMs with storage copied while the guest is under stress.
    During the migration, some qemu-monitor-command calls are sent.
    """
    dest_uri = params.get("migrate_dest_uri")
    remote_host = params.get("migrate_dest_host")
    copy_option = params.get("copy_storage_option", "")
    username = params.get("remote_user")
    password = params.get("migrate_dest_pwd")
    timeout = int(params.get("thread_timeout", 1200))
    options = "--live %s --unsafe" % copy_option

    # Get vm ip for remote checking
    if vm.is_dead():
        vm.start()
    vm.wait_for_login()
    vms_ip = {}
    vms_ip[vm.name] = vm.get_address()
    logging.debug("VM %s IP: %s", vm.name, vms_ip[vm.name])

    # Start to load stress
    stress_type = params.get("migrate_stress_type")
    if stress_type == "cpu":
        params['stress_args'] = "--cpu 2 --quiet --timeout 60"
    elif stress_type == "memory":
        params['stress_args'] = "--vm 2 --vm-bytes 256M --vm-keep --timeout 60"
    if stress_type is not None:
        utils_test.load_stress("stress_in_vms", [vm], params)

    cp_mig = utlv.MigrationTest()
    migration_thread = threading.Thread(target=cp_mig.thread_func_migration,
                                        args=(vm, dest_uri, options))
    migration_thread.start()
    # Wait for migration launched
    time.sleep(5)
    job_ret = virsh.domjobinfo(vm.name, debug=True)
    if job_ret.exit_status:
        raise error.TestError("Prepare migration for blockjob failed.")

    # Execute some qemu monitor commands
    pause_cmd = "block-job-pause %s" % block_target
    resume_cmd = "block-job-resume %s" % block_target
    cancel_cmd = "block-job-cancel %s" % block_target
    complete_cmd = "block-job-complete %s" % block_target

    blockjob_failures = []
    try:
        if blockjob_type == "cancel":
            virsh.qemu_monitor_command(vm.name,
                                       cancel_cmd,
                                       debug=True,
                                       ignore_status=False)
        elif blockjob_type == "pause_resume":
            virsh.qemu_monitor_command(vm.name,
                                       pause_cmd,
                                       debug=True,
                                       ignore_status=False)
            # TODO: Check whether it is paused.
            virsh.qemu_monitor_command(vm.name,
                                       resume_cmd,
                                       debug=True,
                                       ignore_status=False)
        elif blockjob_type == "complete":
            virsh.qemu_monitor_command(vm.name,
                                       complete_cmd,
                                       debug=True,
                                       ignore_status=False)
    except error.CmdError as detail:
        blockjob_failures.append(str(detail))
Example #32
0
def run(test, params, env):
    """
    1. Configure kernel cmdline to support kdump
    2. Start kdump service
    3. Inject NMI to the guest
    4. Check NMI times
    """
    for cmd in 'inject-nmi', 'qemu-monitor-command':
        if not virsh.has_help_command(cmd):
            test.cancel(
                "This version of libvirt does not "
                " support the %s test", cmd)

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)
    start_vm = params.get("start_vm")
    expected_nmi_times = params.get("expected_nmi_times", '0')
    kernel_params = params.get("kernel_params", "")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")
    try:
        if kernel_params:
            update_boot_option_and_reboot(vm, kernel_params, test)
        if start_vm == "yes":
            # check that kdump (kexec-tools) is available in the guest
            cmd = "which kdump"
            try:
                run_cmd_in_guest(vm, cmd, test)
            except Exception:
                try:
                    # try to install kexec-tools on fedoraX/rhelx.y guest
                    run_cmd_in_guest(vm, "yum install -y kexec-tools", test)
                except Exception:
                    test.error(
                        "Requires kexec-tools(or the equivalent for your distro)"
                    )

            # start the kdump service in the guest
            cmd = "service kdump start"
            run_cmd_in_guest(vm, cmd, test, timeout=120)

            # capture the original 'NMI' counts from /proc/interrupts
            cmd = "grep NMI /proc/interrupts"
            nmi_str = run_cmd_in_guest(vm, cmd, test)

            # count the number of CPUs from /proc/cpuinfo
            cmd = "grep -E '^process' /proc/cpuinfo | wc -l"
            vcpu_num = run_cmd_in_guest(vm, cmd, test).strip()

            logging.info("Inject NMI to the guest via virsh inject_nmi")
            virsh.inject_nmi(vm_name, debug=True, ignore_status=False)

            logging.info(
                "Inject NMI to the guest via virsh qemu_monitor_command")
            virsh.qemu_monitor_command(vm_name, '{"execute":"inject-nmi"}')

            # injects a Non-Maskable Interrupt into the default CPU (x86/s390)
            # or all CPUs (ppc64), as usual, the default CPU index is 0
            cmd = "grep NMI /proc/interrupts | awk '{print $2}'"
            nmi_from_default_vcpu = run_cmd_in_guest(vm, cmd, test)
            real_nmi_times = nmi_from_default_vcpu.splitlines()[0]
            logging.debug("The current Non-Maskable Interrupts: %s",
                          real_nmi_times)

            # check the number of non-maskable interrupts
            if real_nmi_times != expected_nmi_times:
                test.fail("NMI count %s does not match the expected %s" %
                          (real_nmi_times, expected_nmi_times))
    finally:
        if kernel_params:
            cmd = "grubby --update-kernel=`grubby --default-kernel` --remove-args='%s'" % kernel_params
            run_cmd_in_guest(vm, cmd, test)
            vm.reboot()
Example #33
0
def copied_migration(test, vm, params, blockjob_type=None, block_target="vda"):
    """
    Migrate VMs with storage copied while the guest is under stress.
    During the migration, some qemu-monitor-command calls are sent.
    """
    dest_uri = params.get("migrate_dest_uri")
    remote_host = params.get("migrate_dest_host")
    copy_option = params.get("copy_storage_option", "")
    username = params.get("remote_user")
    password = params.get("migrate_dest_pwd")
    timeout = int(params.get("thread_timeout", 1200))
    options = "--live %s --unsafe" % copy_option

    # Get vm ip for remote checking
    if vm.is_dead():
        vm.start()
    vm.wait_for_login()
    vms_ip = {}
    vms_ip[vm.name] = vm.get_address()
    logging.debug("VM %s IP: %s", vm.name, vms_ip[vm.name])

    # Start to load stress
    stress_type = params.get("migrate_stress_type")
    if stress_type == "cpu":
        params['stress_args'] = "--cpu 2 --quiet --timeout 60"
    elif stress_type == "memory":
        params['stress_args'] = "--vm 2 --vm-bytes 256M --vm-keep --timeout 60"
    if stress_type is not None:
        utils_test.load_stress("stress_in_vms", params=params, vms=[vm])

    cp_mig = migration.MigrationTest()
    migration_thread = threading.Thread(target=cp_mig.thread_func_migration,
                                        args=(vm, dest_uri, options))
    migration_thread.start()
    # Wait for migration launched
    time.sleep(5)
    job_ret = virsh.domjobinfo(vm.name, debug=True)
    if job_ret.exit_status:
        test.error("Prepare migration for blockjob failed.")

    # Execute some qemu monitor commands
    pause_cmd = "block-job-pause %s" % block_target
    resume_cmd = "block-job-resume %s" % block_target
    cancel_cmd = "block-job-cancel %s" % block_target
    complete_cmd = "block-job-complete %s" % block_target

    blockjob_failures = []
    try:
        if blockjob_type == "cancel":
            virsh.qemu_monitor_command(vm.name,
                                       cancel_cmd,
                                       debug=True,
                                       ignore_status=False)
        elif blockjob_type == "pause_resume":
            virsh.qemu_monitor_command(vm.name,
                                       pause_cmd,
                                       debug=True,
                                       ignore_status=False)
            # TODO: Check whether it is paused.
            virsh.qemu_monitor_command(vm.name,
                                       resume_cmd,
                                       debug=True,
                                       ignore_status=False)
        elif blockjob_type == "complete":
            virsh.qemu_monitor_command(vm.name,
                                       complete_cmd,
                                       debug=True,
                                       ignore_status=False)
    except process.CmdError as detail:
        blockjob_failures.append(str(detail))

    # Job info FYI
    virsh.domjobinfo(vm.name, debug=True)

    if len(blockjob_failures):
        timeout = 30

    migration_thread.join(timeout)
    if migration_thread.is_alive():
        logging.error("Migration thread %s timed out.", migration_thread)
        cp_mig.RET_LOCK.acquire()
        cp_mig.RET_MIGRATION = False
        cp_mig.RET_LOCK.release()

    if len(blockjob_failures):
        cp_mig.cleanup_dest_vm(vm, None, dest_uri)
        test.fail("Run qemu monitor command failed %s" % blockjob_failures)

    check_ip_failures = []
    if cp_mig.RET_MIGRATION:
        try:
            utils_test.check_dest_vm_network(vm, vms_ip[vm.name], remote_host,
                                             username, password)
        except exceptions.TestFail as detail:
            check_ip_failures.append(str(detail))
        cp_mig.cleanup_dest_vm(vm, None, dest_uri)
        if blockjob_type in ["cancel", "complete"]:
            test.fail("Storage migration passed even after " "cancellation.")
    else:
        cp_mig.cleanup_dest_vm(vm, None, dest_uri)
        if blockjob_type in ["cancel", "complete"]:
            logging.error("Expected Migration Error for %s", blockjob_type)
            return
        else:
            test.fail("Command blockjob does not work well under "
                      "storage copied migration.")

    if len(check_ip_failures):
        test.fail("Check IP failed:%s" % check_ip_failures)
Example #34
0
def run(test, params, env):
    """
    Test command: virsh dump.

    This command can dump the core of a domain to a file for analysis.
    1. Positive testing
        1.1 Dump domain with valid options.
        1.2 Avoid file system cache when dumping.
        1.3 Compress the dump images to valid/invalid formats.
    2. Negative testing
        2.1 Dump domain to a non-exist directory.
        2.2 Dump domain with invalid option.
        2.3 Dump a shut-off domain.
    """

    vm_name = params.get("main_vm", "vm1")
    vm = env.get_vm(vm_name)
    options = params.get("dump_options")
    dump_file = params.get("dump_file", "vm.core")
    dump_dir = params.get("dump_dir", data_dir.get_tmp_dir())
    if os.path.dirname(dump_file) == "":
        dump_file = os.path.join(dump_dir, dump_file)
    dump_image_format = params.get("dump_image_format")
    start_vm = params.get("start_vm") == "yes"
    paused_after_start_vm = params.get("paused_after_start_vm") == "yes"
    status_error = params.get("status_error", "no") == "yes"
    timeout = int(params.get("check_pid_timeout", "5"))
    memory_dump_format = params.get("memory_dump_format", "")
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    def check_domstate(actual, options):
        """
        Check the domain status according to dump options.
        """

        if options.find('live') >= 0:
            domstate = "running"
            if options.find('crash') >= 0 or options.find('reset') > 0:
                domstate = "running"
            if paused_after_start_vm:
                domstate = "paused"
        elif options.find('crash') >= 0:
            domstate = "shut off"
            if options.find('reset') >= 0:
                domstate = "running"
        elif options.find('reset') >= 0:
            domstate = "running"
            if paused_after_start_vm:
                domstate = "paused"
        else:
            domstate = "running"
            if paused_after_start_vm:
                domstate = "paused"

        if not start_vm:
            domstate = "shut off"

        logging.debug("Domain should %s after run dump %s", domstate, options)

        return (domstate == actual)

    def check_dump_format(dump_image_format, dump_file):
        """
        Check the format of dumped file.

        If 'dump_image_format' is not specified or invalid in qemu.conf, then
        the file should be a normal raw file; otherwise it should be
        compressed to the specified format. The supported compression
        formats are: lzop, gzip, bzip2, and xz.
        For a memory-only dump, the default dump format is ELF, and a format
        can also be specified with the --format option, so the result could
        be 'elf' or 'data'.
        """

        valid_format = ["lzop", "gzip", "bzip2", "xz", 'elf', 'data']
        if len(dump_image_format) == 0 or dump_image_format not in valid_format:
            logging.debug("No need to check the dumped file format")
            return True
        else:
            file_cmd = "file %s" % dump_file
            (status, output) = commands.getstatusoutput(file_cmd)
            if status:
                logging.error("Fail to check dumped file %s", dump_file)
                return False
            logging.debug("Run file %s output: %s", dump_file, output)
            actual_format = output.split(" ")[1]
            if actual_format.lower() != dump_image_format.lower():
                logging.error("Compress dumped file to %s fail: %s" %
                              (dump_image_format, actual_format))
                return False
            else:
                return True

    # Configure dump_image_format in /etc/libvirt/qemu.conf.
    qemu_config = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    if len(dump_image_format):
        qemu_config.dump_image_format = dump_image_format
        libvirtd.restart()

    # Deal with bypass-cache option
    child_pid = 0
    if options.find('bypass-cache') >= 0:
        pid = os.fork()
        if pid:
            # Guarantee check_bypass function has run before dump
            child_pid = pid
            try:
                wait_pid_active(pid, timeout)
            finally:
                os.kill(child_pid, signal.SIGTERM)
        else:
            check_bypass(dump_file)
            # Wait for the parent process to kill us
            while True:
                time.sleep(1)

    # Deal with memory-only dump format
    if len(memory_dump_format):
        # Make sure libvirt support this option
        if virsh.has_command_help_match("dump", "--format") is None:
            raise error.TestNAError("Current libvirt version doesn't support"
                                    " --format option for dump command")
        # Make sure QEMU support this format
        query_cmd = '{"execute":"query-dump-guest-memory-capability"}'
        qemu_capa = virsh.qemu_monitor_command(vm_name, query_cmd).stdout
        if (memory_dump_format not in qemu_capa) and not status_error:
            raise error.TestNAError("Unsupported dump format '%s' for"
                                    " this QEMU binary" % memory_dump_format)
        options += " --format %s" % memory_dump_format
        if memory_dump_format == 'elf':
            dump_image_format = 'elf'
        if memory_dump_format in ['kdump-zlib', 'kdump-lzo', 'kdump-snappy']:
            dump_image_format = 'data'

    # Back up xml file
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    dump_guest_core = params.get("dump_guest_core", "")
    if dump_guest_core not in ["", "on", "off"]:
        raise error.TestError("invalid dumpCore value: %s" % dump_guest_core)
    try:
        # Set dumpCore in guest xml
        if dump_guest_core:
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vmxml.dumpcore = dump_guest_core
            vmxml.sync()
            vm.start()
            # check qemu-kvm cmdline
            vm_pid = vm.get_pid()
            cmd = "cat /proc/%d/cmdline|xargs -0 echo" % vm_pid
            cmd += "|grep dump-guest-core=%s" % dump_guest_core
            result = utils.run(cmd, ignore_status=True)
            logging.debug("cmdline: %s" % result.stdout)
            if result.exit_status:
                error.TestFail("Not find dump-guest-core=%s in qemu cmdline" %
                               dump_guest_core)
            else:
                logging.info("Find dump-guest-core=%s in qemum cmdline",
                             dump_guest_core)

        # Run virsh command
        cmd_result = virsh.dump(vm_name,
                                dump_file,
                                options,
                                unprivileged_user=unprivileged_user,
                                uri=uri,
                                ignore_status=True,
                                debug=True)
        status = cmd_result.exit_status

        logging.info("Start check result")
        if not check_domstate(vm.state(), options):
            raise error.TestFail("Domain status check fail.")
        if status_error:
            if not status:
                raise error.TestFail("Expect fail, but run successfully")
        else:
            if status:
                raise error.TestFail("Expect succeed, but run fail")
            if not os.path.exists(dump_file):
                raise error.TestFail("Fail to find domain dumped file.")
            if check_dump_format(dump_image_format, dump_file):
                logging.info("Successfully dump domain to %s", dump_file)
            else:
                raise error.TestFail("The format of dumped file is wrong.")
    finally:
        if child_pid:
            os.kill(child_pid, signal.SIGTERM)
        if os.path.isfile(dump_file):
            os.remove(dump_file)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
        qemu_config.restore()
        libvirtd.restart()
Example #35
0
def run(test, params, env):
    """
    Test command: virsh dump.

    This command can dump the core of a domain to a file for analysis.
    1. Positive testing
        1.1 Dump domain with valid options.
        1.2 Avoid file system cache when dumping.
        1.3 Compress the dump images to valid/invalid formats.
    2. Negative testing
        2.1 Dump domain to a non-exist directory.
        2.2 Dump domain with invalid option.
        2.3 Dump a shut-off domain.
    """

    vm_name = params.get("main_vm", "vm1")
    vm = env.get_vm(vm_name)
    options = params.get("dump_options")
    dump_file = params.get("dump_file", "vm.core")
    if os.path.dirname(dump_file) == "":
        dump_file = os.path.join(test.tmpdir, dump_file)
    dump_image_format = params.get("dump_image_format")
    start_vm = params.get("start_vm") == "yes"
    paused_after_start_vm = params.get("paused_after_start_vm") == "yes"
    status_error = params.get("status_error", "no") == "yes"
    timeout = int(params.get("timeout", "5"))
    memory_dump_format = params.get("memory_dump_format", "")
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    def check_domstate(actual, options):
        """
        Check the domain status according to dump options.
        """

        if options.find('live') >= 0:
            domstate = "running"
            if options.find('crash') >= 0 or options.find('reset') > 0:
                domstate = "running"
            if paused_after_start_vm:
                domstate = "paused"
        elif options.find('crash') >= 0:
            domstate = "shut off"
            if options.find('reset') >= 0:
                domstate = "running"
        elif options.find('reset') >= 0:
            domstate = "running"
            if paused_after_start_vm:
                domstate = "paused"
        else:
            domstate = "running"
            if paused_after_start_vm:
                domstate = "paused"

        if not start_vm:
            domstate = "shut off"

        logging.debug("Domain should %s after run dump %s", domstate, options)

        return (domstate == actual)

    def check_dump_format(dump_image_format, dump_file):
        """
        Check the format of dumped file.

        If 'dump_image_format' is not specified or invalid in qemu.conf, then
        the file should be a normal raw file; otherwise it should be
        compressed to the specified format. The supported compression
        formats are: lzop, gzip, bzip2, and xz.
        For a memory-only dump, the default dump format is ELF, and a format
        can also be specified with the --format option, so the result could
        be 'elf' or 'data'.
        """

        valid_format = ["lzop", "gzip", "bzip2", "xz", 'elf', 'data']
        if len(dump_image_format) == 0 or dump_image_format not in valid_format:
            logging.debug("No need check the dumped file format")
            return True
        else:
            file_cmd = "file %s" % dump_file
            (status, output) = commands.getstatusoutput(file_cmd)
            if status:
                logging.error("Fail to check dumped file %s", dump_file)
                return False
            logging.debug("Run file %s output: %s", dump_file, output)
            actual_format = output.split(" ")[1]
            if actual_format.lower() != dump_image_format.lower():
                logging.error("Compress dumped file to %s fail: %s" %
                              (dump_image_format, actual_format))
                return False
            else:
                return True

    # Configure dump_image_format in /etc/libvirt/qemu.conf.
    qemu_config = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    if len(dump_image_format):
        qemu_config.dump_image_format = dump_image_format
        libvirtd.restart()

    # Deal with bypass-cache option
    child_pid = 0
    if options.find('bypass-cache') >= 0:
        pid = os.fork()
        if pid:
            # Guarantee check_bypass function has run before dump
            child_pid = pid
            try:
                wait_pid_active(pid, timeout)
            finally:
                os.kill(child_pid, signal.SIGUSR1)
        else:
            check_bypass(dump_file)
            # Wait for the parent process to terminate us
            while True:
                time.sleep(1)

    # Deal with memory-only dump format
    if len(memory_dump_format):
        # Make sure libvirt support this option
        if virsh.has_command_help_match("dump", "--format") is None:
            raise error.TestNAError("Current libvirt version doesn't support"
                                    " --format option for dump command")
        # Make sure QEMU support this format
        query_cmd = '{"execute":"query-dump-guest-memory-capability"}'
        qemu_capa = virsh.qemu_monitor_command(vm_name, query_cmd).stdout
        if (memory_dump_format not in qemu_capa) and not status_error:
            raise error.TestNAError("Unsupported dump format '%s' for"
                                    " this QEMU binary" % memory_dump_format)
        options += " --format %s" % memory_dump_format
        if memory_dump_format == 'elf':
            dump_image_format = 'elf'
        if memory_dump_format in ['kdump-zlib', 'kdump-lzo', 'kdump-snappy']:
            dump_image_format = 'data'

    # Back up xml file
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    dump_guest_core = params.get("dump_guest_core", "")
    if dump_guest_core not in ["", "on", "off"]:
        raise error.TestError("invalid dumpCore value: %s" % dump_guest_core)
    try:
        # Set dumpCore in guest xml
        if dump_guest_core:
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vmxml.dumpcore = dump_guest_core
            vmxml.sync()
            vm.start()
            # check qemu-kvm cmdline
            vm_pid = vm.get_pid()
            cmd = "cat /proc/%d/cmdline|xargs -0 echo" % vm_pid
            cmd += "|grep dump-guest-core=%s" % dump_guest_core
            result = utils.run(cmd, ignore_status=True)
            logging.debug("cmdline: %s" % result.stdout)
            if result.exit_status:
                error.TestFail("Not find dump-guest-core=%s in qemu cmdline"
                               % dump_guest_core)
            else:
                logging.info("Find dump-guest-core=%s in qemum cmdline",
                             dump_guest_core)

        # Run virsh command
        cmd_result = virsh.dump(vm_name, dump_file, options,
                                unprivileged_user=unprivileged_user,
                                uri=uri,
                                ignore_status=True, debug=True)
        status = cmd_result.exit_status

        logging.info("Start check result")
        if not check_domstate(vm.state(), options):
            raise error.TestFail("Domain status check fail.")
        if status_error:
            if not status:
                raise error.TestFail("Expect fail, but run successfully")
        else:
            if status:
                raise error.TestFail("Expect succeed, but run fail")
            if not os.path.exists(dump_file):
                raise error.TestFail("Fail to find domain dumped file.")
            if check_dump_format(dump_image_format, dump_file):
                logging.info("Successfully dump domain to %s", dump_file)
            else:
                raise error.TestFail("The format of dumped file is wrong.")
    finally:
        if child_pid:
            os.kill(child_pid, signal.SIGUSR1)
        if os.path.isfile(dump_file):
            os.remove(dump_file)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
        qemu_config.restore()
        libvirtd.restart()
Example #36
0
    # Configure dump_image_format in /etc/libvirt/qemu.conf.
    qemu_config = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    if len(dump_image_format):
        qemu_config.dump_image_format = dump_image_format
        libvirtd.restart()

    # Deal with memory-only dump format
    if len(memory_dump_format):
        # Make sure libvirt support this option
        if virsh.has_command_help_match("dump", "--format") is None:
            test.cancel("Current libvirt version doesn't support"
                        " --format option for dump command")
        # Make sure QEMU support this format
        query_cmd = '{"execute":"query-dump-guest-memory-capability"}'
        qemu_capa = virsh.qemu_monitor_command(vm_name, query_cmd).stdout
        if (memory_dump_format not in qemu_capa) and not status_error:
            test.cancel("Unsupported dump format '%s' for"
                        " this QEMU binary" % memory_dump_format)
        options += " --format %s" % memory_dump_format
        if memory_dump_format == 'elf':
            dump_image_format = 'elf'
        if memory_dump_format in ['kdump-zlib', 'kdump-lzo', 'kdump-snappy']:
            dump_image_format = 'data'

    # Back up xml file
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    dump_guest_core = params.get("dump_guest_core", "")
    if dump_guest_core not in ["", "on", "off"]:
Example #37
0
def run(test, params, env):
    """
    Stress test for the hotplug feature of usb device.
    """
    # get the params from params
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)

    keyboard = "yes" == params.get("usb_hotplug_keyboard", "no")
    mouse = "yes" == params.get("usb_hotplug_mouse", "no")
    tablet = "yes" == params.get("usb_hotplug_tablet", "no")
    disk = "yes" == params.get("usb_hotplug_disk", "no")

    attach_count = int(params.get("attach_count", "1"))
    attach_type = params.get("attach_type", "attach_device")
    bench_type = params.get("guest_bench", None)
    control_file = params.get("control_file", None)

    status_error = "yes" == params.get("status_error", "no")

    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()

    tmp_dir = os.path.join(data_dir.get_tmp_dir(), "usb_hotplug_files")

    if control_file is not None:
        params["test_control_file"] = control_file
        params["main_vm"] = vm_name
        control_path = os.path.join(test.virtdir, "control", control_file)

        session = vm.wait_for_login()
        command = utils_test.run_autotest(vm, session, control_path, None, None, params, copy_only=True)
        session.cmd("%s &" % command)

        def _is_iozone_running():
            session_tmp = vm.wait_for_login()
            return not session_tmp.cmd_status("ps -ef|grep iozone|grep -v grep")

        def _is_stress_running():
            session_tmp = vm.wait_for_login()
            return not session_tmp.cmd_status("ps -ef|grep stress|grep -v grep")

        if bench_type == "stress":
            if not utils_misc.wait_for(_is_stress_running, timeout=160):
                raise error.TestNAError(
                    "Failed to run stress in the guest.\n"
                    "We need to run an autotest benchmark in the guest, "
                    "so please make sure the necessary packages "
                    "(such as gcc, tar and bzip2) are installed there."
                )
        elif bench_type == "iozone":
            if not utils_misc.wait_for(_is_iozone_running, timeout=160):
                raise error.TestNAError(
                    "Failed to run iozone in the guest.\n"
                    "We need to run an autotest benchmark in the guest, "
                    "so please make sure the necessary packages "
                    "(such as gcc, tar and bzip2) are installed there."
                )
        logging.debug("The benchmark is already running in the guest.")
    try:
        try:
            result = None
            disk_xml = None
            tablet_xml = None
            mouse_xml = None
            if not os.path.isdir(tmp_dir):
                os.mkdir(tmp_dir)
            for i in range(attach_count):
                path = os.path.join(tmp_dir, "%s.img" % i)
                if attach_type == "qemu_monitor":
                    options = "--hmp"
                    if disk:
                        utils_test.libvirt.create_local_disk("file", path, size="1M")
                        attach_cmd = "drive_add"
                        attach_cmd += " 0 id=drive-usb-disk%s,if=none,file=%s" % (i, path)

                        result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=options)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if keyboard:
                        attach_cmd = "device_add"
                        attach_cmd += " usb-kdb,bus=usb1.0,id=kdb"

                        result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=options)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if mouse:
                        attach_cmd = "device_add"
                        attach_cmd += " usb-mouse,bus=usb1.0,id=mouse"

                        result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=options)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if tablet:
                        attach_cmd = "device_add"
                        attach_cmd += " usb-tablet,bus=usb1.0,id=tablet"

                        result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=options)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                else:
                    if disk:
                        utils_test.libvirt.create_local_disk("file", path, size="1M")
                        os.chmod(path, 0o666)
                        disk_xml = Disk(type_name="file")
                        disk_xml.device = "disk"
                        disk_xml.source = disk_xml.new_disk_source(**{"attrs": {"file": path}})
                        disk_xml.driver = {"name": "qemu", "type": "raw", "cache": "none"}
                        disk_xml.target = {"dev": "sdb", "bus": "usb"}

                        attributes = {"type_name": "usb", "bus": "1", "port": "0"}
                        disk_xml.address = disk_xml.new_disk_address(**{"attrs": attributes})

                        result = virsh.attach_device(vm_name, disk_xml.xml)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if mouse:
                        mouse_xml = Input("mouse")
                        mouse_xml.input_bus = "usb"
                        attributes = {"type_name": "usb", "bus": "1", "port": "0"}
                        mouse_xml.address = mouse_xml.new_input_address(**{"attrs": attributes})

                        result = virsh.attach_device(vm_name, mouse_xml.xml)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if tablet:
                        tablet_xml = Input("tablet")
                        tablet_xml.input_bus = "usb"
                        attributes = {"type_name": "usb", "bus": "1", "port": "0"}
                        tablet_xml.address = tablet_xml.new_input_address(**{"attrs": attributes})

                        result = virsh.attach_device(vm_name, tablet_xml.xml)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if keyboard:
                        kbd_xml = Input("keyboard")
                        kbd_xml.input_bus = "usb"
                        attributes = {"type_name": "usb", "bus": "1", "port": "0"}
                        kbd_xml.address = kbd_xml.new_input_address(**{"attrs": attributes})

                        result = virsh.attach_device(vm_name, kbd_xml.xml)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)

                if attach_type == "qemu_monitor":
                    options = "--hmp"
                    if disk:
                        attach_cmd = "drive_del"
                        attach_cmd += " drive-usb-disk"

                        result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=options)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if mouse:
                        attach_cmd = "device_del"
                        attach_cmd += " mouse"

                        result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=options)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if keyboard:
                        attach_cmd = "device_del"
                        attach_cmd += " keyboard"

                        result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=options)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if tablet:
                        attach_cmd = "device_del"
                        attach_cmd += " tablet"

                        result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=options)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                else:
                    if disk:
                        result = virsh.detach_device(vm_name, disk_xml.xml)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if mouse:
                        result = virsh.detach_device(vm_name, mouse_xml.xml)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if keyboard:
                        result = virsh.detach_device(vm_name, kbd_xml.xml)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if tablet:
                        result = virsh.detach_device(vm_name, tablet_xml.xml)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
        except process.CmdError as e:
            if not status_error:
                raise error.TestFail("Failed to attach device.\nDetail: %s." % result)
    finally:
        if os.path.isdir(tmp_dir):
            shutil.rmtree(tmp_dir)
        vm_xml_backup.sync()
Example #38
0
            if usb_type == "storage":
                path = os.path.join(tmp_dir, "%s.img" % i)
                libvirt.create_local_disk("file",
                                          path,
                                          size="1M",
                                          disk_format="qcow2")
                os.chmod(path, 0o666)

            if attach_type == "qemu_monitor":
                if usb_type == "storage":
                    attach_cmd = "drive_add"
                    attach_cmd += (" 0 id=drive-usb-%s,if=none,file=%s" %
                                   (i, path))

                    result = virsh.qemu_monitor_command(vm_name,
                                                        attach_cmd,
                                                        options=opt)
                    if result.exit_status or (result.stdout.find("OK") == -1):
                        raise process.CmdError(result.command, result)

                    attach_cmd = "device_add usb-storage,"
                    attach_cmd += (
                        "id=drive-usb-%s,bus=usb1.0,drive=drive-usb-%s" %
                        (i, i))
                else:
                    attach_cmd = "device_add"
                    attach_cmd += " usb-%s,bus=usb1.0,id=%s%s" % (usb_type,
                                                                  usb_type, i)

                result = virsh.qemu_monitor_command(vm_name,
                                                    attach_cmd,
Example #39
0
    # Configure dump_image_format in /etc/libvirt/qemu.conf.
    qemu_config = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    if len(dump_image_format):
        qemu_config.dump_image_format = dump_image_format
        libvirtd.restart()

    # Deal with memory-only dump format
    if len(memory_dump_format):
        # Make sure libvirt support this option
        if virsh.has_command_help_match("dump", "--format") is None:
            test.cancel("Current libvirt version doesn't support"
                        " --format option for dump command")
        # Make sure QEMU support this format
        query_cmd = '{"execute":"query-dump-guest-memory-capability"}'
        qemu_capa = virsh.qemu_monitor_command(vm_name, query_cmd).stdout
        if (memory_dump_format not in qemu_capa) and not status_error:
            test.cancel("Unsupported dump format '%s' for"
                        " this QEMU binary" % memory_dump_format)
        options += " --format %s" % memory_dump_format
        if memory_dump_format == 'elf':
            dump_image_format = 'elf'
        if memory_dump_format in ['kdump-zlib', 'kdump-lzo', 'kdump-snappy']:
            dump_image_format = 'data'

    # Back up xml file
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    dump_guest_core = params.get("dump_guest_core", "")
    if dump_guest_core not in ["", "on", "off"]:
Example #40
0
def run(test, params, env):
    """
    Test command: virsh qemu-monitor-command.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vm_ref = params.get("vm_ref", "domname")
    vm_state = params.get("vm_state", "running")
    cmd = params.get("qemu_cmd", "")
    options = params.get("options", "")
    status_error = "yes" == params.get("status_error", "no")
    domuuid = vm.get_uuid()
    domid = ""
    libvirtd_inst = utils_libvirtd.Libvirtd()

    help_info = virsh.help("qemu-monitor-command").stdout.strip()
    if "--pretty" in options:
        if "--pretty" not in help_info:
            raise error.TestNAError("--pretty option is not supported in"
                                    " current version")

    try:
        # Prepare vm state for test
        if vm_state != "shutoff":
            vm.start()
            vm.wait_for_login()
            domid = vm.get_id()
        if vm_state == "paused":
            vm.pause()

        if vm_ref == "domname":
            vm_ref = vm_name
        elif vm_ref == "domid":
            vm_ref = domid
        elif vm_ref == "domuuid":
            vm_ref = domuuid
        elif domid and vm_ref == "hex_id":
            vm_ref = hex(int(domid))

        # Run virsh command
        cmd_result = virsh.qemu_monitor_command(vm_ref,
                                                cmd,
                                                options,
                                                ignore_status=True,
                                                debug=True)
        output = cmd_result.stdout.strip()
        status = cmd_result.exit_status

        # Check result
        if not libvirtd_inst.is_running():
            raise error.TestFail("Libvirtd is not running after run command.")
        if status_error:
            if not status:
                # Return status is 0 with unknown command
                if "unknown command:" in output:
                    logging.debug("Command failed: %s" % output)
                else:
                    raise error.TestFail("Expect fail, but run successfully.")
            else:
                logging.debug("Command failed as expected.")
        else:
            if status:
                raise error.TestFail("Expect succeed, but run fail.")
    finally:
        # Cleanup
        if not libvirtd_inst.is_running():
            libvirtd_inst.restart()
Example #41
0
    try:
        result = None
        dev_xml = None
        opt = "--hmp"
        for i in range(attach_count):
            if usb_type == "storage":
                path = os.path.join(tmp_dir, "%s.img" % i)
                libvirt.create_local_disk("file", path, size="1M", disk_format="qcow2")
                os.chmod(path, 0o666)

            if attach_type == "qemu_monitor":
                if usb_type == "storage":
                    attach_cmd = "drive_add"
                    attach_cmd += (" 0 id=drive-usb-%s,if=none,file=%s" % (i, path))

                    result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=opt)
                    if result.exit_status or (result.stdout.find("OK") == -1):
                        raise error.CmdError(result.command, result)

                    attach_cmd = "device_add usb-storage,"
                    attach_cmd += ("id=drive-usb-%s,bus=usb1.0,drive=drive-usb-%s" % (i, i))
                else:
                    attach_cmd = "device_add"
                    attach_cmd += " usb-%s,bus=usb1.0,id=%s%s" % (usb_type, usb_type, i)

                result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=opt)
                if result.exit_status:
                    raise error.CmdError(result.command, result)
            else:
                attributes = {'type_name': "usb", 'bus': "1", 'port': "0"}
                if usb_type == "storage":
Example #42
0
def copied_migration(test, vm, params, blockjob_type=None, block_target="vda"):
    """
    Migrate VMs with storage copied while the guest is under stress.
    During the migration, some qemu-monitor-command calls are sent.
    """
    dest_uri = params.get("migrate_dest_uri")
    remote_host = params.get("migrate_dest_host")
    copy_option = params.get("copy_storage_option", "")
    username = params.get("remote_user")
    password = params.get("migrate_dest_pwd")
    timeout = int(params.get("thread_timeout", 1200))
    options = "--live %s --unsafe" % copy_option

    # Get vm ip for remote checking
    if vm.is_dead():
        vm.start()
    vm.wait_for_login()
    vms_ip = {}
    vms_ip[vm.name] = vm.get_address()
    logging.debug("VM %s IP: %s", vm.name, vms_ip[vm.name])

    # Start to load stress
    stress_type = params.get("migrate_stress_type")
    if stress_type == "cpu":
        params['stress_args'] = "--cpu 2 --quiet --timeout 60"
    elif stress_type == "memory":
        params['stress_args'] = "--vm 2 --vm-bytes 256M --vm-keep --timeout 60"
    if stress_type is not None:
        utils_test.load_stress("stress_in_vms", params=params, vms=[vm])

    cp_mig = utlv.MigrationTest()
    migration_thread = threading.Thread(target=cp_mig.thread_func_migration,
                                        args=(vm, dest_uri, options))
    migration_thread.start()
    # Wait for migration launched
    time.sleep(5)
    job_ret = virsh.domjobinfo(vm.name, debug=True)
    if job_ret.exit_status:
        test.error("Prepare migration for blockjob failed.")

    # Execute some qemu monitor commands
    pause_cmd = "block-job-pause %s" % block_target
    resume_cmd = "block-job-resume %s" % block_target
    cancel_cmd = "block-job-cancel %s" % block_target
    complete_cmd = "block-job-complete %s" % block_target

    blockjob_failures = []
    try:
        if blockjob_type == "cancel":
            virsh.qemu_monitor_command(vm.name, cancel_cmd, debug=True,
                                       ignore_status=False)
        elif blockjob_type == "pause_resume":
            virsh.qemu_monitor_command(vm.name, pause_cmd, debug=True,
                                       ignore_status=False)
            # TODO: Check whether it is paused.
            virsh.qemu_monitor_command(vm.name, resume_cmd, debug=True,
                                       ignore_status=False)
        elif blockjob_type == "complete":
            virsh.qemu_monitor_command(vm.name, complete_cmd, debug=True,
                                       ignore_status=False)
    except process.CmdError as detail:
        blockjob_failures.append(str(detail))

    # Job info FYI
    virsh.domjobinfo(vm.name, debug=True)

    if len(blockjob_failures):
        timeout = 30

    migration_thread.join(timeout)
    if migration_thread.is_alive():
        logging.error("Migration thread %s timed out.", migration_thread)
        cp_mig.RET_LOCK.acquire()
        cp_mig.RET_MIGRATION = False
        cp_mig.RET_LOCK.release()

    if len(blockjob_failures):
        cp_mig.cleanup_dest_vm(vm, None, dest_uri)
        test.fail("Run qemu monitor command failed %s"
                  % blockjob_failures)

    check_ip_failures = []
    if cp_mig.RET_MIGRATION:
        try:
            utils_test.check_dest_vm_network(vm, vms_ip[vm.name],
                                             remote_host, username,
                                             password)
        except exceptions.TestFail as detail:
            check_ip_failures.append(str(detail))
        cp_mig.cleanup_dest_vm(vm, None, dest_uri)
        if blockjob_type in ["cancel", "complete"]:
            test.fail("Storage migration passed even after "
                      "cancellation.")
    else:
        cp_mig.cleanup_dest_vm(vm, None, dest_uri)
        if blockjob_type in ["cancel", "complete"]:
            logging.error("Expected Migration Error for %s", blockjob_type)
            return
        else:
            test.fail("Command blockjob does not work well under "
                      "storage copied migration.")

    if len(check_ip_failures):
        test.fail("Check IP failed:%s" % check_ip_failures)
Example #43
0
def hotplug_domain_vcpu(vm, count, by_virsh=True, hotplug=True):
    """
    Hot-plug/Hot-unplug vcpus for a domain

    :param vm:   VM object
    :param count:    for setvcpus this is the target number of vcpus,
                     but for qemu-monitor-command we need to designate
                     a specific CPU ID, which defaults to (count - 1)
    :param by_virsh: True means hotplug/unplug by command setvcpus,
                     otherwise, using qemu_monitor
    :param hotplug:  True means hot-plug, False means hot-unplug
    """
    if by_virsh:
        result = virsh.setvcpus(vm.name, count, "--live", debug=True)
    else:
        cmds = []
        cmd_type = "--hmp"
        result = None
        if "ppc" in platform.machine():
            vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm.name)
            topology = vmxml.get_cpu_topology()
            vcpu_count = vm.get_cpu_count()

            if topology:
                threads = int(topology["threads"])
            else:
                threads = 1
            # check whether count is a multiple of threads
            err_str = "Expected vcpu counts to be multiples of %d" % threads
            if hotplug:
                err_str += ", invalid vcpu count for hotplug"
            else:
                err_str += ", invalid vcpu count for hotunplug"
            if (count % threads) != 0:
                raise exceptions.TestError(err_str)
            if hotplug:
                for item in range(0, int(count), threads):
                    if item < vcpu_count:
                        continue
                    cmds.append("device_add host-spapr-cpu-core,id=core%d,core-id=%d" % (item, item))
            else:
                for item in range(int(count), vcpu_count, threads):
                    cmds.append("device_del core%d" % item)
        else:
            cmd_type = "--pretty"
            if hotplug:
                cpu_opt = "cpu-add"
            else:
                cpu_opt = "cpu-del"
                # Note: cpu-del is not supported currently; it will return
                # an error as follows,
                # {
                #    "id": "libvirt-23",
                #    "error": {
                #        "class": "CommandNotFound",
                #        "desc": "The command cpu-del has not been found"
                #    }
                # }
                # so, the caller should check the result.
            # hot-plug/hot-unplug the CPU with the maximal ID
            params = (cpu_opt, (count - 1))
            cmds.append('{\"execute\":\"%s\",\"arguments\":{\"id\":%d}}' % params)
        # Execute cmds to hot(un)plug
        for cmd in cmds:
            result = virsh.qemu_monitor_command(vm.name, cmd, cmd_type,
                                                debug=True)
            if result.exit_status != 0:
                raise exceptions.TestFail(result.stderr_text)
            else:
                logging.debug("Command output:\n%s",
                              result.stdout_text.strip())
    return result
Example #44
0
def copied_migration(vm, params, blockjob_type=None, block_target="vda"):
    """
    Migrate VMs with storage copied while the guest is under stress.
    During the migration, some qemu-monitor-command calls are sent.
    """
    dest_uri = params.get("migrate_dest_uri")
    remote_host = params.get("migrate_dest_host")
    copy_option = params.get("copy_storage_option", "")
    username = params.get("remote_user")
    password = params.get("migrate_dest_pwd")
    timeout = int(params.get("thread_timeout", 1200))
    options = "--live %s --unsafe" % copy_option

    # Get vm ip for remote checking
    if vm.is_dead():
        vm.start()
    vm.wait_for_login()
    vms_ip = {}
    vms_ip[vm.name] = vm.get_address()
    logging.debug("VM %s IP: %s", vm.name, vms_ip[vm.name])

    # Start to load stress
    stress_type = params.get("migrate_stress_type")
    if stress_type == "cpu":
        params['stress_args'] = "--cpu 2 --quiet --timeout 60"
    elif stress_type == "memory":
        params['stress_args'] = "--vm 2 --vm-bytes 256M --vm-keep --timeout 60"
    if stress_type is not None:
        utils_test.load_stress("stress_in_vms", [vm], params)

    cp_mig = utlv.MigrationTest()
    migration_thread = threading.Thread(target=cp_mig.thread_func_migration,
                                        args=(vm, dest_uri, options))
    migration_thread.start()
    # Wait for migration launched
    time.sleep(5)
    job_ret = virsh.domjobinfo(vm.name, debug=True)
    if job_ret.exit_status:
        raise error.TestError("Prepare migration for blockjob failed.")

    # Execute some qemu monitor commands
    pause_cmd = "block-job-pause %s" % block_target
    resume_cmd = "block-job-resume %s" % block_target
    cancel_cmd = "block-job-cancel %s" % block_target
    complete_cmd = "block-job-complete %s" % block_target

    blockjob_failures = []
    try:
        if blockjob_type == "cancel":
            virsh.qemu_monitor_command(vm.name, cancel_cmd, debug=True,
                                       ignore_status=False)
        elif blockjob_type == "pause_resume":
            virsh.qemu_monitor_command(vm.name, pause_cmd, debug=True,
                                       ignore_status=False)
            # TODO: Check whether it is paused.
            virsh.qemu_monitor_command(vm.name, resume_cmd, debug=True,
                                       ignore_status=False)
        elif blockjob_type == "complete":
            virsh.qemu_monitor_command(vm.name, complete_cmd, debug=True,
                                       ignore_status=False)
    except error.CmdError as detail:
        blockjob_failures.append(str(detail))
Example #45
0
def run(test, params, env):
    """
    Stress test for the hotplug feature of usb device.
    """
    # get the params from params
    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm = env.get_vm(vm_name)

    keyboard = "yes" == params.get("usb_hotplug_keyboard", "no")
    mouse = "yes" == params.get("usb_hotplug_mouse", "no")
    tablet = "yes" == params.get("usb_hotplug_tablet", "no")
    disk = "yes" == params.get("usb_hotplug_disk", "no")

    attach_count = int(params.get("attach_count", "1"))
    attach_type = params.get("attach_type", "attach_device")
    bench_type = params.get("guest_bench", None)
    control_file = params.get("control_file", None)

    status_error = ("yes" == params.get("status_error", "no"))

    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()

    tmp_dir = os.path.join(data_dir.get_tmp_dir(), "usb_hotplug_files")

    if control_file is not None:
        params["test_control_file"] = control_file
        params["main_vm"] = vm_name
        control_path = os.path.join(test.virtdir, "control", control_file)

        session = vm.wait_for_login()
        command = utils_test.run_autotest(vm,
                                          session,
                                          control_path,
                                          None,
                                          None,
                                          params,
                                          copy_only=True)
        session.cmd("%s &" % command)

        def _is_iozone_running():
            session_tmp = vm.wait_for_login()
            return (
                not session_tmp.cmd_status("ps -ef|grep iozone|grep -v grep"))

        def _is_stress_running():
            session_tmp = vm.wait_for_login()
            return (
                not session_tmp.cmd_status("ps -ef|grep stress|grep -v grep"))

        if bench_type == "stress":
            if not utils_misc.wait_for(_is_stress_running, timeout=160):
                raise error.TestNAError(
                    "Failed to run stress in guest.\n"
                    "Since we need to run a autotest of iozone "
                    "in guest, so please make sure there are "
                    "some necessary packages in guest,"
                    "such as gcc, tar, bzip2")
        elif bench_type == "iozone":
            if not utils_misc.wait_for(_is_iozone_running, timeout=160):
                raise error.TestNAError(
                    "Failed to run iozone in guest.\n"
                    "Since we need to run a autotest of iozone "
                    "in guest, so please make sure there are "
                    "some necessary packages in guest,"
                    "such as gcc, tar, bzip2")
        logging.debug("bench is already running in guest.")
    try:
        try:
            result = None
            disk_xml = None
            tablet_xml = None
            mouse_xml = None
            if not os.path.isdir(tmp_dir):
                os.mkdir(tmp_dir)
            for i in range(attach_count):
                path = os.path.join(tmp_dir, "%s.img" % i)
                if attach_type == "qemu_monitor":
                    options = "--hmp"
                    if disk:
                        utils_test.libvirt.create_local_disk("file",
                                                             path,
                                                             size="1M")
                        attach_cmd = "drive_add"
                        attach_cmd += (
                            " 0 id=drive-usb-disk%s,if=none,file=%s" %
                            (i, path))

                        result = virsh.qemu_monitor_command(vm_name,
                                                            attach_cmd,
                                                            options=options)
                        if result.exit_status:
                            raise error.CmdError(result.command, result)
                    if keyboard:
                        attach_cmd = "device_add"
                        attach_cmd += " usb-kdb,bus=usb1.0,id=kdb"

                        result = virsh.qemu_monitor_command(vm_name,
                                                            attach_cmd,
                                                            options=options)
                        if result.exit_status:
                            raise error.CmdError(result.command, result)
                    if mouse:
                        attach_cmd = "device_add"
                        attach_cmd += " usb-mouse,bus=usb1.0,id=mouse"

                        result = virsh.qemu_monitor_command(vm_name,
                                                            attach_cmd,
                                                            options=options)
                        if result.exit_status:
                            raise error.CmdError(result.command, result)
                    if tablet:
                        attach_cmd = "device_add"
                        attach_cmd += " usb-tablet,bus=usb1.0,id=tablet"

                        result = virsh.qemu_monitor_command(vm_name,
                                                            attach_cmd,
                                                            options=options)
                        if result.exit_status:
                            raise error.CmdError(result.command, result)
                else:
                    if disk:
                        utils_test.libvirt.create_local_disk("file",
                                                             path,
                                                             size="1M")
                        os.chmod(path, 0o666)
                        disk_xml = Disk(type_name="file")
                        disk_xml.device = "disk"
                        disk_xml.source = disk_xml.new_disk_source(
                            **{"attrs": {
                                'file': path
                            }})
                        disk_xml.driver = {
                            "name": "qemu",
                            "type": 'raw',
                            "cache": "none"
                        }
                        disk_xml.target = {"dev": 'sdb', "bus": "usb"}

                        attributes = {
                            'type_name': "usb",
                            'bus': "1",
                            'port': "0"
                        }
                        disk_xml.address = disk_xml.new_disk_address(
                            **{"attrs": attributes})

                        result = virsh.attach_device(vm_name, disk_xml.xml)
                        if result.exit_status:
                            raise error.CmdError(result.command, result)
                    if mouse:
                        mouse_xml = Input("mouse")
                        mouse_xml.input_bus = "usb"
                        attributes = {
                            'type_name': "usb",
                            'bus': "1",
                            'port': "0"
                        }
                        mouse_xml.address = mouse_xml.new_input_address(
                            **{"attrs": attributes})

                        result = virsh.attach_device(vm_name, mouse_xml.xml)
                        if result.exit_status:
                            raise error.CmdError(result.command, result)
                    if tablet:
                        tablet_xml = Input("tablet")
                        tablet_xml.input_bus = "usb"
                        attributes = {
                            'type_name': "usb",
                            'bus': "1",
                            'port': "0"
                        }
                        tablet_xml.address = tablet_xml.new_input_address(
                            **{"attrs": attributes})

                        result = virsh.attach_device(vm_name, tablet_xml.xml)
                        if result.exit_status:
                            raise error.CmdError(result.command, result)
                    if keyboard:
                        kbd_xml = Input("keyboard")
                        kbd_xml.input_bus = "usb"
                        attributes = {
                            'type_name': "usb",
                            'bus': "1",
                            'port': "0"
                        }
                        kbd_xml.address = kbd_xml.new_input_address(
                            **{"attrs": attributes})

                        result = virsh.attach_device(vm_name, kbd_xml.xml)
                        if result.exit_status:
                            raise error.CmdError(result.command, result)

                if attach_type == "qemu_monitor":
                    options = "--hmp"
                    if disk:
                        attach_cmd = "drive_del"
                        attach_cmd += (" drive-usb-disk")

                        result = virsh.qemu_monitor_command(vm_name,
                                                            attach_cmd,
                                                            options=options)
                        if result.exit_status:
                            raise error.CmdError(result.command, result)
                    if mouse:
                        attach_cmd = "device_del"
                        attach_cmd += (" mouse")

                        result = virsh.qemu_monitor_command(vm_name,
                                                            attach_cmd,
                                                            options=options)
                        if result.exit_status:
                            raise error.CmdError(result.command, result)
                    if keyboard:
                        attach_cmd = "device_del"
                        attach_cmd += (" keyboard")

                        result = virsh.qemu_monitor_command(vm_name,
                                                            attach_cmd,
                                                            options=options)
                        if result.exit_status:
                            raise error.CmdError(result.command, result)
                    if tablet:
                        attach_cmd = "device_del"
                        attach_cmd += (" tablet")

                        result = virsh.qemu_monitor_command(vm_name,
                                                            attach_cmd,
                                                            options=options)
                        if result.exit_status:
                            raise error.CmdError(result.command, result)
                else:
                    if disk:
                        result = virsh.detach_device(vm_name, disk_xml.xml)
                        if result.exit_status:
                            raise error.CmdError(result.command, result)
                    if mouse:
                        result = virsh.detach_device(vm_name, mouse_xml.xml)
                        if result.exit_status:
                            raise error.CmdError(result.command, result)
                    if keyboard:
                        result = virsh.detach_device(vm_name, kbd_xml.xml)
                        if result.exit_status:
                            raise error.CmdError(result.command, result)
                    if tablet:
                        result = virsh.detach_device(vm_name, tablet_xml.xml)
                        if result.exit_status:
                            raise error.CmdError(result.command, result)
        except error.CmdError as e:
            if not status_error:
                raise error.TestFail("Failed to attach/detach device.\n"
                                     "Detail: %s." % result)
    finally:
        if os.path.isdir(tmp_dir):
            shutil.rmtree(tmp_dir)
        vm_xml_backup.sync()
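Both examples repeat the same pattern: send an HMP command through virsh.qemu_monitor_command and fail when the exit status is non-zero. A small helper along these lines (a sketch; the name hmp_cmd is not part of the test suite) would keep that repetition down:

def hmp_cmd(vm_name, cmd):
    """Run an HMP command via virsh qemu-monitor-command --hmp and
    raise error.CmdError on a non-zero exit status."""
    result = virsh.qemu_monitor_command(vm_name, cmd, options="--hmp")
    if result.exit_status:
        raise error.CmdError(result.command, result)
    return result

# Usage sketch:
# hmp_cmd(vm_name, "device_add usb-kbd,bus=usb1.0,id=kdb")
# hmp_cmd(vm_name, "device_del kdb")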