Example 1
    def setUp(self):
        # Make all virsh commands fail the test unconditionally
        for symbol in dir(virsh):
            if symbol not in virsh.NOCLOSE:
                # Exceptions are callable
                setattr(virsh, symbol, self.bogusVirshFailureException)
        # Cause any called virsh command to fail the test unless a mock is
        # declared; necessary so the virsh module doesn't complain about a
        # missing virsh command, and to catch any libvirt_xml interface
        # that calls virsh functions unexpectedly.
        self.dummy_virsh = virsh.Virsh(virsh_exec='/bin/false',
                                       uri='qemu:///system',
                                       debug=True,
                                       ignore_status=True)

        # make a tmp_dir to store information.
        LibvirtXMLTestBase.__doms_dir__ = os.path.join(data_dir.get_tmp_dir(),
                                                       'domains')
        if not os.path.isdir(LibvirtXMLTestBase.__doms_dir__):
            os.makedirs(LibvirtXMLTestBase.__doms_dir__)

        # Normally not kosher to call super_set, but required here for testing
        self.dummy_virsh.__super_set__('capabilities', self._capabilities)
        self.dummy_virsh.__super_set__('dumpxml', self._dumpxml)
        self.dummy_virsh.__super_set__('domuuid', self._domuuid)
        self.dummy_virsh.__super_set__('define', self._define)
        self.dummy_virsh.__super_set__('nodedev_dumpxml',
                                       self._nodedev_dumpxml)
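A hypothetical test method built on this setUp could look like the sketch below; only the entry points re-pointed above return data, while any other virsh call raises. The `vm_xml` import path and the `new_from_dumpxml` keyword follow avocado-vt conventions, and the asserted value assumes `self._dumpxml` returns XML naming the domain 'foobar'.

    def test_dumpxml_mock(self):
        # Sketch, not part of the original suite: 'dumpxml' was re-pointed
        # to self._dumpxml in setUp, so this call succeeds; any unmocked
        # virsh function raises bogusVirshFailureException instead.
        from virttest.libvirt_xml import vm_xml
        vmxml = vm_xml.VMXML.new_from_dumpxml(
            'foobar', virsh_instance=self.dummy_virsh)
        self.assertEqual(vmxml.vm_name, 'foobar')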
Example 2
def do_high_level_test(virsh_dargs, test_xml, net_name, net_uuid, bridge):

    test_netxml = libvirt_xml.NetworkXML(
        virsh_instance=virsh.Virsh(**virsh_dargs))
    test_netxml.xml = test_xml.name

    # modify XML if called for
    if net_name != "":
        test_netxml.name = net_name
    else:
        test_netxml.name = "default"
    if net_uuid != "":
        test_netxml.uuid = net_uuid
    else:
        del test_netxml.uuid  # let libvirt auto-generate
    if bridge is not None:
        test_netxml.bridge = bridge

    # TODO: Test other network parameters

    # Network XML is small; just print it out.
    logging.debug("Modified XML:")
    test_netxml.debug_xml()

    try:
        test_netxml.create()
        return test_netxml.defined
    except (IOError, error.CmdError) as cmd_excpt:
        # CmdError catches failing virsh commands
        # IOError catches corrupt XML data
        logging.debug("Exception-thrown: " + str(cmd_excpt))
        return False
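A usage sketch with hypothetical values: the function expects a file-like object whose `.name` points at network XML on disk, plus the keyword arguments for `virsh.Virsh`.

# Sketch, not from the original module: feed a temp file of network XML
# to do_high_level_test and let libvirt auto-generate the UUID.
import tempfile

xml_file = tempfile.NamedTemporaryFile(mode='w', suffix='.xml')
xml_file.write('<network><name>default</name></network>')
xml_file.flush()
virsh_dargs = {'uri': 'qemu:///system', 'ignore_status': True}
passed = do_high_level_test(virsh_dargs, xml_file, 'testnet', '', None)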
Example 3
def FakeVirshFactory(preserve=None):
    """
    Return a Virsh() instance whose methods raise bogusVirshFailureException.

    Users should override the methods under test on the returned instance.
    :param preserve: List of symbol names NOT to replace; None replaces all
    """
    from virttest import virsh

    def raise_bogusVirshFailureException(*args, **dargs):
        raise bogusVirshFailureException()

    if preserve is None:
        preserve = []
    fake_virsh = virsh.Virsh(virsh_exec='/bin/false',
                             uri='qemu:///system', debug=True,
                             ignore_status=True)
    # Make every virsh command raise an exception when called
    for symbol in dir(virsh):
        # Only replace closure functions created by the Virsh class
        if symbol in virsh.NOCLOSE + preserve:
            continue
        if isinstance(getattr(fake_virsh, symbol), virsh.VirshClosure):
            # fake_virsh is a propcan, can't use setattr.
            fake_virsh.__super_set__(symbol, raise_bogusVirshFailureException)
    return fake_virsh
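Typical use is sketched below; the stub and its return value are hypothetical. Everything on the instance raises except the one command re-pointed with `__super_set__`.

# Sketch: every virsh command on this instance raises, except the one
# we explicitly re-point (the 'fake-uuid' stub is hypothetical).
fake_virsh = FakeVirshFactory()
fake_virsh.__super_set__('domuuid', lambda *args, **dargs: 'fake-uuid')
assert fake_virsh.domuuid('somevm') == 'fake-uuid'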
Example 4
    def setUp(self):
        # Cause all virsh commands to do nothing and return nothing;
        # necessary so the virsh module doesn't complain about a missing
        # virsh command
        self.dummy_virsh = virsh.Virsh(virsh_exec='/bin/true',
                                       uri='qemu:///system',
                                       debug=True,
                                       ignore_status=True)
        # Normally not kosher to call super_set, but required here for testing
        self.dummy_virsh.super_set('capabilities', self._capabilities)
        self.dummy_virsh.super_set('dumpxml', self._dumpxml)
        self.dummy_virsh.super_set('domuuid', self._domuuid)
Example 5
    def __init__(self, persistent=False, virsh_dargs=None):
        """
        Initialize instance's internal virsh interface from virsh_dargs

        :param persistent: Use persistent virsh connection for this instance
        :param virsh_dargs: virsh module Virsh class dargs API keywords
        """

        if virsh_dargs is None:
            virsh_dargs = {}  # avoid additional conditionals below
        if persistent:
            self.super_set('__virsh__', virsh.VirshPersistent(**virsh_dargs))
        else:
            self.super_set('__virsh__', virsh.Virsh(**virsh_dargs))
        # Don't define any initial property values
        super(LibvirtXMLBase, self).__init__()
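A sketch of driving this constructor; `SomeDeviceXML` is a hypothetical subclass, and the keywords are the two documented above.

# Sketch (SomeDeviceXML is hypothetical): one persistent virsh session
# is created up front and shared by every operation on the instance.
some_xml = SomeDeviceXML(persistent=True,
                         virsh_dargs={'uri': 'qemu:///system',
                                      'ignore_status': True})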
Example 6
    def setUp(self):
        # Avoid depending on libvirt packages that may not be installed
        self.bogus_virsh = virsh.Virsh(virsh_exec=virsh.VIRSH_EXEC,
                                       uri='qemu:///system',
                                       debug=True,
                                       ignore_status=True)

        # Use defined virsh methods above
        self.bogus_virsh.super_set('pool_list', self._pool_list)
        self.bogus_virsh.super_set('pool_info', self._pool_info)
        self.bogus_virsh.super_set('pool_define_as', self._pool_define_as)
        self.bogus_virsh.super_set('pool_build', self._pool_build)
        self.bogus_virsh.super_set('pool_start', self._pool_start)
        self.bogus_virsh.super_set('pool_destroy', self._pool_destroy)
        self.bogus_virsh.super_set('pool_undefine', self._pool_undefine)
        self.bogus_virsh.super_set('pool_autostart', self._pool_autostart)
        self.sp = libvirt_storage.StoragePool(virsh_instance=self.bogus_virsh)
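A test method following this setUp can exercise StoragePool logic entirely against the stubs; in the sketch below the StoragePool method names and signatures are assumptions about the libvirt_storage API, not verified calls.

    def test_pool_lifecycle(self):
        # Sketch: each call dispatches to a _pool_* stub wired up in
        # setUp, so no real libvirt installation is needed.
        self.assertTrue(self.sp.define_fs_pool('unittest-pool'))
        self.assertTrue(self.sp.build_pool('unittest-pool'))
        self.assertTrue(self.sp.start_pool('unittest-pool'))
        self.assertTrue(self.sp.destroy_pool('unittest-pool'))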
Example 7
    def get_checkpoint_hash_value(self, vm, test_disk_source_file,
                                  checkpoint_name):
        """
        Get the hash value of vm disk's checkpoint

        :param vm: The vm to be operated
        :param test_disk_source_file: The source file of the test disk
        :param checkpoint_name: From which checkpoint to retrieve the hash value
        :return: The checkpoint's hash value
        """
        virsh_instance = virsh.Virsh(uri=vm.connect_uri)

        def get_disk_node_name():
            """
            Get the node name for the test disk
            :return: The node name of the test disk
            """
            cmd = '{"execute": "query-block"}'
            result = virsh_instance.qemu_monitor_command(
                vm.name, cmd, '--pretty',
                **self.virsh_dargs).stdout_text.strip()
            json_result = json.loads(result)
            blk_dev_list = json_result['return']
            for blk_dev in blk_dev_list:
                if test_disk_source_file == blk_dev['inserted']['file']:
                    if (checkpoint_name not in [
                            sub['name']
                            for sub in blk_dev['inserted']['dirty-bitmaps']
                    ]):
                        self.test.fail(
                            "test image doesn't have checkpoint %s" %
                            checkpoint_name)
                    return blk_dev['inserted']['node-name']

        disk_node_name = get_disk_node_name()
        logging.debug('test disk node name is: %s', disk_node_name)
        if not disk_node_name:
            self.test.fail('%s not used by vm' % test_disk_source_file)
        cmd = ('{{"execute": "x-debug-block-dirty-bitmap-sha256",'
               '"arguments": {{"node": "{0}",'
               '"name": "{1}"}}}}').format(disk_node_name, checkpoint_name)
        result = virsh_instance.qemu_monitor_command(
            vm.name, cmd, '--pretty', **self.virsh_dargs).stdout_text.strip()
        json_result = json.loads(result)
        return json_result['return']['sha256']
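
    # A hand-written sample (an assumption, not captured output) of the
    # query-block reply shape that get_disk_node_name() above expects:
    #   {"return": [{"inserted": {
    #       "file": "/path/to/test_disk.qcow2",         # matched to source
    #       "node-name": "libvirt-1-format",            # value returned
    #       "dirty-bitmaps": [{"name": "checkpoint1"}]  # names searched
    #   }}]}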

    def hotunplug_device(self, vm, device_xml, *check_funcs):
        """
        Hotunplug device from vm

        :param vm: vm object
        :param device_xml: device xml to be hotunplugged
        :param check_funcs: funcs to check the hotunplugged device in dumpxml
        """
        logging.debug("Hotunplug device xml: %s from vm: %s",
                      device_xml, vm.name)
        virsh_instance = virsh.Virsh(uri=vm.connect_uri)
        ret = virsh_instance.detach_device(vm.name, device_xml.xml,
                                           flagstr="--live",
                                           debug=True, ignore_status=True)
        libvirt.check_exit_status(ret)

        # Check device after hotunplug
        for func in check_funcs:
            func()

    def check_rng_presence_in_dumpxml(self, vm):
        """
        Check rng presence in domain xml

        :param vm: vm object

        :return: boolean: True if rng is present in dumpxml else False
        """
        logging.debug("Check rng presence in domain xml")

        virsh_instance = virsh.Virsh(uri=vm.connect_uri)
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm.name,
                                              virsh_instance=virsh_instance)
        # Get all current xml rng devices
        xml_devices = vmxml.devices
        rng_devices = xml_devices.by_device_tag("rng")
        logging.debug("rng_devices is %s", rng_devices)

        return bool(rng_devices)


def get_network_xml_instance(virsh_dargs, test_xml, net_name, net_uuid,
                             bridge):
    test_netxml = network_xml.NetworkXML(virsh_instance=virsh.Virsh(
        **virsh_dargs))
    test_netxml.xml = test_xml.name

    # modify XML if called for
    if net_name != "":
        test_netxml.name = net_name
    else:
        test_netxml.name = "default"
    if net_uuid != "":
        test_netxml.uuid = net_uuid
    else:
        del test_netxml.uuid  # let libvirt auto-generate
    if bridge is not None:
        test_netxml.bridge = bridge

    # TODO: Test other network parameters

    logging.debug("Modified XML:")
    test_netxml.debug_xml()
    return test_netxml
Example 11
def run(test, params, env):
    """
    Test virsh {at|de}tach-interface command.

    1) Prepare test environment and its parameters
    2) Operate virsh on one or more devices
    3) Check functionality of each device
    4) Check functionality of mmconfig option
    5) Restore domain
    6) Handle results
    """

    dev_obj = params.get("vadu_dev_objs")
    # Skip chardev hotplug on rhel6 host as it is not supported
    if "Serial" in dev_obj:
        if not libvirt_version.version_compare(1, 1, 0):
            raise error.TestNAError("You libvirt version not supported"
                                    " attach/detach Serial devices")

    logging.info("Preparing initial VM state")
    # Prepare test environment and its parameters
    test_params = TestParams(params, env, test)
    if test_params.start_vm:
        # Make sure VM is working
        test_params.main_vm.verify_alive()
        test_params.main_vm.wait_for_login().close()
    else:  # VM not supposed to be started
        if test_params.main_vm.is_alive():
            test_params.main_vm.destroy(gracefully=True)
    # Capture backup of original XML early in test
    test_params.vmxml = VMXML.new_from_inactive_dumpxml(
        test_params.main_vm.name)
    # All devices should share same access state
    test_params.virsh = virsh.Virsh(ignore_status=True)
    logging.info("Creating %d test device instances", len(test_params.devs))
    # Create test objects from cfg. class names via subclasses above
    test_devices = [globals()[class_name](test_params)  # instantiate
                    for class_name in test_params.devs]  # vadu_dev_objs
    operational_results = []
    preboot_results = []
    pstboot_results = []
    try:
        operational_action(test_params, test_devices, operational_results)
        # Fail early if attach-device return value is not expected
        analyze_results(test_params=test_params,
                        operational_results=operational_results)

        #  Can't do functional testing with a cold VM, only test hot-attach
        preboot_action(test_params, test_devices, preboot_results)

        logging.info("Preparing test VM state for post-boot functional testing")
        if test_params.start_vm:
            # Hard-reboot required
            test_params.main_vm.destroy(gracefully=True,
                                        free_mac_addresses=False)
        try:
            test_params.main_vm.start()
        except virt_vm.VMStartError as details:
            raise error.TestFail('VM failed to start: %s' % details)
        # Signal devices reboot is finished
        for test_device in test_devices:
            test_device.booted = True
        test_params.main_vm.wait_for_login().close()
        postboot_action(test_params, test_devices, pstboot_results)
        analyze_results(test_params=test_params,
                        preboot_results=preboot_results,
                        pstboot_results=pstboot_results)
    finally:
        logging.info("Restoring VM from backup, then checking results")
        test_params.main_vm.destroy(gracefully=False,
                                    free_mac_addresses=False)
        test_params.vmxml.undefine()
        test_params.vmxml.restore()  # Recover the original XML
        test_params.vmxml.define()
        if not test_params.start_vm:
            # Test began with not start_vm, shut it down.
            test_params.main_vm.destroy(gracefully=True)
        # Device cleanup can raise multiple exceptions, do it last:
        logging.info("Cleaning up test devices")
        test_params.cleanup(test_devices)


def run(test, params, env):
    """
    Test virsh {at|de}tach-device command.

    1) Prepare test environment and its parameters
    2) Operate virsh on one or more devices
    3) Check functionality of each device
    4) Check functionality of mmconfig option
    5) Restore domain
    6) Handle results
    """
    vm_name = params.get('main_vm')
    machine_type = params.get("machine_type", "pc")
    backup_vm_xml = vmxml = VMXML.new_from_inactive_dumpxml(vm_name)

    dev_obj = params.get("vadu_dev_objs")
    vadu_vdb = int(params.get("vadu_dev_obj_count_VirtualDiskBasic", "0"))
    vadu_dom_ref = params.get("vadu_dom_ref", "dom_ref")
    status_error = "yes" == params.get("status_error", "no")
    vadu_domain_positional = "yes" == params.get("vadu_domain_positional",
                                                 "no")
    vadu_file_positional = "yes" == params.get("vadu_file_positional", "no")
    vadu_preboot_error = "yes" == params.get("vadu_preboot_function_error",
                                             "no")

    # Skip chardev hotplug on rhel6 host as it is not supported
    if "Serial" in dev_obj:
        if not libvirt_version.version_compare(1, 1, 0):
            test.cancel("You libvirt version not supported"
                        " attach/detach Serial devices")
    # Prepare test environment and its parameters
    test_params = TestParams(params, env, test)

    xml_machine = vmxml.os.machine
    # Only apply to q35 guests with machine_type set correctly and block-type devices
    if 'q35' in xml_machine and machine_type == 'q35' and "VirtualDiskBasic" in dev_obj:
        # Only apply change on some cases with feature:
        # block.multi_virtio_file..normal_test.hot_attach_hot_vm..name_ref.file_positional.domain_positional
        # those cases often throw No more available PCI slots
        if vadu_vdb == 16 and not status_error \
            and not vadu_preboot_error and 'name' in vadu_dom_ref \
                and vadu_file_positional and vadu_domain_positional:

            previous_state_running = test_params.main_vm.is_alive()
            if previous_state_running:
                test_params.main_vm.destroy(gracefully=True)
            vmxml.remove_all_device_by_type('controller')
            machine_list = vmxml.os.machine.split("-")
            vmxml.set_os_attrs(
                **{"machine": machine_list[0] + "-q35-" + machine_list[2]})
            q35_pcie_dict0 = {
                'controller_model': 'pcie-root',
                'controller_type': 'pci',
                'controller_index': 0
            }
            q35_pcie_dict1 = {
                'controller_model': 'pcie-root-port',
                'controller_type': 'pci'
            }
            vmxml.add_device(libvirt.create_controller_xml(q35_pcie_dict0))
            # Add enough controllers to match max times disk attaching requirements
            for i in list(range(1, 24)):
                q35_pcie_dict1.update({'controller_index': "%d" % i})
                vmxml.add_device(libvirt.create_controller_xml(q35_pcie_dict1))
            vmxml.sync()
            logging.debug("Guest XMl with adding many controllers: %s",
                          test_params.main_vm.get_xml())
            if previous_state_running:
                test_params.main_vm.start()

    remove_non_disks(vm_name, vmxml)
    update_controllers_ppc(vm_name, vmxml)

    if params.get("remove_all_chardev", "no") == "yes":
        remove_chardevs(vm_name, vmxml)

    logging.info("Preparing initial VM state")

    if test_params.start_vm:
        # Make sure VM is working
        test_params.main_vm.verify_alive()
        test_params.main_vm.wait_for_login().close()
    else:  # VM not supposed to be started
        if test_params.main_vm.is_alive():
            test_params.main_vm.destroy(gracefully=True)
    # Capture backup of original XML early in test
    test_params.vmxml = VMXML.new_from_inactive_dumpxml(
        test_params.main_vm.name)
    # All devices should share same access state
    test_params.virsh = virsh.Virsh(ignore_status=True)
    logging.info("Creating %d test device instances", len(test_params.devs))
    # Create test objects from cfg. class names via subclasses above
    test_devices = [
        globals()[class_name](test_params, test)  # instantiate
        for class_name in test_params.devs
    ]  # vadu_dev_objs
    operational_results = []
    preboot_results = []
    pstboot_results = []
    try:
        operational_action(test_params, test_devices, operational_results)
        # Fail early if attach-device return value is not expected
        analyze_results(test_params,
                        test,
                        operational_results=operational_results)

        #  Can't do functional testing with a cold VM, only test hot-attach
        preboot_action(test_params, test_devices, preboot_results)

        logging.info(
            "Preparing test VM state for post-boot functional testing")
        if test_params.start_vm:
            # Hard-reboot required
            test_params.main_vm.destroy(gracefully=True,
                                        free_mac_addresses=False)
        try:
            logging.debug("vmxml %s", VMXML.new_from_inactive_dumpxml(vm_name))
            test_params.main_vm.start()
        except virt_vm.VMStartError as details:
            test.fail('VM failed to start: %s' % details)
        # Signal devices reboot is finished
        for test_device in test_devices:
            test_device.booted = True
        logging.debug("Current VMXML %s", test_params.main_vm.get_xml())
        test_params.main_vm.wait_for_login().close()
        postboot_action(test_params, test_devices, pstboot_results)
        analyze_results(test_params,
                        test,
                        preboot_results=preboot_results,
                        pstboot_results=pstboot_results)
    finally:
        logging.info("Restoring VM from backup, then checking results")
        test_params.main_vm.destroy(gracefully=False, free_mac_addresses=False)
        test_params.vmxml.undefine()
        test_params.vmxml.restore()  # Recover the original XML
        test_params.vmxml.define()
        if not test_params.start_vm:
            # Test began with not start_vm, shut it down.
            test_params.main_vm.destroy(gracefully=True)
        # Device cleanup can raise multiple exceptions, do it last:
        logging.info("Cleaning up test devices")
        try:
            test_params.cleanup(test_devices)
        except RuntimeError as e:
            logging.debug("Error cleaning up devices: %s", e)
        backup_vm_xml.sync()


def run(test, params, env):
    """
    Test virsh {at|de}tach-device command.

    1) Prepare test environment and its parameters
    2) Operate virsh on one or more devices
    3) Check functionality of each device
    4) Check functionality of mmconfig option
    5) Restore domain
    6) Handle results
    """
    # Remove non-disk devices before the test to avoid interference.
    # A non-disk device such as a cdrom on the 'SATA' bus is recognized
    # as 'cdrom', not 'sda' as expected, so an attached SATA disk would
    # not be recognized as 'sdb' as expected.
    vm_name = params.get('main_vm')
    backup_vm_xml = vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    disks = vmxml.devices.by_device_tag('disk')
    for disk in disks:
        if disk.device != 'disk':
            virsh.detach_disk(vm_name,
                              disk.target['dev'],
                              extra='--current',
                              debug=True)

    dev_obj = params.get("vadu_dev_objs")
    # Skip chardev hotplug on rhel6 host as it is not supported
    if "Serial" in dev_obj:
        if not libvirt_version.version_compare(1, 1, 0):
            test.cancel("You libvirt version not supported"
                        " attach/detach Serial devices")

    logging.info("Preparing initial VM state")
    # Prepare test environment and its parameters
    test_params = TestParams(params, env, test)
    if test_params.start_vm:
        # Make sure VM is working
        test_params.main_vm.verify_alive()
        test_params.main_vm.wait_for_login().close()
    else:  # VM not supposed to be started
        if test_params.main_vm.is_alive():
            test_params.main_vm.destroy(gracefully=True)
    # Capture backup of original XML early in test
    test_params.vmxml = VMXML.new_from_inactive_dumpxml(
        test_params.main_vm.name)
    # All devices should share same access state
    test_params.virsh = virsh.Virsh(ignore_status=True)
    logging.info("Creating %d test device instances", len(test_params.devs))
    # Create test objects from cfg. class names via subclasses above
    test_devices = [
        globals()[class_name](test_params, test)  # instantiate
        for class_name in test_params.devs
    ]  # vadu_dev_objs
    operational_results = []
    preboot_results = []
    pstboot_results = []
    try:
        operational_action(test_params, test_devices, operational_results)
        # Fail early if attach-device return value is not expected
        analyze_results(test_params,
                        test,
                        operational_results=operational_results)

        #  Can't do functional testing with a cold VM, only test hot-attach
        preboot_action(test_params, test_devices, preboot_results)

        logging.info(
            "Preparing test VM state for post-boot functional testing")
        if test_params.start_vm:
            # Hard-reboot required
            test_params.main_vm.destroy(gracefully=True,
                                        free_mac_addresses=False)
        try:
            test_params.main_vm.start()
        except virt_vm.VMStartError as details:
            test.fail('VM failed to start: %s' % details)
        # Signal devices reboot is finished
        for test_device in test_devices:
            test_device.booted = True
        test_params.main_vm.wait_for_login().close()
        postboot_action(test_params, test_devices, pstboot_results)
        analyze_results(test_params,
                        test,
                        preboot_results=preboot_results,
                        pstboot_results=pstboot_results)
    finally:
        logging.info("Restoring VM from backup, then checking results")
        test_params.main_vm.destroy(gracefully=False, free_mac_addresses=False)
        test_params.vmxml.undefine()
        test_params.vmxml.restore()  # Recover the original XML
        test_params.vmxml.define()
        if not test_params.start_vm:
            # Test began with not start_vm, shut it down.
            test_params.main_vm.destroy(gracefully=True)
        # Device cleanup can raise multiple exceptions, do it last:
        logging.info("Cleaning up test devices")
        test_params.cleanup(test_devices)
        backup_vm_xml.sync()
Example 14
def run(test, params, env):
    """
    Test the command virsh maxvcpus

    (1) Call virsh maxvcpus
    (2) Call virsh -c remote_uri maxvcpus
    (3) Call virsh maxvcpus with an unexpected option
    """

    # get the params from subtests.
    # params for general.
    option = params.get("virsh_maxvcpus_options")
    status_error = params.get("status_error")
    connect_arg = params.get("connect_arg", "")

    # params for transport connect.
    local_ip = params.get("local_ip", "ENTER.YOUR.LOCAL.IP")
    local_pwd = params.get("local_pwd", "ENTER.YOUR.LOCAL.ROOT.PASSWORD")
    server_ip = params.get("remote_ip", local_ip)
    server_pwd = params.get("remote_pwd", local_pwd)
    transport_type = params.get("connect_transport_type", "local")
    transport = params.get("connect_transport", "ssh")

    # check the config
    if (connect_arg == "transport" and transport_type == "remote"
            and local_ip.count("ENTER")):
        raise exceptions.TestSkipError("Parameter local_ip is not configured "
                                       "in remote test.")
    if (connect_arg == "transport" and transport_type == "remote"
            and local_pwd.count("ENTER")):
        raise exceptions.TestSkipError("Parameter local_pwd is not configured "
                                       "in remote test.")

    if connect_arg == "transport":
        canonical_uri_type = virsh.driver()

        if transport == "ssh":
            ssh_connection = utils_conn.SSHConnection(server_ip=server_ip,
                                                      server_pwd=server_pwd,
                                                      client_ip=local_ip,
                                                      client_pwd=local_pwd)
            try:
                ssh_connection.conn_check()
            except utils_conn.ConnectionError:
                ssh_connection.conn_setup()
                ssh_connection.conn_check()

            connect_uri = libvirt_vm.get_uri_with_transport(
                uri_type=canonical_uri_type,
                transport=transport,
                dest_ip=server_ip)
    else:
        connect_uri = connect_arg

    if libvirt_version.version_compare(2, 3, 0):
        try:
            maxvcpus = None
            # make sure we take maxvcpus from the right host; helps in case of a remote uri
            virsh_dargs = {'uri': connect_uri}
            virsh_instance = virsh.Virsh(**virsh_dargs)
            try:
                capa = capability_xml.CapabilityXML(
                    virsh_instance=virsh_instance)
                host_arch = capa.arch
                guest_caps = capa.get_guest_capabilities()
                maxvcpus = guest_caps['hvm'][host_arch]['maxcpus']
            except Exception:
                raise exceptions.TestFail("Failed to get maxvcpus from "
                                          "capabilities xml\n%s" % capa)
            if not maxvcpus:
                raise exceptions.TestFail("Failed to get guest section for "
                                          "host arch: %s from capabilities "
                                          "xml\n%s" % (host_arch, capa))
        except Exception as details:
            raise exceptions.TestFail(
                "Failed get the virsh instance with uri: "
                "%s\n Details: %s" % (connect_uri, details))
Example 15
def run(test, params, env):
    """
    Test virsh migrate command.
    """
    def cleanup_vm(vm, vm_name='', uri=''):
        """
        Clean up vm in the src or destination host environment
        when doing the uni-direction migration.
        """
        # Backup vm name and uri
        uri_bak = vm.connect_uri
        vm_name_bak = vm.name

        # Destroy and undefine vm
        vm.connect_uri = uri if uri else uri_bak
        vm.name = vm_name if vm_name else vm_name_bak
        logging.info("Cleaning up VM %s on %s", vm.name, vm.connect_uri)
        if vm.is_alive():
            vm.destroy()
        if vm.is_persistent():
            vm.undefine()

        # Restore vm connect_uri
        vm.connect_uri = uri_bak
        vm.name = vm_name_bak

    # Check whether there are unset parameters
    for v in list(itervalues(params)):
        if isinstance(v, string_types) and v.count("EXAMPLE"):
            test.cancel("Please set real value for %s" % v)

    # Params for virsh migrate options:
    live_migration = params.get("live_migration") == "yes"
    offline_migration = params.get("offline_migration") == "yes"
    persistent = params.get("persistent") == "yes"
    undefinesource = params.get("undefinesource") == "yes"
    p2p = params.get("p2p") == "yes"
    tunnelled = params.get("tunnelled") == "yes"
    postcopy = params.get("postcopy") == "yes"
    dname = params.get("dname")
    xml_option = params.get("xml_option") == "yes"
    persistent_xml_option = params.get("persistent_xml_option") == "yes"
    extra_options = params.get("virsh_migrate_extra", "")

    if live_migration and not extra_options.count("--live"):
        extra_options = "%s --live" % extra_options
    if offline_migration and not extra_options.count("--offline"):
        extra_options = "%s --offline" % extra_options
    if persistent and not extra_options.count("--persistent"):
        extra_options = "%s --persistent" % extra_options
    if undefinesource and not extra_options.count("--undefinesource"):
        extra_options = "%s --undefinesource" % extra_options
    if p2p and not extra_options.count("--p2p"):
        extra_options = "%s --p2p" % extra_options
    if tunnelled and not extra_options.count("--tunnelled"):
        extra_options = "%s --tunnelled" % extra_options
    if tunnelled and not extra_options.count("--p2p"):
        extra_options = "%s --p2p" % extra_options
    if postcopy and not extra_options.count("--postcopy"):
        extra_options = "%s --postcopy" % extra_options
    if dname and not extra_options.count("--dname"):
        extra_options = "%s --dname %s" % (extra_options, dname)
    if persistent_xml_option and not extra_options.count("--persistent"):
        extra_options = "%s --persistent" % extra_options

    # Set param migrate_options in case it is used somewhere:
    params.setdefault("migrate_options", extra_options)

    # Params for postcopy migration
    postcopy_timeout = int(params.get("postcopy_migration_timeout", "180"))

    # Params for migrate hosts:
    server_cn = params.get("server_cn")
    client_cn = params.get("client_cn")
    migrate_source_host = client_cn if client_cn else params.get(
        "migrate_source_host")
    migrate_dest_host = server_cn if server_cn else params.get(
        "migrate_dest_host")

    # Params for migrate uri
    transport = params.get("transport", "tls")
    transport_port = params.get("transport_port")
    uri_port = ":%s" % transport_port if transport_port else ''
    hypervisor_driver = params.get("hypervisor_driver", "qemu")
    hypervisor_mode = params.get("hypervisor_mode", 'system')
    if "virsh_migrate_desturi" not in list(params.keys()):
        params["virsh_migrate_desturi"] = "%s+%s://%s%s/%s" % (
            hypervisor_driver, transport, migrate_dest_host, uri_port,
            hypervisor_mode)
    if "virsh_migrate_srcuri" not in list(params.keys()):
        params["virsh_migrate_srcuri"] = "%s:///%s" % (hypervisor_driver,
                                                       hypervisor_mode)
    dest_uri = params.get("virsh_migrate_desturi")
    src_uri = params.get("virsh_migrate_srcuri")

    # Params for src vm cfg:
    src_vm_cfg = params.get("src_vm_cfg")
    src_vm_status = params.get("src_vm_status")
    with_graphic_passwd = params.get("with_graphic_passwd")
    graphic_passwd = params.get("graphic_passwd")

    # For test result check
    cancel_exception = False
    fail_exception = False
    exception = False
    result_check_pass = True

    # Objects(SSH, TLS and TCP, etc) to be cleaned up in finally
    objs_list = []

    # VM objects for migration test
    vms = []

    try:
        # Get a MigrationTest() Object
        logging.debug("Get a MigrationTest()  object")
        obj_migration = migration.MigrationTest()

        # Setup libvirtd remote connection TLS connection env
        if transport == "tls":
            tls_obj = TLSConnection(params)
            # Setup CA, server(on dest host) and client(on src host)
            tls_obj.conn_setup()
            # Add tls_obj to objs_list
            objs_list.append(tls_obj)

        # Enable libvirtd remote connection transport port
        if transport == 'tls':
            transport_port = '16514'
        elif transport == 'tcp':
            transport_port = '16509'
        obj_migration.migrate_pre_setup(dest_uri, params, ports=transport_port)

        # Back up vm name for recovery in finally
        vm_name_backup = params.get("migrate_main_vm")

        # Get a vm object for migration
        logging.debug("Get a vm object for migration")
        vm = env.get_vm(vm_name_backup)

        # Back up vm xml for recovery in finally
        logging.debug("Backup vm xml before migration")
        vm_xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
        if not vm_xml_backup:
            test.error("Backing up xmlfile failed.")

        # Prepare shared disk in vm xml for live migration:
        # Change the source of the first disk of vm to shared disk
        if live_migration:
            logging.debug("Prepare shared disk in vm xml for live migration")
            storage_type = params.get("storage_type")
            if storage_type == 'nfs':
                logging.debug("Prepare nfs shared disk in vm xml")
                nfs_mount_dir = params.get("nfs_mount_dir")
                libvirt.update_vm_disk_source(vm.name, nfs_mount_dir)
                libvirt.update_vm_disk_driver_cache(vm.name,
                                                    driver_cache="none")
            else:
                # TODO: other storage types
                test.cancel("Other storage types are not supported for now")

        # Prepare graphic password in vm xml
        if with_graphic_passwd in ["yes", "no"]:
            logging.debug("Set VM graphic passwd in vm xml")
            # Get graphics list in vm xml
            vmxml_tmp = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
            graphics_list = vmxml_tmp.get_graphics_devices()

            if not graphics_list:
                # Add spice graphic with passwd to vm xml
                logging.debug("Add spice graphic to vm xml")
                graphics.Graphics.add_graphic(vm.name, graphic_passwd, "spice")
            elif graphic_passwd:
                # Graphics already exist in vm xml and passwd is required
                # Add passwd to the first graphic device in vm xml
                logging.debug("Add graphic passwd to vm xml")
                vm_xml.VMXML.add_security_info(vmxml_tmp, graphic_passwd)
                vmxml_tmp.sync()
            else:
                # Graphics already exist in vm xml and non-passwd is required
                # Do nothing here as passwd has been removed by new_from_inactive_dumpxml()
                pass

        # Prepare for required src vm status.
        logging.debug("Turning %s into certain state.", vm.name)
        if src_vm_status == "running" and not vm.is_alive():
            vm.start()
        elif src_vm_status == "shut off" and not vm.is_dead():
            vm.destroy()

        # Prepare for required src vm persistency.
        logging.debug("Prepare for required src vm persistency")
        if src_vm_cfg == "persistent" and not vm.is_persistent():
            logging.debug("Make src vm persistent")
            vm_xml_backup.define()
        elif src_vm_cfg == "transient" and vm.is_persistent():
            logging.debug("Make src vm transient")
            vm.undefine()

        # Prepare for postcopy migration: install and run stress in VM
        if postcopy and src_vm_status == "running":
            logging.debug(
                "Install and run stress in vm for postcopy migration")
            pkg_name = 'stress'

            # Get a vm session
            logging.debug("Get a vm session")
            vm_session = vm.wait_for_login()
            if not vm_session:
                test.error("Can't get a vm session successfully")

            # Install package stress if it is not installed in vm
            logging.debug(
                "Check if stress tool is installed for postcopy migration")
            pkg_mgr = utils_package.package_manager(vm_session, pkg_name)
            if not pkg_mgr.is_installed(pkg_name):
                logging.debug("Stress tool will be installed")
                if not pkg_mgr.install():
                    test.error("Package '%s' installation fails" % pkg_name)

            # Run stress in vm
            logging.debug("Run stress in vm")
            stress_args = params.get("stress_args")
            vm_session.cmd('stress %s' % stress_args)

        # Prepare for --xml <updated_xml_file>.
        if xml_option:
            logging.debug("Preparing new xml file for --xml option.")

            # Get the vm xml
            vmxml_tmp = vm_xml.VMXML.new_from_dumpxml(
                vm.name, "--security-info --migratable")

            # Update something in the xml file: e.g. title
            # Note: VM ABI shall not be broken when migrating with updated_xml
            updated_title = "VM Title in updated xml"
            vmxml_tmp.title = updated_title

            # Add --xml to migrate extra_options
            extra_options = ("%s --xml=%s" % (extra_options, vmxml_tmp.xml))

        # Prepare for --persistent-xml <updated_xml_file>.
        if persistent_xml_option:
            logging.debug(
                "Preparing new xml file for --persistent-xml option.")

            # Get the vm xml
            vmxml_persist_tmp = vm_xml.VMXML.new_from_inactive_dumpxml(
                vm.name, "--security-info")

            # Update something in the xml file: e.g. title
            # Note: VM ABI shall not be broken when migrating with updated_xml
            updated_persist_title = "VM Title in updated persist xml"
            vmxml_persist_tmp.title = updated_persist_title

            # Add --persistent-xml to migrate extra_options
            extra_options = ("%s --persistent-xml=%s" %
                             (extra_options, vmxml_persist_tmp.xml))

        # Prepare host env: clean up vm on dest host
        logging.debug("Clean up vm on dest host before migration")
        if dname:
            cleanup_vm(vm, dname, dest_uri)
        else:
            cleanup_vm(vm, vm.name, dest_uri)

        # Prepare host env: set selinux state before migration
        logging.debug("Set selinux to enforcing before migration")
        utils_selinux.set_status(params.get("selinux_state", "enforcing"))

        # Check vm network connectivity by ping before migration
        logging.debug("Check vm network before migration")
        if src_vm_status == "running":
            obj_migration.ping_vm(vm, params)

        # Get VM uptime before migration
        if src_vm_status == "running":
            vm_uptime = vm.uptime()
            logging.info("Check VM uptime before migration: %s", vm_uptime)

        # Print vm active xml before migration
        process.system_output("virsh dumpxml %s --security-info" % vm.name,
                              shell=True)

        # Print vm inactive xml before migration
        process.system_output("virsh dumpxml %s --security-info --inactive" %
                              vm.name,
                              shell=True)

        # Do uni-direction migration.
        # NOTE: vm.connect_uri will be set to dest_uri once migration is complete successfully
        logging.debug("Start to do migration test.")
        vms.append(vm)
        if postcopy:
            # Monitor the qemu monitor event of "postcopy-active" for postcopy migration
            logging.debug(
                "Monitor the qemu monitor event for postcopy migration")
            virsh_session = virsh.VirshSession(virsh_exec=virsh.VIRSH_EXEC,
                                               auto_close=True)
            cmd = "qemu-monitor-event --loop --domain %s --event MIGRATION" % vm.name
            virsh_session.sendline(cmd)

            # Do live migration and switch to postcopy by "virsh migrate-postcopy"
            logging.debug("Start to do postcopy migration")
            obj_migration.do_migration(vms,
                                       src_uri,
                                       dest_uri,
                                       "orderly",
                                       options="",
                                       thread_timeout=postcopy_timeout,
                                       ignore_status=True,
                                       func=virsh.migrate_postcopy,
                                       extra_opts=extra_options,
                                       shell=True)
            # Check migration result
            obj_migration.check_result(obj_migration.ret, params)

            # Check "postcopy-active" event after postcopy migration
            logging.debug(
                "Check postcopy-active event after postcopy migration")
            virsh_session.send_ctrl("^C")
            events_output = virsh_session.get_stripped_output()
            logging.debug("events_output are %s", events_output)
            pattern = "postcopy-active"
            if not re.search(pattern, events_output):
                test.fail("Migration didn't switch to postcopy mode")
                virsh_session.close()
            virsh_session.close()

        else:
            logging.debug("Start to do precopy migration")
            obj_migration.do_migration(vms,
                                       src_uri,
                                       dest_uri,
                                       "orderly",
                                       options="",
                                       ignore_status=True,
                                       extra_opts=extra_options)
            # Check migration result
            obj_migration.check_result(obj_migration.ret, params)
        """
        # Check src vm after migration
        # First, update vm name and connect_uri to src vm's
        """
        vm.name = vm_name_backup
        vm.connect_uri = src_uri
        logging.debug("Start to check %s state on src %s after migration.",
                      vm.name, src_uri)

        # Check src vm status after migration: existence, running, shutoff, etc
        logging.debug("Check vm status on source after migration")
        if offline_migration:
            if src_vm_status == "shut off" and undefinesource:
                if vm.exists():
                    result_check_pass = False
                    logging.error(
                        "Src vm should not exist after offline migration"
                        " with --undefinesource")
                    logging.debug("Src vm state is %s" % vm.state())
            elif not libvirt.check_vm_state(
                    vm.name, src_vm_status, uri=vm.connect_uri):
                result_check_pass = False
                logging.error("Src vm should be %s after offline migration" %
                              src_vm_status)
                logging.debug("Src vm state is %s" % vm.state())

        if live_migration:
            if not undefinesource and src_vm_cfg == "persistent":
                if not libvirt.check_vm_state(
                        vm.name, "shut off", uri=vm.connect_uri):
                    result_check_pass = False
                    logging.error(
                        "Src vm should be shutoff after live migration")
                    logging.debug("Src vm state is %s" % vm.state())
            elif vm.exists():
                result_check_pass = False
                logging.error("Src vm should not exist after live migration")
                logging.debug("Src vm state is %s" % vm.state())

        # Check src vm status after migration: persistency
        logging.debug("Check vm persistency on source after migration")
        if src_vm_cfg == "persistent" and not undefinesource:
            if not vm.is_persistent():
                # Src vm should be persistent after migration without --undefinesource
                result_check_pass = False
                logging.error("Src vm should be persistent after migration")
        elif vm.is_persistent():
            result_check_pass = False
            logging.error("Src vm should be not be persistent after migration")
        """
        # Check dst vm after migration
        # First, update vm name and connect_uri to dst vm's
        """
        vm.name = dname if dname else vm.name
        vm.connect_uri = dest_uri
        logging.debug("Start to check %s state on target %s after migration.",
                      vm.name, vm.connect_uri)

        # Check dst vm status after migration: running, shutoff, etc
        logging.debug("Check vm status on target after migration")
        if live_migration:
            if not libvirt.check_vm_state(
                    vm.name, src_vm_status, uri=vm.connect_uri):
                result_check_pass = False
                logging.error("Dst vm should be %s after live migration",
                              src_vm_status)
        elif vm.is_alive():
            result_check_pass = False
            logging.error("Dst vm should not be alive after offline migration")

        # Print vm active xml after migration
        process.system_output("virsh -c %s dumpxml %s --security-info" %
                              (vm.connect_uri, vm.name),
                              shell=True)

        # Print vm inactive xml after migration
        process.system_output(
            "virsh -c %s dumpxml %s --security-info --inactive" %
            (vm.connect_uri, vm.name),
            shell=True)

        # Check dst vm xml after migration
        logging.debug("Check vm xml on target after migration")
        remote_virsh = virsh.Virsh(uri=vm.connect_uri)
        vmxml_active_tmp = vm_xml.VMXML.new_from_dumpxml(
            vm.name, "--security-info", remote_virsh)
        vmxml_inactive_tmp = vm_xml.VMXML.new_from_inactive_dumpxml(
            vm.name, "--security-info", remote_virsh)
        # Check dst vm xml after migration: --xml <updated_xml_file>
        if xml_option and not offline_migration:
            logging.debug("Check vm active xml for --xml")
            if vmxml_active_tmp.title != updated_title:
                print("vmxml active tmp title is %s" % vmxml_active_tmp.title)
                result_check_pass = False
                logging.error("--xml doesn't take effect in migration")

        if xml_option and offline_migration:
            logging.debug("Check vm inactive xml for --xml")
            if vmxml_inactive_tmp.title != updated_title:
                result_check_pass = False
                logging.error("--xml doesn't take effect in migration")

        # Check dst vm xml after migration: --persistent-xml <updated_xml_file>
        if persistent_xml_option:
            logging.debug("Check vm inactive xml for --persistent-xml")
            if not offline_migration and vmxml_inactive_tmp.title != updated_persist_title:
                print("vmxml inactive tmp title is %s" %
                      vmxml_inactive_tmp.title)
                result_check_pass = False
                logging.error(
                    "--persistent-xml doesn't take effect in live migration")
            elif offline_migration and vmxml_inactive_tmp.title == updated_persist_title:
                result_check_pass = False
                logging.error(
                    "--persistent-xml should not take effect in offline "
                    "migration")

        # Check dst vm xml after migration: graphic passwd
        if with_graphic_passwd == "yes":
            logging.debug("Check graphic passwd in vm xml after migration")
            graphic_active = vmxml_active_tmp.devices.by_device_tag(
                'graphics')[0]
            graphic_inactive = vmxml_inactive_tmp.devices.by_device_tag(
                'graphics')[0]
            try:
                logging.debug("Check graphic passwd in active vm xml")
                if graphic_active.passwd != graphic_passwd:
                    result_check_pass = False
                    logging.error(
                        "Graphic passwd in active xml of dst vm should be %s",
                        graphic_passwd)

                logging.debug("Check graphic passwd in inactive vm xml")
                if graphic_inactive.passwd != graphic_passwd:
                    result_check_pass = False
                    logging.error(
                        "Graphic passwd in inactive xml of dst vm should be %s",
                        graphic_passwd)
            except LibvirtXMLNotFoundError:
                result_check_pass = False
                logging.error("Graphic passwd lost in dst vm xml")

        # Check dst vm uptime, network, etc after live migration
        if live_migration:
            # Check dst VM uptime after migration
            # Note: migrated_vm_uptime should be greater than the vm_uptime got
            # before migration
            migrated_vm_uptime = vm.uptime(connect_uri=dest_uri)
            logging.info(
                "Check VM uptime in destination after "
                "migration: %s", migrated_vm_uptime)
            if not migrated_vm_uptime:
                result_check_pass = False
                logging.error("Failed to check vm uptime after migration")
            elif vm_uptime > migrated_vm_uptime:
                result_check_pass = False
                logging.error(
                    "VM went for a reboot while migrating to destination")

            # Check dst VM network connectivity after migration
            logging.debug("Check VM network connectivity after migrating")
            obj_migration.ping_vm(vm, params, uri=dest_uri)

            # Restore vm.connect_uri as it is set to src_uri in ping_vm()
            logging.debug(
                "Restore vm.connect_uri as it is set to src_uri in ping_vm()")
            vm.connect_uri = dest_uri

        # Check dst vm status after migration: persistency
        logging.debug("Check vm persistency on target after migration")
        if persistent:
            if not vm.is_persistent():
                result_check_pass = False
                logging.error("Dst vm should be persistent after migration "
                              "with --persistent")
                time.sleep(10)
            # Destroy vm and check vm state should be shutoff. BZ#1076354
            vm.destroy()
            if not libvirt.check_vm_state(
                    vm.name, "shut off", uri=vm.connect_uri):
                result_check_pass = False
                logging.error(
                    "Dst vm with name %s should exist and be shutoff", vm.name)
        elif vm.is_persistent():
            result_check_pass = False
            logging.error("Dst vm should not be persistent after migration "
                          "without --persistent")

    finally:
        logging.debug("Start to clean up env")
        # Clean up vm on dest and src host
        for vm in vms:
            cleanup_vm(vm, vm_name=dname, uri=dest_uri)
            cleanup_vm(vm, vm_name=vm_name_backup, uri=src_uri)

        # Recover source vm definition (just in case).
        logging.info("Recover vm definition on source")
        if vm_xml_backup:
            vm_xml_backup.define()

        # Clean up SSH, TCP, TLS test env
        if objs_list:
            logging.debug("Clean up test env: SSH, TCP, TLS, etc")
            for obj in objs_list:
                obj.auto_recover = True
                obj.__del__()

        # Disable libvirtd remote connection transport port
        obj_migration.migrate_pre_setup(dest_uri,
                                        params,
                                        cleanup=True,
                                        ports=transport_port)

        # Check test result.
        if not result_check_pass:
            test.fail("Migration succeed, but some check points didn't pass."
                      "Please check the error log for details")
Example 16
def run(test, params, env):
    """
    Test KVM migration scenarios
    """
    migrate_options = params.get("migrate_options", "")
    migrate_postcopy = params.get("migrate_postcopy", "")
    migrate_dest_ip = params.get("migrate_dest_host")
    nfs_mount_path = params.get("nfs_mount_dir")
    migrate_start_state = params.get("migrate_start_state", "paused")
    machine_types = params.get("migrate_all_machine_types", "no") == "yes"
    migrate_back = params.get("migrate_back", "yes") == "yes"
    postcopy_func = None
    if migrate_postcopy:
        postcopy_func = virsh.migrate_postcopy
    migrate_type = params.get("migrate_type", "orderly")
    vm_state = params.get("migrate_vm_state", "running")
    ping_count = int(params.get("ping_count", 10))
    thread_timeout = int(params.get("thread_timeout", 3600))

    vm_list = env.get_all_vms()

    # Params to update disk using shared storage
    params["disk_type"] = "file"
    params["disk_source_protocol"] = "netfs"
    params["mnt_path_name"] = nfs_mount_path

    src_uri = "qemu:///system"
    dest_uri = libvirt_vm.complete_uri(params["server_ip"])

    vmxml_dict = {}
    vmxml_machine = {}

    machine_list = params.get("machine_type").split()
    virt_type = params.get("hvm_or_pv", "hvm")
    arch = params.get("vm_arch_name", platform.machine())

    # Get all supported machine types in source
    if machine_types:
        machine_list = libvirt.get_machine_types(arch, virt_type)
        if not machine_list:
            test.cancel("Libvirt doesn't support %s virtualization on "
                        "arch %s in source host" % (virt_type, arch))
        logging.debug("Supported machine types in source: %s",
                      ", ".join(map(str, machine_list)))

        # Get all supported machine types in target host
        virsh_remote = virsh.Virsh(uri=dest_uri)
        remote_machine_list = libvirt.get_machine_types(
            arch, virt_type, virsh_instance=virsh_remote)
        if not remote_machine_list:
            test.cancel("Libvirt doesn't support %s virtualization on "
                        "arch %s in target host" % (virt_type, arch))
        logging.debug("Supported machine types in target: %s",
                      ", ".join(map(str, remote_machine_list)))

        # use machine types supported by both source and target host
        machine_list = list(
            set(machine_list).intersection(remote_machine_list))
        if not machine_list:
            test.cancel(
                "Migration not supported as source machine type and target "
                "machine type doesn't match")
        logging.debug(
            "Supported machine types that are common in  source and "
            "target are: %s", ", ".join(map(str, machine_list)))

    migrate_setup = libvirt.MigrationTest()
    # Perform migration with each machine type
    try:
        for vm in vm_list:
            vmxml_dict[vm.name] = libvirt_xml.vm_xml.VMXML.new_from_dumpxml(
                vm.name)
            params["source_dist_img"] = "%s-nfs-img" % vm.name
            if vm.is_alive():
                vm.destroy()
            libvirt.set_vm_disk(vm, params)
        info_list = []
        for machine in machine_list:
            uptime = {}
            for vm in vm_list:
                vmxml_machine[
                    vm.name] = libvirt_xml.vm_xml.VMXML.new_from_dumpxml(
                        vm.name)
                # update machine type
                update_machinetype(test, vmxml_machine[vm.name], machine)
                if vm.is_alive():
                    vm.destroy()
                if "offline" not in migrate_options:
                    vm.start()
                    vm.wait_for_login()
                    uptime[vm.name] = vm.uptime()
                    logging.info("uptime of VM %s: %s", vm.name,
                                 uptime[vm.name])
                    migrate_setup.ping_vm(vm, params, ping_count=ping_count)
            try:
                logging.debug(
                    "Migrating from source %s to target %s "
                    "with machine type: %s", src_uri, dest_uri, machine)
                # Reset the migration result flag so a failure in one
                # iteration doesn't leak into the next
                migrate_setup.RET_MIGRATION = True
                migrate_setup.do_migration(
                    vm_list,
                    src_uri,
                    dest_uri,
                    migrate_type,
                    migrate_options,
                    func=postcopy_func,
                    migrate_start_state=migrate_start_state,
                    thread_timeout=thread_timeout)
            except Exception as info:
                info_list.append(info)
                logging.error(
                    "Failed to migrate VM from source to target "
                    "%s to %s with machine type: %s", src_uri, dest_uri,
                    machine)

            if migrate_setup.RET_MIGRATION:
                uptime = migrate_setup.post_migration_check(vm_list,
                                                            params,
                                                            uptime,
                                                            uri=dest_uri)
                if migrate_back:
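                    # Prepare the original source host to receive the VM back;
                    # the cleanup=True call after the reverse migration undoes
                    # this setup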
                    migrate_setup.migrate_pre_setup(src_uri, params)
                    logging.debug(
                        "Migrating back to source from %s to %s "
                        "with machine type: %s", dest_uri, src_uri, machine)
                    try:
                        migrate_setup.do_migration(
                            vm_list,
                            dest_uri,
                            src_uri,
                            migrate_type,
                            options=migrate_options,
                            func=postcopy_func,
                            migrate_start_state=migrate_start_state,
                            thread_timeout=thread_timeout,
                            virsh_uri=dest_uri)
                    except Exception as info:
                        logging.error(
                            "Failed to migrate back to source from "
                            "%s to %s with machine type: %s", dest_uri,
                            src_uri, machine)
                        info_list.append(info)
                        cleanup_vm(vm_list, vmxml_machine, migrate_setup,
                                   src_uri, dest_uri)
                        continue
                    uptime = migrate_setup.post_migration_check(
                        vm_list, params, uptime)
                    migrate_setup.migrate_pre_setup(src_uri,
                                                    params,
                                                    cleanup=True)
        if info_list:
            test.fail(" | ".join(map(str, info_list)))
    finally:
        logging.debug("cleanup the migration setup in source/destination")
        cleanup_vm(vm_list, vmxml_dict, migrate_setup, src_uri, dest_uri)
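        # Drop the local disk files recorded in params while the VMs were
        # switched to shared storage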
        for source_file in params.get("source_file_list", []):
            libvirt.delete_local_disk("file", path=source_file)
Esempio n. 17
0
    def check_rng_values_in_dumpxml(self, vm, xml_set):
        """
        Check the rng XML that was set against the dumpxml output by
        comparing their attributes

        :param vm: vm object
        :param xml_set: rng xml object

        :return: boolean: True if check succeeds else False
        """
        logging.debug("Check rng values in dumpxml")

        virsh_instance = virsh.Virsh(uri=vm.connect_uri)
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm.name,
                                              virsh_instance=virsh_instance)
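        # Binding the Virsh instance to vm.connect_uri keeps the dumpxml
        # lookup valid even when the domain runs on a remote host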
        # Get all current xml rng devices
        xml_devices = vmxml.devices
        rng_devices = xml_devices.by_device_tag("rng")
        logging.debug("rng_devices is %s", rng_devices)

        # The rng device under test is the most recently added one
        xml_get = rng_devices[-1]

        def get_compare_values(xml_set, xml_get, rng_attr):
            """
            Get set and get value to compare

            :param xml_set: setting xml object
            :param xml_get: getting xml object
            :param rng_attr: attribute of rng device
            :return: set and get value in xml
            """
            try:
                set_value = xml_set[rng_attr]
            except xcepts.LibvirtXMLNotFoundError:
                set_value = None
            try:
                get_value = xml_get[rng_attr]
            except xcepts.LibvirtXMLNotFoundError:
                get_value = None
            logging.debug("get xml_set value(%s) is %s, get xml_get value is\
                          %s", rng_attr, set_value, get_value)
            return (set_value, get_value)

        match = True
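        # Compare every attribute slot the rng XML object can carry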
        for rng_attr in xml_set.__slots__:
            set_value, get_value = get_compare_values(xml_set, xml_get,
                                                      rng_attr)
            logging.debug("rng_attr=%s, set_value=%s, get_value=%s",
                          rng_attr, set_value, get_value)
            if set_value and set_value != get_value:
                if rng_attr == 'backend':
                    for bak_attr in xml_set.backend.__slots__:
                        set_backend, get_backend = get_compare_values(
                                xml_set.backend, xml_get.backend, bak_attr)
                        if set_backend and set_backend != get_backend:
                            if bak_attr == 'source':
                                set_source = xml_set.backend.source
                                get_source = xml_get.backend.source
                                for set_elem in set_source:
                                    # Each element that was set must show up
                                    # (as an attribute subset) in some element
                                    # reported by dumpxml; the search result is
                                    # reset for every element
                                    found = any(
                                        set(set_elem.items()).issubset(
                                            get_elem.items())
                                        for get_elem in get_source)
                                    if not found:
                                        logging.debug(
                                            "set source (%s) not found in "
                                            "get source (%s)",
                                            set_elem, get_source)
                                        match = False
                                        break
                            else:
                                logging.debug(
                                    "set backend (%s) value %s does not "
                                    "equal get value %s",
                                    bak_attr, set_backend, get_backend)
                                match = False
                                break
                        if not match:
                            break
                else:
                    logging.debug(
                        "set value (%s) %s does not equal get value %s",
                        rng_attr, set_value, get_value)
                    match = False
                    break
            if not match:
                break

        return match