Example #1
    def attach_disk_encryption(vol_path, uuid, params):
        """
        Attach a disk with luks encryption

        :param vol_path: the volume path used in disk XML
        :param uuid: the secret uuid of the volume
        :param params: the parameter dictionary
        :raise: test.fail when disk cannot be attached

        """
        target_dev = params.get("target_dev", "vdb")
        vol_format = params.get("vol_format", "qcow2")
        disk_path = vol_path
        new_disk_dict = {
            "driver_type": vol_format,
            "source_encryption_dict": {
                "encryption": 'luks',
                "secret": {
                    "type": "passphrase",
                    "uuid": uuid
                }
            }
        }
        result = utlv.attach_additional_device(vm_name, target_dev, disk_path,
                                               new_disk_dict)
        if result.exit_status:
            test.fail("Attach device %s failed." % target_dev)
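
    # A minimal usage sketch for the helper above (values are hypothetical;
    # in a real test the uuid comes from a libvirt secret defined earlier):
    #
    #     attach_disk_encryption("/var/lib/libvirt/images/luks.qcow2",
    #                            "11111111-2222-3333-4444-555555555555",
    #                            params)
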
    def attach_disk_secret(params):
        """
        Attach a disk with secret to VM

        :param params: the parameter dictionary
        :raise: test.fail when disk cannot be attached
        """
        secret_string = params.get("secret_base64_no_encoded")
        target_dev = params.get("target_dev", "vdb")
        uuid = params.get("secret_uuid")
        # TODO: support encoded data
        extra = "--object secret,id=sec0,data=%s -o key-secret=sec0" % secret_string
        tmp_dir = data_dir.get_tmp_dir()
        disk_path = os.path.join(tmp_dir, "test.img")
        libvirt.create_local_disk("file",
                                  disk_format="luks",
                                  path=disk_path,
                                  size="1",
                                  extra=extra)
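        # Assuming create_local_disk shells out to qemu-img (not verified
        # here), the call above is expected to run roughly:
        #   qemu-img create -f luks \
        #       --object secret,id=sec0,data=<secret_string> \
        #       -o key-secret=sec0 <disk_path> 1G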
        new_disk_dict = {
            "source_encryption_dict": {
                "encryption": 'luks',
                "secret": {
                    "type": "passphrase",
                    "uuid": uuid
                }
            }
        }

        result = libvirt.attach_additional_device(vm_name, target_dev,
                                                  disk_path, new_disk_dict)
        if result.exit_status:
            test.fail("Attach device %s failed." % target_dev)
    def update_disk(vm, params):
        """
        Update disk for testing.

        :param vm: vm object.
        :param params: the parameters used.
        :return: lists of local images, remote images, and attached
                 target devices.
        """
        local_image_list = []
        remote_image_list = []
        vm_did_list = []
        # Change the disk of the vm
        if storage_type == "nfs":
            libvirt.set_vm_disk(vm, params)
        else:
            disk_format = params.get("disk_format", "qcow2")
            disk_num = int(params.get("disk_num", "1"))
            blk_source = vm.get_first_disk_devices()['source']
            vsize = utils_misc.get_image_info(blk_source).get("vsize")
            remote_session = remote.remote_login("ssh", server_ip, "22",
                                                 server_user, server_pwd,
                                                 r'[$#%]')
            # Create disk on remote host
            utils_misc.make_dirs(os.path.dirname(blk_source), remote_session)
            libvirt_disk.create_disk("file",
                                     disk_format=disk_format,
                                     path=blk_source,
                                     size=vsize,
                                     session=remote_session)
            remote_image_list.append(blk_source)

            for idx in range(2, disk_num + 1):
                disk_path = os.path.join(os.path.dirname(blk_source),
                                         "test%s.img" % str(idx))
                # Create disk on local
                libvirt_disk.create_disk("file",
                                         disk_format=disk_format,
                                         path=disk_path)
                local_image_list.append(disk_path)

                target_dev = 'vd' + chr(idx + ord('a') - 1)
                new_disk_dict = {"driver_type": disk_format}
                result = libvirt.attach_additional_device(
                    vm_name, target_dev, disk_path, new_disk_dict, False)
                libvirt.check_exit_status(result)

                libvirt_disk.create_disk("file",
                                         disk_format=disk_format,
                                         path=disk_path,
                                         session=remote_session)

                remote_image_list.append(disk_path)
                vm_did_list.append(target_dev)

            remote_session.close()
        return local_image_list, remote_image_list, vm_did_list
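
    # Illustrative return values for disk_num=2 (paths are examples only):
    #   local_image_list  -> ["<dir of blk_source>/test2.img"]
    #   remote_image_list -> [blk_source, "<dir of blk_source>/test2.img"]
    #   vm_did_list       -> ["vdb"]
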
def run(test, params, env):
    """
    Test command: virsh change-media.

    The command changes the media used by CD or floppy drives.

    Test steps:
    1. Prepare test environment.
    2. Perform virsh change-media operation.
    3. Recover test environment.
    4. Confirm the test result.
    """

    def is_attached(vmxml_devices, disk_type, source_file, target_dev):
        """
        Check attached device and disk exist or not.

        :param vmxml_devices: VMXMLDevices instance
        :param disk_type: disk's device type: cdrom or floppy
        :param source_file : disk's source file to check
        :param target_dev : target device name
        :return: True if a disk with the given source file and target
                 is found, False otherwise
        """
        disks = vmxml_devices.by_device_tag('disk')
        for disk in disks:
            if disk.device != disk_type:
                continue
            if disk.target['dev'] != target_dev:
                continue
            if disk.xmltreefile.find('source') is not None:
                if disk.source.attrs['file'] != source_file:
                    continue
            else:
                continue
            # All three conditions met
            logging.debug("Find %s in given disk XML", source_file)
            return True
        logging.debug("Not find %s in gievn disk XML", source_file)
        return False

    def check_result(vm_name, disk_source, disk_type, disk_target,
                     flags, vm_state, attach=True):
        """
        Check the test result of attach/detach-device command.
        """
        active_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        active_attached = is_attached(active_vmxml.devices, disk_type,
                                      disk_source, disk_target)
        if vm_state != "transient":
            inactive_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name,
                                                           options="--inactive")
            inactive_attached = is_attached(inactive_vmxml.devices, disk_type,
                                            disk_source, disk_target)

        if flags.count("config") and not flags.count("live"):
            if vm_state != "transient":
                if attach:
                    if not inactive_attached:
                        raise exceptions.TestFail("Inactive domain XML not updated"
                                                  " when --config options used for"
                                                  " attachment")
                    if vm_state != "shutoff":
                        if active_attached:
                            raise exceptions.TestFail("Active domain XML updated"
                                                      " when --config options used"
                                                      " for attachment")
                else:
                    if inactive_attached:
                        raise exceptions.TestFail("Inactive domain XML not updated"
                                                  " when --config options used for"
                                                  " detachment")
                    if vm_state != "shutoff":
                        if not active_attached:
                            raise exceptions.TestFail("Active domain XML updated"
                                                      " when --config options used"
                                                      " for detachment")
        elif flags.count("live") and not flags.count("config"):
            if attach:
                if vm_state in ["paused", "running", "transient"]:
                    if not active_attached:
                        raise exceptions.TestFail("Active domain XML not updated"
                                                  " when --live options used for"
                                                  " attachment")
                if vm_state in ["paused", "running"]:
                    if inactive_attached:
                        raise exceptions.TestFail("Inactive domain XML updated"
                                                  " when --live options used for"
                                                  " attachment")
            else:
                if vm_state in ["paused", "running", "transient"]:
                    if active_attached:
                        raise exceptions.TestFail("Active domain XML not updated"
                                                  " when --live options used for"
                                                  " detachment")
                if vm_state in ["paused", "running"]:
                    if not inactive_attached:
                        raise exceptions.TestFail("Inactive domain XML updated"
                                                  " when --live options used for"
                                                  " detachment")
        elif flags.count("live") and flags.count("config"):
            if attach:
                if vm_state in ["paused", "running"]:
                    if not active_attached:
                        raise exceptions.TestFail("Active domain XML not updated"
                                                  " when --live --config options"
                                                  " used for attachment")
                    if not inactive_attached:
                        raise exceptions.TestFail("Inactive domain XML not updated"
                                                  " when --live --config options "
                                                  "used for attachment")
            else:
                if vm_state in ["paused", "running"]:
                    if active_attached:
                        raise exceptions.TestFail("Active domain XML not updated "
                                                  "when --live --config options "
                                                  "used for detachment")
                    if inactive_attached:
                        raise exceptions.TestFail("Inactive domain XML not updated"
                                                  " when --live --config options "
                                                  "used for detachment")
        elif flags.count("current") or flags == "":
            if attach:
                if vm_state in ["paused", "running", "transient"]:
                    if not active_attached:
                        raise exceptions.TestFail("Active domain XML not updated"
                                                  " when --current options used "
                                                  "for attachment")
                if vm_state in ["paused", "running"]:
                    if inactive_attached:
                        raise exceptions.TestFail("Inactive domain XML updated "
                                                  "when --current options used "
                                                  "for live attachment")
                if vm_state == "shutoff" and not inactive_attached:
                    raise exceptions.TestFail("Inactive domain XML not updated "
                                              "when --current options used for "
                                              "attachment")
            else:
                if vm_state in ["paused", "running", "transient"]:
                    if active_attached:
                        raise exceptions.TestFail("Active domain XML not updated"
                                                  " when --current options used "
                                                  "for detachment")
                if vm_state in ["paused", "running"]:
                    if not inactive_attached:
                        raise exceptions.TestFail("Inactive domain XML updated "
                                                  "when --current options used "
                                                  "for live detachment")
                if vm_state == "shutoff" and inactive_attached:
                    raise exceptions.TestFail("Inactive domain XML not updated "
                                              "when --current options used for "
                                              "detachment")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vm_ref = params.get("change_media_vm_ref")
    action = params.get("change_media_action")
    action_twice = params.get("change_media_action_twice", "")
    pre_vm_state = params.get("pre_vm_state")
    options = params.get("change_media_options")
    options_twice = params.get("change_media_options_twice", "")
    device_type = params.get("change_media_device_type", "cdrom")
    target_device = params.get("change_media_target_device", "hdc")
    init_iso_name = params.get("change_media_init_iso")
    old_iso_name = params.get("change_media_old_iso")
    new_iso_name = params.get("change_media_new_iso")
    virsh_dargs = {"debug": True, "ignore_status": True}

    if device_type not in ['cdrom', 'floppy']:
        raise exceptions.TestSkipError("Got a invalid device type:/n%s"
                                       % device_type)

    # Backup for recovery.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    old_iso = os.path.join(data_dir.get_tmp_dir(), old_iso_name)
    new_iso = os.path.join(data_dir.get_tmp_dir(), new_iso_name)

    if vm_ref == "name":
        vm_ref = vm_name

    if vm.is_alive():
        vm.destroy(gracefully=False)

    try:
        if not init_iso_name:
            init_iso = ""
        else:
            init_iso = os.path.join(data_dir.get_tmp_dir(),
                                    init_iso_name)

        # Prepare test files.
        libvirt.create_local_disk("iso", old_iso)
        libvirt.create_local_disk("iso", new_iso)

        # Check domain's disk device
        disk_blk = vm_xml.VMXML.get_disk_blk(vm_name)
        logging.info("disk_blk %s", disk_blk)
        if target_device not in disk_blk:
            if vm.is_alive():
                virsh.destroy(vm_name)
            logging.info("Adding device")
            libvirt.create_local_disk("iso", init_iso)
            disk_params = {"disk_type": "file", "device_type": device_type,
                           "driver_name": "qemu", "driver_type": "raw",
                           "target_bus": "ide", "readonly": "yes"}
            libvirt.attach_additional_device(vm_name, target_device,
                                             init_iso, disk_params)

        vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        # Turn VM into certain state.
        if pre_vm_state == "running":
            logging.info("Starting %s..." % vm_name)
            if vm.is_dead():
                vm.start()
                vm.wait_for_login().close()
        elif pre_vm_state == "shutoff":
            logging.info("Shuting down %s..." % vm_name)
            if vm.is_alive():
                vm.destroy(gracefully=False)
        elif pre_vm_state == "paused":
            logging.info("Pausing %s..." % vm_name)
            if vm.is_dead():
                vm.start()
                vm.wait_for_login().close()
            if not vm.pause():
                raise exceptions.TestSkipError("Cann't pause the domain")
            time.sleep(5)
        elif pre_vm_state == "transient":
            logging.info("Creating %s..." % vm_name)
            vm.undefine()
            if virsh.create(vmxml_for_test.xml, **virsh_dargs).exit_status:
                vmxml_backup.define()
                raise exceptions.TestSkipError("Cann't create the domain")

        # Libvirt will ignore --source when action is eject
        attach = True
        device_source = old_iso
        if action == "--eject ":
            source = ""
            attach = False
        else:
            source = device_source

        all_options = action + options + " " + source
        ret = virsh.change_media(vm_ref, target_device,
                                 all_options, ignore_status=True, debug=True)
        status_error = False
        if pre_vm_state == "shutoff":
            if options.count("live"):
                status_error = True
        elif pre_vm_state == "transient":
            if options.count("config"):
                status_error = True

        if vm.is_paused():
            vm.resume()
            vm.wait_for_login().close()
            # For a paused vm, change_media for an eject/update operation
            # must be executed again for it to take effect
            if ret.exit_status:
                if not action.count("insert") and not options.count("force"):
                    ret = virsh.change_media(vm_ref, target_device, all_options,
                                             ignore_status=True, debug=True)
        if not status_error and ret.exit_status:
            raise exceptions.TestFail("Please check: Bug 1289069 - Ejecting "
                                      "locked cdrom tray using update-device"
                                      " fails but next try succeeds")
        libvirt.check_exit_status(ret, status_error)
        if not ret.exit_status:
            check_result(vm_name, device_source, device_type, target_device,
                         options, pre_vm_state, attach)

        if action_twice:
            if pre_vm_state == "paused":
                if not vm.pause():
                    raise exceptions.TestFail("Cann't pause the domain")
                time.sleep(5)
            attach = True
            device_source = new_iso
            if action_twice == "--eject ":
                #options_twice += " --force "
                source = ""
                attach = False
            else:
                source = device_source
            all_options = action_twice + options_twice + " " + source
            time.sleep(5)
            ret = virsh.change_media(vm_ref, target_device, all_options,
                                     ignore_status=True, debug=True)
            status_error = False
            if pre_vm_state == "shutoff":
                if options_twice.count("live"):
                    status_error = True
            elif pre_vm_state == "transient":
                if options_twice.count("config"):
                    status_error = True

            if action_twice == "--insert ":
                if pre_vm_state in ["running", "paused"]:
                    if options in ["--force", "--current", "", "--live"]:
                        if options_twice.count("config"):
                            status_error = True
                    elif options == "--config":
                        if options_twice in ["--force", "--current", ""]:
                            status_error = True
                        elif options_twice.count("live"):
                            status_error = True
                elif pre_vm_state == "transient":
                    if ret.exit_status:
                        status_error = True
                elif pre_vm_state == "shutoff":
                    if options.count("live"):
                        status_error = True
            if vm.is_paused():
                vm.resume()
                vm.wait_for_login().close()
                # For a paused vm, change_media for an eject/update operation
                # must be executed again for it to take effect
                if ret.exit_status and not action_twice.count("insert"):
                    ret = virsh.change_media(vm_ref, target_device, all_options,
                                             ignore_status=True, debug=True)
            if not status_error and ret.exit_status:
                raise exceptions.TestFail("Please check: Bug 1289069 - Ejecting "
                                          "locked cdrom tray using update-device"
                                          " fails but next try succeeds")
            libvirt.check_exit_status(ret, status_error)
            if not ret.exit_status:
                check_result(vm_name, device_source, device_type, target_device,
                             options_twice, pre_vm_state, attach)

        # Try to start vm.
        if vm.is_dead():
            vm.start()
            vm.wait_for_login().close()
    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Recover xml of vm.
        vmxml_backup.sync()
        # Remove disks
        if os.path.exists(init_iso):
            os.remove(init_iso)
        if os.path.exists(old_iso):
            os.remove(old_iso)
        if os.path.exists(new_iso):
            os.remove(new_iso)
Example #6
def run(test, params, env):
    """
    Test command: virsh change-media.

    The command changes the media used by CD or floppy drives.

    Test steps:
    1. Prepare test environment.
    2. Perform virsh change-media operation.
    3. Recover test environment.
    4. Confirm the test result.
    """
    def is_attached(vmxml_devices, disk_type, source_file, target_dev):
        """
        Check attached device and disk exist or not.

        :param vmxml_devices: VMXMLDevices instance
        :param disk_type: disk's device type: cdrom or floppy
        :param source_file : disk's source file to check
        :param target_dev : target device name
        :return: True if a disk with the given source file and target
                 is found, False otherwise
        """
        disks = vmxml_devices.by_device_tag('disk')
        for disk in disks:
            if disk.device != disk_type:
                continue
            if disk.target['dev'] != target_dev:
                continue
            if disk.xmltreefile.find('source') is not None:
                if disk.source.attrs['file'] != source_file:
                    continue
            else:
                continue
            # All three conditions met
            logging.debug("Find %s in given disk XML", source_file)
            return True
        logging.debug("Not find %s in gievn disk XML", source_file)
        return False

    def check_result(vm_name,
                     disk_source,
                     disk_type,
                     disk_target,
                     flags,
                     vm_state,
                     attach=True):
        """
        Check the test result of attach/detach-device command.
        """
        active_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        active_attached = is_attached(active_vmxml.devices, disk_type,
                                      disk_source, disk_target)
        if vm_state != "transient":
            inactive_vmxml = vm_xml.VMXML.new_from_dumpxml(
                vm_name, options="--inactive")
            inactive_attached = is_attached(inactive_vmxml.devices, disk_type,
                                            disk_source, disk_target)

        if flags.count("config") and not flags.count("live"):
            if vm_state != "transient":
                if attach:
                    if not inactive_attached:
                        test.fail("Inactive domain XML not updated"
                                  " when --config options used for"
                                  " attachment")
                    if vm_state != "shutoff":
                        if active_attached:
                            test.fail("Active domain XML updated"
                                      " when --config options used"
                                      " for attachment")
                else:
                    if inactive_attached:
                        test.fail("Inactive domain XML not updated"
                                  " when --config options used for"
                                  " detachment")
                    if vm_state != "shutoff":
                        if not active_attached:
                            test.fail("Active domain XML updated"
                                      " when --config options used"
                                      " for detachment")
        elif flags.count("live") and not flags.count("config"):
            if attach:
                if vm_state in ["paused", "running", "transient"]:
                    if not active_attached:
                        test.fail("Active domain XML not updated"
                                  " when --live options used for"
                                  " attachment")
                if vm_state in ["paused", "running"]:
                    if inactive_attached:
                        test.fail("Inactive domain XML updated"
                                  " when --live options used for"
                                  " attachment")
            else:
                if vm_state in ["paused", "running", "transient"]:
                    if active_attached:
                        test.fail("Active domain XML not updated"
                                  " when --live options used for"
                                  " detachment")
                if vm_state in ["paused", "running"]:
                    if not inactive_attached:
                        test.fail("Inactive domain XML updated"
                                  " when --live options used for"
                                  " detachment")
        elif flags.count("live") and flags.count("config"):
            if attach:
                if vm_state in ["paused", "running"]:
                    if not active_attached:
                        test.fail("Active domain XML not updated"
                                  " when --live --config options"
                                  " used for attachment")
                    if not inactive_attached:
                        test.fail("Inactive domain XML not updated"
                                  " when --live --config options "
                                  "used for attachment")
            else:
                if vm_state in ["paused", "running"]:
                    if active_attached:
                        test.fail("Active domain XML not updated "
                                  "when --live --config options "
                                  "used for detachment")
                    if inactive_attached:
                        test.fail("Inactive domain XML not updated"
                                  " when --live --config options "
                                  "used for detachment")
        elif flags.count("current") or flags == "":
            if attach:
                if vm_state in ["paused", "running", "transient"]:
                    if not active_attached:
                        test.fail("Active domain XML not updated"
                                  " when --current options used "
                                  "for attachment")
                if vm_state in ["paused", "running"]:
                    if inactive_attached:
                        test.fail("Inactive domain XML updated "
                                  "when --current options used "
                                  "for live attachment")
                if vm_state == "shutoff" and not inactive_attached:
                    test.fail("Inactive domain XML not updated "
                              "when --current options used for "
                              "attachment")
            else:
                if vm_state in ["paused", "running", "transient"]:
                    if active_attached:
                        test.fail("Active domain XML not updated"
                                  " when --current options used "
                                  "for detachment")
                if vm_state in ["paused", "running"]:
                    if not inactive_attached:
                        test.fail("Inactive domain XML updated "
                                  "when --current options used "
                                  "for live detachment")
                if vm_state == "shutoff" and inactive_attached:
                    test.fail("Inactive domain XML not updated "
                              "when --current options used for "
                              "detachment")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vm_ref = params.get("change_media_vm_ref")
    action = params.get("change_media_action")
    action_twice = params.get("change_media_action_twice", "")
    pre_vm_state = params.get("pre_vm_state")
    options = params.get("change_media_options")
    options_twice = params.get("change_media_options_twice", "")
    device_type = params.get("change_media_device_type", "cdrom")
    target_device = params.get("change_media_target_device", "hdc")
    init_iso_name = params.get("change_media_init_iso")
    old_iso_name = params.get("change_media_old_iso")
    new_iso_name = params.get("change_media_new_iso")
    virsh_dargs = {"debug": True, "ignore_status": True}

    if device_type not in ['cdrom', 'floppy']:
        test.cancel("Got a invalid device type:/n%s" % device_type)

    # Backup for recovery.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    old_iso = os.path.join(data_dir.get_tmp_dir(), old_iso_name)
    new_iso = os.path.join(data_dir.get_tmp_dir(), new_iso_name)

    if vm_ref == "name":
        vm_ref = vm_name

    if vm.is_alive():
        vm.destroy(gracefully=False)

    try:
        if not init_iso_name:
            init_iso = ""
        else:
            init_iso = os.path.join(data_dir.get_tmp_dir(), init_iso_name)

        # Prepare test files.
        libvirt.create_local_disk("iso", old_iso)
        libvirt.create_local_disk("iso", new_iso)

        # Check domain's disk device
        disk_blk = vm_xml.VMXML.get_disk_blk(vm_name)
        logging.info("disk_blk %s", disk_blk)
        if target_device not in disk_blk:
            if vm.is_alive():
                virsh.destroy(vm_name)
            logging.info("Adding device")
            libvirt.create_local_disk("iso", init_iso)
            disk_params = {
                "disk_type": "file",
                "device_type": device_type,
                "driver_name": "qemu",
                "driver_type": "raw",
                "target_bus": "ide",
                "readonly": "yes"
            }
            libvirt.attach_additional_device(vm_name, target_device, init_iso,
                                             disk_params)

        vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        # Turn VM into certain state.
        if pre_vm_state == "running":
            logging.info("Starting %s..." % vm_name)
            if vm.is_dead():
                vm.start()
                vm.wait_for_login().close()
        elif pre_vm_state == "shutoff":
            logging.info("Shuting down %s..." % vm_name)
            if vm.is_alive():
                vm.destroy(gracefully=False)
        elif pre_vm_state == "paused":
            logging.info("Pausing %s..." % vm_name)
            if vm.is_dead():
                vm.start()
                vm.wait_for_login().close()
            if not vm.pause():
                test.cancel("Cann't pause the domain")
            time.sleep(5)
        elif pre_vm_state == "transient":
            logging.info("Creating %s..." % vm_name)
            vm.undefine()
            if virsh.create(vmxml_for_test.xml, **virsh_dargs).exit_status:
                vmxml_backup.define()
                test.cancel("Cann't create the domain")

        # Libvirt will ignore --source when action is eject
        attach = True
        device_source = old_iso
        if action == "--eject ":
            source = ""
            attach = False
        else:
            source = device_source

        all_options = action + options + " " + source
        ret = virsh.change_media(vm_ref,
                                 target_device,
                                 all_options,
                                 ignore_status=True,
                                 debug=True)
        status_error = False
        if pre_vm_state == "shutoff":
            if options.count("live"):
                status_error = True
        elif pre_vm_state == "transient":
            if options.count("config"):
                status_error = True

        if vm.is_paused():
            vm.resume()
            vm.wait_for_login().close()
            # For a paused vm, change_media for an eject/update operation
            # must be executed again for it to take effect
            if ret.exit_status:
                if not action.count("insert") and not options.count("force"):
                    ret = virsh.change_media(vm_ref,
                                             target_device,
                                             all_options,
                                             ignore_status=True,
                                             debug=True)
        if not status_error and ret.exit_status:
            test.fail("Change media failed: %s" % ret.stderr.strip())
        libvirt.check_exit_status(ret, status_error)
        if not ret.exit_status:
            check_result(vm_name, device_source, device_type, target_device,
                         options, pre_vm_state, attach)

        if action_twice:
            if pre_vm_state == "paused":
                if not vm.pause():
                    test.fail("Cann't pause the domain")
                time.sleep(5)
            attach = True
            device_source = new_iso
            if action_twice == "--eject ":
                #options_twice += " --force "
                source = ""
                attach = False
            else:
                source = device_source
            all_options = action_twice + options_twice + " " + source
            time.sleep(5)
            ret = virsh.change_media(vm_ref,
                                     target_device,
                                     all_options,
                                     ignore_status=True,
                                     debug=True)
            status_error = False
            if pre_vm_state == "shutoff":
                if options_twice.count("live"):
                    status_error = True
            elif pre_vm_state == "transient":
                if options_twice.count("config"):
                    status_error = True

            if action_twice == "--insert ":
                if pre_vm_state in ["running", "paused"]:
                    if options in ["--force", "--current", "", "--live"]:
                        if options_twice.count("config"):
                            status_error = True
                    elif options == "--config":
                        if options_twice in ["--force", "--current", ""]:
                            status_error = True
                        elif options_twice.count("live"):
                            status_error = True
                elif pre_vm_state == "transient":
                    if ret.exit_status:
                        status_error = True
                elif pre_vm_state == "shutoff":
                    if options.count("live"):
                        status_error = True
            if vm.is_paused():
                vm.resume()
                vm.wait_for_login().close()
                # For a paused vm, change_media for an eject/update operation
                # must be executed again for it to take effect
                if ret.exit_status and not action_twice.count("insert"):
                    ret = virsh.change_media(vm_ref,
                                             target_device,
                                             all_options,
                                             ignore_status=True,
                                             debug=True)
            if not status_error and ret.exit_status:
                test.fail("Change media failed: %s" % ret.stderr.strip())
            libvirt.check_exit_status(ret, status_error)
            if not ret.exit_status:
                check_result(vm_name, device_source, device_type,
                             target_device, options_twice, pre_vm_state,
                             attach)

        # Try to start vm.
        if vm.is_dead():
            vm.start()
            vm.wait_for_login().close()
    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Recover xml of vm.
        vmxml_backup.sync()
        # Remove disks
        if os.path.exists(init_iso):
            os.remove(init_iso)
        if os.path.exists(old_iso):
            os.remove(old_iso)
        if os.path.exists(new_iso):
            os.remove(new_iso)
Example #8
def run(test, params, env):
    """
    Test virsh migrate when disks are virtio-scsi.
    """
    def check_vm_state(vm, state):
        """
        Return True if vm is in the correct state.
        """
        try:
            actual_state = vm.state()
        except process.CmdError:
            return False
        if actual_state == state:
            return True
        else:
            return False

    def check_disks_in_vm(vm, vm_ip, disks_list=None, runner=None):
        """
        Check disks attached to vm.
        """
        disks_list = disks_list or []
        fail_list = []
        while len(disks_list):
            disk = disks_list.pop()
            if runner:
                check_cmd = ("ssh %s \"dd if=/dev/urandom of=%s bs=1 "
                             "count=1024\"" % (vm_ip, disk))
                try:
                    logging.debug(runner.run(check_cmd))
                    continue
                except process.CmdError as detail:
                    logging.debug("Remote checking failed:%s", detail)
                    fail_list.append(disk)
            else:
                check_cmd = "dd if=/dev/urandom of=%s bs=1 count=1024"
                session = vm.wait_for_login()
                cs = session.cmd_status(check_cmd)
                if cs:
                    fail_list.append(disk)
                session.close()
        if len(fail_list):
            test.fail("Checking attached devices failed:%s" % fail_list)

    def get_disk_id(device):
        """
        Return the /dev/disk/by-id/ path matching the given device.
        """
        output = process.run("ls /dev/disk/by-id/", shell=True).stdout_text
        for line in output.splitlines():
            disk_ids = line.split()
            for disk_id in disk_ids:
                disk = os.path.basename(
                    process.run("readlink /dev/disk/by-id/%s" % disk_id,
                                shell=True).stdout_text)
                if disk == os.path.basename(device):
                    return os.path.join("/dev/disk/by-id", disk_id)
        return None
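
    # For example (hypothetical entry): if /dev/disk/by-id/wwn-0xEXAMPLE links
    # to ../../sdb, get_disk_id("/dev/sdb") returns
    # "/dev/disk/by-id/wwn-0xEXAMPLE", which resolves on any host seeing the LUN.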

    def cleanup_ssh_config(vm):
        session = vm.wait_for_login()
        session.cmd("rm -f ~/.ssh/authorized_keys")
        session.cmd("rm -f ~/.ssh/id_rsa*")
        session.close()

    vm = env.get_vm(params.get("migrate_main_vm"))
    source_type = params.get("disk_source_type", "file")
    device_type = params.get("disk_device_type", "disk")
    disk_format = params.get("disk_format_type", "raw")
    if source_type == "file":
        params['added_disk_type'] = "file"
    else:
        params['added_disk_type'] = "block"
        block_device = params.get("disk_block_device", "/dev/EXAMPLE")
        if block_device.count("EXAMPLE"):
            # Prepare host parameters
            local_host = params.get("migrate_source_host", "LOCAL.EXAMPLE")
            remote_host = params.get("migrate_dest_host", "REMOTE.EXAMPLE")
            remote_user = params.get("migrate_dest_user", "root")
            remote_passwd = params.get("migrate_dest_pwd")
            if remote_host.count("EXAMPLE") or local_host.count("EXAMPLE"):
                test.cancel("Config remote or local host first.")
            rdm_params = {
                'remote_ip': remote_host,
                'remote_user': remote_user,
                'remote_pwd': remote_passwd
            }
            rdm = utils_test.RemoteDiskManager(rdm_params)
            # Try to build an iscsi device
            # For local, target is a device name
            target = utlv.setup_or_cleanup_iscsi(
                is_setup=True, is_login=True, emulated_image="emulated-iscsi")
            logging.debug("Created target: %s", target)
            try:
                # Attach this iscsi device both local and remote
                remote_device = rdm.iscsi_login_setup(local_host, target)
            except Exception as detail:
                utlv.setup_or_cleanup_iscsi(is_setup=False)
                test.error("Attach iscsi device on remote failed:%s" % detail)

            # Use id to get same path on local and remote
            block_device = get_disk_id(target)
            if block_device is None:
                rdm.iscsi_login_setup(local_host, target, is_login=False)
                utlv.setup_or_cleanup_iscsi(is_setup=False)
                test.error("Set iscsi device couldn't find id?")

    srcuri = params.get("virsh_migrate_srcuri")
    dsturi = params.get("virsh_migrate_dsturi")
    remote_ip = params.get("remote_ip")
    username = params.get("remote_user", "root")
    host_pwd = params.get("remote_pwd")
    # Connection to remote, init here for cleanup
    runner = None
    # Identify easy config. mistakes early
    warning_text = ("Migration VM %s URI %s appears problematic "
                    "this may lead to migration problems. "
                    "Consider specifying vm.connect_uri using "
                    "fully-qualified network-based style.")

    if srcuri.count('///') or srcuri.count('EXAMPLE'):
        test.cancel(warning_text % ('source', srcuri))

    if dsturi.count('///') or dsturi.count('EXAMPLE'):
        test.cancel(warning_text % ('destination', dsturi))

    # Config auto-login to remote host for migration
    ssh_key.setup_ssh_key(remote_ip, username, host_pwd)

    sys_image = vm.get_first_disk_devices()
    sys_image_source = sys_image["source"]
    sys_image_info = utils_misc.get_image_info(sys_image_source)
    logging.debug("System image information:\n%s", sys_image_info)
    sys_image_fmt = sys_image_info["format"]
    created_img_path = os.path.join(os.path.dirname(sys_image_source),
                                    "vsmimages")

    migrate_in_advance = "yes" == params.get("migrate_in_advance", "no")

    status_error = "yes" == params.get("status_error", "no")
    if source_type == "file" and device_type == "lun":
        status_error = True

    try:
        # For safety and simplicity, define a new vm for the test
        new_vm_name = "%s_vsmtest" % vm.name
        mig = utlv.MigrationTest()
        if vm.is_alive():
            vm.destroy()
        utlv.define_new_vm(vm.name, new_vm_name)
        vm = libvirt_vm.VM(new_vm_name, vm.params, vm.root_dir,
                           vm.address_cache)

        # Change the disk of the vm to shared disk
        # Detach exist devices
        devices = vm.get_blk_devices()
        for device in devices:
            s_detach = virsh.detach_disk(vm.name,
                                         device,
                                         "--config",
                                         debug=True)
            if s_detach.exit_status:
                test.error("Detach %s failed before test." % device)

        # Attach system image as vda
        # Then added scsi disks will be sda,sdb...
        attach_args = "--subdriver %s --config" % sys_image_fmt
        virsh.attach_disk(vm.name,
                          sys_image_source,
                          "vda",
                          attach_args,
                          debug=True)
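        # The call above corresponds to the CLI invocation:
        #   virsh attach-disk <vm> <sys_image_source> vda \
        #       --subdriver <sys_image_fmt> --config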

        vms = [vm]

        def start_check_vm(vm):
            try:
                vm.start()
            except virt_vm.VMStartError as detail:
                if status_error:
                    logging.debug("Expected failure:%s", detail)
                    return None, None
                else:
                    raise
            vm.wait_for_login()

            # Confirm VM can be accessed through network.
            # And this ip will be used on remote after migration
            vm_ip = vm.get_address()
            vm_pwd = params.get("password")
            s_ping, o_ping = utils_test.ping(vm_ip, count=2, timeout=60)
            logging.info(o_ping)
            if s_ping != 0:
                test.fail("%s did not respond after several "
                          "seconds with attaching new devices." % vm.name)
            return vm_ip, vm_pwd

        options = "--live --unsafe"
        # Do migration before attaching new devices
        if migrate_in_advance:
            vm_ip, vm_pwd = start_check_vm(vm)
            cleanup_ssh_config(vm)
            mig_thread = threading.Thread(target=mig.thread_func_migration,
                                          args=(vm, dsturi, options))
            mig_thread.start()
            # Make sure migration is running
            time.sleep(2)

        # Attach other disks
        params['added_disk_target'] = "scsi"
        params['target_bus'] = "scsi"
        params['device_type'] = device_type
        params['type_name'] = source_type
        params['added_disk_format'] = disk_format
        if migrate_in_advance:
            params["attach_disk_config"] = "no"
            attach_disk_config = False
        else:
            params["attach_disk_config"] = "yes"
            attach_disk_config = True
        try:
            if source_type == "file":
                utlv.attach_disks(vm, "%s/image" % created_img_path, None,
                                  params)
            else:
                ret = utlv.attach_additional_device(vm.name,
                                                    "sda",
                                                    block_device,
                                                    params,
                                                    config=attach_disk_config)
                if ret.exit_status:
                    test.fail(ret)
        except (exceptions.TestFail, process.CmdError) as detail:
            if status_error:
                logging.debug("Expected failure:%s", detail)
                return
            else:
                raise

        if migrate_in_advance:
            mig_thread.join(60)
            if mig_thread.is_alive():
                mig.RET_LOCK.acquire()
                mig.MIGRATION = False
                mig.RET_LOCK.release()
        else:
            vm_ip, vm_pwd = start_check_vm(vm)

        # Got the expected failure when starting the vm; end the test
        if vm_ip is None and status_error:
            return

        # Start checking before migration and go on checking after migration
        disks = []
        for target in list(vm.get_disk_devices().keys()):
            if target != "vda":
                disks.append("/dev/%s" % target)

        checked_count = int(params.get("checked_count", 0))
        disks_before = disks[:(checked_count // 2)]
        disks_after = disks[(checked_count // 2):checked_count]
        logging.debug(
            "Disks to be checked:\nBefore migration:%s\n"
            "After migration:%s", disks_before, disks_after)

        options = "--live --unsafe"
        if not migrate_in_advance:
            cleanup_ssh_config(vm)
            mig.do_migration(vms, None, dsturi, "orderly", options, 120)

        if mig.RET_MIGRATION:
            utils_test.check_dest_vm_network(vm, vm_ip, remote_ip, username,
                                             host_pwd)
            runner = remote.RemoteRunner(host=remote_ip,
                                         username=username,
                                         password=host_pwd)
            # After migration, config autologin to vm
            ssh_key.setup_remote_ssh_key(vm_ip, "root", vm_pwd)
            check_disks_in_vm(vm, vm_ip, disks_after, runner)

            if migrate_in_advance:
                test.fail("Migration before attaching successfully, "
                          "but not expected.")

    finally:
        # Cleanup remote vm
        if srcuri != dsturi:
            mig.cleanup_dest_vm(vm, srcuri, dsturi)
        # Cleanup created vm anyway
        if vm.is_alive():
            vm.destroy(gracefully=False)
        virsh.undefine(new_vm_name)

        # Cleanup iscsi device for block if it is necessary
        if source_type == "block":
            if params.get("disk_block_device",
                          "/dev/EXAMPLE").count("EXAMPLE"):
                rdm.iscsi_login_setup(local_host, target, is_login=False)
                utlv.setup_or_cleanup_iscsi(is_setup=False,
                                            emulated_image="emulated-iscsi")

        if runner:
            runner.session.close()
        process.run("rm -f %s/*vsmtest" % created_img_path, shell=True)
def run(test, params, env):
    """
    Test virsh migrate when disks are virtio-scsi.
    """

    def check_vm_state(vm, state):
        """
        Return True if vm is in the correct state.
        """
        try:
            actual_state = vm.state()
        except process.CmdError:
            return False
        if actual_state == state:
            return True
        else:
            return False

    def check_disks_in_vm(vm, vm_ip, disks_list=[], runner=None):
        """
        Check disks attached to vm.
        """
        fail_list = []
        while len(disks_list):
            disk = disks_list.pop()
            if runner:
                check_cmd = ("ssh %s \"dd if=/dev/urandom of=%s bs=1 "
                             "count=1024\"" % (vm_ip, disk))
                try:
                    logging.debug(runner.run(check_cmd))
                    continue
                except process.CmdError as detail:
                    logging.debug("Remote checking failed:%s", detail)
                    fail_list.append(disk)
            else:
                check_cmd = "dd if=/dev/urandom of=%s bs=1 count=1024"
                session = vm.wait_for_login()
                cs = session.cmd_status(check_cmd)
                if cs:
                    fail_list.append(disk)
                session.close()
        if len(fail_list):
            test.fail("Checking attached devices failed:%s"
                      % fail_list)

    def get_disk_id(device):
        """
        Show disk by id.
        """
        output = process.run("ls /dev/disk/by-id/", shell=True).stdout_text
        for line in output.splitlines():
            disk_ids = line.split()
            for disk_id in disk_ids:
                disk = os.path.basename(
                    process.run("readlink %s" % disk_id, shell=True).stdout_text)
                if disk == os.path.basename(device):
                    return disk_id
        return None

    def cleanup_ssh_config(vm):
        session = vm.wait_for_login()
        session.cmd("rm -f ~/.ssh/authorized_keys")
        session.cmd("rm -f ~/.ssh/id_rsa*")
        session.close()

    vm = env.get_vm(params.get("migrate_main_vm"))
    source_type = params.get("disk_source_type", "file")
    device_type = params.get("disk_device_type", "disk")
    disk_format = params.get("disk_format_type", "raw")
    if source_type == "file":
        params['added_disk_type'] = "file"
    else:
        params['added_disk_type'] = "block"
        block_device = params.get("disk_block_device", "/dev/EXAMPLE")
        if block_device.count("EXAMPLE"):
            # Prepare host parameters
            local_host = params.get("migrate_source_host", "LOCAL.EXAMPLE")
            remote_host = params.get("migrate_dest_host", "REMOTE.EXAMPLE")
            remote_user = params.get("migrate_dest_user", "root")
            remote_passwd = params.get("migrate_dest_pwd")
            if remote_host.count("EXAMPLE") or local_host.count("EXAMPLE"):
                test.cancel("Config remote or local host first.")
            rdm_params = {'remote_ip': remote_host,
                          'remote_user': remote_user,
                          'remote_pwd': remote_passwd}
            rdm = utils_test.RemoteDiskManager(rdm_params)
            # Try to build an iscsi device
            # For local, target is a device name
            target = utlv.setup_or_cleanup_iscsi(is_setup=True, is_login=True,
                                                 emulated_image="emulated-iscsi")
            logging.debug("Created target: %s", target)
            try:
                # Attach this iscsi device both local and remote
                remote_device = rdm.iscsi_login_setup(local_host, target)
            except Exception as detail:
                utlv.setup_or_cleanup_iscsi(is_setup=False)
                test.error("Attach iscsi device on remote failed:%s"
                           % detail)

            # Use id to get same path on local and remote
            block_device = get_disk_id(target)
            if block_device is None:
                rdm.iscsi_login_setup(local_host, target, is_login=False)
                utlv.setup_or_cleanup_iscsi(is_setup=False)
                test.error("Set iscsi device couldn't find id?")

    srcuri = params.get("virsh_migrate_srcuri")
    dsturi = params.get("virsh_migrate_dsturi")
    remote_ip = params.get("remote_ip")
    username = params.get("remote_user", "root")
    host_pwd = params.get("remote_pwd")
    # Connection to remote, init here for cleanup
    runner = None
    # Identify easy config. mistakes early
    warning_text = ("Migration VM %s URI %s appears problematic "
                    "this may lead to migration problems. "
                    "Consider specifying vm.connect_uri using "
                    "fully-qualified network-based style.")

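    # A URI containing '///' (e.g. qemu:///system) names the local
    # hypervisor only, which cannot work for a cross-host migration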
    if srcuri.count('///') or srcuri.count('EXAMPLE'):
        test.cancel(warning_text % ('source', srcuri))

    if dsturi.count('///') or dsturi.count('EXAMPLE'):
        test.cancel(warning_text % ('destination', dsturi))

    # Config auto-login to remote host for migration
    ssh_key.setup_ssh_key(remote_ip, username, host_pwd)

    sys_image = vm.get_first_disk_devices()
    sys_image_source = sys_image["source"]
    sys_image_info = utils_misc.get_image_info(sys_image_source)
    logging.debug("System image information:\n%s", sys_image_info)
    sys_image_fmt = sys_image_info["format"]
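    # Directory used for the disk images created by the attach_disks()
    # call further below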
    created_img_path = os.path.join(os.path.dirname(sys_image_source),
                                    "vsmimages")

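    # With migrate_in_advance, migration is started first and the new
    # disks are hot-attached while it is still running; such a migration
    # is expected to be cancelled rather than to finish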
    migrate_in_advance = "yes" == params.get("migrate_in_advance", "no")

    status_error = "yes" == params.get("status_error", "no")
    if source_type == "file" and device_type == "lun":
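        # A 'lun' device requires a block-backed source, so attaching a
        # file-backed source as a lun is expected to fail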
        status_error = True

    try:
        # For safety and simplicity, define a new vm for the test
        new_vm_name = "%s_vsmtest" % vm.name
        mig = utlv.MigrationTest()
        if vm.is_alive():
            vm.destroy()
        utlv.define_new_vm(vm.name, new_vm_name)
        vm = libvirt_vm.VM(new_vm_name, vm.params, vm.root_dir,
                           vm.address_cache)

        # Change the disk of the vm to shared disk
        # Detach existing devices first
        devices = vm.get_blk_devices()
        for device in devices:
            s_detach = virsh.detach_disk(vm.name, device, "--config",
                                         debug=True)
            if s_detach.exit_status:
                test.error("Detach %s failed before test." % device)

        # Attach system image as vda
        # Then added scsi disks will be sda,sdb...
        attach_args = "--subdriver %s --config" % sys_image_fmt
        ret = virsh.attach_disk(vm.name, sys_image_source, "vda",
                                attach_args, debug=True)
        if ret.exit_status:
            test.error("Attach system image as vda failed: %s" % ret)

        vms = [vm]

        def start_check_vm(vm):
            try:
                vm.start()
            except virt_vm.VMStartError as detail:
                if status_error:
                    logging.debug("Expected failure:%s", detail)
                    return None, None
                else:
                    raise
            vm.wait_for_login()

            # Confirm the VM can be accessed through the network;
            # this ip is reused to reach the guest on the remote host
            # after migration
            vm_ip = vm.get_address()
            vm_pwd = params.get("password")
            s_ping, o_ping = utils_test.ping(vm_ip, count=2, timeout=60)
            logging.info(o_ping)
            if s_ping != 0:
                test.fail("%s did not respond after several "
                          "seconds with attaching new devices."
                          % vm.name)
            return vm_ip, vm_pwd

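        # --unsafe skips libvirt's disk cache consistency check, which
        # would otherwise refuse to migrate with the disks attached here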
        options = "--live --unsafe"
        # Do migration before attaching new devices
        if migrate_in_advance:
            vm_ip, vm_pwd = start_check_vm(vm)
            cleanup_ssh_config(vm)
            mig_thread = threading.Thread(target=mig.thread_func_migration,
                                          args=(vm, dsturi, options))
            mig_thread.start()
            # Make sure migration is running
            time.sleep(2)

        # Attach other disks
        params['added_disk_target'] = "scsi"
        params['target_bus'] = "scsi"
        params['device_type'] = device_type
        params['type_name'] = source_type
        params['added_disk_format'] = disk_format
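        # Persistent (--config) device changes are rejected while a
        # migration is in progress, so use a live-only attach in the
        # migrate-in-advance case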
        if migrate_in_advance:
            params["attach_disk_config"] = "no"
            attach_disk_config = False
        else:
            params["attach_disk_config"] = "yes"
            attach_disk_config = True
        try:
            if source_type == "file":
                utlv.attach_disks(vm, "%s/image" % created_img_path,
                                  None, params)
            else:
                ret = utlv.attach_additional_device(vm.name, "sda", block_device,
                                                    params, config=attach_disk_config)
                if ret.exit_status:
                    test.fail("Attach device %s failed: %s"
                              % (block_device, ret))
        except (exceptions.TestFail, process.CmdError) as detail:
            if status_error:
                logging.debug("Expected failure:%s", detail)
                return
            else:
                raise

        if migrate_in_advance:
            mig_thread.join(60)
            if mig_thread.is_alive():
                # Migration thread is still running after the timeout
                mig.RET_LOCK.acquire()
                mig.MIGRATION = False
                mig.RET_LOCK.release()
        else:
            vm_ip, vm_pwd = start_check_vm(vm)

        # The vm failed to start as expected, so end the test here
        if vm_ip is None and status_error:
            return

        # Collect the disks to check before and after migration
        disks = []
        for disk_target in vm.get_disk_devices():
            if disk_target != "vda":
                disks.append("/dev/%s" % disk_target)

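        # Per checked_count, the first half of the disks is verified
        # before migration and the second half after it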
        checked_count = int(params.get("checked_count", 0))
        disks_before = disks[:(checked_count // 2)]
        disks_after = disks[(checked_count // 2):checked_count]
        logging.debug("Disks to be checked:\nBefore migration:%s\n"
                      "After migration:%s", disks_before, disks_after)

        options = "--live --unsafe"
        if not migrate_in_advance:
            cleanup_ssh_config(vm)
            mig.do_migration(vms, None, dsturi, "orderly", options, 120)

        if mig.RET_MIGRATION:
            utils_test.check_dest_vm_network(vm, vm_ip, remote_ip,
                                             username, host_pwd)
            runner = remote.RemoteRunner(host=remote_ip, username=username,
                                         password=host_pwd)
            # After migration, config autologin to vm
            ssh_key.setup_remote_ssh_key(vm_ip, "root", vm_pwd)
            check_disks_in_vm(vm, vm_ip, disks_after, runner)

            if migrate_in_advance:
                test.fail("Migration before attaching successfully, "
                          "but not expected.")

    finally:
        # Cleanup remote vm
        if srcuri != dsturi:
            mig.cleanup_dest_vm(vm, srcuri, dsturi)
        # Cleanup created vm anyway
        if vm.is_alive():
            vm.destroy(gracefully=False)
        virsh.undefine(new_vm_name)

        # Cleanup iscsi device for block if it is necessary
        if source_type == "block":
            if params.get("disk_block_device",
                          "/dev/EXAMPLE").count("EXAMPLE"):
                rdm.iscsi_login_setup(local_host, target, is_login=False)
                utlv.setup_or_cleanup_iscsi(is_setup=False,
                                            emulated_image="emulated-iscsi")

        if runner:
            runner.session.close()
        process.run("rm -f %s/*vsmtest" % created_img_path, shell=True)