Example no. 1
def create_disk_xml():
    """
    Create a disk xml file for attaching to a domain.
    """
    if disk_type == "file":
        process.run("qemu-img create %s %s" % (media_file, image_size), shell=True)
    disk_params = {'device_type': device_type,
                   'type_name': disk_type,
                   'target_dev': target_dev,
                   'target_bus': target_bus}
    if disk_type == "file":
        disk_params_src = {'source_protocol': "file",
                           'source_file': media_file,
                           'source_startupPolicy': startup_policy}
    elif disk_type == "volume":
        disk_params_src = {'source_pool': pool_name,
                           'source_volume': vol_name,
                           'driver_type': 'qcow2',
                           'source_startupPolicy': startup_policy}
        if pool_type == "iscsi":
            disk_params_src.update({'source_mode': "host"})
    disk_params.update(disk_params_src)
    disk_xml = libvirt.create_disk_xml(disk_params)
    shutil.copyfile(disk_xml, disk_xml_file)
    return disk_xml
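The path returned above is typically handed straight to virsh. A minimal hedged sketch, assuming the usual avocado-vt imports and that the module-level variables used by create_disk_xml (disk_type, media_file, and so on) are already set by the test; the domain name is a placeholder:

# Hedged usage sketch, not part of the original test: assumes the avocado-vt
# imports below and a defined domain named "avocado-vt-vm1" (placeholder).
from virttest import virsh
from virttest.utils_test import libvirt

disk_xml = create_disk_xml()                 # path of the generated disk XML file
result = virsh.attach_device("avocado-vt-vm1", disk_xml,
                             flagstr="--config", debug=True)
libvirt.check_exit_status(result)            # complain loudly if the attach failed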
Example no. 2
def create_disk_xml(params):
    """
    Create an XML to be compatible with the create_disk_xml function

    :param params: The instance of avocado params class
    """
    disk_type = params.get('disk_type')
    target_dev = params.get('target_dev', 'vda')
    target_bus = params.get('target_bus', 'virtio')
    driver_type = params.get('driver_type', 'qcow2')
    device_type = params.get('device_type', 'disk')
    disk_params = {'type_name': disk_type,
                   'device_type': device_type,
                   'driver_name': "qemu",
                   'driver_type': driver_type,
                   'target_dev': target_dev,
                   'target_bus': target_bus}
    if disk_type in ('file', 'block'):
        disk_params.update({'source_file': params.get("source_file")})
    elif disk_type == 'network':
        disk_params.update({'source_protocol': params.get('source_protocol'),
                            'source_name': params.get('source_name'),
                            'source_host_name': params.get('source_host_name'),
                            'source_host_port': params.get('source_host_port')})
    return utlv.create_disk_xml(disk_params)
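Since only params.get() with defaults is used, a plain dict can stand in for the avocado params object in a quick hedged sketch; the image path is a placeholder:

# Hedged sketch: a plain dict mimics the avocado params object well enough,
# because only .get() with defaults is used above; the image path is made up.
fake_params = {'disk_type': 'file',
               'target_dev': 'vdb',
               'source_file': '/var/lib/libvirt/images/data.qcow2'}
disk_xml_path = create_disk_xml(fake_params)   # returns the path of the XML file
print(open(disk_xml_path).read())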
Example no. 3
def create_disk_xml(disk_type, device_path, discard_type='ignore',
                    target_dev='sdb', target_bus='scsi'):
    """
    Create an XML that contains disk information for the virsh attach-device command.
    """
    disk_params = {'type_name': disk_type, 'device': "disk",
                   'driver_name': "qemu",
                   'driver_type': "raw", 'driver_discard': discard_type,
                   'target_dev': target_dev, 'target_bus': target_bus,
                   'source_file': device_path}
    return utlv.create_disk_xml(disk_params)
def create_https_cdrom_disk(params):
    """
    Create one HTTPS cdrom disk

    :param params: dict wrapped with params
    """
    default_iso_name = params.get('default_iso_name')
    if 'EXAMPLE_HTTPS_ISO' in params.get("source_name"):
        params.update({'source_name': default_iso_name})

    https_cdrom_disk = libvirt.create_disk_xml(params)
    return https_cdrom_disk
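A hedged call of the discard-aware create_disk_xml helper above (Example no. 3), attaching a raw block device as an extra SCSI disk; the device path and domain name are placeholders:

# Hedged sketch: /dev/sdb and the domain name are placeholders; "unmap"
# passes guest discard requests through to the backing device.
from virttest import virsh

xml_path = create_disk_xml("block", "/dev/sdb", discard_type="unmap",
                           target_dev="sdc", target_bus="scsi")
virsh.attach_device("avocado-vt-vm1", xml_path, flagstr="--live", debug=True)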
        def prepare_disk_xml(test_disk, image_path):
            """
            Prepare disk xml to be hot plugged

            :param test_disk: The vm's disk, such as 'vdb'
            :param image_path: The path to the image
            :return: The xml of the test disk
            """
            test_disk_params = {"device_type": "disk",
                                "type_name": "file",
                                "driver_type": "qcow2",
                                "target_dev": test_disk,
                                "source_file": image_path,
                                "target_bus": "scsi"}
            test_disk_xml = libvirt.create_disk_xml(test_disk_params)
            return test_disk_xml
Example no. 6
def prepare_disk_obj(disk_type, disk_device, driver_name, driver_type,
                     path_to_blk, device_target, target_bus):
    """
    Prepare and return disk xml object
    """
    disk_params = {'type_name': disk_type,
                   'device': disk_device,
                   'driver_name': driver_name,
                   'driver_type': driver_type,
                   'source_file': path_to_blk,
                   'target_dev': device_target,
                   'target_bus': target_bus
                   }
    vd_xml = utlv.create_disk_xml(disk_params)
    vd_xml_str = open(vd_xml).read()
    disk_obj = disk.Disk()
    disk_obj.xml = vd_xml_str
    return disk_obj
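The Disk object returned above can be merged into an inactive domain definition and persisted, mirroring the pattern used later in Example no. 11. A hedged sketch with placeholder device path and domain name:

# Hedged sketch: the block device path and domain name are placeholders;
# new_from_inactive_dumpxml / sync follow the pattern of Example no. 11.
from virttest.libvirt_xml import vm_xml

vd_obj = prepare_disk_obj("block", "disk", "qemu", "raw",
                          "/dev/mapper/mpatha", "vdb", "virtio")
vmxml = vm_xml.VMXML.new_from_inactive_dumpxml("avocado-vt-vm1")
vmxml.devices = vmxml.devices.append(vd_obj)
vmxml.sync()                                   # define the updated domain XML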
Example no. 7
def create_disk_xml(disk_type,
                    device_path,
                    discard_type='ignore',
                    target_dev='sdb',
                    target_bus='scsi'):
    """
    Create an XML that contains disk information for the virsh attach-device command.
    """
    disk_params = {
        'type_name': disk_type,
        'device': "disk",
        'driver_name': "qemu",
        'driver_type': "raw",
        'driver_discard': discard_type,
        'target_dev': target_dev,
        'target_bus': target_bus,
        'source_file': device_path
    }
    return utlv.create_disk_xml(disk_params)
def create_open_tray_cdrom_disk(params):
    """
    Create one open tray cdrom disk

    :param params: dict wrapped with params
    """
    source_file_path = params.get("virt_disk_device_source")
    # Create iso file
    if source_file_path:
        iso_file = "/var/lib/libvirt/images/old_%s.img" % random.choices(
            string.ascii_uppercase)[0]
        process.run("dd if=/dev/urandom of=%s bs=1M count=10" % iso_file,
                    shell=True)
        process.run("mkisofs -o %s %s" % (source_file_path, iso_file),
                    shell=True)
        CLEANUP_FILES.append(iso_file)
        CLEANUP_FILES.append(source_file_path)
    open_tray_cdrom_disk = libvirt.create_disk_xml(params)
    return open_tray_cdrom_disk
Example no. 9
    def add_test_disk_to_vm(self, vm):
        """
        Attach a disk to vm

        :param vm: The vm to attach the disk to
        """
        test_disk_path = os.path.join(self.tmp_dir,
                                      self.test_disk_target + '.qcow2')
        libvirt.create_local_disk('file', test_disk_path,
                                  self.test_disk_size, 'qcow2')
        test_disk_params = {'device_type': 'disk',
                            'type_name': 'file',
                            'driver_type': 'qcow2',
                            'target_dev': self.test_disk_target,
                            'source_file': test_disk_path}
        test_disk_xml = libvirt.create_disk_xml(test_disk_params)
        virsh.attach_device(vm.name, test_disk_xml,
                            flagstr='--config', debug=True)
        self.test_disk_path = test_disk_path
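A hedged counterpart for cleanup, not part of the original class, might detach the disk again and drop the backing file; assuming os and virsh are imported in the original module:

    def remove_test_disk_from_vm(self, vm):
        """
        Hedged cleanup sketch, assumed helper: undo add_test_disk_to_vm by
        detaching the target from the persistent config and deleting the image.
        """
        virsh.detach_disk(vm.name, self.test_disk_target, '--config',
                          debug=True)
        if os.path.exists(self.test_disk_path):
            os.remove(self.test_disk_path)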
Example no. 10
def create_disk(params, test, vm_name, orig_iso, disk_type, target_dev, disk_format="", mode=""):
    """
    Prepare image for attach and attach it to domain

    :param params: parameters from the cfg file
    :param test: avocado test object
    :param vm_name: vm name
    :param orig_iso: disk's source backing file
    :param disk_type: disk's device type: cdrom or floppy
    :param target_dev: disk's target device name
    :param disk_format: disk's target format
    :param mode: readonly or shareable
    """
    slice_test = "yes" == params.get("disk_slice", "yes")
    try:
        # create slice cdrom image for slice test
        if slice_test:
            libvirt.create_local_disk("file", orig_iso, size="10", disk_format=disk_format, extra="-o preallocation=full")
            params["input_source_file"] = orig_iso
            params["disk_slice"] = {"slice": "yes"}
            params["target_dev"] = "sdc"
            disk_xml = libvirt.create_disk_xml(params)
        else:
            with open(orig_iso, 'wb') as _file:
                _file.seek((1024 * 1024) - 1)
                _file.write(str(0).encode())
    except IOError:
        test.fail("Create orig_iso failed!")
    options = "--type %s --sourcetype=file --config" % disk_type
    try:
        if mode:
            options += " --mode %s" % mode
        if slice_test:
            # Use attach_device to add cdrom file with slice to guest
            virsh.attach_device(vm_name, disk_xml, flagstr="--config")
        else:
            virsh.attach_disk(vm_name, orig_iso, target_dev, options)
    except Exception:
        os.remove(orig_iso)
        test.fail("Failed to attach")
Example no. 11
def run(test, params, env):
    """
    1. prepare an FC LUN with one of the following methods
        - create a scsi pool&vol
        - create a vhba
    2. prepare the virtual disk xml, as one of the following
        - source = /dev/disk/by-path
        - source = /dev/mapper/mpathX
        - source = pool&vol format
    3. start a vm with above disk as vdb
    4. create disk-only snapshot of vdb
    5. check the snapshot-list and snapshot file's existence
    6. mount vdb and touch file to it
    7. revert the snapshot and check file's existence
    8. delete snapshot
    9. cleanup env.
    """
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    wwpn = params.get("wwpn", "WWPN_EXAMPLE")
    wwnn = params.get("wwnn", "WWNN_EXAMPLE")
    disk_device = params.get("disk_device", "disk")
    disk_type = params.get("disk_type", "file")
    disk_size = params.get("disk_size", "100M")
    device_target = params.get("device_target", "vdb")
    driver_name = params.get("driver_name", "qemu")
    driver_type = params.get("driver_type", "raw")
    target_bus = params.get("target_bus", "virtio")
    vd_format = params.get("vd_format", "")
    snapshot_dir = params.get("snapshot_dir", "/tmp")
    snapshot_name = params.get("snapshot_name", "s1")
    pool_name = params.get("pool_name", "")
    pool_target = params.get("pool_target", "/dev")
    snapshot_disk_only = "yes" == params.get("snapshot_disk_only", "no")
    new_vhbas = []
    current_vhbas = []
    new_vhba = []
    path_to_blk = ""
    lun_sl = []
    new_disk = ""
    pool_ins = None
    old_mpath_conf = ""
    mpath_conf_path = "/etc/multipath.conf"
    original_mpath_conf_exist = os.path.exists(mpath_conf_path)

    vm = env.get_vm(vm_name)
    online_hbas = utils_npiv.find_hbas("hba")
    if not online_hbas:
        raise exceptions.TestSkipError("There is no online hba cards.")
    old_mpath_conf = utils_npiv.prepare_multipath_conf(conf_path=mpath_conf_path,
                                                       replace_existing=True)
    first_online_hba = online_hbas[0]
    old_vhbas = utils_npiv.find_hbas("vhba")
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    virt_vm = libvirt_vm.VM(vm_name, vm.params, vm.root_dir,
                            vm.address_cache)
    old_disks = virt_vm.get_disks()

    if vm.is_alive():
        vm.destroy(gracefully=False)
    if pool_name:
        pool_ins = libvirt_storage.StoragePool()
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()
    try:
        # prepare a fc lun
        if vd_format in ['scsi_vol']:
            if pool_ins.pool_exists(pool_name):
                raise exceptions.TestFail("Pool %s already exist" % pool_name)
            prepare_scsi_pool(pool_name, wwnn, wwpn,
                              first_online_hba, pool_target)
            utils_misc.wait_for(lambda: utils_npiv.is_vhbas_added(old_vhbas),
                                timeout=_TIMEOUT)
            if not utils_npiv.is_vhbas_added(old_vhbas):
                raise exceptions.TestFail("vHBA not successfully created")
            current_vhbas = utils_npiv.find_hbas("vhba")
            new_vhba = list(set(current_vhbas).difference(
                set(old_vhbas)))[0]
            new_vhbas.append(new_vhba)
            new_vhba_scsibus = re.sub(r"\D", "", new_vhba)
            utils_misc.wait_for(lambda: get_blks_by_scsi(new_vhba_scsibus),
                                timeout=_TIMEOUT)
            new_blks = get_blks_by_scsi(new_vhba_scsibus)
            if not new_blks:
                raise exceptions.TestFail("block device not found with scsi_%s",
                                          new_vhba_scsibus)
            first_blk_dev = new_blks[0]
            utils_misc.wait_for(
                lambda: get_symbols_by_blk(first_blk_dev),
                timeout=_TIMEOUT)
            lun_sl = get_symbols_by_blk(first_blk_dev)
            if not lun_sl:
                raise exceptions.TestFail("lun symbolic links not found under "
                                          "/dev/disk/by-path/ for blk dev %s" %
                                          first_blk_dev)
            lun_dev = lun_sl[0]
            path_to_blk = os.path.join(_BY_PATH_DIR, lun_dev)
        elif vd_format in ['mpath', 'by_path']:
            old_mpath_devs = utils_npiv.find_mpath_devs()
            new_vhba = utils_npiv.nodedev_create_from_xml(
                    {"nodedev_parent": first_online_hba,
                     "scsi_wwnn": wwnn,
                     "scsi_wwpn": wwpn})
            utils_misc.wait_for(
                lambda: utils_npiv.is_vhbas_added(old_vhbas),
                timeout=_TIMEOUT*2)
            if not new_vhba:
                raise exceptions.TestFail("vHBA not sucessfully generated.")
            new_vhbas.append(new_vhba)
            if vd_format == "mpath":
                utils_misc.wait_for(
                    lambda: utils_npiv.is_mpath_devs_added(old_mpath_devs),
                    timeout=_TIMEOUT*5)
                if not utils_npiv.is_mpath_devs_added(old_mpath_devs):
                    raise exceptions.TestFail("mpath dev not generated.")
                cur_mpath_devs = utils_npiv.find_mpath_devs()
                new_mpath_devs = list(set(cur_mpath_devs).difference(
                    set(old_mpath_devs)))
                logging.debug("The newly added mpath dev is: %s",
                              new_mpath_devs)
                path_to_blk = "/dev/mapper/" + new_mpath_devs[0]
            elif vd_format == "by_path":
                new_vhba_scsibus = re.sub(r"\D", "", new_vhba)
                utils_misc.wait_for(lambda: get_blks_by_scsi(new_vhba_scsibus),
                                    timeout=_TIMEOUT)
                new_blks = get_blks_by_scsi(new_vhba_scsibus)
                if not new_blks:
                    raise exceptions.TestFail("blk dev not found with scsi_%s",
                                              new_vhba_scsibus)
                first_blk_dev = new_blks[0]
                utils_misc.wait_for(
                    lambda: get_symbols_by_blk(first_blk_dev),
                    timeout=_TIMEOUT)
                lun_sl = get_symbols_by_blk(first_blk_dev)
                if not lun_sl:
                    raise exceptions.TestFail("lun symbolic links not found in "
                                              "/dev/disk/by-path/ for %s" %
                                              first_blk_dev)
                lun_dev = lun_sl[0]
                path_to_blk = os.path.join(_BY_PATH_DIR, lun_dev)
            else:
                pass
        else:
            raise exceptions.TestSkipError("Not provided how to pass"
                                           "virtual disk to VM.")

        # create qcow2 file on the block device with specified size
        if path_to_blk:
            cmd = "qemu-img create -f qcow2 %s %s" % (path_to_blk, disk_size)
            try:
                process.run(cmd, shell=True)
            except process.CmdError as detail:
                raise exceptions.TestFail("Fail to create qcow2 on blk dev: %s"
                                          % detail)
        else:
            raise exceptions.TestFail("Don't have a vaild path to blk dev.")

        # prepare disk xml
        if "vol" in vd_format:
            vol_list = utlv.get_vol_list(pool_name, vol_check=True,
                                         timeout=_TIMEOUT*3)
            test_vol = list(vol_list.keys())[0]
            disk_params = {'type_name': disk_type,
                           'target_dev': device_target,
                           'target_bus': target_bus,
                           'source_pool': pool_name,
                           'source_volume': test_vol,
                           'driver_type': driver_type}
        else:
            disk_params = {'type_name': disk_type,
                           'device': disk_device,
                           'driver_name': driver_name,
                           'driver_type': driver_type,
                           'source_file': path_to_blk,
                           'target_dev': device_target,
                           'target_bus': target_bus}
        if vm.is_alive():
            vm.destroy(gracefully=False)
        new_disk = disk.Disk()
        new_disk.xml = open(utlv.create_disk_xml(disk_params)).read()

        # start vm with the virtual disk
        vmxml.devices = vmxml.devices.append(new_disk)
        vmxml.sync()
        vm.start()
        session = vm.wait_for_login()
        cur_disks = virt_vm.get_disks()
        mount_disk = "".join(list(set(old_disks) ^ set(cur_disks)))

        # mkfs and mount disk in vm, create a file on that disk.
        if not mount_disk:
            logging.debug("old_disk: %s, new_disk: %s", old_disks, cur_disks)
            raise exceptions.TestFail("No new disk found in vm.")
        mkfs_and_mount(session, mount_disk)
        create_file_in_vm(session, "/mnt/before_snapshot.txt", "before")

        # virsh snapshot-create-as vm s --disk-only --diskspec vda,file=path
        if snapshot_disk_only:
            vm_blks = list(vm.get_disk_devices().keys())
            options = "%s --disk-only" % snapshot_name
            for vm_blk in vm_blks:
                snapshot_file = snapshot_dir + "/" + vm_blk + "." + snapshot_name
                if os.path.exists(snapshot_file):
                    os.remove(snapshot_file)
                options = options + " --diskspec %s,file=%s" % (vm_blk,
                                                                snapshot_file)
        else:
            options = snapshot_name
        utlv.check_exit_status(virsh.snapshot_create_as(vm_name, options))

        # check virsh snapshot-list
        logging.debug("Running: snapshot-list %s", vm_name)
        snapshot_list = virsh.snapshot_list(vm_name)
        logging.debug("snapshot list is: %s", snapshot_list)
        if not snapshot_list:
            raise exceptions.TestFail("snapshots not found after creation.")

        # snapshot-revert doesn't support external snapshot for now. so
        # only check this with internal snapshot.
        if not snapshot_disk_only:
            create_file_in_vm(session, "/mnt/after_snapshot.txt", "after")
            logging.debug("Running: snapshot-revert %s %s",
                          vm_name, snapshot_name)
            utlv.check_exit_status(virsh.snapshot_revert(vm_name, snapshot_name))
            session = vm.wait_for_login()
            file_existence, file_content = get_file_in_vm(session,
                                                          "/mnt/after_snapshot.txt")
            logging.debug("file exist = %s, file content = %s",
                          file_existence, file_content)
            if file_existence:
                raise exceptions.TestFail("The file created "
                                          "after snapshot still exists.")
            file_existence, file_content = get_file_in_vm(session,
                                                          "/mnt/before_snapshot.txt")
            logging.debug("file eixst = %s, file content = %s",
                          file_existence, file_content)
            if ((not file_existence) or (file_content.strip() != "before")):
                raise exceptions.TestFail("The file created "
                                          "before snapshot is lost.")
        # Delete snapshots:
        # - if disk-only, delete with --metadata and remove the snapshot files
        # - otherwise, delete the snapshot directly
        if snapshot_disk_only:
            options = "--metadata"
        else:
            options = ""
        for snap in snapshot_list:
            logging.debug("deleting snapshot %s with options %s",
                          snap, options)
            result = virsh.snapshot_delete(vm_name, snap, options)
            logging.debug("result of snapshot-delete: %s",
                          result.stdout.strip())
            if snapshot_disk_only:
                vm_blks = list(vm.get_disk_devices().keys())
                for vm_blk in vm_blks:
                    snapshot_file = snapshot_dir + "/" + vm_blk + "." + snap
                    if os.path.exists(snapshot_file):
                        os.remove(snapshot_file)
        snapshot_list = virsh.snapshot_list(vm_name)
        if snapshot_list:
            raise exceptions.TestFail("Snapshot not deleted: %s", snapshot_list)
    except Exception as detail:
        raise exceptions.TestFail("exception happens: %s", detail)
    finally:
        logging.debug("Start to clean up env...")
        vmxml_backup.sync()
        if pool_ins and pool_ins.pool_exists(pool_name):
            virsh.pool_destroy(pool_name)
        for new_vhba in new_vhbas:
            virsh.nodedev_destroy(new_vhba)
        utils_npiv.restart_multipathd()
        if old_mpath_conf:
            utils_npiv.prepare_multipath_conf(conf_path=mpath_conf_path,
                                              conf_content=old_mpath_conf,
                                              replace_existing=True)
        if not original_mpath_conf_exist and os.path.exists(mpath_conf_path):
            os.remove(mpath_conf_path)
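Step 4 of the docstring above boils down to a virsh snapshot-create-as call with one --diskspec per disk. A hedged standalone sketch of that single step, with placeholder domain name, target and snapshot file path:

# Hedged standalone sketch of the disk-only snapshot step (docstring step 4);
# the domain name, target and snapshot file path are placeholders.
from virttest import virsh
from virttest.utils_test import libvirt as utlv

options = "s1 --disk-only --diskspec vdb,file=/tmp/vdb.s1"
utlv.check_exit_status(virsh.snapshot_create_as("avocado-vt-vm1", options,
                                                debug=True))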
Example no. 12
def run(test, params, env):
    """
    Test virsh {at|de}tach-disk command.

    The command can attach new disk/detach disk.
    1. Prepare the test environment, destroy or suspend a VM.
    2. Perform the virsh attach/detach-disk operation.
    3. Recover the test environment.
    4. Confirm the test result.
    """
    def check_vm_partition(vm, device, os_type, target_name, old_parts):
        """
        Check VM disk's partition.

        :param vm: VM guest.
        :param device: Device type, e.g. disk or cdrom.
        :param os_type: VM's operating system type.
        :param target_name: Device target name.
        :param old_parts: Partition list collected before the attach.
        :return: True if the check succeeds.
        """
        logging.info("Checking VM partition...")
        if vm.is_dead():
            vm.start()
        try:
            attached = False
            if os_type == "linux":
                session = vm.wait_for_login()
                new_parts = utils_disk.get_parts_list(session)
                added_parts = list(set(new_parts).difference(set(old_parts)))
                logging.debug("Added parts: %s" % added_parts)
                for i in range(len(added_parts)):
                    if device == "disk":
                        if target_name.startswith("vd"):
                            if added_parts[i].startswith("vd"):
                                attached = True
                        elif target_name.startswith(
                                "hd") or target_name.startswith("sd"):
                            if added_parts[i].startswith("sd"):
                                attached = True
                    elif device == "cdrom":
                        if added_parts[i].startswith("sr"):
                            attached = True
                session.close()
            return attached
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    def acpiphp_module_modprobe(vm, os_type):
        """
        Add the acpiphp module if the VM's os type is rhel5.*

        :param vm: VM guest.
        :param os_type: VM's operating system type.
        :return: True if the operation succeeds.
        """
        if vm.is_dead():
            vm.start()
        try:
            if os_type == "linux":
                session = vm.wait_for_login()
                s_rpm, _ = session.cmd_status_output("rpm --version")
                # If status is different from 0, this
                # guest OS doesn't support the rpm package
                # manager
                if s_rpm:
                    session.close()
                    return True
                _, o_vd = session.cmd_status_output(
                    "rpm -qa | grep redhat-release")
                if o_vd.find("5Server") != -1:
                    s_mod, o_mod = session.cmd_status_output(
                        "modprobe acpiphp")
                    del o_mod
                    if s_mod != 0:
                        session.close()
                        return False
                session.close()
            return True
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    def check_shareable(at_with_shareable, test_twice):
        """
        check if the current libvirt version supports the shareable option

        :param at_with_shareable: True or False. Whether to attach the disk with the shareable option
        :param test_twice: True or False. Whether to perform the operations twice
        :return: True, or cancel the test
        """
        if at_with_shareable or test_twice:
            if libvirt_version.version_compare(3, 9, 0):
                return True
            else:
                test.cancel(
                    "Current libvirt version doesn't support shareable feature"
                )

    # Get test command.
    test_cmd = params.get("at_dt_disk_test_cmd", "attach-disk")

    vm_ref = params.get("at_dt_disk_vm_ref", "name")
    at_options = params.get("at_dt_disk_at_options", "")
    dt_options = params.get("at_dt_disk_dt_options", "")
    at_with_shareable = "yes" == params.get("at_with_shareable", 'no')
    pre_vm_state = params.get("at_dt_disk_pre_vm_state", "running")
    status_error = "yes" == params.get("status_error", 'no')
    no_attach = params.get("at_dt_disk_no_attach", 'no')
    os_type = params.get("os_type", "linux")
    qemu_file_lock = params.get("qemu_file_lock", "")
    if qemu_file_lock:
        if utils_misc.compare_qemu_version(2, 9, 0):
            logging.info('From qemu-kvm-rhev 2.9.0 on, QEMU image locking '
                         'should prevent multiple runs of QEMU or qemu-img '
                         'while a VM is running.')
            if test_cmd == "detach-disk" or pre_vm_state == "shut off":
                test.cancel('This case is not supported.')
            else:
                logging.info(
                    'The expected result is failure as opposed to success')
                status_error = True

    # Disk specific attributes.
    device = params.get("at_dt_disk_device", "disk")
    device_source_name = params.get("at_dt_disk_device_source", "attach.img")
    device_source_format = params.get("at_dt_disk_device_source_format", "raw")
    device_target = params.get("at_dt_disk_device_target", "vdd")
    device_disk_bus = params.get("at_dt_disk_bus_type", "virtio")
    source_path = "yes" == params.get("at_dt_disk_device_source_path", "yes")
    create_img = "yes" == params.get("at_dt_disk_create_image", "yes")
    test_twice = "yes" == params.get("at_dt_disk_test_twice", "no")
    test_type = "yes" == params.get("at_dt_disk_check_type", "no")
    test_audit = "yes" == params.get("at_dt_disk_check_audit", "no")
    test_block_dev = "yes" == params.get("at_dt_disk_iscsi_device", "no")
    test_logcial_dev = "yes" == params.get("at_dt_disk_logical_device", "no")
    restart_libvirtd = "yes" == params.get("at_dt_disk_restart_libvirtd", "no")
    detach_disk_with_print_xml = "yes" == params.get(
        "detach_disk_with_print_xml", "no")
    vg_name = params.get("at_dt_disk_vg", "vg_test_0")
    lv_name = params.get("at_dt_disk_lv", "lv_test_0")
    serial = params.get("at_dt_disk_serial", "")
    address = params.get("at_dt_disk_address", "")
    address2 = params.get("at_dt_disk_address2", "")
    cache_options = params.get("cache_options", "")
    time_sleep = params.get("time_sleep", 3)
    if check_shareable(at_with_shareable, test_twice):
        at_options += " --mode shareable"
    if serial:
        at_options += (" --serial %s" % serial)
    if address2:
        at_options_twice = at_options + (" --address %s" % address2)
    if address:
        at_options += (" --address %s" % address)
    if cache_options:
        if cache_options.count("directsync"):
            if not libvirt_version.version_compare(1, 0, 0):
                test.cancel("'directsync' cache option doesn't "
                            "support in current libvirt version.")
        at_options += (" --cache %s" % cache_options)

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    # Start vm and get all partions in vm.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = utils_disk.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file.
    backup_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Create virtual device file.
    device_source_path = os.path.join(data_dir.get_tmp_dir(),
                                      device_source_name)
    if test_block_dev:
        device_source = libvirt.setup_or_cleanup_iscsi(True)
        if not device_source:
            # We should skip this case
            test.cancel("Can not get iscsi device name in host")
        if test_logcial_dev:
            lv_utils.vg_create(vg_name, device_source)
            device_source = libvirt.create_local_disk("lvm",
                                                      size="10M",
                                                      vgname=vg_name,
                                                      lvname=lv_name)
            logging.debug("New created volume: %s", lv_name)
    else:
        if source_path and create_img:
            device_source = libvirt.create_local_disk(
                "file",
                path=device_source_path,
                size="1G",
                disk_format=device_source_format)
        else:
            device_source = device_source_name

    # If we are testing audit, we need to start the auditd service first.
    if test_audit:
        auditd_service = Factory.create_service("auditd")
        if not auditd_service.status():
            auditd_service.start()
        logging.info("Auditd service status: %s" % auditd_service.status())

    # If we are testing cdrom device, we need to detach hdc in VM first.
    if device == "cdrom":
        if vm.is_alive():
            vm.destroy(gracefully=False)
        s_detach = virsh.detach_disk(vm_name, device_target, "--config")
        if not s_detach:
            logging.error("Detach hdc failed before test.")

    # If we are testing detach-disk, we need to attach certain device first.
    if test_cmd == "detach-disk" and no_attach != "yes":
        s_at_options = "--driver qemu --config"
        # Since the lock feature was introduced in libvirt 3.9.0, the shareable
        # option needs to be set if the disk is attached multiple times.
        if check_shareable(at_with_shareable, test_twice):
            s_at_options += " --mode shareable"

        s_attach = virsh.attach_disk(vm_name,
                                     device_source,
                                     device_target,
                                     s_at_options,
                                     debug=True).exit_status
        if s_attach != 0:
            logging.error("Attaching device failed before testing detach-disk")
        else:
            logging.debug(
                "Attaching device succeeded before testing detach-disk")
        if test_twice:
            device_target2 = params.get("at_dt_disk_device_target2",
                                        device_target)
            device_source = libvirt.create_local_disk(
                "file",
                path=device_source_path,
                size="1",
                disk_format=device_source_format)
            s_attach = virsh.attach_disk(vm_name, device_source,
                                         device_target2,
                                         s_at_options).exit_status
            if s_attach != 0:
                logging.error("Attaching device failed before testing "
                              "detach-disk test_twice")

    vm.start()
    vm.wait_for_login()

    # Add the acpiphp module before testing if the VM's os type is rhel5.*
    if not acpiphp_module_modprobe(vm, os_type):
        test.error("Add acpiphp module failed before test.")

    # Turn VM into certain state.
    if pre_vm_state == "paused":
        logging.info("Suspending %s..." % vm_name)
        if vm.is_alive():
            vm.pause()
    elif pre_vm_state == "shut off":
        logging.info("Shuting down %s..." % vm_name)
        if vm.is_alive():
            vm.destroy(gracefully=False)

    # Get disk count before test.
    disk_count_before_cmd = vm_xml.VMXML.get_disk_count(vm_name)

    # Test.
    domid = vm.get_id()
    domuuid = vm.get_uuid()

    # Confirm how to reference a VM.
    if vm_ref == "name":
        vm_ref = vm_name
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref == "uuid":
        vm_ref = domuuid
    else:
        vm_ref = ""

    if test_cmd == "attach-disk":
        status = virsh.attach_disk(vm_ref,
                                   device_source,
                                   device_target,
                                   at_options,
                                   debug=True).exit_status
    elif test_cmd == "detach-disk":
        # With the print-xml option, detach-disk only prints the device XML and does not actually detach the disk.
        if detach_disk_with_print_xml and libvirt_version.version_compare(
                4, 5, 0):
            ret = virsh.detach_disk(vm_ref, device_target, at_options)
            libvirt.check_exit_status(ret)
            cmd = ("echo \"%s\" | grep -A 16 %s" %
                   (ret.stdout.strip(), device_source_name))
            if process.system(cmd, ignore_status=True, shell=True):
                test.error("Check disk with source image name failed")
        status = virsh.detach_disk(vm_ref,
                                   device_target,
                                   dt_options,
                                   debug=True).exit_status

    if restart_libvirtd:
        libvirtd_serv = utils_libvirtd.Libvirtd()
        libvirtd_serv.restart()

    if test_twice:
        device_target2 = params.get("at_dt_disk_device_target2", device_target)
        device_source = libvirt.create_local_disk(
            "file",
            path=device_source_path,
            size="1G",
            disk_format=device_source_format)
        if test_cmd == "attach-disk":
            if address2:
                at_options = at_options_twice
            status = virsh.attach_disk(vm_ref,
                                       device_source,
                                       device_target2,
                                       at_options,
                                       debug=True).exit_status
        elif test_cmd == "detach-disk":
            status = virsh.detach_disk(vm_ref,
                                       device_target2,
                                       dt_options,
                                       debug=True).exit_status

    # Resume the guest after the command. On newer libvirt this used to be a
    # bug and is now fixed: the change to the xml file is only applied after
    # the guest is resumed.
    if pre_vm_state == "paused":
        vm.resume()
        time.sleep(5)

    # Check audit log
    check_audit_after_cmd = True
    if test_audit:
        grep_audit = ('grep "%s" /var/log/audit/audit.log' %
                      test_cmd.split("-")[0])
        cmd = (grep_audit + ' | ' +
               'grep "%s" | tail -n1 | grep "res=success"' % device_source)
        if process.run(cmd, shell=True).exit_status:
            logging.error("Audit check failed")
            check_audit_after_cmd = False

    # Need wait a while for xml to sync
    time.sleep(float(time_sleep))
    # Check disk count after command.
    check_count_after_cmd = True
    disk_count_after_cmd = vm_xml.VMXML.get_disk_count(vm_name)
    if test_cmd == "attach-disk":
        if disk_count_after_cmd == disk_count_before_cmd:
            check_count_after_cmd = False
    elif test_cmd == "detach-disk":
        if disk_count_after_cmd < disk_count_before_cmd:
            check_count_after_cmd = False

    # Recover VM state.
    if pre_vm_state == "shut off":
        vm.start()

    # Check in VM after command.
    check_vm_after_cmd = True
    check_vm_after_cmd = check_vm_partition(vm, device, os_type, device_target,
                                            old_parts)

    # Check disk type after attach.
    check_disk_type = True
    if test_type:
        if test_block_dev:
            check_disk_type = vm_xml.VMXML.check_disk_type(
                vm_name, device_source, "block")
        else:
            check_disk_type = vm_xml.VMXML.check_disk_type(
                vm_name, device_source, "file")
    # Check disk serial after attach.
    check_disk_serial = True
    if serial:
        disk_serial = vm_xml.VMXML.get_disk_serial(vm_name, device_target)
        if serial != disk_serial:
            check_disk_serial = False

    # Check disk address after attach.
    check_disk_address = True
    if address:
        disk_address = vm_xml.VMXML.get_disk_address(vm_name, device_target)
        if address != disk_address:
            check_disk_address = False

    # Check multifunction address after attach.
    check_disk_address2 = True
    if address2:
        disk_address2 = vm_xml.VMXML.get_disk_address(vm_name, device_target2)
        if address2 != disk_address2:
            check_disk_address2 = False

    # Check disk cache option after attach.
    check_cache_after_cmd = True
    if cache_options:
        disk_cache = vm_xml.VMXML.get_disk_attr(vm_name, device_target,
                                                "driver", "cache")
        if cache_options == "default":
            if disk_cache is not None:
                check_cache_after_cmd = False
        elif disk_cache != cache_options:
            check_cache_after_cmd = False

    # Eject cdrom test
    eject_cdrom = "yes" == params.get("at_dt_disk_eject_cdrom", "no")
    save_vm = "yes" == params.get("at_dt_disk_save_vm", "no")
    save_file = os.path.join(data_dir.get_tmp_dir(), "vm.save")
    try:
        if eject_cdrom:
            eject_params = {
                'type_name': "file",
                'device_type': "cdrom",
                'target_dev': device_target,
                'target_bus': device_disk_bus
            }
            eject_xml = libvirt.create_disk_xml(eject_params)
            with open(eject_xml) as eject_file:
                logging.debug("Eject CDROM by XML: %s", eject_file.read())
            # Run the command twice to make sure the cdrom tray opens first (BZ#892289)
            # Open tray
            virsh.attach_device(domainarg=vm_name,
                                filearg=eject_xml,
                                debug=True)
            # Add time sleep between two attach commands.
            if time_sleep:
                time.sleep(float(time_sleep))
            # Eject cdrom
            result = virsh.attach_device(domainarg=vm_name,
                                         filearg=eject_xml,
                                         debug=True)
            if result.exit_status != 0:
                test.fail("Eject CDROM failed")
            if vm_xml.VMXML.check_disk_exist(vm_name, device_source):
                test.fail("Find %s after do eject" % device_source)
        # Save and restore VM
        if save_vm:
            result = virsh.save(vm_name, save_file, debug=True)
            libvirt.check_exit_status(result)
            result = virsh.restore(save_file, debug=True)
            libvirt.check_exit_status(result)
            if vm_xml.VMXML.check_disk_exist(vm_name, device_source):
                test.fail("Find %s after do restore" % device_source)

        # Destroy VM.
        vm.destroy(gracefully=False)

        # Check disk count after VM shutdown (with --config).
        check_count_after_shutdown = True
        inactive_vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        disk_count_after_shutdown = len(inactive_vmxml.get_disk_all())
        if test_cmd == "attach-disk":
            if disk_count_after_shutdown == disk_count_before_cmd:
                check_count_after_shutdown = False
        elif test_cmd == "detach-disk":
            if disk_count_after_shutdown < disk_count_before_cmd:
                check_count_after_shutdown = False

    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.debug("Restore the VM XML")
        backup_xml.sync()
        if os.path.exists(save_file):
            os.remove(save_file)
        if test_block_dev:
            if test_logcial_dev:
                libvirt.delete_local_disk("lvm",
                                          vgname=vg_name,
                                          lvname=lv_name)
                lv_utils.vg_remove(vg_name)
                process.run("pvremove %s" % device_source,
                            shell=True,
                            ignore_status=True)
            libvirt.setup_or_cleanup_iscsi(False)
        else:
            libvirt.delete_local_disk("file", device_source)

    # Check results.
    if status_error:
        if not status:
            test.fail("virsh %s exit with unexpected value." % test_cmd)
    else:
        if status:
            test.fail("virsh %s failed." % test_cmd)
        if test_cmd == "attach-disk":
            if at_options.count("config"):
                if not check_count_after_shutdown:
                    test.fail("Cannot see config attached device "
                              "in xml file after VM shutdown.")
                if not check_disk_serial:
                    test.fail("Serial set failed after attach")
                if not check_disk_address:
                    test.fail("Address set failed after attach")
                if not check_disk_address2:
                    test.fail("Address(multifunction) set failed"
                              " after attach")
            else:
                if not check_count_after_cmd:
                    test.fail("Cannot see device in xml file after attach.")
                if not check_vm_after_cmd:
                    test.fail("Cannot see device in VM after attach.")
                if not check_disk_type:
                    test.fail("Check disk type failed after attach.")
                if not check_audit_after_cmd:
                    test.fail("Audit hotplug failure after attach")
                if not check_cache_after_cmd:
                    test.fail("Check cache failure after attach")
                if at_options.count("persistent"):
                    if not check_count_after_shutdown:
                        test.fail("Cannot see device attached "
                                  "with persistent after "
                                  "VM shutdown.")
                else:
                    if check_count_after_shutdown:
                        test.fail("See non-config attached device "
                                  "in xml file after VM shutdown.")
        elif test_cmd == "detach-disk":
            if dt_options.count("config"):
                if check_count_after_shutdown:
                    test.fail("See config detached device in "
                              "xml file after VM shutdown.")
            else:
                if check_count_after_cmd:
                    test.fail("See device in xml file after detach.")
                if check_vm_after_cmd:
                    test.fail("See device in VM after detach.")
                if not check_audit_after_cmd:
                    test.fail("Audit hotunplug failure after detach")

                if dt_options.count("persistent"):
                    if check_count_after_shutdown:
                        test.fail("See device deattached "
                                  "with persistent after "
                                  "VM shutdown.")
                else:
                    if not check_count_after_shutdown:
                        test.fail("See non-config detached "
                                  "device in xml file after "
                                  "VM shutdown.")

        else:
            test.error("Unknown command %s." % test_cmd)
Example no. 13
def run(test, params, env):
    """
    Test command: virsh pool-define-as; pool-build; pool-start; vol-create-as;
    vol-list; attach-device; login; mount and dd; reboot; check persistence;
    detach-device; pool-destroy; pool-undefine; clear lv,vg and pv;
    Create a libvirt npiv pool from a vHBA's device mapper device and create
    a volume out of the newly created pool and attach it to a guest, mount it,
    reboot and check persistence after reboot.

    Prerequisite:
    Host should have a vHBA associated with an mpath device
    """

    pool_name = params.get("pool_create_name", "virt_test_pool_tmp")
    pool_type = params.get("pool_type", "dir")
    scsi_wwpn = params.get("scsi_wwpn", "WWPN_EXAMPLE")
    scsi_wwnn = params.get("scsi_wwnn", "WWNN_EXAMPLE")
    pool_target = params.get("pool_target", "pool_target")
    target_device = params.get("disk_target_dev", "vda")
    volume_name = params.get("volume_name", "imagefrommapper.qcow2")
    volume_capacity = params.get("volume_capacity", '1G')
    allocation = params.get("allocation", '1G')
    frmt = params.get("volume_format", 'qcow2')
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    mount_disk = None
    test_unit = None

    if 'EXAMPLE' in scsi_wwnn or 'EXAMPLE' in scsi_wwpn:
        raise exceptions.TestSkipError("Please provide proper WWPN/WWNN")

    if not vm.is_alive():
        vm.start()
    pool_extra_args = ""
    libvirt_vm = lib_vm.VM(vm_name, vm.params, vm.root_dir, vm.address_cache)
    process.run("service multipathd restart", shell=True)
    online_hbas_list = nodedev.find_hbas("hba")
    first_online_hba = online_hbas_list[0]
    old_mpath_devs = nodedev.find_mpath_devs()
    logging.debug("the old mpath devs are: %s" % old_mpath_devs)
    new_vhbas = nodedev.nodedev_create_from_xml({
        "nodedev_parent": first_online_hba,
        "scsi_wwnn": scsi_wwnn,
        "scsi_wwpn": scsi_wwpn
    })
    logging.info("Newly created vHBA %s" % new_vhbas)
    process.run("service multipathd restart", shell=True)

    utils_misc.wait_for(lambda: nodedev.is_mpath_devs_added(old_mpath_devs),
                        timeout=5)

    cur_mpath_devs = nodedev.find_mpath_devs()
    logging.debug("the current mpath devs are: %s" % cur_mpath_devs)
    new_mpath_devs = list(set(cur_mpath_devs).difference(set(old_mpath_devs)))

    logging.debug("newly added mpath devs are: %s" % new_mpath_devs)
    if not new_mpath_devs:
        raise exceptions.TestFail("No newly added mpath devices found, \
                please check your FC settings")
    source_dev = os.path.join('/dev/mapper/', new_mpath_devs[0])
    logging.debug("We are going to use \"%s\" as our source device"
                  " to create a logical pool" % source_dev)

    cmd = "parted %s mklabel msdos -s" % source_dev
    cmd_result = process.run(cmd, shell=True)
    utlv.check_exit_status(cmd_result)

    if source_dev:
        pool_extra_args = ' --source-dev %s' % source_dev
    else:
        raise exceptions.TestFail(
            "The vHBA %s does not have any associated mpath device" %
            new_vhbas)

    pool_ins = libvirt_storage.StoragePool()
    if pool_ins.pool_exists(pool_name):
        raise exceptions.TestFail("Pool %s already exist" % pool_name)
    # if no online hba cards on host, mark case failed
    if not online_hbas_list:
        raise exceptions.TestSkipError("Host doesn't have online hba cards")
    try:
        cmd_result = virsh.pool_define_as(pool_name,
                                          pool_type,
                                          pool_target,
                                          pool_extra_args,
                                          ignore_status=True,
                                          debug=True)
        utlv.check_exit_status(cmd_result)

        cmd_result = virsh.pool_build(pool_name)
        utlv.check_exit_status(cmd_result)

        cmd_result = virsh.pool_start(pool_name)
        utlv.check_exit_status(cmd_result)

        utlv.check_actived_pool(pool_name)
        pool_detail = libvirt_xml.PoolXML.get_pool_details(pool_name)
        logging.debug("Pool detail: %s", pool_detail)

        cmd_result = virsh.vol_create_as(volume_name,
                                         pool_name,
                                         volume_capacity,
                                         allocation,
                                         frmt,
                                         "",
                                         debug=True)
        utlv.check_exit_status(cmd_result)

        vol_list = utlv.get_vol_list(pool_name, timeout=10)
        logging.debug('Volume list %s', vol_list)
        for unit in vol_list:
            test_unit = vol_list[unit]
            logging.debug(unit)

        disk_params = {
            'type_name': "file",
            'target_dev': target_device,
            'target_bus': "virtio",
            'source_file': test_unit,
            'driver_name': "qemu",
            'driver_type': "raw"
        }
        disk_xml = utlv.create_disk_xml(disk_params)
        session = vm.wait_for_login()

        bf_disks = libvirt_vm.get_disks()

        attach_success = virsh.attach_device(vm_name, disk_xml, debug=True)

        utlv.check_exit_status(attach_success)

        logging.debug("Disks before attach: %s", bf_disks)

        af_disks = libvirt_vm.get_disks()

        logging.debug("Disks after attach: %s", af_disks)

        mount_disk = "".join(list(set(bf_disks) ^ set(af_disks)))
        if not mount_disk:
            raise exceptions.TestFail("Can not get attached device in vm.")
        logging.debug("Attached device in vm:%s", mount_disk)

        output = session.cmd_status_output('lsblk', timeout=15)
        logging.debug("%s", output[1])

        session.cmd_status_output('mkfs.ext4 %s' % mount_disk)
        if mount_disk:
            logging.info("%s", mount_disk)
            mount_success = mount_and_dd(session, mount_disk)
            if not mount_success:
                raise exceptions.TestFail("Can not find mounted device")
        session.close()

        virsh.reboot(vm_name, debug=True)

        session = vm.wait_for_login()
        output = session.cmd_status_output('mount')
        logging.debug("Mount output: %s", output[1])
        if '/mnt' in output[1]:
            logging.debug("Mount Successful accross reboot")
        session.close()

        status = virsh.detach_device(vm_name, disk_xml, debug=True)
        utlv.check_exit_status(status)

    finally:
        vm.destroy(gracefully=False)
        logging.debug('Destroying pool %s', pool_name)
        virsh.pool_destroy(pool_name)
        logging.debug('Undefining pool %s', pool_name)
        virsh.pool_undefine(pool_name)
        if test_unit:
            process.system('lvremove -f %s' % test_unit, verbose=True)
            process.system('vgremove -f %s' % pool_name, verbose=True)
            process.system('pvremove -f %s' % source_dev, verbose=True)
        if new_vhbas:
            nodedev.vhbas_cleanup(new_vhbas.split())
        process.run("service multipathd restart", shell=True)
Example no. 14
def run(test, params, env):
    """
    Attach/Detach an iscsi network/volume disk to domain

    1. For secret usage testing:
        1.1. Setup an iscsi target with CHAP authentication.
        1.2. Define a secret for iscsi target usage
        1.3. Set secret value
    2. Setup an iscsi target (and an iscsi pool/volume for volume type disks)
    3. Create an iscsi network disk XML
    4. Attach disk with the XML file and check the disk inside the VM
    5. Detach the disk
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    disk_device = params.get("disk_device", "disk")
    disk_type = params.get("disk_type", "network")
    disk_src_protocol = params.get("disk_source_protocol", "iscsi")
    disk_src_host = params.get("disk_source_host", "127.0.0.1")
    disk_src_port = params.get("disk_source_port", "3260")
    disk_src_pool = params.get("disk_source_pool")
    disk_src_mode = params.get("disk_source_mode", "host")
    pool_type = params.get("pool_type", "iscsi")
    pool_src_host = params.get("pool_source_host", "127.0.0.1")
    disk_target = params.get("disk_target", "vdb")
    disk_target_bus = params.get("disk_target_bus", "virtio")
    disk_readonly = params.get("disk_readonly", "no")
    chap_auth = "yes" == params.get("chap_auth", "no")
    chap_user = params.get("chap_username", "")
    chap_passwd = params.get("chap_password", "")
    secret_usage_target = params.get("secret_usage_target")
    secret_ephemeral = params.get("secret_ephemeral", "no")
    secret_private = params.get("secret_private", "yes")
    status_error = "yes" == params.get("status_error", "no")
    # Indicate the PPC platform
    on_ppc = False
    if platform.platform().count('ppc64'):
        on_ppc = True

    if disk_src_protocol == 'iscsi':
        if not libvirt_version.version_compare(1, 0, 4):
            raise error.TestNAError("'iscsi' disk doesn't support in"
                                    " current libvirt version.")
    if disk_type == "volume":
        if not libvirt_version.version_compare(1, 0, 5):
            raise error.TestNAError("'volume' type disk doesn't support in"
                                    " current libvirt version.")
    # Back up the VM XML
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    virsh_dargs = {'debug': True, 'ignore_status': True}
    try:
        if chap_auth:
            # Create a secret xml to define it
            secret_xml = SecretXML(secret_ephemeral, secret_private)
            secret_xml.auth_type = "chap"
            secret_xml.auth_username = chap_user
            secret_xml.usage = disk_src_protocol
            secret_xml.target = secret_usage_target
            logging.debug("Define secret by XML: %s",
                          open(secret_xml.xml).read())
            # Define secret
            cmd_result = virsh.secret_define(secret_xml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            # Get secret uuid
            try:
                secret_uuid = cmd_result.stdout.strip().split()[1]
            except IndexError:
                raise error.TestError("Fail to get new created secret uuid")

            # Set secret value
            secret_string = base64.b64encode(chap_passwd)
            cmd_result = virsh.secret_set_value(secret_uuid, secret_string,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
        else:
            # Set chap_user and chap_passwd to empty to avoid setting up
            # CHAP authentication when exporting the iscsi target
            chap_user = ""
            chap_passwd = ""

        # Setup iscsi target
        iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
            is_setup=True,
            is_login=False,
            image_size='1G',
            chap_user=chap_user,
            chap_passwd=chap_passwd,
            portal_ip=disk_src_host)
        # Create iscsi pool
        if disk_type == "volume":
            # Create an iscsi pool xml to create it
            pool_src_xml = pool_xml.SourceXML()
            pool_src_xml.host_name = pool_src_host
            pool_src_xml.device_path = iscsi_target
            poolxml = pool_xml.PoolXML(pool_type=pool_type)
            poolxml.name = disk_src_pool
            poolxml.set_source(pool_src_xml)
            poolxml.target_path = "/dev/disk/by-path"
            # Create iscsi pool
            cmd_result = virsh.pool_create(poolxml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            def get_vol():
                """Get the volume info"""
                # Refresh the pool
                cmd_result = virsh.pool_refresh(disk_src_pool)
                libvirt.check_exit_status(cmd_result)
                # Get volume name
                cmd_result = virsh.vol_list(disk_src_pool, **virsh_dargs)
                libvirt.check_exit_status(cmd_result)
                vol_list = []
                vol_list = re.findall(r"(\S+)\ +(\S+)[\ +\n]",
                                      str(cmd_result.stdout))
                if len(vol_list) > 1:
                    return vol_list[1]
                else:
                    return None

            # Wait for a while so that we can get the volume info
            vol_info = utils_misc.wait_for(get_vol, 10)
            if vol_info:
                vol_name, vol_path = vol_info
            else:
                raise error.TestError("Failed to get volume info")
            # Snapshot doesn't support the raw disk format; create a qcow2
            # volume disk for the snapshot operation.
            process.run('qemu-img create -f qcow2 %s %s' % (vol_path, '100M'),
                        shell=True)

        # Create iscsi network disk XML
        disk_params = {
            'device_type': disk_device,
            'type_name': disk_type,
            'target_dev': disk_target,
            'target_bus': disk_target_bus,
            'readonly': disk_readonly
        }
        disk_params_src = {}
        if disk_type == "network":
            disk_params_src = {
                'source_protocol': disk_src_protocol,
                'source_name': iscsi_target + "/%s" % lun_num,
                'source_host_name': disk_src_host,
                'source_host_port': disk_src_port
            }
        elif disk_type == "volume":
            disk_params_src = {
                'source_pool': disk_src_pool,
                'source_volume': vol_name,
                'driver_type': 'qcow2',
                'source_mode': disk_src_mode
            }
        else:
            error.TestNAError("Unsupport disk type in this test")
        disk_params.update(disk_params_src)
        if chap_auth:
            disk_params_auth = {
                'auth_user': chap_user,
                'secret_type': disk_src_protocol,
                'secret_usage': secret_xml.target
            }
            disk_params.update(disk_params_auth)
        disk_xml = libvirt.create_disk_xml(disk_params)
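        # For reference only (illustrative, not taken from this test's output):
        # for the "network"/iscsi branch above, the generated <disk> element is
        # expected to look roughly like the following, with the auth element
        # present only when chap_auth is enabled and exact attributes depending
        # on the params and libvirt defaults:
        #   <disk type='network' device='disk'>
        #     <source protocol='iscsi' name='IQN_TARGET/LUN'>
        #       <host name='127.0.0.1' port='3260'/>
        #     </source>
        #     <auth username='CHAP_USER'>
        #       <secret type='iscsi' usage='SECRET_USAGE_TARGET'/>
        #     </auth>
        #     <target dev='vdb' bus='virtio'/>
        #   </disk>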

        start_vm = "yes" == params.get("start_vm", "yes")
        if start_vm:
            if vm.is_dead():
                vm.start()
            vm.wait_for_login()
        else:
            if not vm.is_dead():
                vm.destroy()
        attach_option = params.get("attach_option", "")
        with open(disk_xml) as disk_xml_f:
            disk_xml_content = disk_xml_f.read()
        logging.debug("Attach disk by XML: %s", disk_xml_content)
        cmd_result = virsh.attach_device(domainarg=vm_name,
                                         filearg=disk_xml,
                                         flagstr=attach_option,
                                         **virsh_dargs)
        libvirt.check_exit_status(cmd_result, status_error)

        if vm.is_dead():
            cmd_result = virsh.start(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

        # Wait for the domain to become stable
        vm.wait_for_login().close()
        domain_operation = params.get("domain_operation", "")
        if domain_operation == "save":
            save_file = os.path.join(test.tmpdir, "vm.save")
            cmd_result = virsh.save(vm_name, save_file, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.restore(save_file)
            libvirt.check_exit_status(cmd_result)
            if os.path.exists(save_file):
                os.remove(save_file)
        elif domain_operation == "snapshot":
            # Run snapshot related commands: snapshot-create-as, snapshot-list
            # snapshot-info, snapshot-dumpxml, snapshot-create
            snapshot_name1 = "snap1"
            snapshot_name2 = "snap2"
            cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name1,
                                                  **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            try:
                virsh.snapshot_list(vm_name, **virsh_dargs)
            except process.CmdError:
                error.TestFail("Failed getting snapshots list for %s" %
                               vm_name)

            try:
                virsh.snapshot_info(vm_name, snapshot_name1, **virsh_dargs)
            except process.CmdError:
                error.TestFail("Failed getting snapshots info for %s" %
                               vm_name)

            cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name1,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_create(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_current(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            snapshot_file = os.path.join(test.tmpdir, snapshot_name2)
            sn_create_op = ("%s --disk-only --diskspec %s,file=%s" %
                            (snapshot_name2, disk_target, snapshot_file))
            cmd_result = virsh.snapshot_create_as(vm_name, sn_create_op,
                                                  **virsh_dargs)

            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.snapshot_revert(vm_name, snapshot_name1,
                                               **virsh_dargs)

            cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs)
            if snapshot_name2 not in cmd_result:
                raise error.TestError("Snapshot %s not found" % snapshot_name2)

        else:
            logging.error("Unsupport operation %s in this case, so skip it",
                          domain_operation)

        def find_attach_disk(expect=True):
            """
            Find attached disk inside the VM
            """
            found_disk = False
            if vm.is_dead():
                raise error.TestError("Domain %s is not running" % vm_name)
            else:
                try:
                    session = vm.wait_for_login()
                    # Here the script needs to wait for a while for the guest
                    # to recognize the hotplugged disk on PPC
                    if on_ppc:
                        time.sleep(10)
                    cmd = "grep %s /proc/partitions" % disk_target
                    s, o = session.cmd_status_output(cmd)
                    logging.info("%s output: %s", cmd, o)
                    session.close()
                    if s == 0:
                        found_disk = True
                except (LoginError, VMError, ShellError) as e:
                    logging.error(str(e))
            if found_disk == expect:
                logging.debug("Check disk inside the VM PASS as expected")
            else:
                raise error.TestError("Check disk inside the VM FAIL")

        # Check disk inside the VM; the disk is expected to be absent when
        # status_error=True
        find_attach_disk(not status_error)

        # Detach disk
        cmd_result = virsh.detach_disk(vm_name, disk_target)
        libvirt.check_exit_status(cmd_result, status_error)

        # Check disk inside the VM
        find_attach_disk(False)
Example No. 15
def run(test, params, env):
    """
    Test rbd disk device.

    1. Prepare the test environment, destroy or suspend a VM.
    2. Prepare the disk image.
    3. Edit the disk XML and start the domain.
    4. Perform test operations.
    5. Recover the test environment.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}
    additional_xml_file = os.path.join(data_dir.get_tmp_dir(),
                                       "additional_disk.xml")

    def config_ceph():
        """
        Write the configs to the file.
        """
        src_host = disk_src_host.split()
        src_port = disk_src_port.split()
        conf_str = "mon_host = "
        hosts = []
        for host, port in zip(src_host, src_port):
            hosts.append("%s:%s" % (host, port))
        with open(disk_src_config, 'w') as f:
            f.write(conf_str + ','.join(hosts) + '\n')
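        # Illustrative result (hosts and ports are placeholders): with
        # disk_source_host = "10.0.0.1 10.0.0.2" and disk_source_port =
        # "6789 6789", the file would contain a single line:
        #   mon_host = 10.0.0.1:6789,10.0.0.2:6789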

    def create_pool():
        """
        Define and start a pool.
        """
        sp = libvirt_storage.StoragePool()
        if create_by_xml:
            p_xml = pool_xml.PoolXML(pool_type=pool_type)
            p_xml.name = pool_name
            s_xml = pool_xml.SourceXML()
            s_xml.vg_name = disk_src_pool
            source_host = []
            for (host_name, host_port) in zip(disk_src_host.split(),
                                              disk_src_port.split()):
                source_host.append({'name': host_name, 'port': host_port})

            s_xml.hosts = source_host
            if auth_type:
                s_xml.auth_type = auth_type
            if auth_user:
                s_xml.auth_username = auth_user
            if auth_usage:
                s_xml.secret_usage = auth_usage
            p_xml.source = s_xml
            logging.debug("Pool xml: %s", p_xml)
            p_xml.xmltreefile.write()
            ret = virsh.pool_define(p_xml.xml, **virsh_dargs)
            libvirt.check_exit_status(ret)
            ret = virsh.pool_build(pool_name, **virsh_dargs)
            libvirt.check_exit_status(ret)
            ret = virsh.pool_start(pool_name, **virsh_dargs)
            libvirt.check_exit_status(ret)
        else:
            auth_opt = ""
            if client_name and client_key:
                auth_opt = (
                    "--auth-type %s --auth-username %s --secret-usage '%s'" %
                    (auth_type, auth_user, auth_usage))
            if not sp.define_rbd_pool(
                    pool_name, mon_host, disk_src_pool, extra=auth_opt):
                test.fail("Failed to define storage pool")
            if not sp.build_pool(pool_name):
                test.fail("Failed to build storage pool")
            if not sp.start_pool(pool_name):
                test.fail("Failed to start storage pool")

        # Check pool operation
        ret = virsh.pool_refresh(pool_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.pool_uuid(pool_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        # pool-info
        pool_info = sp.pool_info(pool_name)
        if pool_info["Autostart"] != 'no':
            test.fail("Failed to check pool information")
        # pool-autostart
        if not sp.set_pool_autostart(pool_name):
            test.fail("Failed to set pool autostart")
        pool_info = sp.pool_info(pool_name)
        if pool_info["Autostart"] != 'yes':
            test.fail("Failed to check pool information")
        # pool-autostart --disable
        if not sp.set_pool_autostart(pool_name, "--disable"):
            test.fail("Failed to set pool autostart")
        # If port is not pre-configured, port value should not be hardcoded in pool information.
        if "yes" == params.get("rbd_port", "no"):
            if 'port' in virsh.pool_dumpxml(pool_name):
                test.fail("port attribute should not be in pool information")
        # find-storage-pool-sources-as
        if "yes" == params.get("find_storage_pool_sources_as", "no"):
            ret = virsh.find_storage_pool_sources_as("rbd", mon_host)
            libvirt.check_result(ret, skip_if=unsupported_err)
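        # For reference only (illustrative): when create_by_xml is enabled,
        # the pool XML defined above is expected to look roughly like:
        #   <pool type='rbd'>
        #     <name>POOL_NAME</name>
        #     <source>
        #       <name>RBD_POOL</name>
        #       <host name='MON_HOST' port='6789'/>
        #       <auth type='ceph' username='AUTH_USER'>
        #         <secret usage='AUTH_USAGE'/>
        #       </auth>
        #     </source>
        #   </pool>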

    def create_vol(vol_params):
        """
        Create volume.

        :param vol_params: Volume parameters dict.
        """
        pvt = libvirt.PoolVolumeTest(test, params)
        if create_by_xml:
            pvt.pre_vol_by_xml(pool_name, **vol_params)
        else:
            pvt.pre_vol(vol_name, None, '2G', None, pool_name)

    def check_vol(vol_params):
        """
        Check volume information.
        """
        pv = libvirt_storage.PoolVolume(pool_name)
        # Supported operation
        if vol_name not in pv.list_volumes():
            test.fail("Volume %s doesn't exist" % vol_name)
        ret = virsh.vol_dumpxml(vol_name, pool_name)
        libvirt.check_exit_status(ret)
        # vol-info
        if not pv.volume_info(vol_name):
            test.fail("Can't see volume info")
        # vol-key
        ret = virsh.vol_key(vol_name, pool_name)
        libvirt.check_exit_status(ret)
        if "%s/%s" % (disk_src_pool, vol_name) not in ret.stdout.strip():
            test.fail("Volume key isn't correct")
        # vol-path
        ret = virsh.vol_path(vol_name, pool_name)
        libvirt.check_exit_status(ret)
        if "%s/%s" % (disk_src_pool, vol_name) not in ret.stdout.strip():
            test.fail("Volume path isn't correct")
        # vol-pool
        ret = virsh.vol_pool("%s/%s" % (disk_src_pool, vol_name))
        libvirt.check_exit_status(ret)
        if pool_name not in ret.stdout.strip():
            test.fail("Volume pool isn't correct")
        # vol-name
        ret = virsh.vol_name("%s/%s" % (disk_src_pool, vol_name))
        libvirt.check_exit_status(ret)
        if vol_name not in ret.stdout.strip():
            test.fail("Volume name isn't correct")
        # vol-resize
        ret = virsh.vol_resize(vol_name, "2G", pool_name)
        libvirt.check_exit_status(ret)

        # Not supported operation
        # vol-clone
        ret = virsh.vol_clone(vol_name, cloned_vol_name, pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)
        # vol-create-from
        volxml = vol_xml.VolXML()
        vol_params.update({"name": "%s" % create_from_cloned_volume})
        v_xml = volxml.new_vol(**vol_params)
        v_xml.xmltreefile.write()
        ret = virsh.vol_create_from(pool_name, v_xml.xml, vol_name, pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)

        # vol-wipe
        ret = virsh.vol_wipe(vol_name, pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)
        # vol-upload
        ret = virsh.vol_upload(vol_name,
                               vm.get_first_disk_devices()['source'],
                               "--pool %s" % pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)
        # vol-download
        ret = virsh.vol_download(vol_name, cloned_vol_name,
                                 "--pool %s" % pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)

    def check_qemu_cmd():
        """
        Check qemu command line options.
        """
        cmd = ("ps -ef | grep %s | grep -v grep " % vm_name)
        process.run(cmd, shell=True)
        if disk_src_name:
            cmd += " | grep file=rbd:%s:" % disk_src_name
            if auth_user and auth_key:
                cmd += ('id=%s:auth_supported=cephx' % auth_user)
        if disk_src_config:
            cmd += " | grep 'conf=%s'" % disk_src_config
        elif mon_host:
            hosts = r'\:6789\;'.join(mon_host.split())
            cmd += " | grep 'mon_host=%s'" % hosts
        if driver_iothread:
            cmd += " | grep iothread%s" % driver_iothread
        # Run the command
        process.run(cmd, shell=True)
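        # Illustrative final pipeline (placeholders; exact qemu arguments vary
        # with the libvirt/qemu versions in use):
        #   ps -ef | grep VM_NAME | grep -v grep \
        #     | grep file=rbd:POOL/IMAGE:id=AUTH_USER:auth_supported=cephx \
        #     | grep 'mon_host=HOST1\:6789\;HOST2'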

    def check_save_restore():
        """
        Test save and restore operation
        """
        save_file = os.path.join(data_dir.get_tmp_dir(), "%s.save" % vm_name)
        ret = virsh.save(vm_name, save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.restore(save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)
        if os.path.exists(save_file):
            os.remove(save_file)
        # Login to check vm status
        vm.wait_for_login().close()

    def check_snapshot(snap_option, target_dev='vda'):
        """
        Test snapshot operation.
        """
        snap_name = "s1"
        snap_mem = os.path.join(data_dir.get_tmp_dir(), "rbd.mem")
        snap_disk = os.path.join(data_dir.get_tmp_dir(), "rbd.disk")
        xml_snap_exp = [
            "disk name='%s' snapshot='external' type='file'" % target_dev
        ]
        xml_dom_exp = [
            "source file='%s'" % snap_disk,
            "backingStore type='network' index='1'",
            "source protocol='rbd' name='%s'" % disk_src_name
        ]
        if snap_option.count("disk-only"):
            options = ("%s --diskspec %s,file=%s --disk-only" %
                       (snap_name, target_dev, snap_disk))
        elif snap_option.count("disk-mem"):
            options = ("%s --memspec file=%s --diskspec %s,file="
                       "%s" % (snap_name, snap_mem, target_dev, snap_disk))
            xml_snap_exp.append("memory snapshot='external' file='%s'" %
                                snap_mem)
        else:
            options = snap_name

        ret = virsh.snapshot_create_as(vm_name, options)
        if test_disk_internal_snapshot or test_disk_readonly:
            libvirt.check_result(ret, expected_fails=unsupported_err)
        else:
            libvirt.check_result(ret, skip_if=unsupported_err)

        # check xml file.
        if not ret.exit_status:
            snap_xml = virsh.snapshot_dumpxml(vm_name, snap_name,
                                              debug=True).stdout.strip()
            dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
            # Delete snapshots.
            libvirt.clean_up_snapshots(vm_name)
            if os.path.exists(snap_mem):
                os.remove(snap_mem)
            if os.path.exists(snap_disk):
                os.remove(snap_disk)

            if not all([x in snap_xml for x in xml_snap_exp]):
                test.fail("Failed to check snapshot xml")
            if not all([x in dom_xml for x in xml_dom_exp]):
                test.fail("Failed to check domain xml")

    def check_blockcopy(target):
        """
        Block copy operation test.
        """
        blk_file = os.path.join(data_dir.get_tmp_dir(), "blk.rbd")
        if os.path.exists(blk_file):
            os.remove(blk_file)
        blk_mirror = ("mirror type='file' file='%s' "
                      "format='raw' job='copy'" % blk_file)

        # Do blockcopy
        ret = virsh.blockcopy(vm_name, target, blk_file)
        libvirt.check_result(ret, skip_if=unsupported_err)

        dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
        if not dom_xml.count(blk_mirror):
            test.fail("Can't see block job in domain xml")

        # Abort
        ret = virsh.blockjob(vm_name, target, "--abort")
        libvirt.check_exit_status(ret)
        dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
        if dom_xml.count(blk_mirror):
            test.fail("Failed to abort block job")
        if os.path.exists(blk_file):
            os.remove(blk_file)

        # Sleep for a while after abort operation.
        time.sleep(5)
        # Do blockcopy again
        ret = virsh.blockcopy(vm_name, target, blk_file)
        libvirt.check_exit_status(ret)

        # Wait for complete
        def wait_func():
            ret = virsh.blockjob(vm_name, target, "--info")
            return ret.stderr.count("Block Copy: [100 %]")

        timeout = params.get("blockjob_timeout", 600)
        utils_misc.wait_for(wait_func, int(timeout))

        # Pivot
        ret = virsh.blockjob(vm_name, target, "--pivot")
        libvirt.check_exit_status(ret)
        dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
        if not dom_xml.count("source file='%s'" % blk_file):
            test.fail("Failed to pivot block job")
        # Remove the disk file.
        if os.path.exists(blk_file):
            os.remove(blk_file)

    def check_in_vm(vm_obj, target, old_parts, read_only=False):
        """
        Check mount/read/write disk in VM.
        :param vm_obj: VM guest object.
        :param target: Disk dev in VM.
        :param old_parts: List of partitions in the VM before attaching.
        :param read_only: Whether the disk is expected to be read-only.
        :return: True if the check succeeds.
        """
        try:
            session = vm_obj.wait_for_login()
            new_parts = utils_disk.get_parts_list(session)
            added_parts = list(set(new_parts).difference(set(old_parts)))
            logging.info("Added parts:%s", added_parts)
            if len(added_parts) != 1:
                logging.error("The number of new partitions is invalid in VM")
                return False

            added_part = None
            if target.startswith("vd"):
                if added_parts[0].startswith("vd"):
                    added_part = added_parts[0]
            elif target.startswith("hd"):
                if added_parts[0].startswith("sd"):
                    added_part = added_parts[0]

            if not added_part:
                logging.error("Can't see added partition in VM")
                return False

            cmd = ("mount /dev/{0} /mnt && ls /mnt && (sleep 15;"
                   " touch /mnt/testfile; umount /mnt)".format(added_part))
            s, o = session.cmd_status_output(cmd, timeout=60)
            session.close()
            logging.info("Check disk operation in VM:\n, %s, %s", s, o)
            # Readonly fs, check the error messages.
            # The command may return True, read-only
            # messges can be found from the command output
            if read_only:
                if "Read-only file system" not in o:
                    return False
                else:
                    return True

            # Other errors
            if s != 0:
                return False
            return True

        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    def clean_up_volume_snapshots():
        """
        Get all snapshots of the rbd_vol.img volume, unprotect them and then clean them up.
        """
        cmd = ("rbd -m {0} {1} info {2}"
               "".format(mon_host, key_opt,
                         os.path.join(disk_src_pool, vol_name)))
        if process.run(cmd, ignore_status=True, shell=True).exit_status:
            return
        # Get snapshot list.
        cmd = ("rbd -m {0} {1} snap"
               " list {2}"
               "".format(mon_host, key_opt,
                         os.path.join(disk_src_pool, vol_name)))
        snaps_out = process.run(cmd, ignore_status=True,
                                shell=True).stdout_text
        snap_names = []
        if snaps_out:
            for line in snaps_out.rsplit("\n"):
                if line.startswith("SNAPID") or line == "":
                    continue
                snap_line = line.rsplit()
                if len(snap_line) == 4:
                    snap_names.append(snap_line[1])
            logging.debug("Find snapshots: %s", snap_names)
            # Unprotect the snapshots first, otherwise purging the volume will fail
            for snap_name in snap_names:
                cmd = ("rbd -m {0} {1} snap"
                       " unprotect {2}@{3}"
                       "".format(mon_host, key_opt,
                                 os.path.join(disk_src_pool, vol_name),
                                 snap_name))
                process.run(cmd, ignore_status=True, shell=True)
        # Purge the volume, and then delete it.
        cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} snap"
               " purge {2} && rbd -m {0} {1} rm {2}"
               "".format(mon_host, key_opt,
                         os.path.join(disk_src_pool, vol_name)))
        process.run(cmd, ignore_status=True, shell=True)

    def make_snapshot():
        """
        Make external snapshots.

        :return: external snapshot path list
        """
        logging.info("Making snapshot...")
        first_disk_source = vm.get_first_disk_devices()['source']
        snapshot_path_list = []
        snapshot2_file = os.path.join(data_dir.get_tmp_dir(), "mem.s2")
        snapshot3_file = os.path.join(data_dir.get_tmp_dir(), "mem.s3")
        snapshot4_file = os.path.join(data_dir.get_tmp_dir(), "mem.s4")
        snapshot4_disk_file = os.path.join(data_dir.get_tmp_dir(), "disk.s4")
        snapshot5_file = os.path.join(data_dir.get_tmp_dir(), "mem.s5")
        snapshot5_disk_file = os.path.join(data_dir.get_tmp_dir(), "disk.s5")

        # Attempt to take different types of snapshots.
        snapshots_param_dict = {
            "s1":
            "s1 --disk-only --no-metadata",
            "s2":
            "s2 --memspec %s --no-metadata" % snapshot2_file,
            "s3":
            "s3 --memspec %s --no-metadata --live" % snapshot3_file,
            "s4":
            "s4 --memspec %s --diskspec vda,file=%s --no-metadata" %
            (snapshot4_file, snapshot4_disk_file),
            "s5":
            "s5 --memspec %s --diskspec vda,file=%s --live --no-metadata" %
            (snapshot5_file, snapshot5_disk_file)
        }
        for snapshot_name in sorted(snapshots_param_dict.keys()):
            ret = virsh.snapshot_create_as(vm_name,
                                           snapshots_param_dict[snapshot_name],
                                           **virsh_dargs)
            libvirt.check_exit_status(ret)
            if snapshot_name != 's4' and snapshot_name != 's5':
                snapshot_path_list.append(
                    first_disk_source.replace('qcow2', snapshot_name))
        return snapshot_path_list

    def get_secret_list():
        """
        Get secret list.

        :return: secret list
        """
        logging.info("Get secret list ...")
        secret_list_result = virsh.secret_list()
        secret_list = results_stdout_52lts(
            secret_list_result).strip().splitlines()
        # First two lines contain table header followed by entries
        # for each secret, such as:
        #
        # UUID                                  Usage
        # --------------------------------------------------------------------------------
        # b4e8f6d3-100c-4e71-9f91-069f89742273  ceph client.libvirt secret
        secret_list = secret_list[2:]
        result = []
        # If secret list is empty.
        if secret_list:
            for line in secret_list:
                # Split on whitespace; the first field is the secret UUID
                linesplit = line.split(None, 1)
                result.append(linesplit[0])
        return result
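        # Illustrative return value, reusing the placeholder UUID from the
        # header comment above:
        #   ['b4e8f6d3-100c-4e71-9f91-069f89742273']
        # Each entry can be passed directly to virsh.secret_undefine().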

    mon_host = params.get("mon_host")
    disk_src_name = params.get("disk_source_name")
    disk_src_config = params.get("disk_source_config")
    disk_src_host = params.get("disk_source_host")
    disk_src_port = params.get("disk_source_port")
    disk_src_pool = params.get("disk_source_pool")
    disk_format = params.get("disk_format", "raw")
    driver_iothread = params.get("driver_iothread")
    snap_name = params.get("disk_snap_name")
    attach_device = "yes" == params.get("attach_device", "no")
    attach_disk = "yes" == params.get("attach_disk", "no")
    test_save_restore = "yes" == params.get("test_save_restore", "no")
    test_snapshot = "yes" == params.get("test_snapshot", "no")
    test_blockcopy = "yes" == params.get("test_blockcopy", "no")
    test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no")
    test_vm_parts = "yes" == params.get("test_vm_parts", "no")
    additional_guest = "yes" == params.get("additional_guest", "no")
    create_snapshot = "yes" == params.get("create_snapshot", "no")
    convert_image = "yes" == params.get("convert_image", "no")
    create_volume = "yes" == params.get("create_volume", "no")
    create_by_xml = "yes" == params.get("create_by_xml", "no")
    client_key = params.get("client_key")
    client_name = params.get("client_name")
    auth_key = params.get("auth_key")
    auth_user = params.get("auth_user")
    auth_type = params.get("auth_type")
    auth_usage = params.get("secret_usage")
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    vol_name = params.get("vol_name")
    cloned_vol_name = params.get("cloned_volume", "cloned_test_volume")
    create_from_cloned_volume = params.get("create_from_cloned_volume",
                                           "create_from_cloned_test_volume")
    vol_cap = params.get("vol_cap")
    vol_cap_unit = params.get("vol_cap_unit")
    start_vm = "yes" == params.get("start_vm", "no")
    test_disk_readonly = "yes" == params.get("test_disk_readonly", "no")
    test_disk_internal_snapshot = "yes" == params.get(
        "test_disk_internal_snapshot", "no")
    test_json_pseudo_protocol = "yes" == params.get("json_pseudo_protocol",
                                                    "no")
    disk_snapshot_with_sanlock = "yes" == params.get(
        "disk_internal_with_sanlock", "no")
    auth_place_in_source = params.get("auth_place_in_source")

    # Prepare an empty value to decide whether to delete the config file
    # at the end of the test
    ceph_cfg = ""
    # Create config file if it doesn't exist
    ceph_cfg = ceph.create_config_file(mon_host)

    # After libvirt 3.9.0, auth element can be put into source part.
    if auth_place_in_source and not libvirt_version.version_compare(3, 9, 0):
        test.cancel("Placing auth in source is not supported by the"
                    " current libvirt version")

    # Start vm and get all partitions in vm.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = utils_disk.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)
    if additional_guest:
        guest_name = "%s_%s" % (vm_name, '1')
        timeout = params.get("clone_timeout", 360)
        utils_libguestfs.virt_clone_cmd(vm_name,
                                        guest_name,
                                        True,
                                        timeout=timeout,
                                        ignore_status=False)
        additional_vm = vm.clone(guest_name)
        if start_vm:
            virsh.start(guest_name)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    key_opt = ""
    secret_uuid = None
    snapshot_path = None
    key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key")
    img_file = os.path.join(data_dir.get_tmp_dir(), "%s_test.img" % vm_name)
    front_end_img_file = os.path.join(data_dir.get_tmp_dir(),
                                      "%s_frontend_test.img" % vm_name)
    # Construct a list of unsupported error messages used to skip these kinds of tests
    unsupported_err = []
    if driver_iothread:
        unsupported_err.append('IOThreads not supported')
    if test_snapshot:
        unsupported_err.append('live disk snapshot not supported')
    if test_disk_readonly:
        if not libvirt_version.version_compare(5, 0, 0):
            unsupported_err.append('Could not create file: Permission denied')
            unsupported_err.append('Permission denied')
        else:
            unsupported_err.append(
                'unsupported configuration: external snapshot ' +
                'for readonly disk vdb is not supported')
    if test_disk_internal_snapshot:
        unsupported_err.append(
            'unsupported configuration: internal snapshot for disk ' +
            'vdb unsupported for storage type raw')
    if test_blockcopy:
        unsupported_err.append('block copy is not supported')
    if attach_disk:
        unsupported_err.append('No such file or directory')
    if create_volume:
        unsupported_err.append("backing 'volume' disks isn't yet supported")
        unsupported_err.append('this function is not supported')

    try:
        # Clean up dirty secrets in the test environment if any exist.
        dirty_secret_list = get_secret_list()
        if dirty_secret_list:
            for dirty_secret_uuid in dirty_secret_list:
                virsh.secret_undefine(dirty_secret_uuid)
        # Prepare test environment.
        qemu_config = LibvirtQemuConfig()

        if disk_snapshot_with_sanlock:
            # Install necessary packages: sanlock, libvirt-lock-sanlock
            if not utils_package.package_install(["sanlock"]):
                test.error("Failed to install sanlock")
            if not utils_package.package_install(["libvirt-lock-sanlock"]):
                test.error("Failed to install libvirt-lock-sanlock")

            # Set virt_use_sanlock
            result = process.run("setsebool -P virt_use_sanlock 1", shell=True)
            if result.exit_status:
                test.error("Failed to set virt_use_sanlock value")

            # Update lock_manager in qemu.conf
            qemu_config.lock_manager = 'sanlock'

            # Update qemu-sanlock.conf.
            san_lock_config = LibvirtSanLockConfig()
            san_lock_config.user = '******'
            san_lock_config.group = 'sanlock'
            san_lock_config.host_id = 1
            san_lock_config.auto_disk_leases = True
            process.run("mkdir -p /var/lib/libvirt/sanlock", shell=True)
            san_lock_config.disk_lease_dir = "/var/lib/libvirt/sanlock"
            san_lock_config.require_lease_for_disks = False

            # Start sanlock service and restart libvirtd to enforce changes.
            result = process.run("systemctl start wdmd", shell=True)
            if result.exit_status:
                test.error("Failed to start wdmd service")
            result = process.run("systemctl start sanlock", shell=True)
            if result.exit_status:
                test.error("Failed to start sanlock service")
            utils_libvirtd.Libvirtd().restart()

            # Prepare lockspace and lease file for sanlock in order.
            sanlock_cmd_dict = OrderedDict()
            sanlock_cmd_dict[
                "truncate -s 1M /var/lib/libvirt/sanlock/TEST_LS"] = "Failed to truncate TEST_LS"
            sanlock_cmd_dict[
                "sanlock direct init -s TEST_LS:0:/var/lib/libvirt/sanlock/TEST_LS:0"] = "Failed to sanlock direct init TEST_LS:0"
            sanlock_cmd_dict[
                "chown sanlock:sanlock /var/lib/libvirt/sanlock/TEST_LS"] = "Failed to chown sanlock TEST_LS"
            sanlock_cmd_dict[
                "restorecon -R -v /var/lib/libvirt/sanlock"] = "Failed to restorecon sanlock"
            sanlock_cmd_dict[
                "truncate -s 1M /var/lib/libvirt/sanlock/test-disk-resource-lock"] = "Failed to truncate test-disk-resource-lock"
            sanlock_cmd_dict[
                "sanlock direct init -r TEST_LS:test-disk-resource-lock:" +
                "/var/lib/libvirt/sanlock/test-disk-resource-lock:0"] = "Failed to sanlock direct init test-disk-resource-lock"
            sanlock_cmd_dict[
                "chown sanlock:sanlock " +
                "/var/lib/libvirt/sanlock/test-disk-resource-lock"] = "Failed to chown test-disk-resource-loc"
            sanlock_cmd_dict[
                "sanlock client add_lockspace -s TEST_LS:1:" +
                "/var/lib/libvirt/sanlock/TEST_LS:0"] = "Failed to client add_lockspace -s TEST_LS:0"
            for sanlock_cmd in sanlock_cmd_dict.keys():
                result = process.run(sanlock_cmd, shell=True)
                if result.exit_status:
                    test.error(sanlock_cmd_dict[sanlock_cmd])

            # Create one lease device and add it to VM.
            san_lock_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            lease_device = Lease()
            lease_device.lockspace = 'TEST_LS'
            lease_device.key = 'test-disk-resource-lock'
            lease_device.target = {
                'path': '/var/lib/libvirt/sanlock/test-disk-resource-lock'
            }
            san_lock_vmxml.add_device(lease_device)
            san_lock_vmxml.sync()

        # Install the ceph-common package, which includes the rbd command
        if utils_package.package_install(["ceph-common"]):
            if client_name and client_key:
                with open(key_file, 'w') as f:
                    f.write("[%s]\n\tkey = %s\n" % (client_name, client_key))
                key_opt = "--keyring %s" % key_file

                # Create secret xml
                sec_xml = secret_xml.SecretXML("no", "no")
                sec_xml.usage = auth_type
                sec_xml.usage_name = auth_usage
                sec_xml.xmltreefile.write()

                logging.debug("Secret xml: %s", sec_xml)
                ret = virsh.secret_define(sec_xml.xml)
                libvirt.check_exit_status(ret)

                secret_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+",
                                         ret.stdout.strip())[0].lstrip()
                logging.debug("Secret uuid %s", secret_uuid)
                if secret_uuid is None:
                    test.error("Failed to get secret uuid")

                # Set secret value
                auth_key = params.get("auth_key")
                ret = virsh.secret_set_value(secret_uuid, auth_key,
                                             **virsh_dargs)
                libvirt.check_exit_status(ret)

            # Delete the disk if it exists
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                   "{2}".format(mon_host, key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)
        else:
            test.error("Failed to install ceph-common")

        if disk_src_config:
            config_ceph()
        disk_path = ("rbd:%s:mon_host=%s" % (disk_src_name, mon_host))
        if auth_user and auth_key:
            disk_path += (":id=%s:key=%s" % (auth_user, auth_key))
        targetdev = params.get("disk_target", "vdb")
        # To be compatible with create_disk_xml function,
        # some parameters need to be updated.
        params.update({
            "type_name": params.get("disk_type", "network"),
            "target_bus": params.get("disk_target_bus"),
            "target_dev": targetdev,
            "secret_uuid": secret_uuid,
            "source_protocol": params.get("disk_source_protocol"),
            "source_name": disk_src_name,
            "source_host_name": disk_src_host,
            "source_host_port": disk_src_port
        })
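        # For reference only (illustrative): with these params,
        # libvirt.create_disk_xml is expected to produce roughly:
        #   <disk type='network' device='disk'>
        #     <source protocol='rbd' name='POOL/IMAGE'>
        #       <host name='MON_HOST' port='6789'/>
        #     </source>
        #     <auth username='AUTH_USER'>
        #       <secret type='ceph' uuid='SECRET_UUID'/>
        #     </auth>
        #     <target dev='vdb' bus='virtio'/>
        #   </disk>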
        # Prepare disk image
        if convert_image:
            first_disk = vm.get_first_disk_devices()
            blk_source = first_disk['source']
            # Convert the image to remote storage
            disk_cmd = ("rbd -m %s %s info %s 2> /dev/null|| qemu-img convert"
                        " -O %s %s %s" % (mon_host, key_opt, disk_src_name,
                                          disk_format, blk_source, disk_path))
            process.run(disk_cmd, ignore_status=False, shell=True)

        elif create_volume:
            vol_params = {
                "name": vol_name,
                "capacity": int(vol_cap),
                "capacity_unit": vol_cap_unit,
                "format": disk_format
            }

            create_pool()
            create_vol(vol_params)
            check_vol(vol_params)
        else:
            # Create a local image and make a FS on it.
            disk_cmd = ("qemu-img create -f %s %s 10M && mkfs.ext4 -F %s" %
                        (disk_format, img_file, img_file))
            process.run(disk_cmd, ignore_status=False, shell=True)
            # Convert the image to remote storage
            disk_cmd = (
                "rbd -m %s %s info %s 2> /dev/null|| qemu-img convert -O"
                " %s %s %s" % (mon_host, key_opt, disk_src_name, disk_format,
                               img_file, disk_path))
            process.run(disk_cmd, ignore_status=False, shell=True)
            # Create disk snapshot if needed.
            if create_snapshot:
                snap_cmd = ("rbd -m %s %s snap create %s@%s" %
                            (mon_host, key_opt, disk_src_name, snap_name))
                process.run(snap_cmd, ignore_status=False, shell=True)
            if test_json_pseudo_protocol:
                # Create one frontend image with the rbd backing file.
                json_str = ('json:{"file.driver":"rbd",'
                            '"file.filename":"rbd:%s:mon_host=%s"}' %
                            (disk_src_name, mon_host))
                # pass different json string according to the auth config
                if auth_user and auth_key:
                    json_str = ('%s:id=%s:key=%s"}' %
                                (json_str[:-2], auth_user, auth_key))
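                # Illustrative final json_str with auth (all values are
                # placeholders):
                #   json:{"file.driver":"rbd",
                #     "file.filename":"rbd:POOL/IMAGE:mon_host=HOST:id=USER:key=BASE64KEY"}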
                disk_cmd = ("qemu-img create -f qcow2 -b '%s' %s" %
                            (json_str, front_end_img_file))
                disk_path = front_end_img_file
                process.run(disk_cmd, ignore_status=False, shell=True)
        # If hot plugging, start the VM first and then wait for the OS to boot.
        # Otherwise stop the VM if it is running.
        if start_vm:
            if vm.is_dead():
                vm.start()
            vm.wait_for_login().close()
        else:
            if not vm.is_dead():
                vm.destroy()
        if attach_device:
            if create_volume:
                params.update({"source_pool": pool_name})
                params.update({"type_name": "volume"})
                # No need for auth options for a volume
                if "auth_user" in params:
                    params.pop("auth_user")
                if "auth_type" in params:
                    params.pop("auth_type")
                if "secret_type" in params:
                    params.pop("secret_type")
                if "secret_uuid" in params:
                    params.pop("secret_uuid")
                if "secret_usage" in params:
                    params.pop("secret_usage")
            # After libvirt 3.9.0, the auth element can be placed in the source part.
            if auth_place_in_source:
                params.update({"auth_in_source": auth_place_in_source})
            xml_file = libvirt.create_disk_xml(params)
            if additional_guest:
                # Copy xml_file for additional guest VM.
                shutil.copyfile(xml_file, additional_xml_file)
            opts = params.get("attach_option", "")
            ret = virsh.attach_device(vm_name,
                                      xml_file,
                                      flagstr=opts,
                                      debug=True)
            libvirt.check_result(ret, skip_if=unsupported_err)
            if additional_guest:
                # Make sure the additional VM is running
                if additional_vm.is_dead():
                    additional_vm.start()
                    additional_vm.wait_for_login().close()
                ret = virsh.attach_device(guest_name,
                                          additional_xml_file,
                                          "",
                                          debug=True)
                libvirt.check_result(ret, skip_if=unsupported_err)
        elif attach_disk:
            opts = params.get("attach_option", "")
            ret = virsh.attach_disk(vm_name, disk_path, targetdev, opts)
            libvirt.check_result(ret, skip_if=unsupported_err)
        elif test_disk_readonly:
            params.update({'readonly': "yes"})
            xml_file = libvirt.create_disk_xml(params)
            opts = params.get("attach_option", "")
            ret = virsh.attach_device(vm_name,
                                      xml_file,
                                      flagstr=opts,
                                      debug=True)
            libvirt.check_result(ret, skip_if=unsupported_err)
        elif test_disk_internal_snapshot:
            xml_file = libvirt.create_disk_xml(params)
            opts = params.get("attach_option", "")
            ret = virsh.attach_device(vm_name,
                                      xml_file,
                                      flagstr=opts,
                                      debug=True)
            libvirt.check_result(ret, skip_if=unsupported_err)
        elif disk_snapshot_with_sanlock:
            if vm.is_dead():
                vm.start()
            snapshot_path = make_snapshot()
            if vm.is_alive():
                vm.destroy()
        elif not create_volume:
            libvirt.set_vm_disk(vm, params)
        if test_blockcopy:
            logging.info("Creating %s...", vm_name)
            vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vm.undefine()
            if virsh.create(vmxml_for_test.xml, **virsh_dargs).exit_status:
                vmxml_backup.define()
                test.fail("Can't create the domain")
        elif vm.is_dead():
            vm.start()
        # Wait until the vm is running
        vm.wait_for_login(timeout=600).close()
        if additional_guest:
            if additional_vm.is_dead():
                additional_vm.start()
        # Check qemu command line
        if test_qemu_cmd:
            check_qemu_cmd()
        # Check partitions in vm
        if test_vm_parts:
            if not check_in_vm(
                    vm, targetdev, old_parts, read_only=create_snapshot):
                test.fail("Failed to check vm partitions")
            if additional_guest:
                if not check_in_vm(additional_vm, targetdev, old_parts):
                    test.fail("Failed to check vm partitions")
        # Save and restore operation
        if test_save_restore:
            check_save_restore()
        if test_snapshot:
            snap_option = params.get("snapshot_option", "")
            check_snapshot(snap_option)
        if test_blockcopy:
            check_blockcopy(targetdev)
        if test_disk_readonly:
            snap_option = params.get("snapshot_option", "")
            check_snapshot(snap_option, 'vdb')
        if test_disk_internal_snapshot:
            snap_option = params.get("snapshot_option", "")
            check_snapshot(snap_option, targetdev)
        # Detach the device.
        if attach_device:
            xml_file = libvirt.create_disk_xml(params)
            ret = virsh.detach_device(vm_name, xml_file)
            libvirt.check_exit_status(ret)
            if additional_guest:
                ret = virsh.detach_device(guest_name, xml_file)
                libvirt.check_exit_status(ret)
        elif attach_disk:
            ret = virsh.detach_disk(vm_name, targetdev)
            libvirt.check_exit_status(ret)

        # Check disk in vm after detachment.
        if attach_device or attach_disk:
            session = vm.wait_for_login()
            new_parts = utils_disk.get_parts_list(session)
            if len(new_parts) != len(old_parts):
                test.fail("Disk still exists in vm" " after detachment")
            session.close()

    except virt_vm.VMStartError as details:
        for msg in unsupported_err:
            if msg in str(details):
                test.cancel(str(details))
        else:
            test.fail("VM failed to start." "Error: %s" % str(details))
    finally:
        # Remove ceph configure file if created.
        if ceph_cfg:
            os.remove(ceph_cfg)
        # Delete snapshots.
        snapshot_lists = virsh.snapshot_list(vm_name)
        if len(snapshot_lists) > 0:
            libvirt.clean_up_snapshots(vm_name, snapshot_lists)
            for snap in snapshot_lists:
                virsh.snapshot_delete(vm_name, snap, "--metadata")

        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        if additional_guest:
            virsh.remove_domain(guest_name,
                                "--remove-all-storage",
                                ignore_status=True)
        # Remove the snapshot.
        if create_snapshot:
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} snap"
                   " purge {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)
        elif create_volume:
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt,
                             os.path.join(disk_src_pool, cloned_vol_name)))
            process.run(cmd, ignore_status=True, shell=True)
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
                   "".format(
                       mon_host, key_opt,
                       os.path.join(disk_src_pool, create_from_cloned_volume)))
            process.run(cmd, ignore_status=True, shell=True)
            clean_up_volume_snapshots()
        else:
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)

        # Delete tmp files.
        if os.path.exists(key_file):
            os.remove(key_file)
        if os.path.exists(img_file):
            os.remove(img_file)
        # Clean up volume, pool
        if vol_name and vol_name in str(virsh.vol_list(pool_name).stdout):
            virsh.vol_delete(vol_name, pool_name)
        if pool_name and pool_name in virsh.pool_state_dict():
            virsh.pool_destroy(pool_name, **virsh_dargs)
            virsh.pool_undefine(pool_name, **virsh_dargs)

        # Clean up secret
        secret_list = get_secret_list()
        if secret_list:
            for secret_uuid in secret_list:
                virsh.secret_undefine(secret_uuid)

        logging.info("Restoring vm...")
        vmxml_backup.sync()

        if disk_snapshot_with_sanlock:
            # Restore virt_use_sanlock setting.
            process.run("setsebool -P virt_use_sanlock 0", shell=True)
            # Restore qemu config
            qemu_config.restore()
            utils_libvirtd.Libvirtd().restart()
            # Force shutdown sanlock service.
            process.run("sanlock client shutdown -f 1", shell=True)
            # Clean up lockspace folder
            process.run("rm -rf  /var/lib/libvirt/sanlock/*", shell=True)
            if snapshot_path is not None:
                for snapshot in snapshot_path:
                    if os.path.exists(snapshot):
                        os.remove(snapshot)
Example No. 16
def run(test, params, env):
    """
    1. prepare a vHBA
    2. find the nodedev's lun name
    3. prepare the lun dev's xml
    4. start vm
    5. attach disk xml to vm
    6. login vm and check the disk
    7. detach the virtual disk
    8. check the blkdev gone
    9. cleanup env.
    """
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    wwpn = params.get("wwpn", "WWPN_EXAMPLE")
    wwnn = params.get("wwnn", "WWNN_EXAMPLE")
    disk_device = params.get("disk_device", "disk")
    device_type = params.get("device_type", "file")
    device_target = params.get("device_target", "vdb")
    lun_dir_method = params.get("lun_dir_method", "by-path")
    driver_name = params.get("driver_name", "qemu")
    driver_type = params.get("driver_type", "raw")
    target_bus = params.get("target_bus", "virtio")
    readonly = params.get("readonly", "no")
    new_vhbas = []
    blk_dev = ""
    lun_dev = ""
    lun_dev_path = ""
    lun_sl = []
    new_disk = ""
    old_mpath_conf = ""
    mpath_conf_path = "/etc/multipath.conf"
    original_mpath_conf_exist = os.path.exists(mpath_conf_path)
    vm = env.get_vm(vm_name)
    try:
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml_backup = vmxml.copy()
        old_disk_count = vmxml.get_disk_count(vm_name)
        # Prepare vHBA
        online_hbas = utils_npiv.find_hbas("hba")
        old_vhbas = utils_npiv.find_hbas("vhba")
        if not online_hbas:
            raise exceptions.TestSkipError("Host doesn't have online hba!")
        old_mpath_conf = utils_npiv.prepare_multipath_conf(conf_path=mpath_conf_path,
                                                           replace_existing=True)
        first_online_hba = online_hbas[0]
        new_vhba = utils_npiv.nodedev_create_from_xml(
                {"nodedev_parent": first_online_hba,
                 "scsi_wwnn": wwnn,
                 "scsi_wwpn": wwpn})
        utils_misc.wait_for(lambda: utils_npiv.is_vhbas_added(old_vhbas),
                            timeout=_TIMEOUT)
        if not utils_npiv.is_vhbas_added(old_vhbas):
            raise exceptions.TestFail("vHBA is not successfully created.")
        new_vhbas.append(new_vhba)
        new_vhba_scsibus = re.sub(r"\D", "", new_vhba)
        # Get the new block device generated by the new vHBA
        utils_misc.wait_for(lambda: get_blks_by_scsi(new_vhba_scsibus),
                            timeout=_TIMEOUT)
        blk_devs = get_blks_by_scsi(new_vhba_scsibus)
        if not blk_devs:
            raise exceptions.TestFail("block device not found with scsi_%s",
                                      new_vhba_scsibus)
        first_blk_dev = blk_devs[0]
        # Get the symbolic link of the device in /dev/disk/by-[path|uuid|id]
        logging.debug("first_blk_dev = %s, lun_dir_method = %s"
                      % (first_blk_dev, lun_dir_method))
        utils_misc.wait_for(
            lambda: get_symbols_by_blk(first_blk_dev, lun_dir_method),
            timeout=_TIMEOUT)
        lun_sl = get_symbols_by_blk(first_blk_dev, lun_dir_method)
        if not lun_sl:
            raise exceptions.TestFail("lun symbolic links not found under "
                                      "/dev/disk/%s/ for block device %s." %
                                      (lun_dir_method, blk_dev))
        lun_dev = lun_sl[0]
        lun_dev_path = "/dev/disk/" + lun_dir_method + "/" + lun_dev
        # Prepare xml of virtual disk
        disk_params = {'type_name': device_type, 'device': disk_device,
                       'driver_name': driver_name, 'driver_type': driver_type,
                       'source_file': lun_dev_path,
                       'target_dev': device_target, 'target_bus': target_bus,
                       'readonly': readonly}
        disk_xml = os.path.join(data_dir.get_tmp_dir(), 'disk_xml.xml')
        lun_disk_xml = utlv.create_disk_xml(disk_params)
        copyfile(lun_disk_xml, disk_xml)
        if not vm.is_alive():
            vm.start()
        session = vm.wait_for_login()
        libvirt_vm = lib_vm.VM(vm_name, vm.params, vm.root_dir,
                               vm.address_cache)
        old_disks = libvirt_vm.get_disks()
        # Attach disk
        dev_attach_status = virsh.attach_device(
                    vm_name, disk_xml, debug=True)
        utlv.check_exit_status(dev_attach_status)

        cur_disk_count = vmxml.get_disk_count(vm_name)
        cur_disks = libvirt_vm.get_disks()
        if cur_disk_count <= old_disk_count:
            raise exceptions.TestFail(
                    "Failed to attach disk: %s" % lun_disk_xml)
        new_disk = "".join(list(set(old_disks) ^ set(cur_disks)))
        logging.debug("Attached device in vm:%s", new_disk)
        # Check disk in VM
        output = session.cmd_status_output('mkfs.ext4 -F %s' % new_disk)
        logging.debug("mkfs.ext4 the disk in vm, result: %s", output[1])
        if not check_vm_disk(session, new_disk, readonly):
            raise exceptions.TestFail("Failed check the disk in vm.")
        session.cmd_status_output('umount %s' % new_disk)
        # Detach disk
        dev_detach_status = virsh.detach_device(vm_name, disk_xml, debug=True)
        utlv.check_exit_status(dev_detach_status)
        cur_disks = libvirt_vm.get_disks()
        if cur_disks != old_disks:
            raise exceptions.TestFail("Detach disk failed.")
        session.close()

    finally:
        utils_npiv.vhbas_cleanup(new_vhbas)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()
        process.system('service multipathd restart', verbose=True)
        if old_mpath_conf:
            utils_npiv.prepare_multipath_conf(conf_path=mpath_conf_path,
                                              conf_content=old_mpath_conf,
                                              replace_existing=True)
        if not original_mpath_conf_exist and os.path.exists(mpath_conf_path):
            os.remove(mpath_conf_path)
Example No. 17
def run(test, params, env):
    """
    Test push-mode incremental backup

    Steps:
    1. create a vm with extra disk vdb
    2. create some data on vdb in vm
    3. start a push mode full backup on vdb
    4. create some data on vdb in vm
    5. start a push mode incremental backup
    6. repeat step 4 and 5 as required
    7. check the full/incremental backup file data
    """
    def backup_job_done(vm_name, vm_disk):
        """
        Check if a backup job for a vm's specific disk is finished.

        :param vm_name: vm's name
        :param vm_disk: the disk to be checked, such as 'vdb'
        :return: 'True' means job finished
        """
        result = virsh.blockjob(vm_name, vm_disk, debug=True)
        if "no current block job" in result.stdout_text.strip().lower():
            return True
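        # Falling through (returning None) keeps utils_misc.wait_for polling
        # until the block job disappears or the timeout expires.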

    # Cancel the test if libvirt version is too low
    if not libvirt_version.version_compare(6, 0, 0):
        test.cancel("Current libvirt version doesn't support "
                    "incremental backup.")

    hotplug_disk = "yes" == params.get("hotplug_disk", "no")
    original_disk_size = params.get("original_disk_size", "100M")
    original_disk_type = params.get("original_disk_type", "local")
    original_disk_target = params.get("original_disk_target", "vdb")
    target_driver = params.get("target_driver", "qcow2")
    target_type = params.get("target_type", "file")
    target_blkdev_path = params.get("target_blkdev_path")
    target_blkdev_size = params.get("target_blkdev_size", original_disk_size)
    reuse_target_file = "yes" == params.get("reuse_target_file")
    prepare_target_file = "yes" == params.get("prepare_target_file")
    prepare_target_blkdev = "yes" == params.get("prepare_target_blkdev")
    backup_rounds = int(params.get("backup_rounds", 3))
    backup_error = "yes" == params.get("backup_error")
    tmp_dir = data_dir.get_data_dir()
    virsh_dargs = {'debug': True, 'ignore_status': True}

    try:
        vm_name = params.get("main_vm")
        vm = env.get_vm(vm_name)

        # Make sure there is no checkpoint metadata before test
        utils_backup.clean_checkpoints(vm_name)

        # Backup vm xml
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        vmxml_backup = vmxml.copy()
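        # Incremental backup relies on qemu dirty bitmaps; the helper below
        # enables that capability in the VM definition before the test runs.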
        utils_backup.enable_inc_backup_for_vm(vm)

        # Prepare the disk to be backed up.
        disk_params = {}
        disk_path = ""
        if original_disk_type == "local":
            image_name = "{}_image.qcow2".format(original_disk_target)
            disk_path = os.path.join(tmp_dir, image_name)
            libvirt.create_local_disk("file", disk_path, original_disk_size,
                                      "qcow2")
            disk_params = {
                "device_type": "disk",
                "type_name": "file",
                "driver_type": "qcow2",
                "target_dev": original_disk_target,
                "source_file": disk_path
            }
        elif original_disk_type == "ceph":
            ceph_mon_host = params.get("ceph_mon_host",
                                       "EXAMPLE_MON_HOST_AUTHX")
            ceph_host_port = params.get("ceph_host_port", "EXAMPLE_PORT")
            ceph_pool_name = params.get("ceph_pool_name", "EXAMPLE_POOL")
            ceph_file_name = params.get("ceph_file_name", "EXAMPLE_FILE")
            ceph_disk_name = ceph_pool_name + "/" + ceph_file_name
            ceph_client_name = params.get("ceph_client_name",
                                          "EXAMPLE_CLIENT_NAME")
            ceph_client_key = params.get("ceph_client_key",
                                         "EXAMPLE_CLIENT_KEY")
            ceph_auth_user = params.get("ceph_auth_user", "EXAMPLE_AUTH_USER")
            ceph_auth_key = params.get("ceph_auth_key", "EXAMPLE_AUTH_KEY")
            auth_sec_usage_type = "ceph"

            enable_auth = "yes" == params.get("enable_auth", "yes")
            key_file = os.path.join(tmp_dir, "ceph.key")
            key_opt = ""
            # Prepare a blank ceph config path so cleanup knows whether a
            # config file was created during the test
            ceph_cfg = ""
            if not utils_package.package_install(["ceph-common"]):
                test.error("Failed to install ceph-common")
            # Create config file if it doesn't exist
            ceph_cfg = ceph.create_config_file(ceph_mon_host)
            if enable_auth:
                # If enable auth, prepare a local file to save key
                if ceph_client_name and ceph_client_key:
                    with open(key_file, 'w') as f:
                        f.write("[%s]\n\tkey = %s\n" %
                                (ceph_client_name, ceph_client_key))
                    key_opt = "--keyring %s" % key_file
                    auth_sec_dict = {
                        "sec_usage": auth_sec_usage_type,
                        "sec_name": "ceph_auth_secret"
                    }
                    auth_sec_uuid = libvirt.create_secret(auth_sec_dict)
                    virsh.secret_set_value(auth_sec_uuid,
                                           ceph_auth_key,
                                           debug=True)
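                    # 'auth_in_source' asks for the <auth> element to be
                    # placed inside <source> (supported since libvirt 3.9)
                    # instead of directly under <disk>.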
                    disk_params_auth = {
                        "auth_user": ceph_auth_user,
                        "secret_type": auth_sec_usage_type,
                        "secret_uuid": auth_sec_uuid,
                        "auth_in_source": True
                    }
                else:
                    test.error("No ceph client name/key provided.")
                disk_path = "rbd:%s:mon_host=%s:keyring=%s" % (
                    ceph_disk_name, ceph_mon_host, key_file)
            ceph.rbd_image_rm(ceph_mon_host, ceph_pool_name, ceph_file_name,
                              ceph_cfg, key_file)
            process.run("qemu-img create -f qcow2 %s %s" %
                        (disk_path, original_disk_size),
                        shell=True,
                        verbose=True)
            disk_params = {
                'device_type': 'disk',
                'type_name': 'network',
                "driver_type": "qcow2",
                'target_dev': original_disk_target
            }
            disk_params_src = {
                'source_protocol': 'rbd',
                'source_name': ceph_disk_name,
                'source_host_name': ceph_mon_host,
                'source_host_port': ceph_host_port
            }
            disk_params.update(disk_params_src)
            if enable_auth:
                disk_params.update(disk_params_auth)
        else:
            test.error("The disk type '%s' not supported in this script." %
                       original_disk_type)
        if hotplug_disk:
            vm.start()
            vm.wait_for_login().close()
            disk_xml = libvirt.create_disk_xml(disk_params)
            virsh.attach_device(vm_name, disk_xml, debug=True)
        else:
            disk_xml = libvirt.create_disk_xml(disk_params)
            virsh.attach_device(vm.name,
                                disk_xml,
                                flagstr="--config",
                                debug=True)
            vm.start()
        session = vm.wait_for_login()
        new_disks_in_vm = list(utils_disk.get_linux_disks(session).keys())
        session.close()
        if len(new_disks_in_vm) != 1:
            test.fail("Test disk not prepared in vm")

        # Use the newly added disk as test disk
        test_disk_in_vm = "/dev/" + new_disks_in_vm[0]
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        vm_disks = list(vmxml.get_disk_all().keys())

        checkpoint_list = []
        is_incremental = False
        backup_path_list = []
        for backup_index in range(backup_rounds):
            # Prepare backup xml
            backup_params = {"backup_mode": "push"}
            if backup_index > 0:
                is_incremental = True
                backup_params["backup_incremental"] = "checkpoint_" + str(
                    backup_index - 1)
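                # Each incremental round is diffed against the checkpoint of
                # the previous round, so only data written since then ends up
                # in this round's target image.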

            backup_disk_xmls = []
            for vm_disk in vm_disks:
                backup_disk_params = {"disk_name": vm_disk}
                if vm_disk != original_disk_target:
                    backup_disk_params["enable_backup"] = "no"
                else:
                    backup_disk_params["enable_backup"] = "yes"
                    backup_disk_params["disk_type"] = target_type
                    target_params = {"attrs": {}}
                    if target_type == "file":
                        target_file_name = "target_file_%s" % backup_index
                        target_file_path = os.path.join(
                            tmp_dir, target_file_name)
                        if prepare_target_file:
                            libvirt.create_local_disk("file", target_file_path,
                                                      original_disk_size,
                                                      target_driver)
                        target_params["attrs"]["file"] = target_file_path
                        backup_path_list.append(target_file_path)
                    elif target_type == "block":
                        if prepare_target_blkdev:
                            target_blkdev_path = libvirt.setup_or_cleanup_iscsi(
                                is_setup=True, image_size=target_blkdev_size)
                        target_params["attrs"]["dev"] = target_blkdev_path
                        backup_path_list.append(target_blkdev_path)
                    else:
                        test.fail(
                            "We do not support backup target type: '%s'" %
                            target_type)
                    logging.debug("target params: %s", target_params)
                    backup_disk_params["backup_target"] = target_params
                    driver_params = {"type": target_driver}
                    backup_disk_params["backup_driver"] = driver_params
                backup_disk_xml = utils_backup.create_backup_disk_xml(
                    backup_disk_params)
                backup_disk_xmls.append(backup_disk_xml)
            logging.debug("disk list %s", backup_disk_xmls)
            backup_xml = utils_backup.create_backup_xml(
                backup_params, backup_disk_xmls)
            logging.debug("ROUND_%s Backup Xml: %s", backup_index, backup_xml)
            # Prepare checkpoint xml
            checkpoint_name = "checkpoint_%s" % backup_index
            checkpoint_list.append(checkpoint_name)
            cp_params = {"checkpoint_name": checkpoint_name}
            cp_params["checkpoint_desc"] = params.get(
                "checkpoint_desc", "desc of cp_%s" % backup_index)
            disk_param_list = []
            for vm_disk in vm_disks:
                cp_disk_param = {"name": vm_disk}
                if vm_disk != original_disk_target:
                    cp_disk_param["checkpoint"] = "no"
                else:
                    cp_disk_param["checkpoint"] = "bitmap"
                    cp_disk_bitmap = params.get("cp_disk_bitmap")
                    if cp_disk_bitmap:
                        cp_disk_param["bitmap"] = cp_disk_bitmap + str(
                            backup_index)
                disk_param_list.append(cp_disk_param)
            checkpoint_xml = utils_backup.create_checkpoint_xml(
                cp_params, disk_param_list)
            logging.debug("ROUND_%s Checkpoint Xml: %s", backup_index,
                          checkpoint_xml)

            # Start backup
            backup_options = backup_xml.xml + " " + checkpoint_xml.xml
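            # 'virsh backup-begin' accepts the backup XML plus an optional
            # checkpoint XML, creating the checkpoint together with the job.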

            # Create some data in vdb
            dd_count = "1"
            dd_seek = str(backup_index * 10 + 10)
            dd_bs = "1M"
            session = vm.wait_for_login()
            utils_disk.dd_data_to_vm_disk(session, test_disk_in_vm, dd_bs,
                                          dd_seek, dd_count)
            session.close()

            if reuse_target_file:
                backup_options += " --reuse-external"
            backup_result = virsh.backup_begin(vm_name,
                                               backup_options,
                                               debug=True)
            if backup_result.exit_status:
                raise utils_backup.BackupBeginError(
                    backup_result.stderr.strip())

            # Wait for the backup job actually finished
            if not utils_misc.wait_for(
                    lambda: backup_job_done(vm_name, original_disk_target),
                    60):
                test.fail("Backup job not finished in 60s")

        for checkpoint_name in checkpoint_list:
            virsh.checkpoint_delete(vm_name, checkpoint_name, debug=True)
        if vm.is_alive():
            vm.destroy(gracefully=False)

        # Compare the backup data and original data
        original_data_file = os.path.join(tmp_dir, "original_data.qcow2")
        cmd = "qemu-img convert -f qcow2 %s -O qcow2 %s" % (disk_path,
                                                            original_data_file)
        process.run(cmd, shell=True, verbose=True)
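        # The convert above copies the source disk (local file or rbd image)
        # into a plain local qcow2 so it can be compared with each backup.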

        for backup_path in backup_path_list:
            if target_driver == "qcow2":
                # Clear backup image's backing file before comparison
                qemu_cmd = ("qemu-img rebase -u -f qcow2 -b '' -F qcow2 %s" %
                            backup_path)
                process.run(qemu_cmd, shell=True, verbose=True)
            if not utils_backup.cmp_backup_data(
                    original_data_file,
                    backup_path,
                    backup_file_driver=target_driver):
                test.fail("Backup and original data are not identical for"
                          "'%s' and '%s'" % (disk_path, backup_path))
            else:
                logging.debug("'%s' contains correct backup data", backup_path)
    except utils_backup.BackupBeginError as details:
        if backup_error:
            logging.debug("Backup failed as expected.")
        else:
            test.fail(details)
    finally:
        # Remove checkpoints
        if "checkpoint_list" in locals() and checkpoint_list:
            for checkpoint_name in checkpoint_list:
                virsh.checkpoint_delete(vm_name, checkpoint_name)

        if vm.is_alive():
            vm.destroy(gracefully=False)

        # Restoring vm
        vmxml_backup.sync()

        # Remove local backup file
        if "target_file_path" in locals():
            if os.path.exists(target_file_path):
                os.remove(target_file_path)

        # Remove iscsi devices
        libvirt.setup_or_cleanup_iscsi(False)

        # Remove ceph related data
        if original_disk_type == "ceph":
            ceph.rbd_image_rm(ceph_mon_host, ceph_pool_name, ceph_file_name,
                              ceph_cfg, key_file)
            if "auth_sec_uuid" in locals() and auth_sec_uuid:
                virsh.secret_undefine(auth_sec_uuid)
            if "ceph_cfg" in locals() and os.path.exists(ceph_cfg):
                os.remove(ceph_cfg)
            if os.path.exists(key_file):
                os.remove(key_file)
Exemplo n.º 18
0
if attach_device:
    if create_volume:
        params.update({"source_pool": pool_name})
        params.update({"type_name": "volume"})
        # Auth options are not needed for a volume-type disk
        for auth_key in ("auth_user", "auth_type", "secret_type",
                         "secret_uuid", "secret_usage"):
            params.pop(auth_key, None)
    xml_file = libvirt.create_disk_xml(params)
    opts = params.get("attach_option", "")
    ret = virsh.attach_device(vm_name, xml_file,
                              flagstr=opts, debug=True)
    libvirt.check_result(ret, skip_if=unsupported_err)
    if additional_guest:
        # Make sure the additional VM is running
        if additional_vm.is_dead():
            additional_vm.start()
        ret = virsh.attach_device(guest_name, xml_file,
                                  "", debug=True)
        libvirt.check_result(ret, skip_if=unsupported_err)
elif attach_disk:
    opts = params.get("attach_option", "")
    ret = virsh.attach_disk(vm_name, disk_path,
                            targetdev, opts)
Exemplo n.º 19
0
def run(test, params, env):
    """
    Attach/Detach an iscsi network/volume disk to domain

    1. For secret usage testing:
        1.1. Setup an iscsi target with CHAP authentication.
        1.2. Define a secret for iscsi target usage
        1.3. Set secret value
    2. Setup an iscsi target (and an iscsi pool for the volume-type disk)
    3. Create an iscsi network/volume disk XML
    4. Attach disk with the XML file and check the disk inside the VM
    5. Detach the disk
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    disk_device = params.get("disk_device", "disk")
    disk_type = params.get("disk_type", "network")
    disk_src_protocol = params.get("disk_source_protocol", "iscsi")
    disk_src_host = params.get("disk_source_host", "127.0.0.1")
    disk_src_port = params.get("disk_source_port", "3260")
    disk_src_pool = params.get("disk_source_pool")
    disk_src_mode = params.get("disk_source_mode", "host")
    pool_type = params.get("pool_type", "iscsi")
    pool_src_host = params.get("pool_source_host", "127.0.0.1")
    pool_target = params.get("pool_target", "/dev/disk/by-path")
    disk_target = params.get("disk_target", "vdb")
    disk_target_bus = params.get("disk_target_bus", "virtio")
    disk_readonly = params.get("disk_readonly", "no")
    chap_auth = "yes" == params.get("chap_auth", "no")
    chap_user = params.get("chap_username", "")
    chap_passwd = params.get("chap_password", "")
    secret_usage_target = params.get("secret_usage_target")
    secret_ephemeral = params.get("secret_ephemeral", "no")
    secret_private = params.get("secret_private", "yes")
    status_error = "yes" == params.get("status_error", "no")
    vg_name = params.get("virt_disk_vg_name", "vg_test_0")
    lv_name = params.get("virt_disk_lv_name", "lv_test_0")
    driver_packed = params.get("driver_packed", "on")
    disk_packed = "yes" == params.get("disk_packed", "no")
    scsi_packed = "yes" == params.get("scsi_packed", "no")

    # Indicate the PPC platform
    on_ppc = False
    if platform.platform().count('ppc64'):
        on_ppc = True

    if disk_src_protocol == 'iscsi':
        if not libvirt_version.version_compare(1, 0, 4):
            test.cancel("'iscsi' disk doesn't support in"
                        " current libvirt version.")
    if disk_type == "volume":
        if not libvirt_version.version_compare(1, 0, 5):
            test.cancel("'volume' type disk doesn't support in"
                        " current libvirt version.")
    if pool_type == "iscsi-direct":
        if not libvirt_version.version_compare(4, 7, 0):
            test.cancel("iscsi-direct pool is not supported in"
                        " current libvirt version.")
    if ((disk_packed or scsi_packed)
            and not libvirt_version.version_compare(6, 3, 0)):
        test.cancel("The virtio packed attribute is not supported in"
                    " current libvirt version.")
    # Back up VM XML
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Fix no more PCI slots issue in certain cases.
    vm_dump_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    machine_type = params.get("machine_type", "pc")
    if machine_type == 'q35':
        vm_dump_xml.remove_all_device_by_type('controller')
        machine_list = vm_dump_xml.os.machine.split("-")
        vm_dump_xml.set_os_attrs(
            **{"machine": machine_list[0] + "-q35-" + machine_list[2]})
        q35_pcie_dict0 = {
            'controller_model': 'pcie-root',
            'controller_type': 'pci',
            'controller_index': 0
        }
        q35_pcie_dict1 = {
            'controller_model': 'pcie-root-port',
            'controller_type': 'pci'
        }
        vm_dump_xml.add_device(libvirt.create_controller_xml(q35_pcie_dict0))
        # Add enough pcie-root-port controllers to allow attaching disks multiple times
        for i in list(range(1, 12)):
            q35_pcie_dict1.update({'controller_index': "%d" % i})
            vm_dump_xml.add_device(
                libvirt.create_controller_xml(q35_pcie_dict1))
        vm_dump_xml.sync()

    virsh_dargs = {'debug': True, 'ignore_status': True}
    try:
        start_vm = "yes" == params.get("start_vm", "yes")
        if start_vm:
            if vm.is_dead():
                vm.start()
            vm.wait_for_login()
        else:
            if not vm.is_dead():
                vm.destroy()

        if chap_auth:
            # Create a secret xml to define it
            secret_xml = SecretXML(secret_ephemeral, secret_private)
            secret_xml.auth_type = "chap"
            secret_xml.auth_username = chap_user
            secret_xml.usage = disk_src_protocol
            secret_xml.target = secret_usage_target
            with open(secret_xml.xml) as f:
                logging.debug("Define secret by XML: %s", f.read())
            # Define secret
            cmd_result = virsh.secret_define(secret_xml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            # Get secret uuid
            try:
                secret_uuid = cmd_result.stdout.strip().split()[1]
            except IndexError:
                test.error("Fail to get new created secret uuid")

            # Set secret value
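            # virsh secret-set-value expects the value base64-encoded, hence
            # the encoding below.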
            encoding = locale.getpreferredencoding()
            secret_string = base64.b64encode(
                chap_passwd.encode(encoding)).decode(encoding)
            cmd_result = virsh.secret_set_value(secret_uuid, secret_string,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
        else:
            # Set chap_user and chap_passwd to empty to avoid setting up
            # CHAP authentication when exporting the iscsi target
            chap_user = ""
            chap_passwd = ""

        # Setup iscsi target
        if disk_type == "block":
            iscsi_target = libvirt.setup_or_cleanup_iscsi(
                is_setup=True,
                is_login=True,
                image_size="1G",
                chap_user=chap_user,
                chap_passwd=chap_passwd,
                portal_ip=disk_src_host)
        else:
            iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
                is_setup=True,
                is_login=False,
                image_size='1G',
                chap_user=chap_user,
                chap_passwd=chap_passwd,
                portal_ip=disk_src_host)
        # Create iscsi pool
        if disk_type == "volume":
            # Create an iscsi pool xml to create it
            pool_src_xml = pool_xml.SourceXML()
            pool_src_xml.host_name = pool_src_host
            pool_src_xml.device_path = iscsi_target
            poolxml = pool_xml.PoolXML(pool_type=pool_type)
            poolxml.name = disk_src_pool
            poolxml.set_source(pool_src_xml)
            poolxml.target_path = pool_target
            if chap_auth:
                pool_src_xml.auth_type = "chap"
                pool_src_xml.auth_username = chap_user
                pool_src_xml.secret_usage = secret_usage_target
                poolxml.set_source(pool_src_xml)
            if pool_type == "iscsi-direct":
                iscsi_initiator = params.get('iscsi_initiator')
                pool_src_xml.iqn_name = iscsi_initiator
                poolxml.set_source(pool_src_xml)
            # Create iscsi/iscsi-direct pool
            cmd_result = virsh.pool_create(poolxml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            xml = virsh.pool_dumpxml(disk_src_pool)
            logging.debug("Pool '%s' XML:\n%s", disk_src_pool, xml)

            def get_vol():
                """Get the volume info"""
                # Refresh the pool
                cmd_result = virsh.pool_refresh(disk_src_pool)
                libvirt.check_exit_status(cmd_result)
                # Get volume name
                cmd_result = virsh.vol_list(disk_src_pool, **virsh_dargs)
                libvirt.check_exit_status(cmd_result)
                vol_list = []
                vol_list = re.findall(r"(\S+)\ +(\S+)",
                                      str(cmd_result.stdout.strip()))
                if len(vol_list) > 1:
                    return vol_list[1]
                else:
                    return None

            # Wait for a while so that we can get the volume info
            vol_info = utils_misc.wait_for(get_vol, 10)
            if vol_info:
                vol_name, vol_path = vol_info
            else:
                test.error("Failed to get volume info")
            # Snapshot doesn't support raw disk format, create a qcow2 volume
            # disk for snapshot operation.
            if pool_type == "iscsi":
                process.run('qemu-img create -f qcow2 %s %s' %
                            (vol_path, '100M'),
                            shell=True,
                            verbose=True)
            else:
                # Get iscsi URL to create a qcow2 volume disk
                disk_path = ("iscsi://[%s]/%s/%s" %
                             (disk_src_host, iscsi_target, lun_num))
                blk_source = "/mnt/test.qcow2"
                process.run('qemu-img create -f qcow2 %s %s' %
                            (blk_source, '100M'),
                            shell=True,
                            verbose=True)
                process.run('qemu-img convert -O qcow2 %s %s' %
                            (blk_source, disk_path),
                            shell=True,
                            verbose=True)

        # Create block device
        if disk_type == "block":
            logging.debug("iscsi dev name: %s", iscsi_target)
            lv_utils.vg_create(vg_name, iscsi_target)
            device_source = libvirt.create_local_disk("lvm",
                                                      size="10M",
                                                      vgname=vg_name,
                                                      lvname=lv_name)
            logging.debug("New created volume: %s", lv_name)

        # Create iscsi network disk XML
        disk_params = {
            'device_type': disk_device,
            'type_name': disk_type,
            'target_dev': disk_target,
            'target_bus': disk_target_bus,
            'readonly': disk_readonly
        }
        disk_params_src = {}
        if disk_type == "network":
            disk_params_src = {
                'source_protocol': disk_src_protocol,
                'source_name': iscsi_target + "/%s" % lun_num,
                'source_host_name': disk_src_host,
                'source_host_port': disk_src_port
            }
        elif disk_type == "volume":
            if pool_type == "iscsi":
                disk_params_src = {
                    'source_pool': disk_src_pool,
                    'source_volume': vol_name,
                    'driver_type': 'qcow2',
                    'source_mode': disk_src_mode
                }
            # The iscsi-direct pool doesn't include the source_mode option
            else:
                disk_params_src = {
                    'source_pool': disk_src_pool,
                    'source_volume': vol_name,
                    'driver_type': 'qcow2'
                }
        elif disk_type == "block":
            disk_params_src = {
                'source_file': device_source,
                'driver_type': 'raw'
            }
            # Start guest with packed attribute in disk
            if disk_packed:
                disk_params_src['driver_packed'] = driver_packed
            # Start guest with packed attribute in scsi controller
            if scsi_packed:
                scsi_controller = Controller("controller")
                scsi_controller.type = "scsi"
                scsi_controller.model = "virtio-scsi"
                scsi_controller.driver = {'packed': driver_packed}
                vm_dump_xml.add_device(scsi_controller)
                vm_dump_xml.sync()
        else:
            test.cancel("Unsupported disk type in this test")
        disk_params.update(disk_params_src)
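        # For volume-type disks the CHAP credentials were already defined on
        # the storage pool above, so per-disk auth is only added otherwise.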
        if chap_auth and disk_type != "volume":
            disk_params_auth = {
                'auth_user': chap_user,
                'secret_type': disk_src_protocol,
                'secret_usage': secret_xml.target
            }
            disk_params.update(disk_params_auth)
        disk_xml = libvirt.create_disk_xml(disk_params)
        attach_option = params.get("attach_option", "")
        cmd_result = virsh.attach_device(domainarg=vm_name,
                                         filearg=disk_xml,
                                         flagstr=attach_option,
                                         dargs=virsh_dargs)
        libvirt.check_exit_status(cmd_result, status_error)

        if vm.is_dead():
            cmd_result = virsh.start(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

        # Wait for the domain to be stable
        vm.wait_for_login().close()
        domain_operation = params.get("domain_operation", "")
        if domain_operation == "save":
            save_file = os.path.join(data_dir.get_tmp_dir(), "vm.save")
            cmd_result = virsh.save(vm_name, save_file, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.restore(save_file)
            libvirt.check_exit_status(cmd_result)
            if os.path.exists(save_file):
                os.remove(save_file)
        elif domain_operation == "snapshot":
            # Run snapshot related commands: snapshot-create-as, snapshot-list
            # snapshot-info, snapshot-dumpxml, snapshot-create
            # virsh snapshot-revert is not supported on combined internal and external snapshots
            # see more details at https://bugzilla.redhat.com/show_bug.cgi?id=1733173
            snapshot_name1 = "snap1"
            snapshot_name2 = "snap2"
            cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name1,
                                                  **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            try:
                virsh.snapshot_list(vm_name, **virsh_dargs)
            except process.CmdError:
                test.fail("Failed getting snapshots list for %s" % vm_name)

            try:
                virsh.snapshot_info(vm_name, snapshot_name1, **virsh_dargs)
            except process.CmdError:
                test.fail("Failed getting snapshots info for %s" % vm_name)

            cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name1,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_create(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_current(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            virsh.snapshot_create_as(vm_name,
                                     snapshot_name2,
                                     ignore_status=False,
                                     debug=True)

            cmd_result = virsh.snapshot_revert(vm_name, snapshot_name1,
                                               **virsh_dargs)

            cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs)
            if snapshot_name2 not in cmd_result:
                test.error("Snapshot %s not found" % snapshot_name2)
        elif domain_operation == "start_with_packed":
            expect_xml_line = "packed=\"%s\"" % driver_packed
            libvirt.check_dumpxml(vm, expect_xml_line)
            expect_qemu_line = "packed=%s" % driver_packed
            libvirt.check_qemu_cmd_line(expect_qemu_line)
        elif domain_operation == "":
            logging.debug("No domain operation provided, so skip it")
        else:
            logging.error("Unsupported operation %s in this case, so skip it",
                          domain_operation)

        def find_attach_disk(expect=True):
            """
            Find attached disk inside the VM
            """
            found_disk = False
            if vm.is_dead():
                test.error("Domain %s is not running" % vm_name)
            else:
                try:
                    session = vm.wait_for_login()
                    # Here the script needs to wait a while for the guest to
                    # recognize the hotplugged disk on PPC
                    if on_ppc:
                        time.sleep(10)
                    cmd = "grep %s /proc/partitions" % disk_target
                    s, o = session.cmd_status_output(cmd)
                    logging.info("%s output: %s", cmd, o)
                    session.close()
                    if s == 0:
                        found_disk = True
                except (LoginError, VMError, ShellError) as e:
                    logging.error(str(e))
            if found_disk == expect:
                logging.debug("Check disk inside the VM PASS as expected")
            else:
                test.error("Check disk inside the VM FAIL")

        # Check disk inside the VM, expect is False if status_error=True
        find_attach_disk(not status_error)

        # Detach disk
        cmd_result = virsh.detach_disk(vm_name,
                                       disk_target,
                                       wait_remove_event=True)
        libvirt.check_exit_status(cmd_result, status_error)

        # Check disk inside the VM
        find_attach_disk(False)

    finally:
        # Clean up snapshot
        # Shut down before cleaning up snapshots
        if vm.is_alive():
            vm.destroy()
        libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)
        # Restore vm
        vmxml_backup.sync("--snapshots-metadata")
        # Destroy pool and undefine secret, which may not exist
        try:
            if disk_type == "volume":
                virsh.pool_destroy(disk_src_pool)
            if disk_type == "block":
                clean_up_lvm(iscsi_target, vg_name, lv_name)
            if chap_auth:
                virsh.secret_undefine(secret_uuid)
        except Exception:
            pass
        libvirt.setup_or_cleanup_iscsi(is_setup=False)
Exemplo n.º 20
0
def run(test, params, env):
    """
    Test command: virsh pool-define-as; pool-build; pool-start; vol-create-as;
    vol-list; attach-device; login; mount and dd; reboot; check persistence;
    detach-device; pool-destroy; pool-undefine; clear lv,vg and pv;
    Create a libvirt npiv pool from a vHBA's device mapper device and create
    a volume out of the newly created pool and attach it to a guest, mount it,
    reboot and check persistence after reboot.

    Pre-requisite:
    Host should have a vHBA associated with a mpath device
    """

    pool_name = params.get("pool_create_name", "virt_test_pool_tmp")
    pool_type = params.get("pool_type", "dir")
    scsi_wwpn = params.get("scsi_wwpn", "WWPN_EXAMPLE")
    scsi_wwnn = params.get("scsi_wwnn", "WWNN_EXAMPLE")
    pool_target = params.get("pool_target", "pool_target")
    target_device = params.get("disk_target_dev", "vda")
    volume_name = params.get("volume_name", "imagefrommapper.qcow2")
    volume_capacity = params.get("volume_capacity", '1G')
    allocation = params.get("allocation", '1G')
    frmt = params.get("volume_format", 'qcow2')
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    mount_disk = None
    test_unit = None

    if 'EXAMPLE' in scsi_wwnn or 'EXAMPLE' in scsi_wwpn:
        raise exceptions.TestSkipError("Please provide proper WWPN/WWNN")

    if not vm.is_alive():
        vm.start()
    pool_extra_args = ""
    libvirt_vm = lib_vm.VM(vm_name, vm.params, vm.root_dir,
                           vm.address_cache)
    process.run("service multipathd restart", shell=True)
    online_hbas_list = nodedev.find_hbas("hba")
    first_online_hba = online_hbas_list[0]
    old_mpath_devs = nodedev.find_mpath_devs()
    logging.debug("the old mpath devs are: %s" % old_mpath_devs)
    new_vhbas = nodedev.nodedev_create_from_xml(
        {"nodedev_parent": first_online_hba,
         "scsi_wwnn": scsi_wwnn,
         "scsi_wwpn": scsi_wwpn})
    logging.info("Newly created vHBA %s" % new_vhbas)
    process.run("service multipathd restart", shell=True)

    utils_misc.wait_for(
        lambda: nodedev.is_mpath_devs_added(old_mpath_devs), timeout=5)

    cur_mpath_devs = nodedev.find_mpath_devs()
    logging.debug("the current mpath devs are: %s" % cur_mpath_devs)
    new_mpath_devs = list(set(cur_mpath_devs).difference(
        set(old_mpath_devs)))

    logging.debug("newly added mpath devs are: %s" % new_mpath_devs)
    if not new_mpath_devs:
        raise exceptions.TestFail("No newly added mpath devices found, \
                please check your FC settings")
    source_dev = os.path.join('/dev/mapper/', new_mpath_devs[0])
    logging.debug("We are going to use \"%s\" as our source device"
                  " to create a logical pool" % source_dev)

    cmd = "parted %s mklabel msdos -s" % source_dev
    cmd_result = process.run(cmd, shell=True)
    utlv.check_exit_status(cmd_result)

    if source_dev:
        pool_extra_args = ' --source-dev %s' % source_dev
    else:
        raise exceptions.TestFail(
            "The vHBA %s does not have any associated mpath device" % new_vhbas)

    pool_ins = libvirt_storage.StoragePool()
    if pool_ins.pool_exists(pool_name):
        raise exceptions.TestFail("Pool %s already exist" % pool_name)
    # if no online hba cards on host, skip the case
    if not online_hbas_list:
        raise exceptions.TestSkipError("Host doesn't have online hba cards")
    try:
        cmd_result = virsh.pool_define_as(
            pool_name, pool_type, pool_target, pool_extra_args, ignore_status=True,
            debug=True)
        utlv.check_exit_status(cmd_result)

        cmd_result = virsh.pool_build(pool_name)
        utlv.check_exit_status(cmd_result)

        cmd_result = virsh.pool_start(pool_name)
        utlv.check_exit_status(cmd_result)

        utlv.check_actived_pool(pool_name)
        pool_detail = libvirt_xml.PoolXML.get_pool_details(pool_name)
        logging.debug("Pool detail: %s", pool_detail)

        cmd_result = virsh.vol_create_as(
            volume_name, pool_name, volume_capacity, allocation, frmt, "", debug=True)
        utlv.check_exit_status(cmd_result)

        vol_list = utlv.get_vol_list(pool_name, timeout=10)
        logging.debug('Volume list %s', vol_list)
        for unit in vol_list:
            test_unit = vol_list[unit]
            logging.debug(unit)

        disk_params = {'type_name': "file", 'target_dev': target_device,
                       'target_bus': "virtio", 'source_file': test_unit,
                       'driver_name': "qemu", 'driver_type': "raw"}
        disk_xml = utlv.create_disk_xml(disk_params)
        session = vm.wait_for_login()

        bf_disks = libvirt_vm.get_disks()

        attach_success = virsh.attach_device(
            vm_name, disk_xml, debug=True)

        utlv.check_exit_status(attach_success)

        logging.debug("Disks before attach: %s", bf_disks)

        af_disks = libvirt_vm.get_disks()

        logging.debug("Disks after attach: %s", af_disks)

        mount_disk = "".join(list(set(bf_disks) ^ set(af_disks)))
        if not mount_disk:
            raise exceptions.TestFail("Can not get attached device in vm.")
        logging.debug("Attached device in vm:%s", mount_disk)

        output = session.cmd_status_output('lsblk', timeout=15)
        logging.debug("%s", output[1])

        session.cmd_status_output('mkfs.ext4 %s' % mount_disk)
        if mount_disk:
            logging.info("%s", mount_disk)
            mount_success = mount_and_dd(session, mount_disk)
            if not mount_success:
                raise exceptions.TestFail("Can not find mounted device")
        session.close()

        virsh.reboot(vm_name, debug=True)

        session = vm.wait_for_login()
        output = session.cmd_status_output('mount')
        logging.debug("Mount output: %s", output[1])
        if '/mnt' in output[1]:
            logging.debug("Mount Successful accross reboot")
        session.close()

        status = virsh.detach_device(vm_name, disk_xml,
                                     debug=True)
        utlv.check_exit_status(status)

    finally:
        vm.destroy(gracefully=False)
        logging.debug('Destroying pool %s', pool_name)
        virsh.pool_destroy(pool_name)
        logging.debug('Undefining pool %s', pool_name)
        virsh.pool_undefine(pool_name)
        if test_unit:
            process.system('lvremove -f %s' % test_unit, verbose=True)
            process.system('vgremove -f %s' % pool_name, verbose=True)
            process.system('pvremove -f %s' % source_dev, verbose=True)
        if new_vhbas:
            nodedev.vhbas_cleanup(new_vhbas.split())
        process.run("service multipathd restart", shell=True)
def run(test, params, env):
    """
    Test virsh {at|de}tach-device command.

    The command can attach a new disk or detach an existing device.
    1. Prepare test environment, destroy or suspend a VM.
    2. Perform virsh attach/detach-device operation.
    3. Recover test environment.
    4. Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    pre_vm_state = params.get("at_dt_device_pre_vm_state", "running")
    virsh_dargs = {"debug": True, "ignore_status": True}

    def is_attached(vmxml_devices, disk_type, source_file, target_dev):
        """
        Check attached device and disk exist or not.

        :param vmxml_devices: VMXMLDevices instance
        :param disk_type: disk's device type: cdrom or floppy
        :param source_file : disk's source file to check
        :param target_dev : target device name
        :return: True/False if backing file and device found
        """
        disks = vmxml_devices.by_device_tag("disk")
        for disk in disks:
            if disk.device != disk_type:
                continue
            if disk.target["dev"] != target_dev:
                continue
            if disk.xmltreefile.find("source") is not None:
                if disk.source.attrs["file"] != source_file:
                    continue
            else:
                continue
            # All three conditions met
            logging.debug("Find %s in given disk XML", source_file)
            return True
        logging.debug("Not find %s in gievn disk XML", source_file)
        return False

    def check_result(disk_source, disk_type, disk_target, flags, attach=True):
        """
        Check the test result of attach/detach-device command.
        """
        vm_state = pre_vm_state
        active_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        active_attached = is_attached(active_vmxml.devices, disk_type, disk_source, disk_target)
        if vm_state != "transient":
            inactive_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name, options="--inactive")
            inactive_attached = is_attached(inactive_vmxml.devices, disk_type, disk_source, disk_target)

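        # The expectations below follow virsh flag semantics: --config only
        # changes the persistent (inactive) XML, --live only the running XML,
        # and --current picks one of them depending on the domain state.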
        if flags.count("config") and not flags.count("live"):
            if vm_state != "transient":
                if attach:
                    if not inactive_attached:
                        raise exceptions.TestFail(
                            "Inactive domain XML not updated" " when --config options used for" " attachment"
                        )
                    if vm_state != "shutoff":
                        if active_attached:
                            raise exceptions.TestFail(
                                "Active domain XML updated " "when --config options used " "for attachment"
                            )
                else:
                    if inactive_attached:
                        raise exceptions.TestFail(
                            "Inactive domain XML not updated" " when --config options used for" " detachment"
                        )
        elif flags.count("live") and not flags.count("config"):
            if attach:
                if vm_state in ["paused", "running", "transient"]:
                    if not active_attached:
                        raise exceptions.TestFail(
                            "Active domain XML not updated" " when --live options used for" " attachment"
                        )
                if vm_state in ["paused", "running"]:
                    if inactive_attached:
                        raise exceptions.TestFail(
                            "Inactive domain XML updated " "when --live options used for" " attachment"
                        )
            else:
                if vm_state in ["paused", "running", "transient"]:
                    if active_attached:
                        raise exceptions.TestFail(
                            "Active domain XML not updated" " when --live options used for" " detachment"
                        )
        elif flags.count("live") and flags.count("config"):
            if attach:
                if vm_state in ["paused", "running"]:
                    if not active_attached:
                        raise exceptions.TestFail(
                            "Active domain XML not updated" " when --live --config options" " used for attachment"
                        )
                    if not inactive_attached:
                        raise exceptions.TestFail(
                            "Inactive domain XML not updated" " when --live --config options " "used for attachment"
                        )
            else:
                if vm_state in ["paused", "running"]:
                    if active_attached:
                        raise exceptions.TestFail(
                            "Active domain XML not updated " "when --live --config options " "used for detachment"
                        )
                    if inactive_attached:
                        raise exceptions.TestFail(
                            "Inactive domain XML updated " "when --live --config options" " used for detachment"
                        )
        elif flags.count("current") or flags == "":
            if attach:
                if vm_state in ["paused", "running", "transient"]:
                    if not active_attached:
                        raise exceptions.TestFail(
                            "Active domain XML not updated" " when --current options used " "for attachment"
                        )
                if vm_state in ["paused", "running"]:
                    if inactive_attached:
                        raise exceptions.TestFail(
                            "Inactive domain XML updated " "when --current options used " "for live attachment"
                        )
                if vm_state == "shutoff" and not inactive_attached:
                    raise exceptions.TestFail(
                        "Inactive domain XML not updated" " when --current options used for" " attachment"
                    )
            else:
                if vm_state in ["paused", "running", "transient"]:
                    if active_attached:
                        raise exceptions.TestFail(
                            "Active domain XML not updated" " when --current options used " "for detachment"
                        )
                if vm_state == "shutoff" and inactive_attached:
                    raise exceptions.TestFail(
                        "Inactive domain XML not updated" " when --current options used for" " detachment"
                    )

    vm_ref = params.get("at_dt_device_vm_ref", "name")
    at_status_error = "yes" == params.get("at_status_error", "no")
    dt_status_error = "yes" == params.get("dt_status_error", "no")

    # Disk specific attributes.
    at_options = params.get("at_dt_device_at_options", "")
    dt_options = params.get("at_dt_device_dt_options", "")
    device = params.get("at_dt_device_device", "disk")
    device_source_name = params.get("at_dt_device_source", "attach.img")
    device_target = params.get("at_dt_device_target", "vdd")

    if vm.is_alive():
        vm.destroy(gracefully=False)
    # Back up xml file.
    backup_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Turn VM into certain state.
    if pre_vm_state == "running":
        logging.info("Starting %s..." % vm_name)
        if vm.is_dead():
            vm.start()
            vm.wait_for_login().close()
    elif pre_vm_state == "shutoff":
        logging.info("Shuting down %s..." % vm_name)
        if vm.is_alive():
            vm.destroy(gracefully=False)
    elif pre_vm_state == "paused":
        logging.info("Pausing %s..." % vm_name)
        if vm.is_dead():
            vm.start()
            vm.wait_for_login().close()
        if not vm.pause():
            raise exceptions.TestSkipError("Cann't pause the domain")
    elif pre_vm_state == "transient":
        logging.info("Creating %s..." % vm_name)
        vm.undefine()
        if virsh.create(backup_xml.xml, **virsh_dargs).exit_status:
            backup_xml.define()
            raise exceptions.TestSkipError("Cann't create the domain")
        vm.wait_for_login().close()

    # Test.
    domid = vm.get_id()
    domuuid = vm.get_uuid()

    # Confirm how to reference a VM.
    if vm_ref == "name":
        vm_ref = vm_name
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref == "uuid":
        vm_ref = domuuid
    else:
        vm_ref = ""

    try:
        device_source = os.path.join(data_dir.get_tmp_dir(), device_source_name)
        libvirt.create_local_disk("file", device_source, "1")

        # Get disk xml file.
        disk_params = {
            "type_name": "file",
            "device_type": device,
            "target_dev": device_target,
            "target_bus": "virtio",
            "source_file": device_source,
            "driver_name": "qemu",
            "driver_type": "raw",
        }
        disk_xml = libvirt.create_disk_xml(disk_params)

        # Attach the disk.
        ret = virsh.attach_device(vm_ref, disk_xml, flagstr=at_options, debug=True)
        libvirt.check_exit_status(ret, at_status_error)

        # Check whether the command took effect on the config file.
        if vm.is_paused():
            vm.resume()
            vm.wait_for_login().close()

        # Sleep a while for the vm to become stable
        time.sleep(3)
        if not ret.exit_status:
            check_result(device_source, device, device_target, at_options)

        # Detach the disk.
        if pre_vm_state == "paused":
            if not vm.pause():
                raise exceptions.TestFail("Cann't pause the domain")
        disk_xml = libvirt.create_disk_xml(disk_params)
        ret = virsh.detach_device(vm_ref, disk_xml, flagstr=dt_options, debug=True)
        libvirt.check_exit_status(ret, dt_status_error)

        # Check whether the command took effect on the config file.
        if vm.is_paused():
            vm.resume()
            vm.wait_for_login().close()

        # Sleep a while for the vm to become stable
        time.sleep(3)
        if not ret.exit_status:
            check_result(device_source, device, device_target, dt_options, False)

        # Try to start vm at last.
        if vm.is_dead():
            vm.start()
            vm.wait_for_login().close()

    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()

        if os.path.exists(device_source):
            os.remove(device_source)
Exemplo n.º 22
0
                           'target_bus': target_bus,
                           'source_pool': pool_name,
                           'source_volume': test_vol,
                           'driver_type': driver_type}
        else:
            disk_params = {'type_name': disk_type,
                           'device': disk_device,
                           'driver_name': driver_name,
                           'driver_type': driver_type,
                           'source_file': path_to_blk,
                           'target_dev': device_target,
                           'target_bus': target_bus}
        if vm.is_alive():
            vm.destroy(gracefully=False)
        new_disk = disk.Disk()
        with open(utlv.create_disk_xml(disk_params)) as disk_xml_file:
            new_disk.xml = disk_xml_file.read()

        # start vm with the virtual disk
        devices = vmxml.devices
        devices.append(new_disk)
        vmxml.devices = devices
        vmxml.sync()
        vm.start()
        session = vm.wait_for_login()
        cur_disks = virt_vm.get_disks()
        mount_disk = "".join(list(set(old_disks) ^ set(cur_disks)))

        # mkfs and mount disk in vm, create a file on that disk.
        if not mount_disk:
            logging.debug("old_disk: %s, new_disk: %s", old_disks, cur_disks)
            raise exceptions.TestFail("No new disk found in vm.")
        mkfs_and_mount(session, mount_disk)
        create_file_in_vm(session, "/mnt/before_snapshot.txt", "before")
Exemplo n.º 23
0
def run(test, params, env):
    """
    Test steps:
    1. Prepare a multipath device.
    2. Prepare virtual disk xml using this multipath device.
    3. Hot/Cold-plug the disk xml to virtual machine.
    4. Check the attached disk in the virtual machine.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    mpath_conf_path = params.get('mpath_conf_path', '/etc/multipath.conf')
    mpath_conf_bkup_path = params.get('mpath_conf_bkup_path',
                                      '/etc/multipath.conf.bkup')

    def prepare_multipath_conf():
        """
        Prepare the multipath.conf to make sure iscsi lun can be seen as a
        multipath device.
        """
        mpath_conf_content = ("defaults {\n"
                              "    user_friendly_names yes\n"
                              "    path_grouping_policy multibus\n"
                              "    failback immediate\n"
                              "    no_path_retry fail\n"
                              "}\n")
        if os.path.exists(mpath_conf_bkup_path):
            os.remove(mpath_conf_bkup_path)
        if os.path.exists(mpath_conf_path):
            shutil.move(mpath_conf_path, mpath_conf_bkup_path)
        with open(mpath_conf_path, 'wt') as mpath_conf_file:
            mpath_conf_file.write(mpath_conf_content)

    def recover_multipath_conf():
        """
        Recover the multipath.conf.
        """
        if os.path.exists(mpath_conf_bkup_path):
            if os.path.exists(mpath_conf_path):
                os.remove(mpath_conf_path)
            shutil.move(mpath_conf_bkup_path, mpath_conf_path)

    def check_in_vm(vm, old_parts):
        """
        Check mount/read/write disk in VM.

        :param vm: Virtual machine to be checked.
        :param old_parts: Original disk partitions in VM.
        """
        try:
            session = vm.wait_for_login()
            if platform.platform().count('ppc64'):
                time.sleep(10)
            new_parts = libvirt.get_parts_list(session)
            added_parts = list(set(new_parts).difference(set(old_parts)))
            logging.info("Added parts:%s", added_parts)
            if len(added_parts) != 1:
                test.fail("The number of new partitions is invalid in VM")
            else:
                added_part = added_parts[0]
            cmd = ("fdisk -l /dev/{0} && mkfs.ext4 -F /dev/{0} && "
                   "mkdir -p test && mount /dev/{0} test && echo"
                   " teststring > test/testfile && umount test"
                   .format(added_part))
            status, output = session.cmd_status_output(cmd)
            session.close()
            if status:
                test.fail("Disk operation in VM failed:%s" % output)
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as err:
            test.fail("Error happens when check disk in vm: %s" % err)

    storage_size = params.get("storage_size", "1G")
    hotplug_disk = "yes" == params.get("hotplug_disk", "no")
    status_error = "yes" == params.get("status_error")
    # Start VM and get all partitions in VM.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = libvirt.get_parts_list(session)
    session.close()
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    try:
        # Setup backend storage
        prepare_multipath_conf()
        mpath.restart_multipathd()
        old_mpath_devs = mpath.find_mpath_devs()
        libvirt.setup_or_cleanup_iscsi(is_setup=True)
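        # The emulated iSCSI LUN shows up as a new SCSI device which, with the
        # multipath config above, is wrapped as a /dev/mapper/mpathN device.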
        mpath.restart_multipathd()
        cur_mpath_devs = mpath.find_mpath_devs()
        new_mpath_devs = list(set(cur_mpath_devs).difference(
            set(old_mpath_devs)))
        logging.debug("newly added mpath devs are: %s", new_mpath_devs)
        # Prepare disk xml
        disk_params = {}
        disk_params['type_name'] = params.get("virt_disk_type", "block")
        disk_params['source_file'] = '/dev/mapper/' + new_mpath_devs[0]
        disk_params['device_type'] = params.get("virt_disk_device", "lun")
        disk_params['sgio'] = params.get("sgio", "filtered")
        disk_params['rawio'] = params.get("rawio", "no")
        disk_params['target_dev'] = params.get("virt_disk_device_target", "sdb")
        disk_params['target_bus'] = params.get("virt_disk_device_bus", "scsi")
        disk_params['driver_name'] = params.get("virt_disk_drive_name", "qemu")
        disk_params['driver_type'] = params.get("virt_disk_device_format", "raw")
        disk_xml = libvirt.create_disk_xml(disk_params)
        # Test disk operation with newly added disk xml
        attach_option = ""
        if not hotplug_disk:
            attach_option = "--config"
        result = virsh.attach_device(vm_name, disk_xml, flagstr=attach_option,
                                     ignore_status=True, debug=True)
        libvirt.check_exit_status(result, status_error)
        if not hotplug_disk:
            vm.destroy(gracefully=False)
            vm.start()
            vm.wait_for_login().close()
        check_in_vm(vm, old_parts)
    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()
        # Clean up backend storage
        libvirt.setup_or_cleanup_iscsi(is_setup=False)
        recover_multipath_conf()
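
As a point of reference for the test above, the sketch below (not part of the original test) feeds an equivalent, hypothetical parameter set to create_disk_xml and logs the generated file; the /dev/mapper path and target device are placeholders, and the XML in the comment is only the expected rough shape.

import logging
from virttest.utils_test import libvirt

# Hypothetical parameter set mirroring the multipath LUN case above.
disk_params = {'type_name': 'block', 'device_type': 'lun',
               'sgio': 'filtered', 'rawio': 'no',
               'target_dev': 'sdb', 'target_bus': 'scsi',
               'driver_name': 'qemu', 'driver_type': 'raw',
               'source_file': '/dev/mapper/mpatha'}
disk_xml = libvirt.create_disk_xml(disk_params)
# The generated file is expected to look roughly like:
#   <disk type='block' device='lun' sgio='filtered' rawio='no'>
#     <driver name='qemu' type='raw'/>
#     <source dev='/dev/mapper/mpatha'/>
#     <target dev='sdb' bus='scsi'/>
#   </disk>
logging.debug("Generated disk xml: %s", open(disk_xml).read())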
Exemplo n.º 24
0
            logging.info(
                "Using the first volume %s to attach to a guest", test_unit)

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        session = vm.wait_for_login()
        output = session.cmd_status_output('lsblk')
        logging.debug("%s", output[1])
        old_count = vmxml.get_disk_count(vm_name)
        bf_disks = libvirt_vm.get_disks()

        # prepare disk xml which will be hot/cold attached to vm
        disk_params = {'type_name': 'volume', 'target_dev': target_device,
                       'target_bus': 'virtio', 'source_pool': pool_name,
                       'source_volume': test_unit, 'driver_type': vol_format}
        disk_xml = os.path.join(data_dir.get_tmp_dir(), 'disk_xml.xml')
        lun_disk_xml = utlv.create_disk_xml(disk_params)
        copyfile(lun_disk_xml, disk_xml)
        disk_xml_str = open(lun_disk_xml).read()
        logging.debug("The disk xml is: %s", disk_xml_str)

        # hot attach disk xml to vm
        if attach_method == "hot":
            copyfile(lun_disk_xml, disk_xml)
            dev_attach_status = virsh.attach_device(vm_name, disk_xml,
                                                    debug=True)
            # Pool/vol virtual disk is not supported by mpath pool yet.
            if dev_attach_status.exit_status and pool_type == "mpath":
                raise exceptions.TestSkipError("mpath pool vol is not "
                                               "supported in virtual disk yet, "
                                               "the error message is: %s"
                                               % dev_attach_status.stderr)
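
The cold-attach branch is not part of the excerpt above; a hedged sketch of how it typically looks, assuming the same vm, vm_name and disk_xml objects:

from virttest import virsh
from virttest.utils_test import libvirt as utlv

# Cold attach: shut the guest down, attach with --config so the change is
# persistent, then boot the guest and wait for login.
if vm.is_alive():
    vm.destroy(gracefully=False)
dev_attach_status = virsh.attach_device(vm_name, disk_xml,
                                        flagstr="--config", debug=True)
utlv.check_exit_status(dev_attach_status)
vm.start()
vm.wait_for_login().close()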
Exemplo n.º 25
0
def run(test, params, env):
    """
    Test rbd disk device.

    1.Prepare test environment,destroy or suspend a VM.
    2.Prepare disk image.
    3.Edit disks xml and start the domain.
    4.Perform test operation.
    5.Recover test environment.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}
    additional_xml_file = os.path.join(data_dir.get_tmp_dir(), "additional_disk.xml")

    def config_ceph():
        """
        Write the configs to the file.
        """
        src_host = disk_src_host.split()
        src_port = disk_src_port.split()
        conf_str = "mon_host = "
        hosts = []
        for host, port in zip(src_host, src_port):
            hosts.append("%s:%s" % (host, port))
        with open(disk_src_config, 'w') as f:
            f.write(conf_str + ','.join(hosts) + '\n')

    def create_pool():
        """
        Define and start a pool.
        """
        sp = libvirt_storage.StoragePool()
        if create_by_xml:
            p_xml = pool_xml.PoolXML(pool_type=pool_type)
            p_xml.name = pool_name
            s_xml = pool_xml.SourceXML()
            s_xml.vg_name = disk_src_pool
            source_host = []
            for (host_name, host_port) in zip(
                    disk_src_host.split(), disk_src_port.split()):
                source_host.append({'name': host_name,
                                    'port': host_port})

            s_xml.hosts = source_host
            if auth_type:
                s_xml.auth_type = auth_type
            if auth_user:
                s_xml.auth_username = auth_user
            if auth_usage:
                s_xml.secret_usage = auth_usage
            p_xml.source = s_xml
            logging.debug("Pool xml: %s", p_xml)
            p_xml.xmltreefile.write()
            ret = virsh.pool_define(p_xml.xml, **virsh_dargs)
            libvirt.check_exit_status(ret)
            ret = virsh.pool_build(pool_name, **virsh_dargs)
            libvirt.check_exit_status(ret)
            ret = virsh.pool_start(pool_name, **virsh_dargs)
            libvirt.check_exit_status(ret)
        else:
            auth_opt = ""
            if client_name and client_key:
                auth_opt = ("--auth-type %s --auth-username %s --secret-usage '%s'"
                            % (auth_type, auth_user, auth_usage))
            if not sp.define_rbd_pool(pool_name, mon_host,
                                      disk_src_pool, extra=auth_opt):
                test.fail("Failed to define storage pool")
            if not sp.build_pool(pool_name):
                test.fail("Failed to build storage pool")
            if not sp.start_pool(pool_name):
                test.fail("Failed to start storage pool")

        # Check pool operation
        ret = virsh.pool_refresh(pool_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.pool_uuid(pool_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        # pool-info
        pool_info = sp.pool_info(pool_name)
        if pool_info["Autostart"] != 'no':
            test.fail("Failed to check pool information")
        # pool-autostart
        if not sp.set_pool_autostart(pool_name):
            test.fail("Failed to set pool autostart")
        pool_info = sp.pool_info(pool_name)
        if pool_info["Autostart"] != 'yes':
            test.fail("Failed to check pool information")
        # pool-autostart --disable
        if not sp.set_pool_autostart(pool_name, "--disable"):
            test.fail("Failed to set pool autostart")
        # If port is not pre-configured, port value should not be hardcoded in pool information.
        if "yes" == params.get("rbd_port", "no"):
            if 'port' in virsh.pool_dumpxml(pool_name):
                test.fail("port attribute should not be in pool information")
        # find-storage-pool-sources-as
        if "yes" == params.get("find_storage_pool_sources_as", "no"):
            ret = virsh.find_storage_pool_sources_as("rbd", mon_host)
            libvirt.check_result(ret, skip_if=unsupported_err)

    def create_vol(vol_params):
        """
        Create a volume in the pool.

        :param vol_params: Volume parameters dict.
        """
        pvt = libvirt.PoolVolumeTest(test, params)
        if create_by_xml:
            pvt.pre_vol_by_xml(pool_name, **vol_params)
        else:
            pvt.pre_vol(vol_name, None, '2G', None, pool_name)

    def check_vol(vol_params):
        """
        Check volume information.
        """
        pv = libvirt_storage.PoolVolume(pool_name)
        # Supported operation
        if vol_name not in pv.list_volumes():
            test.fail("Volume %s doesn't exist" % vol_name)
        ret = virsh.vol_dumpxml(vol_name, pool_name)
        libvirt.check_exit_status(ret)
        # vol-info
        if not pv.volume_info(vol_name):
            test.fail("Can't see volume info")
        # vol-key
        ret = virsh.vol_key(vol_name, pool_name)
        libvirt.check_exit_status(ret)
        if "%s/%s" % (disk_src_pool, vol_name) not in ret.stdout.strip():
            test.fail("Volume key isn't correct")
        # vol-path
        ret = virsh.vol_path(vol_name, pool_name)
        libvirt.check_exit_status(ret)
        if "%s/%s" % (disk_src_pool, vol_name) not in ret.stdout.strip():
            test.fail("Volume path isn't correct")
        # vol-pool
        ret = virsh.vol_pool("%s/%s" % (disk_src_pool, vol_name))
        libvirt.check_exit_status(ret)
        if pool_name not in ret.stdout.strip():
            test.fail("Volume pool isn't correct")
        # vol-name
        ret = virsh.vol_name("%s/%s" % (disk_src_pool, vol_name))
        libvirt.check_exit_status(ret)
        if vol_name not in ret.stdout.strip():
            test.fail("Volume name isn't correct")
        # vol-resize
        ret = virsh.vol_resize(vol_name, "2G", pool_name)
        libvirt.check_exit_status(ret)

        # Not supported operation
        # vol-clone
        ret = virsh.vol_clone(vol_name, cloned_vol_name, pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)
        # vol-create-from
        volxml = vol_xml.VolXML()
        vol_params.update({"name": "%s" % create_from_cloned_volume})
        v_xml = volxml.new_vol(**vol_params)
        v_xml.xmltreefile.write()
        ret = virsh.vol_create_from(pool_name, v_xml.xml, vol_name, pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)

        # vol-wipe
        ret = virsh.vol_wipe(vol_name, pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)
        # vol-upload
        ret = virsh.vol_upload(vol_name, vm.get_first_disk_devices()['source'],
                               "--pool %s" % pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)
        # vol-download
        ret = virsh.vol_download(vol_name, cloned_vol_name, "--pool %s" % pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)

    def check_qemu_cmd():
        """
        Check qemu command line options.
        """
        cmd = ("ps -ef | grep %s | grep -v grep " % vm_name)
        process.run(cmd, shell=True)
        if disk_src_name:
            cmd += " | grep file=rbd:%s:" % disk_src_name
            if auth_user and auth_key:
                cmd += ('id=%s:auth_supported=cephx' % auth_user)
        if disk_src_config:
            cmd += " | grep 'conf=%s'" % disk_src_config
        elif mon_host:
            hosts = r'\:6789\;'.join(mon_host.split())
            cmd += " | grep 'mon_host=%s'" % hosts
        if driver_iothread:
            cmd += " | grep iothread%s" % driver_iothread
        # Run the command
        process.run(cmd, shell=True)

    def check_save_restore():
        """
        Test save and restore operation
        """
        save_file = os.path.join(data_dir.get_tmp_dir(),
                                 "%s.save" % vm_name)
        ret = virsh.save(vm_name, save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.restore(save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)
        if os.path.exists(save_file):
            os.remove(save_file)
        # Login to check vm status
        vm.wait_for_login().close()

    def check_snapshot(snap_option, target_dev='vda'):
        """
        Test snapshot operation.
        """
        snap_name = "s1"
        snap_mem = os.path.join(data_dir.get_tmp_dir(), "rbd.mem")
        snap_disk = os.path.join(data_dir.get_tmp_dir(), "rbd.disk")
        xml_snap_exp = ["disk name='%s' snapshot='external' type='file'" % target_dev]
        xml_dom_exp = ["source file='%s'" % snap_disk,
                       "backingStore type='network' index='1'",
                       "source protocol='rbd' name='%s'" % disk_src_name]
        if snap_option.count("disk-only"):
            options = ("%s --diskspec %s,file=%s --disk-only" %
                       (snap_name, target_dev, snap_disk))
        elif snap_option.count("disk-mem"):
            options = ("%s --memspec file=%s --diskspec %s,file="
                       "%s" % (snap_name, snap_mem, target_dev, snap_disk))
            xml_snap_exp.append("memory snapshot='external' file='%s'"
                                % snap_mem)
        else:
            options = snap_name

        ret = virsh.snapshot_create_as(vm_name, options)
        if test_disk_internal_snapshot or test_disk_readonly:
            libvirt.check_result(ret, expected_fails=unsupported_err)
        else:
            libvirt.check_result(ret, skip_if=unsupported_err)

        # check xml file.
        if not ret.exit_status:
            snap_xml = virsh.snapshot_dumpxml(vm_name, snap_name,
                                              debug=True).stdout.strip()
            dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
            # Delete snapshots.
            libvirt.clean_up_snapshots(vm_name)
            if os.path.exists(snap_mem):
                os.remove(snap_mem)
            if os.path.exists(snap_disk):
                os.remove(snap_disk)

            if not all([x in snap_xml for x in xml_snap_exp]):
                test.fail("Failed to check snapshot xml")
            if not all([x in dom_xml for x in xml_dom_exp]):
                test.fail("Failed to check domain xml")

    def check_blockcopy(target):
        """
        Block copy operation test.
        """
        blk_file = os.path.join(data_dir.get_tmp_dir(), "blk.rbd")
        if os.path.exists(blk_file):
            os.remove(blk_file)
        blk_mirror = ("mirror type='file' file='%s' "
                      "format='raw' job='copy'" % blk_file)

        # Do blockcopy
        ret = virsh.blockcopy(vm_name, target, blk_file)
        libvirt.check_result(ret, skip_if=unsupported_err)

        dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
        if not dom_xml.count(blk_mirror):
            test.fail("Can't see block job in domain xml")

        # Abort
        ret = virsh.blockjob(vm_name, target, "--abort")
        libvirt.check_exit_status(ret)
        dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
        if dom_xml.count(blk_mirror):
            test.fail("Failed to abort block job")
        if os.path.exists(blk_file):
            os.remove(blk_file)

        # Sleep for a while after abort operation.
        time.sleep(5)
        # Do blockcopy again
        ret = virsh.blockcopy(vm_name, target, blk_file)
        libvirt.check_exit_status(ret)

        # Wait for complete
        def wait_func():
            ret = virsh.blockjob(vm_name, target, "--info")
            return ret.stderr.count("Block Copy: [100 %]")
        timeout = params.get("blockjob_timeout", 600)
        utils_misc.wait_for(wait_func, int(timeout))

        # Pivot
        ret = virsh.blockjob(vm_name, target, "--pivot")
        libvirt.check_exit_status(ret)
        dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
        if not dom_xml.count("source file='%s'" % blk_file):
            test.fail("Failed to pivot block job")
        # Remove the disk file.
        if os.path.exists(blk_file):
            os.remove(blk_file)

    def check_in_vm(vm_obj, target, old_parts, read_only=False):
        """
        Check mount/read/write disk in VM.

        :param vm_obj: VM guest.
        :param target: Disk dev in VM.
        :param old_parts: Original disk partitions in VM.
        :param read_only: Whether the disk is expected to be read-only.
        :return: True if the check succeeds, False otherwise.
        """
        try:
            session = vm_obj.wait_for_login()
            new_parts = libvirt.get_parts_list(session)
            added_parts = list(set(new_parts).difference(set(old_parts)))
            logging.info("Added parts:%s", added_parts)
            if len(added_parts) != 1:
                logging.error("The number of new partitions is invalid in VM")
                return False

            added_part = None
            if target.startswith("vd"):
                if added_parts[0].startswith("vd"):
                    added_part = added_parts[0]
            elif target.startswith("hd"):
                if added_parts[0].startswith("sd"):
                    added_part = added_parts[0]

            if not added_part:
                logging.error("Can't see added partition in VM")
                return False

            cmd = ("mount /dev/{0} /mnt && ls /mnt && (sleep 15;"
                   " touch /mnt/testfile; umount /mnt)"
                   .format(added_part))
            s, o = session.cmd_status_output(cmd, timeout=60)
            session.close()
            logging.info("Check disk operation in VM:\n, %s, %s", s, o)
            # Readonly fs, check the error messages.
            # The command may return True, read-only
            # messges can be found from the command output
            if read_only:
                if "Read-only file system" not in o:
                    return False
                else:
                    return True

            # Other errors
            if s != 0:
                return False
            return True

        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    def clean_up_volume_snapshots():
        """
        Get all snapshots of the rbd_vol.img volume, unprotect them, and then clean them up.
        """
        cmd = ("rbd -m {0} {1} info {2}"
               "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name)))
        if process.run(cmd, ignore_status=True, shell=True).exit_status:
            return
        # Get snapshot list.
        cmd = ("rbd -m {0} {1} snap"
               " list {2}"
               "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name)))
        snaps_out = process.run(cmd, ignore_status=True, shell=True).stdout_text
        snap_names = []
        if snaps_out:
            for line in snaps_out.rsplit("\n"):
                if line.startswith("SNAPID") or line == "":
                    continue
                snap_line = line.rsplit()
                if len(snap_line) == 4:
                    snap_names.append(snap_line[1])
            logging.debug("Find snapshots: %s", snap_names)
            # Unprotect snapshots first, otherwise purging the volume will fail.
            for snap_name in snap_names:
                cmd = ("rbd -m {0} {1} snap"
                       " unprotect {2}@{3}"
                       "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name), snap_name))
                process.run(cmd, ignore_status=True, shell=True)
        # Purge the volume, and then delete it.
        cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} snap"
               " purge {2} && rbd -m {0} {1} rm {2}"
               "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name)))
        process.run(cmd, ignore_status=True, shell=True)

    def make_snapshot():
        """
        Make external snapshots.

        :return: external snapshot path list
        """
        logging.info("Making snapshot...")
        first_disk_source = vm.get_first_disk_devices()['source']
        snapshot_path_list = []
        snapshot2_file = os.path.join(data_dir.get_tmp_dir(), "mem.s2")
        snapshot3_file = os.path.join(data_dir.get_tmp_dir(), "mem.s3")
        snapshot4_file = os.path.join(data_dir.get_tmp_dir(), "mem.s4")
        snapshot4_disk_file = os.path.join(data_dir.get_tmp_dir(), "disk.s4")
        snapshot5_file = os.path.join(data_dir.get_tmp_dir(), "mem.s5")
        snapshot5_disk_file = os.path.join(data_dir.get_tmp_dir(), "disk.s5")

        # Attempt to take different types of snapshots.
        snapshots_param_dict = {"s1": "s1 --disk-only --no-metadata",
                                "s2": "s2 --memspec %s --no-metadata" % snapshot2_file,
                                "s3": "s3 --memspec %s --no-metadata --live" % snapshot3_file,
                                "s4": "s4 --memspec %s --diskspec vda,file=%s --no-metadata" % (snapshot4_file, snapshot4_disk_file),
                                "s5": "s5 --memspec %s --diskspec vda,file=%s --live --no-metadata" % (snapshot5_file, snapshot5_disk_file)}
        for snapshot_name in sorted(snapshots_param_dict.keys()):
            ret = virsh.snapshot_create_as(vm_name, snapshots_param_dict[snapshot_name],
                                           **virsh_dargs)
            libvirt.check_exit_status(ret)
            if snapshot_name != 's4' and snapshot_name != 's5':
                snapshot_path_list.append(first_disk_source.replace('qcow2', snapshot_name))
        return snapshot_path_list

    def get_secret_list():
        """
        Get secret list.

        :return: secret list
        """
        logging.info("Get secret list ...")
        secret_list_result = virsh.secret_list()
        secret_list = results_stdout_52lts(secret_list_result).strip().splitlines()
        # First two lines contain table header followed by entries
        # for each secret, such as:
        #
        # UUID                                  Usage
        # --------------------------------------------------------------------------------
        # b4e8f6d3-100c-4e71-9f91-069f89742273  ceph client.libvirt secret
        secret_list = secret_list[2:]
        result = []
        # Nothing to collect if the secret list is empty.
        if secret_list:
            for line in secret_list:
                # Split on whitespace and take the first column (the UUID)
                linesplit = line.split(None, 1)
                result.append(linesplit[0])
        return result

    mon_host = params.get("mon_host")
    disk_src_name = params.get("disk_source_name")
    disk_src_config = params.get("disk_source_config")
    disk_src_host = params.get("disk_source_host")
    disk_src_port = params.get("disk_source_port")
    disk_src_pool = params.get("disk_source_pool")
    disk_format = params.get("disk_format", "raw")
    driver_iothread = params.get("driver_iothread")
    snap_name = params.get("disk_snap_name")
    attach_device = "yes" == params.get("attach_device", "no")
    attach_disk = "yes" == params.get("attach_disk", "no")
    test_save_restore = "yes" == params.get("test_save_restore", "no")
    test_snapshot = "yes" == params.get("test_snapshot", "no")
    test_blockcopy = "yes" == params.get("test_blockcopy", "no")
    test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no")
    test_vm_parts = "yes" == params.get("test_vm_parts", "no")
    additional_guest = "yes" == params.get("additional_guest", "no")
    create_snapshot = "yes" == params.get("create_snapshot", "no")
    convert_image = "yes" == params.get("convert_image", "no")
    create_volume = "yes" == params.get("create_volume", "no")
    create_by_xml = "yes" == params.get("create_by_xml", "no")
    client_key = params.get("client_key")
    client_name = params.get("client_name")
    auth_key = params.get("auth_key")
    auth_user = params.get("auth_user")
    auth_type = params.get("auth_type")
    auth_usage = params.get("secret_usage")
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    vol_name = params.get("vol_name")
    cloned_vol_name = params.get("cloned_volume", "cloned_test_volume")
    create_from_cloned_volume = params.get("create_from_cloned_volume", "create_from_cloned_test_volume")
    vol_cap = params.get("vol_cap")
    vol_cap_unit = params.get("vol_cap_unit")
    start_vm = "yes" == params.get("start_vm", "no")
    test_disk_readonly = "yes" == params.get("test_disk_readonly", "no")
    test_disk_internal_snapshot = "yes" == params.get("test_disk_internal_snapshot", "no")
    test_json_pseudo_protocol = "yes" == params.get("json_pseudo_protocol", "no")
    disk_snapshot_with_sanlock = "yes" == params.get("disk_internal_with_sanlock", "no")

    # Create /etc/ceph/ceph.conf file to suppress a spurious warning message.
    process.run("mkdir -p /etc/ceph", ignore_status=True, shell=True)
    cmd = ("echo 'mon_host = {0}' >/etc/ceph/ceph.conf"
           .format(mon_host))
    process.run(cmd, ignore_status=True, shell=True)

    # Start vm and get all partitions in vm.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = libvirt.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)
    if additional_guest:
        guest_name = "%s_%s" % (vm_name, '1')
        timeout = params.get("clone_timeout", 360)
        utils_libguestfs.virt_clone_cmd(vm_name, guest_name,
                                        True, timeout=timeout,
                                        ignore_status=False)
        additional_vm = vm.clone(guest_name)
        if start_vm:
            virsh.start(guest_name)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    key_opt = ""
    secret_uuid = None
    snapshot_path = None
    key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key")
    img_file = os.path.join(data_dir.get_tmp_dir(),
                            "%s_test.img" % vm_name)
    front_end_img_file = os.path.join(data_dir.get_tmp_dir(),
                                      "%s_frontend_test.img" % vm_name)
    # Construct an unsupported-error message list to skip these kinds of tests
    unsupported_err = []
    if driver_iothread:
        unsupported_err.append('IOThreads not supported')
    if test_snapshot:
        unsupported_err.append('live disk snapshot not supported')
    if test_disk_readonly:
        if not libvirt_version.version_compare(5, 0, 0):
            unsupported_err.append('Could not create file: Permission denied')
            unsupported_err.append('Permission denied')
        else:
            unsupported_err.append('unsupported configuration: external snapshot ' +
                                   'for readonly disk vdb is not supported')
    if test_disk_internal_snapshot:
        unsupported_err.append('unsupported configuration: internal snapshot for disk ' +
                               'vdb unsupported for storage type raw')
    if test_blockcopy:
        unsupported_err.append('block copy is not supported')
    if attach_disk:
        unsupported_err.append('No such file or directory')
    if create_volume:
        unsupported_err.append("backing 'volume' disks isn't yet supported")
        unsupported_err.append('this function is not supported')

    try:
        # Clean up any leftover secrets in the test environment.
        dirty_secret_list = get_secret_list()
        if dirty_secret_list:
            for dirty_secret_uuid in dirty_secret_list:
                virsh.secret_undefine(dirty_secret_uuid)
        # Prepare test environment.
        qemu_config = LibvirtQemuConfig()

        if disk_snapshot_with_sanlock:
            # Install necessary packages: sanlock, libvirt-lock-sanlock
            if not utils_package.package_install(["sanlock"]):
                test.error("Failed to install sanlock")
            if not utils_package.package_install(["libvirt-lock-sanlock"]):
                test.error("Failed to install libvirt-lock-sanlock")

            # Set virt_use_sanlock
            result = process.run("setsebool -P virt_use_sanlock 1", shell=True)
            if result.exit_status:
                test.error("Failed to set virt_use_sanlock value")

            # Update lock_manager in qemu.conf
            qemu_config.lock_manager = 'sanlock'

            # Update qemu-sanlock.conf.
            san_lock_config = LibvirtSanLockConfig()
            san_lock_config.user = '******'
            san_lock_config.group = 'sanlock'
            san_lock_config.host_id = 1
            san_lock_config.auto_disk_leases = True
            process.run("mkdir -p /var/lib/libvirt/sanlock", shell=True)
            san_lock_config.disk_lease_dir = "/var/lib/libvirt/sanlock"
            san_lock_config.require_lease_for_disks = False

            # Start sanlock service and restart libvirtd to enforce changes.
            result = process.run("systemctl start wdmd", shell=True)
            if result.exit_status:
                test.error("Failed to start wdmd service")
            result = process.run("systemctl start sanlock", shell=True)
            if result.exit_status:
                test.error("Failed to start sanlock service")
            utils_libvirtd.Libvirtd().restart()

            # Prepare lockspace and lease file for sanlock in order.
            sanlock_cmd_dict = OrderedDict()
            sanlock_cmd_dict["truncate -s 1M /var/lib/libvirt/sanlock/TEST_LS"] = "Failed to truncate TEST_LS"
            sanlock_cmd_dict["sanlock direct init -s TEST_LS:0:/var/lib/libvirt/sanlock/TEST_LS:0"] = "Failed to sanlock direct init TEST_LS:0"
            sanlock_cmd_dict["chown sanlock:sanlock /var/lib/libvirt/sanlock/TEST_LS"] = "Failed to chown sanlock TEST_LS"
            sanlock_cmd_dict["restorecon -R -v /var/lib/libvirt/sanlock"] = "Failed to restorecon sanlock"
            sanlock_cmd_dict["truncate -s 1M /var/lib/libvirt/sanlock/test-disk-resource-lock"] = "Failed to truncate test-disk-resource-lock"
            sanlock_cmd_dict["sanlock direct init -r TEST_LS:test-disk-resource-lock:" +
                             "/var/lib/libvirt/sanlock/test-disk-resource-lock:0"] = "Failed to sanlock direct init test-disk-resource-lock"
            sanlock_cmd_dict["chown sanlock:sanlock " +
                             "/var/lib/libvirt/sanlock/test-disk-resource-lock"] = "Failed to chown test-disk-resource-loc"
            sanlock_cmd_dict["sanlock client add_lockspace -s TEST_LS:1:" +
                             "/var/lib/libvirt/sanlock/TEST_LS:0"] = "Failed to client add_lockspace -s TEST_LS:0"
            for sanlock_cmd in sanlock_cmd_dict.keys():
                result = process.run(sanlock_cmd, shell=True)
                if result.exit_status:
                    test.error(sanlock_cmd_dict[sanlock_cmd])

            # Create one lease device and add it to VM.
            san_lock_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            lease_device = Lease()
            lease_device.lockspace = 'TEST_LS'
            lease_device.key = 'test-disk-resource-lock'
            lease_device.target = {'path': '/var/lib/libvirt/sanlock/test-disk-resource-lock'}
            san_lock_vmxml.add_device(lease_device)
            san_lock_vmxml.sync()

        # Install the ceph-common package, which includes the rbd command
        if utils_package.package_install(["ceph-common"]):
            if client_name and client_key:
                with open(key_file, 'w') as f:
                    f.write("[%s]\n\tkey = %s\n" %
                            (client_name, client_key))
                key_opt = "--keyring %s" % key_file

                # Create secret xml
                sec_xml = secret_xml.SecretXML("no", "no")
                sec_xml.usage = auth_type
                sec_xml.usage_name = auth_usage
                sec_xml.xmltreefile.write()

                logging.debug("Secret xml: %s", sec_xml)
                ret = virsh.secret_define(sec_xml.xml)
                libvirt.check_exit_status(ret)

                secret_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+",
                                         ret.stdout.strip())[0].lstrip()
                logging.debug("Secret uuid %s", secret_uuid)
                if secret_uuid is None:
                    test.error("Failed to get secret uuid")

                # Set secret value
                auth_key = params.get("auth_key")
                ret = virsh.secret_set_value(secret_uuid, auth_key,
                                             **virsh_dargs)
                libvirt.check_exit_status(ret)

            # Delete the disk if it exists
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                   "{2}".format(mon_host, key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)
        else:
            test.error("Failed to install ceph-common")

        if disk_src_config:
            config_ceph()
        disk_path = ("rbd:%s:mon_host=%s" %
                     (disk_src_name, mon_host))
        if auth_user and auth_key:
            disk_path += (":id=%s:key=%s" %
                          (auth_user, auth_key))
        targetdev = params.get("disk_target", "vdb")
        # To be compatible with create_disk_xml function,
        # some parameters need to be updated.
        params.update({
            "type_name": params.get("disk_type", "network"),
            "target_bus": params.get("disk_target_bus"),
            "target_dev": targetdev,
            "secret_uuid": secret_uuid,
            "source_protocol": params.get("disk_source_protocol"),
            "source_name": disk_src_name,
            "source_host_name": disk_src_host,
            "source_host_port": disk_src_port})
        # Prepare disk image
        if convert_image:
            first_disk = vm.get_first_disk_devices()
            blk_source = first_disk['source']
            # Convert the image to remote storage
            disk_cmd = ("rbd -m %s %s info %s 2> /dev/null|| qemu-img convert"
                        " -O %s %s %s" % (mon_host, key_opt,
                                          disk_src_name, disk_format,
                                          blk_source, disk_path))
            process.run(disk_cmd, ignore_status=False, shell=True)

        elif create_volume:
            vol_params = {"name": vol_name, "capacity": int(vol_cap),
                          "capacity_unit": vol_cap_unit, "format": disk_format}

            create_pool()
            create_vol(vol_params)
            check_vol(vol_params)
        else:
            # Create a local image and make a filesystem on it.
            disk_cmd = ("qemu-img create -f %s %s 10M && mkfs.ext4 -F %s" %
                        (disk_format, img_file, img_file))
            process.run(disk_cmd, ignore_status=False, shell=True)
            # Convert the image to remote storage
            disk_cmd = ("rbd -m %s %s info %s 2> /dev/null|| qemu-img convert -O"
                        " %s %s %s" % (mon_host, key_opt, disk_src_name,
                                       disk_format, img_file, disk_path))
            process.run(disk_cmd, ignore_status=False, shell=True)
            # Create disk snapshot if needed.
            if create_snapshot:
                snap_cmd = ("rbd -m %s %s snap create %s@%s" %
                            (mon_host, key_opt, disk_src_name, snap_name))
                process.run(snap_cmd, ignore_status=False, shell=True)
            if test_json_pseudo_protocol:
                # Create one frontend image with the rbd backing file.
                json_str = ('json:{"file.driver":"rbd",'
                            '"file.filename":"rbd:%s:mon_host=%s"}'
                            % (disk_src_name, mon_host))
                # Pass a different json string according to the auth config
                if auth_user and auth_key:
                    json_str = ('%s:id=%s:key=%s"}' % (json_str[:-2], auth_user, auth_key))
                disk_cmd = ("qemu-img create -f qcow2 -b '%s' %s" %
                            (json_str, front_end_img_file))
                disk_path = front_end_img_file
                process.run(disk_cmd, ignore_status=False, shell=True)
        # If hot plugging, start the VM first and wait for the OS to boot.
        # Otherwise stop the VM if it is running.
        if start_vm:
            if vm.is_dead():
                vm.start()
            vm.wait_for_login().close()
        else:
            if not vm.is_dead():
                vm.destroy()
        if attach_device:
            if create_volume:
                params.update({"source_pool": pool_name})
                params.update({"type_name": "volume"})
                # Auth options are not needed for volume disks
                if "auth_user" in params:
                    params.pop("auth_user")
                if "auth_type" in params:
                    params.pop("auth_type")
                if "secret_type" in params:
                    params.pop("secret_type")
                if "secret_uuid" in params:
                    params.pop("secret_uuid")
                if "secret_usage" in params:
                    params.pop("secret_usage")
            xml_file = libvirt.create_disk_xml(params)
            if additional_guest:
                # Copy xml_file for additional guest VM.
                shutil.copyfile(xml_file, additional_xml_file)
            opts = params.get("attach_option", "")
            ret = virsh.attach_device(vm_name, xml_file,
                                      flagstr=opts, debug=True)
            libvirt.check_result(ret, skip_if=unsupported_err)
            if additional_guest:
                # Make sure the additional VM is running
                if additional_vm.is_dead():
                    additional_vm.start()
                    additional_vm.wait_for_login().close()
                ret = virsh.attach_device(guest_name, additional_xml_file,
                                          "", debug=True)
                libvirt.check_result(ret, skip_if=unsupported_err)
        elif attach_disk:
            opts = params.get("attach_option", "")
            ret = virsh.attach_disk(vm_name, disk_path,
                                    targetdev, opts)
            libvirt.check_result(ret, skip_if=unsupported_err)
        elif test_disk_readonly:
            params.update({'readonly': "yes"})
            xml_file = libvirt.create_disk_xml(params)
            opts = params.get("attach_option", "")
            ret = virsh.attach_device(vm_name, xml_file,
                                      flagstr=opts, debug=True)
            libvirt.check_result(ret, skip_if=unsupported_err)
        elif test_disk_internal_snapshot:
            xml_file = libvirt.create_disk_xml(params)
            opts = params.get("attach_option", "")
            ret = virsh.attach_device(vm_name, xml_file,
                                      flagstr=opts, debug=True)
            libvirt.check_result(ret, skip_if=unsupported_err)
        elif disk_snapshot_with_sanlock:
            if vm.is_dead():
                vm.start()
            snapshot_path = make_snapshot()
            if vm.is_alive():
                vm.destroy()
        elif not create_volume:
            libvirt.set_vm_disk(vm, params)
        if test_blockcopy:
            logging.info("Creating %s...", vm_name)
            vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vm.undefine()
            if virsh.create(vmxml_for_test.xml, **virsh_dargs).exit_status:
                vmxml_backup.define()
                test.fail("Can't create the domain")
        elif vm.is_dead():
            vm.start()
        # Wait until the vm is running
        vm.wait_for_login(timeout=600).close()
        if additional_guest:
            if additional_vm.is_dead():
                additional_vm.start()
        # Check qemu command line
        if test_qemu_cmd:
            check_qemu_cmd()
        # Check partitions in vm
        if test_vm_parts:
            if not check_in_vm(vm, targetdev, old_parts,
                               read_only=create_snapshot):
                test.fail("Failed to check vm partitions")
            if additional_guest:
                if not check_in_vm(additional_vm, targetdev, old_parts):
                    test.fail("Failed to check vm partitions")
        # Save and restore operation
        if test_save_restore:
            check_save_restore()
        if test_snapshot:
            snap_option = params.get("snapshot_option", "")
            check_snapshot(snap_option)
        if test_blockcopy:
            check_blockcopy(targetdev)
        if test_disk_readonly:
            snap_option = params.get("snapshot_option", "")
            check_snapshot(snap_option, 'vdb')
        if test_disk_internal_snapshot:
            snap_option = params.get("snapshot_option", "")
            check_snapshot(snap_option, targetdev)
        # Detach the device.
        if attach_device:
            xml_file = libvirt.create_disk_xml(params)
            ret = virsh.detach_device(vm_name, xml_file)
            libvirt.check_exit_status(ret)
            if additional_guest:
                ret = virsh.detach_device(guest_name, xml_file)
                libvirt.check_exit_status(ret)
        elif attach_disk:
            ret = virsh.detach_disk(vm_name, targetdev)
            libvirt.check_exit_status(ret)

        # Check disk in vm after detachment.
        if attach_device or attach_disk:
            session = vm.wait_for_login()
            new_parts = libvirt.get_parts_list(session)
            if len(new_parts) != len(old_parts):
                test.fail("Disk still exists in vm"
                          " after detachment")
            session.close()

    except virt_vm.VMStartError as details:
        for msg in unsupported_err:
            if msg in str(details):
                test.cancel(str(details))
        else:
            test.fail("VM failed to start."
                      "Error: %s" % str(details))
    finally:
        # Remove /etc/ceph/ceph.conf file if exists.
        if os.path.exists('/etc/ceph/ceph.conf'):
            os.remove('/etc/ceph/ceph.conf')
        # Delete snapshots.
        snapshot_lists = virsh.snapshot_list(vm_name)
        if len(snapshot_lists) > 0:
            libvirt.clean_up_snapshots(vm_name, snapshot_lists)
            for snap in snapshot_lists:
                virsh.snapshot_delete(vm_name, snap, "--metadata")

        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        if additional_guest:
            virsh.remove_domain(guest_name,
                                "--remove-all-storage",
                                ignore_status=True)
        # Remove the snapshot.
        if create_snapshot:
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} snap"
                   " purge {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)
        elif create_volume:
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt, os.path.join(disk_src_pool, cloned_vol_name)))
            process.run(cmd, ignore_status=True, shell=True)
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt, os.path.join(disk_src_pool, create_from_cloned_volume)))
            process.run(cmd, ignore_status=True, shell=True)
            clean_up_volume_snapshots()
        else:
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)

        # Delete tmp files.
        if os.path.exists(key_file):
            os.remove(key_file)
        if os.path.exists(img_file):
            os.remove(img_file)
        # Clean up volume, pool
        if vol_name and vol_name in str(virsh.vol_list(pool_name).stdout):
            virsh.vol_delete(vol_name, pool_name)
        if pool_name and pool_name in virsh.pool_state_dict():
            virsh.pool_destroy(pool_name, **virsh_dargs)
            virsh.pool_undefine(pool_name, **virsh_dargs)

        # Clean up secret
        secret_list = get_secret_list()
        if secret_list:
            for secret_uuid in secret_list:
                virsh.secret_undefine(secret_uuid)

        logging.info("Restoring vm...")
        vmxml_backup.sync()

        if disk_snapshot_with_sanlock:
            # Restore virt_use_sanlock setting.
            process.run("setsebool -P virt_use_sanlock 0", shell=True)
            # Restore qemu config
            qemu_config.restore()
            utils_libvirtd.Libvirtd().restart()
            # Force shutdown sanlock service.
            process.run("sanlock client shutdown -f 1", shell=True)
            # Clean up lockspace folder
            process.run("rm -rf  /var/lib/libvirt/sanlock/*", shell=True)
            if snapshot_path is not None:
                for snapshot in snapshot_path:
                    if os.path.exists(snapshot):
                        os.remove(snapshot)
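
As a hedged illustration of the network/rbd parameter set assembled in this test, libvirt.create_disk_xml(params) is expected to emit a disk element along the lines of the comment below; the image name, monitor host and secret UUID are placeholders, not values from the test.

# Expected rough shape of the generated rbd disk XML (placeholder values):
#
#   <disk type='network' device='disk'>
#     <driver name='qemu' type='raw'/>
#     <auth username='admin'>
#       <secret type='ceph' uuid='b4e8f6d3-100c-4e71-9f91-069f89742273'/>
#     </auth>
#     <source protocol='rbd' name='rbd_pool/rbd_test.img'>
#       <host name='ceph-mon.example.com' port='6789'/>
#     </source>
#     <target dev='vdb' bus='virtio'/>
#   </disk>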
Exemplo n.º 26
0
        elif disk_cache != cache_options:
            check_cache_after_cmd = False

    # Eject cdrom test
    eject_cdrom = "yes" == params.get("at_dt_disk_eject_cdrom", "no")
    save_vm = "yes" == params.get("at_dt_disk_save_vm", "no")
    save_file = os.path.join(test.tmpdir, "vm.save")
    try:
        if eject_cdrom:
            eject_params = {
                'type_name': "file",
                'device_type': "cdrom",
                'target_dev': "hdc",
                'target_bus': "ide"
            }
            eject_xml = libvirt.create_disk_xml(eject_params)
            logging.debug("Eject CDROM by XML: %s", open(eject_xml).read())
            # Run the command twice to make sure the cdrom tray opens first (BZ892289)
            # Open tray
            virsh.attach_device(domainarg=vm_name,
                                filearg=eject_xml,
                                debug=True)
            # Add time sleep between two attach commands.
            if time_sleep:
                time.sleep(float(time_sleep))
            # Eject cdrom
            result = virsh.attach_device(domainarg=vm_name,
                                         filearg=eject_xml,
                                         debug=True)
            if result.exit_status != 0:
                raise error.TestFail("Eject CDROM failed")
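
A hedged follow-up sketch (not part of the original snippet): after the second attach-device, the live domain XML is expected to drop the media source and, on libvirt versions that report it, mark the tray as open. It assumes the same vm_name as above.

import logging
from virttest import virsh

# Check whether libvirt reports the cdrom tray as open after the eject attempt.
dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
if "tray='open'" not in dom_xml:
    logging.warning("cdrom tray not reported as open after the eject attempt")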
Exemplo n.º 27
0
def run(test, params, env):
    """
    Test per-image DAC disk hotplug to VM.

    (1).Init variables for test.
    (2).Create disk xml with per-image DAC.
    (3).Start VM.
    (4).Attach the disk to VM and check result.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    # Get qemu.conf config variables
    qemu_user = params.get("qemu_user")
    qemu_group = params.get("qemu_group")
    dynamic_ownership = "yes" == params.get("dynamic_ownership", "yes")

    # Get per-image DAC setting
    vol_name = params.get('vol_name')
    target_dev = params.get('target_dev')
    disk_type_name = params.get("disk_type_name")
    img_user = params.get("img_user")
    img_group = params.get("img_group")
    relabel = 'yes' == params.get('relabel', 'yes')

    if not libvirt_version.version_compare(1, 2, 7):
        raise error.TestNAError("per-image DAC only supported on version 1.2.7"
                                " and after.")

    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)

    img_path = None
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        # set qemu conf
        qemu_conf.user = qemu_user
        qemu_conf.group = qemu_group
        if dynamic_ownership:
            qemu_conf.dynamic_ownership = 1
        else:
            qemu_conf.dynamic_ownership = 0
        logging.debug("the qemu.conf content is: %s" % qemu_conf)
        libvirtd.restart()

        first_disk = vm.get_first_disk_devices()
        blk_source = first_disk['source']
        owner_str = format_user_group_str(qemu_user, qemu_group)
        src_usr, src_grp = owner_str.split(':')
        os.chown(blk_source, int(src_usr), int(src_grp))
        vm.start()

        # Init a QemuImg instance and create a img.
        params['image_name'] = vol_name
        tmp_dir = data_dir.get_tmp_dir()
        image = qemu_storage.QemuImg(params, tmp_dir, vol_name)
        # Create a image.
        img_path, result = image.create(params)

        # Create disk xml for attach.
        params['source_file'] = img_path
        sec_label = "%s:%s" % (img_user, img_group)
        params['sec_label'] = sec_label
        params['type_name'] = disk_type_name
        sec_label_id = format_user_group_str(img_user, img_group)

        disk_xml = utlv.create_disk_xml(params)

        # Change img file ownership to qemu:qemu (uid/gid 107) and mode to 0660
        os.chown(img_path, 107, 107)
        os.chmod(img_path, 0o660)

        img_label_before = check_ownership(img_path)
        if img_label_before:
            logging.debug("the image ownership before "
                          "attach: %s" % img_label_before)

        # Do the attach action.
        option = "--persistent"
        result = virsh.attach_device(vm_name, filearg=disk_xml,
                                     flagstr=option, debug=True)
        utlv.check_exit_status(result, status_error)

        if not result.exit_status:
            img_label_after = check_ownership(img_path)
            if dynamic_ownership and relabel:
                if img_label_after != sec_label_id:
                    raise error.TestFail("The image dac label %s is not "
                                         "expected." % img_label_after)

            ret = virsh.detach_disk(vm_name, target=target_dev,
                                    extra=option,
                                    debug=True)
            utlv.check_exit_status(ret, status_error)
    finally:
        # clean up
        vm.destroy()
        qemu_conf.restore()
        vmxml.sync()
        libvirtd.restart()
        if img_path and os.path.exists(img_path):
            os.unlink(img_path)
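
A hedged illustration of the per-image DAC XML that the sec_label parameter is meant to produce; the file path, driver type, label value and target device below are placeholders rather than values from the test.

# Expected rough shape of the attached disk XML with a per-image DAC label:
#
#   <disk type='file' device='disk'>
#     <driver name='qemu' type='qcow2'/>
#     <source file='/var/tmp/vol.img'>
#       <seclabel model='dac' relabel='yes'>
#         <label>107:107</label>
#       </seclabel>
#     </source>
#     <target dev='vdb' bus='virtio'/>
#   </disk>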
Exemplo n.º 28
0
def run(test, params, env):
    """
    Test vm backingchain, blockcopy
    """
    vm_name = params.get('main_vm')
    vm = env.get_vm(vm_name)
    status_error = 'yes' == params.get('status_error', 'no')
    error_msg = params.get('error_msg', '')
    case = params.get('case', '')
    blockcommand = params.get('blockcommand', '')
    blk_top = int(params.get('top', 0))
    blk_base = int(params.get('base', 0))
    opts = params.get('opts', '--verbose --wait')
    check_func = params.get('check_func', '')
    disk_type = params.get('disk_type', '')
    disk_src = params.get('disk_src', '')
    driver_type = params.get('driver_type', 'qcow2')
    vol_name = params.get('vol_name', 'vol_blockpull')
    pool_name = params.get('pool_name', '')
    brick_path = os.path.join(data_dir.get_tmp_dir(), pool_name)
    vg_name = params.get('vg_name', 'HostVG')
    vol_size = params.get('vol_size', '10M')

    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    bkxml = vmxml.copy()

    # List to collect paths to delete after test
    file_to_del = []
    virsh_dargs = {'debug': True, 'ignore_status': False}

    try:
        all_disks = vmxml.get_disk_source(vm_name)
        if not all_disks:
            test.error('No disk file found in vm.')
        image_file = all_disks[0].find('source').get('file')
        logging.debug('Image file of vm: %s', image_file)

        # Get all dev of virtio disks to calculate the dev of new disk
        all_vdisks = [disk for disk in all_disks if disk.find('target').get('dev').startswith('vd')]
        disk_dev = all_vdisks[-1].find('target').get('dev')
        new_dev = disk_dev[:-1] + chr(ord(disk_dev[-1]) + 1)

        # Setup iscsi target
        if disk_src == 'iscsi':
            disk_target = libvirt.setup_or_cleanup_iscsi(
                is_setup=True, is_login=True,
                image_size='1G')
            logging.debug('ISCSI target: %s', disk_target)

        # Setup lvm
        elif disk_src == 'lvm':
            # Stop multipathd to avoid vgcreate failures
            multipathd = service.Factory.create_service("multipathd")
            multipathd_status = multipathd.status()
            if multipathd_status:
                multipathd.stop()

            # Setup iscsi target
            device_name = libvirt.setup_or_cleanup_iscsi(
                is_setup=True, is_login=True,
                image_size='1G')
            logging.debug('ISCSI target for lvm: %s', device_name)

            # Create logical device
            logical_device = device_name
            lv_utils.vg_create(vg_name, logical_device)
            vg_created = True

            # Create logical volume as backing store
            vol_bk, vol_disk = 'vol1', 'vol2'
            lv_utils.lv_create(vg_name, vol_bk, vol_size)

            disk_target = '/dev/%s/%s' % (vg_name, vol_bk)
            src_vol = '/dev/%s/%s' % (vg_name, vol_disk)

        # Setup gluster
        elif disk_src == 'gluster':
            host_ip = gluster.setup_or_cleanup_gluster(
                is_setup=True, brick_path=brick_path, **params)
            logging.debug(host_ip)
            gluster_img = 'test.img'
            img_create_cmd = "qemu-img create -f raw /mnt/%s 10M" % gluster_img
            process.run("mount -t glusterfs %s:%s /mnt; %s; umount /mnt"
                        % (host_ip, vol_name, img_create_cmd), shell=True)
            disk_target = 'gluster://%s/%s/%s' % (host_ip, vol_name, gluster_img)

        else:
            test.error('Wrong disk source, unsupported by this test.')

        new_image = os.path.join(os.path.split(image_file)[0], 'test.img')
        params['snapshot_list'] = ['s%d' % i for i in range(1, 5)]

        if disk_src == 'lvm':
            new_image = src_vol
            if disk_type == 'block':
                new_image = disk_target
                for i in range(2, 6):
                    lv_utils.lv_create(vg_name, 'vol%s' % i, vol_size)
                snapshot_image_list = ['/dev/%s/vol%s' % (vg_name, i) for i in range(2, 6)]
        else:
            file_to_del.append(new_image)
            snapshot_image_list = [new_image.replace('img', i) for i in params['snapshot_list']]
        cmd_create_img = 'qemu-img create -f %s -b %s -F raw %s' % (driver_type, disk_target, new_image)
        if disk_type == 'block' and driver_type == 'raw':
            pass
        else:
            process.run(cmd_create_img, verbose=True, shell=True)
        info_new = utils_misc.get_image_info(new_image)
        logging.debug(info_new)

        # Create xml of new disk and add it to vmxml
        if disk_type:
            new_disk = Disk()
            new_disk.xml = libvirt.create_disk_xml({
                'type_name': disk_type,
                'driver_type': driver_type,
                'target_dev': new_dev,
                'source_file': new_image
            })

            logging.debug(new_disk.xml)

            new_devices = vmxml.devices
            new_devices.append(new_disk)
            vmxml.devices = new_devices
            vmxml.xmltreefile.write()
            logging.debug(vmxml)
            vmxml.sync()

        vm.start()
        logging.debug(virsh.dumpxml(vm_name))

        # Create backing chain
        for i in range(len(params['snapshot_list'])):
            virsh.snapshot_create_as(
                vm_name,
                '%s --disk-only --diskspec %s,file=%s,stype=%s' %
                (params['snapshot_list'][i], new_dev, snapshot_image_list[i],
                 disk_type),
                **virsh_dargs
            )

            # Get path of each snapshot file
            snaps = virsh.domblklist(vm_name, debug=True).stdout.splitlines()
            for line in snaps:
                if line.lstrip().startswith(('hd', 'sd', 'vd')):
                    file_to_del.append(line.split()[-1])

        qemu_img_cmd = 'qemu-img info --backing-chain %s' % snapshot_image_list[-1]
        if libvirt_storage.check_qemu_image_lock_support():
            qemu_img_cmd += " -U"
        bc_info = process.run(qemu_img_cmd, verbose=True, shell=True).stdout_text

        if not disk_type == 'block':
            bc_chain = snapshot_image_list[::-1] + [new_image, disk_target]
        else:
            bc_chain = snapshot_image_list[::-1] + [new_image]
        bc_result = check_backingchain(bc_chain, bc_info)
        if not bc_result:
            test.fail('qemu-img info output of backing chain is not correct: %s'
                      % bc_info)

        # Generate blockpull/blockcommit options
        virsh_blk_cmd = getattr(virsh, blockcommand)
        if blockcommand == 'blockpull' and blk_base != 0:
            opts += ' --base {dev}[{}]'.format(blk_base, dev=new_dev)
        elif blockcommand == 'blockcommit':
            opt_top = ' --top {dev}[{}]'.format(blk_top, dev=new_dev) if blk_top != 0 else ''
            opt_base = ' --base {dev}[{}]'.format(blk_base, dev=new_dev) if blk_base != 0 else ''
            opts += opt_top + opt_base + (' --active' if blk_top == 0 else '')

        # Do blockpull/blockcommit
        virsh_blk_cmd(vm_name, new_dev, opts, **virsh_dargs)
        if blockcommand == 'blockcommit':
            virsh.blockjob(vm_name, new_dev, '--pivot', **virsh_dargs)
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug("XML after %s: %s" % (blockcommand, vmxml))

        # Check backing chain after blockpull/blockcommit
        check_bc_func_name = 'check_bc_%s' % check_func
        if check_bc_func_name in globals():
            check_bc = eval(check_bc_func_name)
            if not callable(check_bc):
                logging.warning('Function "%s" is not callable.', check_bc_func_name)
            elif not check_bc(blockcommand, vmxml, new_dev, bc_chain):
                test.fail('Backing chain check after %s failed' % blockcommand)
        else:
            logging.warning('Function "%s" is not implemented.', check_bc_func_name)

        virsh.dumpxml(vm_name, debug=True)

        # Check whether login is successful
        try:
            vm.wait_for_login().close()
        except Exception as e:
            test.fail('VM login failed: %s' % e)

    finally:
        logging.info('Start cleaning up.')
        for ss in params.get('snapshot_list', []):
            virsh.snapshot_delete(vm_name, '%s --metadata' % ss, debug=True)
        bkxml.sync()
        for path in file_to_del:
            logging.debug('Remove %s', path)
            if os.path.exists(path):
                os.remove(path)
        if disk_src == 'iscsi':
            libvirt.setup_or_cleanup_iscsi(is_setup=False)
        elif disk_src == 'lvm':
            process.run('rm -rf /dev/%s/%s' % (vg_name, vol_disk), ignore_status=True)
            if 'vol_bk' in locals():
                lv_utils.lv_remove(vg_name, vol_bk)
            if 'vg_created' in locals() and vg_created:
                lv_utils.vg_remove(vg_name)
                cmd = "pvs |grep %s |awk '{print $1}'" % vg_name
                pv_name = process.system_output(cmd, shell=True, verbose=True).strip()
                if pv_name:
                    process.run("pvremove %s" % pv_name, verbose=True, ignore_status=True)
            libvirt.setup_or_cleanup_iscsi(is_setup=False)
        elif disk_src == 'gluster':
            gluster.setup_or_cleanup_gluster(
                is_setup=False, brick_path=brick_path, **params)
        if 'multipathd_status' in locals() and multipathd_status:
            multipathd.start()
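
The option handling above (noted in the options comment) relies on libvirt's backing-chain index notation, where --top vdb[1] and --base vdb[2] name layers of the chain counted below the active image. Below is a minimal standalone sketch of that option building; the helper name and the virttest import path are assumptions for illustration, not taken from the example.

from virttest import virsh  # assumed avocado-vt import path


def build_blockjob_opts(blockcommand, dev, blk_top=0, blk_base=0):
    """Build --top/--base options with the dev[index] notation; 0 means the active image."""
    opts = ''
    if blockcommand == 'blockpull' and blk_base:
        opts = '--base {}[{}]'.format(dev, blk_base)
    elif blockcommand == 'blockcommit':
        if blk_top:
            opts += ' --top {}[{}]'.format(dev, blk_top)
        if blk_base:
            opts += ' --base {}[{}]'.format(dev, blk_base)
        if not blk_top:
            # Committing the active layer itself needs --active and a later blockjob --pivot.
            opts += ' --active'
    return opts.strip()

# Example: virsh.blockcommit(vm_name, 'vdb', build_blockjob_opts('blockcommit', 'vdb', blk_base=2))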
Exemplo n.º 29
0
def run(test, params, env):
    """
    Test steps:
    1. Prepare a multipath device.
    2. Prepare virtual disk xml using this multipath device.
    3. Hot/Cold-plug the disk xml to virtual machine.
    4. Check the attached disk in the virtual machine.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    mpath_conf_path = params.get('mpath_conf_path', '/etc/multipath.conf')
    mpath_conf_bkup_path = params.get('mpath_conf_bkup_path',
                                      '/etc/multipath.conf.bkup')
    mpath_conf_exist = False

    def prepare_multipath_conf():
        """
        Prepare the multipath.conf to make sure iscsi lun can be seen as a
        multipath device.
        :return: True if multipath.conf already existed beforehand, False otherwise.
        """
        multipath_conf_exist = False
        mpath_conf_content = ("defaults {\n"
                              "    user_friendly_names yes\n"
                              "    path_grouping_policy multibus\n"
                              "    failback immediate\n"
                              "    no_path_retry fail\n"
                              "}\n")
        if os.path.exists(mpath_conf_bkup_path):
            os.remove(mpath_conf_bkup_path)
        if os.path.exists(mpath_conf_path):
            multipath_conf_exist = True
            shutil.move(mpath_conf_path, mpath_conf_bkup_path)
        with open(mpath_conf_path, 'wt') as mpath_conf_file:
            mpath_conf_file.write(mpath_conf_content)
        return multipath_conf_exist

    def recover_multipath_conf(remove_mpath_conf=False):
        """
        Recover the multipath.conf.
        :param remove_mpath_conf: True to remove multipath.conf.
        """
        if os.path.exists(mpath_conf_bkup_path):
            if os.path.exists(mpath_conf_path):
                os.remove(mpath_conf_path)
            shutil.move(mpath_conf_bkup_path, mpath_conf_path)
        if os.path.exists(mpath_conf_path) and remove_mpath_conf:
            os.remove(mpath_conf_path)

    def check_in_vm(vm, old_parts):
        """
        Check mount/read/write disk in VM.

        :param vm: Virtual machine to be checked.
        :param old_parts: Original disk partitions in VM.
        """
        try:
            session = vm.wait_for_login()
            if platform.platform().count('ppc64'):
                time.sleep(10)
            new_parts = libvirt.get_parts_list(session)
            added_parts = list(set(new_parts).difference(set(old_parts)))
            logging.info("Added parts:%s", added_parts)
            if len(added_parts) != 1:
                test.fail("The number of new partitions is invalid in VM")
            else:
                added_part = added_parts[0]
            cmd = ("fdisk -l /dev/{0} && mkfs.ext4 -F /dev/{0} && "
                   "mkdir -p test && mount /dev/{0} test && echo"
                   " teststring > test/testfile && umount test"
                   .format(added_part))
            status, output = session.cmd_status_output(cmd)
            session.close()
            if status:
                test.fail("Disk operation in VM failed:%s" % output)
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as err:
            test.fail("Error happens when check disk in vm: %s" % err)

    storage_size = params.get("storage_size", "1G")
    hotplug_disk = "yes" == params.get("hotplug_disk", "no")
    status_error = "yes" == params.get("status_error")
    # Start VM and get all partitions in VM.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = libvirt.get_parts_list(session)
    session.close()
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    try:
        # Setup backend storage
        mpath_conf_exist = prepare_multipath_conf()
        mpath.restart_multipathd()
        old_mpath_devs = mpath.find_mpath_devs()
        libvirt.setup_or_cleanup_iscsi(is_setup=True)
        mpath.restart_multipathd()
        cur_mpath_devs = mpath.find_mpath_devs()
        new_mpath_devs = list(set(cur_mpath_devs).difference(
            set(old_mpath_devs)))
        logging.debug("newly added mpath devs are: %s", new_mpath_devs)
        # Prepare disk xml
        disk_params = {}
        disk_params['type_name'] = params.get("virt_disk_type", "block")
        disk_params['source_file'] = '/dev/mapper/' + new_mpath_devs[0]
        disk_params['device_type'] = params.get("virt_disk_device", "lun")
        disk_params['sgio'] = params.get("sgio", "filtered")
        disk_params['rawio'] = params.get("rawio", "no")
        disk_params['target_dev'] = params.get("virt_disk_device_target", "sdb")
        disk_params['target_bus'] = params.get("virt_disk_device_bus", "scsi")
        disk_params['driver_name'] = params.get("virt_disk_drive_name", "qemu")
        disk_params['driver_type'] = params.get("virt_disk_device_format", "raw")
        disk_xml = libvirt.create_disk_xml(disk_params)
        # Test disk operation with newly added disk xml
        attach_option = ""
        if not hotplug_disk:
            attach_option = "--config"
        result = virsh.attach_device(vm_name, disk_xml, flagstr=attach_option,
                                     ignore_status=True, debug=True)
        libvirt.check_exit_status(result, status_error)
        if not hotplug_disk:
            vm.destroy(gracefully=False)
            vm.start()
            vm.wait_for_login().close()
        check_in_vm(vm, old_parts)
    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()
        # Clean up backend storage
        libvirt.setup_or_cleanup_iscsi(is_setup=False)
        recover_multipath_conf(not mpath_conf_exist)
        mpath.restart_multipathd()
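
The prepare_multipath_conf/recover_multipath_conf pair above can be condensed into a context manager. The following is a minimal standalone sketch using only the standard library; the helper name is an assumption, and the defaults block is the same one the example writes.

import contextlib
import os
import shutil

MPATH_CONF = '/etc/multipath.conf'
MPATH_CONF_BKUP = '/etc/multipath.conf.bkup'
MPATH_DEFAULTS = ("defaults {\n"
                  "    user_friendly_names yes\n"
                  "    path_grouping_policy multibus\n"
                  "    failback immediate\n"
                  "    no_path_retry fail\n"
                  "}\n")


@contextlib.contextmanager
def temporary_multipath_conf():
    """Swap in a known multipath.conf and restore the original on exit."""
    existed = os.path.exists(MPATH_CONF)
    if existed:
        shutil.move(MPATH_CONF, MPATH_CONF_BKUP)
    with open(MPATH_CONF, 'w') as conf:
        conf.write(MPATH_DEFAULTS)
    try:
        yield MPATH_CONF
    finally:
        # Drop the temporary file and put the original back if there was one.
        os.remove(MPATH_CONF)
        if existed:
            shutil.move(MPATH_CONF_BKUP, MPATH_CONF)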
Exemplo n.º 30
0
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        session = vm.wait_for_login()
        output = session.cmd_status_output('lsblk')
        logging.debug("%s", output[1])
        old_count = vmxml.get_disk_count(vm_name)
        bf_disks = libvirt_vm.get_disks()
        disk_params = {
            'type_name': 'volume',
            'target_dev': target_device,
            'target_bus': 'virtio',
            'source_pool': pool_name,
            'source_volume': test_unit,
            'driver_type': 'raw'
        }
        disk_xml = os.path.join(data_dir.get_tmp_dir(), 'disk_xml.xml')
        lun_disk_xml = utlv.create_disk_xml(disk_params)

        copyfile(lun_disk_xml, disk_xml)
        attach_success = virsh.attach_device(vm_name, disk_xml, debug=True)

        utlv.check_exit_status(attach_success)

        virsh.reboot(vm_name, debug=True)

        logging.info("Checking disk availability in domain")
        if not vmxml.get_disk_count(vm_name):
            raise exceptions.TestFail("No disk in domain %s." % vm_name)
        new_count = vmxml.get_disk_count(vm_name)

        if new_count <= old_count:
            raise exceptions.TestFail("Failed to attach disk %s" %
Exemplo n.º 31
0
def run(test, params, env):
    """
    Test pure checkpoint commands
    """

    def prepare_checkpoints(disk="vdb", num=1, cp_prefix="test_checkpoint_"):
        """
        Create checkpoints for specific disk

        :param disk: The disk to create checkpoint.
        :param num: How many checkpoints to be created
        :param cp_prefix: The prefix to name the checkpoint.
        """
        option_pattern = ("{0} --diskspec vda,checkpoint=no "
                          "--diskspec {1},checkpoint=bitmap,bitmap={0}")
        for i in range(num):
            # create checkpoints
            checkpoint_name = cp_prefix + str(i)
            options = option_pattern.format(checkpoint_name, disk)
            virsh.checkpoint_create_as(vm_name, options, **virsh_dargs)
            current_checkpoints.append(checkpoint_name)

    # Cancel the test if libvirt version is too low
    if not libvirt_version.version_compare(6, 0, 0):
        test.cancel("Current libvirt version doesn't support "
                    "incremental backup.")

    checkpoint_cmd = params.get("checkpoint_cmd")
    cmd_flag = params.get("flag")
    required_checkpoints = int(params.get("required_checkpoints", 0))
    test_disk_size = params.get("test_disk_size", "100M")
    test_disk_target = params.get("test_disk_target", "vdb")
    status_error = "yes" == params.get("status_error")
    tmp_dir = data_dir.get_tmp_dir()
    current_checkpoints = []
    virsh_dargs = {'debug': True, 'ignore_status': False}

    try:
        vm_name = params.get("main_vm")
        vm = env.get_vm(vm_name)

        # Make sure there is no checkpoint metadata before test
        utils_backup.clean_checkpoints(vm_name)

        # Backup vm xml
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        vmxml_backup = vmxml.copy()
        utils_backup.enable_inc_backup_for_vm(vm)

        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Prepare the disk to be used.
        disk_params = {}
        disk_path = ""
        image_name = "{}_image.qcow2".format(test_disk_target)
        disk_path = os.path.join(tmp_dir, image_name)
        libvirt.create_local_disk("file", disk_path, test_disk_size,
                                  "qcow2")
        disk_params = {"device_type": "disk",
                       "type_name": "file",
                       "driver_type": "qcow2",
                       "target_dev": test_disk_target,
                       "source_file": disk_path}
        disk_xml = libvirt.create_disk_xml(disk_params)
        virsh.attach_device(vm.name, disk_xml,
                            flagstr="--config", **virsh_dargs)
        vm.start()
        session = vm.wait_for_login()
        new_disks_in_vm = list(utils_disk.get_linux_disks(session).keys())
        session.close()
        if required_checkpoints > 0:
            prepare_checkpoints(test_disk_target, required_checkpoints)
        if checkpoint_cmd == "checkpoint-create":
            if not current_checkpoints:
                test.fail("No existing checkpoints prepared.")
            if "--redefine" in cmd_flag:
                no_domain = "yes" == params.get("no_domain")
                extra_flag = params.get("extra_flag")
                image_with_bitmap = "yes" == params.get("image_with_bitmap")
                cp_dumpxml_options = ""
                if no_domain:
                    cp_dumpxml_options = "--no-domain"
                    if libvirt_version.version_compare(6, 6, 0):
                        # libvirt-6.6.0-9.el8 starts to allow redefine VM
                        # backup checkpoint without the domain XML (bz1901830)
                        status_error = False
                checkpoint_redef = current_checkpoints[0]
                cp_xml = checkpoint_xml.CheckpointXML.new_from_checkpoint_dumpxml(
                        vm_name, checkpoint_redef, cp_dumpxml_options)
                logging.debug("Checkpoint XML to be redefined is: %s", cp_xml)
                xml_file = cp_xml.xml
                virsh.checkpoint_delete(vm_name, checkpoint_redef,
                                        "--metadata", **virsh_dargs)
                cmd_options = xml_file + " " + cmd_flag
                if extra_flag:
                    cmd_options += " " + extra_flag
                    if "--redefine-validate" in extra_flag:
                        if not libvirt_version.version_compare(6, 6, 0):
                            test.cancel("--redefine-validate not supported in "
                                        "current libvirt versoin.")
                        if not image_with_bitmap:
                            status_error = True
                            # replace vdb's image with a new qcow2 file to make sure
                            # the image has no block dirty bitmap anymore
                            vm.destroy(gracefully=False)
                            libvirt.create_local_disk("file", disk_path,
                                                      test_disk_size, "qcow2")
                            vm.start()
                            vm.wait_for_login().close()
                result = virsh.checkpoint_create(vm_name, cmd_options, debug=True)
                libvirt.check_exit_status(result, status_error)
        elif checkpoint_cmd == "checkpoint-create-as":
            if "--print-xml" in cmd_flag:
                checkpoint_name = "test_checkpoint_0"
                options = ("{0} --diskspec vda,checkpoint=no --diskspec {1},"
                           "checkpoint=bitmap,bitmap={0} "
                           "--print-xml".format(checkpoint_name, test_disk_target))
                virsh.checkpoint_create_as(vm_name, options, **virsh_dargs)
                # The checkpoint should not be created, so we do the following check
                cp_list_result = virsh.checkpoint_list(vm_name, checkpoint_name, debug=True)
                libvirt.check_exit_status(cp_list_result, True)
        elif checkpoint_cmd == "checkpoint-info":
            if len(current_checkpoints) != 3:
                test.fail("We should prepare 3 checkpoints.")
            parent_checkpoint = current_checkpoints[0]
            test_checkpoint = current_checkpoints[1]
            stdout = virsh.checkpoint_info(vm_name, test_checkpoint,
                                           **virsh_dargs).stdout_text.strip()
            if (
                    not re.search("domain.*%s" % vm_name, stdout, re.IGNORECASE) or
                    not re.search("parent.*%s" % parent_checkpoint, stdout, re.IGNORECASE) or
                    not re.search("children.*1", stdout, re.IGNORECASE) or
                    not re.search("descendants.*1", stdout, re.IGNORECASE)
               ):
                test.fail("checkpoint-info return inaccurate informaion: %s" % stdout)
        elif checkpoint_cmd == "checkpoint-list":
            logic_error = False
            if not cmd_flag:
                stdout = virsh.checkpoint_list(vm_name,
                                               **virsh_dargs).stdout_text.strip()
                for checkpoint in current_checkpoints:
                    if checkpoint not in stdout:
                        logic_error = True
            elif cmd_flag == "--parent":
                stdout = virsh.checkpoint_list(vm_name, cmd_flag,
                                               **virsh_dargs).stdout_text.strip()
                for checkpoint in current_checkpoints:
                    if checkpoint == current_checkpoints[-1]:
                        if stdout.count(checkpoint) != 1:
                            logic_error = True
                    else:
                        if stdout.count(checkpoint) != 2:
                            logic_error = True
            elif cmd_flag == "--roots":
                stdout = virsh.checkpoint_list(vm_name, cmd_flag,
                                               **virsh_dargs).stdout_text.strip()
                for checkpoint in current_checkpoints:
                    if checkpoint == current_checkpoints[0]:
                        if stdout.count(checkpoint) != 1:
                            logic_error = True
                    else:
                        if stdout.count(checkpoint) != 0:
                            logic_error = True
            elif cmd_flag == "--tree":
                stdout = virsh.checkpoint_list(vm_name, cmd_flag,
                                               **virsh_dargs).stdout_text.strip()
                lines = stdout.splitlines()
                prev_indent_num = -1
                for line in lines:
                    for checkpoint in current_checkpoints:
                        if checkpoint in line:
                            cur_indent_num = line.rstrip().count(" ")
                            if cur_indent_num <= prev_indent_num:
                                logic_error = True
                                break
                            prev_indent_num = cur_indent_num
            elif cmd_flag == "--name":
                stdout = virsh.checkpoint_list(vm_name, cmd_flag,
                                               **virsh_dargs).stdout_text.strip()
                checkpoint_names = stdout.splitlines()
                if not operator.eq(checkpoint_names, current_checkpoints):
                    logic_error = True
            elif cmd_flag == "--topological":
                stdout = virsh.checkpoint_list(vm_name, cmd_flag,
                                               **virsh_dargs).stdout_text.strip()
                for checkpoint in current_checkpoints:
                    if checkpoint not in stdout:
                        logic_error = True
            elif cmd_flag == "--from":
                cmd_options = cmd_flag + " " + current_checkpoints[0]
                stdout = virsh.checkpoint_list(vm_name, cmd_options,
                                               **virsh_dargs).stdout_text.strip()
                if (current_checkpoints[0] in stdout
                        or current_checkpoints[2] in stdout
                        or current_checkpoints[1] not in stdout):
                    logic_error = True
            elif cmd_flag == "--descendants":
                cmd_options = cmd_flag + " " + current_checkpoints[0]
                stdout = virsh.checkpoint_list(vm_name, cmd_options,
                                               **virsh_dargs).stdout_text.strip()
                if (current_checkpoints[0] in stdout
                        or current_checkpoints[1] not in stdout
                        or current_checkpoints[2] not in stdout):
                    logic_error = True
            elif cmd_flag == "--no-leaves":
                stdout = virsh.checkpoint_list(vm_name, cmd_flag,
                                               **virsh_dargs).stdout_text.strip()
                if (current_checkpoints[0] not in stdout
                        or current_checkpoints[1] not in stdout
                        or current_checkpoints[2] in stdout):
                    logic_error = True
            elif cmd_flag == "--leaves":
                stdout = virsh.checkpoint_list(vm_name, cmd_flag,
                                               **virsh_dargs).stdout_text.strip()
                if (current_checkpoints[0] in stdout
                        or current_checkpoints[1] in stdout
                        or current_checkpoints[2] not in stdout):
                    logic_error = True
            if logic_error:
                test.fail("checkpoint-list with '%s' gives wrong output"
                          % cmd_flag)
        elif checkpoint_cmd == "checkpoint-dumpxml":
            if "--size" in cmd_flag:
                if not libvirt_version.version_compare(6, 6, 0):
                    test.cancel("Current libvirt version doesn't support "
                                "'--size' for 'checkpoint-dumpxml'.")
                test_disk = new_disks_in_vm[-1]
                test_disk_path = "/dev/" + test_disk
                test_checkpoint = current_checkpoints[-1]
                dd_count = 1
                dd_bs = "1M"
                dd_seek = "10"
                dd_size = dd_count * 1024 * 1024
                session = vm.wait_for_login()
                utils_disk.dd_data_to_vm_disk(session, test_disk_path,
                                              bs=dd_bs, seek=dd_seek,
                                              count=str(dd_count))
                session.close()
                stdout = virsh.checkpoint_dumpxml(vm_name,
                                                  test_checkpoint + " --size",
                                                  **virsh_dargs).stdout_text.strip()
                re_pattern = ".*%s.*%s.*size.*" % (test_disk, test_checkpoint)
                size_info_line = re.search(re_pattern, stdout)
                if not size_info_line:
                    test.fail("There is no size info for disk:%s checkpoint:%s"
                              % (test_disk, test_checkpoint))
                if str(dd_size) not in size_info_line.group(0):
                    test.fail("Size info incorrect in checkpoint xml, "
                              "'dd_size' is %s, size info in xml is:%s"
                              % (dd_size, size_info_line.group(0)))
            elif "--security-info" in cmd_flag:
                if vm.is_alive():
                    vm.destroy(gracefully=False)
                password = "******"
                vm_xml.VMXML.set_graphics_attr(vm_name, {'passwd': password})
                vm.start()
                vm.wait_for_login().close()
                prepare_checkpoints()
                test_checkpoint = current_checkpoints[0]
                stdout = virsh.checkpoint_dumpxml(vm_name,
                                                  test_checkpoint,
                                                  **virsh_dargs).stdout_text.strip()
                if password in stdout:
                    logging.debug("checkpoint xml is: %s", stdout)
                    test.fail("Security info displayed in unsecurity dumpxml.")
                stdout = virsh.checkpoint_dumpxml(vm_name,
                                                  test_checkpoint + " --security-info",
                                                  **virsh_dargs).stdout_text.strip()
                if password not in stdout:
                    logging.debug("checkpoint xml is: %s", stdout)
                    test.fail("Security info not displayed in security dumpxml.")
        elif checkpoint_cmd == "virsh_list":
            stdout = virsh.dom_list(cmd_flag, **virsh_dargs).stdout_text.strip()
            if ((vm_name in stdout and cmd_flag == "--without-checkpoint") or
                    (vm_name not in stdout and cmd_flag == "--with-checkpoint")):
                test.fail("virsh list with '%s' contains wrong data" % cmd_flag)
        # Make sure vm is running and check checkpoints can be normally deleted
        if not vm.is_alive():
            vm.start()
            vm.wait_for_login().close()
        utils_backup.clean_checkpoints(vm_name, clean_metadata=False,
                                       ignore_status=False)
    finally:
        # Remove checkpoints
        utils_backup.clean_checkpoints(vm_name,
                                       clean_metadata=not vm.is_alive())

        if vm.is_alive():
            vm.destroy(gracefully=False)

        # Restoring vm
        vmxml_backup.sync()

        # Remove test image
        if "disk_path" in locals():
            if os.path.exists(disk_path):
                os.remove(disk_path)
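
The option string built by prepare_checkpoints in the example above is the core of the checkpoint-create-as usage: every disk gets a --diskspec entry, and the tracked disk carries a dirty bitmap named after the checkpoint. A minimal standalone sketch follows; the helper name and the virttest import path are assumptions for illustration.

from virttest import virsh  # assumed avocado-vt import path


def checkpoint_create_as_options(name, tracked_disk, skipped_disks=('vda',)):
    """Build options that skip some disks and track one disk with a bitmap named after the checkpoint."""
    opts = [name]
    for disk in skipped_disks:
        opts.append('--diskspec {},checkpoint=no'.format(disk))
    opts.append('--diskspec {0},checkpoint=bitmap,bitmap={1}'.format(tracked_disk, name))
    return ' '.join(opts)

# Example: virsh.checkpoint_create_as('vm1', checkpoint_create_as_options('cp0', 'vdb'), debug=True)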
Exemplo n.º 32
0
def run(test, params, env):
    """
    Test per-image DAC disk hotplug to VM.

    (1).Init variables for test.
    (2).Create disk xml with per-image DAC
    (3).Start VM
    (4).Attach the disk to VM and check result.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    # Get qemu.conf config variables
    qemu_user = params.get("qemu_user")
    qemu_group = params.get("qemu_group")
    dynamic_ownership = "yes" == params.get("dynamic_ownership", "yes")

    # Get per-image DAC setting
    vol_name = params.get('vol_name')
    target_dev = params.get('target_dev')
    disk_type_name = params.get("disk_type_name")
    img_user = params.get("img_user")
    img_group = params.get("img_group")
    relabel = 'yes' == params.get('relabel', 'yes')

    if not libvirt_version.version_compare(1, 2, 7):
        raise error.TestNAError("per-image DAC only supported on version 1.2.7"
                                " and after.")

    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)

    img_path = None
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        # set qemu conf
        qemu_conf.user = qemu_user
        qemu_conf.group = qemu_group
        if dynamic_ownership:
            qemu_conf.dynamic_ownership = 1
        else:
            qemu_conf.dynamic_ownership = 0
        logging.debug("the qemu.conf content is: %s" % qemu_conf)
        libvirtd.restart()

        first_disk = vm.get_first_disk_devices()
        blk_source = first_disk['source']
        owner_str = format_user_group_str(qemu_user, qemu_group)
        src_usr, src_grp = owner_str.split(':')
        os.chown(blk_source, int(src_usr), int(src_grp))
        vm.start()

        # Init a QemuImg instance and create a img.
        params['image_name'] = vol_name
        tmp_dir = data_dir.get_tmp_dir()
        image = qemu_storage.QemuImg(params, tmp_dir, vol_name)
        # Create a image.
        img_path, result = image.create(params)

        # Create disk xml for attach.
        params['source_file'] = img_path
        sec_label = "%s:%s" % (img_user, img_group)
        params['sec_label'] = sec_label
        params['type_name'] = disk_type_name
        sec_label_id = format_user_group_str(img_user, img_group)

        disk_xml = utlv.create_disk_xml(params)

        # Change img file to qemu:qemu and 660 mode
        os.chown(img_path, 107, 107)
        os.chmod(img_path, 0o660)

        img_label_before = check_ownership(img_path)
        if img_label_before:
            logging.debug("the image ownership before "
                          "attach: %s" % img_label_before)

        # Do the attach action.
        option = "--persistent"
        result = virsh.attach_device(vm_name,
                                     filearg=disk_xml,
                                     flagstr=option,
                                     debug=True)
        utlv.check_exit_status(result, status_error)

        if not result.exit_status:
            img_label_after = check_ownership(img_path)
            if dynamic_ownership and relabel:
                if img_label_after != sec_label_id:
                    raise error.TestFail("The image dac label %s is not "
                                         "expected." % img_label_after)

            ret = virsh.detach_disk(vm_name,
                                    target=target_dev,
                                    extra=option,
                                    debug=True)
            utlv.check_exit_status(ret, status_error)
    finally:
        # clean up
        vm.destroy()
        qemu_conf.restore()
        vmxml.sync()
        libvirtd.restart()
        if img_path and os.path.exists(img_path):
            os.unlink(img_path)
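
format_user_group_str is defined elsewhere in the suite; a plausible standalone equivalent, under the assumption that it resolves user/group names (or numeric strings) to the 'uid:gid' form compared against check_ownership above, using only the standard library:

import grp
import pwd


def format_user_group_str(user, group):
    """Return 'uid:gid' for names or numeric strings such as 'qemu' or '107'."""
    uid = user if user.isdigit() else str(pwd.getpwnam(user).pw_uid)
    gid = group if group.isdigit() else str(grp.getgrnam(group).gr_gid)
    return '{}:{}'.format(uid, gid)

# Example: format_user_group_str('qemu', 'qemu') typically returns '107:107' on a RHEL host.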
Exemplo n.º 33
0
def run(test, params, env):
    """
    Attach/Detach an iscsi network/volume disk to domain

    1. For secret usage testing:
        1.1. Setup an iscsi target with CHAP authentication.
        1.2. Define a secret for iscsi target usage
        1.3. Set secret value
    2. Setup an iscsi target (and an iscsi pool for the volume disk type)
    3. Create an iscsi network/volume disk XML
    4. Attach disk with the XML file and check the disk inside the VM
    5. Detach the disk
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    disk_device = params.get("disk_device", "disk")
    disk_type = params.get("disk_type", "network")
    disk_src_protocol = params.get("disk_source_protocol", "iscsi")
    disk_src_host = params.get("disk_source_host", "127.0.0.1")
    disk_src_port = params.get("disk_source_port", "3260")
    disk_src_pool = params.get("disk_source_pool")
    disk_src_mode = params.get("disk_source_mode", "host")
    pool_type = params.get("pool_type", "iscsi")
    pool_src_host = params.get("pool_source_host", "127.0.0.1")
    disk_target = params.get("disk_target", "vdb")
    disk_target_bus = params.get("disk_target_bus", "virtio")
    disk_readonly = params.get("disk_readonly", "no")
    chap_auth = "yes" == params.get("chap_auth", "no")
    chap_user = params.get("chap_username", "")
    chap_passwd = params.get("chap_password", "")
    secret_usage_target = params.get("secret_usage_target")
    secret_ephemeral = params.get("secret_ephemeral", "no")
    secret_private = params.get("secret_private", "yes")
    status_error = "yes" == params.get("status_error", "no")

    if disk_src_protocol == 'iscsi':
        if not libvirt_version.version_compare(1, 0, 4):
            raise error.TestNAError("'iscsi' disk doesn't support in" +
                                    " current libvirt version.")
    if disk_type == "volume":
        if not libvirt_version.version_compare(1, 0, 5):
            raise error.TestNAError("'volume' type disk doesn't support in" +
                                    " current libvirt version.")
    # Back VM XML
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    virsh_dargs = {'debug': True, 'ignore_status': True}
    try:
        if chap_auth:
            # Create a secret xml to define it
            secret_xml = SecretXML(secret_ephemeral, secret_private)
            secret_xml.auth_type = "chap"
            secret_xml.auth_username = chap_user
            secret_xml.usage = disk_src_protocol
            secret_xml.target = secret_usage_target
            logging.debug("Define secret by XML: %s",
                          open(secret_xml.xml).read())
            # Define secret
            cmd_result = virsh.secret_define(secret_xml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            # Get secret uuid
            try:
                secret_uuid = cmd_result.stdout.strip().split()[1]
            except IndexError:
                raise error.TestError("Fail to get new created secret uuid")

            # Set secret value
            secret_string = base64.b64encode(chap_passwd)
            cmd_result = virsh.secret_set_value(secret_uuid, secret_string,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
        else:
            # Set chap_user and chap_passwd to empty to avoid setup
            # CHAP authentication when export iscsi target
            chap_user = ""
            chap_passwd = ""

        # Setup iscsi target
        iscsi_target = libvirt.setup_or_cleanup_iscsi(is_setup=True,
                                                      is_login=False,
                                                      chap_user=chap_user,
                                                      chap_passwd=chap_passwd)
        # Create iscsi pool
        if disk_type == "volume":
            # Create an iscsi pool xml to create it
            pool_src_xml = pool_xml.SourceXML()
            pool_src_xml.host_name = pool_src_host
            pool_src_xml.device_path = iscsi_target
            poolxml = pool_xml.PoolXML(pool_type=pool_type)
            poolxml.name = disk_src_pool
            poolxml.set_source(pool_src_xml)
            poolxml.target_path = "/dev/disk/by-path"
            # Create iscsi pool
            cmd_result = virsh.pool_create(poolxml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            # Get volume name
            cmd_result = virsh.vol_list(disk_src_pool, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            try:
                vol_name = re.findall(r"(\S+)\ +(\S+)[\ +\n]",
                                      str(cmd_result.stdout))[1][0]
            except IndexError:
                raise error.TestError("Fail to get volume name")

        # Create iscsi network disk XML
        disk_params = {
            'device_type': disk_device,
            'type_name': disk_type,
            'target_dev': disk_target,
            'target_bus': disk_target_bus,
            'readonly': disk_readonly
        }
        disk_params_src = {}
        if disk_type == "network":
            disk_params_src = {
                'source_protocol': disk_src_protocol,
                'source_name': iscsi_target + "/1",
                'source_host_name': disk_src_host,
                'source_host_port': disk_src_port
            }
        elif disk_type == "volume":
            disk_params_src = {
                'source_pool': disk_src_pool,
                'source_volume': vol_name,
                'source_mode': disk_src_mode
            }
        else:
            error.TestNAError("Unsupport disk type in this test")
        disk_params.update(disk_params_src)
        if chap_auth:
            disk_params_auth = {
                'auth_user': chap_user,
                'secret_type': disk_src_protocol,
                'secret_usage': secret_xml.target
            }
            disk_params.update(disk_params_auth)
        disk_xml = libvirt.create_disk_xml(disk_params)

        start_vm = "yes" == params.get("start_vm", "yes")
        if start_vm:
            if vm.is_dead():
                vm.start()
        else:
            if not vm.is_dead():
                vm.destroy()
        attach_option = params.get("attach_option", "")
        with open(disk_xml) as disk_xml_f:
            disk_xml_content = disk_xml_f.read()
        logging.debug("Attach disk by XML: %s", disk_xml_content)
        cmd_result = virsh.attach_device(domainarg=vm_name,
                                         filearg=disk_xml,
                                         flagstr=attach_option,
                                         dargs=virsh_dargs)
        libvirt.check_exit_status(cmd_result, status_error)

        if vm.is_dead():
            cmd_result = virsh.start(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

        domain_operation = params.get("domain_operation", "")
        if domain_operation == "save":
            save_file = os.path.join(test.tmpdir, "vm.save")
            cmd_result = virsh.save(vm_name, save_file, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.restore(save_file)
            libvirt.check_exit_status(cmd_result)
            if os.path.exists(save_file):
                os.remove(save_file)
        elif domain_operation == "snapshot":
            # Run snapshot related commands: snapshot-create-as, snapshot-list
            # snapshot-info, snapshot-dumpxml, snapshot-create
            snapshot_name1 = "snap1"
            snapshot_name2 = "snap2"
            cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name1,
                                                  **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_info(vm_name, snapshot_name1,
                                             **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name1,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_create(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_current(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            sn_create_op = "%s --disk_ony %s" % (snapshot_name2, disk_target)
            cmd_result = virsh.snapshot_create_as(vm_name, sn_create_op,
                                                  **virsh_dargs)

            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.snapshot_revert(vm_name, snapshot_name1,
                                               **virsh_dargs)

            cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_delete(vm_name, snapshot_name2,
                                               **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
        else:
            logging.error("Unsupport operation %s in this case, so skip it",
                          domain_operation)

        def find_attach_disk(expect=True):
            """
            Find attached disk inside the VM
            """
            found_disk = False
            if vm.is_dead():
                raise error.TestError("Domain %s is not running" % vm_name)
            else:
                try:
                    session = vm.wait_for_login()
                    cmd = "grep %s /proc/partitions" % disk_target
                    s, o = session.cmd_status_output(cmd)
                    logging.info("%s output: %s", cmd, o)
                    session.close()
                    if s == 0:
                        found_disk = True
                except (LoginError, VMError, ShellError) as e:
                    logging.error(str(e))
            if found_disk == expect:
                logging.debug("Check disk inside the VM PASS as expected")
            else:
                raise error.TestError("Check disk inside the VM FAIL")

        # Check disk inside the VM, expect is False if status_error=True
        find_attach_disk(not status_error)

        # Detach disk
        cmd_result = virsh.detach_disk(vm_name, disk_target)
        libvirt.check_exit_status(cmd_result, status_error)

        # Check disk inside the VM
        find_attach_disk(False)
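
The external snapshot taken in the example above uses virsh snapshot-create-as with --disk-only and a per-disk diskspec. Below is a minimal standalone sketch of building those options; the helper name, the default directory, and the virttest import path are illustrative assumptions.

import os

from virttest import virsh  # assumed avocado-vt import path


def disk_only_snapshot_options(snap_name, disk_target, snap_dir='/var/tmp'):
    """Build snapshot-create-as options for an external, disk-only snapshot of one disk."""
    snap_file = os.path.join(snap_dir, '{}_{}.qcow2'.format(snap_name, disk_target))
    return '{0} --disk-only --diskspec {1},file={2}'.format(snap_name, disk_target, snap_file)

# Example: virsh.snapshot_create_as(vm_name, disk_only_snapshot_options('snap2', 'vdb'), debug=True)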
Exemplo n.º 34
0
def run(test, params, env):
    """
    Attach/Detach an iscsi network/volume disk to domain

    1. For secret usage testing:
        1.1. Setup an iscsi target with CHAP authentication.
        1.2. Define a secret for iscsi target usage
        1.3. Set secret value
    2. Setup an iscsi target (and an iscsi pool for the volume disk type)
    3. Create an iscsi network/volume disk XML
    4. Attach disk with the XML file and check the disk inside the VM
    5. Detach the disk
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    disk_device = params.get("disk_device", "disk")
    disk_type = params.get("disk_type", "network")
    disk_src_protocol = params.get("disk_source_protocol", "iscsi")
    disk_src_host = params.get("disk_source_host", "127.0.0.1")
    disk_src_port = params.get("disk_source_port", "3260")
    disk_src_pool = params.get("disk_source_pool")
    disk_src_mode = params.get("disk_source_mode", "host")
    pool_type = params.get("pool_type", "iscsi")
    pool_src_host = params.get("pool_source_host", "127.0.0.1")
    disk_target = params.get("disk_target", "vdb")
    disk_target_bus = params.get("disk_target_bus", "virtio")
    disk_readonly = params.get("disk_readonly", "no")
    chap_auth = "yes" == params.get("chap_auth", "no")
    chap_user = params.get("chap_username", "")
    chap_passwd = params.get("chap_password", "")
    secret_usage_target = params.get("secret_usage_target")
    secret_ephemeral = params.get("secret_ephemeral", "no")
    secret_private = params.get("secret_private", "yes")
    status_error = "yes" == params.get("status_error", "no")
    # Indicate the PPC platform
    on_ppc = False
    if platform.platform().count('ppc64'):
        on_ppc = True

    if disk_src_protocol == 'iscsi':
        if not libvirt_version.version_compare(1, 0, 4):
            test.cancel("'iscsi' disk doesn't support in"
                        " current libvirt version.")
    if disk_type == "volume":
        if not libvirt_version.version_compare(1, 0, 5):
            test.cancel("'volume' type disk doesn't support in"
                        " current libvirt version.")
    # Back VM XML
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    virsh_dargs = {'debug': True, 'ignore_status': True}
    try:
        start_vm = "yes" == params.get("start_vm", "yes")
        if start_vm:
            if vm.is_dead():
                vm.start()
            vm.wait_for_login()
        else:
            if not vm.is_dead():
                vm.destroy()

        if chap_auth:
            # Create a secret xml to define it
            secret_xml = SecretXML(secret_ephemeral, secret_private)
            secret_xml.auth_type = "chap"
            secret_xml.auth_username = chap_user
            secret_xml.usage = disk_src_protocol
            secret_xml.target = secret_usage_target
            with open(secret_xml.xml) as f:
                logging.debug("Define secret by XML: %s", f.read())
            # Define secret
            cmd_result = virsh.secret_define(secret_xml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            # Get secret uuid
            try:
                secret_uuid = cmd_result.stdout.strip().split()[1]
            except IndexError:
                test.error("Fail to get new created secret uuid")

            # Set secret value
            encoding = locale.getpreferredencoding()
            secret_string = base64.b64encode(chap_passwd.encode(encoding)).decode(encoding)
            cmd_result = virsh.secret_set_value(secret_uuid, secret_string,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
        else:
            # Set chap_user and chap_passwd to empty to avoid setup
            # CHAP authentication when export iscsi target
            chap_user = ""
            chap_passwd = ""

        # Setup iscsi target
        iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(is_setup=True,
                                                               is_login=False,
                                                               image_size='1G',
                                                               chap_user=chap_user,
                                                               chap_passwd=chap_passwd,
                                                               portal_ip=disk_src_host)
        # Create iscsi pool
        if disk_type == "volume":
            # Create an iscsi pool xml to create it
            pool_src_xml = pool_xml.SourceXML()
            pool_src_xml.host_name = pool_src_host
            pool_src_xml.device_path = iscsi_target
            poolxml = pool_xml.PoolXML(pool_type=pool_type)
            poolxml.name = disk_src_pool
            poolxml.set_source(pool_src_xml)
            poolxml.target_path = "/dev/disk/by-path"
            # Create iscsi pool
            cmd_result = virsh.pool_create(poolxml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            def get_vol():
                """Get the volume info"""
                # Refresh the pool
                cmd_result = virsh.pool_refresh(disk_src_pool)
                libvirt.check_exit_status(cmd_result)
                # Get volume name
                cmd_result = virsh.vol_list(disk_src_pool, **virsh_dargs)
                libvirt.check_exit_status(cmd_result)
                vol_list = []
                vol_list = re.findall(r"(\S+)\ +(\S+)",
                                      str(cmd_result.stdout.strip()))
                if len(vol_list) > 1:
                    return vol_list[1]
                else:
                    return None

            # Wait for a while so that we can get the volume info
            vol_info = utils_misc.wait_for(get_vol, 10)
            if vol_info:
                vol_name, vol_path = vol_info
            else:
                test.error("Failed to get volume info")
            # Snapshot doesn't support raw disk format, create a qcow2 volume
            # disk for snapshot operation.
            process.run('qemu-img create -f qcow2 %s %s' % (vol_path, '100M'),
                        shell=True)

        # Create iscsi network disk XML
        disk_params = {'device_type': disk_device,
                       'type_name': disk_type,
                       'target_dev': disk_target,
                       'target_bus': disk_target_bus,
                       'readonly': disk_readonly}
        disk_params_src = {}
        if disk_type == "network":
            disk_params_src = {'source_protocol': disk_src_protocol,
                               'source_name': iscsi_target + "/%s" % lun_num,
                               'source_host_name': disk_src_host,
                               'source_host_port': disk_src_port}
        elif disk_type == "volume":
            disk_params_src = {'source_pool': disk_src_pool,
                               'source_volume': vol_name,
                               'driver_type': 'qcow2',
                               'source_mode': disk_src_mode}
        else:
            test.cancel("Unsupport disk type in this test")
        disk_params.update(disk_params_src)
        if chap_auth:
            disk_params_auth = {'auth_user': chap_user,
                                'secret_type': disk_src_protocol,
                                'secret_usage': secret_xml.target}
            disk_params.update(disk_params_auth)
        disk_xml = libvirt.create_disk_xml(disk_params)
        attach_option = params.get("attach_option", "")
        cmd_result = virsh.attach_device(domainarg=vm_name, filearg=disk_xml,
                                         flagstr=attach_option,
                                         dargs=virsh_dargs)
        libvirt.check_exit_status(cmd_result, status_error)

        if vm.is_dead():
            cmd_result = virsh.start(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

        # Wait for domain is stable
        vm.wait_for_login().close()
        domain_operation = params.get("domain_operation", "")
        if domain_operation == "save":
            save_file = os.path.join(data_dir.get_tmp_dir(), "vm.save")
            cmd_result = virsh.save(vm_name, save_file, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.restore(save_file)
            libvirt.check_exit_status(cmd_result)
            if os.path.exists(save_file):
                os.remove(save_file)
        elif domain_operation == "snapshot":
            # Run snapshot related commands: snapshot-create-as, snapshot-list
            # snapshot-info, snapshot-dumpxml, snapshot-create
            snapshot_name1 = "snap1"
            snapshot_name2 = "snap2"
            cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name1,
                                                  **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            try:
                virsh.snapshot_list(vm_name, **virsh_dargs)
            except process.CmdError:
                test.fail("Failed getting snapshots list for %s" % vm_name)

            try:
                virsh.snapshot_info(vm_name, snapshot_name1, **virsh_dargs)
            except process.CmdError:
                test.fail("Failed getting snapshots info for %s" % vm_name)

            cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name1,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_create(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_current(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            snapshot_file = os.path.join(data_dir.get_tmp_dir(), snapshot_name2)
            sn_create_op = ("%s --disk-only --diskspec %s,file=%s"
                            % (snapshot_name2, disk_target, snapshot_file))
            cmd_result = virsh.snapshot_create_as(vm_name, sn_create_op,
                                                  **virsh_dargs)

            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.snapshot_revert(vm_name, snapshot_name1,
                                               **virsh_dargs)

            cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs)
            if snapshot_name2 not in cmd_result.stdout_text:
                test.error("Snapshot %s not found" % snapshot_name2)
        elif domain_operation == "":
            logging.debug("No domain operation provided, so skip it")
        else:
            logging.error("Unsupport operation %s in this case, so skip it",
                          domain_operation)

        def find_attach_disk(expect=True):
            """
            Find attached disk inside the VM
            """
            found_disk = False
            if vm.is_dead():
                test.error("Domain %s is not running" % vm_name)
            else:
                try:
                    session = vm.wait_for_login()
                    # Here the script needs to wait for a while for the guest to
                    # recognize the hotplugged disk on PPC
                    if on_ppc:
                        time.sleep(10)
                    cmd = "grep %s /proc/partitions" % disk_target
                    s, o = session.cmd_status_output(cmd)
                    logging.info("%s output: %s", cmd, o)
                    session.close()
                    if s == 0:
                        found_disk = True
                except (LoginError, VMError, ShellError) as e:
                    logging.error(str(e))
            if found_disk == expect:
                logging.debug("Check disk inside the VM PASS as expected")
            else:
                test.error("Check disk inside the VM FAIL")

        # Check disk inside the VM, expect is False if status_error=True
        find_attach_disk(not status_error)

        # Detach disk
        cmd_result = virsh.detach_disk(vm_name, disk_target)
        libvirt.check_exit_status(cmd_result, status_error)

        # Check disk inside the VM
        find_attach_disk(False)

    finally:
        # Clean up snapshot
        # Shut down before cleaning up snapshots
        if vm.is_alive():
            vm.destroy()
        libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)
        # Restore vm
        vmxml_backup.sync("--snapshots-metadata")
        # Destroy pool and undefine secret, which may not exist
        try:
            if disk_type == "volume":
                virsh.pool_destroy(disk_src_pool)
            if chap_auth:
                virsh.secret_undefine(secret_uuid)
        except Exception:
            pass
        libvirt.setup_or_cleanup_iscsi(is_setup=False)
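
The CHAP pieces above (encoding the secret value and passing auth keys to create_disk_xml) can be condensed into a small helper. The following is a minimal sketch under the same assumptions about the avocado-vt helpers used in the example; the function name and the example values in the usage comment are illustrative only.

import base64
import locale

from virttest import virsh  # assumed avocado-vt import paths
from virttest.utils_test import libvirt


def set_chap_secret_and_auth(secret_uuid, chap_user, chap_passwd, usage_target):
    """Store the CHAP password in the libvirt secret and return auth keys for the disk XML."""
    encoding = locale.getpreferredencoding()
    secret_string = base64.b64encode(chap_passwd.encode(encoding)).decode(encoding)
    libvirt.check_exit_status(virsh.secret_set_value(secret_uuid, secret_string, debug=True))
    return {'auth_user': chap_user,
            'secret_type': 'iscsi',
            'secret_usage': usage_target}

# Example: disk_params.update(set_chap_secret_and_auth(secret_uuid, chap_user, chap_passwd, secret_usage_target))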
                                                "driver", "cache")
        if cache_options == "default":
            if disk_cache is not None:
                check_cache_after_cmd = False
        elif disk_cache != cache_options:
            check_cache_after_cmd = False

    # Eject cdrom test
    eject_cdrom = "yes" == params.get("at_dt_disk_eject_cdrom", "no")
    save_vm = "yes" == params.get("at_dt_disk_save_vm", "no")
    save_file = os.path.join(test.tmpdir, "vm.save")
    try:
        if eject_cdrom:
            eject_params = {'type_name': "file", 'device_type': "cdrom",
                            'target_dev': device_target, 'target_bus': device_disk_bus}
            eject_xml = libvirt.create_disk_xml(eject_params)
            logging.debug("Eject CDROM by XML: %s", open(eject_xml).read())
            # Run the command twice to make sure the cdrom tray opens first #BZ892289
            # Open tray
            virsh.attach_device(domainarg=vm_name, filearg=eject_xml, debug=True)
            # Add time sleep between two attach commands.
            if time_sleep:
                time.sleep(float(time_sleep))
            # Eject cdrom
            result = virsh.attach_device(domainarg=vm_name, filearg=eject_xml,
                                         debug=True)
            if result.exit_status != 0:
                raise error.TestFail("Eject CDROM failed")
            if vm_xml.VMXML.check_disk_exist(vm_name, device_source):
                raise error.TestFail("Find %s after do eject" % device_source)
        # Save and restore VM
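
The eject logic in the fragment above works by attaching a source-less cdrom disk XML for the same target twice: once to open the tray and once to eject the media. A minimal standalone sketch, under the same avocado-vt import assumptions; the helper name and the default target/bus values are illustrative.

import time

from virttest import virsh  # assumed avocado-vt import paths
from virttest.utils_test import libvirt


def eject_cdrom(vm_name, target_dev='hdc', target_bus='ide', pause=2):
    """Eject cdrom media by attaching a source-less cdrom XML for the same target twice."""
    eject_params = {'type_name': 'file', 'device_type': 'cdrom',
                    'target_dev': target_dev, 'target_bus': target_bus}
    eject_xml = libvirt.create_disk_xml(eject_params)
    # First attach opens the tray, the second one ejects the media (see the BZ noted above).
    virsh.attach_device(domainarg=vm_name, filearg=eject_xml, debug=True)
    time.sleep(pause)
    return virsh.attach_device(domainarg=vm_name, filearg=eject_xml, debug=True)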
def run(test, params, env):
    """
    Test virsh {at|de}tach-device command.

    The command can attach a new disk or detach a device.
    1. Prepare the test environment, destroy or suspend a VM.
    2. Perform the virsh attach/detach-device operation.
    3. Recover the test environment.
    4. Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    pre_vm_state = params.get("at_dt_device_pre_vm_state", "running")
    virsh_dargs = {'debug': True, 'ignore_status': True}

    def is_attached(vmxml_devices, disk_type, source_file, target_dev):
        """
        Check attached device and disk exist or not.

        :param vmxml_devices: VMXMLDevices instance
        :param disk_type: disk's device type: cdrom or floppy
        :param source_file : disk's source file to check
        :param target_dev : target device name
        :return: True/False if backing file and device found
        """
        disks = vmxml_devices.by_device_tag('disk')
        for disk in disks:
            if disk.device != disk_type:
                continue
            if disk.target['dev'] != target_dev:
                continue
            if disk.xmltreefile.find('source') is not None:
                if disk.source.attrs['file'] != source_file:
                    continue
            else:
                continue
            # All three conditions met
            logging.debug("Find %s in given disk XML", source_file)
            return True
        logging.debug("Not find %s in gievn disk XML", source_file)
        return False

    def check_result(disk_source, disk_type, disk_target, flags, attach=True):
        """
        Check the test result of attach/detach-device command.
        """
        vm_state = pre_vm_state
        active_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        if not attach:
            utils_misc.wait_for(
                lambda: not is_attached(active_vmxml.devices, disk_type,
                                        disk_source, disk_target), 20)
        active_attached = is_attached(active_vmxml.devices, disk_type,
                                      disk_source, disk_target)
        if vm_state != "transient":
            inactive_vmxml = vm_xml.VMXML.new_from_dumpxml(
                vm_name, options="--inactive")
            inactive_attached = is_attached(inactive_vmxml.devices, disk_type,
                                            disk_source, disk_target)

        if flags.count("config") and not flags.count("live"):
            if vm_state != "transient":
                if attach:
                    if not inactive_attached:
                        raise exceptions.TestFail(
                            "Inactive domain XML not updated"
                            " when --config options used for"
                            " attachment")
                    if vm_state != "shutoff":
                        if active_attached:
                            raise exceptions.TestFail(
                                "Active domain XML updated "
                                "when --config options used "
                                "for attachment")
                else:
                    if inactive_attached:
                        raise exceptions.TestFail(
                            "Inactive domain XML not updated"
                            " when --config options used for"
                            " detachment")
        elif flags.count("live") and not flags.count("config"):
            if attach:
                if vm_state in ["paused", "running", "transient"]:
                    if not active_attached:
                        raise exceptions.TestFail(
                            "Active domain XML not updated"
                            " when --live options used for"
                            " attachment")
                if vm_state in ["paused", "running"]:
                    if inactive_attached:
                        raise exceptions.TestFail(
                            "Inactive domain XML updated "
                            "when --live options used for"
                            " attachment")
            else:
                if vm_state in ["paused", "running", "transient"]:
                    if active_attached:
                        raise exceptions.TestFail(
                            "Active domain XML not updated"
                            " when --live options used for"
                            " detachment")
        elif flags.count("live") and flags.count("config"):
            if attach:
                if vm_state in ["paused", "running"]:
                    if not active_attached:
                        raise exceptions.TestFail(
                            "Active domain XML not updated"
                            " when --live --config options"
                            " used for attachment")
                    if not inactive_attached:
                        raise exceptions.TestFail(
                            "Inactive domain XML not updated"
                            " when --live --config options "
                            "used for attachment")
            else:
                if vm_state in ["paused", "running"]:
                    if active_attached:
                        raise exceptions.TestFail(
                            "Active domain XML not updated "
                            "when --live --config options "
                            "used for detachment")
                    if inactive_attached:
                        raise exceptions.TestFail(
                            "Inactive domain XML updated "
                            "when --live --config options"
                            " used for detachment")
        elif flags.count("current") or flags == "":
            if attach:
                if vm_state in ["paused", "running", "transient"]:
                    if not active_attached:
                        raise exceptions.TestFail(
                            "Active domain XML not updated"
                            " when --current options used "
                            "for attachment")
                if vm_state in ["paused", "running"]:
                    if inactive_attached:
                        raise exceptions.TestFail(
                            "Inactive domain XML updated "
                            "when --current options used "
                            "for live attachment")
                if vm_state == "shutoff" and not inactive_attached:
                    raise exceptions.TestFail(
                        "Inactive domain XML not updated"
                        " when --current options used for"
                        " attachment")
            else:
                if vm_state in ["paused", "running", "transient"]:
                    if active_attached:
                        raise exceptions.TestFail(
                            "Active domain XML not updated"
                            " when --current options used "
                            "for detachment")
                if vm_state == "shutoff" and inactive_attached:
                    raise exceptions.TestFail(
                        "Inactive domain XML not updated"
                        " when --current options used for"
                        " detachment")

    vm_ref = params.get("at_dt_device_vm_ref", "name")
    at_status_error = "yes" == params.get("at_status_error", 'no')
    dt_status_error = "yes" == params.get("dt_status_error", 'no')

    # Disk specific attributes.
    at_options = params.get("at_dt_device_at_options", "")
    dt_options = params.get("at_dt_device_dt_options", "")
    device = params.get("at_dt_device_device", "disk")
    device_source_name = params.get("at_dt_device_source", "attach.img")
    device_target = params.get("at_dt_device_target", "vdd")

    if vm.is_alive():
        vm.destroy(gracefully=False)
    # Back up xml file.
    backup_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Turn VM into certain state.
    if pre_vm_state == "running":
        logging.info("Starting %s..." % vm_name)
        if vm.is_dead():
            vm.start()
            vm.wait_for_login().close()
    elif pre_vm_state == "shutoff":
        logging.info("Shutting down %s..." % vm_name)
        if vm.is_alive():
            vm.destroy(gracefully=False)
    elif pre_vm_state == "paused":
        logging.info("Pausing %s..." % vm_name)
        if vm.is_dead():
            vm.start()
            vm.wait_for_login().close()
        if not vm.pause():
            raise exceptions.TestSkipError("Can't pause the domain")
    elif pre_vm_state == "transient":
        logging.info("Creating %s..." % vm_name)
        vm.undefine()
        if virsh.create(backup_xml.xml, **virsh_dargs).exit_status:
            backup_xml.define()
            raise exceptions.TestSkipError("Can't create the domain")
        vm.wait_for_login().close()

    # Test.
    domid = vm.get_id()
    domuuid = vm.get_uuid()
    wait_stable = 10

    # Confirm how to reference a VM.
    if vm_ref == "name":
        vm_ref = vm_name
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref == "uuid":
        vm_ref = domuuid
    else:
        vm_ref = ""

    try:
        device_source = os.path.join(data_dir.get_tmp_dir(),
                                     device_source_name)
        libvirt.create_local_disk("file", device_source, "1")

        # Get disk xml file.
        disk_params = {
            'type_name': "file",
            'device_type': device,
            'target_dev': device_target,
            'target_bus': "virtio",
            'source_file': device_source,
            'driver_name': "qemu",
            'driver_type': "raw"
        }
        disk_xml = libvirt.create_disk_xml(disk_params)

        # Attach the disk.
        ret = virsh.attach_device(vm_ref,
                                  disk_xml,
                                  flagstr=at_options,
                                  debug=True)
        libvirt.check_exit_status(ret, at_status_error)

        # Check whether the command took effect in the config file.
        if vm.is_paused():
            vm.resume()
            vm.wait_for_login().close()

        # Sleep a while until the vm is stable
        time.sleep(wait_stable)
        if not ret.exit_status:
            check_result(device_source, device, device_target, at_options)

        # Detach the disk.
        if pre_vm_state == "paused":
            if not vm.pause():
                raise exceptions.TestFail("Can't pause the domain")
        disk_xml = libvirt.create_disk_xml(disk_params)
        ret = virsh.detach_device(vm_ref,
                                  disk_xml,
                                  flagstr=dt_options,
                                  debug=True)
        libvirt.check_exit_status(ret, dt_status_error)

        # Check whether the command took effect in the config file.
        if vm.is_paused():
            vm.resume()
            vm.wait_for_login().close()

        # Sleep a while until the vm is stable
        time.sleep(wait_stable)
        if not ret.exit_status:
            check_result(device_source, device, device_target, dt_options,
                         False)

        # Finally, try to start the vm.
        if vm.is_dead():
            vm.start()
            vm.wait_for_login().close()

    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()

        if os.path.exists(device_source):
            os.remove(device_source)
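
The example above builds the disk XML with libvirt.create_disk_xml() and then
exercises virsh attach-device/detach-device with different --live/--config flag
combinations. Below is a minimal, standalone sketch of that round trip; it
assumes the same avocado-vt virttest modules used above and a hypothetical
running guest named "avocado-vt-vm1" with a writable /var/tmp.

import os

from virttest import virsh
from virttest.utils_test import libvirt


def attach_and_detach_demo(vm_name="avocado-vt-vm1",
                           image_path="/var/tmp/attach.img"):
    """Create a small raw image, attach it to a running vm and detach it."""
    libvirt.create_local_disk("file", image_path, "1")
    disk_params = {'type_name': "file",
                   'device_type': "disk",
                   'target_dev': "vdd",
                   'target_bus': "virtio",
                   'source_file': image_path,
                   'driver_name': "qemu",
                   'driver_type': "raw"}
    disk_xml = libvirt.create_disk_xml(disk_params)
    # Attach both live and persistently, then detach with the same XML
    ret = virsh.attach_device(vm_name, disk_xml, flagstr="--live --config",
                              debug=True)
    libvirt.check_exit_status(ret)
    ret = virsh.detach_device(vm_name, disk_xml, flagstr="--live --config",
                              debug=True)
    libvirt.check_exit_status(ret)
    os.remove(image_path)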
Exemplo n.º 37
0
def run(test, params, env):
    """
    Integration test of backup and backing_chain.

    Steps:
    1. create a vm with an extra disk vdb
    2. create some data on vdb
    3. start a pull mode full backup on vdb
    4. create some data on vdb
    5. start a pull mode incremental backup
    6. repeat steps 4 and 5 several times
    7. before the last round of backup job, do a blockcommit/pull/copy
    8. check the full/incremental backup file data
    """
    def run_blk_cmd():
        """
        Run blockcommit/blockpull/blockcopy command.
        """
        def run_blockpull():
            """
            Run blockpull command.
            """
            if from_to == "mid_to_top":
                cmd_option = ("--base {0}[{1}] --wait").format(
                    original_disk_target, middle_layer1_index)
            elif from_to == "base_to_top":
                cmd_option = ("--base {0}[{1}] --wait").format(
                    original_disk_target, base_layer_index)
            virsh.blockpull(vm_name,
                            original_disk_target,
                            cmd_option,
                            debug=True,
                            ignore_status=False)

        def run_blockcommit():
            """
            Run blockcommit command.
            """
            if from_to == "top_to_base":
                # Do blockcommit from top layer to base layer
                cmd_option = (
                    "--top {0}[{1}] --base {0}[{2}] --active --pivot "
                    "--wait".format(original_disk_target, top_layer_index,
                                    base_layer_index))

            elif from_to == "mid_to_mid":
                # Do blockcommit from middle layer to another middle layer
                if len(indice) < 4:
                    test.fail(
                        "At lease 4 layers required for the test 'mid_to_mid'")
                cmd_option = ("--top {0}[{1}] --base {0}[{2}] "
                              "--wait".format(original_disk_target,
                                              middle_layer1_index,
                                              middle_layer2_index))
            elif from_to == "top_to_mid":
                # Do blockcommit from top layer to middle layer
                cmd_option = (
                    "--top {0}[{1}] --base {0}[{2}] --active --pivot "
                    "--wait".format(original_disk_target, top_layer_index,
                                    middle_layer1_index))
            elif from_to == "mid_to_base":
                # Do blockcommit from middle layer to base layer
                cmd_option = ("--top {0}[{1}] --base {0}[{2}] "
                              "--wait".format(original_disk_target,
                                              middle_layer1_index,
                                              base_layer_index))
            virsh.blockcommit(vm_name,
                              original_disk_target,
                              cmd_option,
                              debug=True,
                              ignore_status=False)

        def run_blockcopy():
            """
            Run blockcopy command.
            """
            copy_dest = os.path.join(tmp_dir, "copy_dest.qcow2")
            cmd_option = "--wait --verbose --transient-job --pivot"
            if blockcopy_method == "shallow_copy":
                cmd_option += " --shallow"
            if blockcopy_reuse == "reuse_external":
                cmd_option += " --reuse-external"
                if blockcopy_method == "shallow_copy":
                    create_img_cmd = "qemu-img create -f qcow2 -F qcow2 -b %s %s"
                    create_img_cmd %= (backend_img, copy_dest)
                else:
                    create_img_cmd = "qemu-img create -f qcow2 %s %s"
                    create_img_cmd %= (copy_dest, original_disk_size)
                process.run(create_img_cmd, shell=True, ignore_status=False)
            virsh.blockcopy(vm_name,
                            original_disk_target,
                            copy_dest,
                            cmd_option,
                            debug=True,
                            ignore_status=False)

        # Get disk backing store indice info in vm disk xml
        cur_vm_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        cur_disk_xmls = cur_vm_xml.get_devices(device_type="disk")
        cur_test_disk_xml = ''
        for disk_xml in cur_disk_xmls:
            if disk_xml.target['dev'] == original_disk_target:
                cur_test_disk_xml = disk_xml
                logging.debug("Current disk xml for %s is:\n %s",
                              original_disk_target, cur_test_disk_xml)
                break
        indice = re.findall(r".*index=['|\"](\d+)['|\"].*",
                            str(cur_test_disk_xml))
        logging.debug("backing store indice for %s is: %s",
                      original_disk_target, indice)
        if len(indice) < 3:
            test.fail("At least 3 layers required for the test.")
        top_layer_index = indice[0]
        middle_layer1_index = indice[1]
        middle_layer2_index = indice[-2]
        base_layer_index = indice[-1]
        logging.debug(
            "Following backing store will be used: %s",
            "top:%s; middle_1: %s, middle_2:%s, base: %s" %
            (top_layer_index, middle_layer1_index, middle_layer2_index,
             base_layer_index))
        # Start the block command
        if blockcommand == "blockpull":
            run_blockpull()
        if blockcommand == "blockcommit":
            run_blockcommit()
        if blockcommand == "blockcopy":
            run_blockcopy()

    def create_shutoff_snapshot(original_img, snapshot_img):
        """
        Create shutoff snapshot, which means the disk snapshot is not controlled
        by libvirt, but created directly with qemu-img commands.

        :param original_img: The image we will take shutoff snapshot for.
        :param snapshot_img: The newly created shutoff snapshot image.
        """
        cmd = "qemu-img info --output=json -f qcow2 {}".format(original_img)
        img_info = process.run(cmd, shell=True,
                               ignore_status=False).stdout_text
        json_data = json.loads(img_info)
        cmd = "qemu-img create -f qcow2 -F qcow2 -b {0} {1}".format(
            original_img, snapshot_img)
        process.run(cmd, shell=True, ignore_status=False)
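        # Copy the persistent ('auto') bitmaps that are not currently in use
        # into the new overlay, so the checkpoint bitmaps survive the shutoff
        # snapshot and incremental backups keep working.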
        try:
            bitmaps = json_data['format-specific']['data']['bitmaps']
            for bitmap in bitmaps:
                bitmap_flags = bitmap['flags']
                bitmap_name = bitmap['name']
                if 'auto' in bitmap_flags and 'in-use' not in bitmap_flags:
                    cmd = "qemu-img bitmap -f qcow2 {0} --add {1}".format(
                        snapshot_img, bitmap_name)
                    process.run(cmd, shell=True, ignore_status=False)
        except Exception as bitmap_error:
            logging.debug("Cannot add bitmap to new image, skip it: %s",
                          bitmap_error)

    # Cancel the test if libvirt version is too low
    if not libvirt_version.version_compare(6, 0, 0):
        test.cancel("Current libvirt version doesn't support "
                    "incremental backup.")

    # vm's original disk config
    original_disk_size = params.get("original_disk_size", "100M")
    original_disk_type = params.get("original_disk_type", "local")
    original_disk_target = params.get("original_disk_target", "vdb")

    # pull mode backup config
    scratch_type = params.get("scratch_type", "file")
    nbd_protocol = params.get("nbd_protocol", "tcp")
    nbd_tcp_port = params.get("nbd_tcp_port", "10809")

    # test config
    backup_rounds = int(params.get("backup_rounds", 4))
    shutoff_snapshot = "yes" == params.get("shutoff_snapshot")
    blockcommand = params.get("blockcommand")
    from_to = params.get("from_to")
    blockcopy_method = params.get("blockcopy_method")
    blockcopy_reuse = params.get("blockcopy_reuse")
    backup_error = "yes" == params.get("backup_error")
    tmp_dir = data_dir.get_tmp_dir()

    try:
        vm_name = params.get("main_vm")
        vm = env.get_vm(vm_name)

        # Make sure there is no checkpoint metadata before test
        utils_backup.clean_checkpoints(vm_name)

        # Backup vm xml
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        vmxml_backup = vmxml.copy()
        disks_not_tested = list(vmxml.get_disk_all().keys())
        logging.debug("Not tested disks are: %s", disks_not_tested)
        utils_backup.enable_inc_backup_for_vm(vm)

        # Destroy vm before test
        if vm.is_alive():
            vm.destroy(gracefully=False)

        # Prepare the disk to be backed up.
        disk_params = {}
        disk_path = ""
        if original_disk_type == "local":
            image_name = "%s_image.qcow2" % original_disk_target
            disk_path = os.path.join(tmp_dir, image_name)
            libvirt.create_local_disk("file", disk_path, original_disk_size,
                                      "qcow2")
            disk_params = {
                "device_type": "disk",
                "type_name": "file",
                "driver_type": "qcow2",
                "target_dev": original_disk_target,
                "source_file": disk_path
            }
            if original_disk_target:
                disk_params["target_dev"] = original_disk_target
        else:
            logging.cancel("The disk type '%s' not supported in this script.",
                           original_disk_type)
        disk_xml = libvirt.create_disk_xml(disk_params)
        virsh.attach_device(vm.name, disk_xml, flagstr="--config", debug=True)
        vm.start()
        session = vm.wait_for_login()
        new_disks_in_vm = list(utils_disk.get_linux_disks(session).keys())
        session.close()
        if len(new_disks_in_vm) != 1:
            test.fail("Test disk not prepared in vm")

        # Use the newly added disk as the test disk
        test_disk_in_vm = "/dev/" + new_disks_in_vm[0]

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        vm_disks = list(vmxml.get_disk_all().keys())

        checkpoint_list = []
        is_incremental = False
        backup_file_list = []
        snapshot_list = []
        cur_disk_xml = disk_xml
        cur_disk_path = disk_path
        cur_disk_params = disk_params
        backend_img = ""
        for backup_index in range(backup_rounds):
            # Do external snapshot
            if shutoff_snapshot:
                virsh.detach_disk(vm.name,
                                  original_disk_target,
                                  extra="--persistent",
                                  ignore_status=False,
                                  debug=True)
                if vm.is_alive():
                    vm.destroy(gracefully=False)
                shutoff_snapshot_name = "shutoff_snap_%s" % str(backup_index)
                shutoff_snapshot_path = os.path.join(tmp_dir,
                                                     shutoff_snapshot_name)

                create_shutoff_snapshot(cur_disk_path, shutoff_snapshot_path)
                cur_disk_params["source_file"] = shutoff_snapshot_path
                cur_disk_xml = libvirt.create_disk_xml(cur_disk_params)
                virsh.attach_device(vm.name,
                                    cur_disk_xml,
                                    flagstr="--config",
                                    ignore_status=False,
                                    debug=True)
                vm.start()
                vm.wait_for_login().close()
                cur_disk_path = shutoff_snapshot_path
            else:
                snapshot_name = "snap_%s" % str(backup_index)
                snapshot_option = ""
                snapshot_file_name = os.path.join(tmp_dir, snapshot_name)
                for disk_name in disks_not_tested:
                    snapshot_option += "--diskspec %s,snapshot=no " % disk_name
                snapshot_option += "--diskspec %s,file=%s" % (
                    original_disk_target, snapshot_file_name)
                virsh.snapshot_create_as(vm_name,
                                         "%s --disk-only %s" %
                                         (snapshot_name, snapshot_option),
                                         debug=True)
                snapshot_list.append(snapshot_name)

            # Prepare backup xml
            backup_params = {"backup_mode": "pull"}
            if backup_index > 0:
                is_incremental = True
                backup_params["backup_incremental"] = "checkpoint_" + str(
                    backup_index - 1)

            # Set libvirt default nbd export name and bitmap name
            nbd_export_name = original_disk_target
            nbd_bitmap_name = "backup-" + original_disk_target

            backup_server_dict = {"name": "localhost", "port": nbd_tcp_port}
            backup_params["backup_server"] = backup_server_dict
            backup_disk_xmls = []
            for vm_disk in vm_disks:
                backup_disk_params = {"disk_name": vm_disk}
                if vm_disk != original_disk_target:
                    backup_disk_params["enable_backup"] = "no"
                else:
                    backup_disk_params["enable_backup"] = "yes"
                    backup_disk_params["disk_type"] = scratch_type

                    # Prepare nbd scratch file/dev params
                    scratch_params = {"attrs": {}}
                    scratch_file_name = "scratch_file_%s" % backup_index
                    scratch_file_path = os.path.join(tmp_dir,
                                                     scratch_file_name)
                    scratch_params["attrs"]["file"] = scratch_file_path
                    logging.debug("scratch_params: %s", scratch_params)
                    backup_disk_params["backup_scratch"] = scratch_params
                backup_disk_xml = utils_backup.create_backup_disk_xml(
                    backup_disk_params)
                backup_disk_xmls.append(backup_disk_xml)
            logging.debug("disk list %s", backup_disk_xmls)
            backup_xml = utils_backup.create_backup_xml(
                backup_params, backup_disk_xmls)
            logging.debug("ROUND_%s Backup Xml: %s", backup_index, backup_xml)

            # Prepare checkpoint xml
            checkpoint_name = "checkpoint_%s" % backup_index
            checkpoint_list.append(checkpoint_name)
            cp_params = {"checkpoint_name": checkpoint_name}
            cp_params["checkpoint_desc"] = params.get(
                "checkpoint_desc", "desc of cp_%s" % backup_index)
            disk_param_list = []
            for vm_disk in vm_disks:
                cp_disk_param = {"name": vm_disk}
                if vm_disk != original_disk_target:
                    cp_disk_param["checkpoint"] = "no"
                else:
                    cp_disk_param["checkpoint"] = "bitmap"
                    cp_disk_bitmap = params.get("cp_disk_bitmap")
                    if cp_disk_bitmap:
                        cp_disk_param["bitmap"] = cp_disk_bitmap + str(
                            backup_index)
                disk_param_list.append(cp_disk_param)
            checkpoint_xml = utils_backup.create_checkpoint_xml(
                cp_params, disk_param_list)
            logging.debug("ROUND_%s Checkpoint Xml: %s", backup_index,
                          checkpoint_xml)

            # Assemble the options for 'virsh backup-begin'
            backup_options = backup_xml.xml + " " + checkpoint_xml.xml

            # Create some data in vdb
            dd_count = "1"
            dd_seek = str(backup_index * 10 + 10)
            dd_bs = "1M"
            session = vm.wait_for_login()
            utils_disk.dd_data_to_vm_disk(session, test_disk_in_vm, dd_bs,
                                          dd_seek, dd_count)
            session.close()

            backup_result = virsh.backup_begin(vm_name,
                                               backup_options,
                                               debug=True)
            if backup_result.exit_status:
                raise utils_backup.BackupBeginError(
                    backup_result.stderr.strip())

            backup_file_path = os.path.join(
                tmp_dir, "backup_file_%s.qcow2" % str(backup_index))
            backup_file_list.append(backup_file_path)
            nbd_params = {
                "nbd_protocol": nbd_protocol,
                "nbd_hostname": "localhost",
                "nbd_export": nbd_export_name,
                "nbd_tcp_port": nbd_tcp_port
            }
            if not is_incremental:
                # Do full backup
                utils_backup.pull_full_backup_to_file(nbd_params,
                                                      backup_file_path)
                logging.debug("Full backup to: %s", backup_file_path)
            else:
                # Do incremental backup
                utils_backup.pull_incremental_backup_to_file(
                    nbd_params, backup_file_path, nbd_bitmap_name,
                    original_disk_size)
            virsh.domjobabort(vm_name, debug=True)
            # Start to run the blockcommit/blockpull cmd before the last round
            # of backup job, this is to test if the block command will keep the
            # dirty bitmap data.
            if backup_index == backup_rounds - 2:
                run_blk_cmd()
                cur_disk_path = vm.get_blk_devices(
                )[original_disk_target]['source']

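            # Record the active image one round before the block command runs;
            # it is used as the backing file when blockcopy is invoked with
            # '--shallow --reuse-external'.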
            if backup_index == backup_rounds - 3:
                backend_img = vm.get_blk_devices(
                )[original_disk_target]['source']

        # Get current active image for the test disk
        vm_disks = vm.get_blk_devices()
        current_active_image = vm_disks[original_disk_target]['source']
        logging.debug("The current active image for '%s' is '%s'",
                      original_disk_target, current_active_image)

        for checkpoint_name in checkpoint_list:
            virsh.checkpoint_delete(vm_name,
                                    checkpoint_name,
                                    debug=True,
                                    ignore_status=False)
        if vm.is_alive():
            vm.destroy(gracefully=False)

        # Compare the backup data and original data

        original_data_file = os.path.join(tmp_dir, "original_data.qcow2")
        cmd = "qemu-img convert -f qcow2 %s -O qcow2 %s" % (
            current_active_image, original_data_file)
        process.run(cmd, shell=True, verbose=True)
        for backup_file in backup_file_list:
            if not utils_backup.cmp_backup_data(original_data_file,
                                                backup_file):
                test.fail("Backup and original data are not identical for"
                          "'%s' and '%s'" %
                          (current_active_image, backup_file))
            else:
                logging.debug("'%s' contains correct backup data", backup_file)
    except utils_backup.BackupBeginError as details:
        if backup_error:
            logging.debug("Backup failed as expected.")
        else:
            test.fail(details)
    finally:
        # Remove checkpoints' metadata again to make sure vm has no checkpoints
        if "checkpoint_list" in locals():
            for checkpoint_name in checkpoint_list:
                virsh.checkpoint_delete(vm_name,
                                        checkpoint_name,
                                        options="--metadata")
        # Remove snapshots
        if "snapshot_list" in locals():
            for snapshot_name in snapshot_list:
                virsh.snapshot_delete(vm_name,
                                      "%s --metadata" % snapshot_name,
                                      debug=True)

        if vm.is_alive():
            vm.destroy(gracefully=False)

        # Restoring vm
        vmxml_backup.sync()

        for file_name in os.listdir(tmp_dir):
            file_path = os.path.join(tmp_dir, file_name)
            if 'env' not in file_path:
                if os.path.isfile(file_path):
                    os.remove(file_path)
                elif os.path.isdir(file_path):
                    shutil.rmtree(file_path)
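
The example above drives the pull-mode backup by generating a <domainbackup>
XML plus a <domaincheckpoint> XML with the utils_backup helpers, passing both
to virsh backup-begin, pulling the data from the NBD export and then aborting
the job with virsh domjobabort. A condensed sketch of that setup, assuming the
same virttest modules used above and hypothetical vm, disk and scratch-file
names:

from virttest import utils_backup, virsh


def begin_pull_backup(vm_name="avocado-vt-vm1",
                      scratch_file="/var/tmp/scratch_file_0"):
    """Start a pull-mode full backup of 'vdb' exported over NBD tcp:10809."""
    backup_params = {"backup_mode": "pull",
                     "backup_server": {"name": "localhost", "port": "10809"}}
    backup_disk_params = {"disk_name": "vdb",
                          "enable_backup": "yes",
                          "disk_type": "file",
                          "backup_scratch": {"attrs": {"file": scratch_file}}}
    backup_disk_xml = utils_backup.create_backup_disk_xml(backup_disk_params)
    backup_xml = utils_backup.create_backup_xml(backup_params,
                                                [backup_disk_xml])
    checkpoint_xml = utils_backup.create_checkpoint_xml(
        {"checkpoint_name": "checkpoint_0"},
        [{"name": "vdb", "checkpoint": "bitmap"}])
    # Equivalent to: virsh backup-begin <vm> <backup-xml> <checkpoint-xml>
    virsh.backup_begin(vm_name, backup_xml.xml + " " + checkpoint_xml.xml,
                       debug=True, ignore_status=False)
    # ...pull the data from the NBD export, then end the backup job:
    virsh.domjobabort(vm_name, debug=True)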
Exemplo n.º 38
0
def run(test, params, env):
    """
    Test command: virsh pool-define; pool-define-as; pool-start;
    vol-list pool; attach-device LUN to guest; mount the device;
    dd to the mounted device; unmount; pool-destroy; pool-undefine;

    Prerequisite:
    The host needs to have the wwpn and wwnn of a vHBA which is zoned and
    mapped to a SAN controller.
    """
    pool_xml_f = params.get("pool_create_xml_file", "/PATH/TO/POOL.XML")
    pool_name = params.get("pool_create_name", "virt_test_pool_tmp")
    pre_def_pool = params.get("pre_def_pool", "no")
    define_pool = params.get("define_pool", "no")
    define_pool_as = params.get("define_pool_as", "no")
    pool_create_as = params.get("pool_create_as", "no")
    need_pool_build = params.get("need_pool_build", "no")
    need_vol_create = params.get("need_vol_create", "no")
    pool_type = params.get("pool_type", "dir")
    source_format = params.get("pool_src_format", "")
    source_name = params.get("pool_source_name", "")
    source_path = params.get("pool_source_path", "/")
    pool_target = params.get("pool_target", "pool_target")
    pool_adapter_type = params.get("pool_adapter_type", "")
    pool_adapter_parent = params.get("pool_adapter_parent", "")
    target_device = params.get("disk_target_dev", "sdc")
    pool_wwnn = params.get("pool_wwnn", "POOL_WWNN_EXAMPLE")
    pool_wwpn = params.get("pool_wwpn", "POOL_WWPN_EXAMPLE")
    vhba_wwnn = params.get("vhba_wwnn", "VHBA_WWNN_EXAMPLE")
    vhba_wwpn = params.get("vhba_wwpn", "VHBA_WWPN_EXAMPLE")
    volume_name = params.get("volume_name", "imagefrommapper.qcow2")
    volume_capacity = params.get("volume_capacity", '1G')
    allocation = params.get("allocation", '1G')
    vol_format = params.get("volume_format", 'raw')
    attach_method = params.get("attach_method", "hot")
    test_unit = None
    mount_disk = None
    pool_kwargs = {}
    pool_extra_args = ""
    emulated_image = "emulated-image"
    disk_xml = ""
    new_vhbas = []
    source_dev = ""
    mpath_vol_path = ""
    old_mpath_conf = ""
    mpath_conf_path = "/etc/multipath.conf"
    original_mpath_conf_exist = os.path.exists(mpath_conf_path)

    if pool_type == "scsi":
        if ('EXAMPLE' in pool_wwnn) or ('EXAMPLE' in pool_wwpn):
            raise exceptions.TestSkipError(
                    "No wwpn and wwnn provided for npiv scsi pool.")
    if pool_type == "logical":
        if ('EXAMPLE' in vhba_wwnn) or ('EXAMPLE' in vhba_wwpn):
            raise exceptions.TestSkipError(
                    "No wwpn and wwnn provided for vhba.")
    online_hbas_list = utils_npiv.find_hbas("hba")
    logging.debug("The online hbas are: %s", online_hbas_list)
    old_mpath_conf = utils_npiv.prepare_multipath_conf(conf_path=mpath_conf_path,
                                                       replace_existing=True)
    if not online_hbas_list:
        raise exceptions.TestSkipError(
            "Host doesn't have online hba cards")
    old_vhbas = utils_npiv.find_hbas("vhba")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()
    if not vm.is_alive():
        vm.start()
    libvirt_vm = lib_vm.VM(vm_name, vm.params, vm.root_dir,
                           vm.address_cache)
    pool_ins = libvirt_storage.StoragePool()
    if pool_ins.pool_exists(pool_name):
        raise exceptions.TestFail("Pool %s already exist" % pool_name)
    if pool_type == "scsi":
        if define_pool == "yes":
            if pool_adapter_parent == "":
                pool_adapter_parent = online_hbas_list[0]
            pool_kwargs = {'source_path': source_path,
                           'source_name': source_name,
                           'source_format': source_format,
                           'pool_adapter_type': pool_adapter_type,
                           'pool_adapter_parent': pool_adapter_parent,
                           'pool_wwnn': pool_wwnn,
                           'pool_wwpn': pool_wwpn}
    elif pool_type == "logical":
        if (not vhba_wwnn) or (not vhba_wwpn):
            raise exceptions.TestFail("No wwnn/wwpn provided to create vHBA.")
        old_mpath_devs = utils_npiv.find_mpath_devs()
        new_vhba = utils_npiv.nodedev_create_from_xml({
                "nodedev_parent": online_hbas_list[0],
                "scsi_wwnn": vhba_wwnn,
                "scsi_wwpn": vhba_wwpn})
        utils_misc.wait_for(
            lambda: utils_npiv.is_vhbas_added(old_vhbas), timeout=_DELAY_TIME*2)
        if not new_vhba:
            raise exceptions.TestFail("vHBA not sucessfully generated.")
        new_vhbas.append(new_vhba)
        utils_misc.wait_for(
            lambda: utils_npiv.is_mpath_devs_added(old_mpath_devs),
            timeout=_DELAY_TIME*5)
        if not utils_npiv.is_mpath_devs_added(old_mpath_devs):
            raise exceptions.TestFail("mpath dev not generated.")
        cur_mpath_devs = utils_npiv.find_mpath_devs()
        new_mpath_devs = list(set(cur_mpath_devs).difference(
            set(old_mpath_devs)))
        logging.debug("The newly added mpath dev is: %s", new_mpath_devs)
        source_dev = "/dev/mapper/" + new_mpath_devs[0]
        logging.debug("We are going to use \"%s\" as our source device"
                      " to create a logical pool", source_dev)
        try:
            cmd = "parted %s mklabel msdos -s" % source_dev
            cmd_result = process.run(cmd, shell=True)
        except Exception as e:
            raise exceptions.TestError("Error occurred when parted mklable")
        if define_pool_as == "yes":
            pool_extra_args = ""
            if source_dev:
                pool_extra_args = ' --source-dev %s' % source_dev
    elif pool_type == "mpath":
        if (not vhba_wwnn) or (not vhba_wwpn):
            raise exceptions.TestFail("No wwnn/wwpn provided to create vHBA.")
        old_mpath_devs = utils_npiv.find_mpath_devs()
        new_vhba = utils_npiv.nodedev_create_from_xml({
                "nodedev_parent": online_hbas_list[0],
                "scsi_wwnn": vhba_wwnn,
                "scsi_wwpn": vhba_wwpn})
        utils_misc.wait_for(
            lambda: utils_npiv.is_vhbas_added(old_vhbas), timeout=_DELAY_TIME*2)
        if not new_vhba:
            raise exceptions.TestFail("vHBA not sucessfully generated.")
        new_vhbas.append(new_vhba)
        utils_misc.wait_for(
            lambda: utils_npiv.is_mpath_devs_added(old_mpath_devs),
            timeout=_DELAY_TIME*2)
        if not utils_npiv.is_mpath_devs_added(old_mpath_devs):
            raise exceptions.TestFail("mpath dev not generated.")
        cur_mpath_devs = utils_npiv.find_mpath_devs()
        new_mpath_devs = list(set(cur_mpath_devs).difference(
            set(old_mpath_devs)))
        logging.debug("The newly added mpath dev is: %s", new_mpath_devs)
        mpath_vol_path = "/dev/mapper/" + new_mpath_devs[0]
        try:
            cmd = "parted %s mklabel msdos -s" % mpath_vol_path
            cmd_result = process.run(cmd, shell=True)
        except Exception as e:
            raise exceptions.TestError("Error occurred when parted mklable")
    if pre_def_pool == "yes":
        try:
            pvt = utlv.PoolVolumeTest(test, params)
            pvt.pre_pool(pool_name, pool_type,
                         pool_target, emulated_image,
                         **pool_kwargs)
            utils_misc.wait_for(
                    lambda: utils_npiv.is_vhbas_added(old_vhbas),
                    _DELAY_TIME*2)
            virsh.pool_dumpxml(pool_name, to_file=pool_xml_f)
            virsh.pool_destroy(pool_name)
        except Exception as e:
            pvt.cleanup_pool(pool_name, pool_type, pool_target,
                             emulated_image, **pool_kwargs)
            raise exceptions.TestError(
                "Error occurred when prepare pool xml:\n %s" % e)
        if os.path.exists(pool_xml_f):
            with open(pool_xml_f, 'r') as f:
                logging.debug("Create pool from file: %s", f.read())
    try:
        # define/create/start the pool
        if (pre_def_pool == "yes") and (define_pool == "yes"):
            pool_define_status = virsh.pool_define(pool_xml_f,
                                                   ignore_status=True,
                                                   debug=True)
            utlv.check_exit_status(pool_define_status)
        if define_pool_as == "yes":
            pool_define_as_status = virsh.pool_define_as(
                pool_name, pool_type,
                pool_target, pool_extra_args,
                ignore_status=True, debug=True
                )
            utlv.check_exit_status(pool_define_as_status)
        if pool_create_as == "yes":
            if pool_type != "scsi":
                raise exceptions.TestSkipError("pool-create-as only needs to "
                                               "be covered by scsi pool for "
                                               "NPIV test.")
            cmd = "virsh pool-create-as %s %s \
                   --adapter-wwnn %s --adapter-wwpn %s \
                   --adapter-parent %s --target %s"\
                   % (pool_name, pool_type, pool_wwnn, pool_wwpn,
                      online_hbas_list[0], pool_target)
            cmd_status = process.system(cmd, verbose=True)
            if cmd_status:
                raise exceptions.TestFail("pool-create-as scsi pool failed.")
        if need_pool_build == "yes":
            pool_build_status = virsh.pool_build(pool_name, "--overwrite")
            utlv.check_exit_status(pool_build_status)

        pool_ins = libvirt_storage.StoragePool()
        if not pool_ins.pool_exists(pool_name):
            raise exceptions.TestFail("define or create pool failed.")
        else:
            if not pool_ins.is_pool_active(pool_name):
                pool_start_status = virsh.pool_start(pool_name)
                utlv.check_exit_status(pool_start_status)
                utlv.check_actived_pool(pool_name)
                pool_detail = libvirt_xml.PoolXML.get_pool_details(pool_name)
                logging.debug("Pool detail: %s", pool_detail)

        # create vol if required
        if need_vol_create == "yes":
            vol_create_as_status = virsh.vol_create_as(
                    volume_name, pool_name,
                    volume_capacity, allocation,
                    vol_format, "", debug=True
                    )
            utlv.check_exit_status(vol_create_as_status)
        virsh.pool_refresh(pool_name)
        vol_list = utlv.get_vol_list(pool_name, vol_check=True,
                                     timeout=_DELAY_TIME*3)
        logging.debug('Volume list is: %s' % vol_list)

        # use test_unit to save the first vol in pool
        if pool_type == "mpath":
            cmd = "virsh vol-list %s | grep \"%s\" |\
                   awk '{FS=\" \"} {print $1}'" % (pool_name, mpath_vol_path)
            cmd_result = process.run(cmd, shell=True)
            status = cmd_result.exit_status
            output = cmd_result.stdout_text.strip()
            if cmd_result.exit_status:
                raise exceptions.TestFail("vol-list pool %s failed", pool_name)
            if not output:
                raise exceptions.TestFail("Newly added mpath dev not in pool.")
            test_unit = output
            logging.info(
                "Using %s to attach to a guest", test_unit)
        else:
            test_unit = list(vol_list.keys())[0]
            logging.info(
                "Using the first volume %s to attach to a guest", test_unit)

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        session = vm.wait_for_login()
        output = session.cmd_status_output('lsblk')
        logging.debug("%s", output[1])
        old_count = vmxml.get_disk_count(vm_name)
        bf_disks = libvirt_vm.get_disks()

        # prepare disk xml which will be hot/cold attached to vm
        disk_params = {'type_name': 'volume', 'target_dev': target_device,
                       'target_bus': 'virtio', 'source_pool': pool_name,
                       'source_volume': test_unit, 'driver_type': vol_format}
        disk_xml = os.path.join(data_dir.get_tmp_dir(), 'disk_xml.xml')
        lun_disk_xml = utlv.create_disk_xml(disk_params)
        copyfile(lun_disk_xml, disk_xml)
        disk_xml_str = open(lun_disk_xml).read()
        logging.debug("The disk xml is: %s", disk_xml_str)

        # hot attach disk xml to vm
        if attach_method == "hot":
            copyfile(lun_disk_xml, disk_xml)
            dev_attach_status = virsh.attach_device(vm_name, disk_xml,
                                                    debug=True)
            # Pool/vol virtual disk is not supported by mpath pool yet.
            if dev_attach_status.exit_status and pool_type == "mpath":
                raise exceptions.TestSkipError("mpath pool vol is not "
                                               "supported in virtual disk yet,"
                                               "the error message is: %s",
                                               dev_attach_status.stderr)
                session.close()
            utlv.check_exit_status(dev_attach_status)
        # cold attach disk xml to vm
        elif attach_method == "cold":
            if vm.is_alive():
                vm.destroy(gracefully=False)
            new_disk = disk.Disk()
            new_disk.xml = disk_xml_str
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            vmxml.devices = vmxml.devices.append(new_disk)
            vmxml.sync()
            logging.debug(vmxml)
            try:
                vm.start()
            except virt_vm.VMStartError as e:
                logging.debug(e)
                if pool_type == "mpath":
                    raise exceptions.TestSkipError("'mpath' pools for backing "
                                                   "'volume' disks isn't "
                                                   "supported for now")
                else:
                    raise exceptions.TestFail("Failed to start vm")
            session = vm.wait_for_login()
        else:
            pass

        # checking attached disk in vm
        logging.info("Checking disk availability in domain")
        if not vmxml.get_disk_count(vm_name):
            raise exceptions.TestFail("No disk in domain %s." % vm_name)
        new_count = vmxml.get_disk_count(vm_name)

        if new_count <= old_count:
            raise exceptions.TestFail(
                "Failed to attach disk %s" % lun_disk_xml)
        logging.debug("Disks before attach: %s", bf_disks)

        af_disks = libvirt_vm.get_disks()
        logging.debug("Disks after attach: %s", af_disks)

        mount_disk = "".join(list(set(bf_disks) ^ set(af_disks)))
        if not mount_disk:
            raise exceptions.TestFail("Can not get attached device in vm.")
        logging.debug("Attached device in vm:%s", mount_disk)

        logging.debug("Creating file system for %s", mount_disk)
        output = session.cmd_status_output(
            'echo yes | mkfs.ext4 %s' % mount_disk)
        logging.debug("%s", output[1])
        if mount_disk:
            mount_success = mount_and_dd(session, mount_disk)
            if not mount_success:
                raise exceptions.TestFail("Mount failed")
        else:
            raise exceptions.TestFail("Partition not available for disk")
        logging.debug("Unmounting disk")
        session.cmd_status_output('umount %s' % mount_disk)
        output = session.cmd_status_output('mount')
        logging.debug("%s", output[1])
        mount_success = mount_and_dd(session, mount_disk)
        if not mount_success:
            raise exceptions.TestFail("Mount failed")
        logging.debug("Unmounting disk")
        session.cmd_status_output('umount %s' % mount_disk)
        session.close()

        # detach disk from vm
        dev_detach_status = virsh.detach_device(vm_name, disk_xml,
                                                debug=True)
        utlv.check_exit_status(dev_detach_status)

    finally:
        vm.destroy(gracefully=False)
        vmxml_backup.sync()
        logging.debug('Destroying pool %s', pool_name)
        virsh.pool_destroy(pool_name)
        logging.debug('Undefining pool %s', pool_name)
        virsh.pool_undefine(pool_name)
        if os.path.exists(pool_xml_f):
            os.remove(pool_xml_f)
        if os.path.exists(disk_xml):
            data_dir.clean_tmp_files()
            logging.debug("Cleanup disk xml")
        if pre_def_pool == "yes":
            # Do not apply cleanup_pool for logical pool, logical pool will
            # be cleaned below
            pvt.cleanup_pool(pool_name, pool_type, pool_target,
                             emulated_image, **pool_kwargs)
        if (test_unit and
                (need_vol_create == "yes" and (pre_def_pool == "no")) and
                (pool_type == "logical")):
            process.system('lvremove -f %s/%s' % (pool_name, test_unit),
                           verbose=True)
            process.system('vgremove -f %s' % pool_name, verbose=True)
            process.system('pvremove -f %s' % source_dev, verbose=True)
        if new_vhbas:
            utils_npiv.vhbas_cleanup(new_vhbas)
        # Restart multipathd, this is to avoid bz1399075
        if source_dev:
            utils_misc.wait_for(lambda: utils_npiv.restart_multipathd(source_dev),
                                _DELAY_TIME*5, 0.0, 5.0)
        elif mpath_vol_path:
            utils_misc.wait_for(lambda: utils_npiv.restart_multipathd(mpath_vol_path),
                                _DELAY_TIME*5, 0.0, 5.0)
        else:
            utils_npiv.restart_multipathd()
        if old_mpath_conf:
            utils_npiv.prepare_multipath_conf(conf_path=mpath_conf_path,
                                              conf_content=old_mpath_conf,
                                              replace_existing=True)
        if not original_mpath_conf_exist and os.path.exists(mpath_conf_path):
            os.remove(mpath_conf_path)
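
The example above attaches a storage-pool volume to the guest by building a
'volume'-type disk XML (source_pool/source_volume) with utlv.create_disk_xml()
and feeding it to virsh attach-device. A brief sketch of that hot-attach step,
assuming the same utlv/virsh modules and hypothetical pool and volume names:

from virttest import virsh
from virttest.utils_test import libvirt as utlv


def attach_pool_volume(vm_name="avocado-vt-vm1",
                       pool_name="virt_test_pool_tmp",
                       vol_name="imagefrommapper.qcow2"):
    """Hot-attach the given pool volume to the vm as a virtio disk."""
    disk_params = {'type_name': 'volume',
                   'target_dev': 'sdc',
                   'target_bus': 'virtio',
                   'source_pool': pool_name,
                   'source_volume': vol_name,
                   'driver_type': 'raw'}
    disk_xml = utlv.create_disk_xml(disk_params)
    ret = virsh.attach_device(vm_name, disk_xml, debug=True)
    utlv.check_exit_status(ret)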
Exemplo n.º 39
0
def run(test, params, env):
    """
    1. prepare a vHBA
    2. find the nodedev's lun name
    3. prepare the lun dev's xml
    4. start vm
    5. attach disk xml to vm
    6. login vm and check the disk
    7. detach the virtual disk
    8. check the blkdev gone
    9. cleanup env.
    """
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    wwpn = params.get("wwpn", "WWPN_EXAMPLE")
    wwnn = params.get("wwnn", "WWNN_EXAMPLE")
    disk_device = params.get("disk_device", "disk")
    device_type = params.get("device_type", "file")
    device_target = params.get("device_target", "vdb")
    lun_dir_method = params.get("lun_dir_method", "by-path")
    driver_name = params.get("driver_name", "qemu")
    driver_type = params.get("driver_type", "raw")
    target_bus = params.get("target_bus", "virtio")
    readonly = params.get("readonly", "no")
    new_vhbas = []
    blk_dev = ""
    lun_dev = ""
    lun_dev_path = ""
    lun_sl = []
    new_disk = ""
    old_mpath_conf = ""
    mpath_conf_path = "/etc/multipath.conf"
    original_mpath_conf_exist = os.path.exists(mpath_conf_path)
    vm = env.get_vm(vm_name)
    try:
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml_backup = vmxml.copy()
        old_disk_count = vmxml.get_disk_count(vm_name)
        # Prepare vHBA
        online_hbas = utils_npiv.find_hbas("hba")
        old_vhbas = utils_npiv.find_hbas("vhba")
        if not online_hbas:
            raise exceptions.TestSkipError("Host doesn't have online hba!")
        old_mpath_conf = utils_npiv.prepare_multipath_conf(
            conf_path=mpath_conf_path, replace_existing=True)
        first_online_hba = online_hbas[0]
        new_vhba = utils_npiv.nodedev_create_from_xml({
            "nodedev_parent": first_online_hba,
            "scsi_wwnn": wwnn,
            "scsi_wwpn": wwpn
        })
        utils_misc.wait_for(lambda: utils_npiv.is_vhbas_added(old_vhbas),
                            timeout=_TIMEOUT)
        if not utils_npiv.is_vhbas_added(old_vhbas):
            raise exceptions.TestFail("vHBA is not successfully created.")
        new_vhbas.append(new_vhba)
        new_vhba_scsibus = re.sub(r"\D", "", new_vhba)
        # Get the new block device generated by the new vHBA
        utils_misc.wait_for(lambda: get_blks_by_scsi(new_vhba_scsibus),
                            timeout=_TIMEOUT)
        blk_devs = get_blks_by_scsi(new_vhba_scsibus)
        if not blk_devs:
            raise exceptions.TestFail("block device not found with scsi_%s",
                                      new_vhba_scsibus)
        first_blk_dev = blk_devs[0]
        # Get the symbolic link of the device in /dev/disk/by-[path|uuid|id]
        logging.debug("first_blk_dev = %s, lun_dir_method = %s" %
                      (first_blk_dev, lun_dir_method))
        utils_misc.wait_for(
            lambda: get_symbols_by_blk(first_blk_dev, lun_dir_method),
            timeout=_TIMEOUT)
        lun_sl = get_symbols_by_blk(first_blk_dev, lun_dir_method)
        if not lun_sl:
            raise exceptions.TestFail("lun symbolic links not found under "
                                      "/dev/disk/%s/ for block device %s." %
                                      (lun_dir_method, first_blk_dev))
        lun_dev = lun_sl[0]
        lun_dev_path = "/dev/disk/" + lun_dir_method + "/" + lun_dev
        # Prepare xml of virtual disk
        disk_params = {
            'type_name': device_type,
            'device': disk_device,
            'driver_name': driver_name,
            'driver_type': driver_type,
            'source_file': lun_dev_path,
            'target_dev': device_target,
            'target_bus': target_bus,
            'readonly': readonly
        }
        disk_xml = os.path.join(data_dir.get_tmp_dir(), 'disk_xml.xml')
        lun_disk_xml = utlv.create_disk_xml(disk_params)
        copyfile(lun_disk_xml, disk_xml)
        if not vm.is_alive():
            vm.start()
        session = vm.wait_for_login()
        libvirt_vm = lib_vm.VM(vm_name, vm.params, vm.root_dir,
                               vm.address_cache)
        old_disks = libvirt_vm.get_disks()
        # Attach disk
        dev_attach_status = virsh.attach_device(vm_name, disk_xml, debug=True)
        utlv.check_exit_status(dev_attach_status)

        cur_disk_count = vmxml.get_disk_count(vm_name)
        cur_disks = libvirt_vm.get_disks()
        if cur_disk_count <= old_disk_count:
            raise exceptions.TestFail("Failed to attach disk: %s" %
                                      lun_disk_xml)
        new_disk = "".join(list(set(old_disks) ^ set(cur_disks)))
        logging.debug("Attached device in vm:%s", new_disk)
        # Check disk in VM
        output = session.cmd_status_output('mkfs.ext4 -F %s' % new_disk)
        logging.debug("mkfs.ext4 the disk in vm, result: %s", output[1])
        if not check_vm_disk(session, new_disk, readonly):
            raise exceptions.TestFail("Failed check the disk in vm.")
        session.cmd_status_output('umount %s' % new_disk)
        # Detach disk
        dev_detach_status = virsh.detach_device(vm_name, disk_xml, debug=True)
        utlv.check_exit_status(dev_detach_status)
        cur_disks = libvirt_vm.get_disks()
        if cur_disks != old_disks:
            raise exceptions.TestFail("Detach disk failed.")
        session.close()

    finally:
        utils_npiv.vhbas_cleanup(new_vhbas)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()
        process.system('service multipathd restart', verbose=True)
        if old_mpath_conf:
            utils_npiv.prepare_multipath_conf(conf_path=mpath_conf_path,
                                              conf_content=old_mpath_conf,
                                              replace_existing=True)
        if not original_mpath_conf_exist and os.path.exists(mpath_conf_path):
            os.remove(mpath_conf_path)
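
The example above prepares a vHBA node device from XML with
utils_npiv.nodedev_create_from_xml() and waits for it to appear before using
the LUN it exposes as a virtual disk. A compact sketch of that prepare/cleanup
pattern, assuming the same virttest modules and placeholder WWNN/WWPN values:

from virttest import utils_misc, utils_npiv


def create_vhba(wwnn="20000000c9831b4b", wwpn="10000000c9831b4b", timeout=60):
    """Create a vHBA on the first online HBA and wait until it shows up."""
    online_hbas = utils_npiv.find_hbas("hba")
    old_vhbas = utils_npiv.find_hbas("vhba")
    new_vhba = utils_npiv.nodedev_create_from_xml({
        "nodedev_parent": online_hbas[0],
        "scsi_wwnn": wwnn,
        "scsi_wwpn": wwpn})
    # Wait until the new vHBA is listed among the host's vhba node devices
    utils_misc.wait_for(lambda: utils_npiv.is_vhbas_added(old_vhbas),
                        timeout=timeout)
    return new_vhba

# When done, remove the vHBA again:
#   utils_npiv.vhbas_cleanup([new_vhba])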
Exemplo n.º 40
0
def run(test, params, env):
    """
    Test the pull-mode backup function

    Steps:
    1. create a vm with an extra disk vdb
    2. create some data on vdb
    3. start a pull mode full backup on vdb
    4. create some data on vdb
    5. start a pull mode incremental backup
    6. repeat steps 4 and 5 several times
    7. check the full/incremental backup file data
    """

    # Basic case config
    hotplug_disk = "yes" == params.get("hotplug_disk", "no")
    original_disk_size = params.get("original_disk_size", "100M")
    original_disk_type = params.get("original_disk_type", "local")
    original_disk_target = params.get("original_disk_target", "vdb")
    local_hostname = params.get("loal_hostname", "localhost")
    local_ip = params.get("local_ip", "127.0.0.1")
    local_user_name = params.get("local_user_name", "root")
    local_user_password = params.get("local_user_password", "redhat")
    tmp_dir = data_dir.get_tmp_dir()
    # Backup config
    scratch_type = params.get("scratch_type", "file")
    reuse_scratch_file = "yes" == params.get("reuse_scratch_file")
    prepare_scratch_file = "yes" == params.get("prepare_scratch_file")
    scratch_blkdev_path = params.get("scratch_blkdev_path")
    scratch_blkdev_size = params.get("scratch_blkdev_size", original_disk_size)
    prepare_scratch_blkdev = "yes" == params.get("prepare_scratch_blkdev")
    backup_rounds = int(params.get("backup_rounds", 3))
    backup_error = "yes" == params.get("backup_error")
    expect_backup_canceled = "yes" == params.get("expect_backup_canceled")
    # NBD service config
    nbd_protocol = params.get("nbd_protocol", "unix")
    nbd_socket = params.get("nbd_socket", "/tmp/pull_backup.socket")
    nbd_tcp_port = params.get("nbd_tcp_port", "10809")
    nbd_hostname = local_hostname
    set_exportname = "yes" == params.get("set_exportname")
    set_exportbitmap = "yes" == params.get("set_exportbitmap")
    # TLS service config
    tls_enabled = "yes" == params.get("tls_enabled")
    tls_x509_verify = "yes" == params.get("tls_x509_verify")
    custom_pki_path = "yes" == params.get("custom_pki_path")
    tls_client_ip = tls_server_ip = local_ip
    tls_client_cn = tls_server_cn = local_hostname
    tls_client_user = tls_server_user = local_user_name
    tls_client_pwd = tls_server_pwd = local_user_password
    tls_provide_client_cert = "yes" == params.get("tls_provide_client_cert")
    tls_error = "yes" == params.get("tls_error")
    # LUKS config
    scratch_luks_encrypted = "yes" == params.get("scratch_luks_encrypted")
    luks_passphrase = params.get("luks_passphrase", "password")

    # Cancel the test if libvirt doesn't support the related functions
    if not libvirt_version.version_compare(6, 0, 0):
        test.cancel("Current libvirt version doesn't support "
                    "incremental backup.")
    if tls_enabled and not libvirt_version.version_compare(6, 6, 0):
        test.cancel("Current libvirt version doesn't support pull mode "
                    "backup with tls nbd.")

    try:
        vm_name = params.get("main_vm")
        vm = env.get_vm(vm_name)

        # Make sure there is no checkpoint metadata before test
        utils_backup.clean_checkpoints(vm_name)

        # Backup vm xml
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        vmxml_backup = vmxml.copy()
        utils_backup.enable_inc_backup_for_vm(vm)

        # Prepare tls env
        if tls_enabled:
            # Prepare pki
            tls_config = {
                "qemu_tls": "yes",
                "auto_recover": "yes",
                "client_ip": tls_client_ip,
                "server_ip": tls_server_ip,
                "client_cn": tls_client_cn,
                "server_cn": tls_server_cn,
                "client_user": tls_client_user,
                "server_user": tls_server_user,
                "client_pwd": tls_client_pwd,
                "server_pwd": tls_server_pwd,
            }
            if custom_pki_path:
                pki_path = os.path.join(tmp_dir, "inc_bkup_pki")
            else:
                pki_path = "/etc/pki/libvirt-backup/"
            if tls_x509_verify:
                tls_config["client_ip"] = tls_client_ip
            tls_config["custom_pki_path"] = pki_path
            tls_obj = TLSConnection(tls_config)
            tls_obj.conn_setup(True, tls_provide_client_cert)
            logging.debug("TLS certs in: %s" % pki_path)
            # Set qemu.conf
            qemu_config = LibvirtQemuConfig()
            if tls_x509_verify:
                qemu_config.backup_tls_x509_verify = True
            else:
                qemu_config.backup_tls_x509_verify = False
            if custom_pki_path:
                qemu_config.backup_tls_x509_cert_dir = pki_path
            utils_libvirtd.Libvirtd().restart()
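            # Note (illustrative only): the two settings above correspond to
            # the qemu.conf keys backup_tls_x509_verify and
            # backup_tls_x509_cert_dir, e.g. in /etc/libvirt/qemu.conf:
            #   backup_tls_x509_verify = 1
            #   backup_tls_x509_cert_dir = "/custom/pki/path"   # placeholder
            # libvirtd is restarted so the new values take effect.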

        # Prepare libvirt secret
        if scratch_luks_encrypted:
            utils_secret.clean_up_secrets()
            luks_secret_uuid = libvirt.create_secret(params)
            virsh.secret_set_value(luks_secret_uuid,
                                   luks_passphrase,
                                   encode=True,
                                   debug=True)

        # Prepare the disk to be backed up.
        disk_params = {}
        disk_path = ""
        if original_disk_type == "local":
            image_name = "{}_image.qcow2".format(original_disk_target)
            disk_path = os.path.join(tmp_dir, image_name)
            libvirt.create_local_disk("file", disk_path, original_disk_size,
                                      "qcow2")
            disk_params = {
                "device_type": "disk",
                "type_name": "file",
                "driver_type": "qcow2",
                "target_dev": original_disk_target,
                "source_file": disk_path
            }
            if original_disk_target:
                disk_params["target_dev"] = original_disk_target
        elif original_disk_type == "iscsi":
            iscsi_host = '127.0.0.1'
            iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
                is_setup=True,
                is_login=False,
                image_size=original_disk_size,
                portal_ip=iscsi_host)
            disk_path = ("iscsi://[%s]/%s/%s" %
                         (iscsi_host, iscsi_target, lun_num))
            process.run("qemu-img create -f qcow2 %s %s" %
                        (disk_path, original_disk_size),
                        shell=True,
                        verbose=True)
            disk_params = {
                'device_type': 'disk',
                'type_name': 'network',
                "driver_type": "qcow2",
                'target_dev': original_disk_target
            }
            disk_params_src = {
                'source_protocol': 'iscsi',
                'source_name': iscsi_target + "/%s" % lun_num,
                'source_host_name': iscsi_host,
                'source_host_port': '3260'
            }
            disk_params.update(disk_params_src)
        elif original_disk_type == "gluster":
            gluster_vol_name = "gluster_vol"
            gluster_pool_name = "gluster_pool"
            gluster_img_name = "gluster.qcow2"
            gluster_host_ip = gluster.setup_or_cleanup_gluster(
                is_setup=True,
                vol_name=gluster_vol_name,
                pool_name=gluster_pool_name,
                **params)
            disk_path = 'gluster://%s/%s/%s' % (
                gluster_host_ip, gluster_vol_name, gluster_img_name)
            process.run("qemu-img create -f qcow2 %s %s" %
                        (disk_path, original_disk_size),
                        shell=True,
                        verbose=True)
            disk_params = {
                'device_type': 'disk',
                'type_name': 'network',
                "driver_type": "qcow2",
                'target_dev': original_disk_target
            }
            disk_params_src = {
                'source_protocol': 'gluster',
                'source_name': gluster_vol_name + "/%s" % gluster_img_name,
                'source_host_name': gluster_host_ip,
                'source_host_port': '24007'
            }
            disk_params.update(disk_params_src)
        else:
            test.error("The disk type '%s' not supported in this script.",
                       original_disk_type)
        if hotplug_disk:
            vm.start()
            vm.wait_for_login().close()
            disk_xml = libvirt.create_disk_xml(disk_params)
            virsh.attach_device(vm_name, disk_xml, debug=True)
        else:
            disk_xml = libvirt.create_disk_xml(disk_params)
            virsh.attach_device(vm.name,
                                disk_xml,
                                flagstr="--config",
                                debug=True)
            vm.start()
        session = vm.wait_for_login()
        new_disks_in_vm = list(utils_disk.get_linux_disks(session).keys())
        session.close()
        if len(new_disks_in_vm) != 1:
            test.fail("Test disk not prepared in vm")

        # Use the newly added disk as the test disk
        test_disk_in_vm = "/dev/" + new_disks_in_vm[0]

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        vm_disks = list(vmxml.get_disk_all().keys())

        checkpoint_list = []
        is_incremental = False
        backup_file_list = []
        for backup_index in range(backup_rounds):
            # Prepare backup xml
            backup_params = {"backup_mode": "pull"}
            if backup_index > 0:
                is_incremental = True
                backup_params["backup_incremental"] = "checkpoint_" + str(
                    backup_index - 1)

            # Set libvirt default nbd export name and bitmap name
            nbd_export_name = original_disk_target
            nbd_bitmap_name = "backup-" + original_disk_target

            backup_server_dict = {}
            if nbd_protocol == "unix":
                backup_server_dict["transport"] = "unix"
                backup_server_dict["socket"] = nbd_socket
            else:
                backup_server_dict["name"] = nbd_hostname
                backup_server_dict["port"] = nbd_tcp_port
                if tls_enabled:
                    backup_server_dict["tls"] = "yes"
            backup_params["backup_server"] = backup_server_dict
            backup_disk_xmls = []
            for vm_disk in vm_disks:
                backup_disk_params = {"disk_name": vm_disk}
                if vm_disk != original_disk_target:
                    backup_disk_params["enable_backup"] = "no"
                else:
                    backup_disk_params["enable_backup"] = "yes"
                    backup_disk_params["disk_type"] = scratch_type

                    # Custom nbd export name and bitmap name if required
                    if set_exportname:
                        nbd_export_name = original_disk_target + "_custom_exp"
                        backup_disk_params["exportname"] = nbd_export_name
                    if set_exportbitmap:
                        nbd_bitmap_name = original_disk_target + "_custom_bitmap"
                        backup_disk_params["exportbitmap"] = nbd_bitmap_name

                    # Prepare nbd scratch file/dev params
                    scratch_params = {"attrs": {}}
                    scratch_path = None
                    if scratch_type == "file":
                        scratch_file_name = "scratch_file_%s" % backup_index
                        scratch_path = os.path.join(tmp_dir, scratch_file_name)
                        if prepare_scratch_file:
                            libvirt.create_local_disk("file", scratch_path,
                                                      original_disk_size,
                                                      "qcow2")
                        scratch_params["attrs"]["file"] = scratch_path
                    elif scratch_type == "block":
                        if prepare_scratch_blkdev:
                            scratch_path = libvirt.setup_or_cleanup_iscsi(
                                is_setup=True, image_size=scratch_blkdev_size)
                        scratch_params["attrs"]["dev"] = scratch_path
                    else:
                        test.fail(
                            "We do not support backup scratch type: '%s'" %
                            scratch_type)
                    if scratch_luks_encrypted:
                        encryption_dict = {
                            "encryption": "luks",
                            "secret": {
                                "type": "passphrase",
                                "uuid": luks_secret_uuid
                            }
                        }
                        scratch_params["encryption"] = encryption_dict
                    logging.debug("scratch params: %s", scratch_params)
                    backup_disk_params["backup_scratch"] = scratch_params
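                    # Illustrative only: with LUKS enabled, the resulting
                    # <scratch> element is expected to look roughly like
                    #   <scratch file='/tmp/scratch_file_0'>
                    #     <encryption format='luks'>
                    #       <secret type='passphrase' uuid='SECRET_UUID'/>
                    #     </encryption>
                    #   </scratch>
                    # (path and uuid here are placeholders).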

                backup_disk_xml = utils_backup.create_backup_disk_xml(
                    backup_disk_params)
                backup_disk_xmls.append(backup_disk_xml)
            logging.debug("disk list %s", backup_disk_xmls)
            backup_xml = utils_backup.create_backup_xml(
                backup_params, backup_disk_xmls)
            logging.debug("ROUND_%s Backup Xml: %s", backup_index, backup_xml)

            # Prepare checkpoint xml
            checkpoint_name = "checkpoint_%s" % backup_index
            checkpoint_list.append(checkpoint_name)
            cp_params = {"checkpoint_name": checkpoint_name}
            cp_params["checkpoint_desc"] = params.get(
                "checkpoint_desc", "desc of cp_%s" % backup_index)
            disk_param_list = []
            for vm_disk in vm_disks:
                cp_disk_param = {"name": vm_disk}
                if vm_disk != original_disk_target:
                    cp_disk_param["checkpoint"] = "no"
                else:
                    cp_disk_param["checkpoint"] = "bitmap"
                    cp_disk_bitmap = params.get("cp_disk_bitmap")
                    if cp_disk_bitmap:
                        cp_disk_param["bitmap"] = cp_disk_bitmap + str(
                            backup_index)
                disk_param_list.append(cp_disk_param)
            checkpoint_xml = utils_backup.create_checkpoint_xml(
                cp_params, disk_param_list)
            logging.debug("ROUND_%s Checkpoint Xml: %s", backup_index,
                          checkpoint_xml)

            # Create some data in vdb
            dd_count = "1"
            dd_seek = str(backup_index * 10 + 10)
            dd_bs = "1M"
            session = vm.wait_for_login()
            utils_disk.dd_data_to_vm_disk(session, test_disk_in_vm, dd_bs,
                                          dd_seek, dd_count)
            session.close()
            # Start backup
            backup_options = backup_xml.xml + " " + checkpoint_xml.xml
            if reuse_scratch_file:
                backup_options += " --reuse-external"
            backup_result = virsh.backup_begin(vm_name,
                                               backup_options,
                                               ignore_status=True,
                                               debug=True)
            if backup_result.exit_status:
                raise utils_backup.BackupBeginError(
                    backup_result.stderr.strip())
            # If required, do some error operations during backup job
            error_operation = params.get("error_operation")
            if error_operation:
                if "destroy_vm" in error_operation:
                    vm.destroy(gracefully=False)
                if "kill_qemu" in error_operation:
                    utils_misc.safe_kill(vm.get_pid(), signal.SIGKILL)
                if utils_misc.wait_for(
                        lambda: utils_backup.is_backup_canceled(vm_name),
                        timeout=5):
                    raise utils_backup.BackupCanceledError()
                elif expect_backup_canceled:
                    test.fail("Backup job should be canceled but not.")
            backup_file_path = os.path.join(
                tmp_dir, "backup_file_%s.qcow2" % str(backup_index))
            backup_file_list.append(backup_file_path)
            nbd_params = {
                "nbd_protocol": nbd_protocol,
                "nbd_export": nbd_export_name
            }
            if nbd_protocol == "unix":
                nbd_params["nbd_socket"] = nbd_socket
            elif nbd_protocol == "tcp":
                nbd_params["nbd_hostname"] = nbd_hostname
                nbd_params["nbd_tcp_port"] = nbd_tcp_port
                if tls_enabled:
                    nbd_params["tls_dir"] = pki_path
                    nbd_params["tls_server_ip"] = tls_server_ip
            if not is_incremental:
                # Do full backup
                try:
                    utils_backup.pull_full_backup_to_file(
                        nbd_params, backup_file_path)
                except Exception as details:
                    if tls_enabled and tls_error:
                        raise utils_backup.BackupTLSError(details)
                    else:
                        test.fail("Fail to get full backup data: %s" % details)
                logging.debug("Full backup to: %s", backup_file_path)
            else:
                # Do incremental backup
                utils_backup.pull_incremental_backup_to_file(
                    nbd_params, backup_file_path, nbd_bitmap_name,
                    original_disk_size)
            # Check if scratch file encrypted
            if scratch_luks_encrypted and scratch_path:
                cmd = "qemu-img info -U %s" % scratch_path
                result = process.run(cmd, shell=True,
                                     verbose=True).stdout_text.strip()
                if (not re.search("format.*luks", result, re.IGNORECASE)
                        or not re.search("encrypted.*yes", result,
                                         re.IGNORECASE)):
                    test.fail("scratch file/dev is not encrypted by LUKS")
            virsh.domjobabort(vm_name, debug=True)

        for checkpoint_name in checkpoint_list:
            virsh.checkpoint_delete(vm_name, checkpoint_name, debug=True)
        if vm.is_alive():
            vm.destroy(gracefully=False)

        # Compare the backup data and original data
        original_data_file = os.path.join(tmp_dir, "original_data.qcow2")
        cmd = "qemu-img convert -f qcow2 %s -O qcow2 %s" % (disk_path,
                                                            original_data_file)
        process.run(cmd, shell=True, verbose=True)
        for backup_file in backup_file_list:
            if not utils_backup.cmp_backup_data(original_data_file,
                                                backup_file):
                test.fail("Backup and original data are not identical for"
                          "'%s' and '%s'" % (disk_path, backup_file))
            else:
                logging.debug("'%s' contains correct backup data", backup_file)
    except utils_backup.BackupBeginError as detail:
        if backup_error:
            logging.debug("Backup failed as expected.")
        else:
            test.fail("Backup failed to start: %s" % detail)
    except utils_backup.BackupTLSError as detail:
        if tls_error:
            logging.debug("Failed to get backup data as expected.")
        else:
            test.fail("Failed to get tls backup data: %s" % detail)
    except utils_backup.BackupCanceledError as detail:
        if expect_backup_canceled:
            logging.debug("Backup canceled as expected.")
            if not vm.is_alive():
                logging.debug("Check if vm can be started again when backup "
                              "canceled.")
                vm.start()
                vm.wait_for_login().close()
        else:
            test.fail("Backup job canceled: %s" % detail)
    finally:
        # Remove checkpoints
        clean_checkpoint_metadata = not vm.is_alive()
        if "error_operation" in locals() and error_operation is not None:
            if "kill_qemu" in error_operation:
                clean_checkpoint_metadata = True
        utils_backup.clean_checkpoints(
            vm_name, clean_metadata=clean_checkpoint_metadata)

        if vm.is_alive():
            vm.destroy(gracefully=False)

        # Restoring vm
        vmxml_backup.sync()

        # Remove iscsi devices
        if original_disk_type == "iscsi" or scratch_type == "block":
            libvirt.setup_or_cleanup_iscsi(False)

        # Remove gluster devices
        if original_disk_type == "gluster":
            gluster.setup_or_cleanup_gluster(is_setup=False,
                                             vol_name=gluster_vol_name,
                                             pool_name=gluster_pool_name,
                                             **params)

        # Recover qemu.conf
        if "qemu_config" in locals():
            qemu_config.restore()

        # Remove tls object
        if "tls_obj" in locals():
            del tls_obj

        # Remove libvirt secret
        if "luks_secret_uuid" in locals():
            virsh.secret_undefine(luks_secret_uuid, ignore_status=True)
Exemplo n.º 41
0
def run(test, params, env):
    """
    Attach/Detach an iscsi network/volume disk to domain

    1. For secret usage testing:
        1.1. Setup an iscsi target with CHAP authentication.
        1.2. Define a secret for iscsi target usage
        1.3. Set secret value
    2. Create an iscsi target, and an iscsi pool/volume for the volume
       disk case
    3. Create an iscsi network/volume disk XML
    4. Attach the disk with the XML file and check the disk inside the VM
    5. Detach the disk
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    disk_device = params.get("disk_device", "disk")
    disk_type = params.get("disk_type", "network")
    disk_src_protocal = params.get("disk_source_protocal", "iscsi")
    disk_src_host = params.get("disk_source_host", "127.0.0.1")
    disk_src_port = params.get("disk_source_port", "3260")
    disk_src_pool = params.get("disk_source_pool")
    disk_src_mode = params.get("disk_source_mode", "host")
    pool_type = params.get("pool_type", "iscsi")
    pool_src_host = params.get("pool_source_host", "127.0.0.1")
    disk_target = params.get("disk_target", "vdb")
    disk_target_bus = params.get("disk_target_bus", "virtio")
    disk_readonly = params.get("disk_readonly", "no")
    chap_auth = "yes" == params.get("chap_auth", "no")
    chap_user = params.get("chap_username", "")
    chap_passwd = params.get("chap_password", "")
    secret_usage_target = params.get("secret_usage_target")
    secret_ephemeral = params.get("secret_ephemeral", "no")
    secret_private = params.get("secret_private", "yes")
    status_error = "yes" == params.get("status_error", "no")

    if disk_type == "volume":
        if not libvirt_version.version_compare(1, 0, 5):
            raise error.TestNAError("'volume' type disk doesn't support in"
                                    + " current libvirt version.")
    # Back up VM XML
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    virsh_dargs = {'debug': True, 'ignore_status': True}
    try:
        if chap_auth:
            # Create a secret xml to define it
            secret_xml = SecretXML(secret_ephemeral, secret_private)
            secret_xml.auth_type = "chap"
            secret_xml.auth_username = chap_user
            secret_xml.usage = disk_src_protocal
            secret_xml.target = secret_usage_target
            logging.debug("Define secret by XML: %s", open(secret_xml.xml).read())
            # Define secret
            cmd_result = virsh.secret_define(secret_xml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            # Get secret uuid
            try:
                secret_uuid = cmd_result.stdout.strip().split()[1]
            except IndexError:
                raise error.TestError("Fail to get new created secret uuid")

            # Set secret value
            secret_string = base64.b64encode(chap_passwd)
            cmd_result = virsh.secret_set_value(secret_uuid, secret_string,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
        else:
            # Set chap_user and chap_passwd to empty to avoid setup
            # CHAP authentication when export iscsi target
            chap_user = ""
            chap_passwd = ""

        # Setup iscsi target
        iscsi_target = libvirt.setup_or_cleanup_iscsi(is_setup=True,
                                                      is_login=False,
                                                      chap_user=chap_user,
                                                      chap_passwd=chap_passwd)
        # Create iscsi pool
        if disk_type == "volume":
            # Create an iscsi pool xml to create it
            pool_src_xml = pool_xml.SourceXML()
            pool_src_xml.hostname = pool_src_host
            pool_src_xml.device_path = iscsi_target
            poolxml = pool_xml.PoolXML(pool_type=pool_type)
            poolxml.name = disk_src_host
            poolxml.set_source(pool_src_xml)
            poolxml.target_path = "/dev/disk/by-path"
            # Create iscsi pool
            cmd_result = virsh.pool_create(poolxml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            # Get volume name
            cmd_result = virsh.vol_list(disk_src_pool, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            try:
                vol_name = re.findall(r"(\S+)\ +(\S+)[\ +\n]",
                                      str(cmd_result.stdout))[1][0]
            except IndexError:
                raise error.TestError("Fail to get volume name")

        # Create iscsi network disk XML
        disk_params = {'device_type': disk_device,
                       'type_name': disk_type,
                       'target_dev': disk_target,
                       'target_bus': disk_target_bus,
                       'readonly': disk_readonly}
        disk_params_src = {}
        if disk_type == "network":
            disk_params_src = {'source_protocol': disk_src_protocal,
                               'source_name': iscsi_target + "/1",
                               'source_host_name': disk_src_host,
                               'source_host_port': disk_src_port}
        elif disk_type == "volume":
            disk_params_src = {'source_pool': disk_src_pool,
                               'source_volume': vol_name,
                               'source_mode': disk_src_mode}
        else:
            error.TestNAError("Unsupport disk type in this test")
        disk_params.update(disk_params_src)
        if chap_auth:
            disk_params_auth = {'auth_user': chap_user,
                                'secret_type': disk_src_protocal,
                                'secret_usage': secret_xml.target}
            disk_params.update(disk_params_auth)
        disk_xml = libvirt.create_disk_xml(disk_params)

        start_vm = "yes" == params.get("start_vm", "yes")
        if start_vm:
            if vm.is_dead():
                vm.start()
        else:
            if not vm.is_dead():
                vm.destroy()
        attach_option = params.get("attach_option", "")
        # Attach the iscsi network disk to domain
        logging.debug("Attach disk by XML: %s", open(disk_xml).read())
        cmd_result = virsh.attach_device(domainarg=vm_name, filearg=disk_xml,
                                         flagstrs=attach_option,
                                         dargs=virsh_dargs)
        libvirt.check_exit_status(cmd_result, status_error)

        if vm.is_dead():
            cmd_result = virsh.start(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

        domain_operation = params.get("domain_operation", "")
        if domain_operation == "save":
            save_file = os.path.join(test.tmpdir, "vm.save")
            cmd_result = virsh.save(vm_name, save_file, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.restore(save_file)
            libvirt.check_exit_status(cmd_result)
            if os.path.exists(save_file):
                os.remove(save_file)
        elif domain_operation == "snapshot":
            # Run snapshot related commands: snapshot-create-as, snapshot-list
            # snapshot-info, snapshot-dumpxml, snapshot-create
            snapshot_name1 = "snap1"
            snapshot_name2 = "snap2"
            cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name1,
                                                  **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_info(vm_name, snapshot_name1,
                                             **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name1,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_create(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_current(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            sn_create_op = "%s --disk-only %s" % (snapshot_name2, disk_target)
            cmd_result = virsh.snapshot_create_as(vm_name, sn_create_op,
                                                  **virsh_dargs)

            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.snapshot_revert(vm_name, snapshot_name1,
                                               **virsh_dargs)

            cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_delete(vm_name, snapshot_name2,
                                               **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            pass
        else:
            logging.error("Unsupport operation %s in this case, so skip it",
                          domain_operation)

        def find_attach_disk(expect=True):
            """
            Find attached disk inside the VM
            """
            found_disk = False
            if vm.is_dead():
                raise error.TestError("Domain %s is not running" % vm_name)
            else:
                try:
                    session = vm.wait_for_login()
                    cmd = "grep %s /proc/partitions" % disk_target
                    s, o = session.cmd_status_output(cmd)
                    logging.info("%s output: %s", cmd, o)
                    session.close()
                    if s == 0:
                        found_disk = True
                except (LoginError, VMError, ShellError) as e:
                    logging.error(str(e))
            if found_disk == expect:
                logging.debug("Check disk inside the VM PASS as expected")
            else:
                raise error.TestError("Check disk inside the VM FAIL")

        # Check disk inside the VM, expect is False if status_error=True
        find_attach_disk(not status_error)

        # Detach disk
        cmd_result = virsh.detach_disk(vm_name, disk_target)
        libvirt.check_exit_status(cmd_result, status_error)

        # Check disk inside the VM
        find_attach_disk(False)
Exemplo n.º 42
0
def run(test, params, env):
    """
    1. prepare a fc lun with one of following methods
        - create a scsi pool&vol
        - create a vhba
    2. prepare the virtual disk xml, as one of following
        - source = /dev/disk/by-path
        - source = /dev/mapper/mpathX
        - source = pool&vol format
    3. start a vm with above disk as vdb
    4. create disk-only snapshot of vdb
    5. check the snapshot-list and snapshot file's existence
    6. mount vdb and touch file to it
    7. revert the snapshot and check file's existence
    8. delete snapshot
    9. cleanup env.
    """
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    wwpn = params.get("wwpn", "WWPN_EXAMPLE")
    wwnn = params.get("wwnn", "WWNN_EXAMPLE")
    disk_device = params.get("disk_device", "disk")
    disk_type = params.get("disk_type", "file")
    disk_size = params.get("disk_size", "100M")
    device_target = params.get("device_target", "vdb")
    driver_name = params.get("driver_name", "qemu")
    driver_type = params.get("driver_type", "raw")
    target_bus = params.get("target_bus", "virtio")
    vd_format = params.get("vd_format", "")
    snapshot_dir = params.get("snapshot_dir", "/tmp")
    snapshot_name = params.get("snapshot_name", "s1")
    pool_name = params.get("pool_name", "")
    pool_target = params.get("pool_target", "/dev")
    snapshot_disk_only = "yes" == params.get("snapshot_disk_only", "no")
    new_vhbas = []
    current_vhbas = []
    new_vhba = []
    path_to_blk = ""
    lun_sl = []
    new_disk = ""
    pool_ins = None
    old_mpath_conf = ""
    mpath_conf_path = "/etc/multipath.conf"
    original_mpath_conf_exist = os.path.exists(mpath_conf_path)

    vm = env.get_vm(vm_name)
    online_hbas = utils_npiv.find_hbas("hba")
    if not online_hbas:
        raise exceptions.TestSkipError("There is no online hba cards.")
    old_mpath_conf = utils_npiv.prepare_multipath_conf(
        conf_path=mpath_conf_path, replace_existing=True)
    first_online_hba = online_hbas[0]
    old_vhbas = utils_npiv.find_hbas("vhba")
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    virt_vm = libvirt_vm.VM(vm_name, vm.params, vm.root_dir, vm.address_cache)
    old_disks = virt_vm.get_disks()

    if vm.is_alive():
        vm.destroy(gracefully=False)
    if pool_name:
        pool_ins = libvirt_storage.StoragePool()
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()
    try:
        # prepare a fc lun
        if vd_format in ['scsi_vol']:
            if pool_ins.pool_exists(pool_name):
                raise exceptions.TestFail("Pool %s already exist" % pool_name)
            prepare_scsi_pool(pool_name, wwnn, wwpn, first_online_hba,
                              pool_target)
            utils_misc.wait_for(lambda: utils_npiv.is_vhbas_added(old_vhbas),
                                timeout=_TIMEOUT)
            if not utils_npiv.is_vhbas_added(old_vhbas):
                raise exceptions.TestFail("vHBA not successfully created")
            current_vhbas = utils_npiv.find_hbas("vhba")
            new_vhba = list(set(current_vhbas).difference(set(old_vhbas)))[0]
            new_vhbas.append(new_vhba)
            new_vhba_scsibus = re.sub(r"\D", "", new_vhba)
            utils_misc.wait_for(lambda: get_blks_by_scsi(new_vhba_scsibus),
                                timeout=_TIMEOUT)
            new_blks = get_blks_by_scsi(new_vhba_scsibus)
            if not new_blks:
                raise exceptions.TestFail(
                    "block device not found with scsi_%s" % new_vhba_scsibus)
            vol_list = utlv.get_vol_list(pool_name,
                                         vol_check=True,
                                         timeout=_TIMEOUT * 3)
            path_to_blk = list(vol_list.values())[0]
        elif vd_format in ['mpath', 'by_path']:
            old_mpath_devs = utils_npiv.find_mpath_devs()
            new_vhba = utils_npiv.nodedev_create_from_xml({
                "nodedev_parent": first_online_hba,
                "scsi_wwnn": wwnn,
                "scsi_wwpn": wwpn
            })
            utils_misc.wait_for(lambda: utils_npiv.is_vhbas_added(old_vhbas),
                                timeout=_TIMEOUT * 2)
            if not new_vhba:
                raise exceptions.TestFail("vHBA not successfully generated.")
            new_vhbas.append(new_vhba)
            if vd_format == "mpath":
                utils_misc.wait_for(
                    lambda: utils_npiv.is_mpath_devs_added(old_mpath_devs),
                    timeout=_TIMEOUT * 5)
                if not utils_npiv.is_mpath_devs_added(old_mpath_devs):
                    raise exceptions.TestFail("mpath dev not generated.")
                cur_mpath_devs = utils_npiv.find_mpath_devs()
                new_mpath_devs = list(
                    set(cur_mpath_devs).difference(set(old_mpath_devs)))
                logging.debug("The newly added mpath dev is: %s",
                              new_mpath_devs)
                path_to_blk = "/dev/mapper/" + new_mpath_devs[0]
            elif vd_format == "by_path":
                new_vhba_scsibus = re.sub(r"\D", "", new_vhba)
                utils_misc.wait_for(lambda: get_blks_by_scsi(new_vhba_scsibus),
                                    timeout=_TIMEOUT)
                new_blks = get_blks_by_scsi(new_vhba_scsibus)
                if not new_blks:
                    raise exceptions.TestFail("blk dev not found with scsi_%s",
                                              new_vhba_scsibus)
                first_blk_dev = new_blks[0]
                utils_misc.wait_for(lambda: get_symbols_by_blk(first_blk_dev),
                                    timeout=_TIMEOUT)
                lun_sl = get_symbols_by_blk(first_blk_dev)
                if not lun_sl:
                    raise exceptions.TestFail(
                        "lun symbolic links not found in "
                        "/dev/disk/by-path/ for %s" % first_blk_dev)
                lun_dev = lun_sl[0]
                path_to_blk = os.path.join(_BY_PATH_DIR, lun_dev)
            else:
                pass
        else:
            raise exceptions.TestSkipError("Not provided how to pass"
                                           "virtual disk to VM.")

        # create qcow2 file on the block device with specified size
        if path_to_blk:
            cmd = "qemu-img create -f qcow2 %s %s" % (path_to_blk, disk_size)
            try:
                process.run(cmd, shell=True)
            except process.CmdError as detail:
                raise exceptions.TestFail(
                    "Failed to create qcow2 on blk dev: %s" % detail)
        else:
            raise exceptions.TestFail("Don't have a valid path to blk dev.")

        # prepare disk xml
        if "vol" in vd_format:
            vol_list = utlv.get_vol_list(pool_name,
                                         vol_check=True,
                                         timeout=_TIMEOUT * 3)
            test_vol = list(vol_list.keys())[0]
            disk_params = {
                'type_name': disk_type,
                'target_dev': device_target,
                'target_bus': target_bus,
                'source_pool': pool_name,
                'source_volume': test_vol,
                'driver_type': driver_type
            }
        else:
            disk_params = {
                'type_name': disk_type,
                'device': disk_device,
                'driver_name': driver_name,
                'driver_type': driver_type,
                'source_file': path_to_blk,
                'target_dev': device_target,
                'target_bus': target_bus
            }
        if vm.is_alive():
            vm.destroy(gracefully=False)
        new_disk = disk.Disk()
        new_disk.xml = open(utlv.create_disk_xml(disk_params)).read()

        # start vm with the virtual disk
        vmxml.devices = vmxml.devices.append(new_disk)
        vmxml.sync()
        vm.start()
        session = vm.wait_for_login()
        cur_disks = virt_vm.get_disks()
        mount_disk = "".join(list(set(old_disks) ^ set(cur_disks)))

        # mkfs and mount disk in vm, create a file on that disk.
        if not mount_disk:
            logging.debug("old_disk: %s, new_disk: %s", old_disks, cur_disks)
            raise exceptions.TestFail("No new disk found in vm.")
        mkfs_and_mount(session, mount_disk)
        create_file_in_vm(session, "/mnt/before_snapshot.txt", "before")

        # virsh snapshot-create-as vm s --disk-only --diskspec vda,file=path
        if snapshot_disk_only:
            vm_blks = list(vm.get_disk_devices().keys())
            options = "%s --disk-only" % snapshot_name
            for vm_blk in vm_blks:
                snapshot_file = snapshot_dir + "/" + vm_blk + "." + snapshot_name
                if os.path.exists(snapshot_file):
                    os.remove(snapshot_file)
                options = options + " --diskspec %s,file=%s" % (vm_blk,
                                                                snapshot_file)
        else:
            options = snapshot_name
        utlv.check_exit_status(virsh.snapshot_create_as(vm_name, options))

        # check virsh snapshot-list
        logging.debug("Running: snapshot-list %s", vm_name)
        snapshot_list = virsh.snapshot_list(vm_name)
        logging.debug("snapshot list is: %s", snapshot_list)
        if not snapshot_list:
            raise exceptions.TestFail("snapshots not found after creation.")

        # snapshot-revert doesn't support external snapshot for now. so
        # only check this with internal snapshot.
        if not snapshot_disk_only:
            create_file_in_vm(session, "/mnt/after_snapshot.txt", "after")
            logging.debug("Running: snapshot-revert %s %s", vm_name,
                          snapshot_name)
            utlv.check_exit_status(
                virsh.snapshot_revert(vm_name, snapshot_name))
            session = vm.wait_for_login()
            file_existence, file_content = get_file_in_vm(
                session, "/mnt/after_snapshot.txt")
            logging.debug("file exist = %s, file content = %s", file_existence,
                          file_content)
            if file_existence:
                raise exceptions.TestFail("The file created "
                                          "after snapshot still exists.")
            file_existence, file_content = get_file_in_vm(
                session, "/mnt/before_snapshot.txt")
            logging.debug("file eixst = %s, file content = %s", file_existence,
                          file_content)
            if ((not file_existence) or (file_content.strip() != "before")):
                raise exceptions.TestFail("The file created "
                                          "before snapshot is lost.")
        # delete snapshots
        # if diskonly, delete --metadata and remove files
        # if not diskonly, delete snapshot
        if snapshot_disk_only:
            options = "--metadata"
        else:
            options = ""
        for snap in snapshot_list:
            logging.debug("deleting snapshot %s with options %s", snap,
                          options)
            result = virsh.snapshot_delete(vm_name, snap, options)
            logging.debug("result of snapshot-delete: %s",
                          result.stdout.strip())
            if snapshot_disk_only:
                vm_blks = list(vm.get_disk_devices().keys())
                for vm_blk in vm_blks:
                    snapshot_file = snapshot_dir + "/" + vm_blk + "." + snap
                    if os.path.exists(snapshot_file):
                        os.remove(snapshot_file)
        snapshot_list = virsh.snapshot_list(vm_name)
        if snapshot_list:
            raise exceptions.TestFail("Snapshot not deleted: %s",
                                      snapshot_list)
    except Exception as detail:
        raise exceptions.TestFail("exception happens: %s", detail)
    finally:
        logging.debug("Start to clean up env...")
        vmxml_backup.sync()
        if pool_ins and pool_ins.pool_exists(pool_name):
            virsh.pool_destroy(pool_name)
        for new_vhba in new_vhbas:
            virsh.nodedev_destroy(new_vhba)
        utils_npiv.restart_multipathd()
        if old_mpath_conf:
            utils_npiv.prepare_multipath_conf(conf_path=mpath_conf_path,
                                              conf_content=old_mpath_conf,
                                              replace_existing=True)
        if not original_mpath_conf_exist and os.path.exists(mpath_conf_path):
            os.remove(mpath_conf_path)
Exemplo n.º 43
0
def run(test, params, env):
    """
    Test blkdevio tuning

    Positive test has covered the following combination.
    -------------------------
    | total | read  | write |
    -------------------------
    |   0   |   0   |   0   |
    | non-0 |   0   |   0   |
    |   0   | non-0 | non-0 |
    |   0   | non-0 |  0    |
    |   0   |   0   | non-0 |
    -------------------------

    Negative test has covered unsupported combination and
    invalid command arguments.

    NB: on RHEL >= 6.5 only qemu-kvm-rhev supports block I/O throttling,
    while on RHEL >= 7.0 plain qemu-kvm supports it as well.
    """

    # Run test case
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    start_vm = params.get("start_vm", "yes")
    change_parameters = params.get("change_parameters", "no")
    attach_disk = "yes" == params.get("attach_disk", "no")
    attach_before_start = "yes" == params.get("attach_before_start", "yes")
    disk_type = params.get("disk_type", 'file')
    disk_format = params.get("disk_format", 'qcow2')
    disk_bus = params.get("disk_bus", 'virtio')
    disk_alias = params.get("disk_alias")
    attach_options = params.get("attach_options")
    slice_test = "yes" == params.get("disk_slice_enabled", "yes")
    test_size = params.get("test_size", "1")

    original_vm_xml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Used for default device of blkdeviotune
    device = params.get("device_name", "vmblk")
    sys_image_target = vm.get_first_disk_devices()["target"]

    # Make sure vm is down if start not requested
    if (start_vm == "no" or attach_before_start) and vm and vm.is_alive():
        vm.destroy()

    disk_source = tempfile.mktemp(dir=data_dir.get_tmp_dir())
    params["input_source_file"] = disk_source
    params["disk_slice"] = {"slice_test": "yes"}
    if attach_disk and not slice_test:
        libvirt.create_local_disk(disk_type,
                                  path=disk_source,
                                  size='1',
                                  disk_format=disk_format)
        attach_extra = ""
        if disk_alias:
            attach_extra += " --alias %s" % disk_alias
        if disk_bus:
            attach_extra += " --targetbus %s" % disk_bus
        if disk_format:
            attach_extra += " --subdriver %s" % disk_format
        if attach_options:
            attach_extra += " %s" % attach_options
    test_dict = dict(params)
    test_dict['vm'] = vm
    # Coldplug disk with slice image
    if attach_disk and slice_test and attach_before_start:
        libvirt.create_local_disk(disk_type="file",
                                  extra=" -o preallocation=full",
                                  path=disk_source,
                                  disk_format="qcow2",
                                  size=test_size)
        disk_xml = libvirt.create_disk_xml(params)
        ret = virsh.attach_device(vm_name, disk_xml, flagstr="--config")
        libvirt.check_exit_status(ret)
    # Coldplug disk without slice image
    if attach_disk and attach_before_start and not slice_test:
        ret = virsh.attach_disk(vm_name,
                                disk_source,
                                device,
                                extra=attach_extra,
                                debug=True)
        libvirt.check_exit_status(ret)
    # Recover previous running guest
    if vm and not vm.is_alive() and start_vm == "yes":
        try:
            vm.start()
            vm.wait_for_login().close()
        except (virt_vm.VMError, remote.LoginError) as detail:
            vm.destroy()
            test.fail(str(detail))

    # Hotplug disk with slice image
    if attach_disk and slice_test and not attach_before_start:
        libvirt.create_local_disk(disk_type="file",
                                  extra=" -o preallocation=full",
                                  path=disk_source,
                                  disk_format="qcow2",
                                  size=test_size)
        disk_xml = libvirt.create_disk_xml(params)
        ret = virsh.attach_device(vm_name, disk_xml, flagstr="")
        libvirt.check_exit_status(ret)

    # Hotplug disk without slice image
    if attach_disk and not attach_before_start and not slice_test:
        ret = virsh.attach_disk(vm_name,
                                disk_source,
                                device,
                                extra=attach_extra,
                                debug=True)
        libvirt.check_exit_status(ret)

    if device == "vmblk":
        test_dict['device_name'] = sys_image_target

    # Make sure libvirtd service is running
    if not utils_libvirtd.libvirtd_is_running():
        test.cancel("libvirt service is not running!")

    # Positive and negative testing
    try:
        if change_parameters == "no":
            get_blkdevio_parameter(test_dict, test)
        else:
            set_blkdevio_parameter(test_dict, test)
    finally:
        # Restore guest
        original_vm_xml.sync()
        libvirt.delete_local_disk('file', path=disk_source)
Exemplo n.º 44
0
def run(test, params, env):
    """
    Test the pull-mode backup function

    Steps:
    1. create a vm with extra disk vdb
    2. create some data on vdb
    3. start a pull mode full backup on vdb
    4. monitor block-threshold event on scratch file/dev
    5. create some data on vdb's same position as step 2 to trigger event
    6. check the block-threshold event captured
    """

    # Basic case config
    hotplug_disk = "yes" == params.get("hotplug_disk", "no")
    original_disk_size = params.get("original_disk_size", "100M")
    original_disk_type = params.get("original_disk_type", "local")
    original_disk_target = params.get("original_disk_target", "vdb")
    event_type = params.get("event_type")
    usage_threshold = params.get("usage_threshold", "100")
    tmp_dir = data_dir.get_tmp_dir()
    local_hostname = params.get("loal_hostname", "localhost")
    # Backup config
    scratch_type = params.get("scratch_type", "file")
    reuse_scratch_file = "yes" == params.get("reuse_scratch_file")
    scratch_blkdev_size = params.get("scratch_blkdev_size", original_disk_size)
    # NBD service config
    nbd_protocol = params.get("nbd_protocol", "unix")
    nbd_socket = params.get("nbd_socket", "/tmp/pull_backup.socket")
    nbd_tcp_port = params.get("nbd_tcp_port", "10809")
    nbd_hostname = local_hostname
    # LUKS config
    scratch_luks_encrypted = "yes" == params.get("scratch_luks_encrypted")
    luks_passphrase = params.get("luks_passphrase", "password")
    # Open a new virsh session for event monitor
    virsh_session = aexpect.ShellSession(virsh.VIRSH_EXEC, auto_close=True)
    # Cancel the test if libvirt doesn't support related functions
    if not libvirt_version.version_compare(7, 0, 0):
        test.cancel("Current libvirt version doesn't support "
                    "event monitor for incremental backup.")

    def get_backup_disk_index(vm_name, disk_name):
        """
        Get the index of the backup disk to be monitored by the virsh event

        :param vm_name: vm name
        :param disk_name: virtual disk name, such as 'vdb'
        :return: the index of the virtual disk in backup xml
        """
        backup_xml = virsh.backup_dumpxml(vm_name).stdout.strip()
        logging.debug("%s's current backup xml is: %s" % (vm_name, backup_xml))
        backup_xml_dom = xml_utils.XMLTreeFile(backup_xml)
        index_xpath = "/disks/disk"
        for disk_element in backup_xml_dom.findall(index_xpath):
            if disk_element.get("name") == disk_name:
                return disk_element.get("index")

    def is_event_captured(virsh_session, re_pattern):
        """
        Check if event captured

        :param virsh_session: the virsh session of the event monitor
        :param re_pattern: the re pattern used to represent the event
        :return: True means event captured, False means not
        """
        ret_output = virsh_session.get_stripped_output()
        if (not re.search(re_pattern, ret_output, re.IGNORECASE)):
            return False
        logging.debug("event monitor output: %s", ret_output)
        return True

    try:
        vm_name = params.get("main_vm")
        vm = env.get_vm(vm_name)

        # Make sure there is no checkpoint metadata before test
        utils_backup.clean_checkpoints(vm_name)

        # Backup vm xml
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        vmxml_backup = vmxml.copy()
        utils_backup.enable_inc_backup_for_vm(vm)

        # Prepare libvirt secret
        if scratch_luks_encrypted:
            utils_secret.clean_up_secrets()
            luks_secret_uuid = libvirt.create_secret(params)
            virsh.secret_set_value(luks_secret_uuid,
                                   luks_passphrase,
                                   encode=True,
                                   debug=True)

        # Prepare the disk to be backed up.
        disk_params = {}
        disk_path = ""
        image_name = "{}_image.qcow2".format(original_disk_target)
        disk_path = os.path.join(tmp_dir, image_name)
        libvirt.create_local_disk("file", disk_path, original_disk_size,
                                  "qcow2")
        disk_params = {
            "device_type": "disk",
            "type_name": "file",
            "driver_type": "qcow2",
            "target_dev": original_disk_target,
            "source_file": disk_path
        }
        disk_params["target_dev"] = original_disk_target
        disk_xml = libvirt.create_disk_xml(disk_params)
        virsh.attach_device(vm.name, disk_xml, flagstr="--config", debug=True)
        vm.start()
        session = vm.wait_for_login()
        new_disks_in_vm = list(utils_disk.get_linux_disks(session).keys())
        session.close()
        if len(new_disks_in_vm) != 1:
            test.fail("Test disk not prepared in vm")

        # Use the newly added disk as the test disk
        test_disk_in_vm = "/dev/" + new_disks_in_vm[0]

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        vm_disks = list(vmxml.get_disk_all().keys())

        checkpoint_list = []
        backup_file_list = []

        # Prepare backup xml
        backup_params = {"backup_mode": "pull"}
        # Set libvirt default nbd export name and bitmap name
        nbd_export_name = original_disk_target
        nbd_bitmap_name = "backup-" + original_disk_target

        backup_server_dict = {}
        if nbd_protocol == "unix":
            backup_server_dict["transport"] = "unix"
            backup_server_dict["socket"] = nbd_socket
        else:
            backup_server_dict["name"] = nbd_hostname
            backup_server_dict["port"] = nbd_tcp_port
        backup_params["backup_server"] = backup_server_dict
        backup_disk_xmls = []
        for vm_disk in vm_disks:
            backup_disk_params = {"disk_name": vm_disk}
            if vm_disk != original_disk_target:
                backup_disk_params["enable_backup"] = "no"
            else:
                backup_disk_params["enable_backup"] = "yes"
                backup_disk_params["disk_type"] = scratch_type
                # Prepare nbd scratch file/dev params
                scratch_params = {"attrs": {}}
                scratch_path = None
                if scratch_type == "file":
                    scratch_file_name = "scratch_file"
                    scratch_path = os.path.join(tmp_dir, scratch_file_name)
                    if reuse_scratch_file:
                        libvirt.create_local_disk("file", scratch_path,
                                                  original_disk_size, "qcow2")
                    scratch_params["attrs"]["file"] = scratch_path
                elif scratch_type == "block":
                    scratch_path = libvirt.setup_or_cleanup_iscsi(
                        is_setup=True, image_size=scratch_blkdev_size)
                    scratch_params["attrs"]["dev"] = scratch_path
                else:
                    test.fail("We do not support backup scratch type: '%s'" %
                              scratch_type)
                if scratch_luks_encrypted:
                    encryption_dict = {
                        "encryption": "luks",
                        "secret": {
                            "type": "passphrase",
                            "uuid": luks_secret_uuid
                        }
                    }
                    scratch_params["encryption"] = encryption_dict
                logging.debug("scratch params: %s", scratch_params)
                backup_disk_params["backup_scratch"] = scratch_params

            backup_disk_xml = utils_backup.create_backup_disk_xml(
                backup_disk_params)
            backup_disk_xmls.append(backup_disk_xml)
        logging.debug("disk list %s", backup_disk_xmls)
        backup_xml = utils_backup.create_backup_xml(backup_params,
                                                    backup_disk_xmls)
        logging.debug("Backup Xml: %s", backup_xml)

        # Prepare checkpoint xml
        checkpoint_name = "checkpoint"
        checkpoint_list.append(checkpoint_name)
        cp_params = {"checkpoint_name": checkpoint_name}
        cp_params["checkpoint_desc"] = params.get("checkpoint_desc",
                                                  "desc of cp")
        disk_param_list = []
        for vm_disk in vm_disks:
            cp_disk_param = {"name": vm_disk}
            if vm_disk != original_disk_target:
                cp_disk_param["checkpoint"] = "no"
            else:
                cp_disk_param["checkpoint"] = "bitmap"
                cp_disk_bitmap = params.get("cp_disk_bitmap")
                if cp_disk_bitmap:
                    cp_disk_param["bitmap"] = cp_disk_bitmap
            disk_param_list.append(cp_disk_param)
        checkpoint_xml = utils_backup.create_checkpoint_xml(
            cp_params, disk_param_list)
        logging.debug("Checkpoint Xml: %s", checkpoint_xml)

        # Generate some random data in vm's test disk
        def dd_data_to_testdisk():
            """
            Generate some data to vm's test disk
            """
            dd_count = "1"
            dd_seek = "10"
            dd_bs = "1M"
            session = vm.wait_for_login()
            utils_disk.dd_data_to_vm_disk(session, test_disk_in_vm, dd_bs,
                                          dd_seek, dd_count)
            session.close()

        dd_data_to_testdisk()

        # Start backup
        backup_options = backup_xml.xml + " " + checkpoint_xml.xml
        if reuse_scratch_file:
            backup_options += " --reuse-external"
        backup_result = virsh.backup_begin(vm_name, backup_options, debug=True)

        # Start to monitor block-threshold of backup disk's scratch file/dev
        backup_disk_index = get_backup_disk_index(vm_name,
                                                  original_disk_target)
        if not backup_disk_index:
            test.fail("Backup xml has no index for disks.")
        backup_disk_obj = original_disk_target + "[%s]" % backup_disk_index
        virsh.domblkthreshold(vm_name, backup_disk_obj, usage_threshold)
        event_cmd = "event %s %s --loop" % (vm_name, event_type)
        virsh_session.sendline(event_cmd)
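        # A hedged note: once the written data pushes the scratch usage over
        # the threshold, the 'virsh event --loop' session is expected to print
        # a line roughly of the form
        #   event 'block-threshold' for domain <vm_name>: dev: <target>[<index>](<path>) <threshold> <excess>
        # which is what the event_pattern regex below is matched against.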

        # Write some random data to the same position of the vm's test disk
        dd_data_to_testdisk()

        # Check if the block-threshold event was captured by the monitor
        if event_type == "block-threshold":
            event_pattern = (r".*block-threshold.*%s.*%s\[%s\].* %s .*" %
                             (vm_name, original_disk_target, backup_disk_index,
                              usage_threshold))
        if not utils_misc.wait_for(
                lambda: is_event_captured(virsh_session, event_pattern), 10):
            test.fail("Event not captured by event monitor")

        # Abort backup job
        virsh.domjobabort(vm_name, debug=True)

    finally:
        # Remove checkpoints
        if "checkpoint_list" in locals() and checkpoint_list:
            for checkpoint_name in checkpoint_list:
                virsh.checkpoint_delete(vm_name, checkpoint_name)

        if vm.is_alive():
            vm.destroy(gracefully=False)

        # Restoring vm
        vmxml_backup.sync()

        # Remove libvirt secret
        if "luks_secret_uuid" in locals():
            virsh.secret_undefine(luks_secret_uuid, ignore_status=True)

        # Remove iscsi devices
        if scratch_type == "block":
            libvirt.setup_or_cleanup_iscsi(False)

        # Remove scratch file
        if "scratch_path" in locals():
            if scratch_type == "file" and os.path.exists(scratch_path):
                os.remove(scratch_path)
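
The is_event_captured helper used above is not shown in this excerpt; a minimal sketch of what such a helper could look like, assuming an aexpect/virsh interactive event session (the get_stripped_output call and the logging are illustrative assumptions, not the original implementation):

import logging
import re


def is_event_captured(virsh_session, event_pattern):
    """
    Illustrative sketch: check whether the expected event line has already
    shown up in the output of the interactive 'virsh event --loop' session.
    """
    # Assumption: the session object exposes get_stripped_output(), which
    # returns the accumulated output without command echo and prompt.
    output = virsh_session.get_stripped_output()
    if re.search(event_pattern, output):
        logging.debug("Event matched pattern: %s", event_pattern)
        return True
    return False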
Exemplo n.º 45
0
 if attach_device:
     if create_volume:
         params.update({"source_pool": pool_name})
         params.update({"type_name": "volume"})
         # Auth/secret options are not needed for a volume-type disk
         if "auth_user" in params:
             params.pop("auth_user")
         if "auth_type" in params:
             params.pop("auth_type")
         if "secret_type" in params:
             params.pop("secret_type")
         if "secret_uuid" in params:
             params.pop("secret_uuid")
         if "secret_usage" in params:
             params.pop("secret_usage")
     xml_file = libvirt.create_disk_xml(params)
     opts = params.get("attach_option", "")
     ret = virsh.attach_device(vm_name,
                               xml_file,
                               flagstr=opts,
                               debug=True)
     libvirt.check_result(ret, skip_if=unsupported_err)
     if additional_guest:
         ret = virsh.attach_device(guest_name, xml_file, "", debug=True)
         libvirt.check_result(ret, skip_if=unsupported_err)
 elif attach_disk:
     opts = params.get("attach_option", "")
     ret = virsh.attach_disk(vm_name, disk_path, targetdev, opts)
     libvirt.check_result(ret, skip_if=unsupported_err)
 elif test_disk_readonly:
     params.update({'readonly': "yes"})
Exemplo n.º 46
0
def run(test, params, env):
    """
    Test command: virsh pool-define; pool-start; vol-list pool;
    attach-device LUN to guest; mount the device, dd; unmount;
    reboot guest; mount the device, dd again; pool-destroy; pool-undefine.

    Create a libvirt npiv pool from an XML file.

    Pre-requisite:
    The host needs to have a wwnn and wwpn of a vHBA which is zoned and
    mapped to a SAN controller.
    """
    pool_xml_f = params.get("pool_create_xml_file", "/PATH/TO/POOL.XML")
    pool_name = params.get("pool_create_name", "virt_test_pool_tmp")
    pre_def_pool = "yes" == params.get("pre_def_pool", "no")
    pool_type = params.get("pool_type", "dir")
    source_format = params.get("pool_src_format", "")
    source_name = params.get("pool_source_name", "")
    source_path = params.get("pool_source_path", "/")
    pool_target = params.get("pool_target", "pool_target")
    pool_adapter_type = params.get("pool_adapter_type", "")
    pool_adapter_parent = params.get("pool_adapter_parent", "")
    target_device = params.get("pool_target_device", "sdc")
    pool_wwnn = params.get("pool_wwnn", "WWNN_EXAMPLE")
    pool_wwpn = params.get("pool_wwpn", "WWPN_EXAMPLE")
    test_unit = None
    mount_disk = None

    if 'EXAMPLE' in pool_wwnn or 'EXAMPLE' in pool_wwpn:
        raise exceptions.TestSkipError("Please provide proper WWPN/WWNN")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    if not vm.is_alive():
        vm.start()
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    libvirt_vm = lib_vm.VM(vm_name, vm.params, vm.root_dir,
                           vm.address_cache)

    pool_ins = libvirt_storage.StoragePool()
    if pre_def_pool and pool_ins.pool_exists(pool_name):
        raise exceptions.TestFail("Pool %s already exist" % pool_name)
    online_hbas_list = nodedev.find_hbas("hba")
    logging.debug("The online hbas are: %s", online_hbas_list)

    # If there are no online HBA cards on the host, skip the test
    if not online_hbas_list:
        raise exceptions.TestSkipError("Host doesn't have online hba cards")
    else:
        if pool_adapter_parent == "":
            pool_adapter_parent = online_hbas_list[0]

    kwargs = {'source_path': source_path,
              'source_name': source_name,
              'source_format': source_format,
              'pool_adapter_type': pool_adapter_type,
              'pool_adapter_parent': pool_adapter_parent,
              'pool_wwnn': pool_wwnn,
              'pool_wwpn': pool_wwpn}

    pvt = utlv.PoolVolumeTest(test, params)
    emulated_image = "emulated-image"
    old_vhbas = nodedev.find_hbas("vhba")
    try:
        pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image,
                     **kwargs)
        utils_misc.wait_for(
            lambda: nodedev.is_vhbas_added(old_vhbas), _DELAY_TIME)
        virsh.pool_dumpxml(pool_name, to_file=pool_xml_f)
        virsh.pool_destroy(pool_name)
    except Exception as e:
        pvt.cleanup_pool(pool_name, pool_type, pool_target,
                         emulated_image, **kwargs)
        raise exceptions.TestError(
            "Error occurred while preparing the pool xml:\n %s" % e)
    if os.path.exists(pool_xml_f):
        with open(pool_xml_f, 'r') as f:
            logging.debug("Create pool from file:\n %s", f.read())

    try:
        cmd_result = virsh.pool_define(pool_xml_f, ignore_status=True,
                                       debug=True)
        utlv.check_exit_status(cmd_result)

        cmd_result = virsh.pool_start(pool_name)
        utlv.check_exit_status(cmd_result)
        utlv.check_actived_pool(pool_name)
        pool_detail = libvirt_xml.PoolXML.get_pool_details(pool_name)
        logging.debug("Pool detail: %s", pool_detail)

        vol_list = utlv.get_vol_list(pool_name, timeout=10)
        test_unit = list(vol_list.keys())[0]
        logging.info(
            "Using the first LUN unit %s to attach to a guest", test_unit)

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        session = vm.wait_for_login()
        output = session.cmd_status_output('lsblk')
        logging.debug("%s", output[1])
        old_count = vmxml.get_disk_count(vm_name)
        bf_disks = libvirt_vm.get_disks()
        disk_params = {'type_name': 'volume', 'target_dev': target_device,
                       'target_bus': 'virtio', 'source_pool': pool_name,
                       'source_volume': test_unit, 'driver_type': 'raw'}
        disk_xml = os.path.join(data_dir.get_tmp_dir(), 'disk_xml.xml')
        lun_disk_xml = utlv.create_disk_xml(disk_params)

        copyfile(lun_disk_xml, disk_xml)
        attach_success = virsh.attach_device(
            vm_name, disk_xml, debug=True)

        utlv.check_exit_status(attach_success)

        virsh.reboot(vm_name, debug=True)

        logging.info("Checking disk availability in domain")
        if not vmxml.get_disk_count(vm_name):
            raise exceptions.TestFail("No disk in domain %s." % vm_name)
        new_count = vmxml.get_disk_count(vm_name)

        if new_count <= old_count:
            raise exceptions.TestFail(
                "Failed to attach disk %s" % lun_disk_xml)

        session = vm.wait_for_login()
        output = session.cmd_status_output('lsblk')
        logging.debug("%s", output[1])
        logging.debug("Disks before attach: %s", bf_disks)

        af_disks = libvirt_vm.get_disks()
        logging.debug("Disks after attach: %s", af_disks)

        mount_disk = "".join(list(set(bf_disks) ^ set(af_disks)))
        if not mount_disk:
            raise exceptions.TestFail("Can not get attached device in vm.")
        logging.debug("Attached device in vm:%s", mount_disk)

        logging.debug("Creating file system for %s", mount_disk)
        output = session.cmd_status_output(
            'echo yes | mkfs.ext4 %s' % mount_disk)
        logging.debug("%s", output[1])
        if mount_disk:
            mount_success = mount_and_dd(session, mount_disk)
            if not mount_success:
                raise exceptions.TestFail("Mount failed")
        else:
            raise exceptions.TestFail("Partition not available for disk")

        logging.debug("Unmounting disk")
        session.cmd_status_output('umount %s' % mount_disk)

        virsh.reboot(vm_name, debug=True)

        session = vm.wait_for_login()
        output = session.cmd_status_output('mount')
        logging.debug("%s", output[1])
        mount_success = mount_and_dd(session, mount_disk)
        if not mount_success:
            raise exceptions.TestFail("Mount failed")

        logging.debug("Unmounting disk")
        session.cmd_status_output('umount %s' % mount_disk)
        session.close()

        detach_status = virsh.detach_device(vm_name, disk_xml,
                                            debug=True)
        utlv.check_exit_status(detach_status)

    finally:
        vm.destroy(gracefully=False)
        vmxml_backup.sync()
        logging.debug('Destroying pool %s', pool_name)
        virsh.pool_destroy(pool_name)
        logging.debug('Undefining pool %s', pool_name)
        virsh.pool_undefine(pool_name)
        pvt.cleanup_pool(pool_name, pool_type, pool_target,
                         emulated_image, **kwargs)
        if os.path.exists(pool_xml_f):
            os.remove(pool_xml_f)
        if 'disk_xml' in locals() and os.path.exists(disk_xml):
            logging.debug("Cleaning up the disk xml")
            data_dir.clean_tmp_files()
def run(test, params, env):
    """
    Test virsh {at|de}tach-disk command.

    The command can attach new disk/detach disk.
    1.Prepare test environment,destroy or suspend a VM.
    2.Perform virsh attach/detach-disk operation.
    3.Recover test environment.
    4.Confirm the test result.
    """

    def check_vm_partition(vm, device, os_type, target_name, old_parts):
        """
        Check VM disk's partition.

        :param vm. VM guest.
        :param os_type. VM's operation system type.
        :param target_name. Device target type.
        :return: True if check successfully.
        """
        logging.info("Checking VM partittion...")
        if vm.is_dead():
            vm.start()
        try:
            attached = False
            if os_type == "linux":
                session = vm.wait_for_login()
                new_parts = libvirt.get_parts_list(session)
                added_parts = list(set(new_parts).difference(set(old_parts)))
                logging.debug("Added parts: %s" % added_parts)
                for i in range(len(added_parts)):
                    if device == "disk":
                        if target_name.startswith("vd"):
                            if added_parts[i].startswith("vd"):
                                attached = True
                        elif target_name.startswith("hd") or target_name.startswith("sd"):
                            if added_parts[i].startswith("sd"):
                                attached = True
                    elif device == "cdrom":
                        if added_parts[i].startswith("sr"):
                            attached = True
                session.close()
            return attached
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    def acpiphp_module_modprobe(vm, os_type):
        """
        Add the acpiphp module if the VM's os type is rhel5.*

        :param vm: VM guest.
        :param os_type: VM's operating system type.
        :return: True if the operation succeeds.
        """
        if vm.is_dead():
            vm.start()
        try:
            if os_type == "linux":
                session = vm.wait_for_login()
                s_rpm, _ = session.cmd_status_output(
                    "rpm --version")
                # If status is different from 0, this
                # guest OS doesn't support the rpm package
                # manager
                if s_rpm:
                    session.close()
                    return True
                _, o_vd = session.cmd_status_output(
                    "rpm -qa | grep redhat-release")
                if o_vd.find("5Server") != -1:
                    s_mod, o_mod = session.cmd_status_output(
                        "modprobe acpiphp")
                    del o_mod
                    if s_mod != 0:
                        session.close()
                        return False
                session.close()
            return True
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    def check_shareable(at_with_shareable, test_twice):
        """
        Check whether the current libvirt version supports the shareable option.

        :param at_with_shareable: True or False. Whether to attach the disk with the shareable option.
        :param test_twice: True or False. Whether to perform the operations twice.
        :return: True if supported; otherwise the test is cancelled.
        """
        if at_with_shareable or test_twice:
            if libvirt_version.version_compare(3, 9, 0):
                return True
            else:
                test.cancel("Current libvirt version doesn't support shareable feature")

    # Get test command.
    test_cmd = params.get("at_dt_disk_test_cmd", "attach-disk")

    vm_ref = params.get("at_dt_disk_vm_ref", "name")
    at_options = params.get("at_dt_disk_at_options", "")
    dt_options = params.get("at_dt_disk_dt_options", "")
    at_with_shareable = "yes" == params.get("at_with_shareable", 'no')
    pre_vm_state = params.get("at_dt_disk_pre_vm_state", "running")
    status_error = "yes" == params.get("status_error", 'no')
    no_attach = params.get("at_dt_disk_no_attach", 'no')
    os_type = params.get("os_type", "linux")
    qemu_file_lock = params.get("qemu_file_lock", "")
    if qemu_file_lock:
        if utils_misc.compare_qemu_version(2, 9, 0):
            logging.info('From qemu-kvm-rhev 2.9.0, QEMU image locking is '
                         'enabled, which prevents multiple runs of QEMU or '
                         'qemu-img while an image is in use by a running VM.')
            if test_cmd == "detach-disk" or pre_vm_state == "shut off":
                test.cancel('This case is not supported.')
            else:
                logging.info('The expected result is failure instead of success')
                status_error = True

    # Disk specific attributes.
    device = params.get("at_dt_disk_device", "disk")
    device_source_name = params.get("at_dt_disk_device_source", "attach.img")
    device_source_format = params.get("at_dt_disk_device_source_format", "raw")
    device_target = params.get("at_dt_disk_device_target", "vdd")
    device_disk_bus = params.get("at_dt_disk_bus_type", "virtio")
    source_path = "yes" == params.get("at_dt_disk_device_source_path", "yes")
    create_img = "yes" == params.get("at_dt_disk_create_image", "yes")
    test_twice = "yes" == params.get("at_dt_disk_test_twice", "no")
    test_type = "yes" == params.get("at_dt_disk_check_type", "no")
    test_audit = "yes" == params.get("at_dt_disk_check_audit", "no")
    test_block_dev = "yes" == params.get("at_dt_disk_iscsi_device", "no")
    test_logcial_dev = "yes" == params.get("at_dt_disk_logical_device", "no")
    restart_libvirtd = "yes" == params.get("at_dt_disk_restart_libvirtd", "no")
    detach_disk_with_print_xml = "yes" == params.get("detach_disk_with_print_xml", "no")
    vg_name = params.get("at_dt_disk_vg", "vg_test_0")
    lv_name = params.get("at_dt_disk_lv", "lv_test_0")
    serial = params.get("at_dt_disk_serial", "")
    address = params.get("at_dt_disk_address", "")
    address2 = params.get("at_dt_disk_address2", "")
    cache_options = params.get("cache_options", "")
    time_sleep = params.get("time_sleep", 3)
    if check_shareable(at_with_shareable, test_twice):
        at_options += " --mode shareable"
    if serial:
        at_options += (" --serial %s" % serial)
    if address2:
        at_options_twice = at_options + (" --address %s" % address2)
    if address:
        at_options += (" --address %s" % address)
    if cache_options:
        if cache_options.count("directsync"):
            if not libvirt_version.version_compare(1, 0, 0):
                test.cancel("'directsync' cache option doesn't "
                            "support in current libvirt version.")
        at_options += (" --cache %s" % cache_options)

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    # Start vm and get all partions in vm.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = libvirt.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file.
    backup_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Create virtual device file.
    device_source_path = os.path.join(data_dir.get_tmp_dir(), device_source_name)
    if test_block_dev:
        device_source = libvirt.setup_or_cleanup_iscsi(True)
        if not device_source:
            # We should skip this case
            test.cancel("Can not get iscsi device name in host")
        if test_logical_dev:
            lv_utils.vg_create(vg_name, device_source)
            device_source = libvirt.create_local_disk("lvm",
                                                      size="10M",
                                                      vgname=vg_name,
                                                      lvname=lv_name)
            logging.debug("New created volume: %s", lv_name)
    else:
        if source_path and create_img:
            device_source = libvirt.create_local_disk(
                "file", path=device_source_path,
                size="1G", disk_format=device_source_format)
        else:
            device_source = device_source_name

    # If we are testing audit, we need to start the auditd service first.
    if test_audit:
        auditd_service = Factory.create_service("auditd")
        if not auditd_service.status():
            auditd_service.start()
        logging.info("Auditd service status: %s" % auditd_service.status())

    # If we are testing cdrom device, we need to detach hdc in VM first.
    if device == "cdrom":
        if vm.is_alive():
            vm.destroy(gracefully=False)
        s_detach = virsh.detach_disk(vm_name, device_target, "--config")
        if s_detach.exit_status != 0:
            logging.error("Detach hdc failed before test.")

    # If we are testing detach-disk, we need to attach certain device first.
    if test_cmd == "detach-disk" and no_attach != "yes":
        s_at_options = "--driver qemu --config"
        # Since the lock feature was introduced in libvirt 3.9.0, the shareable
        # option needs to be set if the disk is to be attached multiple times.
        if check_shareable(at_with_shareable, test_twice):
            s_at_options += " --mode shareable"

        s_attach = virsh.attach_disk(vm_name, device_source, device_target,
                                     s_at_options, debug=True).exit_status
        if s_attach != 0:
            logging.error("Attaching device failed before testing detach-disk")
        else:
            logging.debug("Attaching device succeeded before testing detach-disk")
        if test_twice:
            device_target2 = params.get("at_dt_disk_device_target2",
                                        device_target)
            device_source = libvirt.create_local_disk(
                "file", path=device_source_path,
                size="1", disk_format=device_source_format)
            s_attach = virsh.attach_disk(vm_name, device_source, device_target2,
                                         s_at_options).exit_status
            if s_attach != 0:
                logging.error("Attaching device failed before testing "
                              "detach-disk test_twice")

    vm.start()
    vm.wait_for_login()

    # Add the acpiphp module before testing if the VM's os type is rhel5.*
    if not acpiphp_module_modprobe(vm, os_type):
        test.error("Add acpiphp module failed before test.")

    # Turn VM into certain state.
    if pre_vm_state == "paused":
        logging.info("Suspending %s..." % vm_name)
        if vm.is_alive():
            vm.pause()
    elif pre_vm_state == "shut off":
        logging.info("Shuting down %s..." % vm_name)
        if vm.is_alive():
            vm.destroy(gracefully=False)

    # Get disk count before test.
    disk_count_before_cmd = vm_xml.VMXML.get_disk_count(vm_name)

    # Test.
    domid = vm.get_id()
    domuuid = vm.get_uuid()

    # Confirm how to reference a VM.
    if vm_ref == "name":
        vm_ref = vm_name
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref == "uuid":
        vm_ref = domuuid
    else:
        vm_ref = ""

    if test_cmd == "attach-disk":
        status = virsh.attach_disk(vm_ref, device_source, device_target,
                                   at_options, debug=True).exit_status
    elif test_cmd == "detach-disk":
        # With the --print-xml option, detach-disk only prints the device XML
        # and does not actually detach the disk.
        if detach_disk_with_print_xml and libvirt_version.version_compare(4, 5, 0):
            ret = virsh.detach_disk(vm_ref, device_target, at_options)
            libvirt.check_exit_status(ret)
            cmd = ("echo \"%s\" | grep -A 16 %s"
                   % (ret.stdout.strip(), device_source_name))
            if process.system(cmd, ignore_status=True, shell=True):
                test.error("Check disk with source image name failed")
        status = virsh.detach_disk(vm_ref, device_target, dt_options,
                                   debug=True).exit_status

    if restart_libvirtd:
        libvirtd_serv = utils_libvirtd.Libvirtd()
        libvirtd_serv.restart()

    if test_twice:
        device_target2 = params.get("at_dt_disk_device_target2", device_target)
        device_source = libvirt.create_local_disk(
            "file", path=device_source_path,
            size="1G", disk_format=device_source_format)
        if test_cmd == "attach-disk":
            if address2:
                at_options = at_options_twice
            status = virsh.attach_disk(vm_ref, device_source,
                                       device_target2, at_options,
                                       debug=True).exit_status
        elif test_cmd == "detach-disk":
            status = virsh.detach_disk(vm_ref, device_target2, dt_options,
                                       debug=True).exit_status

    # Resume the guest after the command. On newer libvirt this has been fixed,
    # as it used to be a bug: the change in the xml file is only applied after
    # the guest is resumed.
    if pre_vm_state == "paused":
        vm.resume()
        time.sleep(5)

    # Check audit log
    check_audit_after_cmd = True
    if test_audit:
        grep_audit = ('grep "%s" /var/log/audit/audit.log'
                      % test_cmd.split("-")[0])
        cmd = (grep_audit + ' | ' + 'grep "%s" | tail -n1 | grep "res=success"'
               % device_source)
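        # A successful hot(un)plug is expected to leave an audit record (type
        # VIRT_RESOURCE) that contains the device source path and 'res=success',
        # which is what this grep pipeline checks for.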
        if process.run(cmd, shell=True).exit_status:
            logging.error("Audit check failed")
            check_audit_after_cmd = False

    # Need to wait a while for the xml to sync
    time.sleep(float(time_sleep))
    # Check disk count after command.
    check_count_after_cmd = True
    disk_count_after_cmd = vm_xml.VMXML.get_disk_count(vm_name)
    if test_cmd == "attach-disk":
        if disk_count_after_cmd == disk_count_before_cmd:
            check_count_after_cmd = False
    elif test_cmd == "detach-disk":
        if disk_count_after_cmd < disk_count_before_cmd:
            check_count_after_cmd = False

    # Recover VM state.
    if pre_vm_state == "shut off":
        vm.start()

    # Check in VM after command.
    check_vm_after_cmd = True
    check_vm_after_cmd = check_vm_partition(vm, device, os_type,
                                            device_target, old_parts)

    # Check disk type after attach.
    check_disk_type = True
    if test_type:
        if test_block_dev:
            check_disk_type = vm_xml.VMXML.check_disk_type(vm_name,
                                                           device_source,
                                                           "block")
        else:
            check_disk_type = vm_xml.VMXML.check_disk_type(vm_name,
                                                           device_source,
                                                           "file")
    # Check disk serial after attach.
    check_disk_serial = True
    if serial:
        disk_serial = vm_xml.VMXML.get_disk_serial(vm_name, device_target)
        if serial != disk_serial:
            check_disk_serial = False

    # Check disk address after attach.
    check_disk_address = True
    if address:
        disk_address = vm_xml.VMXML.get_disk_address(vm_name, device_target)
        if address != disk_address:
            check_disk_address = False

    # Check multifunction address after attach.
    check_disk_address2 = True
    if address2:
        disk_address2 = vm_xml.VMXML.get_disk_address(vm_name, device_target2)
        if address2 != disk_address2:
            check_disk_address2 = False

    # Check disk cache option after attach.
    check_cache_after_cmd = True
    if cache_options:
        disk_cache = vm_xml.VMXML.get_disk_attr(vm_name, device_target,
                                                "driver", "cache")
        if cache_options == "default":
            if disk_cache is not None:
                check_cache_after_cmd = False
        elif disk_cache != cache_options:
            check_cache_after_cmd = False

    # Eject cdrom test
    eject_cdrom = "yes" == params.get("at_dt_disk_eject_cdrom", "no")
    save_vm = "yes" == params.get("at_dt_disk_save_vm", "no")
    save_file = os.path.join(data_dir.get_tmp_dir(), "vm.save")
    try:
        if eject_cdrom:
            eject_params = {'type_name': "file", 'device_type': "cdrom",
                            'target_dev': device_target, 'target_bus': device_disk_bus}
            eject_xml = libvirt.create_disk_xml(eject_params)
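            # A hedged note: the eject XML above is simply a cdrom disk element
            # without a <source>, roughly
            #   <disk type='file' device='cdrom'>
            #     <target dev='<device_target>' bus='<device_disk_bus>'/>
            #   </disk>
            # so attaching it updates the cdrom with empty media, which ejects
            # whatever is currently loaded.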
            with open(eject_xml) as eject_file:
                logging.debug("Eject CDROM by XML: %s", eject_file.read())
            # Run the command twice to make sure the cdrom tray is opened first (BZ892289)
            # Open tray
            virsh.attach_device(domainarg=vm_name, filearg=eject_xml, debug=True)
            # Add time sleep between two attach commands.
            if time_sleep:
                time.sleep(float(time_sleep))
            # Eject cdrom
            result = virsh.attach_device(domainarg=vm_name, filearg=eject_xml,
                                         debug=True)
            if result.exit_status != 0:
                test.fail("Eject CDROM failed")
            if vm_xml.VMXML.check_disk_exist(vm_name, device_source):
                test.fail("Find %s after do eject" % device_source)
        # Save and restore VM
        if save_vm:
            result = virsh.save(vm_name, save_file, debug=True)
            libvirt.check_exit_status(result)
            result = virsh.restore(save_file, debug=True)
            libvirt.check_exit_status(result)
            if vm_xml.VMXML.check_disk_exist(vm_name, device_source):
                test.fail("Find %s after do restore" % device_source)

        # Destroy VM.
        vm.destroy(gracefully=False)

        # Check disk count after VM shutdown (with --config).
        check_count_after_shutdown = True
        inactive_vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        disk_count_after_shutdown = len(inactive_vmxml.get_disk_all())
        if test_cmd == "attach-disk":
            if disk_count_after_shutdown == disk_count_before_cmd:
                check_count_after_shutdown = False
        elif test_cmd == "detach-disk":
            if disk_count_after_shutdown < disk_count_before_cmd:
                check_count_after_shutdown = False

    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.debug("Restore the VM XML")
        backup_xml.sync()
        if os.path.exists(save_file):
            os.remove(save_file)
        if test_block_dev:
            if test_logical_dev:
                libvirt.delete_local_disk("lvm", vgname=vg_name, lvname=lv_name)
                lv_utils.vg_remove(vg_name)
                process.run("pvremove %s" % device_source, shell=True, ignore_status=True)
            libvirt.setup_or_cleanup_iscsi(False)
        else:
            libvirt.delete_local_disk("file", device_source)

    # Check results.
    if status_error:
        if not status:
            test.fail("virsh %s exit with unexpected value."
                      % test_cmd)
    else:
        if status:
            test.fail("virsh %s failed." % test_cmd)
        if test_cmd == "attach-disk":
            if at_options.count("config"):
                if not check_count_after_shutdown:
                    test.fail("Cannot see config attached device "
                              "in xml file after VM shutdown.")
                if not check_disk_serial:
                    test.fail("Serial set failed after attach")
                if not check_disk_address:
                    test.fail("Address set failed after attach")
                if not check_disk_address2:
                    test.fail("Address(multifunction) set failed"
                              " after attach")
            else:
                if not check_count_after_cmd:
                    test.fail("Cannot see device in xml file"
                              " after attach.")
                if not check_vm_after_cmd:
                    test.fail("Cannot see device in VM after"
                              " attach.")
                if not check_disk_type:
                    test.fail("Check disk type failed after"
                              " attach.")
                if not check_audit_after_cmd:
                    test.fail("Audit hotplug failure after attach")
                if not check_cache_after_cmd:
                    test.fail("Check cache failure after attach")
                if at_options.count("persistent"):
                    if not check_count_after_shutdown:
                        test.fail("Cannot see device attached "
                                  "with persistent after "
                                  "VM shutdown.")
                else:
                    if check_count_after_shutdown:
                        test.fail("See non-config attached device "
                                  "in xml file after VM shutdown.")
        elif test_cmd == "detach-disk":
            if dt_options.count("config"):
                if check_count_after_shutdown:
                    test.fail("See config detached device in "
                              "xml file after VM shutdown.")
            else:
                if check_count_after_cmd:
                    test.fail("See device in xml file "
                              "after detach.")
                if check_vm_after_cmd:
                    test.fail("See device in VM after detach.")
                if not check_audit_after_cmd:
                    test.fail("Audit hotunplug failure "
                              "after detach")

                if dt_options.count("persistent"):
                    if check_count_after_shutdown:
                        test.fail("See device deattached "
                                  "with persistent after "
                                  "VM shutdown.")
                else:
                    if not check_count_after_shutdown:
                        test.fail("See non-config detached "
                                  "device in xml file after "
                                  "VM shutdown.")

        else:
            test.error("Unknown command %s." % test_cmd)