Example #1
def destroy_nodedev(dev_name):
    """
    Destroy (stop) a device on the node

    :param dev_name: name of mediated device
    """
    virsh.nodedev_destroy(dev_name, debug=True)
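# Note: virsh.nodedev_destroy() is the avocado-vt (virttest.virsh) wrapper
# around "virsh nodedev-destroy"; as the examples here show, it returns a
# command result object. A minimal standalone sketch follows; the device
# name is a placeholder, not taken from any of these examples.
import logging

from virttest import virsh

result = virsh.nodedev_destroy("mdev_example_device", debug=True)
if result.exit_status != 0:
    logging.error("nodedev-destroy failed: %s", result.stderr)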
def nodedev_destroy(scsi_host, params={}):
    """
    Destroy a nodedev of scsi_host#.
    :param scsi_host: The scsi to destroy
    :param params: Contain status_error
    """
    status_error = params.get("status_error", "no")
    result = virsh.nodedev_destroy(scsi_host)
    logging.info("destroying scsi:%s", scsi_host)
    status = result.exit_status
    # Check status_error
    if status_error == "yes":
        if status:
            logging.info("It's an expected %s", result.stderr)
        else:
            raise exceptions.TestFail(
                "%d not a expected command "
                "return value", status)
    elif status_error == "no":
        if status:
            raise exceptions.TestFail(result.stderr)
        else:
            # Check nodedev value
            if not check_nodedev(scsi_host):
                logging.info(result.stdout)
            else:
                raise exceptions.TestFail("The relevant directory still exists"
                                          "or mismatch with result")
Example #3
def destroy_nodedev(params):
    """
    Destroy (stop) a device on the node
    :param params: the parameter dictionary
    """
    dev_name = params.get("nodedev_new_dev")
    options = params.get("nodedev_options")
    status_error = params.get("status_error", "no")

    result = virsh.nodedev_destroy(dev_name, options)
    status = result.exit_status

    # Check status_error
    if status_error == "yes":
        if status:
            logging.info("It's an expected %s", result.stderr)
        else:
            raise error.TestFail("%d not a expected command "
                                 "return value", status)
    elif status_error == "no":
        if status:
            raise error.TestFail(result.stderr)
        else:
            # Check nodedev value
            if not check_nodedev(dev_name):
                logging.info(result.stdout)
            else:
                raise error.TestFail("The relevant directory still exists"
                                     "or mismatch with result")
def nodedev_destroy(scsi_host, params={}):
    """
    Destroy a nodedev of scsi_host#.
    :param scsi_host: The scsi to destroy
    :param params: Contain status_error
    """
    status_error = params.get("status_error", "no")
    result = virsh.nodedev_destroy(scsi_host)
    logging.info("destroying scsi:%s", scsi_host)
    status = result.exit_status
    # Check status_error
    if status_error == "yes":
        if status:
            logging.info("It's an expected %s", result.stderr)
        else:
            raise exceptions.TestFail("%d not a expected command "
                                      "return value", status)
    elif status_error == "no":
        if status:
            raise exceptions.TestFail(result.stderr)
        else:
            # Check nodedev value
            if not check_nodedev(scsi_host):
                logging.info(result.stdout)
            else:
                raise exceptions.TestFail("The relevant directory still exists"
                                          "or mismatch with result")
Example #5
def run(test, params, env):
    """
    Round trip for persistent setup via nodedev API:
    define, set autostart, start, destroy, undefine

    The test assumes no other mediated device is available
    in the test environment.

    A typical node device xml would look like:
    <device>
        <parent>css_0_0_0062</parent>
        <capability type="mdev">
            <type id="vfio_ccw-io"/>
            <uuid>8d312cf6-f92a-485c-8db8-ba9299848f46</uuid>
        </capability>
    </device>
    """

    schid = None

    try:
        schid, _ = ccw.get_device_info()
        ccw.set_override(schid)
        nodedev_file_path = get_device_xml(schid)
        virsh.nodedev_define(nodedev_file_path,
                             ignore_status=False,
                             debug=True)
        device_name = get_device_name()
        virsh.nodedev_autostart(device_name, ignore_status=False, debug=True)
        check_autostart(device_name)
        virsh.nodedev_start(device_name, ignore_status=False, debug=True)
        virsh.nodedev_destroy(device_name, ignore_status=False, debug=True)
        virsh.nodedev_undefine(device_name, ignore_status=False, debug=True)
    finally:
        if schid:
            ccw.unset_override(schid)
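# get_device_xml() and get_device_name() are helpers outside this excerpt.
# A minimal sketch of get_device_xml(), assuming it only renders the mdev
# node-device XML from the docstring above into a temporary file (generating
# a fresh UUID here is an assumption):
import tempfile
import uuid


def get_device_xml(schid):
    """Hypothetical helper: write an mdev nodedev XML and return its path."""
    parent = "css_" + schid.replace(".", "_")  # e.g. "0.0.0062" -> "css_0_0_0062"
    device_xml = """
    <device>
        <parent>%s</parent>
        <capability type="mdev">
            <type id="vfio_ccw-io"/>
            <uuid>%s</uuid>
        </capability>
    </device>
    """ % (parent, uuid.uuid4())
    nodedev_file = tempfile.NamedTemporaryFile(mode="w", suffix=".xml",
                                               delete=False)
    nodedev_file.write(device_xml)
    nodedev_file.close()
    return nodedev_file.name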
Example #6
def destroy_nodedev(test, params):
    """
    Destroy (stop) a device on the node
    :param params: the parameter dictionary
    """
    dev_name = params.get("nodedev_dev_name")
    if dev_name == "nodedev_NIC_name":
        dev_name = params.get("nodedev_NIC_name")
    else:
        # Check nodedev value
        # if not check_nodedev(dev_name):
        # logging.info(result.stdout)
        dev_name = params.get("nodedev_new_dev")

    options = params.get("nodedev_options")
    status_error = params.get("status_error", "no")

    # libvirt acl polkit related params
    uri = params.get("virsh_uri")
    if uri and not utils_split_daemons.is_modular_daemon():
        uri = "qemu:///system"
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    result = virsh.nodedev_destroy(dev_name, options, uri=uri,
                                   debug=True,
                                   unprivileged_user=unprivileged_user)
    status = result.exit_status

    # Check status_error
    if status_error == "yes":
        if status:
            logging.info("It's an expected %s", result.stderr)
        else:
            test.fail("%d not a expected command "
                      "return value", status)
    elif status_error == "no":
        if status:
            test.fail(result.stderr)
        else:
            # Check nodedev value
            if not check_nodedev(dev_name):
                logging.info(result.stdout.strip())
            else:
                test.fail("The relevant directory still exists"
                          "or mismatch with result")
Example #7
def nodedev_destroy(scsi_host, params={}):
    """
    Destroy a nodedev of scsi_host#.
    :param scsi_host: The scsi to destroy
    :param params: Contain status_error
    """
    status_error = "yes" == params.get("status_error", "no")
    result = virsh.nodedev_destroy(scsi_host)
    LOG.info("destroying scsi:%s", scsi_host)
    # Check status_error
    libvirt.check_exit_status(result, status_error)
    # Check nodedev value
    if not check_nodedev(scsi_host):
        LOG.info(result.stdout_text)
    else:
        raise exceptions.TestFail("The relevant directory still exists"
                                  " or mismatch with result")
Example #8
def nodedev_destroy(scsi_host, params={}):
    """
    Destroy a nodedev of scsi_host#.
    :param scsi_host: The scsi to destroy
    :param params: Contain status_error
    """
    status_error = "yes" == params.get("status_error", "no")
    result = virsh.nodedev_destroy(scsi_host)
    logging.info("destroying scsi:%s", scsi_host)
    # Check status_error
    libvirt.check_exit_status(result, status_error)
    # Check nodedev value
    if not check_nodedev(scsi_host):
        logging.info(results_stdout_52lts(result))
    else:
        raise exceptions.TestFail("The relevant directory still exists"
                                  " or mismatch with result")
def destroy_nodedev(test, params):
    """
    Destroy (stop) a device on the node
    :param params: the parameter dictionary
    """
    dev_name = params.get("nodedev_dev_name")
    if dev_name == "nodedev_NIC_name":
        dev_name = params.get("nodedev_NIC_name")
    else:
        # Check nodedev value
        # if not check_nodedev(dev_name):
        # logging.info(result.stdout)
        dev_name = params.get("nodedev_new_dev")

    options = params.get("nodedev_options")
    status_error = params.get("status_error", "no")

    # libvirt acl polkit related params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    result = virsh.nodedev_destroy(dev_name, options, uri=uri,
                                   debug=True,
                                   unprivileged_user=unprivileged_user)
    status = result.exit_status

    # Check status_error
    if status_error == "yes":
        if status:
            logging.info("It's an expected %s", result.stderr)
        else:
            test.fail("%d not a expected command "
                      "return value", status)
    elif status_error == "no":
        if status:
            test.fail(result.stderr)
        else:
            # Check nodedev value
            if not check_nodedev(dev_name):
                logging.info(result.stdout.strip())
            else:
                test.fail("The relevant directory still exists"
                          "or mismatch with result")
def destroy_nodedev(params):
    """
    Destroy (stop) a device on the node
    :param params: the parameter dictionary
    """
    dev_name = params.get("nodedev_new_dev")
    options = params.get("nodedev_options")
    status_error = params.get("status_error", "no")

    # libvirt acl polkit related params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    result = virsh.nodedev_destroy(dev_name,
                                   options,
                                   uri=uri,
                                   debug=True,
                                   unprivileged_user=unprivileged_user)
    status = result.exit_status

    # Check status_error
    if status_error == "yes":
        if status:
            logging.info("It's an expected %s", result.stderr)
        else:
            raise error.TestFail("%d not a expected command "
                                 "return value", status)
    elif status_error == "no":
        if status:
            raise error.TestFail(result.stderr)
        else:
            # Check nodedev value
            if not check_nodedev(dev_name):
                logging.info(result.stdout)
            else:
                raise error.TestFail("The relevant directory still exists"
                                     "or mismatch with result")
Example #11
def run(test, params, env):
    vd_formats = []
    disk_devices = []
    driver_names = []
    driver_types = []
    device_targets = []
    target_buses = []
    wwnns = []
    wwpns = []

    vm_names = params.get("vms", "avocado-vt-vm1 avocado-vt-vm2").split()
    fc_host_dir = params.get("fc_host_dir", "/sys/class/fc_host")
    vm0_disk_type = params.get("vm0_disk_type", "block")
    vm1_disk_type = params.get("vm1_disk_type", "block")
    vm0_vd_format = params.get("vm0_vd_format", "by_path")
    vm1_vd_format = params.get("vm1_vd_foramt", "by_path")
    vm0_disk_device = vm1_disk_device = params.get("disk_device", "disk")
    vm0_driver_name = vm1_driver_name = params.get("driver_name", "qemu")
    vm0_driver_type = vm1_driver_type = params.get("driver_type", "qcow2")
    vm0_device_target = vm1_device_target = params.get("device_target", "vda")
    vm0_target_bus = vm1_target_bus = params.get("target_bus", "virtio")
    vm0_wwnn = params.get("vm0_wwnn", "ENTER.WWNN.FOR.VM0")
    vm0_wwpn = params.get("vm0_wwpn", "ENTER.WWPN.FOR.VM0")
    vm1_wwnn = params.get("vm1_wwnn", "ENTER.WWNN.FOR.VM1")
    vm1_wwpn = params.get("vm1_wwpn", "ENTER.WWPN.FOR.VM1")

    disk_types = [vm0_disk_type, vm1_disk_type]
    vd_formats = [vm0_vd_format, vm1_vd_format]
    disk_devices = [vm0_disk_device, vm1_disk_device]
    driver_names = [vm0_driver_name, vm1_driver_name]
    driver_types = [vm0_driver_type, vm1_driver_type]
    device_targets = [vm0_device_target, vm1_device_target]
    target_buses = [vm0_target_bus, vm1_target_bus]
    wwnns = [vm0_wwnn, vm1_wwnn]
    wwpns = [vm0_wwpn, vm1_wwpn]
    old_mpath_conf = ""
    mpath_conf_path = "/etc/multipath.conf"
    original_mpath_conf_exist = os.path.exists(mpath_conf_path)

    new_vhbas = []
    path_to_blks = []
    vmxml_backups = []
    vms = []

    try:
        online_hbas = utils_npiv.find_hbas("hba")
        if not online_hbas:
            test.cancel("There is no online hba cards.")
        old_mpath_conf = utils_npiv.prepare_multipath_conf(conf_path=mpath_conf_path,
                                                           replace_existing=True)
        first_online_hba = online_hbas[0]
        if len(vm_names) != 2:
            test.cancel("This test needs exactly 2 vms.")
        for vm_index in range(len(vm_names)):
            logging.debug("prepare vm %s", vm_names[vm_index])
            vm = env.get_vm(vm_names[vm_index])
            vms.append(vm)
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_names[vm_index])
            vmxml_backup = vmxml.copy()
            vmxml_backups.append(vmxml_backup)
            old_vhbas = utils_npiv.find_hbas("vhba")
            old_mpath_devs = utils_npiv.find_mpath_devs()
            new_vhba = utils_npiv.nodedev_create_from_xml(
                    {"nodedev_parent": first_online_hba,
                     "scsi_wwnn": wwnns[vm_index],
                     "scsi_wwpn": wwpns[vm_index]})
            utils_misc.wait_for(
                    lambda: utils_npiv.is_vhbas_added(old_vhbas),
                    timeout=_TIMEOUT*2)
            if not new_vhba:
                test.fail("vHBA not sucessfully generated.")
            new_vhbas.append(new_vhba)
            if vd_formats[vm_index] == "mpath":
                utils_misc.wait_for(
                        lambda: utils_npiv.is_mpath_devs_added(old_mpath_devs),
                        timeout=_TIMEOUT*5)
                if not utils_npiv.is_mpath_devs_added(old_mpath_devs):
                    test.fail("mpath dev not generated.")
                cur_mpath_devs = utils_npiv.find_mpath_devs()
                new_mpath_devs = list(set(cur_mpath_devs).difference(
                    set(old_mpath_devs)))
                logging.debug("The newly added mpath dev is: %s",
                              new_mpath_devs)
                path_to_blk = os.path.join(_MPATH_DIR, new_mpath_devs[0])
            elif vd_formats[vm_index] == "by_path":
                new_vhba_scsibus = re.sub(r"\D", "", new_vhba)
                utils_misc.wait_for(lambda: get_blks_by_scsi(test, new_vhba_scsibus),
                                    timeout=_TIMEOUT)
                new_blks = get_blks_by_scsi(test, new_vhba_scsibus)
                if not new_blks:
                    test.fail("blk dev not found with scsi_%s" % new_vhba_scsibus)
                first_blk_dev = new_blks[0]
                utils_misc.wait_for(
                        lambda: get_symbols_by_blk(test, first_blk_dev),
                        timeout=_TIMEOUT)
                lun_sl = get_symbols_by_blk(test, first_blk_dev)
                if not lun_sl:
                    test.fail("lun symbolic links not found in "
                              "/dev/disk/by-path/ for %s" %
                              first_blk_dev)
                lun_dev = lun_sl[0]
                path_to_blk = os.path.join(_BYPATH_DIR, lun_dev)
            path_to_blks.append(path_to_blk)
            img_src = vm.get_first_disk_devices()['source']
            img_info = utils_misc.get_image_info(img_src)
            src_fmt = img_info["format"]
            dest_fmt = "qcow2"
            convert_img_to_dev(test, src_fmt, dest_fmt, img_src, path_to_blk)
            disk_obj = prepare_disk_obj(disk_types[vm_index], disk_devices[vm_index],
                                        driver_names[vm_index], driver_types[vm_index],
                                        path_to_blk, device_targets[vm_index],
                                        target_buses[vm_index])
            replace_vm_first_vd(vm_names[vm_index], disk_obj)
            if vm.is_dead():
                logging.debug("Start vm %s with updated vda", vm_names[vm_index])
                vm.start()

        # concurrently create file in vm with threads
        create_file_in_vm_threads = []
        for vm in vms:
            cli_t = threading.Thread(target=create_file_in_vm,
                                     args=(vm, _VM_FILE_PATH, vm.name, _REPEAT,)
                                     )
            logging.debug("Start creating file in vm: %s", vm.name)
            create_file_in_vm_threads.append(cli_t)
            cli_t.start()
        for thrd in create_file_in_vm_threads:
            thrd.join()

        # reboot vm and check if the previously created file still exists
        # with the correct content
        for vm in vms:
            session = vm.wait_for_login()
            session.cmd_status_output("sync")
            if vm.is_alive():
                vm.destroy(gracefully=True)
            else:
                test.fail("%s is not running" % vm.name)
            vm.start()
            session = vm.wait_for_login()
            if check_file_in_vm(session, _VM_FILE_PATH, vm.name, _REPEAT):
                logging.debug("file exists after reboot with correct content")
            else:
                test.fail("Failed to check the test file in vm")
            session.close()
    except Exception as detail:
        test.fail("Test failed with exception: %s" % detail)
    finally:
        logging.debug("Start to clean up env...")
        for vmxml_backup in vmxml_backups:
            vmxml_backup.sync()
        for new_vhba in new_vhbas:
            virsh.nodedev_destroy(new_vhba)
        process.system('service multipathd restart', verbose=True)
        if old_mpath_conf:
            utils_npiv.prepare_multipath_conf(conf_path=mpath_conf_path,
                                              conf_content=old_mpath_conf,
                                              replace_existing=True)
        if not original_mpath_conf_exist and os.path.exists(mpath_conf_path):
            os.remove(mpath_conf_path)
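# get_blks_by_scsi() and get_symbols_by_blk() are module-level helpers that
# are not part of this excerpt. A plausible sketch, assuming they scan sysfs
# for block devices on the new vHBA's SCSI host and then collect the matching
# /dev/disk/by-path symlinks (names, paths and logic are assumptions; the
# "test" argument is only kept for parity with the calls above):
import os


def get_blks_by_scsi(test, scsi_host_num, blk_prefix="sd"):
    """Hypothetical helper: block devices attached to the given SCSI host."""
    blks = []
    for devname in os.listdir("/sys/block"):
        if not devname.startswith(blk_prefix):
            continue
        # /sys/block/sdX/device resolves to a path ending in H:C:T:L
        hctl = os.path.basename(
            os.path.realpath(os.path.join("/sys/block", devname, "device")))
        if hctl.split(":")[0] == str(scsi_host_num):
            blks.append(devname)
    return blks


def get_symbols_by_blk(test, blk_dev, by_path_dir="/dev/disk/by-path"):
    """Hypothetical helper: by-path symlinks that resolve to blk_dev."""
    links = []
    for entry in os.listdir(by_path_dir):
        target = os.path.realpath(os.path.join(by_path_dir, entry))
        if os.path.basename(target) == blk_dev:
            links.append(entry)
    return links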
Example #12
def run(test, params, env):
    """
    1. prepare a fc lun with one of following methods
        - create a scsi pool&vol
        - create a vhba
    2. prepare the virtual disk xml, as one of following
        - source = /dev/disk/by-path
        - source = /dev/mapper/mpathX
        - source = pool&vol format
    3. start a vm with above disk as vdb
    4. create disk-only snapshot of vdb
    5. check the snapshot-list and snapshot file's existence
    6. mount vdb and touch file to it
    7. revert the snapshot and check file's existence
    8. delete snapshot
    9. cleanup env.
    """
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    wwpn = params.get("wwpn", "WWPN_EXAMPLE")
    wwnn = params.get("wwnn", "WWNN_EXAMPLE")
    disk_device = params.get("disk_device", "disk")
    disk_type = params.get("disk_type", "file")
    disk_size = params.get("disk_size", "100M")
    device_target = params.get("device_target", "vdb")
    driver_name = params.get("driver_name", "qemu")
    driver_type = params.get("driver_type", "raw")
    target_bus = params.get("target_bus", "virtio")
    vd_format = params.get("vd_format", "")
    snapshot_dir = params.get("snapshot_dir", "/tmp")
    snapshot_name = params.get("snapshot_name", "s1")
    pool_name = params.get("pool_name", "")
    pool_target = params.get("pool_target", "/dev")
    snapshot_disk_only = "yes" == params.get("snapshot_disk_only", "no")
    new_vhbas = []
    current_vhbas = []
    new_vhba = []
    path_to_blk = ""
    lun_sl = []
    new_disk = ""
    pool_ins = None
    old_mpath_conf = ""
    mpath_conf_path = "/etc/multipath.conf"
    original_mpath_conf_exist = os.path.exists(mpath_conf_path)

    vm = env.get_vm(vm_name)
    online_hbas = utils_npiv.find_hbas("hba")
    if not online_hbas:
        raise exceptions.TestSkipError("There is no online hba cards.")
    old_mpath_conf = utils_npiv.prepare_multipath_conf(
        conf_path=mpath_conf_path, replace_existing=True)
    first_online_hba = online_hbas[0]
    old_vhbas = utils_npiv.find_hbas("vhba")
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    virt_vm = libvirt_vm.VM(vm_name, vm.params, vm.root_dir, vm.address_cache)
    old_disks = virt_vm.get_disks()

    if vm.is_alive():
        vm.destroy(gracefully=False)
    if pool_name:
        pool_ins = libvirt_storage.StoragePool()
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()
    try:
        # prepare a fc lun
        if vd_format in ['scsi_vol']:
            if pool_ins.pool_exists(pool_name):
                raise exceptions.TestFail("Pool %s already exist" % pool_name)
            prepare_scsi_pool(pool_name, wwnn, wwpn, first_online_hba,
                              pool_target)
            utils_misc.wait_for(lambda: utils_npiv.is_vhbas_added(old_vhbas),
                                timeout=_TIMEOUT)
            if not utils_npiv.is_vhbas_added(old_vhbas):
                raise exceptions.TestFail("vHBA not successfully created")
            current_vhbas = utils_npiv.find_hbas("vhba")
            new_vhba = list(set(current_vhbas).difference(set(old_vhbas)))[0]
            new_vhbas.append(new_vhba)
            new_vhba_scsibus = re.sub(r"\D", "", new_vhba)
            utils_misc.wait_for(lambda: get_blks_by_scsi(new_vhba_scsibus),
                                timeout=_TIMEOUT)
            new_blks = get_blks_by_scsi(new_vhba_scsibus)
            if not new_blks:
                raise exceptions.TestFail(
                    "block device not found with scsi_%s" % new_vhba_scsibus)
            vol_list = utlv.get_vol_list(pool_name,
                                         vol_check=True,
                                         timeout=_TIMEOUT * 3)
            path_to_blk = list(vol_list.values())[0]
        elif vd_format in ['mpath', 'by_path']:
            old_mpath_devs = utils_npiv.find_mpath_devs()
            new_vhba = utils_npiv.nodedev_create_from_xml({
                "nodedev_parent": first_online_hba,
                "scsi_wwnn": wwnn,
                "scsi_wwpn": wwpn
            })
            utils_misc.wait_for(lambda: utils_npiv.is_vhbas_added(old_vhbas),
                                timeout=_TIMEOUT * 2)
            if not new_vhba:
                raise exceptions.TestFail("vHBA not successfully generated.")
            new_vhbas.append(new_vhba)
            if vd_format == "mpath":
                utils_misc.wait_for(
                    lambda: utils_npiv.is_mpath_devs_added(old_mpath_devs),
                    timeout=_TIMEOUT * 5)
                if not utils_npiv.is_mpath_devs_added(old_mpath_devs):
                    raise exceptions.TestFail("mpath dev not generated.")
                cur_mpath_devs = utils_npiv.find_mpath_devs()
                new_mpath_devs = list(
                    set(cur_mpath_devs).difference(set(old_mpath_devs)))
                logging.debug("The newly added mpath dev is: %s",
                              new_mpath_devs)
                path_to_blk = "/dev/mapper/" + new_mpath_devs[0]
            elif vd_format == "by_path":
                new_vhba_scsibus = re.sub(r"\D", "", new_vhba)
                utils_misc.wait_for(lambda: get_blks_by_scsi(new_vhba_scsibus),
                                    timeout=_TIMEOUT)
                new_blks = get_blks_by_scsi(new_vhba_scsibus)
                if not new_blks:
                    raise exceptions.TestFail("blk dev not found with scsi_%s",
                                              new_vhba_scsibus)
                first_blk_dev = new_blks[0]
                utils_misc.wait_for(lambda: get_symbols_by_blk(first_blk_dev),
                                    timeout=_TIMEOUT)
                lun_sl = get_symbols_by_blk(first_blk_dev)
                if not lun_sl:
                    raise exceptions.TestFail(
                        "lun symbolic links not found in "
                        "/dev/disk/by-path/ for %s" % first_blk_dev)
                lun_dev = lun_sl[0]
                path_to_blk = os.path.join(_BY_PATH_DIR, lun_dev)
            else:
                pass
        else:
            raise exceptions.TestSkipError("Not provided how to pass"
                                           "virtual disk to VM.")

        # create qcow2 file on the block device with specified size
        if path_to_blk:
            cmd = "qemu-img create -f qcow2 %s %s" % (path_to_blk, disk_size)
            try:
                process.run(cmd, shell=True)
            except process.CmdError as detail:
                raise exceptions.TestFail(
                    "Failed to create qcow2 on blk dev: %s" % detail)
        else:
            raise exceptions.TestFail("Don't have a valid path to blk dev.")

        # prepare disk xml
        if "vol" in vd_format:
            vol_list = utlv.get_vol_list(pool_name,
                                         vol_check=True,
                                         timeout=_TIMEOUT * 3)
            test_vol = list(vol_list.keys())[0]
            disk_params = {
                'type_name': disk_type,
                'target_dev': device_target,
                'target_bus': target_bus,
                'source_pool': pool_name,
                'source_volume': test_vol,
                'driver_type': driver_type
            }
        else:
            disk_params = {
                'type_name': disk_type,
                'device': disk_device,
                'driver_name': driver_name,
                'driver_type': driver_type,
                'source_file': path_to_blk,
                'target_dev': device_target,
                'target_bus': target_bus
            }
        if vm.is_alive():
            vm.destroy(gracefully=False)
        new_disk = disk.Disk()
        new_disk.xml = open(utlv.create_disk_xml(disk_params)).read()

        # start vm with the virtual disk
        vmxml.devices = vmxml.devices.append(new_disk)
        vmxml.sync()
        vm.start()
        session = vm.wait_for_login()
        cur_disks = virt_vm.get_disks()
        mount_disk = "".join(list(set(old_disks) ^ set(cur_disks)))

        # mkfs and mount disk in vm, create a file on that disk.
        if not mount_disk:
            logging.debug("old_disk: %s, new_disk: %s", old_disks, cur_disks)
            raise exceptions.TestFail("No new disk found in vm.")
        mkfs_and_mount(session, mount_disk)
        create_file_in_vm(session, "/mnt/before_snapshot.txt", "before")

        # virsh snapshot-create-as vm s --disk-only --diskspec vda,file=path
        if snapshot_disk_only:
            vm_blks = list(vm.get_disk_devices().keys())
            options = "%s --disk-only" % snapshot_name
            for vm_blk in vm_blks:
                snapshot_file = snapshot_dir + "/" + vm_blk + "." + snapshot_name
                if os.path.exists(snapshot_file):
                    os.remove(snapshot_file)
                options = options + " --diskspec %s,file=%s" % (vm_blk,
                                                                snapshot_file)
        else:
            options = snapshot_name
        utlv.check_exit_status(virsh.snapshot_create_as(vm_name, options))

        # check virsh snapshot-list
        logging.debug("Running: snapshot-list %s", vm_name)
        snapshot_list = virsh.snapshot_list(vm_name)
        logging.debug("snapshot list is: %s", snapshot_list)
        if not snapshot_list:
            raise exceptions.TestFail("snapshots not found after creation.")

        # snapshot-revert doesn't support external snapshots for now, so
        # only check this with internal snapshots.
        if not snapshot_disk_only:
            create_file_in_vm(session, "/mnt/after_snapshot.txt", "after")
            logging.debug("Running: snapshot-revert %s %s", vm_name,
                          snapshot_name)
            utlv.check_exit_status(
                virsh.snapshot_revert(vm_name, snapshot_name))
            session = vm.wait_for_login()
            file_existence, file_content = get_file_in_vm(
                session, "/mnt/after_snapshot.txt")
            logging.debug("file exist = %s, file content = %s", file_existence,
                          file_content)
            if file_existence:
                raise exceptions.TestFail("The file created "
                                          "after snapshot still exists.")
            file_existence, file_content = get_file_in_vm(
                session, "/mnt/before_snapshot.txt")
            logging.debug("file eixst = %s, file content = %s", file_existence,
                          file_content)
            if ((not file_existence) or (file_content.strip() != "before")):
                raise exceptions.TestFail("The file created "
                                          "before snapshot is lost.")
        # delete snapshots
        # if diskonly, delete --metadata and remove files
        # if not diskonly, delete snapshot
        if snapshot_disk_only:
            options = "--metadata"
        else:
            options = ""
        for snap in snapshot_list:
            logging.debug("deleting snapshot %s with options %s", snap,
                          options)
            result = virsh.snapshot_delete(vm_name, snap, options)
            logging.debug("result of snapshot-delete: %s",
                          result.stdout.strip())
            if snapshot_disk_only:
                vm_blks = list(vm.get_disk_devices().keys())
                for vm_blk in vm_blks:
                    snapshot_file = snapshot_dir + "/" + vm_blk + "." + snap
                    if os.path.exists(snapshot_file):
                        os.remove(snapshot_file)
        snapshot_list = virsh.snapshot_list(vm_name)
        if snapshot_list:
            raise exceptions.TestFail("Snapshot not deleted: %s",
                                      snapshot_list)
    except Exception as detail:
        raise exceptions.TestFail("exception happens: %s", detail)
    finally:
        logging.debug("Start to clean up env...")
        vmxml_backup.sync()
        if pool_ins and pool_ins.pool_exists(pool_name):
            virsh.pool_destroy(pool_name)
        for new_vhba in new_vhbas:
            virsh.nodedev_destroy(new_vhba)
        utils_npiv.restart_multipathd()
        if old_mpath_conf:
            utils_npiv.prepare_multipath_conf(conf_path=mpath_conf_path,
                                              conf_content=old_mpath_conf,
                                              replace_existing=True)
        if not original_mpath_conf_exist and os.path.exists(mpath_conf_path):
            os.remove(mpath_conf_path)
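# mkfs_and_mount(), create_file_in_vm() and get_file_in_vm() are helpers
# outside this excerpt. A minimal sketch of what they might run over the
# guest session (filesystem type, mount point and shell commands are
# assumptions):
def mkfs_and_mount(session, mount_disk, mount_point="/mnt"):
    """Hypothetical helper: format the new disk and mount it in the guest."""
    session.cmd_status_output("mkfs.ext4 -F %s" % mount_disk)
    session.cmd_status_output("mount %s %s" % (mount_disk, mount_point))


def create_file_in_vm(session, file_path, content):
    """Hypothetical helper: write a small text file inside the guest."""
    session.cmd_status_output("echo '%s' > %s && sync" % (content, file_path))


def get_file_in_vm(session, file_path):
    """Hypothetical helper: return (exists, content) for a file in the guest."""
    status, _ = session.cmd_status_output("test -f %s" % file_path)
    if status != 0:
        return False, ""
    _, content = session.cmd_status_output("cat %s" % file_path)
    return True, content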
Example #13
            create_file_in_vm_threads.append(cli_t)
            cli_t.start()
        for thrd in create_file_in_vm_threads:
            thrd.join()

        # reboot vm and check if the previously created file still exists
        # with the correct content
        for vm in vms:
            session = vm.wait_for_login()
            session.cmd_status_output("sync")
            if vm.is_alive():
                vm.destroy(gracefully=True)
            else:
                test.fail("%s is not running" % vm.name)
            vm.start()
            session = vm.wait_for_login()
            if check_file_in_vm(session, _VM_FILE_PATH, vm.name, _REPEAT):
                logging.debug("file exists after reboot with correct content")
            else:
                test.fail("Failed to check the test file in vm")
            session.close()
    except Exception as detail:
        test.fail("Test failed with exception: %s" % detail)
    finally:
        logging.debug("Start to clean up env...")
        for vmxml_backup in vmxml_backups:
            vmxml_backup.sync()
        for new_vhba in new_vhbas:
            virsh.nodedev_destroy(new_vhba)
        process.system('service multipathd restart', verbose=True)
Example #14
            create_file_in_vm_threads.append(cli_t)
            cli_t.start()
        for thrd in create_file_in_vm_threads:
            thrd.join()

        # reboot vm and check if the previously created file still exists
        # with the correct content
        for vm in vms:
            session = vm.wait_for_login()
            session.cmd_status_output("sync")
            if vm.is_alive():
                vm.destroy(gracefully=True)
            else:
                test.fail("%s is not running" % vm.name)
            vm.start()
            session = vm.wait_for_login()
            if check_file_in_vm(session, _VM_FILE_PATH, vm.name, _REPEAT):
                logging.debug("file exists after reboot with correct content")
            else:
                test.fail("Failed to check the test file in vm")
            session.close()
    except Exception as detail:
        test.fail("Test failed with exception: %s" % detail)
    finally:
        logging.debug("Start to clean up env...")
        for vmxml_backup in vmxml_backups:
            vmxml_backup.sync()
        for new_vhba in new_vhbas:
            virsh.nodedev_destroy(new_vhba)
        process.system('service multipathd restart', verbose=True)
Example #15
def run(test, params, env):
    """
    1. prepare a fc lun with one of following methods
        - create a scsi pool&vol
        - create a vhba
    2. prepare the virtual disk xml, as one of following
        - source = /dev/disk/by-path
        - source = /dev/mapper/mpathX
        - source = pool&vol format
    3. start a vm with above disk as vdb
    4. create disk-only snapshot of vdb
    5. check the snapshot-list and snapshot file's existence
    6. mount vdb and touch file to it
    7. revert the snapshot and check file's existence
    8. delete snapshot
    9. cleanup env.
    """
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    wwpn = params.get("wwpn", "WWPN_EXAMPLE")
    wwnn = params.get("wwnn", "WWNN_EXAMPLE")
    disk_device = params.get("disk_device", "disk")
    disk_type = params.get("disk_type", "file")
    disk_size = params.get("disk_size", "100M")
    device_target = params.get("device_target", "vdb")
    driver_name = params.get("driver_name", "qemu")
    driver_type = params.get("driver_type", "raw")
    target_bus = params.get("target_bus", "virtio")
    vd_format = params.get("vd_format", "")
    snapshot_dir = params.get("snapshot_dir", "/tmp")
    snapshot_name = params.get("snapshot_name", "s1")
    pool_name = params.get("pool_name", "")
    pool_target = params.get("pool_target", "/dev")
    snapshot_disk_only = "yes" == params.get("snapshot_disk_only", "no")
    new_vhbas = []
    current_vhbas = []
    new_vhba = []
    path_to_blk = ""
    lun_sl = []
    new_disk = ""
    pool_ins = None
    old_mpath_conf = ""
    mpath_conf_path = "/etc/multipath.conf"
    original_mpath_conf_exist = os.path.exists(mpath_conf_path)

    vm = env.get_vm(vm_name)
    online_hbas = utils_npiv.find_hbas("hba")
    if not online_hbas:
        raise exceptions.TestSkipError("There is no online hba cards.")
    old_mpath_conf = utils_npiv.prepare_multipath_conf(conf_path=mpath_conf_path,
                                                       replace_existing=True)
    first_online_hba = online_hbas[0]
    old_vhbas = utils_npiv.find_hbas("vhba")
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    virt_vm = libvirt_vm.VM(vm_name, vm.params, vm.root_dir,
                            vm.address_cache)
    old_disks = virt_vm.get_disks()

    if vm.is_alive():
        vm.destroy(gracefully=False)
    if pool_name:
        pool_ins = libvirt_storage.StoragePool()
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()
    try:
        # prepare a fc lun
        if vd_format in ['scsi_vol']:
            if pool_ins.pool_exists(pool_name):
                raise exceptions.TestFail("Pool %s already exist" % pool_name)
            prepare_scsi_pool(pool_name, wwnn, wwpn,
                              first_online_hba, pool_target)
            utils_misc.wait_for(lambda: utils_npiv.is_vhbas_added(old_vhbas),
                                timeout=_TIMEOUT)
            if not utils_npiv.is_vhbas_added(old_vhbas):
                raise exceptions.TestFail("vHBA not successfully created")
            current_vhbas = utils_npiv.find_hbas("vhba")
            new_vhba = list(set(current_vhbas).difference(
                set(old_vhbas)))[0]
            new_vhbas.append(new_vhba)
            new_vhba_scsibus = re.sub(r"\D", "", new_vhba)
            utils_misc.wait_for(lambda: get_blks_by_scsi(new_vhba_scsibus),
                                timeout=_TIMEOUT)
            new_blks = get_blks_by_scsi(new_vhba_scsibus)
            if not new_blks:
                raise exceptions.TestFail("block device not found with scsi_%s",
                                          new_vhba_scsibus)
            first_blk_dev = new_blks[0]
            utils_misc.wait_for(
                lambda: get_symbols_by_blk(first_blk_dev),
                timeout=_TIMEOUT)
            lun_sl = get_symbols_by_blk(first_blk_dev)
            if not lun_sl:
                raise exceptions.TestFail("lun symbolic links not found under "
                                          "/dev/disk/by-path/ for blk dev %s" %
                                          first_blk_dev)
            lun_dev = lun_sl[0]
            path_to_blk = os.path.join(_BY_PATH_DIR, lun_dev)
        elif vd_format in ['mpath', 'by_path']:
            old_mpath_devs = utils_npiv.find_mpath_devs()
            new_vhba = utils_npiv.nodedev_create_from_xml(
                    {"nodedev_parent": first_online_hba,
                     "scsi_wwnn": wwnn,
                     "scsi_wwpn": wwpn})
            utils_misc.wait_for(
                lambda: utils_npiv.is_vhbas_added(old_vhbas),
                timeout=_TIMEOUT*2)
            if not new_vhba:
                raise exceptions.TestFail("vHBA not sucessfully generated.")
            new_vhbas.append(new_vhba)
            if vd_format == "mpath":
                utils_misc.wait_for(
                    lambda: utils_npiv.is_mpath_devs_added(old_mpath_devs),
                    timeout=_TIMEOUT*5)
                if not utils_npiv.is_mpath_devs_added(old_mpath_devs):
                    raise exceptions.TestFail("mpath dev not generated.")
                cur_mpath_devs = utils_npiv.find_mpath_devs()
                new_mpath_devs = list(set(cur_mpath_devs).difference(
                    set(old_mpath_devs)))
                logging.debug("The newly added mpath dev is: %s",
                              new_mpath_devs)
                path_to_blk = "/dev/mapper/" + new_mpath_devs[0]
            elif vd_format == "by_path":
                new_vhba_scsibus = re.sub(r"\D", "", new_vhba)
                utils_misc.wait_for(lambda: get_blks_by_scsi(new_vhba_scsibus),
                                    timeout=_TIMEOUT)
                new_blks = get_blks_by_scsi(new_vhba_scsibus)
                if not new_blks:
                    raise exceptions.TestFail("blk dev not found with scsi_%s",
                                              new_vhba_scsibus)
                first_blk_dev = new_blks[0]
                utils_misc.wait_for(
                    lambda: get_symbols_by_blk(first_blk_dev),
                    timeout=_TIMEOUT)
                lun_sl = get_symbols_by_blk(first_blk_dev)
                if not lun_sl:
                    raise exceptions.TestFail("lun symbolic links not found in "
                                              "/dev/disk/by-path/ for %s" %
                                              first_blk_dev)
                lun_dev = lun_sl[0]
                path_to_blk = os.path.join(_BY_PATH_DIR, lun_dev)
            else:
                pass
        else:
            raise exceptions.TestSkipError("Not provided how to pass"
                                           "virtual disk to VM.")

        # create qcow2 file on the block device with specified size
        if path_to_blk:
            cmd = "qemu-img create -f qcow2 %s %s" % (path_to_blk, disk_size)
            try:
                process.run(cmd, shell=True)
            except process.CmdError as detail:
                raise exceptions.TestFail(
                    "Failed to create qcow2 on blk dev: %s" % detail)
        else:
            raise exceptions.TestFail("Don't have a vaild path to blk dev.")

        # prepare disk xml
        if "vol" in vd_format:
            vol_list = utlv.get_vol_list(pool_name, vol_check=True,
                                         timeout=_TIMEOUT*3)
            test_vol = list(vol_list.keys())[0]
            disk_params = {'type_name': disk_type,
                           'target_dev': device_target,
                           'target_bus': target_bus,
                           'source_pool': pool_name,
                           'source_volume': test_vol,
                           'driver_type': driver_type}
        else:
            disk_params = {'type_name': disk_type,
                           'device': disk_device,
                           'driver_name': driver_name,
                           'driver_type': driver_type,
                           'source_file': path_to_blk,
                           'target_dev': device_target,
                           'target_bus': target_bus}
        if vm.is_alive():
            vm.destroy(gracefully=False)
        new_disk = disk.Disk()
        new_disk.xml = open(utlv.create_disk_xml(disk_params)).read()

        # start vm with the virtual disk
        vmxml.devices = vmxml.devices.append(new_disk)
        vmxml.sync()
        vm.start()
        session = vm.wait_for_login()
        cur_disks = virt_vm.get_disks()
        mount_disk = "".join(list(set(old_disks) ^ set(cur_disks)))

        # mkfs and mount disk in vm, create a file on that disk.
        if not mount_disk:
            logging.debug("old_disk: %s, new_disk: %s", old_disks, cur_disks)
            raise exceptions.TestFail("No new disk found in vm.")
        mkfs_and_mount(session, mount_disk)
        create_file_in_vm(session, "/mnt/before_snapshot.txt", "before")

        # virsh snapshot-create-as vm s --disk-only --diskspec vda,file=path
        if snapshot_disk_only:
            vm_blks = list(vm.get_disk_devices().keys())
            options = "%s --disk-only" % snapshot_name
            for vm_blk in vm_blks:
                snapshot_file = snapshot_dir + "/" + vm_blk + "." + snapshot_name
                if os.path.exists(snapshot_file):
                    os.remove(snapshot_file)
                options = options + " --diskspec %s,file=%s" % (vm_blk,
                                                                snapshot_file)
        else:
            options = snapshot_name
        utlv.check_exit_status(virsh.snapshot_create_as(vm_name, options))

        # check virsh snapshot-list
        logging.debug("Running: snapshot-list %s", vm_name)
        snapshot_list = virsh.snapshot_list(vm_name)
        logging.debug("snapshot list is: %s", snapshot_list)
        if not snapshot_list:
            raise exceptions.TestFail("snapshots not found after creation.")

        # snapshot-revert doesn't support external snapshots for now, so
        # only check this with internal snapshots.
        if not snapshot_disk_only:
            create_file_in_vm(session, "/mnt/after_snapshot.txt", "after")
            logging.debug("Running: snapshot-revert %s %s",
                          vm_name, snapshot_name)
            utlv.check_exit_status(virsh.snapshot_revert(vm_name, snapshot_name))
            session = vm.wait_for_login()
            file_existence, file_content = get_file_in_vm(session,
                                                          "/mnt/after_snapshot.txt")
            logging.debug("file exist = %s, file content = %s",
                          file_existence, file_content)
            if file_existence:
                raise exceptions.TestFail("The file created "
                                          "after snapshot still exists.")
            file_existence, file_content = get_file_in_vm(session,
                                                          "/mnt/before_snapshot.txt")
            logging.debug("file eixst = %s, file content = %s",
                          file_existence, file_content)
            if ((not file_existence) or (file_content.strip() != "before")):
                raise exceptions.TestFail("The file created "
                                          "before snapshot is lost.")
        # delete snapshots
        # if diskonly, delete --metadata and remove files
        # if not diskonly, delete snapshot
        if snapshot_disk_only:
            options = "--metadata"
        else:
            options = ""
        for snap in snapshot_list:
            logging.debug("deleting snapshot %s with options %s",
                          snap, options)
            result = virsh.snapshot_delete(vm_name, snap, options)
            logging.debug("result of snapshot-delete: %s",
                          result.stdout.strip())
            if snapshot_disk_only:
                vm_blks = list(vm.get_disk_devices().keys())
                for vm_blk in vm_blks:
                    snapshot_file = snapshot_dir + "/" + vm_blk + "." + snap
                    if os.path.exists(snapshot_file):
                        os.remove(snapshot_file)
        snapshot_list = virsh.snapshot_list(vm_name)
        if snapshot_list:
            raise exceptions.TestFail("Snapshot not deleted: %s", snapshot_list)
    except Exception as detail:
        raise exceptions.TestFail("exception happens: %s", detail)
    finally:
        logging.debug("Start to clean up env...")
        vmxml_backup.sync()
        if pool_ins and pool_ins.pool_exists(pool_name):
            virsh.pool_destroy(pool_name)
        for new_vhba in new_vhbas:
            virsh.nodedev_destroy(new_vhba)
        utils_npiv.restart_multipathd()
        if old_mpath_conf:
            utils_npiv.prepare_multipath_conf(conf_path=mpath_conf_path,
                                              conf_content=old_mpath_conf,
                                              replace_existing=True)
        if not original_mpath_conf_exist and os.path.exists(mpath_conf_path):
            os.remove(mpath_conf_path)
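# prepare_scsi_pool() is also outside this excerpt. A hedged sketch of the
# idea, assuming it creates and starts a "scsi" pool backed by an fc_host
# vHBA adapter (the XML layout follows the libvirt storage pool format; the
# helper body itself is an assumption, not the test's actual code):
import tempfile

from virttest import virsh


def prepare_scsi_pool(pool_name, wwnn, wwpn, parent_hba, pool_target):
    """Hypothetical helper: create a transient vHBA-backed scsi pool."""
    pool_xml = """
    <pool type='scsi'>
        <name>%s</name>
        <source>
            <adapter type='fc_host' parent='%s' wwnn='%s' wwpn='%s'/>
        </source>
        <target>
            <path>%s</path>
        </target>
    </pool>
    """ % (pool_name, parent_hba, wwnn, wwpn, pool_target)
    xml_file = tempfile.NamedTemporaryFile(mode="w", suffix=".xml",
                                           delete=False)
    xml_file.write(pool_xml)
    xml_file.close()
    virsh.pool_create(xml_file.name, ignore_status=False, debug=True)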