Example 1
def run(ceph_cluster, **kw):
    """
    Test Cases Covered :
    CEPH-83573521	Remove a subvolume group by retaining the snapshot : ceph fs subvolume rm <vol_n...

    Pre-requisites :
    1. We need at least one client node to execute this test case
    2. Create fs volume 'cephfs' if the volume is not there
    3. ceph fs subvolumegroup create <vol_name> <group_name> --pool_layout <data_pool_name>
        Ex : ceph fs subvolumegroup create cephfs subvolgroup_retain_snapshot_1
    4. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_2 --size 5368706371 --group_name subvolgroup_
    5. Create data on the subvolume
        Ex:  python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 --files
            100 --files-per-dir 10 --dirs-per-dir 2 --top /mnt/cephfs_fuse1baxgbpaia_1/
    6. Create snapshot of the subvolume
        Ex: ceph fs subvolume snapshot create cephfs subvol_2 snap_1 --group_name subvolgroup_retain_snapshot_1
    7. Copy the data from the mounted volume to local disk for verification

    Retain the snapshots and verify the data after cloning:
    1. Remove the subvolume while retaining its snapshots:
        ceph fs subvolume rm <vol_name> <subvol_name> [--group_name <subvol_group_name>] --force --retain-snapshots
    2. Verify the subvolume path can no longer be fetched after the removal.
    3. Clone a new subvolume from the retained snapshot.
    4. Compare the contents of the cloned volume with the copy saved locally.

    Clean Up:
    1. Delete all the snapshots created
    2. Delete the subvolumes
    3. Delete the subvolume groups
    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if len(clients) < 1:
            log.info(
                f"This test requires minimum 1 client nodes.This has only {len(clients)} clients"
            )
            return 1
        default_fs = "cephfs"
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10)))
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        subvolumegroup_list = [
            {
                "vol_name": default_fs,
                "group_name": "subvolgroup_retain_snapshot_1"
            },
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(client1, **subvolumegroup)
        subvolume = {
            "vol_name": default_fs,
            "subvol_name": "subvol_retain_snapshot",
            "group_name": "subvolgroup_retain_snapshot_1",
            "size": "5368706371",
        }
        fs_util.create_subvolume(client1, **subvolume)
        log.info("Get the path of sub volume")
        subvol_path, rc = client1.exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_retain_snapshot subvolgroup_retain_snapshot_1",
        )
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path.read().decode().strip()}",
        )
        client1.exec_command(
            sudo=True,
            cmd=
            f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 "
            f"--files 100 --files-per-dir 10 --dirs-per-dir 2 --top "
            f"{kernel_mounting_dir_1}",
            long_running=True,
        )
        snapshot = {
            "vol_name": default_fs,
            "subvol_name": "subvol_retain_snapshot",
            "snap_name": "snap_1",
            "group_name": "subvolgroup_retain_snapshot_1",
        }
        fs_util.create_snapshot(client1, **snapshot)
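        # Copy the subvolume contents to local disk; the subvolume is removed next
        # (with its snapshot retained) and the clone is later verified against this copy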
        client1.exec_command(sudo=True, cmd=f"mkdir -p /tmp/{mounting_dir}")
        client1.exec_command(
            sudo=True,
            cmd=f"cp -r {kernel_mounting_dir_1}/* /tmp/{mounting_dir}")

        fs_util.remove_subvolume(client1,
                                 **subvolume,
                                 retain_snapshots=True,
                                 force=True,
                                 validate=False)
        log.info(
            "Verify that fetching the subvolume path fails now that the subvolume was removed with retained snapshots"
        )
        subvol_path, rc = client1.exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_retain_snapshot subvolgroup_retain_snapshot_1",
            check_ec=False,
        )
        if rc == 0:
            raise CommandFailed(
                "Remove subvolume with --retain-snapshots has not succeeded. "
                "We are still able to fetch the path of the subvolume after deletion"
            )
        log.info("Clone a subvolume from snapshot")
        retain_snapshot_1 = {
            "vol_name": default_fs,
            "subvol_name": "subvol_retain_snapshot",
            "snap_name": "snap_1",
            "target_subvol_name": "retain_snapshot_1",
            "group_name": "subvolgroup_retain_snapshot_1",
        }
        fs_util.create_clone(client1, **retain_snapshot_1)
        fs_util.validate_clone_state(client1, retain_snapshot_1)
        clonevol_path, rc = client1.exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} {retain_snapshot_1['target_subvol_name']}",
        )
        fuse_mounting_dir_2 = f"/mnt/cephfs_fuse{mounting_dir}_2/"
        fs_util.fuse_mount(
            [client1],
            fuse_mounting_dir_2,
            extra_params=f" -r {clonevol_path.read().decode().strip()}",
        )
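        # Compare the clone contents with the local copy taken before the subvolume was removed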
        client1.exec_command(
            sudo=True,
            cmd=f"diff -qr /tmp/{mounting_dir} {fuse_mounting_dir_2}")
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    finally:
        log.info("Clean Up in progess")
        rmclone_list = [
            {
                "vol_name": default_fs,
                "subvol_name": "retain_snapshot_1"
            },
        ]
        for clone_vol in rmclone_list:
            fs_util.remove_subvolume(client1, **clone_vol)
        fs_util.remove_snapshot(client1,
                                **snapshot,
                                validate=False,
                                force=True)
        for subvolumegroup in subvolumegroup_list:
            fs_util.remove_subvolumegroup(client1,
                                          **subvolumegroup,
                                          force=True)
Example 2
def run(ceph_cluster, **kw):
    """
    CEPH-83574024 - Ensure Snapshot and cloning works on nfs exports
    Pre-requisites:
    1. Create cephfs volume
       ceph fs volume create <vol_name>
    2. Create nfs cluster
       ceph nfs cluster create <nfs_name> <nfs_server>

    Test operation:
    1. Create cephfs nfs export
       ceph nfs export create cephfs <fs_name> <nfs_name> <nfs_export_name> path=<export_path>
    2. Create cephfs subvolume group
    3. Create cephfs subvolume in cephfs subvolume group
    4. Create cephfs subvolume in the default cephfs subvolume group
    5. Mount nfs mount with cephfs export
       "mount -t nfs -o port=2049 <nfs_server>:<nfs_export> <nfs_mounting_dir>
    6. Run IOs on both cephfs subvolumes
    7. Create snapshots of both cephfs subvolumes
    8. Create clone of both cephfs subvolumes from snapshots
    9. Verify data is consistent across subvolumes, snapshots & clones

    Clean-up:
    1. Remove all the data in Cephfs file system
    2. Remove all the cephfs mounts
    3. Delete cephfs nfs export
    """
    try:
        tc = "CEPH-83574024"
        log.info(f"Running cephfs {tc} test case")

        config = kw["config"]
        build = config.get("build", config.get("rhbuild"))

        fs_util = FsUtils(ceph_cluster)
        clients = ceph_cluster.get_ceph_objects("client")
        client1 = clients[0]
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        mon_node_ip = fs_util.get_mon_node_ips()
        mon_node_ip = ",".join(mon_node_ip)
        rhbuild = config.get("rhbuild")
        nfs_servers = ceph_cluster.get_ceph_objects("nfs")
        nfs_server = nfs_servers[0].node.hostname
        nfs_name = "cephfs-nfs"
        nfs_export_name = "/export_" + "".join(
            secrets.choice(string.digits) for i in range(3))
        export_path = "/"
        fs_name = "cephfs"
        nfs_mounting_dir = "/mnt/nfs_" + "".join(
            secrets.choice(string.ascii_uppercase + string.digits)
            for i in range(5))
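        # Argument order of 'ceph nfs export create cephfs' differs between RHCS 5.0 and later builds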
        if "5.0" in rhbuild:
            client1.exec_command(
                sudo=True,
                cmd=f"ceph nfs export create cephfs {fs_name} {nfs_name} "
                f"{nfs_export_name} path={export_path}",
            )
        else:
            client1.exec_command(
                sudo=True,
                cmd=f"ceph nfs export create cephfs {nfs_name} "
                f"{nfs_export_name} {fs_name} path={export_path}",
            )
        subvolumegroup = {
            "vol_name": fs_name,
            "group_name": "subvolume_group1",
        }
        fs_util.create_subvolumegroup(client1, **subvolumegroup)
        subvolume_list = [
            {
                "vol_name": fs_name,
                "subvol_name": "subvolume1",
                "group_name": "subvolume_group1",
            },
            {
                "vol_name": fs_name,
                "subvol_name": "subvolume2",
            },
        ]
        for subvolume in subvolume_list:
            fs_util.create_subvolume(client1, **subvolume)
        commands = [
            f"mkdir -p {nfs_mounting_dir}",
            f"mount -t nfs -o port=2049 {nfs_server}:{nfs_export_name} {nfs_mounting_dir}",
        ]
        for command in commands:
            client1.exec_command(sudo=True, cmd=command)
        out, rc = client1.exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {fs_name} subvolume1 --group_name subvolume_group1",
        )
        subvolume1_path = out.read().decode().rstrip()
        out, rc = client1.exec_command(
            sudo=True, cmd=f"ceph fs subvolume getpath {fs_name} subvolume2")
        subvolume2_path = out.read().decode().rstrip()
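        # Generate data: smallfile IO on subvolume1 and dd-created files on subvolume2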
        commands = [
            f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --file-size 4 "
            f"--files 1000 --top {nfs_mounting_dir}{subvolume1_path}",
            f"for n in {{1..20}}; do     dd if=/dev/urandom of={nfs_mounting_dir}{subvolume2_path}"
            f"/file$(printf %03d "
            "$n"
            ") bs=500k count=1000; done",
        ]
        for command in commands:
            client1.exec_command(sudo=True, cmd=command, long_running=True)
        commands = [
            f"ceph fs subvolume snapshot create {fs_name} subvolume1 snap1 --group_name subvolume_group1",
            f"ceph fs subvolume snapshot create {fs_name} subvolume2 snap2",
        ]
        for command in commands:
            out, err = client1.exec_command(sudo=True, cmd=command)
        clone_status_1 = {
            "vol_name": fs_name,
            "subvol_name": "subvolume1",
            "snap_name": "snap1",
            "target_subvol_name": "clone1",
            "group_name": "subvolume_group1",
            "target_group_name": "subvolume_group1",
        }
        fs_util.create_clone(client1, **clone_status_1)
        fs_util.validate_clone_state(client1, clone_status_1, timeout=6000)
        clone_status_2 = {
            "vol_name": fs_name,
            "subvol_name": "subvolume2",
            "snap_name": "snap2",
            "target_subvol_name": "clone2",
        }
        fs_util.create_clone(client1, **clone_status_2)
        fs_util.validate_clone_state(client1, clone_status_2, timeout=6000)
        out, rc = client1.exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {fs_name} clone1 --group_name subvolume_group1",
        )
        clone1_path = out.read().decode().rstrip()
        out, rc = client1.exec_command(
            sudo=True, cmd=f"ceph fs subvolume getpath {fs_name} clone2")
        clone2_path = out.read().decode().rstrip()
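        # Verify data consistency across the subvolumes, their snapshots, and the clones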
        commands = [
            f"diff -r {nfs_mounting_dir}{subvolume1_path} {nfs_mounting_dir}{subvolume1_path}/.snap/_snap1*",
            f"diff -r {nfs_mounting_dir}{subvolume2_path} {nfs_mounting_dir}{subvolume2_path}/.snap/_snap2*",
            f"diff -r {nfs_mounting_dir}{subvolume1_path} {nfs_mounting_dir}{clone1_path}",
            f"diff -r {nfs_mounting_dir}{subvolume2_path} {nfs_mounting_dir}{clone2_path}",
        ]
        for command in commands:
            client1.exec_command(sudo=True, cmd=command)
        log.info("Test completed successfully")
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    finally:
        log.info("Cleaning up")
        client1.exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume snapshot rm {fs_name} subvolume1 snap1 --group_name subvolume_group1",
            check_ec=False,
        )
        client1.exec_command(
            sudo=True,
            cmd=f"ceph fs subvolume snapshot rm {fs_name} subvolume2 snap2",
            check_ec=False,
        )
        client1.exec_command(sudo=True, cmd=f"rm -rf {nfs_mounting_dir}/*")
        client1.exec_command(sudo=True, cmd=f"umount {nfs_mounting_dir}")
        client1.exec_command(sudo=True,
                             cmd=f"rm -rf {nfs_mounting_dir}/",
                             check_ec=False)
        client1.exec_command(
            sudo=True,
            cmd=f"ceph nfs export delete {nfs_name} {nfs_export_name}",
            check_ec=False,
        )
Example 3
def run(ceph_cluster, **kw):
    """
    Test Cases Covered:
    CEPH-83573501	Create a Cloned Volume using a snapshot

    Pre-requisites :
    1. We need at least one client node to execute this test case
    2. Create fs volume 'cephfs' if the volume is not there
    3. Create 2 sub volume groups
        Ex : ceph fs subvolumegroup create cephfs subvolgroup_1
             ceph fs subvolumegroup create cephfs subvolgroup_2
    4. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_clone_status --size 5368706371 --group_name subvolgroup_
    5. Create Data on the subvolume
        Ex:  python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 --files
            100 --files-per-dir 10 --dirs-per-dir 2 --top /mnt/cephfs_fuse1baxgbpaia_1/
    6. Create snapshot of the subvolume
        Ex: ceph fs subvolume snapshot create cephfs subvol_clone_status snap_1 --group_name subvolgroup_1

    Clone Operations and Clone States:
    1. Create a clone in the default location.
        ceph fs subvolume snapshot clone cephfs subvol_clone_status snap_1 clone_status_1 --group_name subvolgroup_1
    2. Validate all the states of clone creation progress
        pending : Clone operation has not started
        in-progress : Clone operation is in progress
        complete : Clone operation has successfully finished
    3. Mount the cloned volume and check the contents
    4. Create a clone in different subvolumegroup ie., subvolumegroup 2
        ceph fs subvolume snapshot clone cephfs subvol_clone_status snap_1 clone_status_1 --group_name subvolgroup_1
            --target_group_name subvolgroup_2
    5. Validate all the states of clone creation progress
        pending : Clone operation has not started
        in-progress : Clone operation is in progress
        complete : Clone operation has successfully finished
    6. Once all the clones have moved to the complete state, delete all the clones

    Clean-up:
    1. ceph fs snapshot rm <vol_name> <subvol_name> snap_name [--group_name <subvol_group_name>]
    2. ceph fs subvolume rm <vol_name> <subvol_name> [--group_name <subvol_group_name>]
    3. ceph fs subvolumegroup rm <vol_name> <group_name>
    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if len(clients) < 1:
            log.info(
                f"This test requires minimum 1 client nodes.This has only {len(clients)} clients"
            )
            return 1
        default_fs = "cephfs"
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10)))
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        subvolumegroup_list = [
            {
                "vol_name": default_fs,
                "group_name": "subvolgroup_clone_status_1"
            },
            {
                "vol_name": default_fs,
                "group_name": "subvolgroup_clone_status_2"
            },
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(client1, **subvolumegroup)
        subvolume = {
            "vol_name": default_fs,
            "subvol_name": "subvol_clone_status",
            "group_name": "subvolgroup_clone_status_1",
            "size": "5368706371",
        }
        fs_util.create_subvolume(client1, **subvolume)
        log.info("Get the path of sub volume")
        subvol_path, rc = client1.exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_clone_status subvolgroup_clone_status_1",
        )
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path.read().decode().strip()}",
        )
        client1.exec_command(
            sudo=True,
            cmd=
            f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 "
            f"--files 100 --files-per-dir 10 --dirs-per-dir 2 --top "
            f"{kernel_mounting_dir_1}",
            long_running=True,
        )
        snapshot = {
            "vol_name": default_fs,
            "subvol_name": "subvol_clone_status",
            "snap_name": "snap_1",
            "group_name": "subvolgroup_clone_status_1",
        }
        fs_util.create_snapshot(client1, **snapshot)
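        # First clone from the snapshot, created in the default location (no target group)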
        clone_status_1 = {
            "vol_name": default_fs,
            "subvol_name": "subvol_clone_status",
            "snap_name": "snap_1",
            "target_subvol_name": "clone_status_1",
            "group_name": "subvolgroup_clone_status_1",
        }
        fs_util.create_clone(client1, **clone_status_1)
        transition_states = fs_util.validate_clone_state(
            client1, clone_status_1)
        valid_state_flow = [
            ["pending", "in-progress", "complete"],
            ["in-progress", "complete"],
        ]
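        # Fail the test if the observed clone state transitions are not a valid sequence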
        if transition_states not in valid_state_flow:
            return 1
        clonevol_path, rc = client1.exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} {clone_status_1['target_subvol_name']}",
        )
        fuse_mounting_dir_2 = f"/mnt/cephfs_fuse{mounting_dir}_2/"
        fs_util.fuse_mount(
            [client1],
            fuse_mounting_dir_2,
            extra_params=f" -r {clonevol_path.read().decode().strip()}",
        )
        client1.exec_command(
            sudo=True,
            cmd=f"diff -qr {kernel_mounting_dir_1} {fuse_mounting_dir_2}")

        clone_status_2 = {
            "vol_name": default_fs,
            "subvol_name": "subvol_clone_status",
            "snap_name": "snap_1",
            "target_subvol_name": "clone_status_2",
            "group_name": "subvolgroup_clone_status_1",
            "target_group_name": "subvolgroup_clone_status_2",
        }
        fs_util.create_clone(client1, **clone_status_2)
        transition_states = fs_util.validate_clone_state(
            client1, clone_status_2)
        if transition_states not in valid_state_flow:
            return 1
        clonevol_path, rc = client1.exec_command(
            sudo=True,
            cmd=f"ceph fs subvolume getpath {default_fs} "
            f"{clone_status_2['target_subvol_name']} {clone_status_2['target_group_name']}",
        )
        fuse_mounting_dir_3 = f"/mnt/cephfs_fuse{mounting_dir}_3/"
        fs_util.fuse_mount(
            [client1],
            fuse_mounting_dir_3,
            extra_params=f" -r {clonevol_path.read().decode().strip()}",
        )
        client1.exec_command(
            sudo=True,
            cmd=f"diff -qr {kernel_mounting_dir_1} {fuse_mounting_dir_3}")
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    finally:
        log.info("Clean Up in progess")
        rmclone_list = [
            {
                "vol_name": default_fs,
                "subvol_name": "clone_status_1"
            },
            {
                "vol_name": default_fs,
                "subvol_name": "clone_status_2",
                "group_name": "subvolgroup_clone_status_2",
            },
        ]
        for clone_vol in rmclone_list:
            fs_util.remove_subvolume(client1, **clone_vol)
        fs_util.remove_snapshot(client1, **snapshot)
        fs_util.remove_subvolume(client1, **subvolume)
        for subvolumegroup in subvolumegroup_list:
            fs_util.remove_subvolumegroup(client1,
                                          **subvolumegroup,
                                          force=True)
Example 4
def run(ceph_cluster, **kw):
    """
    Test Cases Covered:
    CEPH-83573502  Interrupt the cloning operation in between and observe the behavior.
    Pre-requisites :
    1. We need at least one client node to execute this test case
    2. Create fs volume 'cephfs' if the volume is not there
    3. Create 1 subvolume group
        Ex : ceph fs subvolumegroup create cephfs subvolgroup_1
    4. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_clone_cancel --size 5368706371 --group_name subvolgroup_
    5. Create data on the subvolume
        Ex:  python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 --files
            100 --files-per-dir 10 --dirs-per-dir 2 --top /mnt/cephfs_fuse1baxgbpaia_1/
    6. Create snapshot of the subvolume
        Ex: ceph fs subvolume snapshot create cephfs subvol_clone_cancel snap_1 --group_name subvolgroup_1

    Clone Operations and Clone States:
    1. Create a clone in the default location.
        ceph fs subvolume snapshot clone cephfs subvol_clone_cancel snap_1 clone_status_1 --group_name subvolgroup_1
    2. Cancel the clone operation while it is in progress and check its state.
    3. The clone state should move to 'canceled'.

    Clean-up:
    1. ceph fs snapshot rm <vol_name> <subvol_name> snap_name [--group_name <subvol_group_name>]
    2. ceph fs subvolume rm <vol_name> <subvol_name> [--group_name <subvol_group_name>]
    3. ceph fs subvolumegroup rm <vol_name> <group_name>
    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if len(clients) < 1:
            log.info(
                f"This test requires minimum 1 client nodes.This has only {len(clients)} clients"
            )
            return 1
        default_fs = "cephfs"
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10))
        )
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        subvolumegroup_list = [
            {"vol_name": default_fs, "group_name": "subvolgroup_clone_cancel_1"}
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(client1, **subvolumegroup)
        subvolume = {
            "vol_name": default_fs,
            "subvol_name": "subvol_clone_cancel",
            "group_name": "subvolgroup_clone_cancel_1",
            "size": "5368706371",
        }
        fs_util.create_subvolume(client1, **subvolume)
        log.info("Get the path of sub volume")
        subvol_path, rc = client1.exec_command(
            sudo=True,
            cmd=f"ceph fs subvolume getpath {default_fs} subvol_clone_cancel subvolgroup_clone_cancel_1",
        )
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path.read().decode().strip()}",
        )
        client1.exec_command(
            sudo=True,
            cmd=f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 "
            f"--files 100 --files-per-dir 10 --dirs-per-dir 2 --top "
            f"{kernel_mounting_dir_1}",
            long_running=True,
        )
        snapshot = {
            "vol_name": default_fs,
            "subvol_name": "subvol_clone_cancel",
            "snap_name": "snap_1",
            "group_name": "subvolgroup_clone_cancel_1",
        }
        fs_util.create_snapshot(client1, **snapshot)
        clone_status_3 = {
            "vol_name": default_fs,
            "subvol_name": "subvol_clone_cancel",
            "snap_name": "snap_1",
            "target_subvol_name": "clone_status_3",
            "group_name": "subvolgroup_clone_cancel_1",
        }
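        # Start the clone, then cancel it and verify the clone state moves to 'canceled'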
        fs_util.create_clone(client1, **clone_status_3)
        fs_util.clone_cancel(
            client1,
            clone_status_3["vol_name"],
            clone_status_3["target_subvol_name"],
            group_name=clone_status_3.get("target_group_name", ""),
        )
        fs_util.validate_clone_state(client1, clone_status_3, "canceled")
        return 0

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    finally:
        log.info("Clean Up in progess")
        rmclone_list = [
            {"vol_name": default_fs, "subvol_name": "clone_status_3", "force": True},
        ]
        for clone_vol in rmclone_list:
            fs_util.remove_subvolume(client1, **clone_vol)
        fs_util.remove_snapshot(client1, **snapshot)
        fs_util.remove_subvolume(client1, **subvolume)
        for subvolumegroup in subvolumegroup_list:
            fs_util.remove_subvolumegroup(client1, **subvolumegroup, force=True)
Example 5
def run(ceph_cluster, **kw):
    """
    Test Cases Covered :
    CEPH-83573524	Ensure the subvolume attributes are retained post clone operations

    Pre-requisites :
    1. We need at least one client node to execute this test case
    2. Create fs volume 'cephfs' if the volume is not there
    3. ceph fs subvolumegroup create <vol_name> <group_name> --pool_layout <data_pool_name>
        Ex : ceph fs subvolumegroup create cephfs subvolgroup_clone_attr_vol_1
    4. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_2 --size 5368706371 --group_name subvolgroup_1
    5. Create Data on the subvolume
        Ex:  python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 --files
            100 --files-per-dir 10 --dirs-per-dir 2 --top /mnt/cephfs_fuse1baxgbpaia_1/
    6. Create snapshot of the subvolume
        Ex: ceph fs subvolume snapshot create cephfs subvol_2 snap_1 --group_name subvolgroup_clone_attr_vol_1
    7. Set file and size xattributes on dir

    Test Case Flow:
    1. Create Clone out of subvolume.
    2. Mount the cloned volume.
    3. Validate the contents of the cloned volume against the contents of the subvolume
    4. Validate file and size xattributes on dir in cloned volume

    Clean Up:
    1. Delete Cloned volume
    2. Delete subvolumegroup
    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if len(clients) < 1:
            log.info(
                f"This test requires minimum 1 client nodes.This has only {len(clients)} clients"
            )
            return 1
        default_fs = "cephfs"
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10)))
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        subvolumegroup_list = [
            {
                "vol_name": default_fs,
                "group_name": "subvolgroup_clone_attr_vol_1"
            },
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(client1, **subvolumegroup)
        subvolume = {
            "vol_name": default_fs,
            "subvol_name": "subvol_clone_attr_vol",
            "group_name": "subvolgroup_clone_attr_vol_1",
            "size": "5368706371",
        }
        fs_util.create_subvolume(client1, **subvolume)
        log.info("Get the path of sub volume")
        subvol_path, rc = client1.exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_clone_attr_vol subvolgroup_clone_attr_vol_1",
        )
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path.read().decode().strip()}",
        )
        client1.exec_command(
            sudo=True,
            cmd=
            f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 "
            f"--files 100 --files-per-dir 10 --dirs-per-dir 1 --top "
            f"{kernel_mounting_dir_1}",
            long_running=True,
        )
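        # Set file and size quota attributes on the subvolume and record them for later comparison with the clone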
        fs_util.set_quota_attrs(clients[0], 9999, 999, kernel_mounting_dir_1)
        quota_attrs = fs_util.get_quota_attrs(clients[0],
                                              kernel_mounting_dir_1)
        snapshot = {
            "vol_name": default_fs,
            "subvol_name": "subvol_clone_attr_vol",
            "snap_name": "snap_1",
            "group_name": "subvolgroup_clone_attr_vol_1",
        }
        fs_util.create_snapshot(client1, **snapshot)
        client1.exec_command(sudo=True, cmd=f"mkdir -p /tmp/{mounting_dir}")

        log.info("Clone a subvolume from snapshot")
        clone_attr_vol_1 = {
            "vol_name": default_fs,
            "subvol_name": "subvol_clone_attr_vol",
            "snap_name": "snap_1",
            "target_subvol_name": "clone_attr_vol_1",
            "group_name": "subvolgroup_clone_attr_vol_1",
        }
        fs_util.create_clone(client1, **clone_attr_vol_1)
        fs_util.validate_clone_state(client1, clone_attr_vol_1)
        clonevol_path, rc = client1.exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} {clone_attr_vol_1['target_subvol_name']}",
        )
        fuse_mounting_dir_2 = f"/mnt/cephfs_fuse{mounting_dir}_2/"
        fs_util.fuse_mount(
            [client1],
            fuse_mounting_dir_2,
            extra_params=f" -r {clonevol_path.read().decode().strip()}",
        )
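        # The clone should retain the same quota attributes and data as the source subvolume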
        quota_attrs_clone = fs_util.get_quota_attrs(clients[0],
                                                    fuse_mounting_dir_2)
        client1.exec_command(
            sudo=True,
            cmd=f"diff -qr {kernel_mounting_dir_1} {fuse_mounting_dir_2}")
        if quota_attrs_clone != quota_attrs:
            log.info(f"attributes of cloned volumes{quota_attrs_clone}")
            log.info(f"attributes of volumes{quota_attrs}")
            log.error(
                "Quota attributes of the clone is not matching with quota attributes of subvolume"
            )
            return 1
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    finally:
        log.info("Clean Up in progess")
        rmclone_list = [
            {
                "vol_name": default_fs,
                "subvol_name": "clone_attr_vol_1"
            },
        ]
        for clone_vol in rmclone_list:
            fs_util.remove_subvolume(client1, **clone_vol)
        fs_util.remove_snapshot(client1,
                                **snapshot,
                                validate=False,
                                check_ec=False)
        fs_util.remove_subvolume(client1,
                                 **subvolume,
                                 validate=False,
                                 check_ec=False)
        for subvolumegroup in subvolumegroup_list:
            fs_util.remove_subvolumegroup(client1,
                                          **subvolumegroup,
                                          force=True)