Example #1
def run(ceph_cluster, **kw):
    """
    Test Cases Covered :
    CEPH-83573409    Test to validate the removal of quota_max_bytes
                     Create an FS, create 10 directories, and mount them on kernel and fuse clients (5 mounts each).
                     Set the max-bytes quota to a number (say 1 GB), fill data until it reaches the limit, and
                     verify that the set quota limit is enforced.
                     Remove the quota once it reaches max bytes, try adding more
                     data, and verify that the quota is removed. Repeat the procedure a few more times.


    Pre-requisites :
    1. We need at least one client node to execute this test case
    2. Create the fs volume (ceph fs volume create cephfs) if the volume is not there
    3. ceph fs subvolumegroup create <vol_name> <group_name> --pool_layout <data_pool_name>
        Ex : ceph fs subvolumegroup create cephfs subvolgroup_clone_attr_vol_1
    4. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_1 --size 5368706371 --group_name subvolgroup_1
    5. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_2 --size 5368706371 --group_name subvolgroup_1

    Test Case Flow:
    1. Mount the subvolume_1 on the client using fuse
    2. Mount the subvolume_2 on the client using kernel
    3. Set the byte quota attribute to 1 GB on both mount points
    4. Write 3 GB of data and check that it fails
    5. Perform the same on the kernel mount
    6. Remove the byte quota (i.e., set max_bytes to 0) and try adding data to the same directory used in step 3
    7. Perform the same on the kernel mount
    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if not clients:
            log.info(
                f"This test requires minimum 1 client nodes.This has only {len(clients)} clients"
            )
            return 1
        default_fs = "cephfs"
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10)))
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        subvolumegroup_list = [
            {
                "vol_name": default_fs,
                "group_name": "subvolgroup_quota_byte_remove_1"
            },
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(client1, **subvolumegroup)
        subvolume_list = [
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_byte_remove_fuse",
                "group_name": "subvolgroup_quota_byte_remove_1",
                "size": "5368706371",
            },
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_byte_remove_kernel",
                "group_name": "subvolgroup_quota_byte_remove_1",
                "size": "5368706371",
            },
        ]
        for subvolume in subvolume_list:
            fs_util.create_subvolume(clients[0], **subvolume)
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        log.info("Get the path of sub volume")
        subvol_path, rc = clients[0].exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_byte_remove_kernel subvolgroup_quota_byte_remove_1",
        )
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path.read().decode().strip()}",
        )

        subvol_path, rc = clients[0].exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_byte_remove_fuse subvolgroup_quota_byte_remove_1",
        )
        fuse_mounting_dir_1 = f"/mnt/cephfs_fuse{mounting_dir}_1/"
        fs_util.fuse_mount(
            [clients[0]],
            fuse_mounting_dir_1,
            extra_params=f" -r {subvol_path.read().decode().strip()}",
        )
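        # set_quota_attrs args here are (client, max_files, max_bytes, path):
        # a 100-file limit plus a 1 GiB (1073741824-byte) quota on the fuse mount.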
        fs_util.set_quota_attrs(clients[0], 100, 1073741824,
                                fuse_mounting_dir_1)
        quota_attrs = fs_util.get_quota_attrs(clients[0], fuse_mounting_dir_1)
        fs_util.byte_quota_test(clients[0], fuse_mounting_dir_1, quota_attrs)

        log.info("Removing the quota to 0 and validating file quota attr")
        fs_util.set_quota_attrs(clients[0], 100, "0", fuse_mounting_dir_1)
        quota_attrs = fs_util.get_quota_attrs(clients[0], fuse_mounting_dir_1)
        fs_util.byte_quota_test(clients[0], fuse_mounting_dir_1, quota_attrs)

        fs_util.set_quota_attrs(clients[0], 100, 1073741824,
                                kernel_mounting_dir_1)
        quota_attrs = fs_util.get_quota_attrs(clients[0],
                                              kernel_mounting_dir_1)
        fs_util.byte_quota_test(clients[0], kernel_mounting_dir_1, quota_attrs)

        log.info("Increasing the quota to 0 and validating file quota attr")
        fs_util.set_quota_attrs(clients[0], 100, "0", kernel_mounting_dir_1)
        quota_attrs = fs_util.get_quota_attrs(clients[0],
                                              kernel_mounting_dir_1)
        fs_util.byte_quota_test(clients[0], kernel_mounting_dir_1, quota_attrs)

        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1

    finally:
        log.info("Clean Up in progess")
        for subvolume in subvolume_list:
            fs_util.remove_subvolume(client1, **subvolume)
        for subvolumegroup in subvolumegroup_list:
            fs_util.remove_subvolumegroup(client1,
                                          **subvolumegroup,
                                          force=True)
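
The quota calls above ultimately map onto CephFS extended attributes: ceph.quota.max_bytes is the documented byte-quota xattr, and writing 0 removes the limit. Below is a minimal standalone sketch of that mechanism, assuming a hypothetical CephFS mount at /mnt/cephfs_fuse_example (the FsUtils helpers used in these tests are not shown in this module):

import subprocess

MOUNT = "/mnt/cephfs_fuse_example"  # hypothetical CephFS mount point


def set_byte_quota(path, max_bytes):
    """Set the CephFS byte quota on a directory; 0 removes the limit."""
    subprocess.run(
        ["setfattr", "-n", "ceph.quota.max_bytes", "-v", str(max_bytes), path],
        check=True,
    )


def get_byte_quota(path):
    """Read the byte quota back from the directory's xattr."""
    out = subprocess.run(
        ["getfattr", "--only-values", "-n", "ceph.quota.max_bytes", path],
        capture_output=True, text=True, check=True,
    )
    return int(out.stdout.strip())


set_byte_quota(MOUNT, 1073741824)  # 1 GiB, as in the test above
assert get_byte_quota(MOUNT) == 1073741824
set_byte_quota(MOUNT, 0)           # removing the quota is setting it to 0
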
Example #2
def run(ceph_cluster, **kw):
    """
    Test Cases Covered :
    CEPH-83573403    Negative: Test to validate the decrease in the quota limit once it reaches the max limit (files).
                     Create an FS, create 10 directories, and mount them on kernel and fuse clients (5 mounts each).
                     Set the max-files quota to a number (say 50), add up to that number of files to the directory, and
                     verify that the set quota limit is enforced.
                     Try setting the quota limit lower than what was set earlier; setting the quota lower than the
                     number of files already present shouldn't be allowed. Repeat the same procedure 5-10 times.

    Pre-requisites :
    1. We need at least one client node to execute this test case
    2. Create the fs volume (ceph fs volume create cephfs) if the volume is not there
    3. ceph fs subvolumegroup create <vol_name> <group_name> --pool_layout <data_pool_name>
        Ex : ceph fs subvolumegroup create cephfs subvolgroup_clone_attr_vol_1
    4. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_1 --size 5368706371 --group_name subvolgroup_1
    5. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_2 --size 5368706371 --group_name subvolgroup_1

    Test Case Flow:
    1. Mount the subvolume_1 on the client using fuse
    2. Mount the subvolume_2 on the client using kernel
    3. Set the file quota attribute to 10 on both mount points
    4. Create 11 files and check that it fails at the 11th iteration
    5. Perform the same on the kernel mount
    6. Create a directory inside the fuse mount, set the file quota attribute, and verify
    7. Create a directory inside the kernel mount, set the file quota attribute, and verify
    8. Decrease the file quota and try creating files in the same directory used in step 3
    9. Perform the same on the kernel mount
    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if not clients:
            log.info(
                f"This test requires minimum 1 client nodes.This has only {len(clients)} clients"
            )
            return 1
        default_fs = "cephfs"
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10)))
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        subvolumegroup_list = [
            {
                "vol_name": default_fs,
                "group_name": "subvolgroup_quota_file_increase_1"
            },
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(client1, **subvolumegroup)
        subvolume_list = [
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_file_incr_fuse",
                "group_name": "subvolgroup_quota_file_increase_1",
                "size": "5368706371",
            },
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_file_incr_kernel",
                "group_name": "subvolgroup_quota_file_increase_1",
                "size": "5368706371",
            },
        ]
        for subvolume in subvolume_list:
            fs_util.create_subvolume(clients[0], **subvolume)
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        log.info("Get the path of sub volume")
        subvol_path, rc = clients[0].exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_file_incr_kernel subvolgroup_quota_file_increase_1",
        )
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path.strip()}",
        )

        subvol_path, rc = clients[0].exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_file_incr_fuse subvolgroup_quota_file_increase_1",
        )
        fuse_mounting_dir_1 = f"/mnt/cephfs_fuse{mounting_dir}_1/"
        fs_util.fuse_mount(
            [clients[0]],
            fuse_mounting_dir_1,
            extra_params=f" -r {subvol_path.strip()}",
        )
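        # A 100-file limit (with a 10 MB byte quota) before exercising the file-quota test.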
        fs_util.set_quota_attrs(clients[0], 100, 10000000, fuse_mounting_dir_1)
        quota_attrs = fs_util.get_quota_attrs(clients[0], fuse_mounting_dir_1)
        fs_util.file_quota_test(clients[0], fuse_mounting_dir_1, quota_attrs)

        log.info("Decreasing the quota to 50 and validating file quota attr")
        fs_util.set_quota_attrs(clients[0], 50, 10000000, fuse_mounting_dir_1)
        quota_attrs = fs_util.get_quota_attrs(clients[0], fuse_mounting_dir_1)
        fs_util.file_quota_test(clients[0], fuse_mounting_dir_1, quota_attrs)

        fs_util.set_quota_attrs(clients[0], 100, 10000000,
                                kernel_mounting_dir_1)
        quota_attrs = fs_util.get_quota_attrs(clients[0],
                                              kernel_mounting_dir_1)
        fs_util.file_quota_test(clients[0], kernel_mounting_dir_1, quota_attrs)

        log.info("Decreasing the quota to 50 and validating file quota attr")
        fs_util.set_quota_attrs(clients[0], 50, 10000000,
                                kernel_mounting_dir_1)
        quota_attrs = fs_util.get_quota_attrs(clients[0],
                                              kernel_mounting_dir_1)
        fs_util.file_quota_test(clients[0], kernel_mounting_dir_1, quota_attrs)

        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1

    finally:
        log.info("Clean Up in progess")
        for subvolume in subvolume_list:
            fs_util.remove_subvolume(client1, **subvolume)
        for subvolumegroup in subvolumegroup_list:
            fs_util.remove_subvolumegroup(client1,
                                          **subvolumegroup,
                                          force=True)
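
For reference, the failure mode file_quota_test checks for can be reproduced with plain xattrs: once ceph.quota.max_files is reached, further creates fail with EDQUOT. A rough sketch under a hypothetical mount path follows; note that CephFS quota enforcement is approximate, so real tests tolerate some slack around the exact limit:

import errno
import subprocess


def expect_file_quota(path, max_files):
    """Set a file quota and confirm an over-quota create is rejected."""
    subprocess.run(
        ["setfattr", "-n", "ceph.quota.max_files", "-v", str(max_files), path],
        check=True,
    )
    for i in range(max_files):
        open(f"{path}/file_{i}", "w").close()       # within the quota
    try:
        open(f"{path}/file_overflow", "w").close()  # should exceed the quota
    except OSError as err:
        assert err.errno == errno.EDQUOT, err
    else:
        raise AssertionError("file quota was not enforced")


expect_file_quota("/mnt/cephfs_fuse_example/test_dir", 10)  # hypothetical path
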
Example #3
def run(ceph_cluster, **kw):
    """
    Test Cases Covered :
    CEPH-83573399	Test to validate quota.max_files
                    Create an FS, create 10 directories, and mount them on kernel and fuse clients (5 mounts each).
                    Set the max-files quota to a number (say 50), add up to that number of files to the directory, and
                    verify that the set quota limit is enforced. Similarly, set different limits
                    on different directories and verify the quota.

    Pre-requisites :
    1. We need at least one client node to execute this test case
    2. Create the fs volume (ceph fs volume create cephfs) if the volume is not there
    3. ceph fs subvolumegroup create <vol_name> <group_name> --pool_layout <data_pool_name>
        Ex : ceph fs subvolumegroup create cephfs subvolgroup_clone_attr_vol_1
    4. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_1 --size 5368706371 --group_name subvolgroup_1
    5. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_2 --size 5368706371 --group_name subvolgroup_1

    Test Case Flow:
    1. Mount the subvolume_1 on the client using fuse
    2. Mount the subvolume_2 on the client using kernel
    3. Set the file quota attribute to 10 on both mount points
    4. Create 11 files and check that it fails at the 11th iteration
    5. Perform the same on the kernel mount
    6. Create a directory inside the fuse mount, set the file quota attribute, and verify
    7. Create a directory inside the kernel mount, set the file quota attribute, and verify
    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if not clients:
            log.info(
                f"This test requires minimum 1 client nodes.This has only {len(clients)} clients"
            )
            return 1
        default_fs = "cephfs"
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10)))
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")

        log.info("Cheking the file quota on root directory")
        root_folder_fuse_mount = f"/mnt/cephfs_fuse{mounting_dir}_2/"
        fs_util.fuse_mount([clients[0]], root_folder_fuse_mount)

        clients[0].exec_command(
            sudo=True,
            cmd=
            f"rm -rf {root_folder_fuse_mount}*;mkdir {root_folder_fuse_mount}test_fuse;",
        )
        fs_util.set_quota_attrs(clients[0], 20, 10000000,
                                f"{root_folder_fuse_mount}test_fuse")
        quota_attrs = fs_util.get_quota_attrs(
            clients[0], f"{root_folder_fuse_mount}test_fuse")
        fs_util.file_quota_test(clients[0],
                                f"{root_folder_fuse_mount}test_fuse",
                                quota_attrs)
        log.info("Setting Back the file quota to 0 on root directory ")
        fs_util.set_quota_attrs(clients[0], "0", 10000000,
                                f"{root_folder_fuse_mount}test_fuse")

        root_folder_kernel_mount = f"/mnt/cephfs_kernel{mounting_dir}_2/"
        mon_node_ips = fs_util.get_mon_node_ips()
        fs_util.kernel_mount([clients[0]], root_folder_kernel_mount,
                             ",".join(mon_node_ips))
        clients[0].exec_command(
            sudo=True,
            cmd=
            f"rm -rf {root_folder_kernel_mount}*;mkdir {root_folder_kernel_mount}test_kernel",
        )
        fs_util.set_quota_attrs(clients[0], 20, 10000000,
                                f"{root_folder_kernel_mount}test_kernel")
        quota_attrs = fs_util.get_quota_attrs(
            clients[0], f"{root_folder_kernel_mount}test_kernel")
        fs_util.file_quota_test(clients[0],
                                f"{root_folder_kernel_mount}test_kernel",
                                quota_attrs)
        log.info("Setting Back the file quota to 0 on root directory ")
        fs_util.set_quota_attrs(clients[0], "0", 10000000,
                                f"{root_folder_kernel_mount}test_kernel")

        subvolumegroup_list = [
            {
                "vol_name": default_fs,
                "group_name": "subvolgroup_quota_file_limit_1"
            },
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(client1, **subvolumegroup)
        subvolume_list = [
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_file_fuse",
                "group_name": "subvolgroup_quota_file_limit_1",
                "size": "5368706371",
            },
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_file_kernel",
                "group_name": "subvolgroup_quota_file_limit_1",
                "size": "5368706371",
            },
        ]
        for subvolume in subvolume_list:
            fs_util.create_subvolume(clients[0], **subvolume)

        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        log.info("Get the path of sub volume")
        subvol_path, rc = clients[0].exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_file_kernel subvolgroup_quota_file_limit_1",
        )
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path.read().decode().strip()}",
        )

        subvol_path, rc = clients[0].exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_file_fuse subvolgroup_quota_file_limit_1",
        )
        fuse_mounting_dir_1 = f"/mnt/cephfs_fuse{mounting_dir}_1/"

        fs_util.fuse_mount(
            [clients[0]],
            fuse_mounting_dir_1,
            extra_params=f" -r {subvol_path.read().decode().strip()}",
        )

        fs_util.set_quota_attrs(clients[0], 200, 10000000,
                                kernel_mounting_dir_1)
        quota_attrs = fs_util.get_quota_attrs(clients[0],
                                              kernel_mounting_dir_1)
        fs_util.file_quota_test(clients[0], kernel_mounting_dir_1, quota_attrs)

        fs_util.set_quota_attrs(clients[0], 100, 10000000, fuse_mounting_dir_1)
        quota_attrs = fs_util.get_quota_attrs(clients[0], fuse_mounting_dir_1)
        fs_util.file_quota_test(clients[0], fuse_mounting_dir_1, quota_attrs)

        fs_util.set_quota_attrs(clients[0], 100, 10000000,
                                kernel_mounting_dir_1)
        quota_attrs = fs_util.get_quota_attrs(clients[0],
                                              kernel_mounting_dir_1)
        fs_util.file_quota_test(clients[0], kernel_mounting_dir_1, quota_attrs)

        clients[0].exec_command(
            sudo=True, cmd=f"cd {fuse_mounting_dir_1};rm -rf *;mkdir test;")
        fs_util.set_quota_attrs(clients[0], 30, 10000000,
                                f"{fuse_mounting_dir_1}/test")
        quota_attrs = fs_util.get_quota_attrs(clients[0],
                                              f"{fuse_mounting_dir_1}/test")
        fs_util.file_quota_test(clients[0], f"{fuse_mounting_dir_1}/test",
                                quota_attrs)

        clients[0].exec_command(
            sudo=True, cmd=f"cd {kernel_mounting_dir_1};rm -rf *;mkdir test;")
        fs_util.set_quota_attrs(clients[0], 30, 10000000,
                                f"{kernel_mounting_dir_1}/test")
        quota_attrs = fs_util.get_quota_attrs(clients[0],
                                              f"{kernel_mounting_dir_1}/test")
        fs_util.file_quota_test(clients[0], f"{kernel_mounting_dir_1}/test",
                                quota_attrs)

        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1

    finally:
        log.info("Clean Up in progess")
        if "5" in build:
            fs_util.set_quota_attrs(clients[0], "0", "0",
                                    root_folder_fuse_mount)
        for subvolume in subvolume_list:
            fs_util.remove_subvolume(client1, **subvolume)
        for subvolumegroup in subvolumegroup_list:
            fs_util.remove_subvolumegroup(client1,
                                          **subvolumegroup,
                                          force=True)
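
A small sketch of the kind of lookup get_quota_attrs performs in the test above (an assumption about the helper, whose implementation lives elsewhere in the framework): both quota limits are readable as xattrs, and an unset quota may surface as a missing attribute rather than 0 on some kernels:

import subprocess


def read_quota_attrs(path):
    """Return {'files': N, 'bytes': N} for a CephFS directory (0 = no limit)."""
    attrs = {}
    for xattr, key in (("ceph.quota.max_files", "files"),
                       ("ceph.quota.max_bytes", "bytes")):
        out = subprocess.run(
            ["getfattr", "--only-values", "-n", xattr, path],
            capture_output=True, text=True,
        )
        attrs[key] = int(out.stdout.strip()) if out.returncode == 0 else 0
    return attrs


print(read_quota_attrs("/mnt/cephfs_fuse_example/test_fuse"))  # hypothetical
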
Example #4
def run(ceph_cluster, **kw):
    """
    Test Cases Covered:
    CEPH-83573502	Interrupt the cloning operation in-between and observe the behavior.
    Pre-requisites :
    1. We need at least one client node to execute this test case
    2. Create the fs volume (ceph fs volume create cephfs) if the volume is not there
    3. Create 1 subvolume group
        Ex : ceph fs subvolumegroup create cephfs subvolgroup_1
    4. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_clone_cancel --size 5368706371 --group_name subvolgroup_
    5. Create data on the subvolume
        Ex:  python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 --files
            100 --files-per-dir 10 --dirs-per-dir 2 --top /mnt/cephfs_fuse1baxgbpaia_1/
    6. Create a snapshot of the subvolume
        Ex: ceph fs subvolume snapshot create cephfs subvol_clone_cancel snap_1 --group_name subvolgroup_1

    Clone Operations and Clone States:
    1. Create a clone in the default location.
        ceph fs subvolume snapshot clone cephfs subvol_clone_cancel snap_1 clone_status_1 --group_name subvolgroup_1
    2. Create a clone, cancel the operation, and check the state.
    3. The clone state should move to "canceled"

    Clean-up:
    1. ceph fs snapshot rm <vol_name> <subvol_name> snap_name [--group_name <subvol_group_name>]
    2. ceph fs subvolume rm <vol_name> <subvol_name> [--group_name <subvol_group_name>]
    3. ceph fs subvolumegroup rm <vol_name> <group_name>
    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if len(clients) < 1:
            log.info(
                f"This test requires minimum 1 client nodes.This has only {len(clients)} clients"
            )
            return 1
        default_fs = "cephfs"
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10))
        )
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        subvolumegroup_list = [
            {"vol_name": default_fs, "group_name": "subvolgroup_clone_cancel_1"}
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(client1, **subvolumegroup)
        subvolume = {
            "vol_name": default_fs,
            "subvol_name": "subvol_clone_cancel",
            "group_name": "subvolgroup_clone_cancel_1",
            "size": "5368706371",
        }
        fs_util.create_subvolume(client1, **subvolume)
        log.info("Get the path of sub volume")
        subvol_path, rc = client1.exec_command(
            sudo=True,
            cmd=f"ceph fs subvolume getpath {default_fs} subvol_clone_cancel subvolgroup_clone_cancel_1",
        )
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path.strip()}",
        )
        client1.exec_command(
            sudo=True,
            cmd=f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 "
            f"--files 100 --files-per-dir 10 --dirs-per-dir 2 --top "
            f"{kernel_mounting_dir_1}",
            long_running=True,
        )
        snapshot = {
            "vol_name": default_fs,
            "subvol_name": "subvol_clone_cancel",
            "snap_name": "snap_1",
            "group_name": "subvolgroup_clone_cancel_1",
        }
        fs_util.create_snapshot(client1, **snapshot)
        clone_status_3 = {
            "vol_name": default_fs,
            "subvol_name": "subvol_clone_cancel",
            "snap_name": "snap_1",
            "target_subvol_name": "clone_status_3",
            "group_name": "subvolgroup_clone_cancel_1",
        }
        fs_util.create_clone(client1, **clone_status_3)
        fs_util.clone_cancel(
            client1,
            clone_status_3["vol_name"],
            clone_status_3["target_subvol_name"],
            group_name=clone_status_3.get("target_group_name", ""),
        )
        fs_util.validate_clone_state(client1, clone_status_3, "canceled")
        return 0

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    finally:
        log.info("Clean Up in progess")
        rmclone_list = [
            {"vol_name": default_fs, "subvol_name": "clone_status_3", "force": True},
        ]
        for clone_vol in rmclone_list:
            fs_util.remove_subvolume(client1, **clone_vol)
        fs_util.remove_snapshot(client1, **snapshot)
        fs_util.remove_subvolume(client1, **subvolume)
        for subvolumegroup in subvolumegroup_list:
            fs_util.remove_subvolumegroup(client1, **subvolumegroup, force=True)
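
The create_clone/clone_cancel/validate_clone_state helpers above wrap standard ceph CLI calls. A condensed sketch of the same flow, using the names from this test (a clone of a small snapshot may reach "complete" before the cancel lands, which is why the test writes a fair amount of data first):

import json
import subprocess


def ceph(cmd):
    """Run a ceph CLI command and return its stdout."""
    return subprocess.run(cmd, shell=True, check=True,
                          capture_output=True, text=True).stdout


ceph("ceph fs subvolume snapshot clone cephfs subvol_clone_cancel snap_1 "
     "clone_status_3 --group_name subvolgroup_clone_cancel_1")
ceph("ceph fs clone cancel cephfs clone_status_3")
status = json.loads(ceph("ceph fs clone status cephfs clone_status_3"))
assert status["status"]["state"] == "canceled", status
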
Example #5
def run(ceph_cluster, **kw):
    """
    Test Cases Covered :
    CEPH-83573522   Verify the retained snapshot details with "ceph fs info" command

    Pre-requisites :
    1. We need at least one client node to execute this test case
    2. Create the fs volume (ceph fs volume create cephfs) if the volume is not there
    3. ceph fs subvolumegroup create <vol_name> <group_name> --pool_layout <data_pool_name>
        Ex : ceph fs subvolumegroup create cephfs subvolgroup_info_retain
    4. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_2 --size 5368706371 --group_name subvolgroup_
    5. Create data on the subvolume
        Ex:  python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 --files
            100 --files-per-dir 10 --dirs-per-dir 2 --top /mnt/cephfs_fuse1baxgbpaia_1/
    6. Create a snapshot of the subvolume
        Ex: ceph fs subvolume snapshot create cephfs subvol_2 snap_1 --group_name subvolgroup_info_retain

    Retain the snapshots and verify the subvolume state:
    1. Check the state of the subvolume; it should be "complete".
    2. ceph fs snapshot rm <vol_name> <subvol_name> snap_name [--group_name <subvol_group_name>
        --force --retain-snapshots]
    3. Remove the subvolume.
    4. Check the state of the subvolume; it should be "snapshot-retained".

    Clean Up:
    1. Delete all the snapshots created
    2. Delete the subvolumes
    3. Delete the subvolume groups
    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if len(clients) < 1:
            log.info(
                f"This test requires minimum 1 client nodes.This has only {len(clients)} clients"
            )
            return 1
        default_fs = "cephfs"
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10)))
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        subvolumegroup_list = [
            {
                "vol_name": default_fs,
                "group_name": "subvolgroup_info_retain"
            },
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(client1, **subvolumegroup)
        subvolume = {
            "vol_name": default_fs,
            "subvol_name": "subvol_retain_info",
            "group_name": "subvolgroup_info_retain",
            "size": "5368706371",
        }
        fs_util.create_subvolume(client1, **subvolume)
        log.info("Get the path of sub volume")
        subvol_path, rc = client1.exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_retain_info subvolgroup_info_retain",
        )
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path.read().decode().strip()}",
        )
        client1.exec_command(
            sudo=True,
            cmd=
            f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 "
            f"--files 100 --files-per-dir 10 --dirs-per-dir 2 --top "
            f"{kernel_mounting_dir_1}",
            long_running=True,
        )
        snapshot = {
            "vol_name": default_fs,
            "subvol_name": "subvol_retain_info",
            "snap_name": "snap_1",
            "group_name": "subvolgroup_info_retain",
        }
        fs_util.create_snapshot(client1, **snapshot)
        client1.exec_command(sudo=True, cmd=f"mkdir -p /tmp/{mounting_dir}")
        client1.exec_command(
            sudo=True,
            cmd=f"cp -r {kernel_mounting_dir_1}/* /tmp/{mounting_dir}")
        subvol_info, rc = client1.exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume info {default_fs} subvol_retain_info subvolgroup_info_retain --format json",
            check_ec=False,
        )
        subvol_info_state = json.loads(subvol_info.read().decode())
        log.info(
            f"subvol state before removing the volume with --retain-snapshots {subvol_info_state['state']}"
        )
        if subvol_info_state["state"] != "complete":
            raise CommandFailed(
                f"subvol state should be in complete state "
                f"but current state is {subvol_info_state['state']}")
        fs_util.remove_subvolume(client1,
                                 **subvolume,
                                 retain_snapshots=True,
                                 force=True,
                                 validate=False)
        log.info(
            "Verify that fetching the subvolume path fails, since the subvolume was removed with --retain-snapshots"
        )
        subvol_path, rc = client1.exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_retain_info subvolgroup_info_retain",
            check_ec=False,
        )
        if rc == 0:
            raise CommandFailed(
                "Remove subvolume with --retain-snapshots has not succeeded. "
                "We are still able to fetch the path of the subvolume after deletion")
        subvol_info, rc = client1.exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume info {default_fs} subvol_retain_info subvolgroup_info_retain --format json",
            check_ec=False,
        )
        subvol_info_state = json.loads(subvol_info.read().decode())
        if subvol_info_state["state"] != "snapshot-retained":
            raise CommandFailed(
                f"subvol state should be in snapshot-retained state "
                f"but current state is {subvol_info_state['state']}")
        log.info(
            f"subvol state after removing the volume with --retain-snapshots {subvol_info_state['state']}"
        )
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    finally:
        log.info("Clean Up in progess")
        rmclone_list = [
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_retain_info"
            },
        ]
        for clone_vol in rmclone_list:
            fs_util.remove_subvolume(client1,
                                     **clone_vol,
                                     validate=False,
                                     force=True,
                                     check_ec=False)
        fs_util.remove_snapshot(client1,
                                **snapshot,
                                validate=False,
                                force=True)
        for subvolumegroup in subvolumegroup_list:
            fs_util.remove_subvolumegroup(client1,
                                          **subvolumegroup,
                                          force=True)
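
Stripped of the framework helpers, the state transition this test verifies comes down to two standard CLI calls: "ceph fs subvolume info" reports state "complete" before removal and "snapshot-retained" after a removal with --retain-snapshots. A minimal sketch using the names from this test:

import json
import subprocess


def ceph(cmd):
    return subprocess.run(cmd, shell=True, check=True,
                          capture_output=True, text=True).stdout


def subvol_state():
    info = ceph("ceph fs subvolume info cephfs subvol_retain_info "
                "subvolgroup_info_retain --format json")
    return json.loads(info)["state"]


assert subvol_state() == "complete"
ceph("ceph fs subvolume rm cephfs subvol_retain_info subvolgroup_info_retain "
     "--retain-snapshots --force")
assert subvol_state() == "snapshot-retained"
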
Example #6
def run(ceph_cluster, **kw):
    """
    Test Cases Covered:
    CEPH-11319	Create the first snap, add more data to the original, then create a second snap.
                Roll back to the 1st snap and do data validation.
                Roll back to the 2nd snap and do data validation. Perform a cross-platform rollback,
                i.e., take a snap on the kernel mount and perform the rollback using the fuse mount.
    Pre-requisites :
    1. We need at least one client node to execute this test case
    2. Create the fs volume (ceph fs volume create cephfs) if the volume is not there
    3. ceph fs subvolumegroup create <vol_name> <group_name> --pool_layout <data_pool_name>
        Ex : ceph fs subvolumegroup create cephfs subvol_cross_platform_snapshot_1
    4. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_2 --size 5368706371 --group_name subvolgroup_
    5. Create data on the subvolume. We will add known data as we are going to verify the files
        Ex: create_file_data()
    6. Create a snapshot of the subvolume
        Ex: ceph fs subvolume snapshot create cephfs subvol_2 snap_1 --group_name subvol_cross_platform_snapshot_1

    Script Flow:
    1. Mount the subvolume on the client using Kernel and fuse mount
    2. Write data into the fuse mount point i.e., data_from_fuse_mount
    3. Collect the checksum of the files
    4. Take snapshot at this point i.e., snap_1
    5. Write data into the kernel mount point i.e., data_from_kernel_mount
    6. Collect the checksum of the files
    7. Take snapshot at this point i.e., snap_2
    8. On Kernel mount revert the snapshot to snap_1 and compare the checksum of the files collected in step 3
    9. On Fuse mount revert the snapshot to snap_2 and compare the checksum of the files collected in step 6

    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if len(clients) < 1:
            log.info(
                f"This test requires minimum 1 client nodes.This has only {len(clients)} clients"
            )
            return 1
        default_fs = "cephfs"
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10)))
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        subvolumegroup_list = [
            {
                "vol_name": default_fs,
                "group_name": "subvol_cross_platform_snapshot_1"
            },
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(client1, **subvolumegroup)
        subvolume = {
            "vol_name": default_fs,
            "subvol_name": "subvol_cross_platform_snapshot",
            "group_name": "subvol_cross_platform_snapshot_1",
            "size": "5368706371",
        }
        fs_util.create_subvolume(client1, **subvolume)
        log.info("Get the path of sub volume")
        subvol_path, rc = client1.exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_cross_platform_snapshot"
            f" subvol_cross_platform_snapshot_1",
        )
        subvol_path = subvol_path.read().decode().strip()
        fuse_mounting_dir_1 = f"/mnt/cephfs_fuse{mounting_dir}_1/"
        fs_util.fuse_mount(
            [client1],
            fuse_mounting_dir_1,
            extra_params=f" -r {subvol_path}",
        )
        fs_util.create_file_data(client1, fuse_mounting_dir_1, 3, "snap1",
                                 "data_from_fuse_mount ")
        fuse_files_checksum = fs_util.get_files_and_checksum(
            client1, fuse_mounting_dir_1)
        fuse_snapshot = {
            "vol_name": default_fs,
            "subvol_name": "subvol_cross_platform_snapshot",
            "snap_name": "snap_1",
            "group_name": "subvol_cross_platform_snapshot_1",
        }
        fs_util.create_snapshot(client1, **fuse_snapshot)
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path}",
        )
        fs_util.create_file_data(client1, kernel_mounting_dir_1, 3, "snap1",
                                 "data_from_kernel_mount ")
        kernel_snapshot = {
            "vol_name": default_fs,
            "subvol_name": "subvol_cross_platform_snapshot",
            "snap_name": "snap_2",
            "group_name": "subvol_cross_platform_snapshot_1",
        }
        fs_util.create_snapshot(client1, **kernel_snapshot)
        kernel_files_checksum = fs_util.get_files_and_checksum(
            client1, kernel_mounting_dir_1)
        client1.exec_command(
            sudo=True,
            cmd=f"cd {kernel_mounting_dir_1};cp .snap/_snap_1_*/* .")
        kernel_mount_revert_snap_fuse = fs_util.get_files_and_checksum(
            client1, kernel_mounting_dir_1)
        if fuse_files_checksum != kernel_mount_revert_snap_fuse:
            log.error(
                "Checksums do not match after reverting to snap_1 (the snapshot taken from the fuse mount)"
            )
            return 1
        client1.exec_command(
            sudo=True, cmd=f"cd {fuse_mounting_dir_1};cp .snap/_snap_2_*/* .")
        fuse_mount_revert_snap_kernel = fs_util.get_files_and_checksum(
            client1, fuse_mounting_dir_1)
        if kernel_files_checksum != fuse_mount_revert_snap_kernel:
            log.error(
                "Checksums do not match after reverting to snap_2 (the snapshot taken from the kernel mount)"
            )
            return 1
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    finally:
        log.info("Clean Up in progess")
        fs_util.remove_snapshot(client1, **kernel_snapshot)
        fs_util.remove_snapshot(client1, **fuse_snapshot)
        fs_util.remove_subvolume(client1, **subvolume)
        for subvolumegroup in subvolumegroup_list:
            fs_util.remove_subvolumegroup(client1,
                                          **subvolumegroup,
                                          force=True)
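
The "rollback" in this test is simply a copy out of CephFS's .snap directory: every snapshot of a directory is visible read-only under <dir>/.snap, and subvolume snapshots appear there as _<snap_name>_<id>, which is why the test globs for .snap/_snap_1_*. A minimal sketch with a hypothetical mount path:

import glob
import subprocess

mount = "/mnt/cephfs_kernel_example"                 # hypothetical mount point
snap_dir = glob.glob(f"{mount}/.snap/_snap_1_*")[0]  # read-only snapshot view
subprocess.run(["cp", "-r", f"{snap_dir}/.", mount], check=True)
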
Example #7
def run(ceph_cluster, **kw):
    """
    Test Cases Covered :
    CEPH-83573524	Ensure the subvolume attributes are retained post clone operations

    Pre-requisites :
    1. We need at least one client node to execute this test case
    2. Create the fs volume (ceph fs volume create cephfs) if the volume is not there
    3. ceph fs subvolumegroup create <vol_name> <group_name> --pool_layout <data_pool_name>
        Ex : ceph fs subvolumegroup create cephfs subvolgroup_clone_attr_vol_1
    4. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_2 --size 5368706371 --group_name subvolgroup_1
    5. Create Data on the subvolume
        Ex:  python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 --files
            100 --files-per-dir 10 --dirs-per-dir 2 --top /mnt/cephfs_fuse1baxgbpaia_1/
    6. Create snapshot of the subvolume
        Ex: ceph fs subvolume snapshot create cephfs subvol_2 snap_1 --group_name subvolgroup_clone_attr_vol_1
    7. Set the file and size xattrs on the dir

    Test Case Flow:
    1. Create Clone out of subvolume.
    2. Mount the cloned volume.
    3. Validate the contents of the cloned volume against the contents of the subvolume
    4. Validate the file and size xattrs on the dir in the cloned volume

    Clean Up:
    1. Delete Cloned volume
    2. Delete subvolumegroup
    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if len(clients) < 1:
            log.info(
                f"This test requires minimum 1 client nodes.This has only {len(clients)} clients"
            )
            return 1
        default_fs = "cephfs"
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10))
        )
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        subvolumegroup_list = [
            {"vol_name": default_fs, "group_name": "subvolgroup_clone_attr_vol_1"},
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(client1, **subvolumegroup)
        subvolume = {
            "vol_name": default_fs,
            "subvol_name": "subvol_clone_attr_vol",
            "group_name": "subvolgroup_clone_attr_vol_1",
            "size": "5368706371",
        }
        fs_util.create_subvolume(client1, **subvolume)
        log.info("Get the path of sub volume")
        subvol_path, rc = client1.exec_command(
            sudo=True,
            cmd=f"ceph fs subvolume getpath {default_fs} subvol_clone_attr_vol subvolgroup_clone_attr_vol_1",
        )
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path.strip()}",
        )
        client1.exec_command(
            sudo=True,
            cmd=f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 "
            f"--files 100 --files-per-dir 10 --dirs-per-dir 1 --top "
            f"{kernel_mounting_dir_1}",
            long_running=True,
        )
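        # Set arbitrary quota attrs (max_files=9999, max_bytes=999) purely so the
        # clone can later be checked for retaining the same attributes.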
        fs_util.set_quota_attrs(clients[0], 9999, 999, kernel_mounting_dir_1)
        quota_attrs = fs_util.get_quota_attrs(clients[0], kernel_mounting_dir_1)
        snapshot = {
            "vol_name": default_fs,
            "subvol_name": "subvol_clone_attr_vol",
            "snap_name": "snap_1",
            "group_name": "subvolgroup_clone_attr_vol_1",
        }
        fs_util.create_snapshot(client1, **snapshot)
        client1.exec_command(sudo=True, cmd=f"mkdir -p /tmp/{mounting_dir}")

        log.info("Clone a subvolume from snapshot")
        clone_attr_vol_1 = {
            "vol_name": default_fs,
            "subvol_name": "subvol_clone_attr_vol",
            "snap_name": "snap_1",
            "target_subvol_name": "clone_attr_vol_1",
            "group_name": "subvolgroup_clone_attr_vol_1",
        }
        fs_util.create_clone(client1, **clone_attr_vol_1)
        fs_util.validate_clone_state(client1, clone_attr_vol_1)
        clonevol_path, rc = client1.exec_command(
            sudo=True,
            cmd=f"ceph fs subvolume getpath {default_fs} {clone_attr_vol_1['target_subvol_name']}",
        )
        fuse_mounting_dir_2 = f"/mnt/cephfs_fuse{mounting_dir}_2/"
        fs_util.fuse_mount(
            [client1],
            fuse_mounting_dir_2,
            extra_params=f" -r {clonevol_path.strip()}",
        )
        quota_attrs_clone = fs_util.get_quota_attrs(clients[0], fuse_mounting_dir_2)
        client1.exec_command(
            sudo=True, cmd=f"diff -qr {kernel_mounting_dir_1} {fuse_mounting_dir_2}"
        )
        if quota_attrs_clone != quota_attrs:
            log.info(f"attributes of cloned volumes{quota_attrs_clone}")
            log.info(f"attributes of volumes{quota_attrs}")
            log.error(
                "Quota attributes of the clone is not matching with quota attributes of subvolume"
            )
            return 1
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    finally:
        log.info("Clean Up in progess")
        rmclone_list = [
            {"vol_name": default_fs, "subvol_name": "clone_attr_vol_1"},
        ]
        for clone_vol in rmclone_list:
            fs_util.remove_subvolume(client1, **clone_vol)
        fs_util.remove_snapshot(client1, **snapshot, validate=False, check_ec=False)
        fs_util.remove_subvolume(client1, **subvolume, validate=False, check_ec=False)
        for subvolumegroup in subvolumegroup_list:
            fs_util.remove_subvolumegroup(client1, **subvolumegroup, force=True)
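
The attribute check above amounts to reading the two quota xattrs from both mounts and confirming they match, since a subvolume clone is expected to carry over the attributes set on the source. A sketch with hypothetical mount paths (the real test goes through get_quota_attrs):

import subprocess


def quota_xattrs(path):
    """Read the file and byte quota xattrs from a CephFS directory."""
    def read(name):
        return subprocess.run(
            ["getfattr", "--only-values", "-n", name, path],
            capture_output=True, text=True,
        ).stdout.strip()
    return read("ceph.quota.max_files"), read("ceph.quota.max_bytes")


source = quota_xattrs("/mnt/cephfs_kernel_example")      # source subvolume
clone = quota_xattrs("/mnt/cephfs_fuse_clone_example")   # mounted clone
assert source == clone, (source, clone)
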
Example #8
def run(ceph_cluster, **kw):
    """
    Pre-requisites :
    1. create fs volume create cephfs and cephfs-ec

    Subvolume Group Operations :
    1. ceph fs subvolumegroup create <vol_name> <group_name> --pool_layout <data_pool_name>
    2. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
    3. Mount subvolume on both fuse and kernel clients and run IO's

    Clean-up:
    1. Remove files from the mount point and unmount the subvolumes.
    2. Remove the pools added as part of pool_layout
    3. ceph fs subvolume rm <vol_name> <subvol_name> [--group_name <subvol_group_name>]
    4. ceph fs subvolumegroup rm <vol_name> <group_name>
    """

    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        build = config.get("build", config.get("rhbuild"))
        clients = ceph_cluster.get_ceph_objects("client")
        log.info("checking Pre-requisites")
        if len(clients) < 2:
            log.info(
                f"This test requires minimum 2 client nodes.This has only {len(clients)} clients"
            )
            return 1
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        default_fs = "cephfs"
        if build.startswith("4"):
            # create EC pool
            list_cmds = [
                "ceph fs flag set enable_multiple true",
                "ceph osd pool create cephfs-data-ec 64 erasure",
                "ceph osd pool create cephfs-metadata 64",
                "ceph osd pool set cephfs-data-ec allow_ec_overwrites true",
                "ceph fs new cephfs-ec cephfs-metadata cephfs-data-ec --force",
            ]
            if fs_util.get_fs_info(clients[0], "cephfs_new"):
                default_fs = "cephfs_new"
                list_cmds.append("ceph fs volume create cephfs")
            for cmd in list_cmds:
                clients[0].exec_command(sudo=True, cmd=cmd)

        log.info("Create 2 pools, 1 - Replicated , 1 - EC Data Pool")
        create_pools = [
            "ceph osd pool create cephfs-data-pool-layout",
            "ceph osd pool create cephfs-data-pool-layout-ec 64 erasure",
            "ceph osd pool set cephfs-data-pool-layout-ec allow_ec_overwrites true",
        ]
        for cmd in create_pools:
            clients[0].exec_command(sudo=True, cmd=cmd)
        log.info("Add created data pools to each of the filesystem")
        add_pool_to_FS = [
            "ceph fs add_data_pool cephfs cephfs-data-pool-layout",
            "ceph fs add_data_pool cephfs-ec cephfs-data-pool-layout-ec",
        ]
        for cmd in add_pool_to_FS:
            clients[0].exec_command(sudo=True, cmd=cmd)

        log.info("Create cephfs subvolumegroup with desired data pool_layout")
        subvolumegroup_list = [
            {
                "vol_name": default_fs,
                "group_name": "subvolgroup_1",
                "pool_layout": "cephfs-data-pool-layout",
            },
            {
                "vol_name": "cephfs-ec",
                "group_name": "subvolgroup_ec1",
                "pool_layout": "cephfs-data-pool-layout-ec",
            },
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(clients[0], **subvolumegroup)

        log.info(
            "Create 2 subvolumes on each of the subvolume groups, size 5 GB")
        subvolume_list = [
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_1",
                "group_name": "subvolgroup_1",
                "size": "5368706371",
            },
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_2",
                "group_name": "subvolgroup_1",
                "size": "5368706371",
            },
            {
                "vol_name": "cephfs-ec",
                "subvol_name": "subvol_3",
                "group_name": "subvolgroup_ec1",
                "size": "5368706371",
            },
            {
                "vol_name": "cephfs-ec",
                "subvol_name": "subvol_4",
                "group_name": "subvolgroup_ec1",
                "size": "5368706371",
            },
        ]
        for subvolume in subvolume_list:
            fs_util.create_subvolume(clients[0], **subvolume)

        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10)))
        log.info(
            "Mount 1 subvolumegroup/subvolume on kernel and 1 subvloume on Fuse → Client1"
        )
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        log.info("Get the path of sub volume")
        subvol_path, rc = clients[0].exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_1 subvolgroup_1",
        )
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path.read().decode().strip()}",
        )

        subvol_path, rc = clients[0].exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_2 subvolgroup_1",
        )
        fuse_mounting_dir_1 = f"/mnt/cephfs_fuse{mounting_dir}_1/"
        fs_util.fuse_mount(
            [clients[0]],
            fuse_mounting_dir_1,
            extra_params=f" -r {subvol_path.read().decode().strip()}",
        )

        log.info(
            "On EC,Mount 1 subvolumegroup/subvolume on kernal and 1 subvloume on Fuse → Client2"
        )
        if build.startswith("5"):
            kernel_mounting_dir_2 = f"/mnt/cephfs_kernel{mounting_dir}_EC_1/"
            mon_node_ips = fs_util.get_mon_node_ips()
            log.info("Get the path of sub volume")

            subvol_path, rc = clients[1].exec_command(
                sudo=True,
                cmd=
                "ceph fs subvolume getpath cephfs-ec subvol_3 subvolgroup_ec1",
            )
            fs_util.kernel_mount(
                [clients[1]],
                kernel_mounting_dir_2,
                ",".join(mon_node_ips),
                sub_dir=f"{subvol_path.read().decode().strip()}",
                extra_params=",fs=cephfs-ec",
            )

            subvol_path, rc = clients[0].exec_command(
                sudo=True,
                cmd=
                "ceph fs subvolume getpath cephfs-ec subvol_4 subvolgroup_ec1",
            )
            fuse_mounting_dir_2 = f"/mnt/cephfs_fuse{mounting_dir}_EC_1/"
            fs_util.fuse_mount(
                [clients[1]],
                fuse_mounting_dir_2,
                extra_params=
                f" -r {subvol_path.read().decode().strip()} --client_fs cephfs-ec",
            )

        log.info(
            "Check the Pool status before the IO's to confirm if no IO's are going on on the pool attached"
        )
        get_pool_status_before = fs_util.get_pool_df(
            client=clients[0],
            pool_name="cephfs-data-pool-layout",
            vol_name=default_fs)
        get_pool_status_before_EC = fs_util.get_pool_df(
            client=clients[1],
            pool_name="cephfs-data-pool-layout-ec",
            vol_name="cephfs-ec",
        )

        run_ios(clients[0], kernel_mounting_dir_1)
        run_ios(clients[0], fuse_mounting_dir_1)
        run_ios(clients[1], kernel_mounting_dir_2)
        run_ios(clients[1], fuse_mounting_dir_2)

        log.info(
            "Check the Pool status and verify the IO's are going only to the Pool attached"
        )
        get_pool_status_after = fs_util.get_pool_df(
            client=clients[0],
            pool_name="cephfs-data-pool-layout",
            vol_name=default_fs)
        get_pool_status_after_EC = fs_util.get_pool_df(
            client=clients[1],
            pool_name="cephfs-data-pool-layout-ec",
            vol_name="cephfs-ec",
        )

        if get_pool_status_after["used"] < get_pool_status_before["used"]:
            log.error("Pool attached is unused")
            return 1
        if get_pool_status_after_EC["used"] < get_pool_status_before_EC["used"]:
            log.info("EC Pool attached is unused")
            return 1

        log.info("Clean up the system")
        fs_util.client_clean_up("umount",
                                kernel_clients=[clients[0]],
                                mounting_dir=kernel_mounting_dir_1)
        fs_util.client_clean_up("umount",
                                kernel_clients=[clients[1]],
                                mounting_dir=kernel_mounting_dir_2)

        fs_util.client_clean_up("umount",
                                fuse_clients=[clients[0]],
                                mounting_dir=fuse_mounting_dir_1)
        fs_util.client_clean_up("umount",
                                fuse_clients=[clients[1]],
                                mounting_dir=fuse_mounting_dir_2)

        clients[1].exec_command(
            sudo=True, cmd="ceph config set mon mon_allow_pool_delete true")

        for subvolume in subvolume_list:
            fs_util.remove_subvolume(clients[0], **subvolume)

        for subvolumegroup in subvolumegroup_list:
            fs_util.remove_subvolumegroup(clients[0], **subvolumegroup)
        log.info(
            "Remove the data pools from the filesystem and delete the created pools."
        )
        rm_pool_from_FS = [
            "ceph fs rm_data_pool cephfs cephfs-data-pool-layout",
            "ceph fs rm_data_pool cephfs-ec cephfs-data-pool-layout-ec",
            "ceph osd pool delete cephfs-data-pool-layout "
            "cephfs-data-pool-layout --yes-i-really-really-mean-it-not-faking",
            "ceph osd pool delete cephfs-data-pool-layout-ec "
            "cephfs-data-pool-layout-ec --yes-i-really-really-mean-it-not-faking",
        ]
        for cmd in rm_pool_from_FS:
            clients[0].exec_command(sudo=True, cmd=cmd)

        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
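
The pass/fail condition in the example above reduces to comparing pool usage before and after the IO phase. A minimal standalone sketch of the same check, assuming only the ceph CLI on PATH (the pool_used_bytes helper and the pool name are illustrative, not part of the framework):

import json
import subprocess

def pool_used_bytes(pool_name):
    # Read `ceph df` in JSON form and return the bytes currently
    # stored in the named pool.
    out = subprocess.check_output(["ceph", "df", "--format", "json"])
    for pool in json.loads(out)["pools"]:
        if pool["name"] == pool_name:
            return pool["stats"]["bytes_used"]
    raise ValueError(f"pool {pool_name} not found")

# Sample before the IO phase, run the IO, sample again, and require growth:
# before = pool_used_bytes("cephfs-data-pool-layout")
# ... run IO against the mounts backed by this pool ...
# after = pool_used_bytes("cephfs-data-pool-layout")
# assert after > before, "no data landed in the attached pool"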
Example #9
0
def run(ceph_cluster, **kw):
    """
    Test Cases Covered :
    CEPH-83573415	Test to validate the cli - ceph fs set <fs_name> allow_new_snaps true

    Pre-requisites :
    1. We need at least one client node to execute this test case
    2. Create fs volume cephfs if the volume is not there
    3. ceph fs subvolumegroup create <vol_name> <group_name> --pool_layout <data_pool_name>
        Ex : ceph fs subvolumegroup create cephfs subvolgroup_flag_snapshot_1
    4. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_2 --size 5368706371 --group_name subvolgroup_
    5. Create Data on the subvolume
        Ex:  python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 --files
            100 --files-per-dir 10 --dirs-per-dir 2 --top /mnt/cephfs_fuse1baxgbpaia_1/
    6. Create snapshot of the subvolume
        Ex: ceph fs subvolume snapshot create cephfs subvol_2 snap_1 --group_name subvolgroup_flag_snapshot_1

    Test Case Flow:
    1. Test allow_new_snaps value and try creating the snapshots

    Clean Up:
    1. Del all the snapshots created
    2. Del Subvolumes
    3. Del SubvolumeGroups
    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if len(clients) < 1:
            log.info(
                f"This test requires a minimum of 1 client node. This run has only {len(clients)} clients"
            )
            return 1
        default_fs = "cephfs"
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10)))
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        subvolumegroup_list = [
            {
                "vol_name": default_fs,
                "group_name": "subvolgroup_flag_snapshot_1"
            },
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(client1, **subvolumegroup)
        subvolume = {
            "vol_name": default_fs,
            "subvol_name": "subvol_retain_snapshot",
            "group_name": "subvolgroup_flag_snapshot_1",
            "size": "5368706371",
        }
        fs_util.create_subvolume(client1, **subvolume)
        log.info("Get the path of sub volume")
        subvol_path, rc = client1.exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_retain_snapshot subvolgroup_flag_snapshot_1",
        )
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path.read().decode().strip()}",
        )
        client1.exec_command(
            sudo=True,
            cmd=
            f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 "
            f"--files 100 --files-per-dir 10 --dirs-per-dir 2 --top "
            f"{kernel_mounting_dir_1}",
            long_running=True,
        )
        snapshot = {
            "vol_name": default_fs,
            "subvol_name": "subvol_retain_snapshot",
            "snap_name": "snap_1",
            "group_name": "subvolgroup_flag_snapshot_1",
        }
        log.info("Test allow_new_snaps value and creating the snapshots")
        client1.exec_command(
            sudo=True, cmd=f"ceph fs set {default_fs} allow_new_snaps false")
        cmd_out, cmd_rc = fs_util.create_snapshot(client1,
                                                  **snapshot,
                                                  check_ec=False,
                                                  validate=False)
        if cmd_rc == 0:
            raise CommandFailed(
                f"ceph fs set {default_fs} allow_new_snaps false is not working properly"
            )
        client1.exec_command(
            sudo=True, cmd=f"ceph fs set {default_fs} allow_new_snaps true")
        fs_util.create_snapshot(client1, **snapshot)
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1

    finally:
        log.info("Clean Up in progess")
        fs_util.remove_snapshot(client1,
                                **snapshot,
                                validate=False,
                                force=True)
        fs_util.remove_subvolume(client1, **subvolume)
        for subvolumegroup in subvolumegroup_list:
            fs_util.remove_subvolumegroup(client1,
                                          **subvolumegroup,
                                          force=True)
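
The core of the example above is a single flag toggle: snapshot creation must fail while allow_new_snaps is false and succeed once it is true. A minimal sketch, assuming a client object with the exec_command(sudo=..., cmd=..., check_ec=...) interface these tests use and that it returns the exit status as its second value:

def verify_allow_new_snaps(client, fs_name, subvol, snap, group):
    # With the flag off, snapshot creation is expected to fail.
    client.exec_command(
        sudo=True, cmd=f"ceph fs set {fs_name} allow_new_snaps false")
    out, rc = client.exec_command(
        sudo=True,
        cmd=f"ceph fs subvolume snapshot create {fs_name} {subvol} {snap}"
        f" --group_name {group}",
        check_ec=False,
    )
    if rc == 0:
        raise RuntimeError("snapshot created while allow_new_snaps is false")
    # With the flag back on, the same command should succeed.
    client.exec_command(
        sudo=True, cmd=f"ceph fs set {fs_name} allow_new_snaps true")
    client.exec_command(
        sudo=True,
        cmd=f"ceph fs subvolume snapshot create {fs_name} {subvol} {snap}"
        f" --group_name {group}",
    )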
Example #10
0
def run(ceph_cluster, **kw):
    """
    Test Cases Covered:
    CEPH-83573408   Test to validate the quota remains intact even after rebooting the Node.
                    Create a FS and create 10 directories and mount them on kernel client and fuse client(5 mounts
                    each). Set max bytes quota to a number(say 1Gb) and also set max files quota (say 20) and verify if
                    the set quota limit is working fine by filling max number of files and also by filling data to reach
                    the max limit. Reboot the node; once the node is up, verify if the set quota remains or not.
    Pre-requisites :
    1. We need at least one client node to execute this test case
    2. Create fs volume cephfs if the volume is not there
    3. ceph fs subvolumegroup create <vol_name> <group_name> --pool_layout <data_pool_name>
        Ex : ceph fs subvolumegroup create cephfs subvolgroup_clone_attr_vol_1
    4. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_1 --size 5368706371 --group_name subvolgroup_1
    5. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_2 --size 5368706371 --group_name subvolgroup_1

    Test Case Flow:
    1. Mount the subvolume_1 on the client using fuse
    2. Mount the subvolume_2 on the client using kernel
    3. Set quota attribute 1gb and 50 files on both mount points
    4. Reboot the client node
    5. mount the subvolumes again
    6. Validate the quota attributes after reboot
    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if not clients:
            log.info(
                f"This test requires a minimum of 1 client node. This run has only {len(clients)} clients"
            )
            return 1
        default_fs = "cephfs"
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10)))
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        subvolumegroup_list = [
            {
                "vol_name": default_fs,
                "group_name": "subvolgroup_quota_byte_increase_1"
            },
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(client1, **subvolumegroup)
        subvolume_list = [
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_byte_incr_fuse",
                "group_name": "subvolgroup_quota_byte_increase_1",
                "size": "5368706371",
            },
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_byte_incr_kernel",
                "group_name": "subvolgroup_quota_byte_increase_1",
                "size": "5368706371",
            },
        ]
        for subvolume in subvolume_list:
            fs_util.create_subvolume(clients[0], **subvolume)
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        log.info("Get the path of sub volume")
        subvol_path, rc = clients[0].exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_byte_incr_kernel subvolgroup_quota_byte_increase_1",
        )
        kernel_subvol_path = subvol_path.strip()
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            sub_dir=f"{kernel_subvol_path}",
        )

        subvol_path, rc = clients[0].exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_byte_incr_fuse subvolgroup_quota_byte_increase_1",
        )
        fuse_mounting_dir_1 = f"/mnt/cephfs_fuse{mounting_dir}_1/"
        fuse_subvol_path = subvol_path.strip()
        fs_util.fuse_mount(
            [clients[0]],
            fuse_mounting_dir_1,
            extra_params=f" -r {fuse_subvol_path}",
        )
        fs_util.set_quota_attrs(clients[0], 50, 1073741824,
                                fuse_mounting_dir_1)
        fuse_quota_attrs_before_reboot = fs_util.get_quota_attrs(
            clients[0], fuse_mounting_dir_1)

        fs_util.set_quota_attrs(clients[0], 50, 1073741824,
                                kernel_mounting_dir_1)
        kernel_quota_attrs_before_reboot = fs_util.get_quota_attrs(
            clients[0], kernel_mounting_dir_1)

        fs_util.reboot_node(client1)

        fuse_mounting_dir_2 = f"/mnt/cephfs_fuse{mounting_dir}_2/"
        kernel_mounting_dir_2 = f"/mnt/cephfs_kernel{mounting_dir}_2/"
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_2,
            ",".join(mon_node_ips),
            sub_dir=f"{kernel_subvol_path}",
        )
        fs_util.fuse_mount(
            [clients[0]],
            fuse_mounting_dir_2,
            extra_params=f" -r {fuse_subvol_path}",
        )
        fuse_quota_attrs_after_reboot = fs_util.get_quota_attrs(
            clients[0], fuse_mounting_dir_2)
        kernel_quota_attrs_after_reboot = fs_util.get_quota_attrs(
            clients[0], kernel_mounting_dir_2)
        log.info(
            f"Quota attributes before reboot: {fuse_quota_attrs_before_reboot}\n"
            f"After reboot: {fuse_quota_attrs_after_reboot}")
        if fuse_quota_attrs_after_reboot != fuse_quota_attrs_before_reboot:
            log.error(
                "Fuse mount quota attributes are not matching after reboot")
            log.error(
                f"Quota attributes before reboot: {fuse_quota_attrs_before_reboot}\n"
                f"After reboot: {fuse_quota_attrs_after_reboot}")
            return 1
        log.info(
            f"Quota attributes before reboot: {kernel_quota_attrs_before_reboot}\n"
            f"After reboot: {kernel_quota_attrs_after_reboot}")
        if kernel_quota_attrs_before_reboot != kernel_quota_attrs_after_reboot:
            log.error(
                "Kernel mount quota attributes are not matching after reboot")
            log.error(
                f"Quota attributes before reboot: {kernel_quota_attrs_before_reboot}\n"
                f"After reboot: {kernel_quota_attrs_after_reboot}")
            return 1
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1

    finally:
        log.info("Clean Up in progess")
        for subvolume in subvolume_list:
            fs_util.remove_subvolume(client1, **subvolume)
        for subvolumegroup in subvolumegroup_list:
            fs_util.remove_subvolumegroup(client1,
                                          **subvolumegroup,
                                          force=True)
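
The quota attributes checked before and after the reboot are ordinary CephFS extended attributes on the mounted directory, which is why they survive a client reboot. A minimal sketch of setting and reading them directly, assuming a mounted path and the attr tools on the client (the framework's set_quota_attrs/get_quota_attrs wrap the same calls, though their exact implementation may differ):

def set_quota_xattrs(client, mount_path, max_files=50, max_bytes=1073741824):
    # Quotas live on the directory inode, not on the client, so a
    # remount from any node sees the same values.
    client.exec_command(
        sudo=True,
        cmd=f"setfattr -n ceph.quota.max_files -v {max_files} {mount_path}")
    client.exec_command(
        sudo=True,
        cmd=f"setfattr -n ceph.quota.max_bytes -v {max_bytes} {mount_path}")

def read_quota_xattr(client, mount_path, name="ceph.quota.max_files"):
    # Return just the value of one quota attribute.
    out, _ = client.exec_command(
        sudo=True, cmd=f"getfattr --only-values -n {name} {mount_path}")
    return out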
Example #11
0
def run(ceph_cluster, **kw):
    """
    Test Cases Covered:
    CEPH-83573501	Create a Cloned Volume using a snapshot

    Pre-requisites :
    1. We need at least one client node to execute this test case
    2. Create fs volume cephfs if the volume is not there
    3. Create 2 sub volume groups
        Ex : ceph fs subvolumegroup create cephfs subvolgroup_1
             ceph fs subvolumegroup create cephfs subvolgroup_2
    4. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_clone_status --size 5368706371 --group_name subvolgroup_
    5. Create Data on the subvolume
        Ex:  python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 --files
            100 --files-per-dir 10 --dirs-per-dir 2 --top /mnt/cephfs_fuse1baxgbpaia_1/
    6. Create snapshot of the subvolume
        Ex: ceph fs subvolume snapshot create cephfs subvol_clone_status snap_1 --group_name subvolgroup_1

    Clone Operations and Clone States:
    1. Create a clone in the default location.
        ceph fs subvolume snapshot clone cephfs subvol_clone_status snap_1 clone_status_1 --group_name subvolgroup_1
    2. Validate all the states of clone creation progress
        pending : Clone operation has not started
        in-progress : Clone operation is in progress
        complete : Clone operation has successfully finished
    3. Mount the cloned volume and check the contents
    4. Create a clone in a different subvolumegroup, i.e., subvolumegroup_2
        ceph fs subvolume snapshot clone cephfs subvol_clone_status snap_1 clone_status_1 --group_name subvolgroup_1
            --target_group_name subvolgroup_2
    5. Validate all the states of clone creation progress
        pending : Clone operation has not started
        in-progress : Clone operation is in progress
        complete : Clone operation has successfully finished
    6. Once all the clones have moved to the complete state, delete all the clones

    Clean-up:
    1. ceph fs snapshot rm <vol_name> <subvol_name> snap_name [--group_name <subvol_group_name>]
    2. ceph fs subvolume rm <vol_name> <subvol_name> [--group_name <subvol_group_name>]
    3. ceph fs subvolumegroup rm <vol_name> <group_name>
    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if len(clients) < 1:
            log.info(
                f"This test requires a minimum of 1 client node. This run has only {len(clients)} clients"
            )
            return 1
        default_fs = "cephfs"
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10)))
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        subvolumegroup_list = [
            {
                "vol_name": default_fs,
                "group_name": "subvolgroup_clone_status_1"
            },
            {
                "vol_name": default_fs,
                "group_name": "subvolgroup_clone_status_2"
            },
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(client1, **subvolumegroup)
        subvolume = {
            "vol_name": default_fs,
            "subvol_name": "subvol_clone_status",
            "group_name": "subvolgroup_clone_status_1",
            "size": "5368706371",
        }
        fs_util.create_subvolume(client1, **subvolume)
        log.info("Get the path of sub volume")
        subvol_path, rc = client1.exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_clone_status subvolgroup_clone_status_1",
        )
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path.strip()}",
        )
        client1.exec_command(
            sudo=True,
            cmd=
            f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 "
            f"--files 100 --files-per-dir 10 --dirs-per-dir 2 --top "
            f"{kernel_mounting_dir_1}",
            long_running=True,
        )
        snapshot = {
            "vol_name": default_fs,
            "subvol_name": "subvol_clone_status",
            "snap_name": "snap_1",
            "group_name": "subvolgroup_clone_status_1",
        }
        fs_util.create_snapshot(client1, **snapshot)
        clone_status_1 = {
            "vol_name": default_fs,
            "subvol_name": "subvol_clone_status",
            "snap_name": "snap_1",
            "target_subvol_name": "clone_status_1",
            "group_name": "subvolgroup_clone_status_1",
        }
        fs_util.create_clone(client1, **clone_status_1)
        transition_states = fs_util.validate_clone_state(
            client1, clone_status_1)
        valid_state_flow = [
            ["pending", "in-progress", "complete"],
            ["in-progress", "complete"],
        ]
        if transition_states not in valid_state_flow:
            return 1
        clonevol_path, rc = client1.exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} {clone_status_1['target_subvol_name']}",
        )
        fuse_mounting_dir_2 = f"/mnt/cephfs_fuse{mounting_dir}_2/"
        fs_util.fuse_mount(
            [client1],
            fuse_mounting_dir_2,
            extra_params=f" -r {clonevol_path.strip()}",
        )
        client1.exec_command(
            sudo=True,
            cmd=f"diff -qr {kernel_mounting_dir_1} {fuse_mounting_dir_2}")

        clone_status_2 = {
            "vol_name": default_fs,
            "subvol_name": "subvol_clone_status",
            "snap_name": "snap_1",
            "target_subvol_name": "clone_status_2",
            "group_name": "subvolgroup_clone_status_1",
            "target_group_name": "subvolgroup_clone_status_2",
        }
        fs_util.create_clone(client1, **clone_status_2)
        transition_states = fs_util.validate_clone_state(
            client1, clone_status_2)
        if transition_states not in valid_state_flow:
            return 1
        clonevol_path, rc = client1.exec_command(
            sudo=True,
            cmd=f"ceph fs subvolume getpath {default_fs} "
            f"{clone_status_2['target_subvol_name']} {clone_status_2['target_group_name']}",
        )
        fuse_mounting_dir_3 = f"/mnt/cephfs_fuse{mounting_dir}_3/"
        fs_util.fuse_mount(
            [client1],
            fuse_mounting_dir_3,
            extra_params=f" -r {clonevol_path.strip()}",
        )
        client1.exec_command(
            sudo=True,
            cmd=f"diff -qr {kernel_mounting_dir_1} {fuse_mounting_dir_3}")
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    finally:
        log.info("Clean Up in progess")
        rmclone_list = [
            {
                "vol_name": default_fs,
                "subvol_name": "clone_status_1"
            },
            {
                "vol_name": default_fs,
                "subvol_name": "clone_status_2",
                "group_name": "subvolgroup_clone_status_2",
            },
        ]
        for clone_vol in rmclone_list:
            fs_util.remove_subvolume(client1, **clone_vol)
        fs_util.remove_snapshot(client1, **snapshot)
        fs_util.remove_subvolume(client1, **subvolume)
        for subvolumegroup in subvolumegroup_list:
            fs_util.remove_subvolumegroup(client1,
                                          **subvolumegroup,
                                          force=True)
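
validate_clone_state above tracks the pending → in-progress → complete transitions; the underlying primitive is `ceph fs clone status`, which reports the current state as JSON. A minimal polling sketch, assuming the same client interface and that exec_command returns the command output as a string:

import json
import time

def wait_for_clone_complete(client, fs_name, clone, group=None, timeout=600):
    # Poll the clone state until it reaches a terminal value.
    cmd = f"ceph fs clone status {fs_name} {clone} --format json"
    if group:
        cmd += f" --group_name {group}"
    end_time = time.time() + timeout
    while time.time() < end_time:
        out, _ = client.exec_command(sudo=True, cmd=cmd)
        state = json.loads(out)["status"]["state"]
        if state == "complete":
            return
        if state == "failed":
            raise RuntimeError(f"clone {clone} failed")
        time.sleep(5)  # pending/in-progress: try again shortly
    raise TimeoutError(f"clone {clone} did not complete in {timeout}s")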
Example #12
0
def run(ceph_cluster, **kw):
    """
    Pre-requisites :
    1. Create fs volumes cephfs and cephfs-ec

    Subvolume Group Operations :
    1. ceph fs subvolumegroup create <vol_name> <group_name>
    2. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
    3. Mount subvolume on both fuse and kernel clients and run IO's
    4. Move data b/w FS created on Replicated Pool and EC_Pool

    Clean-up:
    1. Remove files from mountpoint, Unmount subvolumes.
    2. Remove the pools added as part of pool_layout
    3. ceph fs subvolume rm <vol_name> <subvol_name> [--group_name <subvol_group_name>]
    4. ceph fs subvolumegroup rm <vol_name> <group_name>
    """

    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        build = config.get("build", config.get("rhbuild"))
        clients = ceph_cluster.get_ceph_objects("client")
        log.info("checking Pre-requisites")
        if len(clients) < 2:
            log.info(
                f"This test requires a minimum of 2 client nodes. This run has only {len(clients)} clients"
            )
            return 1
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        default_fs = "cephfs"
        if build.startswith("4"):
            # create EC pool
            list_cmds = [
                "ceph fs flag set enable_multiple true",
                "ceph osd pool create cephfs-data-ec 64 erasure",
                "ceph osd pool create cephfs-metadata 64",
                "ceph osd pool set cephfs-data-ec allow_ec_overwrites true",
                "ceph fs new cephfs-ec cephfs-metadata cephfs-data-ec --force",
            ]
            if fs_util.get_fs_info(clients[0], "cephfs_new"):
                default_fs = "cephfs_new"
                list_cmds.append("ceph fs volume create cephfs")
            for cmd in list_cmds:
                clients[0].exec_command(sudo=True, cmd=cmd)

        log.info("Create SubVolumeGroups on each filesystem")
        subvolumegroup_list = [
            {
                "vol_name": default_fs,
                "group_name": "subvolgroup_1"
            },
            {
                "vol_name": "cephfs-ec",
                "group_name": "subvolgroup_ec1"
            },
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(clients[0], **subvolumegroup)

        log.info("Create 2 Sub volumes on each of the pool with Size 5GB")
        subvolume_list = [
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_1",
                "group_name": "subvolgroup_1",
                "size": "5368709120",
            },
            {
                "vol_name": "cephfs-ec",
                "subvol_name": "subvol_2",
                "group_name": "subvolgroup_ec1",
                "size": "5368709120",
            },
        ]
        for subvolume in subvolume_list:
            fs_util.create_subvolume(clients[0], **subvolume)

        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10)))
        log.info(
            "Mount 1 subvolume on kernel and 1 subvolume on Fuse → Client1")
        if build.startswith("5"):
            kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
            mon_node_ips = fs_util.get_mon_node_ips()
            log.info("Get the path of subvolume on default filesystem")
            subvol_path, rc = clients[0].exec_command(
                sudo=True,
                cmd=
                f"ceph fs subvolume getpath {default_fs} subvol_1 subvolgroup_1",
            )
            fs_util.kernel_mount(
                [clients[0]],
                kernel_mounting_dir_1,
                ",".join(mon_node_ips),
                sub_dir=f"{subvol_path.strip()}",
                extra_params=f",fs={default_fs}",
            )
            log.info("Get the path of subvolume on EC filesystem")
            fuse_mounting_dir_1 = f"/mnt/cephfs_fuse{mounting_dir}_EC_1/"
            subvol_path, rc = clients[0].exec_command(
                sudo=True,
                cmd=
                "ceph fs subvolume getpath cephfs-ec subvol_2 subvolgroup_ec1",
            )
            fs_util.fuse_mount(
                [clients[0]],
                fuse_mounting_dir_1,
                extra_params=f" -r {subvol_path.strip()} --client_fs cephfs-ec",
            )
        else:
            kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
            mon_node_ips = fs_util.get_mon_node_ips()
            log.info("Get the path of subvolume on default filesystem")
            subvol_path, rc = clients[0].exec_command(
                sudo=True,
                cmd=
                f"ceph fs subvolume getpath {default_fs} subvol_1 subvolgroup_1",
            )
            fs_util.kernel_mount(
                [clients[0]],
                kernel_mounting_dir_1,
                ",".join(mon_node_ips),
                sub_dir=f"{subvol_path.strip()}",
            )
            log.info("Get the path of subvolume on EC filesystem")
            fuse_mounting_dir_1 = f"/mnt/cephfs_fuse{mounting_dir}_EC_1/"
            subvol_path, rc = clients[0].exec_command(
                sudo=True,
                cmd=
                "ceph fs subvolume getpath cephfs-ec subvol_2 subvolgroup_ec1",
            )
            fs_util.fuse_mount(
                [clients[0]],
                fuse_mounting_dir_1,
                extra_params=f" -r {subvol_path.strip()}",
            )

        run_ios(clients[0],
                kernel_mounting_dir_1,
                file_name="dd_file1",
                bs="100M",
                count=20)
        run_ios(clients[0],
                fuse_mounting_dir_1,
                file_name="dd_file2",
                bs="100M",
                count=20)

        log.info(
            "Migrate data b/w EC pool and Replicated Pool and vice versa.")
        filepath1 = f"{kernel_mounting_dir_1}{clients[0].node.hostname}dd_file1"
        filepath2 = f"{fuse_mounting_dir_1}{clients[0].node.hostname}dd_file2"

        mv_bw_pools = [
            f"mv {filepath1} {fuse_mounting_dir_1}",
            f"mv {filepath2} {kernel_mounting_dir_1}",
        ]
        for cmd in mv_bw_pools:
            clients[0].exec_command(sudo=True, cmd=cmd)

        log.info("Confirm if data b/w pools are migrated")
        verify_data_movement = [
            f" ls -l {kernel_mounting_dir_1}{clients[0].node.hostname}dd_file2",
            f" ls -l {fuse_mounting_dir_1}{clients[0].node.hostname}dd_file1",
        ]
        for cmd in verify_data_movement:
            clients[0].exec_command(sudo=True, cmd=cmd)

        log.info("Clean up the system")
        fs_util.client_clean_up("umount",
                                kernel_clients=[clients[0]],
                                mounting_dir=kernel_mounting_dir_1)
        fs_util.client_clean_up("umount",
                                fuse_clients=[clients[0]],
                                mounting_dir=fuse_mounting_dir_1)
        clients[1].exec_command(
            sudo=True, cmd="ceph config set mon mon_allow_pool_delete true")

        for subvolume in subvolume_list:
            fs_util.remove_subvolume(clients[0], **subvolume)

        for subvolumegroup in subvolumegroup_list:
            fs_util.remove_subvolumegroup(clients[0], **subvolumegroup)
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
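
run_ios is called throughout these examples but its body is not shown. A plausible minimal sketch consistent with how it is invoked above (dd into a hostname-prefixed file, with bs/count controlling the volume written); the real framework helper may do more:

def run_ios(client, mounting_dir, file_name="dd_file", bs="10M", count=10):
    # Write one file into the mount so the backing data pool visibly
    # grows; the hostname prefix matches the paths the examples build,
    # e.g. f"{mounting_dir}{client.node.hostname}dd_file1".
    client.exec_command(
        sudo=True,
        cmd=f"dd if=/dev/urandom of={mounting_dir}{client.node.hostname}"
        f"{file_name} bs={bs} count={count}",
        long_running=True,
    )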
Example #13
0
def run(ceph_cluster, **kw):
    """
    Pre-requisites :
    1. Requires 2 client nodes in the setup

    Operations performed :
    1. Enable Multiple File systems 1. In Replicated 2. In EC
    2. Create 2 SubVolumeGroups on each file system
    3. Create 2 Sub volumes on each of the subvolume group Size 20 GB
    4. Create 2 sub volumes on default subvolume group
    5. Mount 1 subvolumegroup/subvolume on kernel and 1 subvolume on Fuse → Client1
    6. Mount 1 subvolume on kernel and 1 subvolumegroup/subvolume on Fuse → Client2
    7. On EC, mount 1 subvolumegroup/subvolume on kernel and 1 subvolume on Fuse → Client2
    8. On EC, mount 1 subvolume on kernel and 1 subvolumegroup/subvolume on Fuse → Client1
    9. Run IOs on subvolumegroup/subvolume on kernel client and subvolume on Fuse client using below commands
        git clone https://github.com/distributed-system-analysis/smallfile.git
        cd smallfile
        for i in create read append read delete create overwrite rename delete-renamed mkdir rmdir create symlink
        stat chmod ls-l delete cleanup  ;
        do python3 smallfile_cli.py --operation $i --threads 8 --file-size 10240 --files 100 --top /mnt/kcephfs/vol5/
        ; done
        IO Tool 2 :
        wget -O linux.tar.gz http://download.ceph.com/qa/linux-5.4.tar.gz
        tar -xzf linux.tar.gz tardir/ ; sleep 10 ; rm -rf  tardir/ ; sleep 10 ; done
        DD on Each volume:

        Wget :
       http://download.eng.bos.redhat.com/fedora/linux/releases/34/Server/x86_64/iso/Fedora-Server-dvd-x86_64-34-1.2.iso
       http://download.eng.bos.redhat.com/fedora/linux/releases/33/Server/x86_64/iso/Fedora-Server-dvd-x86_64-33-1.2.iso
       http://download.eng.bos.redhat.com/fedora/linux/releases/32/Server/x86_64/iso/Fedora-Server-dvd-x86_64-32-1.6.iso
        Note : Run 2 IO tools mentioned above on each volume mounted
    10. Create snapshots. Verify with snap ls | grep that the snapshots got created
    11. Create Clones on all 8 volumes. Verify the clones got created using clone ls (subvolume ls)
    12. Set file-level quota on 2 directories under subvolume and size-level quota on 2 directories
        under subvolume.
    13. Verify quota based on your configuration
    14. Clear Quotas
    15. Remove Clones
    16. Remove Snapshots
    17. Unmount
    18. Remove Volumes.

    Args:
        ceph_cluster:
        **kw:

    Returns:

    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        build = config.get("build", config.get("rhbuild"))
        clients = ceph_cluster.get_ceph_objects("client")
        log.info("checking Pre-requisites")
        if len(clients) < 2:
            log.info(
                f"This test requires a minimum of 2 client nodes. This run has only {len(clients)} clients"
            )
            return 1
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        default_fs = "cephfs"
        if build.startswith("4"):
            # create EC pool
            list_cmds = [
                "ceph fs flag set enable_multiple true",
                "ceph osd pool create cephfs-data-ec 64 erasure",
                "ceph osd pool create cephfs-metadata 64",
                "ceph osd pool set cephfs-data-ec allow_ec_overwrites true",
                "ceph fs new cephfs-ec cephfs-metadata cephfs-data-ec --force",
            ]
            if fs_util.get_fs_info(clients[0], "cephfs_new"):
                default_fs = "cephfs_new"
                list_cmds.append("ceph fs volume create cephfs")
            for cmd in list_cmds:
                clients[0].exec_command(sudo=True, cmd=cmd)
        log.info("Create 2 SubVolumeGroups on each file system")
        subvolumegroup_list = [
            {"vol_name": default_fs, "group_name": "subvolgroup_1"},
            {"vol_name": default_fs, "group_name": "subvolgroup_2"},
            {"vol_name": "cephfs-ec", "group_name": "subvolgroup_1"},
            {"vol_name": "cephfs-ec", "group_name": "subvolgroup_2"},
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(clients[0], **subvolumegroup)
        log.info("Create 2 Sub volumes on each of the subvolume group Size 20 GB")
        subvolume_list = [
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_1",
                "group_name": "subvolgroup_1",
                "size": "5368706371",
            },
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_2",
                "group_name": "subvolgroup_2",
                "size": "5368706371",
            },
            {
                "vol_name": "cephfs-ec",
                "subvol_name": "subvol_3",
                "group_name": "subvolgroup_1",
                "size": "5368706371",
            },
            {
                "vol_name": "cephfs-ec",
                "subvol_name": "subvol_4",
                "group_name": "subvolgroup_2",
                "size": "5368706371",
            },
            {"vol_name": default_fs, "subvol_name": "subvol_5", "size": "5368706371"},
            {"vol_name": default_fs, "subvol_name": "subvol_6", "size": "5368706371"},
            {"vol_name": "cephfs-ec", "subvol_name": "subvol_7", "size": "5368706371"},
            {"vol_name": "cephfs-ec", "subvol_name": "subvol_8", "size": "5368706371"},
            {"vol_name": default_fs, "subvol_name": "subvol_9", "size": "5368706371"},
            {"vol_name": "cephfs-ec", "subvol_name": "subvol_10", "size": "5368706371"},
        ]
        for subvolume in subvolume_list:
            fs_util.create_subvolume(clients[0], **subvolume)

        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10))
        )
        log.info(
            "Mount 1 subvolumegroup/subvolume on kernel and 1 subvolume on Fuse → Client1"
        )
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        log.info("Get the path of sub volume")
        subvol_path, rc = clients[0].exec_command(
            sudo=True,
            cmd=f"ceph fs subvolume getpath {default_fs} subvol_1 subvolgroup_1",
        )
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path.read().decode().strip()}",
        )

        subvol_path, rc = clients[0].exec_command(
            sudo=True, cmd=f"ceph fs subvolume getpath {default_fs} subvol_5"
        )
        fuse_mounting_dir_1 = f"/mnt/cephfs_fuse{mounting_dir}_1/"
        fs_util.fuse_mount(
            [clients[0]],
            fuse_mounting_dir_1,
            extra_params=f" -r {subvol_path.read().decode().strip()}",
        )

        log.info(
            "Mount 1 subvolume on kernel and 1 subvolumegroup/subvolume on Fuse → Client2"
        )
        kernel_mounting_dir_2 = f"/mnt/cephfs_kernel{mounting_dir}_2/"
        mon_node_ips = fs_util.get_mon_node_ips()
        log.info("Get the path of sub volume")

        subvol_path, rc = clients[1].exec_command(
            sudo=True, cmd=f"ceph fs subvolume getpath {default_fs} subvol_6"
        )
        fs_util.kernel_mount(
            [clients[1]],
            kernel_mounting_dir_2,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path.read().decode().strip()}",
        )

        subvol_path, rc = clients[1].exec_command(
            sudo=True,
            cmd=f"ceph fs subvolume getpath {default_fs} subvol_2 subvolgroup_2",
        )
        fuse_mounting_dir_2 = f"/mnt/cephfs_fuse{mounting_dir}_2/"
        fs_util.fuse_mount(
            [clients[1]],
            fuse_mounting_dir_2,
            extra_params=f" -r {subvol_path.read().decode().strip()}",
        )

        log.info(
            "On EC, mount 1 subvolumegroup/subvolume on kernel and 1 subvolume on Fuse → Client2"
        )
        if build.startswith("5"):
            kernel_mounting_dir_3 = f"/mnt/cephfs_kernel{mounting_dir}_EC_3/"
            mon_node_ips = fs_util.get_mon_node_ips()
            log.info("Get the path of sub volume")

            subvol_path, rc = clients[0].exec_command(
                sudo=True,
                cmd="ceph fs subvolume getpath cephfs-ec subvol_3 subvolgroup_1",
            )
            fs_util.kernel_mount(
                [clients[0]],
                kernel_mounting_dir_3,
                ",".join(mon_node_ips),
                sub_dir=f"{subvol_path.read().decode().strip()}",
                extra_params=",fs=cephfs-ec",
            )

            subvol_path, rc = clients[0].exec_command(
                sudo=True, cmd="ceph fs subvolume getpath cephfs-ec subvol_7"
            )
            fuse_mounting_dir_3 = f"/mnt/cephfs_fuse{mounting_dir}_EC_3/"
            fs_util.fuse_mount(
                [clients[0]],
                fuse_mounting_dir_3,
                extra_params=f" -r {subvol_path.read().decode().strip()} --client_fs cephfs-ec",
            )

        log.info(
            "On EC, mount 1 subvolume on kernel and 1 subvolumegroup/subvolume on Fuse → Client1"
        )
        if build.startswith("5"):
            kernel_mounting_dir_4 = f"/mnt/cephfs_kernel{mounting_dir}_EC_4/"
            mon_node_ips = fs_util.get_mon_node_ips()
            log.info("Get the path of sub volume")

            subvol_path, rc = clients[1].exec_command(
                sudo=True, cmd="ceph fs subvolume getpath cephfs-ec subvol_8"
            )
            fs_util.kernel_mount(
                [clients[1]],
                kernel_mounting_dir_4,
                ",".join(mon_node_ips),
                sub_dir=f"{subvol_path.read().decode().strip()}",
                extra_params=",fs=cephfs-ec",
            )

            subvol_path, rc = clients[1].exec_command(
                sudo=True,
                cmd="ceph fs subvolume getpath cephfs-ec subvol_4 subvolgroup_2",
            )
            fuse_mounting_dir_4 = f"/mnt/cephfs_fuse{mounting_dir}_EC_4/"
            fs_util.fuse_mount(
                [clients[1]],
                fuse_mounting_dir_4,
                extra_params=f" -r {subvol_path.read().decode().strip()} --client_fs cephfs-ec",
            )

        run_ios(clients[0], kernel_mounting_dir_1)
        run_ios(clients[0], fuse_mounting_dir_1)
        run_ios(clients[1], kernel_mounting_dir_2)
        run_ios(clients[1], fuse_mounting_dir_2)
        if build.startswith("5"):
            run_ios(clients[0], kernel_mounting_dir_3)
            run_ios(clients[1], kernel_mounting_dir_4)
            run_ios(clients[0], fuse_mounting_dir_3)
            run_ios(clients[1], fuse_mounting_dir_4)

        log.info("Create Snapshots.Verify the snap ls")
        snapshot_list = [
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_1",
                "snap_name": "snap_1",
                "group_name": "subvolgroup_1",
            },
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_2",
                "snap_name": "snap_2",
                "group_name": "subvolgroup_2",
            },
            {
                "vol_name": "cephfs-ec",
                "subvol_name": "subvol_3",
                "snap_name": "snap_3",
                "group_name": "subvolgroup_1",
            },
            {
                "vol_name": "cephfs-ec",
                "subvol_name": "subvol_4",
                "snap_name": "snap_4",
                "group_name": "subvolgroup_2",
            },
            {"vol_name": default_fs, "subvol_name": "subvol_5", "snap_name": "snap_5"},
            {"vol_name": default_fs, "subvol_name": "subvol_6", "snap_name": "snap_6"},
            {"vol_name": "cephfs-ec", "subvol_name": "subvol_7", "snap_name": "snap_7"},
            {"vol_name": "cephfs-ec", "subvol_name": "subvol_8", "snap_name": "snap_8"},
        ]
        for snapshot in snapshot_list:
            fs_util.create_snapshot(clients[0], **snapshot)

        clone_list = [
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_1",
                "snap_name": "snap_1",
                "target_subvol_name": "clone_1",
                "group_name": "subvolgroup_1",
            },
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_2",
                "snap_name": "snap_2",
                "target_subvol_name": "clone_2",
                "group_name": "subvolgroup_2",
            },
            {
                "vol_name": "cephfs-ec",
                "subvol_name": "subvol_3",
                "snap_name": "snap_3",
                "target_subvol_name": "clone_3",
                "group_name": "subvolgroup_1",
            },
            {
                "vol_name": "cephfs-ec",
                "subvol_name": "subvol_4",
                "snap_name": "snap_4",
                "target_subvol_name": "clone_4",
                "group_name": "subvolgroup_2",
            },
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_5",
                "snap_name": "snap_5",
                "target_subvol_name": "clone_5",
            },
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_6",
                "snap_name": "snap_6",
                "target_subvol_name": "clone_6",
            },
            {
                "vol_name": "cephfs-ec",
                "subvol_name": "subvol_7",
                "snap_name": "snap_7",
                "target_subvol_name": "clone_7",
            },
            {
                "vol_name": "cephfs-ec",
                "subvol_name": "subvol_8",
                "snap_name": "snap_8",
                "target_subvol_name": "clone_8",
            },
        ]
        for clone in clone_list:
            fs_util.create_clone(clients[0], **clone)
        log.info(
            "Set file-level quota on 2 directories under subvolume and size-level quota "
            "on 2 directories under subvolume"
        )
        subvol_path, rc = clients[0].exec_command(
            sudo=True, cmd=f"ceph fs subvolume getpath {default_fs} subvol_9"
        )
        fuse_mounting_dir_5 = f"/mnt/cephfs_fuse{mounting_dir}_5/"
        fs_util.fuse_mount(
            [clients[1]],
            fuse_mounting_dir_5,
            extra_params=f" -r {subvol_path.read().decode().strip()}",
        )
        clients[1].exec_command(
            sudo=True,
            cmd=f"setfattr -n ceph.quota.max_files -v 10 {fuse_mounting_dir_5}",
        )
        clients[1].exec_command(
            sudo=True, cmd=f"getfattr -n ceph.quota.max_files {fuse_mounting_dir_5}"
        )
        out, rc = clients[1].exec_command(
            sudo=True,
            cmd=f"cd {fuse_mounting_dir_5};touch quota{{1..15}}.txt",
        )
        log.info(out)
        if clients[1].node.exit_status == 0:
            log.warning(
                "Quota set has failed; able to create more files. "
                "This is a known limitation"
            )
        if build.startswith("5"):
            subvol_path, rc = clients[0].exec_command(
                sudo=True, cmd="ceph fs subvolume getpath cephfs-ec subvol_10"
            )
            kernel_mounting_dir_5 = f"/mnt/cephfs_kernel{mounting_dir}_5/"
            fs_util.kernel_mount(
                [clients[1]],
                kernel_mounting_dir_5,
                ",".join(mon_node_ips),
                sub_dir=f"{subvol_path.read().decode().strip()}",
                extra_params=",fs=cephfs-ec",
            )
            clients[1].exec_command(
                sudo=True,
                cmd=f"setfattr -n ceph.quota.max_files -v 10 {kernel_mounting_dir_5}",
            )
            clients[1].exec_command(
                sudo=True,
                cmd=f"getfattr -n ceph.quota.max_files {kernel_mounting_dir_5}",
            )

            out, rc = clients[1].exec_command(
                sudo=True,
                cmd=f"cd {kernel_mounting_dir_5};touch quota{{1..15}}.txt",
            )
            log.info(out)
            if clients[1].node.exit_status == 0:
                log.warning(
                    "Quota set has failed; able to create more files. "
                    "This is a known limitation"
                )
                # return 1

        log.info("Clean up the system")
        fs_util.client_clean_up(
            "umount", kernel_clients=[clients[0]], mounting_dir=kernel_mounting_dir_1
        )
        fs_util.client_clean_up(
            "umount", kernel_clients=[clients[1]], mounting_dir=kernel_mounting_dir_2
        )
        if build.startswith("5"):
            fs_util.client_clean_up(
                "umount",
                kernel_clients=[clients[0]],
                mounting_dir=kernel_mounting_dir_3,
            )

            fs_util.client_clean_up(
                "umount",
                kernel_clients=[clients[1]],
                mounting_dir=kernel_mounting_dir_4,
            )
            fs_util.client_clean_up(
                "umount",
                kernel_clients=[clients[1]],
                mounting_dir=kernel_mounting_dir_5,
            )
        fs_util.client_clean_up(
            "umount", fuse_clients=[clients[0]], mounting_dir=fuse_mounting_dir_1
        )
        fs_util.client_clean_up(
            "umount", fuse_clients=[clients[1]], mounting_dir=fuse_mounting_dir_2
        )
        if build.startswith("5"):
            fs_util.client_clean_up(
                "umount", fuse_clients=[clients[0]], mounting_dir=fuse_mounting_dir_3
            )
            fs_util.client_clean_up(
                "umount", fuse_clients=[clients[1]], mounting_dir=fuse_mounting_dir_4
            )
        fs_util.client_clean_up(
            "umount", fuse_clients=[clients[1]], mounting_dir=fuse_mounting_dir_5
        )
        clients[1].exec_command(
            sudo=True, cmd="ceph config set mon mon_allow_pool_delete true"
        )
        rmsnapshot_list = [
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_1",
                "snap_name": "snap_1",
                "group_name": "subvolgroup_1",
            },
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_2",
                "snap_name": "snap_2",
                "group_name": "subvolgroup_2",
            },
            {
                "vol_name": "cephfs-ec",
                "subvol_name": "subvol_3",
                "snap_name": "snap_3",
                "group_name": "subvolgroup_1",
            },
            {
                "vol_name": "cephfs-ec",
                "subvol_name": "subvol_4",
                "snap_name": "snap_4",
                "group_name": "subvolgroup_2",
            },
            {"vol_name": default_fs, "subvol_name": "subvol_5", "snap_name": "snap_5"},
            {"vol_name": default_fs, "subvol_name": "subvol_6", "snap_name": "snap_6"},
            {"vol_name": "cephfs-ec", "subvol_name": "subvol_7", "snap_name": "snap_7"},
            {"vol_name": "cephfs-ec", "subvol_name": "subvol_8", "snap_name": "snap_8"},
        ]
        for snapshot in rmsnapshot_list:
            fs_util.remove_snapshot(clients[0], **snapshot)

        rmclone_list = [
            {"vol_name": default_fs, "subvol_name": "clone_1"},
            {"vol_name": default_fs, "subvol_name": "clone_2"},
            {"vol_name": "cephfs-ec", "subvol_name": "clone_3"},
            {"vol_name": "cephfs-ec", "subvol_name": "clone_4"},
            {"vol_name": default_fs, "subvol_name": "clone_5"},
            {"vol_name": default_fs, "subvol_name": "clone_6"},
            {"vol_name": "cephfs-ec", "subvol_name": "clone_7"},
            {"vol_name": "cephfs-ec", "subvol_name": "clone_8"},
        ]
        rmsubvolume_list = rmclone_list + subvolume_list

        for subvolume in rmsubvolume_list:
            fs_util.remove_subvolume(clients[0], **subvolume)

        for subvolumegroup in subvolumegroup_list:
            fs_util.remove_subvolumegroup(clients[0], **subvolumegroup)
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
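
Step 10 of the flow above ("snap ls | grep") can be made explicit. A minimal sketch, assuming the same client interface and string output from exec_command:

def snapshot_exists(client, fs_name, subvol, snap_name, group=None):
    # List the subvolume's snapshots and check the expected name appears.
    cmd = f"ceph fs subvolume snapshot ls {fs_name} {subvol}"
    if group:
        cmd += f" --group_name {group}"
    out, _ = client.exec_command(sudo=True, cmd=cmd)
    return snap_name in out

# e.g. assert snapshot_exists(clients[0], "cephfs", "subvol_1",
#                             "snap_1", group="subvolgroup_1")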
Example #14
0
def run(ceph_cluster, **kw):
    """
    pre-requisites:
    1. Create cephfs subvolumegroup
    2. Create cephfs subvolume in subvolume group with default permission
    3. Create cephfs subvolumes in subvolumegroup with different permission
    Test operation:
    1. Get path of all created subvolumes
    2. Remove all the subvolumes
    3. Remove the subvolumegroup
    """
    try:
        tc = "CEPH-83574190"
        log.info(f"Running CephFS tests for Polarion ID -{tc}")
        fs_util = FsUtils(ceph_cluster)
        clients = ceph_cluster.get_ceph_objects("client")
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        fs_util.auth_list([client1])
        vol_name_rand = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(5)))
        subvol_group = f"subvolume_groupname_{vol_name_rand}"
        subvol_default = f"subvol_default_{vol_name_rand}"
        subvol_different = f"subvol_different_{vol_name_rand}"
        fs_util.create_subvolumegroup(client1, "cephfs", subvol_group)
        fs_util.create_subvolume(client1,
                                 "cephfs",
                                 subvol_default,
                                 group_name=subvol_group)
        fs_util.create_subvolume(client1,
                                 "cephfs",
                                 subvol_different,
                                 group_name=subvol_group,
                                 mode=777)
        out1 = fs_util.get_subvolume_info(client1,
                                          "cephfs",
                                          subvol_default,
                                          group_name=subvol_group)
        out2 = fs_util.get_subvolume_info(client1,
                                          "cephfs",
                                          subvol_different,
                                          group_name=subvol_group)
        out3, err3 = client1.exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath cephfs {subvol_default} --group_name {subvol_group}",
        )
        out4, err4 = client1.exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath cephfs {subvol_different} --group_name {subvol_group}",
        )
        mode1 = out1["mode"]
        mode2 = out2["mode"]
        if str(mode1) != "16877" or str(
                mode2) != "16895" or out3 == 1 or out4 == 1:
            return 1
        fs_util.remove_subvolume(client1,
                                 "cephfs",
                                 subvol_default,
                                 group_name=subvol_group)
        fs_util.remove_subvolume(client1,
                                 "cephfs",
                                 subvol_different,
                                 group_name=subvol_group)
        fs_util.remove_subvolumegroup(client1,
                                      "cephfs",
                                      subvol_group,
                                      force=True)
        return 0

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
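
The literals 16877 and 16895 compared above are decimal st_mode values for directories: 0o40755 for the default subvolume and 0o40777 for the one created with mode=777. A two-line check shows the decoding:

import stat

# S_IFDIR (0o40000) plus the permission bits reported by subvolume info.
assert 16877 == stat.S_IFDIR | 0o755  # default subvolume: drwxr-xr-x
assert 16895 == stat.S_IFDIR | 0o777  # mode=777 subvolume: drwxrwxrwx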
Example #15
0
def run(ceph_cluster, **kw):
    """
     Test Cases Covered:
     CEPH-83573255	Try renaming the snapshot directory and rollback. Create a FS and
                     create 10 directories and mount them on kernel client and fuse client(5 mounts each)
                     Add data (~ 100 GB). Create a Snapshot and verify the content in snap directory.
                     Try modifying the snapshot name.

     Pre-requisites :
     1. We need at least one client node to execute this test case
     2. Create fs volume cephfs if the volume is not there
     3. ceph fs subvolumegroup create <vol_name> <group_name> --pool_layout <data_pool_name>
         Ex : ceph fs subvolumegroup create cephfs subvol_rename_sanp_1
     4. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
        [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
        Ex: ceph fs subvolume create cephfs subvol_2 --size 5368706371 --group_name subvolgroup_
    5. Create snapshot of the subvolume
         Ex: ceph fs subvolume snapshot create cephfs subvol_2 snap_1 --group_name subvol_rename_sanp_1

     Script Flow:
     1. Mount the subvolume on the client using fuse mount.
     2. Rename the snapshot snap/_snap_1 to snap/_snap_rename.
    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if len(clients) < 1:
            log.info(
                f"This test requires a minimum of 1 client node. This run has only {len(clients)} clients"
            )
            return 1
        default_fs = "cephfs"
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10)))
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        subvolumegroup_list = [
            {
                "vol_name": default_fs,
                "group_name": "subvol_rename_sanp_1"
            },
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(client1, **subvolumegroup)
        subvolume = {
            "vol_name": default_fs,
            "subvol_name": "subvol_rename_sanp",
            "group_name": "subvol_rename_sanp_1",
            "size": "5368706371",
        }
        fs_util.create_subvolume(client1, **subvolume)
        log.info("Get the path of sub volume")
        subvol_path, rc = client1.exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_rename_sanp subvol_rename_sanp_1",
        )
        subvol_path = subvol_path.read().decode().strip()
        fuse_mounting_dir_1 = f"/mnt/cephfs_fuse{mounting_dir}_1/"
        fs_util.fuse_mount(
            [client1],
            fuse_mounting_dir_1,
            extra_params=f" -r {subvol_path}",
        )
        fs_util.create_file_data(client1, fuse_mounting_dir_1, 3, "snap1",
                                 "data_from_fuse_mount ")
        fuse_snapshot = {
            "vol_name": default_fs,
            "subvol_name": "subvol_rename_sanp",
            "snap_name": "snap_1",
            "group_name": "subvol_rename_sanp_1",
        }
        fs_util.create_snapshot(client1, **fuse_snapshot)
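        # The subvolume snapshot is taken above the mounted data directory, so
        # inside the mount it shows up in .snap with an underscore prefix and
        # an inode suffix ("_<snap_name>_<inode>"), hence the glob below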
        out, rc = client1.exec_command(
            sudo=True,
            cmd=
            f"cd {fuse_mounting_dir_1};mv .snap/_snap_1_* .snap/_snap_rename_",
            check_ec=False,
        )
        if rc == 0:
            raise CommandFailed(
                "we are able to rename the snap directory.. which is not correct"
            )
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    finally:
        log.info("Clean Up in progess")
        fs_util.remove_snapshot(client1, **fuse_snapshot)
        fs_util.remove_subvolume(client1, **subvolume)
        for subvolumegroup in subvolumegroup_list:
            fs_util.remove_subvolumegroup(client1,
                                          **subvolumegroup,
                                          force=True)
Example #16
def run(ceph_cluster, **kw):
    """
    Pre-requisites :
    1. create fs volume create cephfs and cephfs-ec

    Subvolume Group Operations :
    1. ceph fs subvolumegroup create <vol_name> <group_name>
    2. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
    3. Mount subvolume on both fuse and kernel clients and run IOs
    4. ceph fs subvolume resize <vol_name> <subvolume_name> <new_size> [--group_name <subvol_group>]
    5. Mount subvolume on both fuse and kernel clients and run IOs
    6. Validate the resize.

    Clean-up:
    1. Remove files from mountpoint, Unmount subvolumes.
    2. Remove the pools added as part of pool_layout
    3. ceph fs subvolume rm <vol_name> <subvol_name> [--group_name <subvol_group_name>]
    4. ceph fs subvolumegroup rm <vol_name> <group_name>
    """

    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        build = config.get("build", config.get("rhbuild"))
        clients = ceph_cluster.get_ceph_objects("client")
        log.info("checking Pre-requisites")
        if len(clients) < 2:
            log.info(
                f"This test requires minimum 2 client nodes.This has only {len(clients)} clients"
            )
            return 1
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        default_fs = "cephfs"
        if build.startswith("4"):
            # create EC pool
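            # CephFS can use an erasure-coded pool as a data pool only when
            # allow_ec_overwrites is enabled on that pool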
            list_cmds = [
                "ceph fs flag set enable_multiple true",
                "ceph osd pool create cephfs-data-ec 64 erasure",
                "ceph osd pool create cephfs-metadata 64",
                "ceph osd pool set cephfs-data-ec allow_ec_overwrites true",
                "ceph fs new cephfs-ec cephfs-metadata cephfs-data-ec --force",
            ]
            if fs_util.get_fs_info(clients[0], "cephfs_new"):
                default_fs = "cephfs_new"
                list_cmds.append("ceph fs volume create cephfs")
            for cmd in list_cmds:
                clients[0].exec_command(sudo=True, cmd=cmd)

        log.info("Create SubVolumeGroups on each filesystem")
        subvolumegroup_list = [
            {
                "vol_name": default_fs,
                "group_name": "subvolgroup_1"
            },
            {
                "vol_name": "cephfs-ec",
                "group_name": "subvolgroup_ec1"
            },
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(clients[0], **subvolumegroup)

        log.info(
            "Create 2 Sub volumes on each of the subvolume group with Size 2GB"
        )
        subvolume_list = [
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_1",
                "group_name": "subvolgroup_1",
                "size": "2147483648",
            },
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_2",
                "group_name": "subvolgroup_1",
                "size": "2147483648",
            },
            {
                "vol_name": "cephfs-ec",
                "subvol_name": "subvol_3",
                "group_name": "subvolgroup_ec1",
                "size": "2147483648",
            },
            {
                "vol_name": "cephfs-ec",
                "subvol_name": "subvol_4",
                "group_name": "subvolgroup_ec1",
                "size": "2147483648",
            },
        ]
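        # 2147483648 bytes == 2 GiB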
        for subvolume in subvolume_list:
            fs_util.create_subvolume(clients[0], **subvolume)

        log.info("Validate the Subvolume Size")
        for subvolume in subvolume_list:
            subvolume_size_subvol = fs_util.get_subvolume_info(
                client=clients[0], **subvolume)
            if int(subvolume.get("size", "infinite")) != int(
                    subvolume_size_subvol["bytes_quota"]):
                log.error(
                    f"Size mismatch for {subvolume.get('subvol_name')}. "
                    f"Expected size: {subvolume.get('size', 'infinite')}, "
                    f"Actual size: {subvolume_size_subvol['bytes_quota']}")
                return 1

        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10)))
        log.info(
            "Mount 1 subvolumegroup/subvolume on kernel and 1 subvloume on Fuse → Client1"
        )
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        log.info("Get the path of sub volume")
        subvol_path, rc = clients[0].exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_1 subvolgroup_1",
        )
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path.read().decode().strip()}",
        )

        subvol_path, rc = clients[0].exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_2 subvolgroup_1",
        )
        fuse_mounting_dir_1 = f"/mnt/cephfs_fuse{mounting_dir}_1/"
        fs_util.fuse_mount(
            [clients[0]],
            fuse_mounting_dir_1,
            extra_params=f" -r {subvol_path.read().decode().strip()}",
        )

        log.info(
            "On EC,Mount 1 subvolumegroup/subvolume on kernal and 1 subvloume on Fuse → Client2"
        )
        if build.startswith("5"):
            kernel_mounting_dir_2 = f"/mnt/cephfs_kernel{mounting_dir}_EC_1/"
            mon_node_ips = fs_util.get_mon_node_ips()
            log.info("Get the path of sub volume")

            subvol_path, rc = clients[1].exec_command(
                sudo=True,
                cmd=
                "ceph fs subvolume getpath cephfs-ec subvol_3 subvolgroup_ec1",
            )
            fs_util.kernel_mount(
                [clients[1]],
                kernel_mounting_dir_2,
                ",".join(mon_node_ips),
                sub_dir=f"{subvol_path.read().decode().strip()}",
                extra_params=",fs=cephfs-ec",
            )

            subvol_path, rc = clients[0].exec_command(
                sudo=True,
                cmd=
                "ceph fs subvolume getpath cephfs-ec subvol_4 subvolgroup_ec1",
            )
            fuse_mounting_dir_2 = f"/mnt/cephfs_fuse{mounting_dir}_EC_1/"
            fs_util.fuse_mount(
                [clients[1]],
                fuse_mounting_dir_2,
                extra_params=
                f" -r {subvol_path.read().decode().strip()} --client_fs cephfs-ec",
            )

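        # 20 x 100M ≈ 2 GB per mount, staying just under the initial 2 GiB quota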
        run_ios(
            clients[0],
            kernel_mounting_dir_1,
            file_name="dd_before",
            bs="100M",
            count=20,
        )
        run_ios(clients[0],
                fuse_mounting_dir_1,
                file_name="dd_before",
                bs="100M",
                count=20)
        # kernel_mounting_dir_2 and fuse_mounting_dir_2 are created only on
        # 5.x builds above, so guard their IO runs the same way
        if build.startswith("5"):
            run_ios(
                clients[1],
                kernel_mounting_dir_2,
                file_name="dd_before",
                bs="100M",
                count=20,
            )
            run_ios(clients[1],
                    fuse_mounting_dir_2,
                    file_name="dd_before",
                    bs="100M",
                    count=20)

        log.info("Resize the subvolumes to 5GB and add data more than 2GB")
        resize_subvolumes = [
            f"ceph fs subvolume resize {default_fs} subvol_1 5368709120 --group_name subvolgroup_1",
            f"ceph fs subvolume resize {default_fs} subvol_2 5368709120 --group_name subvolgroup_1",
            "ceph fs subvolume resize cephfs-ec subvol_3 5368709120 --group_name subvolgroup_ec1",
            "ceph fs subvolume resize cephfs-ec subvol_4 5368709120 --group_name subvolgroup_ec1",
        ]
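        # 5368709120 bytes == 5 GiB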
        for cmd in resize_subvolumes:
            clients[0].exec_command(sudo=True, cmd=cmd)

        log.info("Validate the Subvolume after Resize")
        for subvolume in subvolume_list:
            subvolume_size_subvol = fs_util.get_subvolume_info(
                client=clients[0], **subvolume)
            if 5368709120 != int(subvolume_size_subvol["bytes_quota"]):
                log.error(
                    f"Size mismatch for {subvolume.get('subvol_name')}. Expected size: 5368709120, "
                    f"Actual size: {subvolume_size_subvol['bytes_quota']}")
                return 1

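        # 30 x 100M ≈ 3 GB per mount: above the old 2 GiB quota, within the new 5 GiB one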
        run_ios(clients[0],
                kernel_mounting_dir_1,
                file_name="dd_after",
                bs="100M",
                count=30)
        run_ios(clients[0],
                fuse_mounting_dir_1,
                file_name="dd_after",
                bs="100M",
                count=30)
        # The EC mounts again exist only on 5.x builds
        if build.startswith("5"):
            run_ios(clients[1],
                    kernel_mounting_dir_2,
                    file_name="dd_after",
                    bs="100M",
                    count=30)
            run_ios(clients[1],
                    fuse_mounting_dir_2,
                    file_name="dd_after",
                    bs="100M",
                    count=30)

        log.info("Clean up the system")
        fs_util.client_clean_up("umount",
                                kernel_clients=[clients[0]],
                                mounting_dir=kernel_mounting_dir_1)
        fs_util.client_clean_up("umount",
                                fuse_clients=[clients[0]],
                                mounting_dir=fuse_mounting_dir_1)
        # Clean up the EC mounts only where they were created (5.x builds)
        if build.startswith("5"):
            fs_util.client_clean_up("umount",
                                    kernel_clients=[clients[1]],
                                    mounting_dir=kernel_mounting_dir_2)
            fs_util.client_clean_up("umount",
                                    fuse_clients=[clients[1]],
                                    mounting_dir=fuse_mounting_dir_2)

        clients[1].exec_command(
            sudo=True, cmd="ceph config set mon mon_allow_pool_delete true")

        for subvolume in subvolume_list:
            fs_util.remove_subvolume(clients[0], **subvolume)

        for subvolumegroup in subvolumegroup_list:
            fs_util.remove_subvolumegroup(clients[0], **subvolumegroup)
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1